<!-- zrc_abx2/cpc/README.md -->

# Repository's architecture

- train.py: main script
- dataset.py: definition of the LibriSpeech dataset format
- model.py: basic encoders and AR models
- feature_loader.py: different tools to load and save a CPC model (see the usage sketch below)
- transformers.py: an implementation of transformers
- unit_tests.py: unit tests
- criterion/: definition of the training criteria. Three criteria are currently available: CPC (unsupervised), speaker classification, and phone classification.
- eval/: evaluation scripts.
- utils/: system utilities and misc.
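
A minimal sketch of how these pieces fit together (the paths and checkpoint
name are hypothetical; `loadModel`, `FeatureModule` and `buildFeature` are
defined in feature_loader.py):

```python
from zrc_abx2.cpc.feature_loader import loadModel, FeatureModule, buildFeature

# Load a trained CPC checkpoint (hypothetical path) and wrap it.
model, hidden_gar, hidden_enc = loadModel(["/path/to/checkpoint_60.pt"])
feature_module = FeatureModule(model, get_encoded=False)
feature_module.eval()

# Context features for one utterance: a 1 x n_frames x hidden_gar tensor.
features = buildFeature(feature_module, "/path/to/utterance.flac", seqNorm=True)
```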

# zrc_abx2/cpc/feature_loader.py
import torch
import torchaudio
import os
import json
import argparse
from .cpc_default_config import get_default_cpc_config
from .dataset import parseSeqLabels
from .model import CPCModel, ConcatenatedModel
class FeatureModule(torch.nn.Module):
r"""
A simpler interface to handle CPC models. Useful for a smooth workflow when
working with CPC trained features.
"""
def __init__(self, featureMaker, get_encoded, collapse=False):
super(FeatureModule, self).__init__()
self.get_encoded = get_encoded
self.featureMaker = featureMaker
self.collapse = collapse
def getDownsamplingFactor(self):
return self.featureMaker.gEncoder.DOWNSAMPLING
def forward(self, data):
batchAudio, label = data
if next(self.featureMaker.parameters()).is_cuda:
cFeature, encoded, _ = self.featureMaker(batchAudio.cuda(), label)
else:
cFeature, encoded, _ = self.featureMaker(batchAudio, label)
if self.get_encoded:
cFeature = encoded
if self.collapse:
cFeature = cFeature.contiguous().view(-1, cFeature.size(2))
return cFeature
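
# Usage sketch (featureMaker is typically a CPCModel returned by loadModel,
# defined below): forward expects a (batchAudio, label) pair with batchAudio
# of size B x 1 x T and returns features of size B x (T / downsampling) x D,
# or (B * T / downsampling) x D when collapse=True.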
class ModelPhoneCombined(torch.nn.Module):
r"""
Concatenates a CPC feature maker and a phone predictor.
"""
def __init__(self, model, criterion, oneHot):
r"""
Arguments:
model (FeatureModule): feature maker
criterion (PhoneCriterion): phone predictor
oneHot (bool): set to True to get a one hot output
"""
super(ModelPhoneCombined, self).__init__()
self.model = model
self.criterion = criterion
self.oneHot = oneHot
def getDownsamplingFactor(self):
return self.model.getDownsamplingFactor()
def forward(self, data):
c_feature = self.model(data)
pred = self.criterion.getPrediction(c_feature)
P = pred.size(2)
if self.oneHot:
pred = pred.argmax(dim=2)
pred = toOneHot(pred, P)
else:
pred = torch.nn.functional.softmax(pred, dim=2)
return pred
def loadArgs(args, locArgs, forbiddenAttr=None):
for k, v in vars(locArgs).items():
if forbiddenAttr is not None:
if k not in forbiddenAttr:
setattr(args, k, v)
else:
setattr(args, k, v)
def loadSupervisedCriterion(pathCheckpoint):
from .criterion import CTCPhoneCriterion, PhoneCriterion
*_, args = getCheckpointData(os.path.dirname(pathCheckpoint))
_, nPhones = parseSeqLabels(args.pathPhone)
if args.CTC:
criterion = CTCPhoneCriterion(args.hiddenGar if not args.onEncoder
else args.hiddenEncoder,
nPhones, args.onEncoder)
else:
criterion = PhoneCriterion(args.hiddenGar, nPhones, args.onEncoder)
state_dict = torch.load(pathCheckpoint)
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion, nPhones
def getCheckpointData(pathDir):
if not os.path.isdir(pathDir):
return None
checkpoints = [x for x in os.listdir(pathDir)
if os.path.splitext(x)[1] == '.pt'
and os.path.splitext(x[11:])[0].isdigit()]
if len(checkpoints) == 0:
print("No checkpoints found at " + pathDir)
return None
checkpoints.sort(key=lambda x: int(os.path.splitext(x[11:])[0]))
data = os.path.join(pathDir, checkpoints[-1])
if os.path.exists(os.path.join(pathDir, 'checkpoint_logs.json')):
with open(os.path.join(pathDir, 'checkpoint_logs.json'), 'rb') as file:
logs = json.load(file)
else:
logs = None
with open(os.path.join(pathDir, 'checkpoint_args.json'), 'rb') as file:
args = json.load(file)
args = argparse.Namespace(**args)
defaultArgs = get_default_cpc_config()
loadArgs(defaultArgs, args)
return os.path.abspath(data), logs, defaultArgs
def getEncoder(args):
if args.encoder_type == 'mfcc':
from .model import MFCCEncoder
return MFCCEncoder(args.hiddenEncoder)
elif args.encoder_type == 'lfb':
from .model import LFBEnconder
return LFBEnconder(args.hiddenEncoder)
else:
from .model import CPCEncoder
return CPCEncoder(args.hiddenEncoder, args.normMode)
def getAR(args):
if args.arMode == 'transformer':
from .transformers import buildTransformerAR
arNet = buildTransformerAR(args.hiddenEncoder, 1,
args.sizeWindow // 160, args.abspos)
args.hiddenGar = args.hiddenEncoder
elif args.arMode == 'no_ar':
from .model import NoAr
arNet = NoAr()
else:
from .model import CPCAR
arNet = CPCAR(args.hiddenEncoder, args.hiddenGar,
args.samplingType == "sequential",
args.nLevelsGRU,
mode=args.arMode,
reverse=args.cpc_mode == "reverse")
return arNet
def loadModel(pathCheckpoints, loadStateDict=True, updateConfig=None):
models = []
hiddenGar, hiddenEncoder = 0, 0
for path in pathCheckpoints:
print(f"Loading checkpoint {path}")
_, _, locArgs = getCheckpointData(os.path.dirname(path))
doLoad = locArgs.load is not None and \
(len(locArgs.load) > 1 or
os.path.dirname(locArgs.load[0]) != os.path.dirname(path))
if updateConfig is not None and not doLoad:
print(f"Updating the configuartion file with ")
print(f'{json.dumps(vars(updateConfig), indent=4, sort_keys=True)}')
loadArgs(locArgs, updateConfig)
if doLoad:
m_, hg, he = loadModel(locArgs.load,
loadStateDict=False,
updateConfig=updateConfig)
hiddenGar += hg
hiddenEncoder += he
else:
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
m_ = CPCModel(encoderNet, arNet)
if loadStateDict:
print(f"Loading the state dict at {path}")
state_dict = torch.load(path, 'cpu')
m_.load_state_dict(state_dict["gEncoder"], strict=False)
if not doLoad:
hiddenGar += locArgs.hiddenGar
hiddenEncoder += locArgs.hiddenEncoder
models.append(m_)
if len(models) == 1:
return models[0], hiddenGar, hiddenEncoder
return ConcatenatedModel(models), hiddenGar, hiddenEncoder
def get_module(i_module):
if isinstance(i_module, torch.nn.DataParallel):
return get_module(i_module.module)
if isinstance(i_module, FeatureModule):
        # FeatureModule stores its wrapped model as .featureMaker, not .module
        return get_module(i_module.featureMaker)
return i_module
def save_checkpoint(model_state, criterion_state, optimizer_state, best_state,
path_checkpoint):
state_dict = {"gEncoder": model_state,
"cpcCriterion": criterion_state,
"optimizer": optimizer_state,
"best": best_state}
torch.save(state_dict, path_checkpoint)
def toOneHot(inputVector, nItems):
batchSize, seqSize = inputVector.size()
out = torch.zeros((batchSize, seqSize, nItems),
device=inputVector.device, dtype=torch.long)
out.scatter_(2, inputVector.view(batchSize, seqSize, 1), 1)
return out
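
# e.g. toOneHot(torch.tensor([[2, 0]]), nItems=3) returns the 1 x 2 x 3 tensor
# [[[0, 0, 1], [1, 0, 0]]] (long dtype, on the input's device).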
def seqNormalization(out):
# out.size() = Batch x Seq x Channels
mean = out.mean(dim=1, keepdim=True)
var = out.var(dim=1, keepdim=True)
return (out - mean) / torch.sqrt(var + 1e-08)
def buildFeature(featureMaker, seqPath, strict=False,
maxSizeSeq=64000, seqNorm=False):
r"""
Apply the featureMaker to the given file.
Arguments:
- featureMaker (FeatureModule): model to apply
- seqPath (string): path of the sequence to load
- strict (bool): if True, always work with chunks of the size
maxSizeSeq
- maxSizeSeq (int): maximal size of a chunk
- seqNorm (bool): if True, normalize the output along the time
dimension to get chunks of mean zero and var 1
Return:
a torch vector of size 1 x Seq_size x Feature_dim
"""
if next(featureMaker.parameters()).is_cuda:
device = 'cuda'
else:
device = 'cpu'
seq = torchaudio.load(seqPath)[0]
sizeSeq = seq.size(1)
start = 0
out = []
while start < sizeSeq:
if strict and start + maxSizeSeq > sizeSeq:
break
end = min(sizeSeq, start + maxSizeSeq)
subseq = (seq[:, start:end]).view(1, 1, -1).to(device)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
out.append(features.detach().cpu())
start += maxSizeSeq
if strict and start < sizeSeq:
subseq = (seq[:, -maxSizeSeq:]).view(1, 1, -1).to(device)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
delta = (sizeSeq - start) // featureMaker.getDownsamplingFactor()
out.append(features[:, -delta:].detach().cpu())
out = torch.cat(out, dim=1)
return out
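
# Example (hypothetical path; featureMaker built as in FeatureModule above):
#   features = buildFeature(featureMaker, "utterance.flac", seqNorm=True)
#   features.size()  # 1 x n_frames x feature_dim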
def buildFeature_batch(featureMaker, seqPath, strict=False,
maxSizeSeq=8000, seqNorm=False, batch_size=8):
r"""
Apply the featureMaker to the given file. Apply batch-computation
Arguments:
- featureMaker (FeatureModule): model to apply
- seqPath (string): path of the sequence to load
- strict (bool): if True, always work with chunks of the size
maxSizeSeq
- maxSizeSeq (int): maximal size of a chunk
- seqNorm (bool): if True, normalize the output along the time
dimension to get chunks of mean zero and var 1
Return:
a torch vector of size 1 x Seq_size x Feature_dim
"""
if next(featureMaker.parameters()).is_cuda:
device = 'cuda'
else:
device = 'cpu'
seq = torchaudio.load(seqPath)[0]
sizeSeq = seq.size(1)
# Compute number of batches
n_chunks = sizeSeq//maxSizeSeq
n_batches = n_chunks//batch_size
if n_chunks % batch_size != 0:
n_batches += 1
out = []
# Treat each batch
for batch_idx in range(n_batches):
start = batch_idx*batch_size*maxSizeSeq
end = min((batch_idx+1)*batch_size*maxSizeSeq, maxSizeSeq*n_chunks)
batch_seqs = (seq[:, start:end]).view(-1, 1, maxSizeSeq).to(device)
with torch.no_grad():
# breakpoint()
batch_out = featureMaker((batch_seqs, None))
for features in batch_out:
features = features.unsqueeze(0)
if seqNorm:
features = seqNormalization(features)
out.append(features.detach().cpu())
# Remaining frames
if sizeSeq % maxSizeSeq >= featureMaker.getDownsamplingFactor():
remainders = sizeSeq % maxSizeSeq
if strict:
subseq = (seq[:, -maxSizeSeq:]).view(1, 1, -1).to(device)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
delta = remainders // featureMaker.getDownsamplingFactor()
out.append(features[:, -delta:].detach().cpu())
else:
subseq = (seq[:, -remainders:]).view(1, 1, -1).to(device)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
out.append(features.detach().cpu())
out = torch.cat(out, dim=1)
    return out

# zrc_abx2/cpc/transformers.py
import torch
import torch.nn as nn
import math
class ScaledDotProductAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of the input sequence
dk, # Dimension of the input sequence
dropout, # Dropout parameter
relpos=False): # Do we retrieve positional information ?
super(ScaledDotProductAttention, self).__init__()
self.drop = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
self.relpos = relpos
self.sizeSeq = sizeSeq
if relpos:
self.Krelpos = nn.Parameter(torch.Tensor(dk, sizeSeq))
self.initmat_(self.Krelpos)
self.register_buffer('z', torch.zeros(1, sizeSeq, 1))
# A mask is set so that a node never queries data in the future
mask = torch.tril(torch.ones(sizeSeq, sizeSeq), diagonal=0)
mask = 1 - mask
mask[mask == 1] = -float('inf')
self.register_buffer('mask', mask.unsqueeze(0))
def initmat_(self, mat, dim=0):
stdv = 1. / math.sqrt(mat.size(dim))
mat.data.uniform_(-stdv, stdv)
def forward(self, Q, K, V):
# Input dim : N x sizeSeq x dk
QK = torch.bmm(Q, K.transpose(-2, -1))
if self.relpos:
bsz = Q.size(0)
QP = Q.matmul(self.Krelpos)
# This trick with z fills QP's diagonal with zeros
QP = torch.cat((self.z.expand(bsz, -1, -1), QP), 2)
QK += QP.view(bsz, self.sizeSeq + 1, self.sizeSeq)[:, 1:, :]
A = self.softmax(QK / math.sqrt(K.size(-1)) + self.mask)
return torch.bmm(self.drop(A), V)
class MultiHeadAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of a sequence
dropout, # Dropout parameter
dmodel, # Model's dimension
nheads, # Number of heads in the model
abspos): # Is positional information encoded in the input ?
super(MultiHeadAttention, self).__init__()
self.Wo = nn.Linear(dmodel, dmodel, bias=False)
self.Wk = nn.Linear(dmodel, dmodel, bias=False)
self.Wq = nn.Linear(dmodel, dmodel, bias=False)
self.Wv = nn.Linear(dmodel, dmodel, bias=False)
self.nheads = nheads
self.dk = dmodel // nheads
self.Att = ScaledDotProductAttention(sizeSeq, self.dk,
dropout, not abspos)
def trans_(self, x):
bsz, bptt, h, dk = x.size(0), x.size(1), self.nheads, self.dk
return x.view(bsz, bptt, h, dk).transpose(1, 2).contiguous().view(bsz * h, bptt, dk)
def reverse_trans_(self, x):
bsz, bptt, h, dk = x.size(
0) // self.nheads, x.size(1), self.nheads, self.dk
return x.view(bsz, h, bptt, dk).transpose(1, 2).contiguous().view(bsz, bptt, h * dk)
def forward(self, Q, K, V):
q = self.trans_(self.Wq(Q))
k = self.trans_(self.Wk(K))
v = self.trans_(self.Wv(V))
y = self.reverse_trans_(self.Att(q, k, v))
return self.Wo(y)
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = nn.Linear(din, dff, bias=True)
self.lin2 = nn.Linear(dff, dout, bias=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class TransformerLayer(nn.Module):
def __init__(self, sizeSeq=32, dmodel=512, dff=2048,
dropout=0.1, nheads=8,
abspos=False):
super(TransformerLayer, self).__init__()
self.multihead = MultiHeadAttention(sizeSeq, dropout,
dmodel, nheads, abspos)
self.ln_multihead = nn.LayerNorm(dmodel)
self.ffnetwork = FFNetwork(dmodel, dmodel, dff, dropout)
self.ln_ffnetwork = nn.LayerNorm(dmodel)
def forward(self, x):
y = self.ln_multihead(x + self.multihead(Q=x, K=x, V=x))
return self.ln_ffnetwork(y + self.ffnetwork(y))
class StaticPositionEmbedding(nn.Module):
def __init__(self, seqlen, dmodel):
super(StaticPositionEmbedding, self).__init__()
pos = torch.arange(0., seqlen).unsqueeze(1).repeat(1, dmodel)
dim = torch.arange(0., dmodel).unsqueeze(0).repeat(seqlen, 1)
div = torch.exp(- math.log(10000) * (2*(dim//2)/dmodel))
pos *= div
pos[:, 0::2] = torch.sin(pos[:, 0::2])
pos[:, 1::2] = torch.cos(pos[:, 1::2])
self.register_buffer('pe', pos.unsqueeze(0))
def forward(self, x):
return x + self.pe[:, :x.size(1), :]
def buildTransformerAR(dimEncoded, # Output dimension of the encoder
nLayers, # Number of transformer layers
sizeSeq, # Expected size of the input sequence
abspos):
layerSequence = []
if abspos:
layerSequence += [StaticPositionEmbedding(sizeSeq, dimEncoded)]
layerSequence += [TransformerLayer(sizeSeq=sizeSeq,
dmodel=dimEncoded, abspos=abspos)
for i in range(nLayers)]
    return nn.Sequential(*layerSequence)
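
# Example: a one-layer causal transformer AR network, as built by getAR in
# feature_loader.py, where sizeSeq = args.sizeWindow // 160 (20480 // 160 = 128):
#   arNet = buildTransformerAR(dimEncoded=256, nLayers=1, sizeSeq=128, abspos=True)
#   y = arNet(x)  # x: N x 128 x 256 -> y: N x 128 x 256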

# zrc_abx2/cpc/model.py
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import torch
###########################################
# Networks
###########################################
class IDModule(nn.Module):
def __init__(self, *args, **kwargs):
super(IDModule, self).__init__()
def forward(self, x):
return x
class ChannelNorm(nn.Module):
def __init__(self,
numFeatures,
epsilon=1e-05,
affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean)*torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self,
sizeHidden=512,
normMode="layerNorm"):
super(CPCEncoder, self).__init__()
validModes = ["batchNorm", "instanceNorm", "ID", "layerNorm"]
if normMode not in validModes:
raise ValueError(f"Norm mode must be in {validModes}")
if normMode == "instanceNorm":
def normLayer(x): return nn.InstanceNorm1d(x, affine=True)
elif normMode == "ID":
normLayer = IDModule
elif normMode == "layerNorm":
normLayer = ChannelNorm
else:
normLayer = nn.BatchNorm1d
self.dimEncoded = sizeHidden
self.conv0 = nn.Conv1d(1, sizeHidden, 10, stride=5, padding=3)
self.batchNorm0 = normLayer(sizeHidden)
self.conv1 = nn.Conv1d(sizeHidden, sizeHidden, 8, stride=4, padding=2)
self.batchNorm1 = normLayer(sizeHidden)
self.conv2 = nn.Conv1d(sizeHidden, sizeHidden, 4,
stride=2, padding=1)
self.batchNorm2 = normLayer(sizeHidden)
self.conv3 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm3 = normLayer(sizeHidden)
self.conv4 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm4 = normLayer(sizeHidden)
self.DOWNSAMPLING = 160
def getDimOutput(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
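
    # The five conv strides (5, 4, 2, 2, 2) multiply to DOWNSAMPLING = 160:
    # at the 16 kHz sampling rate of LibriSpeech, one encoded frame covers
    # 10 ms, so an N x 1 x T waveform maps to an N x sizeHidden x (T // 160)
    # feature map.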
class MFCCEncoder(nn.Module):
def __init__(self,
dimEncoded):
super(MFCCEncoder, self).__init__()
melkwargs = {"n_mels": max(128, dimEncoded), "n_fft": 321}
self.dimEncoded = dimEncoded
self.MFCC = torchaudio.transforms.MFCC(n_mfcc=dimEncoded,
melkwargs=melkwargs)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.MFCC(x)
return x.permute(0, 2, 1)
class LFBEnconder(nn.Module):
def __init__(self, dimEncoded, normalize=True):
super(LFBEnconder, self).__init__()
self.dimEncoded = dimEncoded
self.conv = nn.Conv1d(1, 2 * dimEncoded,
400, stride=1)
self.register_buffer('han', torch.hann_window(400).view(1, 1, 400))
self.instancenorm = nn.InstanceNorm1d(dimEncoded, momentum=1) \
if normalize else None
def forward(self, x):
N, C, L = x.size()
x = self.conv(x)
x = x.view(N, self.dimEncoded, 2, -1)
x = x[:, :, 0, :]**2 + x[:, :, 1, :]**2
x = x.view(N * self.dimEncoded, 1, -1)
x = torch.nn.functional.conv1d(x, self.han, bias=None,
stride=160, padding=350)
x = x.view(N, self.dimEncoded, -1)
x = torch.log(1 + torch.abs(x))
# Normalization
if self.instancenorm is not None:
x = self.instancenorm(x)
return x
class CPCAR(nn.Module):
def __init__(self,
dimEncoded,
dimOutput,
keepHidden,
nLevelsGRU,
mode="GRU",
reverse=False):
super(CPCAR, self).__init__()
self.RESIDUAL_STD = 0.1
if mode == "LSTM":
self.baseNet = nn.LSTM(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
elif mode == "RNN":
self.baseNet = nn.RNN(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
else:
self.baseNet = nn.GRU(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
self.hidden = None
self.keepHidden = keepHidden
self.reverse = reverse
def getDimOutput(self):
return self.baseNet.hidden_size
def forward(self, x):
if self.reverse:
x = torch.flip(x, [1])
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keepHidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
# For better modularity, a sequence's order should be preserved
# by each module
if self.reverse:
x = torch.flip(x, [1])
return x
class NoAr(nn.Module):
def __init__(self, *args):
super(NoAr, self).__init__()
def forward(self, x):
return x
class BiDIRARTangled(nn.Module):
r"""
    Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRARTangled, self).__init__()
assert(dimOutput % 2 == 0)
self.ARNet = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True,
bidirectional=True)
def getDimOutput(self):
return self.ARNet.hidden_size * 2
def forward(self, x):
self.ARNet.flatten_parameters()
xf, _ = self.ARNet(x)
return xf
class BiDIRAR(nn.Module):
r"""
    Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRAR, self).__init__()
assert(dimOutput % 2 == 0)
self.netForward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
self.netBackward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
def getDimOutput(self):
return self.netForward.hidden_size * 2
def forward(self, x):
self.netForward.flatten_parameters()
self.netBackward.flatten_parameters()
xf, _ = self.netForward(x)
xb, _ = self.netBackward(torch.flip(x, [1]))
return torch.cat([xf, torch.flip(xb, [1])], dim=2)
###########################################
# Model
###########################################
class CPCModel(nn.Module):
def __init__(self,
encoder,
AR):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = AR
def forward(self, batchData, label):
encodedData = self.gEncoder(batchData).permute(0, 2, 1)
cFeature = self.gAR(encodedData)
return cFeature, encodedData, label
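
# Shape sketch with the default CPCEncoder (DOWNSAMPLING = 160): batchData of
# size N x 1 x T -> encodedData N x (T // 160) x dimEncoded after the permute,
# and cFeature N x (T // 160) x dimAR from the gAR network.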
class ConcatenatedModel(nn.Module):
def __init__(self, model_list):
super(ConcatenatedModel, self).__init__()
self.models = torch.nn.ModuleList(model_list)
def forward(self, batchData, label):
outFeatures = []
outEncoded = []
for model in self.models:
cFeature, encodedData, label = model(batchData, label)
outFeatures.append(cFeature)
outEncoded.append(encodedData)
return torch.cat(outFeatures, dim=2), \
            torch.cat(outEncoded, dim=2), label

# zrc_abx2/cpc/cpc_default_config.py
import argparse
def get_default_cpc_config():
parser = set_default_cpc_config(argparse.ArgumentParser())
return parser.parse_args([])
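
# Example: build the default configuration, then override fields as needed:
#   args = get_default_cpc_config()
#   args.hiddenEncoder        # 256
#   args.arMode = "transformer"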
def set_default_cpc_config(parser):
# Run parameters
group = parser.add_argument_group('Architecture configuration',
description="The arguments defining the "
"model's architecture.")
group.add_argument('--hiddenEncoder', type=int, default=256,
help='Hidden dimension of the encoder network.')
group.add_argument('--hiddenGar', type=int, default=256,
help='Hidden dimension of the auto-regressive network')
group.add_argument('--nPredicts', type=int, default=12,
help='Number of steps to predict.')
group.add_argument('--negativeSamplingExt', type=int, default=128,
help='Number of negative samples to take.')
group.add_argument('--learningRate', type=float, default=2e-4)
group.add_argument('--schedulerStep', type=int, default=-1,
help='Step of the learning rate scheduler: at each '
'step the learning rate is divided by 2. Default: '
'no scheduler.')
group.add_argument('--schedulerRamp', type=int, default=None,
help='Enable a warm up phase for the learning rate: '
'adds a linear ramp of the given size.')
group.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer')
group.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer')
group.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer')
group.add_argument('--sizeWindow', type=int, default=20480,
help='Number of frames to consider at each batch.')
group.add_argument('--nEpoch', type=int, default=200,
                       help='Number of epochs to run')
group.add_argument('--samplingType', type=str, default='samespeaker',
choices=['samespeaker', 'uniform',
'samesequence', 'sequential'],
help='How to sample the negative examples in the '
'CPC loss.')
group.add_argument('--nLevelsPhone', type=int, default=1,
help='(Supervised mode only). Number of layers in '
'the phone classification network.')
group.add_argument('--cpc_mode', type=str, default=None,
choices=['reverse', 'none'],
help='Some variations on CPC.')
group.add_argument('--encoder_type', type=str,
choices=['cpc', 'mfcc', 'lfb'],
default='cpc',
help='Replace the encoder network by mfcc features '
'or learned filter banks')
group.add_argument('--normMode', type=str, default='layerNorm',
choices=['instanceNorm', 'ID', 'layerNorm',
'batchNorm'],
help="Type of normalization to use in the encoder "
"network (default is layerNorm).")
group.add_argument('--onEncoder', action='store_true',
help="(Supervised mode only) Perform the "
"classification on the encoder's output.")
group.add_argument('--random_seed', type=int, default=None,
help="Set a specific random seed.")
group.add_argument('--speakerEmbedding', type=int, default=0,
help="(Depreciated) Feed the prediction network with "
"speaker embeddings along with the usual sequence.")
group.add_argument('--arMode', default='LSTM',
choices=['GRU', 'LSTM', 'RNN', 'no_ar', 'transformer'],
help="Architecture to use for the auto-regressive "
"network (default is lstm).")
group.add_argument('--nLevelsGRU', type=int, default=1,
help='Number of layers in the autoregressive network.')
group.add_argument('--rnnMode', type=str, default='transformer',
choices=['transformer', 'RNN', 'LSTM', 'linear',
'ffd', 'conv4', 'conv8', 'conv12'],
help="Architecture to use for the prediction network")
group.add_argument('--dropout', action='store_true',
help="Add a dropout layer at the output of the "
"prediction network.")
group.add_argument('--abspos', action='store_true',
help='If the prediction network is a transformer, '
'active to use absolute coordinates.')
    return parser

# zrc_abx2/cpc/criterion/criterion.py
import torch
import torch.nn as nn
from .seq_alignment import collapseLabelChain
from .custom_layers import EqualizedLinear, EqualizedConv1d
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = EqualizedLinear(din, dff, bias=True, equalized=True)
self.lin2 = EqualizedLinear(dff, dout, bias=True, equalized=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class ShiftedConv(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConv, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True,
padding=0)
self.kernelSize = kernelSize
def forward(self, x):
# Input format: N, S, C -> need to move to N, C, S
N, S, C = x.size()
x = x.permute(0, 2, 1)
padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
x = torch.cat([padding, x], dim=2)
x = self.module(x)
x = x.permute(0, 2, 1)
return x
class PredictionNetwork(nn.Module):
def __init__(self,
nPredicts,
dimOutputAR,
dimOutputEncoder,
rnnMode=None,
dropout=False,
sizeInputSeq=116):
super(PredictionNetwork, self).__init__()
self.predictors = nn.ModuleList()
self.RESIDUAL_STD = 0.01
self.dimOutputAR = dimOutputAR
self.dropout = nn.Dropout(p=0.5) if dropout else None
for i in range(nPredicts):
if rnnMode == 'RNN':
self.predictors.append(
nn.RNN(dimOutputAR, dimOutputEncoder))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'LSTM':
self.predictors.append(
nn.LSTM(dimOutputAR, dimOutputEncoder, batch_first=True))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'ffd':
self.predictors.append(
FFNetwork(dimOutputAR, dimOutputEncoder,
dimOutputEncoder, 0))
elif rnnMode == 'conv4':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 4))
elif rnnMode == 'conv8':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 8))
elif rnnMode == 'conv12':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 12))
elif rnnMode == 'transformer':
                # Relative import: the local cpc/transformers.py module, not
                # the HuggingFace transformers package.
                from ..transformers import buildTransformerAR
self.predictors.append(
buildTransformerAR(dimOutputEncoder,
1,
sizeInputSeq,
False))
else:
self.predictors.append(
nn.Linear(dimOutputAR, dimOutputEncoder, bias=False))
if dimOutputEncoder > dimOutputAR:
residual = dimOutputEncoder - dimOutputAR
self.predictors[-1].weight.data.copy_(torch.cat([torch.randn(
dimOutputAR, dimOutputAR), self.RESIDUAL_STD * torch.randn(residual, dimOutputAR)], dim=0))
def forward(self, c, candidates):
assert(len(candidates) == len(self.predictors))
out = []
# UGLY
if isinstance(self.predictors[0], EqualizedConv1d):
c = c.permute(0, 2, 1)
for k in range(len(self.predictors)):
locC = self.predictors[k](c)
if isinstance(locC, tuple):
locC = locC[0]
if isinstance(self.predictors[k], EqualizedConv1d):
locC = locC.permute(0, 2, 1)
if self.dropout is not None:
locC = self.dropout(locC)
locC = locC.view(locC.size(0), 1, locC.size(1), locC.size(2))
outK = (locC*candidates[k]).mean(dim=3)
out.append(outK)
return out
class BaseCriterion(nn.Module):
def warmUp(self):
return False
def update(self):
return
class NoneCriterion(BaseCriterion):
def __init__(self):
super(NoneCriterion, self).__init__()
def forward(self, cFeature, encodedData, label):
return torch.zeros(1, 1, device=cFeature.device), \
torch.zeros(1, 1, device=cFeature.device)
class CPCUnsupersivedCriterion(BaseCriterion):
def __init__(self,
nPredicts, # Number of steps
dimOutputAR, # Dimension of G_ar
dimOutputEncoder, # Dimension of the convolutional net
negativeSamplingExt, # Number of negative samples to draw
mode=None,
rnnMode=False,
dropout=False,
speakerEmbedding=0,
nSpeakers=0,
sizeInputSeq=128):
super(CPCUnsupersivedCriterion, self).__init__()
if speakerEmbedding > 0:
print(
f"Using {speakerEmbedding} speaker embeddings for {nSpeakers} speakers")
self.speakerEmb = torch.nn.Embedding(nSpeakers, speakerEmbedding)
dimOutputAR += speakerEmbedding
else:
self.speakerEmb = None
self.wPrediction = PredictionNetwork(
nPredicts, dimOutputAR, dimOutputEncoder, rnnMode=rnnMode,
dropout=dropout, sizeInputSeq=sizeInputSeq - nPredicts)
self.nPredicts = nPredicts
self.negativeSamplingExt = negativeSamplingExt
self.lossCriterion = nn.CrossEntropyLoss()
if mode not in [None, "reverse"]:
raise ValueError("Invalid mode")
self.mode = mode
def sampleClean(self, encodedData, windowSize):
batchSize, nNegativeExt, dimEncoded = encodedData.size()
outputs = []
negExt = encodedData.contiguous().view(-1, dimEncoded)
# Draw nNegativeExt * batchSize negative samples anywhere in the batch
batchIdx = torch.randint(low=0, high=batchSize,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
seqIdx = torch.randint(low=1, high=nNegativeExt,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
baseIdx = torch.arange(0, windowSize, device=encodedData.device)
baseIdx = baseIdx.view(1, 1,
windowSize).expand(1,
self.negativeSamplingExt,
windowSize).expand(batchSize, self.negativeSamplingExt, windowSize)
seqIdx += baseIdx.contiguous().view(-1)
seqIdx = torch.remainder(seqIdx, nNegativeExt)
extIdx = seqIdx + batchIdx * nNegativeExt
negExt = negExt[extIdx].view(batchSize, self.negativeSamplingExt,
windowSize, dimEncoded)
labelLoss = torch.zeros((batchSize * windowSize),
dtype=torch.long,
device=encodedData.device)
for k in range(1, self.nPredicts + 1):
# Positive samples
if k < self.nPredicts:
posSeq = encodedData[:, k:-(self.nPredicts-k)]
else:
posSeq = encodedData[:, k:]
posSeq = posSeq.view(batchSize, 1, posSeq.size(1), dimEncoded)
fullSeq = torch.cat((posSeq, negExt), dim=1)
outputs.append(fullSeq)
return outputs, labelLoss
def getInnerLoss(self):
return "orthoLoss", self.orthoLoss * self.wPrediction.orthoCriterion()
def forward(self, cFeature, encodedData, label):
if self.mode == "reverse":
encodedData = torch.flip(encodedData, [1])
cFeature = torch.flip(cFeature, [1])
batchSize, seqSize, dimAR = cFeature.size()
windowSize = seqSize - self.nPredicts
cFeature = cFeature[:, :windowSize]
sampledData, labelLoss = self.sampleClean(encodedData, windowSize)
if self.speakerEmb is not None:
l_ = label.view(batchSize, 1).expand(batchSize, windowSize)
embeddedSpeaker = self.speakerEmb(l_)
cFeature = torch.cat([cFeature, embeddedSpeaker], dim=2)
predictions = self.wPrediction(cFeature, sampledData)
outLosses = [0 for x in range(self.nPredicts)]
outAcc = [0 for x in range(self.nPredicts)]
for k, locPreds in enumerate(predictions[:self.nPredicts]):
locPreds = locPreds.permute(0, 2, 1)
locPreds = locPreds.contiguous().view(-1, locPreds.size(2))
lossK = self.lossCriterion(locPreds, labelLoss)
outLosses[k] += lossK.view(1, -1)
_, predsIndex = locPreds.max(1)
outAcc[k] += torch.sum(predsIndex == labelLoss).float().view(1, -1)
return torch.cat(outLosses, dim=1), \
torch.cat(outAcc, dim=1) / (windowSize * batchSize)
class SpeakerCriterion(BaseCriterion):
def __init__(self, dimEncoder, nSpeakers):
super(SpeakerCriterion, self).__init__()
self.linearSpeakerClassifier = nn.Linear(
dimEncoder, nSpeakers)
self.lossCriterion = nn.CrossEntropyLoss()
self.entropyCriterion = nn.LogSoftmax(dim=1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
batchSize = cFeature.size(0)
cFeature = cFeature[:, -1, :]
cFeature = cFeature.view(batchSize, -1)
predictions = self.linearSpeakerClassifier(cFeature)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
class PhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder,
nLayers=1):
super(PhoneCriterion, self).__init__()
if nLayers == 1:
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones)
else:
outLayers = [nn.Linear(dimEncoder, nPhones)]
for l in range(nLayers - 1):
outLayers.append(nn.ReLU())
outLayers.append(nn.Linear(nPhones, nPhones))
self.PhoneCriterionClassifier = nn.Sequential(*outLayers)
self.lossCriterion = nn.CrossEntropyLoss()
self.onEncoder = onEncoder
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
if self.onEncoder:
predictions = self.getPrediction(otherEncoded)
else:
predictions = self.getPrediction(cFeature)
predictions = predictions.view(-1, predictions.size(2))
label = label.view(-1)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
def getPrediction(self, cFeature):
batchSize, seqSize = cFeature.size(0), cFeature.size(1)
cFeature = cFeature.contiguous().view(batchSize * seqSize, -1)
output = self.PhoneCriterionClassifier(cFeature)
return output.view(batchSize, seqSize, -1)
class CTCPhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder):
super(CTCPhoneCriterion, self).__init__()
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones + 1)
self.lossCriterion = nn.CTCLoss(blank=nPhones, zero_infinity=True)
self.onEncoder = onEncoder
if onEncoder:
raise ValueError("On encoder version not implemented yet")
self.BLANK_LABEL = nPhones
def getPrediction(self, cFeature):
B, S, H = cFeature.size()
cFeature = cFeature.contiguous().view(B*S, H)
return self.PhoneCriterionClassifier(cFeature).view(B, S, -1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature)
label = label.to(predictions.device)
label, sizeLabels = collapseLabelChain(label)
avgPER = 0.
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
targetSizePred = torch.ones(B, dtype=torch.int64,
device=predictions.device) * S
loss = self.lossCriterion(predictions, label,
targetSizePred, sizeLabels).view(1, -1)
return loss, avgPER * torch.ones(1, 1, device=loss.device)
class ModelCriterionCombined(torch.nn.Module):
def __init__(self, model, criterion):
super(ModelCriterionCombined, self).__init__()
self.model = model
self.criterion = criterion
def forward(self, data, label):
c_feature, encoded_data, label = self.model(data, label)
loss, acc = self.criterion(c_feature, encoded_data, label)
        return loss, acc

# zrc_abx2/cpc/criterion/seq_alignment.py
import torch
from multiprocessing import Lock, Manager, Process
from copy import deepcopy
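
# beam_search implements CTC prefix beam search: for each prefix b it tracks
# pb (probability of b over paths ending in a blank) and pnb (paths ending in
# a non-blank), collapsing repeated labels unless they are separated by a blank.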
def beam_search(score_preds, nKeep, blankLabel):
T, P = score_preds.shape
beams = set([''])
pb_t_1 = {"": 1}
pnb_t_1 = {"": 0}
def getLastNumber(b):
return int(b.split(',')[-1])
for t in range(T):
nextBeams = set()
pb_t = {}
pnb_t = {}
for i_beam, b in enumerate(beams):
if b not in pb_t:
pb_t[b] = 0
pnb_t[b] = 0
if len(b) > 0:
pnb_t[b] += pnb_t_1[b] * score_preds[t, getLastNumber(b)]
pb_t[b] = (pnb_t_1[b] + pb_t_1[b]) * score_preds[t, blankLabel]
nextBeams.add(b)
for c in range(P):
if c == blankLabel:
continue
b_ = b + "," + str(c)
if b_ not in pb_t:
pb_t[b_] = 0
pnb_t[b_] = 0
if b != "" and getLastNumber(b) == c:
pnb_t[b_] += pb_t_1[b] * score_preds[t, c]
else:
pnb_t[b_] += (pb_t_1[b] + pnb_t_1[b]) * score_preds[t, c]
nextBeams.add(b_)
allPreds = [(pb_t[b] + pnb_t[b], b) for b in nextBeams]
allPreds.sort(reverse=True)
beams = [x[1] for x in allPreds[:nKeep]]
pb_t_1 = deepcopy(pb_t)
pnb_t_1 = deepcopy(pnb_t)
output = []
for score, x in allPreds[:nKeep]:
output.append((score, [int(y) for y in x.split(',') if len(y) > 0]))
return output
def collapseLabelChain(inputLabels):
# Shape N,T
N, T = inputLabels.size()
outSizes = torch.zeros(N, device=inputLabels.device, dtype=torch.int64)
output = []
for l in range(N):
status = inputLabels[l, :-1] - inputLabels[l, 1:]
status = torch.cat([torch.ones(1, device=status.device,
dtype=status.dtype),
status], dim=0)
outSizes[l] = (status != 0).sum()
output.append(inputLabels[l][status != 0])
maxSize = int(outSizes.max().item())
paddedOutput = torch.zeros(N, maxSize,
device=inputLabels.device,
dtype=torch.int64)
for l in range(N):
S = int(outSizes[l])
paddedOutput[l, :S] = output[l]
return paddedOutput, outSizes
def NeedlemanWunschAlignScore(seq1, seq2, d, m, r, normalize=True):
N1, N2 = len(seq1), len(seq2)
# Fill up the errors
tmpRes_ = [[None for x in range(N2 + 1)] for y in range(N1 + 1)]
for i in range(N1 + 1):
tmpRes_[i][0] = i * d
for j in range(N2 + 1):
tmpRes_[0][j] = j * d
for i in range(N1):
for j in range(N2):
match = r if seq1[i] == seq2[j] else m
v1 = tmpRes_[i][j] + match
v2 = tmpRes_[i + 1][j] + d
v3 = tmpRes_[i][j + 1] + d
tmpRes_[i + 1][j + 1] = max(v1, max(v2, v3))
i = j = 0
res = -tmpRes_[N1][N2]
if normalize:
res /= float(N1)
return res
def get_seq_PER(seqLabels, detectedLabels):
return NeedlemanWunschAlignScore(seqLabels, detectedLabels, -1, -1, 0,
normalize=True)
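
# Worked example: get_seq_PER([1, 2, 3], [1, 3]) == 1/3: the best alignment
# deletes one of the three reference phones (deletion/insertion -1, mismatch -1,
# match 0), and the negated alignment score is normalized by the reference length.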
def getPER(dataLoader, featureMaker, blankLabel):
out = 0
n_items = 0
n_keep_beam_search = 100
for index, data in enumerate(dataLoader):
with torch.no_grad():
output = featureMaker(data).cpu().numpy()
labels = data[1]
labels, targetSize = collapseLabelChain(labels)
lock = Lock()
def per(rank, outScore):
S = int(targetSize[rank])
seqLabels = labels[rank, :S]
preds = beam_search(output[rank],
n_keep_beam_search, blankLabel)[0][1]
value = get_seq_PER(seqLabels, preds)
with lock:
outScore.value += value
manager = Manager()
outScore = manager.Value('f', 0.)
N, S, D = output.shape
processes = []
for rank in range(N):
p = Process(
target=per, args=(rank, outScore))
p.start()
processes.append(p)
for p in processes:
p.join()
out += outScore.value
n_items += N
    return (out / n_items)

# zrc_abx2/cpc/criterion/custom_layers.py
import math
import torch.nn as nn
from numpy import prod
class NormalizationLayer(nn.Module):
def __init__(self):
super(NormalizationLayer, self).__init__()
def forward(self, x, epsilon=1e-8):
return x * (((x**2).mean(dim=1, keepdim=True) + epsilon).rsqrt())
def Upscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1:
return x
s = x.size()
x = x.view(-1, s[1], s[2], 1, s[3], 1)
x = x.expand(-1, s[1], s[2], factor, s[3], factor)
x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)
return x
def getLayerNormalizationFactor(x):
r"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
r"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self,
module,
equalized=True,
lrMul=1.0,
initBiasToZero=True):
r"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
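
# This is the "equalized learning rate" trick (Karras et al., 2018, Progressive
# GANs): weights are drawn from N(0, 1) and rescaled at run time by He's
# constant, keeping the effective learning rate comparable across layers.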
class EqualizedConv1d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
stride=1,
**kwargs):
r"""
        A nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv1d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias, stride=stride),
**kwargs)
class EqualizedConv2d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
**kwargs):
r"""
A nn.Conv2d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv2d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias),
**kwargs)
class EqualizedLinear(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
bias=True,
**kwargs):
r"""
A nn.Linear module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Linear(nChannelsPrevious, nChannels,
                                            bias=bias), **kwargs)

# zrc_abx2/cpc/utils/misc.py
import json
import numpy as np
import random
import torch
import sys
import psutil
from copy import deepcopy
from bisect import bisect_left
def untensor(d):
if isinstance(d, list):
return [untensor(v) for v in d]
if isinstance(d, dict):
return dict((k, untensor(v)) for k, v in d.items())
if hasattr(d, 'tolist'):
return d.tolist()
return d
def save_logs(data, pathLogs):
with open(pathLogs, 'w') as file:
json.dump(data, file, indent=2)
def update_logs(logs, logStep, prevlogs=None):
out = {}
for key in logs:
out[key] = deepcopy(logs[key])
if prevlogs is not None:
out[key] -= prevlogs[key]
out[key] /= logStep
return out
def show_logs(text, logs):
print("")
print('-'*50)
print(text)
for key in logs:
if key == "iter":
continue
nPredicts = logs[key].shape[0]
strSteps = ['Step'] + [str(s) for s in range(1, nPredicts + 1)]
formatCommand = ' '.join(['{:>16}' for x in range(nPredicts + 1)])
print(formatCommand.format(*strSteps))
strLog = [key] + ["{:10.6f}".format(s) for s in logs[key]]
print(formatCommand.format(*strLog))
print('-'*50)
def set_seed(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def cpu_stats():
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory())
def ramp_scheduling_function(n_epoch_ramp, epoch):
if epoch >= n_epoch_ramp:
return 1
else:
return (epoch + 1) / n_epoch_ramp
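
# A minimal usage sketch (assumes an existing torch optimizer named `optimizer`):
#   from functools import partial
#   from torch.optim.lr_scheduler import LambdaLR
#   scheduler = LambdaLR(optimizer, partial(ramp_scheduling_function, 10))
# This ramps the learning rate linearly over the first 10 epochs.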
class SchedulerCombiner:
r"""
An object which applies a list of learning rate schedulers sequentially.
"""
def __init__(self, scheduler_list, activation_step, curr_step=0):
r"""
Args:
- scheduler_list (list): a list of learning rate schedulers
- activation_step (list): a list of int. activation_step[i]
indicates at which step scheduler_list[i] should be activated
            - curr_step (int): the starting step. Must not be lower than
            activation_step[0] (see the check below)
"""
if len(scheduler_list) != len(activation_step):
raise ValueError("The number of scheduler must be the same as "
"the number of activation step")
if activation_step[0] > curr_step:
raise ValueError("The first activation step cannot be higher than "
"the current step.")
self.scheduler_list = scheduler_list
self.activation_step = deepcopy(activation_step)
self.curr_step = curr_step
def step(self):
self.curr_step += 1
index = bisect_left(self.activation_step, self.curr_step) - 1
for i in reversed(range(index, len(self.scheduler_list))):
self.scheduler_list[i].step()
def __str__(self):
out = "SchedulerCombiner \n"
out += "(\n"
for index, scheduler in enumerate(self.scheduler_list):
out += f"({index}) {scheduler.__str__()} \n"
out += ")\n"
        return out

# zrc_abx2/ABX_src/abx_group_computation.py
import math
from typing import Callable, Union
import numpy as np
import torch
import libri_light_dtw as dtw
from .ABXIterators.abx_iterators_anycontext import *
from .ABXIterators.abx_iterators import *
from .models import Pooling
def get_distance_function_from_name(name_str: str):
if name_str in ('euclidian', 'euclidean'):
return get_euclidian_distance_batch
if name_str == 'cosine':
return get_cosine_distance_batch
if name_str == 'kl':
return get_kl_distance_batch
if name_str == 'kl_symmetric':
return get_kl_distance_symmetric_batch
raise ValueError(f"Invalid distance mode")
def check_dtw_group_validity(a, b, x):
assert(len(a.size()) == len(b.size()))
assert(len(a.size()) == len(x.size()))
assert(a.size(2) == x.size(2))
assert(a.size(2) == b.size(2))
def get_kl_distance_batch(a1: torch.Tensor, a2: torch.Tensor, epsilon=1e-6):
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
# (P * (P / Q).log()).sum()
div = (a1.view(N1, 1, S1, 1, D) + epsilon) / (a2.view(1, N2, 1, S2, D) + epsilon)
prod: torch.Tensor = (a1.view(N1, 1, S1, 1, D)) * div.log()
return prod.sum(dim=4)
def get_kl_distance_symmetric_batch(a1: torch.Tensor, a2: torch.Tensor, epsilon=1e-6):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
div1 = (a1.view(N1, 1, S1, 1, D) + epsilon) / (a2.view(1, N2, 1, S2, D) + epsilon)
div2 = (a2.view(1, N2, 1, S2, D) + epsilon) / (a1.view(N1, 1, S1, 1, D) + epsilon)
prod1 = (a1.view(N1, 1, S1, 1, D)) * div1.log()
prod2 = (a2.view(1, N2, 1, S2, D)) * div2.log()
r: torch.Tensor = (0.5*prod1 + 0.5*prod2).sum(dim=4)
return r
def get_cosine_distance_batch(a1: torch.Tensor, a2: torch.Tensor, epsilon=1e-6): # epsilon unused
r""" a1 and a2 must be normalized"""
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
prod = (a1.view(N1, 1, S1, 1, D)) * (a2.view(1, N2, 1, S2, D))
# Sum accross the channel dimension
prod = torch.clamp(prod.sum(dim=4), -1, 1).acos() / math.pi
return prod
def get_euclidian_distance_batch(a1: torch.Tensor, a2: torch.Tensor, epsilon=1e-6): # epsilon unused
N1, S1, D = a1.size()
N2, S2, D = a2.size()
diff = a1.view(N1, 1, S1, 1, D) - a2.view(1, N2, 1, S2, D)
return torch.sqrt((diff**2).sum(dim=4))
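
# All *_distance_batch functions above broadcast a1 (N1 x S1 x D) against
# a2 (N2 x S2 x D) and return an N1 x N2 x S1 x S2 tensor of frame-to-frame
# distances, which the DTW step below reduces to one value per sequence pair.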
def get_distance_group_dtw(a1: torch.Tensor, a2: torch.Tensor, size1: torch.Tensor, size2: torch.Tensor,
pooling: Pooling, ignore_diag=False, symmetric=False,
distance_function = get_cosine_distance_batch) -> torch.Tensor:
N1, S1, D = a1.size()
N2, S2, D = a2.size()
if size1.size(0) != N1:
print(a1.size(), size1.size())
print(a2.size(), size2.size())
assert(size1.size(0) == N1)
assert(size2.size(0) == N2)
distance_mat: np.ndarray = distance_function(a1, a2).detach().cpu().numpy()
return dtw.dtw_batch(a1, a2, size1, size2,
distance_mat,
ignore_diag, symmetric)
# TODO: We call dtw even when we have pooled and no realignment is needed.
# This should be harmless, but it is unnecessary.
# We should do something like (but add code to check that always the right
# dimensions are returned):
# if pooling == Pooling.NONE:
# return dtw.dtw_batch(a1, a2, size1, size2,
# distance_mat,
# ignore_diag, symmetric)
#Recast and reshape to what we would get from the dtw function if given pooled input
#return torch.from_numpy(distance_mat.squeeze((1,2))) # Fix dims!
def get_theta_group_dtw(a: torch.Tensor, b: torch.Tensor, x: torch.Tensor,
sa: torch.Tensor, sb: torch.Tensor, sx: torch.Tensor,
distance_function: Callable[..., torch.Tensor],
symmetric: bool, pooling: Pooling):
check_dtw_group_validity(a, b, x)
dxb = get_distance_group_dtw(
x, b, sx, sb, pooling, distance_function=distance_function)
dxa = get_distance_group_dtw(x, a, sx, sa, pooling,
ignore_diag=symmetric, symmetric=symmetric,
distance_function=distance_function)
Nx, Na = dxa.size()
Nx, Nb = dxb.size()
if symmetric:
n_pos = Na * (Na - 1)
max_val = dxb.max().item()
for i in range(Na):
dxa[i, i] = max_val + 1
else:
n_pos = Na * Nx
dxb = dxb.view(Nx, 1, Nb).expand(Nx, Na, Nb)
dxa = dxa.view(Nx, Na, 1).expand(Nx, Na, Nb)
sc = (dxa < dxb).sum() + 0.5 * (dxa == dxb).sum()
sc /= (n_pos * Nb)
return sc.item()
def loc_dtw(data,
distance_function: Callable[..., torch.Tensor],
symmetric: bool, pooling: Pooling):
coords, group_a, group_b, group_x = data
group_a_data, group_a_size = group_a
group_b_data, group_b_size = group_b
group_x_data, group_x_size = group_x
theta = get_theta_group_dtw(group_a_data,
group_b_data,
group_x_data,
group_a_size,
group_b_size,
group_x_size,
distance_function,
symmetric,
pooling)
return (coords, 1 - theta)
# TODO: maybe define a protocol for group_iterator (it can be
# 4 different classes right now)
def get_abx_scores_dtw_on_group(group_iterator: Union[ABXWithinGroupIterator, ABXAcrossGroupIterator, ABXWithinGroupIteratorAnyContext, ABXAcrossGroupIteratorAnyContext],
distance_function: Callable[..., torch.Tensor],
symmetric: bool, pooling: Pooling):
data_list = []
coords_list = []
with torch.no_grad():
# See the iterator's def __iter__(self) for details
for _, group in enumerate(group_iterator):
coords, abx = loc_dtw(group, distance_function, symmetric, pooling)
data_list.append(abx)
coords_list.append(coords)
return torch.sparse.FloatTensor(torch.LongTensor(coords_list).t(),
torch.FloatTensor(data_list),
                                    group_iterator.get_board_size())

# zrc_abx2/ABX_src/ABXDataset/abx_item_file_loader.py
from ..models import *
# ITEMFILE COLUMNS
FILEID_COL = 0
ONSET_COL = 1
OFFSET_COL = 2
PHONE_COL = 3
PREV_PHONE_COL = 4
NXT_PHONE_COL = 5
SPEAKER_COL = 6
COLUMN_COUNT = 7
# ITEMDATA_INDICES
# These are the indices for the constructed itemdata
# This assumes onset, offset, context_id, phone_id, speaker_id
ONSET_IDX = 0
OFFSET_IDX = 1
CONTEXT_IDX = 2
PHONE_IDX = 3
SPEAKER_IDX = 4
class ABXItemFileLoader:
def load_item_file(self, path_item_file: str) -> ItemFile:
r"""Load a .item file indicating the triplets for the ABX score. The
input file must have the following format:
line 0 : whatever (not read)
line > 0: #file_ID onset offset #phone prev-phone next-phone speaker
        onset : beginning of the triplet (in s)
        offset : end of the triplet (in s)
Returns a tuple of files_data, context_match, phone_match, speaker_match where
files_data: dictionary whose key is the file id, and the value is the list of item tokens in that file, each item in turn
given as a list of onset, offset, context_id, phone_id, speaker_id.
context_match is a dictionary of the form { prev_phone_str+next_phone_str: context_id }.
phone_match is a dictionary of the form { phone_str: phone_id }.
speaker_match is a dictionary of the form { speaker_str: speaker_id }.
The id in each case is iterative (0, 1 ...)
"""
with open(path_item_file, "r") as file:
item_f_lines = file.readlines()[1:]
item_f_lines = [x.replace("\n", "") for x in item_f_lines]
# key: fileID, value: a list of items, each item in turn given as a list of
# onset, offset, context_id, phone_id, speaker_id (see below for the id constructions)
files_data: Dict[str, List[ItemData]] = {}
# Provide a phone_id for each phoneme type (a la B: 0, N: 1 ...)
phone_match: Dict[str, int] = {}
context_match: Dict[str, int] = {} # ... context_id ...
speaker_match: Dict[str, int] = {} # ... speaker_id ...
for line in item_f_lines:
items = line.split()
assert len(items) == COLUMN_COUNT # assumes 7-column files
fileID = items[FILEID_COL]
if fileID not in files_data:
files_data[fileID] = []
onset, offset = float(items[ONSET_COL]), float(items[OFFSET_COL])
phone = items[PHONE_COL]
speaker = items[SPEAKER_COL]
context = "+".join([items[PREV_PHONE_COL], items[NXT_PHONE_COL]])
if phone not in phone_match:
# We increment the id by 1 each time a new phoneme type is found
s = len(phone_match)
phone_match[phone] = s
phone_id = phone_match[phone]
if context not in context_match:
s = len(context_match)
context_match[context] = s
context_id = context_match[context]
if speaker not in speaker_match:
s = len(speaker_match)
speaker_match[speaker] = s
speaker_id = speaker_match[speaker]
files_data[fileID].append(
ItemData(onset, offset, context_id, phone_id, speaker_id)
)
        return ItemFile(files_data, context_match, phone_match, speaker_match)

# zrc_abx2/ABX_src/ABXDataset/abx_feature_loader.py
import math
from typing import Any, Callable, List, Tuple
import torch
import numpy as np
from typing_extensions import LiteralString
from zrc_abx2.ABX_src.ABXDataset.abx_feature_dataset import ABXFeatureDataset
from zrc_abx2.ABX_src.ABXDataset.abx_item_file_loader import *
from zrc_abx2.ABX_src.models import *
def normalize_with_singularity(x) -> torch.Tensor:
r"""
    Normalize the given vectors across the channel dimension.
Extend all vectors by eps=1e-12 to put the null vector at the maximal
cosine distance from any non-null vector.
"""
S, H = x.size()
norm_x = (x**2).sum(dim=1, keepdim=True)
x /= torch.sqrt(norm_x)
zero_vals = (norm_x == 0).view(S)
x[zero_vals] = 1 / math.sqrt(H)
border_vect = torch.zeros((S, 1), dtype=x.dtype, device=x.device) + 1e-12
border_vect[zero_vals] = -2 * 1e12
return torch.cat([x, border_vect], dim=1)
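
# Shape note: the appended border column grows the channel dimension by one,
# so an S x H input comes back as S x (H + 1), with null frames mapped to a
# vector maximally distant (in cosine terms) from every non-null frame.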
class ABXFeatureLoader:
def __init__(
self,
pooling: Pooling,
path_item_file: str,
seqList: List[Tuple[str, LiteralString]],
feature_maker: Callable,
stepFeature: float,
normalize: bool,
):
"""
Args:
path_item_file (str): path to the .item (.pitem) files containing the ABX
triplets
seqList (list): list of files (fileID, path) where fileID refers to
the file's ID as used in (path_)item_file, and path
is the actual path to the input audio sequence
featureMaker (function): either a function or a callable object.
Takes a path as input and outputs the
feature sequence corresponding to the
given file.
            normalize (bool): if True all input features will be normalized
across the channels dimension.
Note:
You can use this dataset with pre-computed features. For example, if
you have a collection of features files in the torch .pt format then
you can just set featureMaker = torch.load.
"""
self.pooling = pooling
self.item_file = ABXItemFileLoader().load_item_file(path_item_file)
self.seqList = seqList
self.feature_maker = feature_maker
self.stepFeature = stepFeature
self.normalize = normalize
self.seqNorm = True
# INTERFACE
def loadFromFileData(self) -> ABXFeatureDataset:
return self._load_data(
self.pooling,
self.item_file.files_data,
self.seqList,
self.feature_maker,
self.normalize,
self.stepFeature,
self.item_file,
)
# PRIVATE METHODS
def _pool(self, feature: torch.Tensor, pooling: Pooling) -> torch.Tensor:
if pooling == Pooling.NONE:
return feature
elif pooling == Pooling.MEAN:
# vector avg. But keep the original shape.
# So e.g. if we had 4 frames with 51 feature dimensions [4,51],
# we will get back [1,51], not [51]
return feature.mean(dim=0, keepdim=True)
elif pooling == Pooling.HAMMING:
h: np.ndarray = np.hamming(feature.size(0))
np_f: np.ndarray = feature.detach().cpu().numpy()
# weight vec dot feature matrix: each row/frame gets its own
# hamming weight and all the rows are summed into a single
# vector. Then divide by sum of weights. Finally, reshape
# into original shape.
pooled: np.ndarray = (h.dot(np_f) / sum(h))[None, :]
return torch.from_numpy(pooled)
else:
raise ValueError("Invalid value for pooling.")
def _start_end_indices(
self,
phone_start: Any,
phone_end: Any,
all_features: torch.Tensor,
stepFeature: float,
) -> Tuple[int, int]:
index_start = max(0, int(math.ceil(stepFeature * phone_start - 0.5)))
index_end = int(
min(
all_features.size(0),
int(math.floor(stepFeature * phone_end - 0.5)),
)
)
return index_start, index_end
def _append_feature(
self,
index_start: int,
index_end: int,
totSize: int,
all_features: torch.Tensor,
context_id: int,
phone_id: int,
speaker_id: int,
data: List[torch.Tensor],
features_manifest: List[ManifestFeatureItem],
pooling: Pooling,
) -> int:
"""Build and append the feature to the features data list.
Add information on it to the manifest, i.e. to self.features.
Return the total size i.e. the total number of frames added to the data thus far."""
feature = all_features[index_start:index_end]
feature = self._pool(feature, pooling)
start_i = totSize
loc_size = feature.shape[0]
features_manifest.append(
ManifestFeatureItem(
start_i, loc_size, context_id, phone_id, speaker_id
)
)
data.append(feature)
return totSize + loc_size
def _load_data(
self,
pooling: Pooling,
files_data: Dict[str, List[ItemData]],
seqList: List[Tuple[str, LiteralString]],
feature_maker: Callable,
normalize: bool,
stepFeature: float,
item_file: ItemFile,
) -> ABXFeatureDataset:
# data[i] is the data for a given item.
# data contains all the item representations over all files
data: List[torch.Tensor] = []
# features_manifest[i]: index_start, size, context_id, phone_id,
# speaker_id. This is a manifest of what is in
# data_compressed (see below)
features_manifest: List[ManifestFeatureItem] = []
totSize = 0
print("Building the input features...")
for vals in seqList:
fileID, file_path = vals
if fileID not in files_data:
# file in submission not in the item file
continue
all_features: torch.Tensor = feature_maker(file_path)
if normalize:
all_features = normalize_with_singularity(all_features)
all_features = all_features.detach().cpu()
# The list of item tokens in a file as defined by the item file
# Each item is given as a list of onset, offset, context_id, phone_id, speaker_id
phone_data = files_data[fileID]
for item_data in phone_data:
index_start, index_end = self._start_end_indices(
item_data.onset,
item_data.offset,
all_features,
stepFeature,
)
if (
index_start >= all_features.size(0)
or index_end <= index_start
):
continue
totSize = self._append_feature(
index_start,
index_end,
totSize,
all_features,
item_data.context_id,
item_data.phone_id,
item_data.speaker_id,
data,
features_manifest,
pooling,
)
print("...done")
# A list of all the frames representations over all the items
# such that first we have the frame representations from the
# first item in data; then, all the frame representations from
# the second item in data get concatenated to this, etc.
data_compressed = torch.cat(data, dim=0)
feature_dim = data_compressed.size(1)
return ABXFeatureDataset(
data_compressed, features_manifest, feature_dim, item_file
        )
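# A hedged usage sketch (paths and the feature-rate value are hypothetical).
# As the docstring above notes, pre-computed .pt features can be loaded by
# passing torch.load as the feature maker:
#
#     loader = ABXFeatureLoader(
#         pooling=Pooling.NONE,
#         path_item_file="triplets.item",
#         seqList=[("file1", "/data/file1.pt"), ("file2", "/data/file2.pt")],
#         feature_maker=torch.load,
#         stepFeature=100.0,   # frames per second of the stored features
#         normalize=True,
#     )
#     dataset = loader.loadFromFileData()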
# This is a modification of LibriLight ABX evaluation's abx_iterations.py.
#
# The original ABX takes the middle phone, its context (prev & next phones),
# and the speaker. It can run across- and within-speaker ABX, "within-context".
import random
from dataclasses import dataclass, field
from typing import Any, List, Tuple, Union
import torch
from ..ABXDataset.abx_feature_dataset import ABXFeatureDataset
from ..ABXDataset.abx_item_file_loader import *
from ..models import *
GROUP_PRIORITY_INDEX = 0
def get_features_group(
in_data: List[ManifestFeatureItem], index_order: List[int]
):
"""Returns: tuple[in_index, out_groups] where
in_index: list[int]. Contains indices for reordering a list of
ManifestFeatureItems according to the priority given in index_order.
E.g. ANY-CONTEXT: reorder by speaker, then phoneme; WITHIN-CONTEXT: reorder
by context, speaker, phoneme.
out_groups: list[Any]. Takes the indices in in_index and divides them in
groups into an n-dimensional matrix with the priority defined in index_order.
E.g. if index_order is [CONTEXT_IDX, SPEAKER_IDX, PHONE_IDX], then each
    outermost 'row' of out_groups will delimit a context, and each element in
that row will delimit a single speaker group. Finally, at the innermost level,
a tuple will mark the beginning and end indices for manifest items with the
same phoneme in that context and speaker group.
"""
in_index = list(range(len(in_data)))
# For instance, if index_order = [SPEAKER_IDX, PHONE_IDX]
# we get all the indexes for items from the first speaker first,
# ordered by phoneme; then all the indexes for items of the second speaker
# again ordered by phoneme, etc.
in_index.sort(key=lambda x: [in_data[x][i] for i in index_order])
out_groups = []
# E.g. might be [0, 0] in the any-context condition for the speaker_id,
# phoneme_id of the first item in the rearranged order
last_values = [in_data[in_index[0]][i] for i in index_order]
i_start = 0
curr_group = [[] for i in index_order]
n_orders = len(index_order) - 1
tmp = [in_data[i] for i in in_index]
for i_end, item in enumerate(tmp):
for order_index, order in enumerate(index_order):
if item[order] != last_values[order_index]:
curr_group[-1].append((i_start, i_end))
                # This runs when there is a transition in one of the
                # non-innermost (leftmost) levels, i.e. with
                # index_order = [CONTEXT_IDX, SPEAKER_IDX, PHONE_IDX] it runs
                # when there is a transition in context or speaker.
for i in range(n_orders, order_index, -1):
curr_group[i - 1].append(curr_group[i])
curr_group[i] = []
# reset curr_group when the outermost group changes
if order_index == GROUP_PRIORITY_INDEX:
out_groups += curr_group[0]
curr_group[0] = []
last_values = [item[i] for i in index_order]
i_start = i_end
break
if i_start < len(in_data):
curr_group[-1].append((i_start, len(in_data)))
for i in range(n_orders, 0, -1):
curr_group[i - 1].append(curr_group[i])
out_groups += curr_group[0]
return in_index, out_groups
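# A worked toy example (hedged; items abbreviated to their sort-key fields).
# With index_order = [SPEAKER_IDX, PHONE_IDX] and four manifest items
#   0: (spk 0, phn 1), 1: (spk 1, phn 0), 2: (spk 0, phn 0), 3: (spk 0, phn 0)
# we get in_index = [2, 3, 0, 1] (speaker 0's phoneme-0 items first, then its
# phoneme-1 item, then speaker 1) and
#   out_groups = [[(0, 2), (2, 3)], [(3, 4)]]
# i.e. one inner list per speaker and one (start, end) index pair per phoneme
# run inside that speaker, indexing into the reordered list.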
@dataclass
class ABXIterator:
r"""
Base class building ABX's triplets.
"""
abxDataset: ABXFeatureDataset
max_size_group: Any # TODO: Type
symmetric: bool
reorder_priority: List[int]
context_type: ContextType
seed_n: int
len: int = 0
indices_items: List[int] = field(init=False)
# WITHIN CONTEXT CONDITION:
# context groups containing speaker groups
# WITHOUT CONTEXT CONDITION:
# speaker groups
indices_item_groups: List[Any] = field(init=False)
def __post_init__(self):
random.seed(self.seed_n)
self.indices_items, self.indices_item_groups = get_features_group(
self.abxDataset.features_manifest, self.reorder_priority
)
def get_group(
self, i_start, i_end
) -> Tuple[torch.Tensor, torch.Tensor, Tuple[int, ...]]:
data = []
max_size = 0
to_take = list(range(i_start, i_end))
if i_end - i_start > self.max_size_group:
to_take = random.sample(to_take, k=self.max_size_group)
loc_id: Tuple[int, ...] = ()
for i in to_take:
data_item = self.abxDataset[self.indices_items[i]]
loc_data = data_item.data
loc_size = data_item.out_size
loc_id = self._get_loc_id(data_item, self.context_type)
max_size = max(loc_size, max_size)
data.append(loc_data)
N = len(to_take)
out_data = torch.zeros(
N,
max_size,
self.abxDataset.feature_dim,
device=self.abxDataset.get_data_device(),
)
out_size = torch.zeros(
N, dtype=torch.long, device=self.abxDataset.get_data_device()
)
for i in range(N):
size = data[i].size(0)
out_data[i, :size] = data[i]
out_size[i] = size
return out_data, out_size, loc_id
def _get_loc_id(
self, data_item: ABXFeaturesDataItem, context_type: ContextType
) -> Union[Tuple[int, int, int], Tuple[int, int]]:
if context_type == ContextType.WITHIN:
return (
data_item.context_id,
data_item.phone_id,
data_item.speaker_id,
)
elif context_type == ContextType.ANY:
return (
data_item.phone_id,
data_item.speaker_id,
)
else:
raise ValueError("Invalid context type.")
def __len__(self):
return self.len
def get_board_size(self):
r"""
Get the output dimension of the triplet's space.
"""
pass
class ABXWithinGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX within speaker score.
"""
def __init__(
self,
abxDataset: ABXFeatureDataset,
max_size_group,
reorder_priority: List[int],
context_type: ContextType,
seed_n: int,
):
super().__init__(
abxDataset=abxDataset,
max_size_group=max_size_group,
symmetric=True,
reorder_priority=reorder_priority,
context_type=context_type,
seed_n=seed_n,
)
for context_group in self.indices_item_groups: # always within context
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += len(speaker_group) - 1
def __iter__(self):
for i_c, context_group in enumerate(self.indices_item_groups):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.indices_item_groups[i_c][i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.indices_item_groups[i_c][i_s][
i_b
]
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0]
yield out_coords, (data_a, size_a), (data_b, size_b), (
data_a,
size_a,
)
def get_board_size(self):
return (
self.abxDataset.get_n_speakers(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_context(),
)
class ABXAcrossGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(
self,
abxDataset: ABXFeatureDataset,
max_size_group,
reorder_priority: List[int],
context_type: ContextType,
seed_n: int,
):
super().__init__(
abxDataset=abxDataset,
max_size_group=max_size_group,
symmetric=False,
reorder_priority=reorder_priority,
context_type=context_type,
seed_n=seed_n,
)
self.get_speakers_from_cp = {}
self.max_x = 5
for context_group in self.indices_item_groups:
for speaker_group in context_group:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start]
)
if c_id not in self.get_speakers_from_cp:
self.get_speakers_from_cp[c_id] = {}
if p_id not in self.get_speakers_from_cp[c_id]:
self.get_speakers_from_cp[c_id][p_id] = {}
self.get_speakers_from_cp[c_id][p_id][s_id] = (i_start, i_end)
for context_group in self.indices_item_groups:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start]
)
self.len += (len(speaker_group) - 1) * (
min(
self.max_x,
len(self.get_speakers_from_cp[c_id][p_id]) - 1,
)
)
def get_other_speakers_in_group(self, i_start_group):
c_id, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start_group]
)
return [
v
for k, v in self.get_speakers_from_cp[c_id][p_id].items()
if k != s_id
]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0], id_x[2]
return out_coords, (data_a, size_a), (data_b, size_b), (data_x, size_x)
def __iter__(self):
for i_c, context_group in enumerate(self.indices_item_groups):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.indices_item_groups[i_c][i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.indices_item_groups[i_c][
i_s
][i_b]
yield self.get_abx_triplet(
(i_start_a, i_end_a),
(i_start_b, i_end_b),
(i_start_x, i_end_x),
)
def get_board_size(self):
return (
self.abxDataset.get_n_speakers(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_context(),
self.abxDataset.get_n_speakers(),
        )
from ..ABXDataset.abx_feature_dataset import ABXFeatureDataset
from ..ABXIterators.abx_iterators_anycontext import *
from ..ABXIterators.abx_iterators import *
from typing import Union
class IteratorFactory:
@classmethod
def get_iterator(
cls,
abxDataset: ABXFeatureDataset,
context_mode: str,
speaker_mode: str,
max_size_group: int,
seed_n: int,
) -> Union[
ABXWithinGroupIterator,
ABXAcrossGroupIterator,
ABXWithinGroupIteratorAnyContext,
ABXAcrossGroupIteratorAnyContext,
]:
if context_mode == "within":
retriever = cls.get_iterator_within_context
elif context_mode == "any":
retriever = cls.get_iterator_any_context
else:
raise ValueError(f"Invalid mode: {context_mode}")
return retriever(abxDataset, speaker_mode, max_size_group, seed_n)
@classmethod
def get_iterator_within_context(
cls,
abxDataset: ABXFeatureDataset,
speaker_mode: str,
max_size_group: int,
seed_n: int,
) -> Union[ABXWithinGroupIterator, ABXAcrossGroupIterator]:
if speaker_mode == "within":
return ABXWithinGroupIterator(
abxDataset=abxDataset,
max_size_group=max_size_group,
reorder_priority=[CONTEXT_IDX, SPEAKER_IDX, PHONE_IDX],
context_type=ContextType.WITHIN,
seed_n=seed_n,
)
if speaker_mode == "across":
return ABXAcrossGroupIterator(
abxDataset=abxDataset,
max_size_group=max_size_group,
reorder_priority=[CONTEXT_IDX, SPEAKER_IDX, PHONE_IDX],
context_type=ContextType.WITHIN,
seed_n=seed_n,
)
if speaker_mode == "any":
raise ValueError(f"Mode not yet supported: {speaker_mode}")
raise ValueError(f"Invalid mode: {speaker_mode}")
@classmethod
def get_iterator_any_context(
cls,
abxDataset: ABXFeatureDataset,
speaker_mode: str,
max_size_group: int,
seed_n: int,
) -> Union[
ABXWithinGroupIteratorAnyContext, ABXAcrossGroupIteratorAnyContext
]:
if speaker_mode == "within":
return ABXWithinGroupIteratorAnyContext(
abxDataset=abxDataset,
max_size_group=max_size_group,
reorder_priority=[SPEAKER_IDX, PHONE_IDX],
context_type=ContextType.ANY,
seed_n=seed_n,
)
if speaker_mode == "across":
return ABXAcrossGroupIteratorAnyContext(
abxDataset=abxDataset,
max_size_group=max_size_group,
reorder_priority=[SPEAKER_IDX, PHONE_IDX],
context_type=ContextType.ANY,
seed_n=seed_n,
)
raise ValueError(f"Invalid mode: {speaker_mode}") | zerospeech-libriabx2 | /zerospeech-libriabx2-0.9.8.tar.gz/zerospeech-libriabx2-0.9.8/zrc_abx2/ABX_src/ABXIterators/abx_iterator_factory.py | abx_iterator_factory.py |
# This version disregards the context parameters, i.e. to run
# "without-context".
# It supports across- and within-speaker ABX just like the original.
import random
from .abx_iterators import ABXIterator
from ..ABXDataset.abx_feature_dataset import ABXFeatureDataset
from ..models import *
## ITERATORS THAT IGNORE CONTEXT
class ABXWithinGroupIteratorAnyContext(ABXIterator):
r"""
Iterator giving the triplets for the ABX within speaker score.
"""
def __init__(
self,
abxDataset: ABXFeatureDataset,
max_size_group,
reorder_priority: List[int],
context_type: ContextType,
seed_n: int
):
super().__init__(
abxDataset=abxDataset,
max_size_group=max_size_group,
symmetric=True,
reorder_priority=reorder_priority,
context_type=context_type,
seed_n=seed_n
)
for speaker_group in self.indices_item_groups:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += len(speaker_group) - 1
def __iter__(self):
for i_s, speaker_group in enumerate(self.indices_item_groups):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.indices_item_groups[i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.indices_item_groups[i_s][i_b]
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
out_coords = id_a[1], id_a[0], id_b[0]
yield out_coords, (data_a, size_a), (data_b, size_b), (
data_a,
size_a,
)
def get_board_size(self):
return (
self.abxDataset.get_n_speakers(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_phone(),
)
class ABXAcrossGroupIteratorAnyContext(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(
self,
abxDataset: ABXFeatureDataset,
max_size_group,
reorder_priority: List[int],
context_type: ContextType,
seed_n: int
):
super().__init__(
abxDataset=abxDataset,
max_size_group=max_size_group,
symmetric=False,
reorder_priority=reorder_priority,
context_type=context_type,
seed_n=seed_n
)
self.get_speakers_from_p = {}
self.max_x = 5
for speaker_group in self.indices_item_groups:
for i_start, i_end in speaker_group:
_, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start]
) # Different from original
if p_id not in self.get_speakers_from_p:
self.get_speakers_from_p[p_id] = {}
self.get_speakers_from_p[p_id][s_id] = (i_start, i_end)
for speaker_group in self.indices_item_groups:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
_, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start]
) # Different from original
self.len += (len(speaker_group) - 1) * (
min(self.max_x, len(self.get_speakers_from_p[p_id]) - 1)
)
def get_other_speakers_in_group(self, i_start_group):
_, p_id, s_id = self.abxDataset.get_ids(
self.indices_items[i_start_group]
) # Different from original
return [v for k, v in self.get_speakers_from_p[p_id].items() if k != s_id]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[1], id_a[0], id_b[0], id_x[1]
return out_coords, (data_a, size_a), (data_b, size_b), (data_x, size_x)
def __iter__(self):
for i_s, speaker_group in enumerate(self.indices_item_groups):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.indices_item_groups[i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.indices_item_groups[i_s][i_b]
yield self.get_abx_triplet(
(i_start_a, i_end_a),
(i_start_b, i_end_b),
(i_start_x, i_end_x),
)
def get_board_size(self):
return (
self.abxDataset.get_n_speakers(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_phone(),
self.abxDataset.get_n_speakers(),
        )
[Anaconda package](https://anaconda.org/coml/tde)
Term Discovery Evaluation
=========================
Toolbox to evaluate Term Discovery systems.
* Complete documentation and metrics description are
  available at https://docs.cognitive-ml.fr/tde/
This toolbox transcribes each discovered interval phonetically, then applies
NLP evaluation metrics to judge the quality of the discovery.
The metrics are:
- NED: mean normalized edit distance between all the discovered pairs
- coverage: percentage of the corpus covered by the discovered pairs
- token/type: measures how well the system finds gold tokens and gold types
- boundary: measures how well the system finds gold boundaries
- grouping: judges the purity of the clusters formed by the system
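
Example usage
-------------

A minimal sketch, assuming the package is installed and exposes the
`eval.py` entry point described in the documentation:

    python eval.py my_sample.classes english output_dir/

where `my_sample.classes` contains the discovered classes, `english` selects
the gold alignments, and the scores are written to `output_dir/`.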
.. _grouping_type:
Grouping and Type Measures
~~~~~~~~~~~~~~~~~~~~~~~~~~
Clustering quality is evaluated using two metrics. The first metric
(Grouping precision, recall and F-score) computes the intrinsic
quality of the clusters in terms of their phonetic composition. This
score is equivalent to the purity and inverse purity scores used for
evaluating clustering. Like the Matching score, it is computed over
pairs but, contrary to the Matching score, it focuses on the covered
part of the corpus.
.. math::
\textrm{Grouping precision} &= \sum_{t\in\textrm{types}(\textrm{flat}(P_{clus}))}
freq(t, P_{clus})
\frac{|\textrm{match}(t, P_{clus} \cap P_{goldclus})|}{|\textrm{match}(t, P_{clus})|} \\
\textrm{Grouping recall} &= \sum_{t\in\textrm{types}(\textrm{flat}(P_{goldclus}))}
freq(t, P_{goldclus})
\frac{|\textrm{match}(t, P_{clus} \cap P_{goldclus})|}{|\textrm{match}(t, P_{goldclus})|}
where
.. math::
P_{clus} &= \{\langle \langle i, j\rangle , \langle k, l \rangle\rangle
| &\exists c\in C_{disc},\langle i, j\rangle\in c \wedge \langle k, l\rangle\in c\} \\
P_{goldclus} &= \{\langle \langle i, j\rangle , \langle k, l \rangle\rangle
| &\exists c_1,c_2\in C_{disc}:\langle i, j\rangle\in c_1 \wedge \langle k, l\rangle\in c_2 \\
&& \wedge T_{i,j}=T_{k,l} \wedge [i,j] \cap [k,l] = \varnothing \}
The second metric (Type precision, recall and F-score) takes as the
gold cluster set the true lexicon and is therefore much more
demanding. Indeed, a system could have very pure clusters, but could
systematically missegment words. Since a discovered cluster could have
several transcriptions, we use all of them (rather than using some
kind of centroid).
.. math::
\textrm{Type precision} &= \frac{|\textrm{types}(F_{disc}) \cap \textrm{types}(F_{goldLex})|}
{|\textrm{types}(F_{disc})|} \\
\textrm{Type recall} &= \frac{|\textrm{types}(F_{disc}) \cap \textrm{types}(F_{goldLex})|}
{|\textrm{types}(F_{goldLex})|} \\
where
- :math:`F_{disc}`: the set of discovered fragments,
:math:`F_{disc} = \{ f | f \in c , c \in C_{disc} \}`
- :math:`F_{goldLex}`: the set of fragments corresponding to the
corpus transcribed at the word level (gold transcription).
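
To make the bookkeeping concrete, here is a minimal sketch with
hypothetical toy fragments (it mirrors the token counting used by the
toolbox in ``tde/measures/grouping.py``, not the full pipeline):

.. code-block:: python

    from collections import Counter

    # toy fragments as (fragment_id, type); a pair is "gold" when both
    # fragments share the same transcription
    found_pairs = [((1, "ab"), (2, "ab")), ((1, "ab"), (3, "ax"))]
    gold_found = [p for p in found_pairs if p[0][1] == p[1][1]]

    def weights_and_counts(pairs):
        counter, seen = Counter(), set()
        for a, b in pairs:
            for frag_id, frag_type in (a, b):
                if frag_id not in seen:
                    counter[frag_type] += 1
                    seen.add(frag_id)
        return {t: counter[t] / len(seen) for t in counter}, counter

    weights, match = weights_and_counts(found_pairs)  # weights: ab 2/3, ax 1/3
    _, gold_match = weights_and_counts(gold_found)    # gold_match: ab 2
    precision = sum(weights[t] * gold_match[t] / match[t]
                    for t in match if t in gold_match)  # (2/3) * (2/2) = 2/3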
.. _token_boundary:
Token and Boundary Measures
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parsing quality is evaluated using two metrics. The first one (Token
precision, recall and F-score) evaluates how many of the word tokens
were correctly segmented (:math:`X = F_{disc}`, :math:`Y =
F_{goldLex}`). The second one (Boundary precision, recall and F-score)
evaluates how many of the gold word boundaries were found (:math:`X =
B_{disc}`, :math:`Y = B_{gold}`). These two metrics are typically
correlated, but researchers conventionally report the first. We provide
Boundary metrics for completeness, and also to enable system
diagnostics.
.. math::
\textrm{Token precision} &= \frac{|F_{disc}\cap F_{goldLex}|}{|F_{disc}|} \\
\textrm{Token recall} &= \frac{|F_{disc}\cap F_{goldLex}|}{|F_{goldLex}|} \\
\textrm{Boundary precision} &= \frac{|B_{disc}\cap B_{gold}|}{|B_{disc}|} \\
\textrm{Boundary recall} &= \frac{|B_{disc}\cap B_{gold}|}{|B_{gold}|}
where
- :math:`F_{disc}`: the set of discovered fragments,
:math:`F_{disc} = \{ f | f \in c , c \in C_{disc} \}`
- :math:`F_{goldLex}`: the set of fragments corresponding to the
corpus transcribed at the word level (gold transcription).
- :math:`B_{disc}`: the set of discovered fragment boundaries
(boundaries are defined in terms of *i*, the index of the nearest
phoneme boundary in the transcription if it is less than 30ms away,
and -1 (wrong boundary) otherwise)
- :math:`B_{gold}`: the set of boundaries in the parsed corpus.
The details of these metrics are given in the Ludusan et al (2014)
paper. The only divergence between this paper and the present
measures is that, contrary to the paper, we compute these scores on
the entirety of the corpus, rather than on the covered corpus. It is
necessary to do this if we want to compare systems that will cover
different subsets of the corpus. In the implementation for the
challenge, we use a subsampling scheme whereby the corpus is cut into
n-equal parts and each metric is computed on each of the subsample
separately and then averaged. This enables the computation to be more
tractable, and also to provide a standard deviation measure for each
metric. We also provide, in addition to each metric ran on the entire
corpus, the same metric restricted to within talker matches. This is
to enable the evaluation of systems that are specialized in within
talker spoken term discovery.
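
As a toy illustration (hypothetical fragments, not the toolbox API), the
Token scores reduce to set intersections over (file, onset, offset)
fragments:

.. code-block:: python

    f_disc = {("f1", 0.0, 0.4), ("f1", 0.4, 0.9), ("f2", 1.2, 1.5)}
    f_gold = {("f1", 0.0, 0.4), ("f2", 1.2, 1.5), ("f2", 1.5, 2.0)}

    token_precision = len(f_disc & f_gold) / len(f_disc)  # 2/3
    token_recall = len(f_disc & f_gold) / len(f_gold)     # 2/3
    token_fscore = (2 * token_precision * token_recall
                    / (token_precision + token_recall))   # 2/3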
.. _0_evaluation_metrics:
Evaluation Metrics
~~~~~~~~~~~~~~~~~~
The metrics described in this section come from
https://core.ac.uk/download/pdf/48187287.pdf.
Spoken term discovery can be logically broken down into a series of 3
operations, which can all be evaluated independently (see :ref:`Figure
1 <term_discovery_2015>`). The first step consists in matching pairs of
stretches of speech on the basis of their global similarity. The
second step consists in clustering the matching pairs, thereby
building a library of classes with potentially many instances. This is
equivalent to building a lexicon. In the third step, the system can
use its acquired classes to parse the continuous stream into candidate
tokens and boundaries. Some systems may only implement some of these
steps, others may do them simultaneously rather than sequentially. The
metrics below have been devised to enable comparisons between these
different systems by evaluating separately these logically distinct
steps.
.. figure:: ../_static/term_discovery.png
:width: 50%
:align: center
**Figure 1.** term discovery principles
All of our metrics assume a time-aligned transcription, where
:math:`T_{i,j}` is the (phoneme) transcription corresponding to the
speech fragment designated by the pair of indices :math:`\langle i,j
\rangle` (i.e., the speech fragment between frame *i* and *j*). If the
left or right edge of the fragment contains part of a phoneme, that
phoneme is included in the transcription if it corresponds to more
than 30ms or more than 50% of its duration.
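
A minimal sketch of that inclusion rule (a hypothetical helper, not the
toolbox API):

.. code-block:: python

    def include_phone(phn_on, phn_off, frag_on, frag_off):
        """Keep a boundary phone if the fragment covers more than 30 ms
        of it, or more than 50% of its duration."""
        covered = max(0.0, min(phn_off, frag_off) - max(phn_on, frag_on))
        return covered > 0.03 or covered > 0.5 * (phn_off - phn_on)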
.. _ned_coverage:
NED and Coverage Measure
~~~~~~~~~~~~~~~~~~~~~~~~
Many spoken term discovery systems incorporate a step whereby
fragments of speech are realigned and compared. Matching quality
measures the accuracy of this process. Here, we use the *NED/Coverage*
metrics to evaluate it.
*NED* and *Coverage* are quick to compute and give a qualitative
estimate of the matching step. *NED* is the Normalised Edit Distance;
it is equal to zero when a pair of fragments have exactly the same
transcription, and 1 when they differ in all phonemes. *Coverage* is
the fraction of the corpus covered by the discovered matching
pairs.
.. math::
\textrm{NED} &= \sum_{\langle x, y\rangle \in P_{disc}}
\frac{\textrm{ned}(x, y)}{|P_{disc}|} \\
\textrm{Coverage} &= \frac{|\textrm{cover}(P_{disc})|}{|\textrm{cover}(P_{all})|}
where
.. math::
\textrm{ned}(\langle i, j \rangle, \langle k, l \rangle) &=
\frac{\textrm{Levenshtein}(T_{i,j}, T_{k,l})}{\textrm{max}(j-i+1,k-l+1)} \\
\textrm{cover}(P) &= \bigcup_{\langle i, j \rangle \in \textrm{flat}(P)}[i, j] \\
\textrm{flat}(P) &= \{p|\exists q:\{p,q\}\in P\}
with
- :math:`P_{all}`: the set of all possible non overlapping matching
fragment pairs. :math:`P_{all}=\{ \{a,b \}\in F_{all} \times F_{all}
| T_{a} = T_{b}, \neg \textrm{overlap}(a,b)\}`.
- :math:`P_{disc}`: the set of non overlapping discovered pairs,
:math:`P_{disc} = \{ \{a,b\} | a \in c, b \in c, \neg
\textrm{overlap}(a,b), c \in C_{disc} \}`
- :math:`P_{disc^*}`: the set of pairwise substring completions of
  :math:`P_{disc}`, which means that we compute all of the possible
  minimal path realignments of the two strings, and extract all of the
  substring pairs along the path (e.g., for fragment pair
.. _format:
File Formats
~~~~~~~~~~~~
Input Format
------------
The package takes as input the same format as in the
Zero Resource Speech Challenge (zerospeech.com):
.. code-block:: bash
Class 1:
wav1 on1 off1
wav2 on2 off2
Class 2:
wav1 on3 off3
wav3 on4 off4
wav2 on5 off5
The ``onset`` and ``offset`` times are expressed **in seconds**.
Note that each class must end with an empty line, including the last class of
the file. So **the file must be terminated by a blank line**.
If you want to use other input formats, you need to edit the
``read_clusters`` method in ``tde/readers/disc_reader.py``.
Alignments
----------
The package uses gold phone and words alignments to evaluate the inputs.
The alignments are stored in ``tde/share``.
The format for the alignments is (without header):
.. code-block:: bash
filename1 on1 off1 symbol1
filename2 on2 off2 symbol2
...
Where `filename` is the name of the wav file, and `symbol` is the word or
phone label.
To add your own language in the package, you need to add ``yourlang.phn`` and
``yourlang.wrd`` in ``tde/share`` and add the option in ``tde/eval.py``
(line 39).
.. _usage_example:
Usage Example
~~~~~~~~~~~~~
To use, you can use the `tde/eval.py` script:
.. code-block:: bash
python eval.py discovered_class corpus output/
or you can use the API in python
.. code-block:: python
import pkg_resources
from tde.readers.gold_reader import *
from tde.readers.disc_reader import *
wrd_path = pkg_resources.resource_filename(
pkg_resources.Requirement.parse('tde'),
'tde/share/mandarin.wrd')
phn_path = pkg_resources.resource_filename(
pkg_resources.Requirement.parse('tde'),
'tde/share/mandarin.phn')
gold = Gold(wrd_path=wrd_path,
phn_path=phn_path)
disc_clsfile = "/path/to/discovered/file"
disc = Disc(disc_clsfile, gold)
from tde.measures.grouping import *
    grouping = Grouping(disc)
grouping.compute_grouping()
print(grouping.precision)
print(grouping.recall)
import time
import argparse
import pkg_resources
from tde.measures.ned import *
from tde.measures.boundary import *
from tde.measures.grouping import *
from tde.measures.coverage import *
from tde.measures.token_type import *
from tde.readers.gold_reader import *
from tde.readers.disc_reader import *
def main():
parser = argparse.ArgumentParser(
prog='TDE',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Evaluate spoken term discovery',
epilog="""Example usage:
$ python eval.py my_sample.classes english resultsdir/
evaluates STD output `my_sample.classes` on the english dataset and stores the
output in `resultsdir/`.
Classfiles must be formatted like this:
Class 1 (optional_name)
fileID starttime endtime
fileID starttime endtime
...
Class 2 (optional_name)
fileID starttime endtime
...
""")
parser.add_argument('disc_clsfile', metavar='discovered', type=str)
parser.add_argument('corpus', metavar='language', type=str,
choices=['buckeye', 'english', 'french',
'mandarin'],
help='Choose the corpus you want to evaluate')
parser.add_argument('--measures', '-m',
nargs='*',
default=[],
choices=['boundary', 'grouping',
'token/type', 'coverage',
'ned'])
parser.add_argument('--njobs', '-n',
default=1,
type=int,
help="number of cpus to be used in grouping")
parser.add_argument('output', type=str,
help="path in which to write the output")
args = parser.parse_args()
# load the corpus alignments
wrd_path = pkg_resources.resource_filename(
pkg_resources.Requirement.parse('tde'),
'tde/share/{}.wrd'.format(args.corpus))
phn_path = pkg_resources.resource_filename(
pkg_resources.Requirement.parse('tde'),
'tde/share/{}.phn'.format(args.corpus))
print('Reading gold')
gold = Gold(wrd_path=wrd_path,
phn_path=phn_path)
print('Reading discovered classes')
disc = Disc(args.disc_clsfile, gold)
measures = args.measures
output = args.output
# Launch evaluation of each metric and write it
# in the output
if len(measures) == 0 or "boundary" in measures:
print('Computing Boundary...')
boundary = Boundary(gold, disc, output)
boundary.compute_boundary()
boundary.write_score()
if len(measures) == 0 or "grouping" in measures:
print('Computing Grouping...')
grouping = Grouping(disc, output, args.njobs)
grouping.compute_grouping()
grouping.write_score()
if len(measures) == 0 or "token/type" in measures:
print('Computing Token and Type...')
token_type = TokenType(gold, disc, output)
token_type.compute_token_type()
token_type.write_score()
if len(measures) == 0 or "coverage" in measures:
print('Computing Coverage...')
coverage = Coverage(gold, disc, output)
coverage.compute_coverage()
coverage.write_score()
if len(measures) == 0 or "ned" in measures:
print('Computing NED...')
ned = Ned(disc, output)
ned.compute_ned()
ned.write_score()
if __name__ == "__main__":
    main()
import numpy as np
from joblib import Parallel, delayed
from .measures import Measure
from itertools import combinations
from collections import defaultdict, Counter
from tde.utils import overlap
class Grouping(Measure):
"""Grouping measure
The grouping measures how pure the found clusters are, and
is close to the 'purity' measure in clustering.
See https://docs.cognitive-ml.fr/tde/measures/index.html
for a summary of all measures.
Input
:param disc: Discovered Object, contains the discovered elements
:param output_folder: string, path to the output folder
:param njobs: Number of cpus to be used.
Output
:param precision: Grouping Precision
:param recall: Grouping Recall
"""
def __init__(self, disc, output_folder=None, njobs=1):
self.metric_name = "grouping"
self.output_folder = output_folder
self.clusters = disc.clusters
self.intervals = disc.intervals
self.njobs = njobs
self.found_pairs = set()
self.gold_pairs = set()
self.found_types = set()
self.gold_types = set()
@property
def precision(self):
if len(self.found_types) == 0:
prec = np.nan
else:
prec = sum(self.found_weights[t] * self.found_gold_counter[t]
/ self.found_counter[t] for t in self.found_types)
return prec
@property
def recall(self):
if len(self.gold_types) == 0:
rec = np.nan
else:
rec = sum(self.gold_weights[t] * self.found_gold_counter[t]
/ self.gold_counter[t] for t in self.gold_types)
return rec
def get_gold_pairs(self):
""" Get all the gold pairs that can be created using the
discovered intervals.
The pairs are ordered by filename and onset.
Input
:param intervals: a list of all the discovered intervals, with
their transcription
Output
:param gold_pairs: a set of all the gold pairs created from the
discovered intervals
:param gold_types: all the types (n-gram) that occur in gold_pairs
"""
same = defaultdict(set)
for fname, disc_on, disc_off, token_ngram, ngram in self.intervals:
# ngram = tuple(ph for on, off, ph in token_ngram)
same[ngram].add((fname, disc_on, disc_off, token_ngram, ngram))
# add gold pair as tuple if both elements don't overlap
self.gold_pairs = {
tuple(sorted((f1, f2), key=lambda f: (f[0], f[1])))
for ngram in same
for f1, f2 in combinations(same[ngram], 2)
if not (f1[0] == f2[0]
and overlap((f1[1], f1[2]),
(f2[1], f2[2]))[0] > 0)}
self.gold_types = {f1[4] for f1, f2 in self.gold_pairs}
def get_found_pairs(self):
""" Get all the pairs that were found.
The pairs are ordered by filename and onset.
Input
:param clusters: a dict of all the clusters found. the keys
are the clusters names, the values are
a list of the intervals in this cluster
Output
:param found_pairs: a set of all the discovered pairs
"""
for class_nb in self.clusters:
self.found_pairs = self.found_pairs.union(
set(combinations(self.clusters[class_nb], 2)))
            # count type only if the cluster has at least two elements
if len(self.clusters[class_nb]) > 1 :
self.found_types = self.found_types.union(
{ngram for _, _, _, token_ngram, ngram
in self.clusters[class_nb]})
# order found pairs
self.found_pairs = {
tuple(sorted((f1, f2), key=lambda f: (f[0], f[1])))
for f1, f2 in self.found_pairs}
@staticmethod
def get_weights(pairs):
""" For each type get its weight
Input
:params pairs: a set containing pairs of intervals, stored
as (filename, onset, offset, token_ngram, ngram),
where token_ngram is the ngram with the timestamps
of each of its phone, and ngram is just a tuple of
all the phones
Output
:return: weights, a dict that for each type (i.e. ngram)
gives its weight, which is computed as
number_of_tokens(ngram)/total_number_of_seen_tokens
counter, a dict that for each type (i.e. ngram)
gives the number of tokens of this ngram in the
pairs.
"""
        # count occurrences of each interval in pairs for frequency
counter = Counter()
seen_token = set()
for f1, f2 in pairs:
if f1[3] not in seen_token:
counter.update((f1[4],))
# count token as seen
seen_token.add(f1[3])
if f2[3] not in seen_token:
counter.update((f2[4],))
seen_token.add(f2[3])
weights = {ngram: counter[ngram]/len(seen_token) for ngram in counter}
return weights, counter
def compute_grouping(self):
""" Compute the grouping by essentially counting the number of tokens
of each type in three sets: the set of gold pairs, the set of
found pairs, and the intersection of gold pairs and found pairs
"""
self.get_gold_pairs()
self.get_found_pairs()
gold_found_pairs = self.found_pairs.intersection(self.gold_pairs)
self.gold_weights, self.gold_counter = self.get_weights(
self.gold_pairs)
        # count occurrences and weights for found pairs
self.found_weights, self.found_counter = self.get_weights(
self.found_pairs)
        # count occurrences and weights for the intersection of gold and
        # found pairs
        _, self.found_gold_counter = self.get_weights(gold_found_pairs)
import os
import numpy as np
from .measures import Measure
from tde.utils import overlap
class TokenType(Measure):
"""Token Type measure
The Token measures how many 'Gold' token were found.
The Type measures how many 'Gold' type were found.
A 'Type' is a word type, and a 'token' is a specific occurence
of a 'type'.
See https://docs.cognitive-ml.fr/tde/measures/index.html
for a summary of all measures.
Input
:param disc: Discovered Object, contains the discovered boundaries
:param gold: Gold object, contains all the gold boundaries
:param output_folder: string, path to the output folder
Output
:param token_precision: Token Precision
:param token_recall: Token Recall
:param type_precision: Type Precision
:param type_recall: Type Recall
"""
def __init__(self, gold, disc, output_folder=None):
self.metric_name = "token_type"
self.output_folder = output_folder
# get gold as interval trees
self.gold_phn = gold.phones
assert type(self.gold_phn) == dict, (
"gold_phn should be a dict "
"of intervaltree objects but is {} ".format(type(self.gold_phn)))
self.gold_wrd = gold.words
assert type(self.gold_wrd) == dict, (
"gold_phn should be a dict "
"of intervaltree objects but is {} ".format(type(self.gold_wrd)))
self.all_type = set()
self.n_token = 0
# get gold types and count gold tokens
for fname in self.gold_wrd:
self.all_type.update(set(
[word for on, off, word in self.gold_wrd[fname]]))
self.n_token += len(self.gold_wrd[fname])
self.n_type = len(self.all_type)
# get discovered as list of intervals
self.disc = disc.intervals
# measures
self.n_discovered_words = 0
self.type_hit = set()
self.token_hit = 0
self.type_seen = set()
self.token_seen = set()
self.token_prec = None
self.token_rec = None
self.type_prec = None
self.type_rec = None
@property
def precision(self):
"""Return Token and Type precision"""
# Token precision/recall
if len(self.disc) == 0:
self.token_prec = np.nan
else:
self.token_prec = self.token_hit / len(self.disc)
# Types precision/recall
if len(self.type_seen) == 0:
self.type_prec = np.nan
else:
self.type_prec = len(self.type_hit) / len(self.type_seen)
return self.token_prec, self.type_prec
@property
def recall(self):
"""Return Token and Type recall"""
if self.n_token == 0:
self.token_rec = np.nan
else:
self.token_rec = self.token_hit / self.n_token
# type recall
if self.n_type == 0:
self.type_rec = np.nan
else:
self.type_rec = len(self.type_hit) / self.n_type
return self.token_rec, self.type_rec
@property
def fscore(self):
"""Return Token and Type fscore"""
assert self.token_prec, (
"Attempting to compute token fscore"
" when token precision is not computed yet.")
assert self.token_rec, (
"Attempting to compute token fscore"
" when token recall is not computed yet.")
self.token_fscore = 2 * (self.token_prec * self.token_rec) / (
self.token_prec + self.token_rec)
self.type_fscore = 2 * (self.type_prec * self.type_rec) / (
self.type_prec + self.type_rec)
return self.token_fscore, self.type_fscore
def compute_token_type(self):
""" Loop over all intervals and compute token
type measure.
The Token measure is computed by
counting all the gold words discovered by the
system.
The Type measure is computed by counting all
the unique type of gold words discovered by the
system.
Input:
:param gold_phn: the gold phone alignment
stored as an interval tree
:type gold_phn: Interval Tree
:param gold_wrd: the gold word alignment
stored as an interval tree
:type gold_wrd: Interval Tree
:param disc: a list of all the discovered
intervals
:type disc: list of tuples
Output:
:return: The Token Type measure
"""
for fname, disc_on, disc_off, token_ngram, ngram in self.disc:
if fname not in self.gold_wrd:
raise ValueError('{}: file not found in gold'.format(fname))
overlap_wrd = self.gold_wrd[fname].overlap(disc_on, disc_off)
# ngram = tuple(phn for _, _, phn in ngram)
# get type by getting ngram covered
self.type_seen.add(tuple(ngram))
# switch cases.
# if interval overlaps with less than 1 word
# don't count
            # if the overlapped word is not fully discovered, i.e. its
            # border phones are covered by less than 30ms and less than
            # 50% of their duration, then don't count
if len(overlap_wrd) < 1:
continue
elif len(overlap_wrd) > 1:
# choose word with the most overlap
current_overlap = 0
for wrd_on, wrd_off, wrd in overlap_wrd:
ov, _ = overlap((disc_on, disc_off), (wrd_on, wrd_off))
if ov > current_overlap:
current_overlap = ov
chosen = (wrd_on, wrd_off, wrd)
overlap_wrd = chosen
else:
# Get word and add it to types seen (not necessarily hit)
overlap_wrd = overlap_wrd.pop()
gold_wrd_on, gold_wrd_off, gold_wrd_token = overlap_wrd
gold_wrd_trs = sorted(
[phn for phn
in self.gold_phn[fname].overlap(gold_wrd_on, gold_wrd_off)])
gold_wrd_trs = tuple(
[phn for phn_on, phn_off, phn in gold_wrd_trs])
if ((gold_wrd_trs == ngram) and
not ((fname, gold_wrd_on,
gold_wrd_off, gold_wrd_token) in self.token_seen)):
self.token_hit += 1
self.token_seen.add(
(fname, gold_wrd_on, gold_wrd_off, gold_wrd_token))
# TODO CHECK HOMOPHONE CASE W/ EMMANUEL
if ((gold_wrd_trs == ngram) and ngram not in self.type_hit):
self.type_hit.add(ngram)
def write_score(self):
token_prec, type_prec = self.precision
token_rec, type_rec = self.recall
token_fscore, type_fscore = self.fscore
with open(os.path.join(self.output_folder, self.metric_name), 'w') as fout:
fout.write("metric: {}\n".format('token'))
fout.write("precision: {}\n".format(token_prec))
fout.write("recall: {}\n".format(token_rec))
fout.write("fscore: {}\n".format(token_fscore))
fout.write("metric: {}\n".format('type'))
fout.write("precision: {}\n".format(type_prec))
fout.write("recall: {}\n".format(type_rec))
fout.write("fscore: {}\n".format(type_fscore)) | zerospeech-tde | /zerospeech-tde-2.0.3.tar.gz/zerospeech-tde-2.0.3/tde/measures/token_type.py | token_type.py |
import numpy as np
from .measures import Measure
class Boundary(Measure):
"""Boundary measure
The boundary measures how many 'Gold' boundaries were found.
See https://docs.cognitive-ml.fr/tde/measures/index.html for
a summary of all measures.
Input
:param disc: Discovered Object, contains the discovered boundaries
:param gold: Gold object, contains all the gold boundaries
:param output_folder: string, path to the output folder
Output
:param precision: Boundary Precision
:param recall: Boundary Recall
"""
def __init__(self, gold, disc, output_folder=None):
self.metric_name = "boundary"
self.output_folder = output_folder
# get gold as interval trees
self.gold_boundaries_up = gold.boundaries[0]
self.gold_boundaries_down = gold.boundaries[1]
self.gold_wrd = gold.words
assert type(self.gold_wrd) == dict, (
"gold_phn should be a dict "
"of intervaltree objects but is {} ".format(type(self.gold_wrd)))
# get all discovered boundaries
bounds_down = [(fname, ngram[0][0])
for fname, _, _, ngram, _ in disc.intervals
if len(ngram) > 0]
bounds_up = [(fname, ngram[-1][1])
for fname, _, _, ngram, _ in disc.intervals
if len(ngram) > 0]
self.disc_down = set(bounds_down)
self.disc_up = set(bounds_up)
# measures
self.boundaries = dict()
self.boundaries_seen = set()
self.n_correct_disc_boundary = 0
# if boundary is discovered as up and down, only count it once
self.n_all_disc_boundary = len(self.disc_up.difference(
self.disc_up.intersection(self.disc_down))) + len(self.disc_down)
self.n_gold_boundary = 0
self.n_discovered_boundary = 0
for fname in self.gold_boundaries_up:
self.n_gold_boundary += len(
self.gold_boundaries_up[fname].difference(
self.gold_boundaries_up[fname].intersection(
self.gold_boundaries_down[fname])))
self.n_gold_boundary += len(self.gold_boundaries_down[fname])
@property
def precision(self):
"""Return Token and Type precision"""
# Token precision/recall
if self.n_all_disc_boundary == 0:
boundary_prec = np.nan
else:
boundary_prec = (
self.n_discovered_boundary / self.n_all_disc_boundary)
return boundary_prec
@property
def recall(self):
"""Return Token and Type recall"""
if self.n_gold_boundary == 0:
boundary_rec = np.nan
else:
boundary_rec = self.n_discovered_boundary / self.n_gold_boundary
return boundary_rec
def compute_boundary(self):
""" Create intervaltree containing only boundary phones.
Here we discriminate upward and downward boundaries,
because if a word is followed by a silence, the upward
boundary should be counted if discovered as upward, but not
if discovered as downward.
Input
:param disc_down: a list of all the downward boundaries of
discovered segments
:param disc_up: a list of all the upward boundaries of
discovered segments
:gold_boundaries_down: a set of all the downward gold boundaries
:gold_boundaries_up: a set of all the upward gold boundaries
"""
# downward boundaries
for fname, disc_time in self.disc_down:
if fname not in self.gold_boundaries_down:
raise ValueError('{}: file not found in gold'.format(fname))
if (
disc_time in self.gold_boundaries_down[fname]
and not (fname, disc_time) in self.boundaries_seen
):
self.n_discovered_boundary += 1
self.boundaries_seen.add((fname, disc_time))
# upward boundaries
for fname, disc_time in self.disc_up:
if fname not in self.gold_boundaries_up:
raise ValueError('{}: file not found in gold'.format(fname))
if (
disc_time in self.gold_boundaries_up[fname]
and not (fname, disc_time) in self.boundaries_seen
):
self.n_discovered_boundary += 1
                self.boundaries_seen.add((fname, disc_time))
import os
from .measures import Measure
class Coverage(Measure):
"""Coverage measure
The coverage measures how many 'Gold' phonemes were found.
See https://docs.cognitive-ml.fr/tde/measures/index.html
for a summary of all measures.
Input
:param disc: Discovered Object, contains the discovered phonemes
:param gold: Gold object, contains all the gold phonemes
:param output_folder: string, path to the output folder
Output
:param coverage: Coverage
"""
def __init__(self, gold, disc, output_folder=None):
self.metric_name = "coverage"
self.output_folder = output_folder
# self.all_intervals = set()
self.n_phones = 0
for fname in gold.phones:
# TODO remove SIL here ?
self.n_phones += len([
ph for on, off, ph in gold.phones[fname]
if (ph != "SIL" and ph != "SPN")])
self.covered_phn = set(
(fname, phn_on, phn_off, phn)
for fname, disc_on, disc_off, token_ngram, ngram
in disc.intervals
for phn_on, phn_off, phn in token_ngram
if (phn != "SIL" and phn != "SPN"))
self.coverage = 0
def compute_coverage(self):
""" For coverage, simply compute the ratio of discovered phones over all phone
Input:
:param covered_phn: a set containing all the covered phones
Output:
:param coverage: the ratio of number of covered phones over
the overall number of phones in the corpus
"""
self.coverage = len(self.covered_phn) / self.n_phones
def write_score(self):
if not self.coverage:
raise AttributeError('Attempting to print scores but score'
' is not yet computed!')
with open(os.path.join(self.output_folder, self.metric_name), 'w') as fout:
fout.write("metric: {}\n".format(self.metric_name))
fout.write("coverage: {}\n".format(self.coverage)) | zerospeech-tde | /zerospeech-tde-2.0.3.tar.gz/zerospeech-tde-2.0.3/tde/measures/coverage.py | coverage.py |
import os
import numpy as np
import editdistance
from .measures import Measure
from itertools import combinations
class Ned(Measure):
"""NED measure
    The NED (Normalized Edit Distance) measures how close each
    discovered pair of fragments is. For a pair of discovered
    intervals, a NED of 1 means all their phonemes are different, and
    a NED of 0 means they contain the exact same phonemes in the same
    order.
See https://docs.cognitive-ml.fr/tde/measures/index.html
for a summary of all measures.
Input
:param disc: Discovered Object, contains the discovered phonemes
:param output_folder: string, path to the output folder
Output
    :param ned: NED, the mean normalized edit distance over all pairs
"""
def __init__(self, disc, output_folder=None):
self.metric_name = "ned"
self.output_folder = output_folder
self.disc = disc.clusters
# measures
self.n_pairs = None
self.ned = None
@staticmethod
def pairwise_ned(s1, s2):
s1 = tuple(phn for phn in s1 if phn != "SIL")
s2 = tuple(phn for phn in s2 if phn != "SIL")
if max(len(s1), len(s2)) > 0:
return float(editdistance.eval(s1, s2)) / max(len(s1), len(s2))
else:
return 1.0
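    # Worked example (hedged): pairwise_ned(("a", "b", "c"), ("a", "x", "c"))
    # returns 1/3 (one substitution over a maximum length of 3); identical
    # sequences give 0.0 and fully different ones give 1.0.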
def compute_ned(self):
""" compute edit distance over all discovered pairs and average across
all pairs
Input:
        :param disc: a dictionary containing all the discovered clusters.
Each key in the dict is a class, and its value is
all the intervals in this cluster.
Output:
:param ned: the average edit distance of all the pairs
"""
overall_ned = []
for class_nb in self.disc:
for discovered1, discovered2 in combinations(
self.disc[class_nb], 2):
fname1, disc_on1, disc_off1, token_ngram1, ngram1 = discovered1
fname2, disc_on2, disc_off2, token_ngram2, ngram2 = discovered2
pair_ned = self.pairwise_ned(ngram1, ngram2)
overall_ned.append(pair_ned)
# get number of pairs and ned value
self.n_pairs = len(overall_ned)
self.ned = np.mean(overall_ned)
def write_score(self):
if self.ned is None:
raise AttributeError('Attempting to print scores but score'
' is not yet computed!')
with open(os.path.join(self.output_folder, self.metric_name), 'w') as fout:
fout.write("metric: {}\n".format(self.metric_name))
fout.write("score: {}\n".format(self.ned)) | zerospeech-tde | /zerospeech-tde-2.0.3.tar.gz/zerospeech-tde-2.0.3/tde/measures/ned.py | ned.py |
import os
import codecs
import intervaltree
from tde.utils import check_boundary
class Disc():
""" Read the discovered intervals
Attributes
----------
    :param disc_path: Path to the 'Discovered' file, to be evaluated
:param intervals: a list of all the discovered intervals
:param intervals_tree: an interval tree containing all the discovered
intervals
:param clusters: a dictionary where all the keys are class numbers, and the
values are all the intervals for that class
Raises
------
AssertionError
- if incorrect interval found (offset greater than onset)
- if two classes have the same class number
ValueError
- if discovered file is not found
- if discovered file is is wrong format
"""
def __init__(self, disc_path=None, gold=None):
if not os.path.isfile(disc_path):
raise ValueError('{}: File Not Found'.format(disc_path))
self.disc_path = disc_path
self.clusters = None
self.intervals = None
if gold:
self.gold_phn = gold.phones
else:
print("Warning: discovered file is read"
" without gold, so no transcription is given")
self.gold_phn = None
self.intervals_tree = None
self.read_clusters()
def __repr__(self):
return '\n'.join(
'{} {} {}'.format(fname, t0, t1)
for (fname, t0, t1) in self.intervals)
def read_clusters(self):
""" Read discovered clusters
Returns a dictionnary { class_number : [intervals_found]} that gives
a list of the intervals for each class_number as key.
        The intervals are represented as a tuple:
        (fname: str, name of the wav file (one file per speaker),
         disc_on: float, onset of the interval,
         disc_off: float, offset of the interval,
         token_ngram: tuple, each discovered phone from the interval, with
         its onset and offset,
         ngram: tuple, each discovered phone from the interval, without
         timestamps)
Raises
------
AssertionError
- if incorrect interval found (offset greater than onset)
- if two classes have the same class number
ValueError
- if a line is badly formated
"""
classes = []
discovered = dict()
intervals = set()
        # the file is decoded line by line and clusters are accumulated
        # as they are read, to avoid using a high amount of memory
with open(self.disc_path) as fin:
cfile = fin.readlines()
# check that last line is empty
assert cfile[-1] == '\n', ("discovered class file should end with"
" and empty line")
for lines in cfile:
line = lines.strip()
# check what type of line is being read, either it begins with
# "Class", so it's the start of a new cluster or it contains an
# interval, so add it to current cluster or it is empty, so the
# previous cluster has been read entirely
if line[:5] == 'Class': # class + number + ngram if available
class_number = line.strip().split(' ')[1]
elif len(line.split(' ')) == 3:
fname, start, end = line.split(' ')
disc_on, disc_off = float(start), float(end)
# check that timestamps are correct
assert disc_off > disc_on, ("timestamps are not"
" correct\n {} {} {}\n".format(fname, disc_on, disc_off))
# get the phone transcription for current interval
if self.gold_phn:
token_ngram, ngram = (self.get_transcription(
fname, disc_on, disc_off, self.gold_phn))
# throw away interval if outside of transcription
if len(token_ngram) == 0:
continue
else:
token_ngram, ngram = None, None
intervals.add(
(fname, disc_on, disc_off, token_ngram, ngram))
classes.append(
(fname, disc_on, disc_off, token_ngram, ngram))
elif len(line) == 0:
# empty line means that the class has ended
# add class to discovered dict.
# if entry already exists, exit with an error
assert class_number not in discovered, (
"Two Classes have the same number {}"
" in discovered classes".format(class_number))
#assert len(classes) > 0, (
# 'class {} if empty'.format(class_number))
if len(classes) > 0:
discovered[class_number] = classes
# re-initialize classes
classes = list()
else:
raise ValueError('Line in discovered classes has wrong'
' format\n {}\n'.format(line))
self.clusters = discovered
self.intervals = list(intervals)
print("Discovered Class file read\n")
print("{} unique intervals found".format(len(self.intervals)))
    def read_intervals_tree(self):
        """ Read discovered intervals as one interval tree per file"""
        from collections import defaultdict
        # self.intervals is a flat list of
        # (fname, disc_on, disc_off, token_ngram, ngram) tuples, so group
        # them by file before building one tree per file
        by_fname = defaultdict(list)
        for fname, disc_on, disc_off, token_ngram, ngram in self.intervals:
            by_fname[fname].append((disc_on, disc_off, (token_ngram, ngram)))
        self.intervals_tree = dict()
        for fname, tuples in by_fname.items():
            self.intervals_tree[fname] = intervaltree.IntervalTree.from_tuples(tuples)
@staticmethod
def get_transcription(fname, disc_on, disc_off, gold_phn):
""" Given an interval, get its phone transcription
Parameters
----------
fname: str, name of the speaker on the interval
disc_on: float, onset of the interval
disc_off: float, offset of the interval
gold_phn: intervaltree, contains the gold phones
Returns
-------
        token_ngram: list of tuples, list of all the
            (onset, offset, phone) triples covered by the requested interval
        ngram: list, list of all the phones covered by the requested interval
"""
# Get all covered phones
covered = sorted(
[phn for phn
in gold_phn[fname].overlap(disc_on, disc_off)],
key=lambda times: times[0])
if len(covered) == 0:
return tuple(), tuple()
# Check if first and last phones are discovered
#keep_first = check_boundary(
# (covered[0][0], covered[0][1]),
# (disc_on, covered[0][1]))
#keep_last = check_boundary(
# (covered[-1][0], covered[-1][1]),
# (covered[-1][0], disc_off))
keep_first = check_boundary(
(covered[0][0], covered[0][1]),
(disc_on, disc_off))
keep_last = check_boundary(
(covered[-1][0], covered[-1][1]),
(disc_on, disc_off))
if keep_first:
token_ngram = [
(covered[0][0], covered[0][1], covered[0][2])]
ngram = [covered[0][2]]
else:
token_ngram = []
ngram = []
token_ngram += [(on, off, phn) for on, off, phn in covered[1:-1]]
ngram += [phn for on, off, phn in covered[1:-1]]
if keep_last and len(covered) > 1:
token_ngram += [
(covered[-1][0], covered[-1][1], covered[-1][2])]
ngram += [covered[-1][2]]
        return tuple(token_ngram), tuple(ngram)
import os
import pandas as pd
import intervaltree
from collections import defaultdict
class Gold():
def __init__(self, vad_path=None, wrd_path=None, phn_path=None):
"""Object representing the gold.
        Contains the VAD, the word alignment and the phone alignment. The
        alignments can be stored as interval trees or as dictionaries. The
interval tree of the silences can also be stored.
Attributes
----------
:param vad_path: string, path to the vad
        :param wrd_path: string, path to the word alignment
:param phn_path: string, path to the phone alignment
:param boundaries: tuples of two dicts, each dict contains the
database filename as key, and for each file,
contains the onset boundaries and offset boundaries
:param phones: a dict {fname: intervaltree} which returns the interval tree
of the gold phones for each file
:param words: a dict {fname: intervaltree} which returns the interval tree
of the gold words for each file
"""
# paths
self.vad_path = vad_path
self.wrd_path = wrd_path
self.phn_path = phn_path
# golds
self.boundaries = None
self.phones = None
self.words = None
# read alignments
self.words, _, self.ix2wrd, self.wrd2ix, self.boundaries = (
self.read_gold_intervalTree(self.wrd_path, "word"))
if "SIL" in self.wrd2ix:
print("WARNING: Word alignement contains silences, those will be counted as word by the evaluation.\n"
"You should keep them in the phone alignment but remove them from the word alignment.")
self.phones, _, self.ix2phn, self.phn2ix, _ = (
self.read_gold_intervalTree(self.phn_path, "phone"))
# self.boundaries = self.get_boundaries()
def read_gold_dict(self, gold_path):
"""Read the gold phoneme file with fields: speaker/file start end annotation
Returns a dict with the file/speaker as a key and the following
structure:
gold['speaker'] = [{'start': list(...)}, {'end': list(...), 'symbol': list(...)}]
"""
if not os.path.isfile(gold_path):
raise ValueError('{}: File Not Found'.format(gold_path))
# Read phone alignment using pandas
df = pd.read_table(
gold_path, sep=' ', header=None, encoding='utf8',
names=['file', 'start', 'end', 'symbol'])
# sort the data by file and onsets and round the onsets/offsets
df = df.sort_values(by=['file', 'start'])
df['start'] = df['start'].round(decimals=4)
df['end'] = df['end'].round(decimals=4)
# # number of phones tokens in corpus
# number_read_symbols = len(df['symbol'])
# get the lexicon and translate to as integers
symbols = list(set(df['symbol']))
symbol2ix = {v: k for k, v in enumerate(symbols)}
ix2symbols = dict((v, k) for k, v in symbol2ix.items())
df['symbol'] = df['symbol'].map(symbol2ix)
        # timestamps in gold (start, end) must be in ascending order for fast
# search
gold = {}
verification_num_symbols = 0
for k in df['file'].unique():
start = df[df['file'] == k]['start'].values
end = df[df['file'] == k]['end'].values
symbols = df[df['file'] == k]['symbol'].values
# check onsets/offsets are ordered
# assert not any(np.greater_equal.outer(start[:-1] - start[1:], 0)), 'start in annotation file is not odered!!!'
# assert not any(np.greater_equal.outer(end[:-1] - end[1:], 0)), 'end in annotation file is not odered!!!'
gold[k] = {
'start': list(start),
'end': list(end),
'symbol': list(symbols)}
verification_num_symbols += len(gold[k]['symbol'])
# logging.debug("%d symbolss read from %s (%d returned)", number_read_symbols,
# gold_path, verification_num_symbols)
return gold, ix2symbols, symbol2ix
def read_gold_intervalTree(self, gold_path, symbol_type=None):
        '''Read the gold alignment and build an interval tree (lookups in O(log(n))).
After that, take each found interval, search for its overlaps
(O( log(n) + m), m being the number of results found),
and check if we want to keep each interval.
Parameters
----------
- gold : the path to the gold alignment
- symbol_type: string, "word" or "phone",
if "word", don't keep the silences if some are found
if "phone", keep them and raise warning if none are found
Returns
-------
- gold: a dict {fname: intervaltree} which returns the interval tree
of the gold phones for each file
- ix2symbols: a dict that returns the symbols for each index of encoding
(to compute the ned, we assign numbers to symbols)
Raises
------
ValueError
            - If the alignment is not well formatted
UserWarning
            - If the phone alignment does not contain silences
AssertionError
- If an interval contains an offset lower than the onset
'''
if not os.path.isfile(gold_path):
raise ValueError('{}: File Not Found'.format(gold_path))
# read the gold and create a list of tuples for each filename, then create an interval
# tree from this list of tuple.
intervals = defaultdict(list)
gold = dict()
symbols = set() # create a set of all the available symbols
transcription = dict() # create dict that returns the transcription for an interval
boundaries_up = defaultdict(set)
boundaries_down = defaultdict(set)
        # flag checking that the phone alignment contains silences;
        # it starts as False and is set to True once a SIL symbol is read
        sil_flag = False
with open(gold_path, 'r') as fin:
ali = fin.readlines()
for line in ali:
try:
fname, on, off, symbol = line.strip('\n').split(' ')
except:
raise ValueError(
                    'format of alignment should be:\n'
'\tfilename onset offset symbol\n'
                    'but alignment contains a wrongly formatted line:\n'
'{}'.format(line))
# check timestamps are in correct order
assert float(off) > float(on), ("timestamps are not"
" correct\n {}".format(line))
# If word alignement, don't keep silences, else, keep them.
if symbol_type == "word" and symbol == "SIL":
continue
elif symbol_type == "phone" and symbol == "SIL":
sil_flag = True
transcription[(fname, float(on), float(off))] = symbol
symbols.add(symbol)
intervals[fname].append((float(on), float(off), symbol))
boundaries_up[fname].add(float(off))
boundaries_down[fname].add(float(on))
# for each filename, create an interval tree
for fname in intervals:
gold[fname] = intervaltree.IntervalTree.from_tuples(
intervals[fname])
# raise warning if phone alignment doesn't contain silences
if symbol_type == "phone" and not sil_flag:
raise UserWarning("phone alignment does not contain"
" silences, which are necessary for correct"
" evaluation.")
# create a mapping index -> symbols for the phones
symbol2ix = {v: k for k, v in enumerate(list(symbols))}
ix2symbols = dict((v, k) for k, v in symbol2ix.items())
return (gold, transcription, ix2symbols,
symbol2ix, (boundaries_up, boundaries_down))
    @staticmethod
    def get_intervals(fname, on, off, gold, transcription):
""" Given a filename and an interval, retrieve the list of
covered intervals, and their transcription.
This is done using intervaltree.search, which is supposed to
work in O(log(n) + m), n being the number of intervals and m
the number of covered intervals.
Parameters
----------
fname: str, name of the speaker
on: float, onset of the interval
off: float, offset of the interval
gold: dict of intervaltree, contains all gold phones
transcription: dict of tuples, contains the transcription of each interval
"""
def overlap(a, b, interval):
ov = (min(b, interval[1]) - max(a, interval[0])) \
/ (interval[1] - interval[0])
time = min(b, interval[1]) - max(a, interval[0])
return ov, time
# search interval tree
_cov_int = gold[fname].overlap(on, off)
cov_int = set() # set of kept intervals
cov_trs = [] # retrieved transcription
        # check each interval to see if we keep it or not.
        # In particular, keep a phone if the overlap covers at least
        # 50% of its duration or lasts at least 30 ms.
for interval in _cov_int:
int_ov, time = overlap(on, off, interval)
if round(int_ov, 4) >= 0.50 or round(time, 4) >= 0.03:
cov_trs.append(
(interval[0], interval[1],
transcription[(fname, interval[0], interval[1])]))
cov_int.add((interval[0], interval[1]))
# finally, sort the transcription by onsets, because intervaltree
# doesn't necessarily return the intervals in order...
cov_trs.sort()
trs = [t for b, e, t in cov_trs]
return cov_int, trs
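    # Illustrative call of get_intervals above, assuming `gold` and
    # `transcription` come from read_gold_intervalTree:
    #
    #   cov_int, trs = Gold.get_intervals('s0019', 12.40, 12.79,
    #                                     gold, transcription)
    #   # trs lists the phones kept by the 50%-overlap / 30 ms rule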
def get_silence_intervals(self, vad):
''' Compute interval tree of silences '''
        pass
# ZEROTEST [](https://pypi.python.org/pypi/zerotest) [](https://travis-ci.org/jjyr/zerotest)
Zerotest makes it easy to test an API server: start a micro proxy, send requests, and generate test code from these recorded behaviours.
## Install
Stable version: `pip install zerotest`
Develop version: `pip install git+https://github.com/jjyr/zerotest.git`
**zerotest requires Python 2.7 or 3.3+**
## Quick Start
1. Start a local proxy to capture http traffic `zerotest server https://api.github.com -f octocat.data`
2. Make few requests `curl -i http://localhost:7000/users/octocat`
3. Press `C-c` to exit local proxy
4. Generate test code `zerotest generate octocat.data --ignore-all-headers > test_octocat.py`
5. Type `py.test test_octocat.py` to run test
## Usage
Type `zerotest -h` to see help message
### Server
Start local proxy server
`zerotest server http://target-endpoint.com [-f] [record_file.data]`
Type `zerotest server -h` to see help message
### Generate
Generate Python test code from record data (the file generated by the local proxy)
`zerotest generate [options] > test_file.py`
#### Ignore specific headers in comparison
Use option `--ignore-headers [date server ...]` or `--ignore-all-headers` to skip the header comparison
#### Ignore specific fields
Use `--fuzzy-match` to enable fuzzy matching mode, which compares the data schema instead of the data itself,
or use `--ignore-fields [view_count ...]` to specify the ignored fields, as in the example below.
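For example (illustrative, reusing the record file from the quick start):

```
zerotest generate octocat.data --fuzzy-match --ignore-fields view_count > test_octocat.py
```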
Type `zerotest generate -h` to see help message
### Replay
Generate test and run it with `pytest`
`zerotest replay [generate options] [--pytest] [pytest options]`
Type `zerotest replay -h` to see help message
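For example (illustrative): `zerotest replay octocat.data --ignore-all-headers --pytest -x -q` should regenerate the test and run it with those pytest options.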
## Develop
Export debug flag `ZEROTEST_DEBUG=true` to see verbose logs during program or test running.
## Contributors
[Contributors](https://github.com/jjyr/zerotest/graphs/contributors)
## Contribute
* Open issue if found bugs or some cool ideas
* Feel free to ask if have any questions
* Testing is very important for a test tool, commit your test file together with pull request
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class Microphone(AudioSource):
"""
Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
This will throw an ``AttributeError`` if you don't have PyAudio 0.2.9 or later installed.
If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source.
Otherwise, ``device_index`` should be the index of the device to use for audio input.
A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive.
It represents an audio device such as a microphone or speaker.
See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.
The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz).
If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings.
Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition).
Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high.
Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive.
This value, generally, should be left at its default.
"""
def __init__(self, device_index=None, sample_rate=None, chunk_size=1024):
assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer"
assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"
# set up PyAudio
self.pyaudio_module = self.get_pyaudio()
audio = self.pyaudio_module.PyAudio()
try:
count = audio.get_device_count() # obtain device count
if device_index is not None: # ensure device index is in range
assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1)
if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified
device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info()
assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
sample_rate = int(device_info["defaultSampleRate"])
except:
audio.terminate()
raise
self.device_index = device_index
self.format = self.pyaudio_module.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample
self.SAMPLE_RATE = sample_rate # sampling rate in Hertz
self.CHUNK = chunk_size # number of frames stored in each buffer
self.audio = None
self.stream = None
@staticmethod
def get_pyaudio():
"""
Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
"""
try:
import pyaudio
except ImportError:
raise AttributeError("Could not find PyAudio; check installation")
from distutils.version import LooseVersion
if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.9"):
raise AttributeError("PyAudio 0.2.9 or later is required (found version {})".format(pyaudio.__version__))
return pyaudio
@staticmethod
def list_microphone_names():
"""
Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.
The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``.
"""
audio = Microphone.get_pyaudio().PyAudio()
try:
result = []
for i in range(audio.get_device_count()):
device_info = audio.get_device_info_by_index(i)
result.append(device_info.get("name"))
finally:
audio.terminate()
return result
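    # Illustrative usage sketch (assumes a working PyAudio installation):
    #
    #   for index, name in enumerate(Microphone.list_microphone_names()):
    #       print(index, name)
    #   with Microphone(device_index=0, sample_rate=16000) as source:
    #       chunk = source.stream.read(source.CHUNK)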
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
self.audio = self.pyaudio_module.PyAudio()
try:
self.stream = Microphone.MicrophoneStream(
self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
)
)
except:
self.audio.terminate()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.stream.close()
finally:
self.stream = None
self.audio.terminate()
class MicrophoneStream(object):
def __init__(self, pyaudio_stream):
self.pyaudio_stream = pyaudio_stream
def read(self, size):
return self.pyaudio_stream.read(size, exception_on_overflow=False)
def close(self):
try:
# sometimes, if the stream isn't stopped, closing the stream throws an exception
if not self.pyaudio_stream.is_stopped():
self.pyaudio_stream.stop_stream()
finally:
                self.pyaudio_stream.close()
import sys
from . import at_mic as at
import argparse
import time
import threading
import sys
import urllib.parse
import queue
import json
import os
import audioop
import math
import logging
import logging.config
import yaml
from ws4py.client.threadedclient import WebSocketClient
import requests
import platform
import random
import re
import base64
RETRY_TIME_MIN = 2
RETRY_TIME_MAX = 3
logger = logging.getLogger(__name__)
def rate_limited(maxPerSecond):
minInterval = 1.0 / float(maxPerSecond)
def decorate(func):
lastTimeCalled = [0.0]
def rate_limited_function(*args,**kargs):
            elapsed = time.monotonic() - lastTimeCalled[0]  # wall-clock; process_time() would not advance during sleep
leftToWait = minInterval - elapsed
if leftToWait>0:
time.sleep(leftToWait)
ret = func(*args,**kargs)
            lastTimeCalled[0] = time.monotonic()
return ret
return rate_limited_function
return decorate
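# Illustrative use of the decorator above: cap a sender at 4 calls per second.
#
#   @rate_limited(4)
#   def send_chunk(chunk):
#       ...  # successive calls are spaced at least 0.25 s apart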
class MyClient(WebSocketClient):
def __init__(self, filename, url, pid, protocols=None, extensions=None, heartbeat_freq=None, byterate=32000, no_realtime=False,
save_adaptation_state_filename=None, send_adaptation_state_filename=None):
super(MyClient, self).__init__(url, protocols, extensions, heartbeat_freq)
self.final_hyps = []
self.filename = filename
self.byterate = byterate
self.no_realtime = no_realtime
self.final_hyp_queue = queue.Queue()
self.save_adaptation_state_filename = save_adaptation_state_filename
self.send_adaptation_state_filename = send_adaptation_state_filename
self.tik0 = 0
self.tik = 0
self.tok = 0
self.tok2 = 0
self.total_length = 0
self.pid = pid
self.platform = platform.system()
self.src_file = None
self.sessionId = None
self.count = 0
self.result_file = open('./result', 'w')
@rate_limited(4) # comment out this line to send as soon as possible
def send_data(self, data):
self.send(data, binary=True)
        if self.platform == "Windows":
self.tik = time.monotonic()
else:
self.tik = time.clock_gettime(time.CLOCK_REALTIME)
if self.count == 0: self.tik0 = self.tik
self.count += 1
def send_data_norate(self, data):
self.send(data, binary=True)
        if self.platform == "Windows":
self.tik = time.monotonic()
else:
self.tik = time.clock_gettime(time.CLOCK_REALTIME)
if self.count == 0: self.tik0 = self.tik
self.count += 1
def opened(self):
logger.info("Socket opened")
def send_data_to_ws():
# send adaptation state
if self.send_adaptation_state_filename is not None:
logger.info("Sending adaptation state from %s" % self.send_adaptation_state_filename)
try:
adaptation_state_props = json.load(open(self.send_adaptation_state_filename, "r"))
self.send(json.dumps(dict(adaptation_state=adaptation_state_props)))
except:
e = sys.exc_info()[0]
logger.info("Failed to send adaptation state: {}".format(e))
if self.filename != '-':
# sending audio blocks
try:
self.src_file = open(self.filename, "rb")
except FileNotFoundError as e:
logger.error(e)
raise
if self.no_realtime:
for block in iter(lambda: self.src_file.read(8000), b''):
self.send_data_norate(block)
else:
for block in iter(lambda: self.src_file.read(int(self.byterate/4)), b''):
try:
self.send_data(block)
except Exception as e:
logger.info("exception in send_data from {}".format(self.sessionId))
pass
# important: send EOS to finalize this connection
logger.info("sending EOS")
self.send("EOS")
                if self.platform == "Windows":
self.tik = time.monotonic()
else:
self.tik = time.clock_gettime(time.CLOCK_REALTIME)
logger.info("Tik")
else:
try:
with at.Microphone(sample_rate=16000) as source:
#---------------------------------------------------------------------
# Sending audio chuck from microphone
# read samples from microphone stream (pyAudio)
# ex) source.stream.read(samples)
#
# need to send 32000/4=8000 byte/sec = 8000 * 8 bit/sec
# = 16 bit * 4000 samples/sec
# Parameters for end-point detection
energy_threshold = 300
dynamic_energy_adjustment_damping = 0.15
dynamic_energy_ratio = 1.5
pause_threshold = 4
pause_count = 0
nSample = int(self.byterate/8)
seconds_per_buffer = nSample / float(source.SAMPLE_RATE)
pause_buffer_count = int(math.ceil(pause_threshold / seconds_per_buffer))
logger.info("Mic. is ready, you can say something with proper volume")
for block in iter(lambda: source.stream.read(nSample), ""):
self.send_data(block)
# Energy measurement simple SAD
energy = audioop.rms(block, source.SAMPLE_WIDTH) # energy of the audio signal
damping = dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * dynamic_energy_ratio
energy_threshold = energy_threshold * damping + target_energy * (1 - damping)
#print >> sys.stderr, "(energy = %f, threshould = %f)" %(energy, energy_threshold)
if energy > energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
logger.info('silence detected')
break
# important: send EOS to finalize this connection
self.send("EOS")
except Exception as e:
logger.error("[Error] Can not sent data through websocket. Reason: %s", str(e))
self.close()
t = threading.Thread(target=send_data_to_ws)
t.start()
def received_message(self, m):
response = json.loads(str(m))
#logger.debug(response)
json_data = json.dumps(response, ensure_ascii=False) # to recognize unicode hangul
if 'objectives' in response: # NLU OUTPUT
logger.debug("NLU JSON was: {}".format(json_data))
            if self.platform == "Windows":
self.tok2 = time.monotonic()
else:
self.tok2 = time.clock_gettime(time.CLOCK_REALTIME)
logger.debug("Tok2")
elif 'sessionId' in response:
self.sessionId = response['sessionId']
logger.debug("Session ID was: {}".format(response['sessionId']))
#self.tok = time.clock_gettime(time.CLOCK_REALTIME)
elif 'status' in response: # ASR OUTPUT
if response['status'] == 0:
# log JSON reponse from the server
if 'result' in response:
if 'total-length' in response:
self.total_length = response['total-length']
logger.debug("JSON was: {}".format(json_data))
if response['result']['final']:
trans = response['result']['hypotheses'][0]['transcript']
                        if self.platform == "Windows":
self.tok = time.monotonic()
else:
self.tok = time.clock_gettime(time.CLOCK_REALTIME)
logger.debug("Tok")
# save adaptation stat from the server
if 'adaptation_state' in response:
if self.save_adaptation_state_filename:
logger.info("Saving adaptation state to {}".format(self.save_adaptation_state_filename))
with open(self.save_adaptation_state_filename, "w") as f:
f.write(json.dumps(response['adaptation_state']))
else:
logger.info("Received error from server (status {})".format(response['status']))
if 'message' in response:
logger.info("Error message: {}".format(response['message']))
else:
logger.debug("Undefined JSON was: {}".format(json_data))
def get_full_hyp(self, timeout=60):
return self.final_hyp_queue.get(timeout)
def closed(self, code, reason=None):
logger.info("Websocket closed() called")
self.src_file.close()
if reason is not None:
logger.info("closed() is called with reason: {}".format(reason))
delay = self.tok - self.tik
tx_time = self.tik - self.tik0
logger.info("[{}] tik = {:.2f}, tok = {:.2f}, elapsed_time ASR = {:.2f}, total-length ASR = {:.2f}".format(self.pid,
self.tik, self.tok, delay, self.total_length))
logger.info("[{}] tx time = {:.2f}".format(self.pid, tx_time))
rtf = (delay + self.total_length)/self.total_length
logger.info("[{}] real-time factor = {} from {}".format(self.pid, rtf, self.filename))
if self.tok2 != 0:
delay2 = self.tok2 - self.tik
logger.info("[{}] tik = {:.2f}, tok2 = {:.2f}, elapsed_time NLU = {:.2f}, total-length NLU = {:.2f}".format(self.pid,
self.tik, self.tok2, delay2, self.total_length))
rtf = (delay2 + self.total_length)/self.total_length
logger.info("[{}] real-time factor NLU = {} from {}".format(self.pid, rtf, self.filename))
logger.info("[{}] difference between delays = {:.2f}".format(self.pid, (delay2 - delay)*1000))
self.final_hyp_queue.put(" ".join(self.final_hyps))
intro_txt="This is an example client code for Zeroth-Enterprise ASR server.\n"
intro_txt+="It takes audio stream from microphone or files, and sends to Zeroth ASR server.\n"
intro_txt+="Recognized result will be reported by JSON text as user speaks in every 0.25 ms.\n"
intro_txt+="Client can save an adaptation state reported from the server.\n"
intro_txt+="and it can send the adaptation to server before starting the next stream.\n"
def reqToken(endpoint, authinfo):
b64Val = base64.b64encode(authinfo.encode()).decode()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic '+b64Val }
data = 'grant_type=client_credentials'
try:
res = requests.post(endpoint, headers = headers, data=data, verify=False)
except Exception as e:
        logger.debug('http request failed, bypassing it. error = {}'.format(e))
return None
if res.status_code != 200:
        logger.debug('token request failed with status {}, bypassing it'.format(res.status_code))
return None
return json.loads(res.text)
def request(filename, uri='ws://13.125.20.108:3179/client/ws/trusted', rate=32000, model='', save_adaptation_state=None, send_adaptation_state=None,
conf_path='zeroth_client.yaml',no_realtime=None,single_mode=None,retry=None):
if filename == '-' and retry:
logging.info("for microphone input, retry mode is no-meaning")
sys.exit()
# load configuration
f_auth = False
conf = []
with open(conf_path) as f:
conf = yaml.safe_load(f)
if "logging" in conf:
logging.config.dictConfig(conf["logging"])
if "fork" in conf and conf["fork"] > 1:
import tornado.process
logging.info("Forking into %d processes" % conf["fork"])
ret = tornado.process.fork_processes(conf["fork"], 100000) # try 100000
logging.debug("return of fork_processes pid:{}, task_id:{}".format(os.getpid(), tornado.process.task_id()))
if 'auth' in conf:
f_auth = True
# build client websocket instance with uri information
uri = uri
if single_mode:
uri += '?%s' % (urllib.parse.urlencode([("single", "true")]))
else:
uri += '?%s' % (urllib.parse.urlencode([("single", "false")]))
# for authentication
if f_auth:
auth_key = reqToken(conf['auth']['endpoint'], conf['auth']['info'])
if auth_key:
access_token = auth_key['access_token']
uri += '&%s' % (urllib.parse.urlencode([("access-token", access_token)]))
uri += '&%s' % (urllib.parse.urlencode([("pos", "dvd")]))
else:
logger.info('Authentication failed')
sys.exit()
# for a specified model name
if model != "":
uri += '&%s' % (urllib.parse.urlencode([("model", model)]))
    # raw audio, such as uncompressed PCM or a microphone stream, needs an
    # explicit content type; encoded audio is sent without one
content_type=''
if filename == '-' or filename.endswith(".raw"):
content_type = "audio/x-raw, layout=(string)interleaved, rate=(int)%d, format=(string)S16LE, channels=(int)1" %(rate/2)
uri += '&%s' % (urllib.parse.urlencode([("content-type", content_type)]))
logger.info("URI: " + uri)
logger.info("filename: " + filename)
count = 0
while True:
ws = MyClient(
filename,
uri,
os.getpid(),
byterate=rate,
no_realtime=no_realtime,
save_adaptation_state_filename=save_adaptation_state,
send_adaptation_state_filename=send_adaptation_state
)
try:
# connect websocket
ws.connect()
# if there is no respons in 60 seconds, it will return
#result = ws.get_full_hyp()
ws.run_forever()
except Exception as e:
logger.error("Couldn't connect to the server. Reason: {}".format(e))
sys.exit()
if not retry: break
if 'retry_time' in conf:
_min = float(conf['retry_time'].split(':')[0])
_max = float(conf['retry_time'].split(':')[1])
time.sleep(random.uniform(_min, _max))
else:
time.sleep(random.uniform(RETRY_TIME_MIN, RETRY_TIME_MAX))
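# Illustrative call of request() above, assuming a reachable Zeroth server
# and a 16 kHz, 16-bit mono PCM file:
#
#   request('sample.raw', uri='ws://127.0.0.1:3179/client/ws/speech',
#           rate=32000, conf_path='zeroth_client.yaml')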
#def main():
# parser = argparse.ArgumentParser(description=intro_txt)
# parser.add_argument('-u', '--uri', default="ws://13.125.20.108:3179/client/ws/trusted", dest="uri", help="Server websocket URI")
# #parser.add_argument('-u', '--uri', default="ws://127.0.0.1:3179/client/ws/speech", dest="uri", help="Server websocket URI")
# #parser.add_argument('-u', '--uri', default="ws://13.125.20.108:3177/client/ws/speech", dest="uri", help="Server websocket URI")
# parser.add_argument('-r', '--rate', default=32000, dest="rate", type=int,
# help="Rate in bytes/sec at which audio should be sent to the server. \
# For raw 16-bit audio it must be 2*samplerate! Set this to '-' if you don't need real-time")
# parser.add_argument('--model', dest="model", default="", help="connect to specific model")
# parser.add_argument('--save-adaptation-state', help="Save adaptation state to file")
# parser.add_argument('--send-adaptation-state', help="Send adaptation state from file")
# parser.add_argument('-c', '--conf', dest="conf", default="zeroth_client.yaml", help="configuration file (YAML)")
# parser.add_argument('--no-realtime', dest="no_realtime", action='store_true', help='flag for testing no real-time transmission')
# parser.add_argument('--single-mode', dest="single_mode", action='store_true',
# help='if this flag is true, server will finalize output with the first EPD point')
# parser.add_argument('--retry', dest="retry", action='store_true', help='flag for testing one file repeatedly')
# parser.add_argument('filename', help="Audio filename to be sent to the server. \
# Set this to '-' for using microphone stream")
# args = parser.parse_args()
# request(args.filename,args.uri,args.rate,args.model,args.save_adaptation_state,args.send_adaptation_state,
# args.conf,args.no_realtime,args.single_mode,args.retry)
#
#
#if __name__ == "__main__":
#    main()
# zeroth-normalizer
The normalizer code used for Korean text processing in [Project: Zeroth](https://github.com/goodatlas/zeroth) has been split out into a standalone module so it can be reused in other projects.
- Added type hints to the functions and refactored the whole script into a class
# Installation
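Install from PyPI (the package name is assumed from this distribution):

```
pip install zeroth-normalizer
```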
# Examples
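A minimal usage sketch (the `ZerothKoreanNormalizer` class comes from this package's `__init__.py`):

```python
from zeroth_normalizer import ZerothKoreanNormalizer

normalize = ZerothKoreanNormalizer()
for step in range(1, 5):
    print('step{}:'.format(step), normalize('3.1운동', steps=step))
```

The block below shows the original sentence (원문) and the output after each normalization step: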
```
원문: 대왕 대비가 대행왕이 예(睿)로써 시호를 삼도록 말했었다고 알리다
step1: 대왕 대비가 대행왕이 예로써 시호를 삼도록 말했었다고 알리다
step2: 대왕 대비가 대행왕이 예로써 시호를 삼도록 말했었다고 알리다
step3: 대왕 대비가 대행왕이 예로써 시호를 삼도록 말했었다고 알리다
step4: 대왕 대비가 대행왕이 예로써 시호를 삼도록 말했었다고 알리다
원문: 한국어 위키백과(영어: Korean Wikipedia)는 한국어로 운영되는 위키백과의 다언어판 가운데 하나로서, 2002년 10월 11일에 시작되었다.
step1: 한국어 위키백과는 한국어로 운영되는 위키백과의 다언어판 가운데 하나로서 , 2002 년 10 월 11 일에 시작되었다.
step2: 한국어 위키백과는 한국어로 운영되는 위키백과의 다언어판 가운데 하나로서, 2002 년 10 월 11 일에 시작되었다.
step3: 한국어 위키백과는 한국어로 운영되는 위키백과의 다언어판 가운데 하나로서, 2002 년 10 월 11 일에 시작되었다.
step4: 한국어 위키백과는 한국어로 운영되는 위키백과의 다언어판 가운데 하나로서 이천 이 년 10 월 11 일에 시작되었다
원문: 공식 문서에는 'Corea' 또는 'Korea'가 혼용되어 사용되었고, 1900년대 초기부터 영어권에서는 'Korea'의 사용 빈도가 높았다.
step1: 공식 문서에는 Corea 또는 Korea 가 혼용되어 사용되었고 , 1900 년대 초기부터 영어권에서는 Korea 의 사용 빈도가 높았다.
step2: 공식 문서에는 Corea 또는 Korea 가 혼용되어 사용되었고, 1900 년대 초기부터 영어권에서는 Korea 의 사용 빈도가 높았다.
step3: 공식 문서에는 Corea 또는 Korea 가 혼용되어 사용되었고, 1900 년대 초기부터 영어권에서는 Korea 의 사용 빈도가 높았다.
step4: 공식 문서에는 Corea 또는 Korea 가 혼용되어 사용되었고 천 구백 년대 초기부터 영어권에서는 Korea 의 사용 빈도가 높았다
원문: 북위 33도~38도, 동경 126~132도에 걸쳐 있어 냉대 동계 소우 기후와 온대 하우 기후, 온난 습윤 기후가 나타난다.
step1: 북위 33 도 ~ 38 도 , 동경 126 ~ 132 도에 걸쳐 있어 냉대 동계 소우 기후와 온대 하우 기후 , 온난 습윤 기후가 나타난다.
step2: 북위 33 도 ~ 38 도 , 동경 126 ~ 132 도에 걸쳐 있어 냉대 동계 소우 기후와 온대 하우 기후 , 온난 습윤 기후가 나타난다.
step3: 북위 33 도 ~ 38 도 , 동경 126 ~ 132 도에 걸쳐 있어 냉대 동계 소우 기후와 온대 하우 기후 , 온난 습윤 기후가 나타난다.
step4: 북위 33 도 ~ 38 도 동경 백 이십 육 ~ 백 삼십 이 도에 걸쳐 있어 냉대 동계 소우 기후와 온대 하우 기후 온난 습윤 기후가 나타난다
원문: 3.1운동
step1: 3.1 운동
step2: 3.1 운동
step3: 3.1 운동
step4: 삼 쩜 일 운동
원문: 평균 기온은 10 ~ 16℃이며, 가장 무더운 달인 8월은 23 ~ 36℃, 5월은 16 ~ 19℃, 10월은 11 ~ 19℃, 가장 추운 달인 1월은 -6 ~ 3℃이다.
step1: 평균 기온은 10 ~ 16 이며 , 가장 무더운 달인 8 월은 23 ~ 36 , 5 월은 16 ~ 19 , 10 월은 11 ~ 19 , 가장 추운 달인 1 월은 - 6 ~ 3 이다.
step2: 평균 기온은 10 ~ 16 이며 , 가장 무더운 달인 8 월은 23 ~ 36 , 5 월은 16 ~ 19 , 10 월은 11 ~ 19 , 가장 추운 달인 1 월은 - 6 ~ 3 이다.
step3: 평균 기온은 10 ~ 16 이며 , 가장 무더운 달인 8 월은 23 ~ 36 , 5 월은 16 ~ 19 , 10 월은 11 ~ 19 , 가장 추운 달인 1 월은 - 6 ~ 3 이다.
step4: 평균 기온은 10 ~ 16 이며 가장 무더운 달인 8 월은 23 ~ 36 5 월은 16 ~ 19 10 월은 11 ~ 19 가장 추운 달인 1 월은 - 6 ~ 3 이다
원문: 예시로서, 만약 크기 n의 모든 입력에 대한 알고리즘에 필요한 시간이 최대 (어떤 n0보다 크지 않은 모든 n에 대하여) 5n^3 + 3n의 식을 가진다면, 이 알고리즘의 점 근적 시간 복잡도는 O(n3)이라고 할 수 있다.
step1: 예시로서 , 만약 크기 n 의 모든 입력에 대한 알고리즘에 필요한 시간이 최대 5 n 3 + 3 n 의 식을 가진다면 , 이 알고리즘의 점근적 시간 복잡도는 O 이라고 할 수 있다.
step2: 예시로서 , 만약 크기 n 의 모든 입력에 대한 알고리즘에 필요한 시간이 최대 5 n 3 + 3 n 의 식을 가진다면 , 이 알고리즘의 점근적 시간 복잡도는 O 이라고 할 수 있다.
step3: 예시로서 , 만약 크기 n 의 모든 입력에 대한 알고리즘에 필요한 시간이 최대 5 n 3 + 3 n 의 식을 가진다면 , 이 알고리즘의 점근적 시간 복잡도는 O 이라고 할 수 있다.
step4: 예시로서 만약 크기 n 의 모든 입력에 대한 알고리즘에 필요한 시간이 최대 5 n 3 + 3 n 의 식을 가진다면 이 알고리즘의 점근적 시간 복잡도는 O 이라고 할 수 있다
```
import re
import unicodedata
from .normalizer import step1, step2, step3, step4
#
# forked from openai/whisper
#
# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
"œ": "oe",
"Œ": "OE",
"ø": "o",
"Ø": "O",
"æ": "ae",
"Æ": "AE",
"ß": "ss",
"ẞ": "SS",
"đ": "d",
"Đ": "D",
"ð": "d",
"Ð": "D",
"þ": "th",
"Þ": "th",
"ł": "l",
"Ł": "L",
}
def remove_symbols_and_diacritics(s: str, keep=""):
"""
Replace any other markers, symbols, and punctuations with a space,
and drop any diacritics (category 'Mn' and some manual mappings)
"""
return "".join(
c
if c in keep
else ADDITIONAL_DIACRITICS[c]
if c in ADDITIONAL_DIACRITICS
else ""
if unicodedata.category(c) == "Mn"
else " "
if unicodedata.category(c)[0] in "MSP"
else c
for c in unicodedata.normalize("NFKD", s)
)
def remove_symbols(s: str):
"""
Replace any other markers, symbols, punctuations with a space, keeping diacritics
"""
return "".join(
" " if unicodedata.category(c)[0] in "MSP" else c
for c in unicodedata.normalize("NFKC", s)
)
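# Illustrative behaviour of the two helpers above: markers and punctuation
# become spaces; only the NFKD variant also strips diacritics, e.g.
#   remove_symbols("déjà-vu!")                -> "déjà vu "
#   remove_symbols_and_diacritics("déjà-vu!") -> "deja vu "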
class ZerothKoreanNormalizer:
""" Text Normalizer for Korean Text
"""
def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
self.normalizers = [step1.normalize, step2.normalize, step3.normalize, step4.normalize]
self.clean = (
remove_symbols_and_diacritics if remove_diacritics else remove_symbols
)
self.split_letters = split_letters
def __call__(self, s: str, steps=4):
"""_summary_
Args:
s (str): input korean text
steps (int, optional): `Zeroth` script processes input string up to 4 steps.
This argument specifies how many steps it applies to input text. Defaults to 4.
Returns:
_type_: Normalized input text
"""
if steps < 1: steps = 1
if steps > 4: steps = 4
for i in range(steps):
s = self.normalizers[i](s)
s = re.sub(r'(\ )+', ' ', s).strip()
        return s
import re
from . import at_unicode
def normalize(text: str) -> str:
# separator (conventions)
text = re.sub('['+re.escape(at_unicode.separators)+']','\n', text)
    # remove bracketed contents
text = re.sub('\([^\)]+\)', '', text)
text = re.sub('\[[^\]]+\]', '', text)
text = re.sub('【[^】]+】', '', text)
text = re.sub('\<[^\>]+\>', '', text)
# handle apostrophe
quotes = at_unicode.apostrophe + at_unicode.quatation
text = re.sub('([a-zA-Z])['+re.escape(quotes)+']([a-zA-Z])', '\\1<apostrophe>\\2', text)
text = re.sub('['+re.escape(quotes)+']', '', text)
text = re.sub('<apostrophe>', '\'',text)
# replace various percent into one
text = re.sub('['+re.escape(at_unicode.percents)+']', '%' ,text)
# miscellaneous
text = re.sub('%p', '% 포인트', text)
text = re.sub('±', '플러스 마이너스', text)
text = re.sub('[a-zA-Z0-9_.]+@[a-zA-Z0-9_.]*',' ', text) # delete e-mail
# remove chinese and japanese characters
text = re.sub(at_unicode.chinese, '', text)
text = re.sub(at_unicode.japanese, '', text)
# segment b/w Hangul and non-Hangul
text = re.sub(r"([가-힣])([^ 가-힣])",r"\1 \2", text)
text = re.sub(r"([^ 가-힣])([가-힣])",r"\1 \2", text)
# segment b/w numerices and non-numerics
text = re.sub('([0-9])([^ \.\,0-9])', '\\1 \\2', text)
text = re.sub('([^ \+\-\.\,0-9])([0-9])', '\\1 \\2', text)
# Leave only valid characters
text = re.sub(at_unicode.invalids_chars, ' ', text)
# remove repeated valid symbols
text = re.sub('(['+re.escape(at_unicode.valids)+'])+', '\\1', text)
# make valid symbols, except puctuations, as a unique word
symbols = at_unicode.measureUnits + at_unicode.percents + at_unicode.currencies + at_unicode.userDefines
regexEpr = r"([" + re.escape(symbols) + "])"
text = re.sub(regexEpr, ' \\1 ', text)
# remove spaces before puctuations
#text = re.sub('\s+(['+re.escape(at_unicode.puctuations)+'])', '\\1', text)
# segment sentences
text = re.sub('([가-힣])\s*\.', '\\1.\n', text)
# segment sentences 2
text = re.sub('([가-힣])\s*([\.?!])\s*([^가-힣]+ )', '\\1\\2\n\\3', text)
# segment sentences 3
# / (not readable)
text = re.sub('([가-힣])\s+[/=:]\s+([가-힣])', '\\1\n\\2', text)
text = re.sub('([a-zA-Z])\s+[/=:]\s+([가-힣])', '\\1\n\\2', text)
text = re.sub('([가-힣])\s+[/=:]\s+([a-zA-Z])', '\\1\n\\2', text)
text = re.sub(r'(\ )+', ' ', text).strip()
    return text
#
# Copyright 2017 Atlas Guide (Author : Lucas Jo)
#
# Apache 2.0
#
import unicodedata
import re
measureUnits = "".join(chr(i) for i in range(0xffff) if i >= 0x3380 and i<=0x33DD)
percents = ''.join(chr(i) for i in range(0xffff) \
if unicodedata.category(chr(i)) == 'Po' and re.search('PERCENT', unicodedata.name(chr(i))))
currencies = "".join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'Sc')
quatation = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) in ['Pc', 'Pd', 'Pe', 'Pf', 'Pi',
'Po', 'Ps'] and re.search('QUOTATION', unicodedata.name(chr(i))))
apostrophe = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) in ['Pc', 'Pd', 'Pe', 'Pf', 'Pi',
'Po', 'Ps'] and re.search('APOSTROPHE', unicodedata.name(chr(i))))
userDefines = "-~+=%/:;"
puctuations = ".,?!'"
triangles = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' TRIANGLE\\b', unicodedata.name(chr(i))))
circles = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' CIRCLE\\b', unicodedata.name(chr(i))))
squares = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' SQUARE\\b', unicodedata.name(chr(i))))
separators = triangles + circles + squares
valids = measureUnits + percents + currencies + userDefines + puctuations
invalids_chars = r"[^ \n가-힣0-9a-zA-Z" + re.escape(valids) + r"]+"
valids_chars = r"[ \n가-힣0-9a-zA-Z" + re.escape(valids) + r"]+"
chinese = re.compile(u'[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]', re.UNICODE)
#3000-303F : punctuation
#3040-309F : hiragana
#30A0-30FF : katakana
#FF00-FFEF : Full-width roman + half-width katakana
#4E00-9FAF : Common and uncommon kanji
japanese = re.compile(u'[\u3040-\u309f\u30a0-\u30ff\uff00-\uffef\u4e00-\u9faf]', re.UNICODE)
userDefines_pronun={
'-': ['마이너스', '에서', '다시'],
'~': ['에서', '부터'],
'+': ['더하기', '플러스'],
#'=': ['는', '은', '이콜'],
'%': ['퍼센트', '프로', '퍼센티지'],
'/': ['나누기', '퍼', '슬래쉬'],
}
measureUnits_pronun = {
'㎀': ['피코 암페어'],
'㎁': ['나노 암페어'],
'㎂': ['마이크로 암페어'],
'㎃': ['밀리 암페어'],
'㎄': ['킬로 암페어'],
'㎅': ['킬로 바이트'],
'㎆': ['메가 바이트'],
'㎇': ['기가 바이트'],
'㎈': ['칼로리'],
'㎉': ['킬로 칼로리'],
'㎊': ['피코 페럿'],
'㎋': ['나노 페럿'],
'㎌': ['마이크로 페럿'],
'㎍': ['마이크로 그램'],
'㎎': ['밀리 그램'],
'㎏': ['킬로 그램'],
'㎐': ['헤르츠'],
'㎑': ['킬로 헤르츠'],
'㎒': ['메가 헤르츠'],
'㎓': ['기가 헤르츠'],
'㎔': ['킬로 헤르츠'],
'㎕': ['마이크로 리터'],
'㎖': ['밀리 리터'],
'㎗': ['데시 리터'],
'㎘': ['킬로 리터'],
'㎙': ['펨토 미터'],
'㎚': ['나노 미터'],
'㎛': ['마이크로 미터'],
'㎜': ['밀리 미터'],
'㎝': ['센티 미터'],
'㎞': ['킬로 미터'],
'㎟': ['제곱 밀리 미터'],
'㎠': ['제곱 센티 미터'],
'㎡': ['제곱 미터'],
'㎢': ['제곱 킬로 미터'],
'㎣': ['세 제곱 밀리 미터'],
'㎤': ['세 제곱 센티 미터'],
'㎥': ['세 제곱 미터'],
'㎦': ['세 제곱 킬로 미터'],
'㎧': ['미터 퍼 쎄크'],
'㎨': ['미터 퍼 제곱 쎄그'],
'㎩': ['파스칼'],
'㎪': ['킬로 파스칼'],
'㎫': ['메가 파스칼'],
'㎬': ['기가 파스칼'],
'㎭': ['라디안'],
'㎮': ['라디안 퍼 쎄크'],
'㎯': ['라디안 퍼 제곱 쎄크'],
'㎰': ['피코 쎄크'],
'㎱': ['나노 쎄크'],
'㎲': ['마이크로 쎄크'],
'㎳': ['밀리 쎄크'],
'㎴': ['피코 볼트'],
'㎵': ['나노 볼트'],
'㎶': ['마이크로 볼트'],
'㎷': ['밀리 볼트'],
'㎸': ['킬로 볼트'],
'㎹': ['메가 볼트'],
'㎺': ['피코 와트'],
'㎻': ['나노 와트'],
'㎼': ['마이크로 와트'],
'㎽': ['밀리 와트'],
'㎾': ['킬로 와트'],
'㎿': ['메가 와트'],
'㏀': ['킬로 옴'],
'㏁': ['메가 옴'],
'㏂': ['오전'],
'㏃': ['베크렐'],
'㏄': ['씨씨'],
'㏅': ['칸델라'],
'㏆': ['쿨롱 퍼 킬로 그램'],
'㏇': ['씨 오'],
'㏈': ['데시 벨'],
'㏉': ['그레이'],
'㏊': ['헥타르'],
'㏋': ['마력'],
'㏌': ['인치'],
'㏍': ['킬로 카이저'],
'㏎': ['킬로 미터'],
'㏏': ['킬로 톤'],
'㏐': ['루멘'],
'㏑': ['로그'],
'㏒': ['로그'],
'㏓': ['럭스'],
'㏔': ['밀리 바'],
'㏕': ['밀'],
'㏖': ['몰'],
'㏗': ['피 에이치'],
'㏘': ['오후'],
'㏙': ['피 피 엠'],
'㏚': ['피 알'],
'㏛': ['스테라디안'],
'㏜': ['시버트'],
'㏝': ['웨버']
}
currencies_pronun = {
'$': ['달러'],
'¢': ['센트'],
'£': ['파운드'],
'¤': ['화폐 표시'],
'¥': ['엔'],
'֏': ['드람'],
'؋': ['아프가니'],
'৲': ['루피 마크'],
'৳': ['루피 싸인'],
'৻': ['간다'],
'૱': ['루피'],
'௹': ['루피'],
'฿': ['바트'],
'៛': ['리엘'],
'₠': ['유로'],
'₡': ['콜론'],
'₢': ['크루제이로'],
'₣': ['프랑'],
'₤': ['리라'],
'₥': ['밀'],
'₦': ['나이라'],
'₧': ['페세타'],
'₨': ['루피'],
'₩': ['원'],
'₪': ['세겔'],
'₫': ['동'],
'€': ['유로'],
'₭': ['킵'],
'₮': ['터그릭'],
'₯': ['드라크마'],
'₰': ['페니'],
'₱': ['페소'],
'₲': ['과라니'],
'₳': ['오스트랄'],
'₴': ['리브니아'],
'₵': ['세디'],
'₶': ['토르노'],
'₷': ['스페스밀로'],
'₸': ['텐지'],
'₹': ['루피'],
'₺': ['리라'],
'₻': ['노르딕'],
'₼': ['마네'],
'₽': ['루블'],
'₾': ['라리'],
'꠸': ['루피'],
'﷼': ['리알'],
'﹩': ['달러'],
'$': ['달러'],
'¢': ['센트'],
'£': ['파운드'],
'¥': ['엔'],
'₩': ['원']
}
# TBD
# extracted from the corpus
validChars={
'℃': ['도', '도 섭씨', '도 씨'],
'㈜': ['주', '주식회사'],
'ρ': ['로'],
'μ': ['뮤', '마이크로'],
'µ': ['마이크로', '뮤'],
'W': ['와트'],
}
import re
from re import Match
import sys
MAX_NUMBER = 9999999999999999
readTextUnit = [['','만','억','조'], '십', '백', '천']
readText = ['영','일','이','삼','사','오','육','칠','팔','구', '']
readNumber = ['공','일','이','삼','사','오','육','칠','팔','구', '']
readCountUnit = ['','열','스물','서른','마흔','쉰','예순', '일흔', '여든','아흔']
readCount = [['','하나','둘','셋','넷','다섯','여섯','일곱','여덟','아홉'],
['','한','두','세','네','다섯','여섯','일곱','여덟','아홉']]
COUNT_UNIT = [
'배','채','개','시','말','벌','축','톳','손','살','죽','쾌','닢','병','건','속','주', \
'망','포','피','미','팩','통','줄','봉','단','판','모','척','번','잔','장','쌍','명', \
'마리','가지','방울','자루','켤레','사람','박스','묶음','보루','봉지','포기','시루', \
]
def number2readNumber(numbers: str):
result=[]
for number in reversed(numbers):
idxNum = int(number)
rNum = readNumber[idxNum]
#rNum = "["+readNumber[idxNum]+"]"
result.insert(0, rNum)
return " ".join(result)
# Read a number in native-Korean counting style (하나, 둘, ...)
# Only numbers from 1 to 99 are supported
# Option
# 0: no counter word follows
# 1: a counter word follows (default in the signature)
def number2readCount(numbers: str, option=1):
# numbers expected as a text variable
cnt=0
result=[]
if int(numbers) > 99:
sys.exit('Out-of-range: read count range is 1~99')
for number in reversed(numbers):
idxNum = int(number)
if cnt == 0:
res=readCount[option][idxNum]
else:
res=readCountUnit[idxNum]
#print(number, res)
if res:
#res = '['+res+']'
result.insert(0, res)
cnt +=1
return result
#return " ".join(result)
# Read a number in Sino-Korean style (일, 이, 삼, ...)
# Largest supported number: 9999,9999,9999,9999
# option1
# 0: read everything in Sino-Korean style (default)
# 1: read the last two digits in native-Korean counting style
# option2
# see the option of number2readCount
#
def number2readText(numbers: str, option1=0, option2=0):
# numbers expected as a text variable
cnt=0
result=[]
# pre-processing
numbers = numbers.lstrip("0")
if numbers == '':
numbers = "0"
if int(numbers) > MAX_NUMBER:
return number2readNumber(numbers)
for number in reversed(numbers):
idxNum = int(number)
prec = cnt%4
if prec == 0:
# for every 4th location
rNum = readText[idxNum]
rLoc = ''
if cnt != 0: # 1's location ignore
rLoc = readTextUnit[0][cnt//4]
#rLoc = "{"+readTextUnit[0][cnt//4]+"}"
res = rNum +' '+ rLoc
else:
rNum = readText[idxNum] # 일, 이 ...
rLoc = readTextUnit[cnt%4] # 천, 백 ...
#rLoc = "("+ readTextUnit[cnt%4] +")" # 천, 백 ...
res = rNum + rLoc
# Exceptions for '영'
if rNum in ['영', '[영]']:
if len(numbers) != 1:
#if rLoc in ['{만}', '{억}', '{조}']:
if rLoc in ['만', '억', '조']:
cLoc=len(numbers)-cnt
if numbers[cLoc-4:cLoc] == '0000':
res=''
else:
res=rLoc
else:
res=''
else:
res=rNum
# Exceptions for '일'
if rNum == '일':
if cnt not in [12, 8, 4, 0]:
res=rLoc
else:
if cnt == 4 and len(numbers) == 5:
res=rLoc
#print(res, number, prec, cnt)
if res:
if prec != 0:
#res = '['+res+']'
res = res
result.insert(0, res)
cnt +=1
if option1:
rStr = number2readCount(numbers[-2:], option2)
result[-2:]=rStr
    # add spacing around the 조/억/만 unit groups
outext = " ".join(result)
return outext
#outList = list(outext)
#if '조' in outList:
# outList.insert(outList.index('조')+1,' ')
#if '억' in outList:
# outList.insert(outList.index('억')+1,' ')
#if '만' in outList:
# outList.insert(outList.index('만')+1,' ')
#return "".join(outList)
def convNumType3(match: Match[str]) -> str:
#regex: '(\-?)(\d+)(\.)(\d+)'
text = '['
if match.group(1):
text += '마이너스 '
g2 = number2readText(match.group(2), 0, 0)
text += g2
g4 = number2readNumber(match.group(4))
text += ' 쩜|. '+g4
return text + ']'
def convNumType4(match: Match[str]) -> str:
#regex: '([\d\.]+)'
text = '['
tNum=match.group(0).split('.')
for elem in tNum:
text += number2readNumber(elem)+" "
return text + ']'
def convNumType5(match: Match[str]) -> str:
opt = 0
for elem in COUNT_UNIT:
# pitfall '미' is in '밀리미터' ...
if elem in match.group(7):
opt = 1
text=' '
if match.group(1):
g1 = number2readText(match.group(1), opt, opt)
text += g1 + ' '
if int(match.group(5)) - int(match.group(1)) > 1:
text += " 에서"
g5 = number2readText(match.group(5), opt, opt)
text += ' ' + g5 + ' ' + match.group(7)
return text
def convNumType6(match: Match[str]) -> str:
opt = 0
for elem in COUNT_UNIT:
if elem in match.group(3):
opt = 1
text=' '
g1 = number2readText(match.group(1), opt, opt)
text += g1 + ' '
if match.group(3):
text += match.group(3)
return text
def convNumType9(match: Match[str]) -> str:
text=' '
#g1 = number2readText(match.group(1), 0, 0)
g1 = number2readNumber(match.group(1))
text += g1 + ' '
return text
def convNum_1(match: Match[str]) -> str:
matchedTxt = match.group(0)
tlist = matchedTxt.split('.')
text = ''
if len(tlist) == 3 and len(str(int(tlist[0]))) == 4:
text += number2readText(tlist[0].strip(), 0, 0)+" 년 "
text += number2readText(tlist[1].strip(), 0, 0)+" 월 "
text += number2readText(tlist[2].strip(), 0, 0)+" 일"
else:
for elem in tlist[:-1]:
text += number2readText(elem.strip(), 0, 0)+' 쩜 '
text += number2readText(tlist[-1].strip(), 0, 0)
return text
def convNum_2(match: Match[str]) -> str:
matchedTxt = match.group(1)
tlist = matchedTxt.split('.')
text = ''
for elem in tlist[:-1]:
text += number2readText(elem.strip(), 0, 0)+' 쩜 '
#text += number2readText(tlist[-1].strip(), 0, 0)
text += number2readNumber(tlist[-1].strip())
return text+" "+match.group(2)
def convNum_3(match: Match[str]) -> str:
matchedTxt = match.group(0)
tlist = matchedTxt.split('.')
text = ''
for elem in tlist[:-1]:
text += number2readText(elem.strip(), 0, 0)+' [쩜] '
text += number2readNumber(tlist[-1].strip())
return text
def convNum_4(match: Match[str]) -> str:
matchedTxt = match.group(0)
tlist = matchedTxt.split('-')
text = ''
for elem in tlist[:-1]:
text += number2readText(elem.strip(), 0, 0)+' '
text += number2readText(tlist[-1].strip(), 0, 0)
return text
def convNum_5(match: Match[str]) -> str:
matchedTxt = match.group(1)
return number2readText(matchedTxt, 0, 0)+" "
def convNum_6(match: Match[str]) -> str:
matchedTxt = match.group(1)
return " [쩜] "+number2readNumber(matchedTxt)
# could be a number with count-unit, leave it to lexicon dictionary
def convNum_7(match: Match[str]) -> str:
return "["+match.group(1)+"]"+match.group(2)
def convNum_8(match: Match[str]) -> str:
matchedTxt = match.group(1)
text = number2readText(matchedTxt, 0, 0)+" "
#if match.group(2):
# text += match.group(2)
return text
def convNumType8(match: Match[str]) -> str:
num = match.group(1)
tNum = num.split(',')
num = "".join(tNum)
return " "+num+" "
def normalize(text: str) -> str:
#-----------
# numbers with ,: 123,456 --> 123456
text = re.sub('([^ 0-9]),([^ 0-9])', '\\1, \\2', text)
text = re.sub('([0-9]),([^ 0-9])', '\\1, \\2', text)
text = re.sub('([^ 0-9]),([0-9])', '\\1, \\2', text)
#text = re.sub('(?=.*[0-9].*)([0-9,]+)', convNumType8, text)
text = re.sub('\s([0-9][,0-9]{3,}[0-9])\s', convNumType8, text)
#text = re.sub(',', ' ', text)
# numbers with '.'
text = re.sub('\d+\.\s*\d+(\.\s*\d+)+', convNum_1,text) # 2016.1.2 or 1.2.1
text = re.sub('(\d+\.\d+)([^ 0-9A-Za-z]+)', convNum_2,text) # 1.23% [일] 쩜 [이] [삼]
text = re.sub('\d+\.\d+', convNum_3,text) # 1.23 [일] [쩜] [이] [삼], 3.1 운동
#text = re.sub('(\d+)\.', convNum_5,text) # 1.
#text = re.sub('\.', ' ', text)
text = re.sub('\s\.([0-9]+)', convNum_6,text) # .234
    # numbers possibly followed by a count-unit (numeral classifier, 수량사):
    # leave them as numerals for the lexicon to handle
# ex. 배추 1 박스 --> 배추 [1] 박스
text = re.sub('\\b(\d{1,2})(\s*[^ \]0-9]+)', convNum_7, text)
# segment (just for sure)
text = re.sub('(\S)\[', '\\1 [', text)
text = re.sub('\](\S)', '] \\1', text)
## convert all numeric into korean Numbers if there is no surrounding brackets
#words=text.split()
#for i in range(len(words)):
# if words[i][0] != '[' and words[i][-1] != ']':
# words[i] = re.sub('(\d+)(\S*)', convNum_8 , words[i])
#text= ' '.join(words)+'\n'
text = re.sub('(\\b\d{3,}\\b)', convNum_8 , text)
## remove brackets and . ,
##text = re.sub('[\[\]\.,]', '', text)
text = re.sub('[\[\]]', '', text)
text = re.sub('[\.\,\'\?\!]',' ', text)
# segment sentences
#text = re.sub('\s+(['+re.escape(at_unicode.puctuations)+'])', '\\1', text)
#text = re.sub('([가-힣])\s*\.', '\\1.\n', text)
#text = re.sub('([가-힣])\s*([\.?!])\s*([^가-힣]+ )', '\\1\\2\n\\3', text)
# remove repeated characters
text = re.sub('(.)\\1{4,}', '\\1\\1\\1', text)
text = re.sub(r'(\ )+', ' ', text).strip()
    return text
import requests
key = ""
def authenticate(input_key):
global key
key = input_key
def get_stats(model_name=""):
global key
if model_name == "":
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get_stats"
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
if len(model_name.split("_")) == 3:
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get/live_stats/" + model_name
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get_stats/" + model_name
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
def get_forecast(model_name=""):
global key
if model_name == "":
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get_strategies"
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
if len(model_name.split("_")) == 3:
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get/live_strategies/" + model_name
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/get_strategy/"+model_name
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
def get_ledger(model_name=""):
global key
if model_name != "":
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/" + str(model_name)
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
return "Pass model name for ledger"
def get_historical_forecasts(model_name=""):
global key
if model_name != "":
endpoint = "https://zt-rest-api-rmkp2vbpqq-uc.a.run.app/" + str(model_name)
headers = {"Authorization": "Bearer " + key}
return requests.get(endpoint, headers=headers).json()
else:
return "Pass model name for getting historical forecasts" | zerotheorem-python | /zerotheorem_python-3.1-py3-none-any.whl/zerotheorem/__init__.py | __init__.py |
[](https://pypi.python.org/pypi/zerotk.clikit)
[](https://travis-ci.org/zerotk/clikit)
[](https://coveralls.io/github/zerotk/clikit)
# zerotk.clikit
A modern Command Line Interface library, making use of techniques such as dependency injection to
create a solid foundation for command line utilities and applications development.
## Example
This is the hello-world example on steroids.
```python
from zerotk.clikit.app import App
import sys
app = App('hello')
@app
def greeting(console_, person, compliment='Hello', exclamation=False):
'''
Greetings from clikit.
:param person: Who to say it to.
    :param compliment: What to say.
    :param exclamation: Uses an exclamation point instead of a period.
'''
    punctuation = '!' if exclamation else '.'
    console_.Print('%(compliment)s, %(person)s%(punctuation)s' % locals())
if __name__ == '__main__':
sys.exit(app.Main())
```
We'll have:
```bash
$ hello.py
Usage:
hello <subcommand> [options]
Commands:
greeting Greetings from clikit.
$ hello.py greeting
ERROR: Too few arguments.
Greetings from clikit.
Usage:
    greeting <person> [--compliment=Hello],[--exclamation]
Parameters:
person Who to say it to.
Options:
    --compliment What to say. [default: Hello]
    --exclamation Uses an exclamation point instead of a period.
$ hello.py greeting world
Hello, world.
$ hello.py greeting planet --compliment=Hi --exclamation
Hi, planet!
```
from __future__ import unicode_literals
'''
Module for string manipulation functions
'''
import six
def dedent(text, ignore_first_linebreak=True, ignore_last_linebreak=True):
"""
Heavily inspired by textwrap.dedent, with a few changes (as of python 2.7)
- No longer transforming all-whitespace lines into ''
- Options to ignore first and last linebreaks of `text`.
The last option is particularly useful because of ESSS coding standards.
For example, using the default textwrap.dedent to create a 3-line string would look like this:
textwrap.dedent(''' line1
line2
line3'''
)
With these options, you can create a better looking code with:
dedent(
'''
line1
line2
line3
'''
)
:param unicode text:
Text to be dedented (see examples above)
:param bool ignore_first_linebreak:
        If True, blank characters (\r\n\t ) up to the first '\n' are ignored
    :param bool ignore_last_linebreak:
        If True, blank characters (\r\n\t ) after the last '\n' are ignored
Original docs:
Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left edge of the display,
while still presenting them in the source code in indented form.
Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines
" hello" and "\thello" are considered to have no common leading whitespace. (This
behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs
before searching for common leading whitespace.)
"""
if ignore_first_linebreak and '\n' in text:
first, others = text.split('\n', 1)
if first.strip('\n\r\t ') == '':
text = others
if ignore_last_linebreak and '\n' in text:
others, last = text.rsplit('\n', 1)
if last.strip('\n\r\t ') == '':
text = others
import re
_leading_whitespace_re = re.compile('(^[ ]*)(?:[^ \n])', re.MULTILINE)
# Look for the longest leading string of spaces and tabs common to
# all non-empty lines.
margin = None
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
def indent(text, indent_=1, indentation=' '):
"""
Indents multiple lines of text.
:param list(unicode)|unicode text:
The text to apply the indentation.
:param int indent_:
Number of indentations to add. Defaults to 1.
:param unicode indentation:
The text used as indentation. Defaults to 4 spaces.
:return unicode:
Returns the text with applied indentation.
"""
indentation = indent_ * indentation
lines = text
if isinstance(lines, six.text_type):
append_eol = lines.endswith('\n')
lines = lines.splitlines()
else:
append_eol = True
result = []
for i in lines:
if i.strip():
result.append(indentation + i)
else:
result.append(i)
if result:
result = '\n'.join(result)
if append_eol:
result += '\n'
else:
result = ''
return result
def safe_split(s, sep, maxsplit=None, default='', reversed=False):
"""
    Perform a string split guaranteeing the size of the resulting list.
    :param unicode s: The input string.
    :param unicode sep: The separator.
    :param int maxsplit: The maximum number of splits. The length of the resulting list is
        guaranteed to be maxsplit + 1.
    :param default: The default value used to fill in missing items in the result.
    :param bool reversed: If True, splits from the right (rsplit) and missing items are filled
        in at the beginning of the result.
    :return list(unicode):
        Returns a list with a fixed size of maxsplit + 1.
"""
# NOTE: Can't import "string" module for string.split/string.rsplit because of module name
# clashing with this module.
if reversed:
split = lambda s,*args: s.rsplit(*args)
else:
split = lambda s,*args: s.split(*args)
if maxsplit is None:
result = split(s, sep)
else:
result = split(s, sep, maxsplit)
result_len = maxsplit + 1
diff_len = result_len - len(result)
if diff_len > 0:
defaults = [default] * diff_len
if reversed:
result = defaults + result
else:
result = result + defaults
return result
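# Illustrative behavior of safe_split (a sketch, not executed on import):
#
#   safe_split('a:b', ':', maxsplit=2)                 -> ['a', 'b', '']
#   safe_split('a:b:c:d', ':', maxsplit=1)             -> ['a', 'b:c:d']
#   safe_split('a:b', ':', maxsplit=2, reversed=True)  -> ['', 'a', 'b']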
def format_it(iterable, format_expr="'%s'"):
"""
Formats an iterable into a string by applying format_expr to each item.
The resulting string is equivalent to stringifying a list, but unicode
items won't have the prefix 'u'.
    Ex:
        a = u'My Item'
        b = [a]
        format_it(b)  # outputs "['My Item']", rather than "[u'My Item']"
:param object iterable:
Any iterable object.
:param unicode format_expr:
The format expression to use on each item. Defaults to "'%s'" so that the
string representation of each item is encapsulated in single quotes.
"""
items = ', '.join((format_expr % (item,) for item in iterable))
return '[%s]' % (items,)
def match_any(text, regexes):
"""
Returns whether the given text matches any of the given regular expressions.
:param unicode text: The text to check for match.
:param list(unicode) regexes: List of regular expressions.
:return boolean:
Return True if the given text matches any of the given regular expressions.
"""
import re
for i_regex in regexes:
        if re.match(i_regex, text) is not None:
return True
return False | zerotk.clikit | /zerotk.clikit-0.2.0.tar.gz/zerotk.clikit-0.2.0/zerotk/text.py | text.py |
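# Illustrative behavior of match_any (a sketch, not executed on import):
#
#   match_any('foo.txt', ['.*\.txt', '.*\.md'])  -> True
#   match_any('foo.py', ['.*\.txt', '.*\.md'])   -> False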
from __future__ import unicode_literals
'''
The Console is a class that makes it easier to generate colored output.
'''
import os
import re
import sys
import six
#===================================================================================================
# CreateColorMap
#===================================================================================================
def _CreateColorMap():
'''
Creates a map from color to ESC color codes.
'''
codes = {}
_attrs = {
'reset': '39;49;00m',
'bold': '01m',
'faint': '02m',
'standout': '03m',
'underline': '04m',
'blink': '05m',
}
for _name, _value in _attrs.items():
codes[_name] = '\x1b[' + _value
_colors = [
('black', 'darkgray'),
('darkred', 'red'),
('darkgreen', 'green'),
('brown', 'yellow'),
('darkblue', 'blue'),
('purple', 'fuchsia'),
('turqoise', 'teal'),
('lightgray', 'white'),
]
for i, (dark, light) in enumerate(_colors):
codes[dark] = '\x1b[%im' % (i + 30)
codes[light] = '\x1b[%i;01m' % (i + 30)
return codes
#===================================================================================================
# Console
#===================================================================================================
class Console(object):
'''
Verbosity
---------
Controls how much output is generated. It accepts three values:
0: Quiet: Messages in this level are printed even if verbosity is quiet.
1: Normal: Messages in this level are printed only of verbosity is normal or higher.
2: Verbose: Messages in this level are only printed when asked, that is, setting verbosity
to the max level.
    Print calls with a verbosity parameter equal to or lower than the console verbosity value
    will print their messages; otherwise the message is skipped.
The shortcut methods PrintVerbose and PrintQuiet defaults verbosity to the appropriate level.
Color
-----
    If True, prints using colors on stdout and stderr. On Windows we convert all ANSI color
    codes to the appropriate calls using the @colorama@ library.
'''
def __init__(self, verbosity=1, color=False, colorama=None, stdout=sys.stdout, stdin=sys.stdin):
'''
:param bool|None color:
Define whether to generate colored output or not.
If None try to guess whether to use color based on the output capabilities.
:param bool colorama:
            Enables/disables the use of colorama.
None: Tries to use it if available.
True: Tries to use and fails if not available
False: Do not use it.
This is necessary because colorama is incompatible with pytest.
'''
self.__stderr = stdout
self.__stdin = stdin
self.__stdout = stdout
if colorama is True:
import colorama
self.colorama = True
elif colorama is None:
try:
import colorama # @UnusedImport
except ImportError:
self.colorama = False
else:
self.colorama = True
else:
self.colorama = False
self._verbosity = None
self._SetVerbosity(verbosity)
self._color = None
self._SetColor(color)
def SetStdOut(self, stdout):
'''
Configure output streams, both for normal (stdout) and PrintError (stderr) outputs.
:param stdout: A file-like object.
'''
self.__stdout = stdout
self.__stderr = stdout
def _CreateMarkupRe(self):
'''
Creates markup regular-expression.
Defined in a function because it uses COLOR_CODES constants.
'''
return re.compile(r'<(%s|/)>' % ('|'.join(self.COLOR_CODES)))
def _SetVerbosity(self, value):
'''
Verbosity property set method.
'''
if value not in (0, 1, 2):
raise ValueError('console.verbosity must be 0, 1 or 2')
self._verbosity = value
def _GetVerbosity(self):
'''
Verbosity property get method.
'''
return self._verbosity
@classmethod
def _AutoColor(cls):
'''
Try to guess color value (bool) from the environment:
* sys.stdout.isatty
* $COLORTERM
* $TERM
'''
# From Sphinx's console.py
if not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
return False
if 'COLORTERM' in os.environ:
return True
term = os.environ.get('TERM', 'dumb').lower()
if term in ('xterm', 'linux') or 'color' in term:
return True
return False
def _SetColor(self, value):
'''
Color property set method.
'''
if value is None:
self._color = self._AutoColor()
else:
self._color = bool(value)
def _GetColor(self):
'''
Color property get method.
'''
return self._color
verbosity = property(_GetVerbosity, _SetVerbosity)
color = property(_GetColor, _SetColor)
MARKUP_RE = property(_CreateMarkupRe)
COLOR_CODES = _CreateColorMap()
DEFAULT_VERBOSITY = 1
DEFAULT_NEWLINES = 1
DEFAULT_INDENT = 0
def Print(self, message='', verbosity=DEFAULT_VERBOSITY, newlines=DEFAULT_NEWLINES, indent_=DEFAULT_INDENT, stderr=False):
'''
Prints a message to the output.
:param unicode|list(unicode) message: the message to print.
:param int verbosity:
            The minimum verbosity value for this message to appear. See the verbosity property.
:param int newlines:
The number of new-lines to append to the message.
:param int indent_:
The message indentation.
:param bool stderr:
            By default we print to the standard output. If this flag is set we print to the
            standard error.
'''
if self.verbosity < verbosity:
return
if stderr:
stream = self.__stderr
else:
stream = self.__stdout
if isinstance(message, (list, tuple)):
message = '\n'.join(map(six.text_type, message))
else:
message = six.text_type(message)
if self.color:
# `out` holds the stream of text we'll eventually output
# `stack` is the currently applied color codes
# `remaining` holds the still-unparsed part of message
# `match` is any <colorcode> or </> construct
out = ''
stack = []
remaining = message
match = self.MARKUP_RE.search(remaining)
while match:
# `token` is either 'colorcode' or '/'.
token = match.groups()[0]
out += remaining[:match.start()]
remaining = remaining[match.end():]
if token == '/':
if stack:
# Pull the last style off the stack.
# Emit a reset then reapply the remaining styles.
stack.pop()
out += self.COLOR_CODES['reset']
for name in stack:
out += self.COLOR_CODES[name]
else:
out += self.COLOR_CODES[token]
stack.append(token)
match = self.MARKUP_RE.search(remaining)
# Get any remaining text that doesn't have markup and
# reset the terminal if there are any unclosed color tags.
out += remaining
if stack:
out += self.COLOR_CODES['reset']
else:
# No color, just strip that information from the message
out = self.MARKUP_RE.sub('', message)
# Support for colors on Windows
assert isinstance(indent_, int)
assert isinstance(newlines, int)
from zerotk.text import indent
text = indent(out, indent_=indent_) + ('\n' * newlines)
# Encode text to the target (console) encoding.
if six.PY2 and isinstance(text, six.text_type) and (hasattr(stream, 'encoding') and stream.encoding is None):
text = text.encode('ascii', 'replace')
if self.color and self.colorama:
from colorama import AnsiToWin32
ansi_to_win32 = AnsiToWin32(stream, strip=False, convert=True)
ansi_to_win32.write(text)
else:
stream.write(text)
stream.flush()
def PrintError(self, message, newlines=1, indent=0):
'''
Shortcut to Print using stderr.
'''
message = six.text_type(message)
return self.Print(message, verbosity=0, newlines=newlines, indent_=indent, stderr=True)
def PrintQuiet(self, message='', newlines=1, indent=0):
'''
Shortcut to Print using 'quiet' verbosity.
'''
return self.Print(message, verbosity=0, newlines=newlines, indent_=indent)
def PrintVerbose(self, message='', newlines=1, indent=0):
'''
Shortcut to Print using 'verbose' verbosity.
'''
return self.Print(message, verbosity=2, newlines=newlines, indent_=indent)
def Ask(self, message, hidden_input=False):
'''
Ask the users for a value.
:param unicode message: Message to print before asking for the value
:param bool hidden_input: If True, user input will not be shown in command line (useful for passwords)
:return unicode: A value entered by the user.
'''
self.PrintQuiet(message + ' ', newlines=0)
if hidden_input:
import getpass
return getpass.getpass(prompt='', stream=self.__stdin)
else:
return self.__stdin.readline().strip()
def AskPassword(self):
'''
Ask the users for a password. User input will not be shown in command line.
:return unicode: A value entered by the user.
'''
return self.Ask('Password:', hidden_input=True)
def Progress(self, message, verbosity=DEFAULT_VERBOSITY, indent=DEFAULT_INDENT, format_='%s: '):
'''
        Starts a progress message, without the EOL.
Use one of the "finishers" methods to finish the progress:
* ProgressOk
* ProgressError
'''
self.Print(format_ % message, verbosity=verbosity, newlines=0, indent_=indent)
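    # Illustrative progress usage (a sketch):
    #
    #   console.Progress('Copying files')
    #   ...                       # perform the work here
    #   console.ProgressOk()      # or ProgressError('failed') on failure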
def ProgressOk(self, message='OK', verbosity=DEFAULT_VERBOSITY, indent=0, format_='<green>%s</>'):
'''
Ends a progress "successfully" with a message
:param unicode message: Message to finish the progress. Default to "OK"
'''
self.Print(format_ % message, verbosity=verbosity, indent_=indent)
def ProgressError(self, message, verbosity=DEFAULT_VERBOSITY, indent=0, format_='<red>%s</>'):
'''
Ends a progress "with failure" message
:param unicode message: (Error) message to finish the progress.
'''
self.Print(format_ % message, verbosity=verbosity, indent_=indent)
def ProgressWarning(self, message, verbosity=DEFAULT_VERBOSITY, indent=0, format_='<yellow>%s</>'):
'''
Ends a progress "with a warning" message
:param unicode message: (Warning) message to finish the progress.
'''
self.Print(format_ % message, verbosity=verbosity, indent_=indent)
def Item(self, message, verbosity=DEFAULT_VERBOSITY, newlines=1, indent=0, stderr=False, format_='- %s'):
'''
Prints an item.
:param unicode message:
:param int verbosity:
:param int newlines:
:param int indent:
        :param bool stderr:
'''
return self.Print(format_ % message, verbosity, newlines, indent, stderr)
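# Illustrative markup usage (a sketch; see COLOR_CODES above for the available
# color names):
#
#   console = Console(color=True)
#   console.Print('<green>OK</>')
#   console.Print('<red>ERROR:</> <bold>details</>')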
#===================================================================================================
# BufferedConsole
#===================================================================================================
class BufferedConsole(Console):
'''
The same as console, but defaults output to a buffer.
'''
def __init__(self, verbosity=1, color=None, stdin=None):
'''
:param (1|2|3) verbosity:
:param bool color:
'''
from six import StringIO
self.__buffer = StringIO()
Console.__init__(self, verbosity=verbosity, color=color, colorama=False, stdout=self.__buffer, stdin=stdin)
def GetOutput(self):
'''
Returns the current value of the output buffer and resets it.
'''
from six import StringIO
result = self.__buffer.getvalue()
self.__buffer = StringIO()
self.SetStdOut(self.__buffer)
return result | zerotk.clikit | /zerotk.clikit-0.2.0.tar.gz/zerotk.clikit-0.2.0/zerotk/clikit/console.py | console.py |
from __future__ import unicode_literals
from .console import BufferedConsole
from collections import OrderedDict
import re
import six
#===================================================================================================
# InvalidFixture
#===================================================================================================
class InvalidFixture(KeyError):
'''
Exception raised when an unknown argument is added to a command-function.
'''
pass
#===================================================================================================
# MissingArgument
#===================================================================================================
class MissingArgument(KeyError):
'''
Exception raised when an unknown argument is added to a command-function.
'''
pass
#===================================================================================================
# Command
#===================================================================================================
class Command(object):
'''
Holds the information for a command, directly associated with a function that implements it.
'''
class DEFAULT(object):
'''
Placeholder for positional arguments with default value.
Usage:
            def Hello(self, console_, p1=DEFAULT('default')):
console_.Print(p1)
>hello
default
>hello other
other
'''
def __init__(self, default):
'''
:param object default:
The default value for the positional argument.
'''
self.default = default
def CreateArg(self, name):
'''
Creates Arg instance for our positional with default argument.
:param unicode name:
The name of the argument.
'''
return Command.Arg(name, Command.Arg.ARG_TYPE_POSITIONAL, self.default)
    class Arg(object):
'''
Holds meta-information about the associated *function parameter*.
'''
NO_DEFAULT = object()
ARG_TYPE_FIXTURE = "F"
ARG_TYPE_OPTION = "O"
ARG_TYPE_POSITIONAL = 'P'
ARG_TYPE_TRAIL = "T"
def __init__(self, name, arg_type, default=NO_DEFAULT):
'''
:param unicode name:
The argument name.
:param ARG_TYPE_XXX arg_type:
The type of the argument in CliKit scope.
See ARG_TYPE_XXX constants.
:param object default:
The default value for this argument.
'''
self.name = name
self.arg_type = arg_type
self.default = default
self.description = '(no description)'
self.short_description = '(no description)'
# For the command line, all options with underscores are replaced by hyphens.
# argparse handles this and replaces back to underscore when setting option values
if arg_type == self.ARG_TYPE_OPTION:
self.argparse_name = name.replace('_', '-')
else:
self.argparse_name = name
def __str__(self):
if self.arg_type == self.ARG_TYPE_TRAIL:
return '*' + self.argparse_name
elif any(map(lambda x: self.default is x, (Command.Arg.NO_DEFAULT, True, False))):
return self.argparse_name
elif self.default is None:
return '%s=VALUE' % self.argparse_name
else:
return '%s=%s' % (self.argparse_name, self.default)
def __repr__(self):
'''
Representation for debug purposes.
'''
return '<Arg %s>' % self.__str__()
def ConfigureArgumentParser(self, parser):
'''
Configures the given parser with an argument matching the information in this class.
:param parser: argparse.ArgumentParser
'''
if self.arg_type == self.ARG_TYPE_FIXTURE:
return
if self.arg_type == self.ARG_TYPE_TRAIL:
parser.add_argument(self.argparse_name, nargs='*')
return
if self.arg_type == self.ARG_TYPE_OPTION:
if isinstance(self.default, bool):
# Boolean arguments have a special treatment, since they create a 'store_true'
# parameters type that has no arguments. For example, instead of passing
# --param=True in the command line, using --param should have the same effect
# Boolean options with default=True make no sense to the command line, since
# either setting or not setting them would lead to the same effect.
assert self.default is not True, 'Can\'t handle boolean options with default=True'
parser.add_argument('--%s' % self.argparse_name, action='store_true', default=self.default)
else:
# All other parameter types work as usual and must receive a value
parser.add_argument('--%s' % self.argparse_name, default=self.default)
return
if self.arg_type is self.ARG_TYPE_POSITIONAL:
if self.default is self.NO_DEFAULT:
parser.add_argument(self.argparse_name)
else:
parser.add_argument(self.argparse_name, nargs='?', default=self.default)
return
raise TypeError('Unknown arg_type==%r' % self.arg_type)
def __init__(self, func, names=None):
'''
:param <function> func:
A function to wrap as a command.
:param None|unicode|list(unicode) names:
A list of names for the command.
By default uses the function name converted to "command style".
If not None, uses only the names from this argument, ignoring the function name.
'''
import six
self.func = func
if names is None:
self.names = [func.__name__] # default to function name
elif isinstance(names, six.text_type):
self.names = [names] # a single name
else:
self.names = names # already a list
# Meta-info from function inspection
args, trail, self.kwargs, defaults = self._ParseFunctionArguments(self.func)
# Holds a dict, mapping the arg name to an Arg instance. (See Arg class)
self.args = OrderedDict()
first_default = len(args) - len(defaults)
for i, i_arg in enumerate(args):
if i_arg.endswith('_'):
self.args[i_arg] = self.Arg(i_arg, self.Arg.ARG_TYPE_FIXTURE)
elif i < first_default:
self.args[i_arg] = self.Arg(i_arg, self.Arg.ARG_TYPE_POSITIONAL)
else:
default = defaults[i - first_default]
if isinstance(default, Command.DEFAULT):
self.args[i_arg] = default.CreateArg(i_arg)
elif default is True:
# I couldn't find a reasonable way to handle bool args with default=True since
# Passing --option or not would have the same result, therefore, these args
# cannot be used in clikit commands.
raise RuntimeError(
"Clikit commands are not allowed to have " + \
"boolean parameters that default to True."
)
else:
self.args[i_arg] = self.Arg(i_arg, self.Arg.ARG_TYPE_OPTION, default)
# Adds trail (*args) to the list of arguments.
# - Note that these arguments have a asterisk prefix.
if trail is not None:
self.args[trail] = self.Arg(trail, self.Arg.ARG_TYPE_TRAIL)
        # Meta-info from the function docstring.
description, long_description, arg_descriptions = self._ParseDocString(self.func.__doc__ or '')
self.description = description or '(no description)'
self.long_description = long_description or '(no description)'
for i_arg, i_description in six.iteritems(arg_descriptions):
try:
self.args[i_arg].description = i_description
except KeyError as e:
raise RuntimeError('%s: argument not found for documentation entry.' % six.text_type(e))
def _ParseFunctionArguments(self, func):
'''
Parses function arguments returning meta information about it.
:return tuple:
            [0]: args: The list with the names of all the function arguments.
            [1]: trail: The name of the *args argument, or None if not present.
            [2]: kwargs: The name of the **kwargs argument, or None if not present.
            [3]: defaults: The default values for the arguments (if any are given).
'''
import inspect
args, trail, kwargs, defaults = inspect.getargspec(func)
defaults = defaults or []
return args, trail, kwargs, defaults
PARAM_RE = re.compile(':param (.*):(.*)$')
def _ParseDocString(self, docstring):
'''
Parses the (function) docstring for the general and arguments descriptions.
:param docstring: A well formed docstring of a function.
:rtype: tuple(unicode, unicode, list(unicode))
:returns:
Returns the function description (doc's first line) and the description of each
argument (sphinx doc style).
'''
# States:
# 0: Starting to process
# 1: Loaded short_description
# 2: Loaded long_description
state = 0
short_description = ''
long_description = []
arg_descriptions = {}
lines = docstring.split('\n')
for i_line in lines:
i_line = i_line.strip()
if state == 0:
if not i_line:
continue
short_description = i_line
state = 1
m = self.PARAM_RE.match(i_line)
if m:
state = 2
arg, doc = m.groups()
arg_descriptions[arg.strip()] = doc.strip()
elif state == 1:
long_description.append(i_line)
continue
long_description = '\n'.join(long_description)
long_description = long_description.rstrip('\n')
return short_description, long_description, arg_descriptions
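    # Docstring layout expected by _ParseDocString (illustrative):
    #
    #   '''
    #   Short description: the first non-empty line.
    #   Long description lines follow, up to the first ":param" entry.
    #   :param foo: Description stored for the argument named "foo".
    #   '''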
def FormatHelp(self):
'''
Format help for this command.
:return unicode:
'''
console = BufferedConsole()
console.Print('Usage:')
positionals = [i for i in self.args.values() if i.arg_type in (self.Arg.ARG_TYPE_POSITIONAL, self.Arg.ARG_TYPE_TRAIL)]
optionals = [i for i in self.args.values() if i.arg_type == self.Arg.ARG_TYPE_OPTION]
console.Print('%s %s %s' % (
','.join(self.names),
' '.join(['<%s>' % i for i in positionals]),
','.join(['[--%s]' % i for i in optionals]),
), indent_=1, newlines=2)
console.Print('Parameters:')
for i in positionals:
console.Print('<teal>%s</> %s' % (i.argparse_name, i.description), indent_=1)
console.Print()
console.Print('Options:')
for i in optionals:
if any(map(lambda x: i.default is x, (Command.Arg.NO_DEFAULT, None, True, False))):
console.Print('--%s %s' % (i.argparse_name, i.description), indent_=1)
else:
console.Print('--%s %s [default: %s]' % (i.argparse_name, i.description, i.default), indent_=1)
return console.GetOutput()
def ConfigureArgumentParser(self, parser):
'''
Configures the given parser with all arguments of this command.
:param parser: argparse.ArgumentParser
'''
for i_arg in six.itervalues(self.args):
i_arg.ConfigureArgumentParser(parser)
def Call(self, fixtures, argd):
'''
Executes the function filling the fixtures and options parameters.
:param dict(unicode : tuple(callable, callable)) fixtures:
Map of fixtures to pass to the function as requested.
:param argd:
Map of option values as passed by the user in the command line.
:return:
Returns the command function result.
'''
args = []
finalizers = []
for i_arg in six.itervalues(self.args):
if i_arg.arg_type == i_arg.ARG_TYPE_FIXTURE:
try:
fixture, finalizer = fixtures[i_arg.name]
except KeyError as exception:
raise InvalidFixture(six.text_type(exception))
args.append(fixture())
finalizers.append(finalizer)
continue
if i_arg.arg_type == i_arg.ARG_TYPE_TRAIL:
args += argd.get(i_arg.name, ())
continue
if i_arg.arg_type == i_arg.ARG_TYPE_POSITIONAL:
arg = argd.get(i_arg.name, i_arg.default)
if arg is self.Arg.NO_DEFAULT:
raise TypeError(i_arg.name)
else:
args.append(arg)
continue
if i_arg.arg_type == i_arg.ARG_TYPE_OPTION:
arg = argd.get(i_arg.name, i_arg.default)
args.append(arg)
continue
result = self.func(*args)
for i_finalizer in finalizers:
i_finalizer()
return result
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs) | zerotk.clikit | /zerotk.clikit-0.2.0.tar.gz/zerotk.clikit-0.2.0/zerotk/clikit/command.py | command.py |
from __future__ import unicode_literals
import argparse
import sys
import six
from six.moves.configparser import SafeConfigParser
from zerotk.text import dedent
from .command import Command
#===================================================================================================
# Exceptions
#===================================================================================================
class InvalidCommand(KeyError):
'''
Exception raised when an unknown command is requested for execution.
'''
pass
class UnknownApp(RuntimeError):
'''
Exception raised when trying to perform a TestCall with an unknown app.
'''
def __init__(self, app, apps):
RuntimeError.__init__(self, 'Unknown app "%s". Valid apps are: %s' % (app, ', '.join(apps)))
#===================================================================================================
# ConsolePlugin
#===================================================================================================
class ConsolePlugin(object):
'''
Options and fixtures for console.
    Note that all Apps have a console; this plugin does not add the console per se, only the
    options and fixtures associated with it.
'''
def __init__(self, console):
self.__console = console
def ConfigureOptions(self, parser):
'''
Implements IClikitPlugin.ConfigureOptions
'''
parser.add_argument(
'-v',
'--verbose',
dest='console_verbosity',
action='store_const',
const=2,
default=1,
help='Emit verbose information'
        )
parser.add_argument(
'-q',
'--quiet',
dest='console_verbosity',
action='store_const',
const=0,
default=1,
help='Emit only errors'
        )
parser.add_argument(
'--no-color',
dest='console_color',
action='store_false',
default=None,
help='Do not colorize output'
)
def HandleOptions(self, opts):
'''
Implements IClikitPlugin.HandleOptions
'''
self.__console.verbosity = opts['console_verbosity']
if opts['console_color'] is not None:
self.__console.color = opts['console_color']
def GetFixtures(self):
'''
Implements IClikitPlugin.GetFixtures
'''
return {
'console_' : self.__console
}
#===================================================================================================
# ConfPlugin
#===================================================================================================
class ConfPlugin(object):
'''
Adds global configuration fixture to App.
'''
def __init__(self, name, conf_defaults=None, conf_filename=None):
'''
:param unicode name:
The application name, used to deduce the configuration filename.
:param dict conf_defaults:
Default values for configuration.
This is a dictionary of dictionaries. The outer dictionary has the configuration groups
as keys. The inner dictionary maps names to values inside a group.
:param unicode conf_filename:
The configuration filename. If None generates a default name.
'''
self.__name = name
self.conf_defaults = conf_defaults or {}
self.conf_filename = conf_filename or '~/.%(name)s.conf'
def ConfigureOptions(self, parser):
'''
Implements IClikitPlugin.ConfigureOptions
'''
return
def HandleOptions(self, opts):
'''
Implements IClikitPlugin.HandleOptions
'''
pass
def GetFixtures(self):
'''
Implements IClikitPlugin.GetFixtures
'''
class MyConfigParser(SafeConfigParser):
'''
Adds:
* Filename, so it can "Save" the configuration file.
'''
def __init__(self, filename):
SafeConfigParser.__init__(self)
self.filename = filename
self.read(self.filename)
def Get(self, section, name):
'''
Returns a value from a section/name.
'''
return self.get(section, name)
def Set(self, section, name, value):
'''
Sets a value on a section.
'''
return self.set(section, name, value)
def Save(self):
'''
Saves the configuration file.
'''
                with open(self.filename, 'w') as f:
                    self.write(f)
def GetConfFilename():
'''
Returns the full configuration file expanding users (~) and names (%(name)s).
'''
from zerotk.easyfs import ExpandUser
return ExpandUser(self.conf_filename % {'name' : self.__name})
def CreateConf():
'''
Creates the configuration file applying the defaults values from self.conf_default.
'''
filename = GetConfFilename()
conf = MyConfigParser(filename)
# Set the defaults in the object with the values from conf_default.
for section, options in six.iteritems(self.conf_defaults):
if not conf.has_section(section):
conf.add_section(section)
for name, value in six.iteritems(options):
if not conf.has_option(section, name):
conf.set(section, name, six.text_type(value))
return conf
return {
'conf_' : CreateConf(),
}
#===================================================================================================
# MyArgumentParser
#===================================================================================================
class TooFewArgumentError(RuntimeError):
pass
class UnrecognizedArgumentsError(RuntimeError):
def __init__(self, arguments):
self.arguments = arguments
RuntimeError.__init__(self)
class MyArgumentParser(argparse.ArgumentParser):
def error(self, message):
'''
Overrides original implementation to avoid printing stuff on errors.
All help and printing is done by clikit. "No soup for you", argparse.
'''
if message == 'too few arguments':
raise TooFewArgumentError()
if message.startswith('unrecognized arguments: '):
raise UnrecognizedArgumentsError(message[24:])
#===================================================================================================
# App
#===================================================================================================
class App(object):
'''
Command Line Interface Application.
'''
# Use DEFAULT for positional arguments with default values. see Command.DEFAULT.
DEFAULT = Command.DEFAULT
def __init__(
self,
name,
description='',
color=True,
colorama=None,
conf_defaults=None,
conf_filename=None,
buffered_console=False,
):
from .console import BufferedConsole, Console
self.__name = name
self.description = description
self.__commands = []
self.__custom_fixtures = {}
if buffered_console:
self.console = BufferedConsole(color=color)
else:
self.console = Console(color=color, colorama=colorama)
self.plugins = {
'conf' : ConfPlugin(self.__name, conf_defaults, conf_filename),
}
def __call__(self, func=None, **kwargs):
'''
Implement the decorator behavior for App.
There are two use cases:
Case 1:
@app(a=1, b=2)
def foo()
Case 2:
@app
def foo()
:param callable func:
Case 1: In this case, func is None.
Case 2: In this case, func is the decorated function.
:return:
Case 1: Returns a replacement for the function.
Case 2: Returns a "functor", which in turn returns a replacement for the function.
'''
if func is None:
def Decorator(func):
'''
In "Case 1" we return a simple callable that registers the function then returns it
unchanged.
'''
return self.Add(func, **kwargs)
return Decorator
else:
return self.Add(func)
def Add(
self,
func,
name=None,
alias=None,
):
'''
Adds a function as a subcommand to the application.
        :param <function> func: The function to add.
:param unicode name: The name of the command. If not given (None) uses the function name.
:param list(unicode) alias: A list of valid aliases for the same command.
:return Command:
Command instance for the given function.
'''
def _GetNames(func, alias):
'''
Returns a list of names considering the function and all aliases.
            :param function func:
:param list(unicode) alias:
'''
import six
result = [self.ConvertToCommandName(name or func.__name__)]
if alias is None:
alias = []
elif isinstance(alias, six.string_types):
alias = [alias]
else:
alias = list(alias)
result.extend(alias)
return result
assert not isinstance(func, Command), 'App.Add must receive a function/method, not a Command.'
names = _GetNames(func, alias)
command = Command(func, names)
# Make sure none of the existing commands share a name.
all_names = self.ListAllCommandNames()
for i_name in command.names:
if i_name in all_names:
command = self.GetCommandByName(i_name)
raise ValueError(
'Command name %s from %s.%s conflicts with name defined in %s.%s' %
(
i_name,
func.__module__, func.__name__,
command.func.__module__, command.func.__name__)
)
self.__commands.append(command)
return command
def Fixture(self, func=None, name=None):
'''
This is a decorator that registers a function as a custom fixture.
Once registered, a command can request the fixture by adding its name as a parameter.
'''
def _AddFixture(name, func):
name = self.ConvertToFixtureName(name or func.__name__)
self.__custom_fixtures[name] = func
if func is None:
def Decorator(func):
_AddFixture(name, func)
return func
return Decorator
else:
_AddFixture(name, func)
return func
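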
@staticmethod
def ConvertToCommandName(func_name):
'''
Converts a function name to a command name:
- lower-case
- dashes separates words
The command name is used as the argument from the command line.
Ex.
CreateDb -> create-db
:param unicode func_name:
The function name, using camel cases standard.
:return unicode:
'''
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', func_name)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1).lower()
@staticmethod
def ConvertToFixtureName(func_name):
'''
Converts a function name to a fixture name:
- lower-case
- underscores separates words
- ends with an underscore
The fixture name is used as a parameter of a Command function.
Ex.
MyDb -> my_db_
:param unicode func_name:
The function name, using camel cases standard.
:return unicode:
'''
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', func_name)
result = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
if not result.endswith('_'):
result += '_'
return result
def GetCommandByName(self, name):
'''
        Returns the command instance with the given name.

        :param unicode name:
        :return Command:
'''
for j_command in self.__commands:
if name in j_command.names:
return j_command
raise InvalidCommand(name)
def ListAllCommandNames(self):
'''
Lists all commands names, including all aliases.
:return list(unicode):
'''
result = []
for j_command in self.__commands:
result += j_command.names
return result
def GetFixtures(self, argv):
'''
:return dict:
Returns a dictionary mapping each available fixture to its implementation callable.
'''
def GetFixtureAndFinalizer(func):
'''
Handles fixture function with yield.
Returns the fixture and finalizer callables.
'''
import inspect
if inspect.isgeneratorfunction(func):
func_iter = func()
next = getattr(func_iter, "__next__", None)
if next is None:
next = getattr(func_iter, "next")
result = next
def finalizer():
try:
next()
except StopIteration:
pass
else:
raise RuntimeError("Yield fixture function has more than one 'yield'")
else:
result = func
finalizer = lambda:None
return result, finalizer
result = {
'argv_' : (lambda:argv, lambda:None),
}
for i_fixture_name, i_fixture_func in six.iteritems(self.__custom_fixtures):
fixture, finalizer = GetFixtureAndFinalizer(i_fixture_func)
result[i_fixture_name] = (fixture, finalizer)
for i_plugin in six.itervalues(self.plugins):
result.update(
{
                    # Bind the value via a default argument to avoid the late-binding
                    # closure pitfall (otherwise every lambda would return the last value).
                    i : (lambda j=j: j, lambda: None)
for (i,j) in six.iteritems(i_plugin.GetFixtures())
}
)
return result
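    # Illustrative yield-fixture (a sketch): everything after the single
    # `yield` runs as a finalizer once the command returns.
    #
    #   @app.Fixture
    #   def tmp_dir_():
    #       path = tempfile.mkdtemp()
    #       yield path            # injected into commands asking for `tmp_dir_`
    #       shutil.rmtree(path)   # cleanup executed after the command returns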
RETCODE_OK = 0
RETCODE_ERROR = 1
def Main(self, argv=None):
'''
Entry point for the commands execution.
'''
self.plugins['console'] = ConsolePlugin(self.console)
if argv is None:
argv = sys.argv[1:]
parser = self.CreateArgumentParser()
opts, args = parser.parse_known_args(argv)
# Give plugins change to handle options
for i_plugin in six.itervalues(self.plugins):
i_plugin.HandleOptions(opts.__dict__)
# Print help for the available commands
if not args:
self.PrintHelp()
return self.RETCODE_OK
cmd, args = args[0], args[1:]
try:
command = self.GetCommandByName(cmd)
# Print help for the specific command
if opts.help:
self.PrintHelp(command)
return self.RETCODE_OK
# Configure parser with command specific parameters/options
command.ConfigureArgumentParser(parser)
# Parse parameters/options
try:
command_opts = parser.parse_args(args)
fixtures = self.GetFixtures(argv)
result = command.Call(fixtures, command_opts.__dict__)
if result is None:
result = self.RETCODE_OK
return result
except TooFewArgumentError:
self.console.PrintError('<red>ERROR: Too few arguments.</>', newlines=2)
self.PrintHelp(command)
return self.RETCODE_ERROR
except UnrecognizedArgumentsError as e:
self.console.PrintError('<red>ERROR: Unrecognized arguments: %s</>' % e.arguments, newlines=2)
self.PrintHelp(command)
return self.RETCODE_ERROR
except InvalidCommand as exception:
self.console.PrintError('<red>ERROR: Unknown command %s</>' % six.text_type(exception))
self.PrintHelp()
return self.RETCODE_ERROR
def CreateArgumentParser(self):
'''
Create a argument parser adding options from all plugins (ConfigureOptions)
'''
r_parser = MyArgumentParser(
prog=self.__name,
add_help=False,
)
r_parser.add_argument('--help', action='store_true', help='Help about a command')
for i_plugin in six.itervalues(self.plugins):
i_plugin.ConfigureOptions(r_parser)
return r_parser
def PrintHelp(self, command=None):
'''
Print help for all registered commands or an specific one.
:param Command command: A command to print help or None to print the application help.
'''
if command is None:
self.console.PrintQuiet()
self.console.PrintQuiet('Usage:')
self.console.PrintQuiet('%s <subcommand> [options]' % self.__name, indent=1)
self.console.PrintQuiet()
self.console.PrintQuiet('Commands:')
# Collect command names and description
commands = []
for i_command in self.__commands:
command_names = [i for i in i_command.names]
width = sum(map(len, command_names)) + (2 * (len(command_names) - 1))
if self.console.color:
command_names = ['<teal>%s</>' % i for i in command_names]
commands.append((width, ', '.join(command_names), i_command.description))
# Prints in columns
max_width = max([i[0] for i in commands])
for i_width, i_names, i_description in commands:
spaces = ' ' * ((max_width - i_width) + 3)
self.console.PrintQuiet('%s%s%s' % (i_names, spaces, i_description), indent=1)
else:
self.console.PrintQuiet(command.long_description, newlines=2)
self.console.Print(command.FormatHelp())
def ExecuteCommand(self, cmd, *args, **kwargs):
'''
Executes a command using normal parameters.
:param unicode cmd:
The name of a previously registered command to execute.
:param *args:
Arguments passed to the command function "as is".
:param *kwargs:
Keyword arguments passed to the command function "as is".
TODO: BEN-23: Handle fixture on clikit.app.App.ExecuteCommand.
This is not handling fixtures. It ALWAYS passes console as the first parameter.
'''
from .console import BufferedConsole
command = self.GetCommandByName(cmd)
console = BufferedConsole()
retcode = command(console, *args, **kwargs)
if retcode is None:
retcode = self.RETCODE_OK
return retcode, console.GetOutput()
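    # Illustrative direct call (a sketch; assumes a command named 'greeting'
    # taking one positional argument was registered on this app):
    #
    #   retcode, output = app.ExecuteCommand('greeting', 'world')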
def TestScript(self, script, input_prefix='>'):
'''
Executes a test script, containing command calls (prefixed by ">") and expected results.
Example:
app = App('ii')
app = TestScript(dedent(
"""
> ii list
- alpha
- bravo
"""
)
:param unicode string:
A multi-line string with command calls and expected results.
Consider the following syntax rules:
- Lines starting with '>' are command execution (command);
- Lines starting with "###" are ignored;
- Everything else is expected output of the previous "command";
- Use [retcode=X] syntax to check for non-zero return from a command.
'''
def Execute(cmd, expected_output, expected_retcode):
obtained_retcode, obtained = self.TestCall(cmd)
obtained_string = obtained.rstrip('\n') + '\n'
expected_string = expected_output.rstrip('\n') + '\n'
assert obtained_string == expected_string
assert expected_retcode == obtained_retcode, dedent(
'''
>>> %(cmd)s
Command finished with return code "%(obtained_retcode)s", was expecting "%(expected_retcode)s"
Use ">>>my_command [retcode=X]" syntax to define the expected return code.
''' % locals()
)
def GetExpectedReturnCode(input_line):
'''
Find the return code we expect from this command.
Expected return code must be in format '[retcode=999]'
If not specified, we assume that the expected retcode is 0
e.g.
>>>DoSomethingThatFails [retcode=1]
>>>DoSomethingOk [retcode=0]
:param unicode input_line:
'''
import re
            pattern = r'\[retcode=(\d+)\]'
match = re.search(pattern, input_line)
if match:
expected_retcode = int(match.groups()[0])
else:
expected_retcode = 0
return re.sub(pattern, '', input_line), expected_retcode
cmd = None
expected_output = ''
expected_retcode = 0
script = dedent(script)
for i_line in script.splitlines():
if i_line.startswith('###'):
continue
elif i_line.startswith(input_prefix):
if cmd is not None:
Execute(cmd, expected_output, expected_retcode)
expected_output = ''
cmd = i_line[len(input_prefix):]
cmd, expected_retcode = GetExpectedReturnCode(cmd)
else:
expected_output += i_line + '\n'
if cmd is not None:
Execute(cmd, expected_output, expected_retcode)
def TestCall(self, cmd_line, extra_apps={}):
'''
Executes the given command line for test purposes.
Example:
app = App('ii')
retcode, output = app.TestCall('ii list')
:param unicode cmd_line:
A command line to execute.
The first parameter must be the app name as declared in this App instance constructor.
        :param dict(unicode : App) extra_apps:
A list of extra-apps available for execution.
By default only this App instance is available for executing in the command line. With
this parameter one can add others utilitarian apps for testing.
:return tuple(int, unicode):
Returns the command return code and output.
'''
from .console import BufferedConsole
from zerotk.pushpop import PushPopAttr
import shlex
apps = {
self.__name : self,
}
apps.update(extra_apps)
with PushPopAttr(self, 'console', BufferedConsole()):
cmd_line = shlex.split(cmd_line)
app_name, cmd_line = cmd_line[0], cmd_line[1:]
app = apps.get(app_name)
if app is None:
raise UnknownApp(app_name, apps.keys())
retcode = app.Main(cmd_line)
return retcode, self.console.GetOutput() | zerotk.clikit | /zerotk.clikit-0.2.0.tar.gz/zerotk.clikit-0.2.0/zerotk/clikit/app.py | app.py |
from __future__ import unicode_literals
#===================================================================================================
# UnknownPlatformError
#===================================================================================================
class UnknownPlatformError(RuntimeError):
def __init__(self, platform):
self.platform = platform
RuntimeError.__init__(self, 'Unknown platform "%s".' % platform)
#===================================================================================================
# NotImplementedProtocol
#===================================================================================================
class NotImplementedProtocol(RuntimeError):
def __init__(self, protocol):
RuntimeError.__init__(self, "Function can't handle protocol '%s'." % protocol)
self.protocol = protocol
#===================================================================================================
# NotImplementedForRemotePathError
#===================================================================================================
class NotImplementedForRemotePathError(NotImplementedError):
def __init__(self):
NotImplementedError.__init__(self, 'Function not implemented for remote paths.')
#===================================================================================================
# FileError
#===================================================================================================
class FileError(RuntimeError):
def __init__(self, filename):
self.filename = filename
RuntimeError.__init__(self, self.GetMessage(filename))
def GetMessage(self, filename):
raise NotImplementedError()
#===================================================================================================
# FileNotFoundError
#===================================================================================================
class FileNotFoundError(FileError):
def GetMessage(self, filename):
return 'File "%s" not found.' % filename
#===================================================================================================
# CantOpenFileThroughProxyError
#===================================================================================================
class CantOpenFileThroughProxyError(FileError):
def GetMessage(self, filename):
return 'Can\'t open file "%s" through a proxy.' % filename
#===================================================================================================
# DirectoryNotFoundError
#===================================================================================================
class DirectoryNotFoundError(FileError):
def GetMessage(self, directory):
return 'Directory "%s" not found.' % directory
#===================================================================================================
# DirectoryAlreadyExistsError
#===================================================================================================
class DirectoryAlreadyExistsError(FileError):
def GetMessage(self, directory):
return 'Directory "%s" already exists.' % directory
#===================================================================================================
# ServerTimeoutError
#===================================================================================================
class ServerTimeoutError(FileError):
def GetMessage(self, filename):
return 'Server timeout while accessing file "%s"' % filename
#===================================================================================================
# FileAlreadyExistsError
#===================================================================================================
class FileAlreadyExistsError(FileError):
def GetMessage(self, filename):
return 'File "%s" already exists.' % filename
#===================================================================================================
# FileOnlyActionError
#===================================================================================================
class FileOnlyActionError(FileError):
def GetMessage(self, filename):
return 'Action performed over "%s" only possible with a file.' % filename
#===================================================================================================
# MultipleFilesNotFound
#===================================================================================================
class MultipleFilesNotFound(FileNotFoundError):
'''
Raised when a filename search algorithm fails to find a valid filename match.
The error lists all the candidate filenames.
'''
def __init__(self, filenames, header=''):
self.header = header
self.filenames = filenames
def __str__(self):
return self.header + 'Files not found: %s' % ','.join(self.filenames) | zerotk.easyfs | /zerotk.easyfs-1.0.3.tar.gz/zerotk.easyfs-1.0.3/zerotk/easyfs/_exceptions.py | _exceptions.py |
from __future__ import unicode_literals
'''
This module contains a selection of file related functions that can be used anywhere.
Some sort of wrapper for common builtin 'os' operations with a nicer interface.
These functions abstract file location, most of them work for either local, ftp or http protocols
'''
from zerotk.reraiseit import reraise
import contextlib
import io
import os
import re
import sys
import six
#===================================================================================================
# Constants
#===================================================================================================
SEPARATOR_UNIX = '/'
SEPARATOR_WINDOWS = '\\'
EOL_STYLE_NONE = None # Binary files
EOL_STYLE_UNIX = '\n'
EOL_STYLE_WINDOWS = '\r\n'
EOL_STYLE_MAC = '\r'
def _GetNativeEolStyle(platform=sys.platform):
'''
Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the
current platform.
'''
_NATIVE_EOL_STYLE_MAP = {
'win32' : EOL_STYLE_WINDOWS,
'linux2' : EOL_STYLE_UNIX,
'linux' : EOL_STYLE_UNIX,
'darwin' : EOL_STYLE_MAC,
}
result = _NATIVE_EOL_STYLE_MAP.get(platform)
if result is None:
from ._exceptions import UnknownPlatformError
raise UnknownPlatformError(platform)
return result
EOL_STYLE_NATIVE = _GetNativeEolStyle()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa364939%28v=vs.85%29.aspx
# The drive type cannot be determined.
DRIVE_UNKNOWN = 0
# The root path is invalid; for example, there is no volume mounted at the specified path.
DRIVE_NO_ROOT_DIR = 1
# The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.
DRIVE_REMOVABLE = 2
# The drive has fixed media; for example, a hard disk drive or flash drive.
DRIVE_FIXED = 3
# The drive is a remote (network) drive.
DRIVE_REMOTE = 4
# The drive is a CD-ROM drive.
DRIVE_CDROM = 5
# The drive is a RAM disk
DRIVE_RAMDISK = 6
#===================================================================================================
# Cwd
#===================================================================================================
@contextlib.contextmanager
def Cwd(directory):
'''
Context manager for current directory (uses with_statement)
e.g.:
# working on some directory
with Cwd('/home/new_dir'):
# working on new_dir
# working on some directory again
:param unicode directory:
Target directory to enter
'''
old_directory = six.moves.getcwd()
if directory is not None:
os.chdir(directory)
try:
yield directory
finally:
os.chdir(old_directory)
#===================================================================================================
# NormalizePath
#===================================================================================================
def NormalizePath(path):
'''
Normalizes a path maintaining the final slashes.
Some environment variables need the final slash in order to work.
Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used
in the Visual Studio projects.
:param unicode path:
The path to normalize.
:rtype: unicode
:returns:
Normalized path
'''
if path.endswith('/') or path.endswith('\\'):
slash = os.path.sep
else:
slash = ''
return os.path.normpath(path) + slash
#===================================================================================================
# CanonicalPath
#===================================================================================================
def CanonicalPath(path):
'''
Returns a version of a path that is unique.
Given two paths path1 and path2:
CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on
the host OS. Takes account of case, slashes and relative paths.
:param unicode path:
The original path.
:rtype: unicode
:returns:
The unique path.
'''
path = os.path.normpath(path)
path = os.path.abspath(path)
path = os.path.normcase(path)
return path
#===================================================================================================
# StandardizePath
#===================================================================================================
def StandardizePath(path, strip=False):
'''
    Replaces all backslashes with forward slashes (the standard separator).
    StandardPath:
        We are defining that the standard-path is the one with only forward slashes in it,
        either on Windows or any other platform.
:param bool strip:
If True, removes additional slashes from the end of the path.
'''
path = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX)
if strip:
path = path.rstrip(SEPARATOR_UNIX)
return path
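# Illustrative behavior (a sketch, not executed on import):
#
#   StandardizePath('a\\b\\c\\')              -> 'a/b/c/'
#   StandardizePath('a\\b\\c\\', strip=True)  -> 'a/b/c'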
#===================================================================================================
# NormStandardPath
#===================================================================================================
def NormStandardPath(path):
'''
Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path.
Normalize:
Removes any local references in the path "/../"
StandardPath:
        We are defining that the standard-path is the one with only forward slashes in it,
        either on Windows or any other platform.
'''
import posixpath
if path.endswith('/'):
slash = '/'
else:
slash = ''
return posixpath.normpath(path) + slash
#===================================================================================================
# CreateMD5
#===================================================================================================
def CreateMD5(source_filename, target_filename=None):
'''
Creates a md5 file from a source file (contents are the md5 hash of source file)
:param unicode source_filename:
Path to source file
:type target_filename: unicode or None
:param target_filename:
Name of the target file with the md5 contents
If None, defaults to source_filename + '.md5'
'''
if target_filename is None:
target_filename = source_filename + '.md5'
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_filename)
# Obtain MD5 hex
if _UrlIsLocal(source_url):
# If using a local file, we can give Md5Hex the filename
md5_contents = Md5Hex(filename=source_filename)
else:
# Md5Hex can't handle remote files, we open it and pray we won't run out of memory.
md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True))
# Write MD5 hash to a file
CreateFile(target_filename, md5_contents)
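# Illustrative usage (a sketch; the filename is hypothetical):
#
#   CreateMD5('package.zip')  # writes the md5 hash to 'package.zip.md5'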
MD5_SKIP = 'md5_skip' # Returned to show that a file copy was skipped because it hasn't changed.
#===================================================================================================
# CopyFile
#===================================================================================================
def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True):
'''
Copy a file from source to target.
:param source_filename:
@see _DoCopyFile
:param target_filename:
@see _DoCopyFile
:param bool md5_check:
If True, checks md5 files (of both source and target files), if they match, skip this copy
and return MD5_SKIP
Md5 files are assumed to be {source, target} + '.md5'
If any file is missing (source, target or md5), the copy will always be made.
:param copy_symlink:
@see _DoCopyFile
:raises FileAlreadyExistsError:
If target_filename already exists, and override is False
:raises NotImplementedProtocol:
If file protocol is not accepted
Protocols allowed are:
source_filename: local, ftp, http
target_filename: local, ftp
:rtype: None | MD5_SKIP
:returns:
MD5_SKIP if the file was not copied because there was a matching .md5 file
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from ._exceptions import FileNotFoundError
# Check override
if not override and Exists(target_filename):
from ._exceptions import FileAlreadyExistsError
raise FileAlreadyExistsError(target_filename)
# Don't do md5 check for md5 files themselves.
md5_check = md5_check and not target_filename.endswith('.md5')
# If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents.
if md5_check:
source_md5_filename = source_filename + '.md5'
target_md5_filename = target_filename + '.md5'
try:
source_md5_contents = GetFileContents(source_md5_filename)
except FileNotFoundError:
source_md5_contents = None
try:
target_md5_contents = GetFileContents(target_md5_filename)
except FileNotFoundError:
target_md5_contents = None
if source_md5_contents is not None and \
source_md5_contents == target_md5_contents and \
Exists(target_filename):
return MD5_SKIP
# Copy source file
_DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink)
# If we have a source_md5, but no target_md5, create the target_md5 file
if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents:
CreateFile(target_md5_filename, source_md5_contents)
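# Illustrative md5-aware copy (a sketch; filenames are hypothetical):
#
#   result = CopyFile('source.zip', 'target.zip', md5_check=True)
#   if result == MD5_SKIP:
#       # Matching .md5 files were found, so the copy was skipped.
#       pass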
def _DoCopyFile(source_filename, target_filename, copy_symlink=True):
'''
:param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
If source_filename does not exist
'''
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_filename)
target_url = urlparse(target_filename)
if _UrlIsLocal(source_url):
if not Exists(source_filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(source_filename)
if _UrlIsLocal(target_url):
# local to local
_CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)
elif target_url.scheme in ['ftp']:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
elif source_url.scheme in ['http', 'https', 'ftp']:
if _UrlIsLocal(target_url):
# HTTP/FTP to local
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
# HTTP/FTP to other ==> NotImplemented
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol # @Reimport
raise NotImplementedProtocol(source_url.scheme)
def _CopyFileLocal(source_filename, target_filename, copy_symlink=True):
'''
    Copy a file locally.
:param unicode source_filename:
The filename to copy from.
:param unicode target_filename:
The filename to copy to.
:param bool copy_symlink:
If True and source_filename is a symlink, target_filename will also be created as
a symlink.
If False, the file being linked will be copied instead.
'''
import shutil
try:
# >>> Create the target_filename directory if necessary
dir_name = os.path.dirname(target_filename)
if dir_name and not os.path.isdir(dir_name):
os.makedirs(dir_name)
if copy_symlink and IsLink(source_filename):
# >>> Delete the target_filename if it already exists
if os.path.isfile(target_filename) or IsLink(target_filename):
DeleteFile(target_filename)
# >>> Obtain the relative path from link to source_filename (linkto)
source_filename = ReadLink(source_filename)
CreateLink(source_filename, target_filename)
else:
# shutil can't copy links in Windows, so we must find the real file manually
if sys.platform == 'win32':
while IsLink(source_filename):
link = ReadLink(source_filename)
if os.path.isabs(link):
source_filename = link
else:
source_filename = os.path.join(os.path.dirname(source_filename), link)
shutil.copyfile(source_filename, target_filename)
shutil.copymode(source_filename, target_filename)
except Exception as e:
        reraise(e, 'While executing filesystem._CopyFileLocal(%s, %s)' % (source_filename, target_filename))
#===================================================================================================
# CopyFiles
#===================================================================================================
def CopyFiles(source_dir, target_dir, create_target_dir=False, md5_check=False):
'''
Copy files from the given source to the target.
:param unicode source_dir:
A filename, URL or a file mask.
Ex.
x:\coilib50
x:\coilib50\*
http://server/directory/file
ftp://server/directory/file
:param unicode target_dir:
A directory or an URL
Ex.
d:\Temp
ftp://server/directory
:param bool create_target_dir:
        If True, creates the target path if it doesn't exist.
:param bool md5_check:
.. seealso:: CopyFile
:raises DirectoryNotFoundError:
If target_dir does not exist, and create_target_dir is False
.. seealso:: CopyFile for documentation on accepted protocols
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
import fnmatch
# Check if we were given a directory or a directory with mask
if IsDir(source_dir):
# Yes, it's a directory, copy everything from it
source_mask = '*'
else:
# Split directory and mask
source_dir, source_mask = os.path.split(source_dir)
# Create directory if necessary
if not IsDir(target_dir):
if create_target_dir:
CreateDirectory(target_dir)
else:
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(target_dir)
# List and match files
filenames = ListFiles(source_dir)
# Check if we have a source directory
if filenames is None:
return
# Copy files
for i_filename in filenames:
if md5_check and i_filename.endswith('.md5'):
continue # md5 files will be copied by CopyFile when copying their associated files
if fnmatch.fnmatch(i_filename, source_mask):
source_path = source_dir + '/' + i_filename
target_path = target_dir + '/' + i_filename
if IsDir(source_path):
# If we found a directory, copy it recursively
CopyFiles(source_path, target_path, create_target_dir=True, md5_check=md5_check)
else:
CopyFile(source_path, target_path, md5_check=md5_check)
#===================================================================================================
# CopyFilesX
#===================================================================================================
def CopyFilesX(file_mapping):
'''
Copies files into directories, according to a file mapping
:param list(tuple(unicode,unicode)) file_mapping:
        A list of mappings between the target directory and the source path mask.
For syntax, @see: ExtendedPathMask
:rtype: list(tuple(unicode,unicode))
:returns:
List of files copied. (source_filename, target_filename)
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
# List files that match the mapping
files = []
for i_target_path, i_source_path_mask in file_mapping:
tree_recurse, flat_recurse, dirname, in_filters, out_filters = ExtendedPathMask.Split(i_source_path_mask)
_AssertIsLocal(dirname)
filenames = FindFiles(dirname, in_filters, out_filters, tree_recurse)
for i_source_filename in filenames:
if os.path.isdir(i_source_filename):
continue # Do not copy dirs
i_target_filename = i_source_filename[len(dirname) + 1:]
if flat_recurse:
i_target_filename = os.path.basename(i_target_filename)
i_target_filename = os.path.join(i_target_path, i_target_filename)
files.append((
StandardizePath(i_source_filename),
StandardizePath(i_target_filename)
))
# Copy files
for i_source_filename, i_target_filename in files:
# Create target dir if necessary
target_dir = os.path.dirname(i_target_filename)
CreateDirectory(target_dir)
CopyFile(i_source_filename, i_target_filename)
return files
#===================================================================================================
# IsFile
#===================================================================================================
def IsFile(path):
'''
:param unicode path:
Path to a file (local or ftp)
:raises NotImplementedProtocol:
If checking for a non-local, non-ftp file
:rtype: bool
:returns:
True if the file exists
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
url = urlparse(path)
if _UrlIsLocal(url):
if IsLink(path):
return IsFile(ReadLink(path))
return os.path.isfile(path)
elif url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(url.scheme)
def GetDriveType(path):
'''
Determine the type of drive, which can be one of the following values:
DRIVE_UNKNOWN = 0
The drive type cannot be determined.
DRIVE_NO_ROOT_DIR = 1
The root path is invalid; for example, there is no volume mounted at the specified path.
DRIVE_REMOVABLE = 2
The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.
DRIVE_FIXED = 3
The drive has fixed media; for example, a hard disk drive or flash drive.
DRIVE_REMOTE = 4
The drive is a remote (network) drive.
DRIVE_CDROM = 5
The drive is a CD-ROM drive.
DRIVE_RAMDISK = 6
The drive is a RAM disk
:note:
The implementation is valid only for Windows OS
Linux will always return DRIVE_UNKNOWN
:param path:
Path to a file or directory
'''
    if sys.platform == 'win32':
        if IsFile(path):
            path = os.path.dirname(path)
        import ctypes
        kernel32 = ctypes.windll.kernel32
        # A trailing backslash is required.
        return kernel32.GetDriveTypeW(ctypes.c_wchar_p(path + '\\'))
    else:
        return DRIVE_UNKNOWN
#===================================================================================================
# IsDir
#===================================================================================================
def IsDir(directory):
'''
:param unicode directory:
A path
:rtype: bool
:returns:
Returns whether the given path points to an existent directory.
:raises NotImplementedProtocol:
If the path protocol is not local or ftp
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
if _UrlIsLocal(directory_url):
return os.path.isdir(directory)
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
#===================================================================================================
# Exists
#===================================================================================================
def Exists(path):
'''
:rtype: bool
:returns:
True if the path already exists (either a file or a directory)
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
path_url = urlparse(path)
# Handle local
if _UrlIsLocal(path_url):
return IsFile(path) or IsDir(path) or IsLink(path)
return IsFile(path) or IsDir(path)
#===================================================================================================
# CopyDirectory
#===================================================================================================
def CopyDirectory(source_dir, target_dir, override=False):
'''
Recursively copy a directory tree.
:param unicode source_dir:
Where files will come from
:param unicode target_dir:
Where files will go to
:param bool override:
If True and target_dir already exists, it will be deleted before copying.
:raises NotImplementedForRemotePathError:
If trying to copy to/from remote directories
'''
_AssertIsLocal(source_dir)
_AssertIsLocal(target_dir)
if override and IsDir(target_dir):
DeleteDirectory(target_dir, skip_on_error=False)
import shutil
shutil.copytree(source_dir, target_dir)
#===================================================================================================
# DeleteFile
#===================================================================================================
def DeleteFile(target_filename):
'''
Deletes the given local filename.
.. note:: If file doesn't exist this method has no effect.
:param unicode target_filename:
A local filename
:raises NotImplementedForRemotePathError:
If trying to delete a non-local path
:raises FileOnlyActionError:
Raised when filename refers to a directory.
'''
_AssertIsLocal(target_filename)
try:
if IsLink(target_filename):
DeleteLink(target_filename)
elif IsFile(target_filename):
os.remove(target_filename)
elif IsDir(target_filename):
from ._exceptions import FileOnlyActionError
raise FileOnlyActionError(target_filename)
except Exception as e:
reraise(e, 'While executing filesystem.DeleteFile(%s)' % (target_filename))
#===================================================================================================
# AppendToFile
#===================================================================================================
def AppendToFile(filename, contents, eol_style=EOL_STYLE_NATIVE, encoding=None, binary=False):
'''
Appends content to a local file.
:param unicode filename:
:param unicode contents:
:type eol_style: EOL_STYLE_XXX constant
:param eol_style:
Replaces the EOL by the appropriate EOL depending on the eol_style value.
Considers that all content is using only "\n" as EOL.
:param unicode encoding:
Target file's content encoding.
Defaults to sys.getfilesystemencoding()
:param bool binary:
If True, content is appended in binary mode. In this case, `contents` must be `bytes` and not
`unicode`
:raises NotImplementedForRemotePathError:
If trying to modify a non-local path
:raises ValueError:
If trying to mix unicode `contents` without `encoding`, or `encoding` without
unicode `contents`
'''
_AssertIsLocal(filename)
assert isinstance(contents, six.text_type) ^ binary, 'Must always receive unicode contents, unless binary=True'
if not binary:
# Replaces eol on each line by the given eol_style.
contents = _HandleContentsEol(contents, eol_style)
# Handle encoding here, and always write in binary mode. We can't use io.open because it
# tries to do its own line ending handling.
contents = contents.encode(encoding or sys.getfilesystemencoding())
    with open(filename, 'ab') as oss:
        oss.write(contents)
#===================================================================================================
# MoveFile
#===================================================================================================
def MoveFile(source_filename, target_filename):
'''
Moves a file.
:param unicode source_filename:
:param unicode target_filename:
:raises NotImplementedForRemotePathError:
If trying to operate with non-local files.
'''
_AssertIsLocal(source_filename)
_AssertIsLocal(target_filename)
import shutil
shutil.move(source_filename, target_filename)
#===================================================================================================
# MoveDirectory
#===================================================================================================
def MoveDirectory(source_dir, target_dir):
'''
Moves a directory.
:param unicode source_dir:
:param unicode target_dir:
:raises NotImplementedError:
If trying to move anything other than:
Local dir -> local dir
FTP dir -> FTP dir (same host)
'''
if not IsDir(source_dir):
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(source_dir)
if Exists(target_dir):
from ._exceptions import DirectoryAlreadyExistsError
raise DirectoryAlreadyExistsError(target_dir)
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_dir)
target_url = urlparse(target_dir)
# Local to local
if _UrlIsLocal(source_url) and _UrlIsLocal(target_url):
import shutil
shutil.move(source_dir, target_dir)
# FTP to FTP
elif source_url.scheme == 'ftp' and target_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
raise NotImplementedError('Can only move directories local->local or ftp->ftp')
#===================================================================================================
# GetFileContents
#===================================================================================================
def GetFileContents(filename, binary=False, encoding=None, newline=None):
'''
Reads a file and returns its contents. Works for both local and remote files.
:param unicode filename:
:param bool binary:
        If True, returns the file as-is, ignoring any EOL conversion.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:returns str|unicode:
The file's contents.
Returns unicode string when `encoding` is not None.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)
try:
contents = source_file.read()
finally:
source_file.close()
return contents
#===================================================================================================
# GetFileLines
#===================================================================================================
def GetFileLines(filename, newline=None, encoding=None):
'''
Reads a file and returns its contents as a list of lines. Works for both local and remote files.
:param unicode filename:
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:returns list(unicode):
The file's lines
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
return GetFileContents(
filename,
binary=False,
encoding=encoding,
newline=newline,
).split('\n')
def OpenFile(filename, binary=False, newline=None, encoding=None):
'''
Open a file and returns it.
Consider the possibility of a remote file (HTTP, HTTPS, FTP)
:param unicode filename:
Local or remote filename.
:param bool binary:
        If True, returns the file as-is, ignoring any EOL conversion.
        If True, the `newline` parameter is ignored.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:returns file:
The open file, it must be closed by the caller
    :raises FileNotFoundError:
        When the given filename cannot be found
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
filename_url = urlparse(filename)
# Check if file is local
if _UrlIsLocal(filename_url):
if not os.path.isfile(filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(filename)
mode = 'rb' if binary else 'r'
return io.open(filename, mode, encoding=encoding, newline=newline)
# Not local
from ._exceptions import NotImplementedProtocol
    raise NotImplementedProtocol(filename_url.scheme)
#===================================================================================================
# ListFiles
#===================================================================================================
def ListFiles(directory):
'''
Lists the files in the given directory
    :param unicode directory:
        A directory or URL
    :rtype: list(unicode)
    :returns:
        List of filenames/directories found in the given directory.
        Returns None if the given directory does not exist.
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.isdir(directory):
return None
return os.listdir(directory)
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
#===================================================================================================
# CheckIsFile
#===================================================================================================
def CheckIsFile(filename):
'''
Check if the given file exists.
    :param unicode filename:
        The filename to check for existence.
    :raises FileNotFoundError:
        Raised if the file does not exist.
'''
if not IsFile(filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(filename)
#===================================================================================================
# CheckIsDir
#===================================================================================================
def CheckIsDir(directory):
'''
Check if the given directory exists.
    :param unicode directory:
        Path to a directory being checked for existence.
    :raises DirectoryNotFoundError:
        Raised if the directory does not exist.
'''
if not IsDir(directory):
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(directory)
#===================================================================================================
# CreateFile
#===================================================================================================
def CreateFile(filename, contents, eol_style=EOL_STYLE_NATIVE, create_dir=True, encoding=None, binary=False):
'''
Create a file with the given contents.
:param unicode filename:
Filename and path to be created.
:param unicode contents:
The file contents as a string.
:type eol_style: EOL_STYLE_XXX constant
:param eol_style:
Replaces the EOL by the appropriate EOL depending on the eol_style value.
Considers that all content is using only "\n" as EOL.
:param bool create_dir:
If True, also creates directories needed in filename's path
:param unicode encoding:
Target file's content encoding. Defaults to sys.getfilesystemencoding()
Ignored if `binary` = True
:param bool binary:
If True, file is created in binary mode. In this case, `contents` must be `bytes` and not
`unicode`
:return unicode:
Returns the name of the file created.
:raises NotImplementedProtocol:
If file protocol is not local or FTP
:raises ValueError:
If trying to mix unicode `contents` without `encoding`, or `encoding` without
unicode `contents`
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
# Lots of checks when writing binary files
if binary:
if isinstance(contents, six.text_type):
raise TypeError('contents must be str (bytes) when binary=True')
else:
if not isinstance(contents, six.text_type):
raise TypeError('contents must be unicode when binary=False')
# Replaces eol on each line by the given eol_style.
contents = _HandleContentsEol(contents, eol_style)
# Encode string and pretend we are using binary to prevent 'open' from automatically
# changing Eols
encoding = encoding or sys.getfilesystemencoding()
contents = contents.encode(encoding)
binary = True
# If asked, creates directory containing file
if create_dir:
dirname = os.path.dirname(filename)
if dirname:
CreateDirectory(dirname)
from six.moves.urllib.parse import urlparse
filename_url = urlparse(filename)
# Handle local
if _UrlIsLocal(filename_url):
# Always writing as binary (see handling above)
with open(filename, 'wb') as oss:
oss.write(contents)
# Handle FTP
elif filename_url.scheme == 'ftp':
# Always writing as binary (see handling above)
from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(filename_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(filename_url.scheme)
return filename
def ReplaceInFile(filename, old, new, encoding=None):
'''
Replaces all occurrences of "old" by "new" in the given file.
:param unicode filename:
The name of the file.
:param unicode old:
The string to search for.
:param unicode new:
Replacement string.
:return unicode:
The new contents of the file.
'''
contents = GetFileContents(filename, encoding=encoding)
contents = contents.replace(old, new)
CreateFile(filename, contents, encoding=encoding)
return contents
#===================================================================================================
# CreateDirectory
#===================================================================================================
def CreateDirectory(directory):
'''
Create directory including any missing intermediate directory.
:param unicode directory:
:return unicode|urlparse.ParseResult:
Returns the created directory or url (see urlparse).
:raises NotImplementedProtocol:
If protocol is not local or FTP.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.exists(directory):
os.makedirs(directory)
return directory
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
#===================================================================================================
# CreateTemporaryDirectory
#===================================================================================================
class CreateTemporaryDirectory(object):
'''
    Context manager to create a temporary directory and remove it at the context end.
:ivar unicode dirname:
Name of the created directory
'''
def __init__(self, suffix='', prefix='tmp', base_dir=None, maximum_attempts=100):
'''
:param unicode suffix:
A suffix to add in the name of the created directory
:param unicode prefix:
A prefix to add in the name of the created directory
:param unicode base_dir:
A path to use as base in the created directory (if any). The temp directory will be a
child of the given base dir
        :param int maximum_attempts:
The maximum number of attempts to obtain the temp dir name.
'''
self.suffix = suffix
self.prefix = prefix
self.base_dir = base_dir
self.maximum_attempts = maximum_attempts
self.dirname = None
def __enter__(self):
'''
:return unicode:
            The path to the created temp directory.
'''
if self.base_dir is None:
            # If no base directory was given, create a dir in the system temp area
import tempfile
self.dirname = tempfile.mkdtemp(self.suffix, self.prefix)
return self.dirname
# Listing the files found in the base dir
existing_files = set(ListFiles(self.base_dir))
# If a base dir was given, let us generate a unique directory name there and use it
for random_component in IterHashes(iterator_size=self.maximum_attempts):
candidate_name = '%stemp_dir_%s%s' % (self.prefix, random_component, self.suffix)
candidate_path = os.path.join(self.base_dir, candidate_name)
            if candidate_name not in existing_files:
CreateDirectory(candidate_path)
self.dirname = candidate_path
return self.dirname
raise RuntimeError(
'It was not possible to obtain a temporary dirname from %s' % self.base_dir)
def __exit__(self, *args):
if self.dirname is not None:
DeleteDirectory(self.dirname, skip_on_error=True)
#===================================================================================================
# CreateTemporaryFile
#===================================================================================================
class CreateTemporaryFile(object):
'''
    Context manager to create a temporary file and remove it at the context end.
:ivar unicode filename:
Name of the created file
'''
def __init__(
self,
contents,
eol_style=EOL_STYLE_NATIVE,
encoding=None,
suffix='',
prefix='tmp',
base_dir=None,
maximum_attempts=100):
'''
:param contents: .. seealso:: CreateFile
:param eol_style: .. seealso:: CreateFile
:param encoding: .. seealso:: CreateFile
:param unicode suffix:
A suffix to add in the name of the created file
:param unicode prefix:
A prefix to add in the name of the created file
:param unicode base_dir:
A path to use as base in the created file. Uses temp dir if not given.
        :param int maximum_attempts:
The maximum number of attempts to obtain the temp file name.
'''
import tempfile
self.contents = contents
self.eol_style = eol_style
self.encoding = encoding
self.suffix = suffix
self.prefix = prefix
self.base_dir = base_dir or tempfile.gettempdir()
self.maximum_attempts = maximum_attempts
self.filename = None
def __enter__(self):
'''
:return unicode:
The path to the created temp file.
'''
from ._exceptions import FileAlreadyExistsError
for random_component in IterHashes(iterator_size=self.maximum_attempts):
filename = os.path.join(self.base_dir, self.prefix + random_component + self.suffix)
try:
CreateFile(
filename=filename,
contents=self.contents,
eol_style=self.eol_style,
encoding=self.encoding,
)
self.filename = filename
return filename
except FileAlreadyExistsError:
pass
raise RuntimeError('It was not possible to obtain a temporary filename in "%s"' % self.base_dir)
def __exit__(self, *args):
if self.filename is not None:
DeleteFile(self.filename)
#===================================================================================================
# DeleteDirectory
#===================================================================================================
def DeleteDirectory(directory, skip_on_error=False):
'''
Deletes a directory.
:param unicode directory:
:param bool skip_on_error:
If True, ignore any errors when trying to delete directory (for example, directory not
found)
:raises NotImplementedForRemotePathError:
If trying to delete a remote directory.
'''
_AssertIsLocal(directory)
import shutil
def OnError(fn, path, excinfo):
'''
        Remove the read-only flag and try to remove again.
        On Windows, rmtree fails when trying to remove a read-only file. This fixes it!
        Another case: read-only directories return True in the os.access test. It seems that
        read-only directories have their own flag (see the folder's Properties window in Explorer).
'''
if IsLink(path):
return
if fn is os.remove and os.access(path, os.W_OK):
raise
# Make the file WRITEABLE and executes the original delete function (osfunc)
import stat
os.chmod(path, stat.S_IWRITE)
fn(path)
try:
if not os.path.isdir(directory):
if skip_on_error:
return
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(directory)
shutil.rmtree(directory, onerror=OnError)
except:
if not skip_on_error:
raise # Raise only if we are not skipping on error
#===================================================================================================
# GetMTime
#===================================================================================================
def GetMTime(path):
'''
:param unicode path:
Path to file or directory
:rtype: float
:returns:
Modification time for path.
If this is a directory, the highest mtime from files inside it will be returned.
@note:
In some Linux distros (such as CentOs, or anything with ext3), mtime will not return a value
with resolutions higher than a second.
http://stackoverflow.com/questions/2428556/os-path-getmtime-doesnt-return-fraction-of-a-second
'''
_AssertIsLocal(path)
if os.path.isdir(path):
files = FindFiles(path)
if len(files) > 0:
return max(map(os.path.getmtime, files))
return os.path.getmtime(path)
#===================================================================================================
# ListMappedNetworkDrives
#===================================================================================================
def ListMappedNetworkDrives():
'''
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
        For each mapped network drive, returns a 3-value tuple:
        - the local drive
        - the remote path
- True if the mapping is enabled (warning: not reliable)
'''
if sys.platform != 'win32':
raise NotImplementedError
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
        match = re.match(r"(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK'))
return drives_list
#===================================================================================================
# DeleteLink
#===================================================================================================
def DeleteLink(path):
if sys.platform != 'win32':
os.unlink(path)
else:
from ._easyfs_win32 import RemoveDirectory as _RemoveDirectory, DeleteFile as _DeleteFile
if IsDir(path):
_RemoveDirectory(path)
else:
_DeleteFile(path)
#===================================================================================================
# CreateLink
#===================================================================================================
def CreateLink(target_path, link_path, override=True):
'''
Create a symbolic link at `link_path` pointing to `target_path`.
:param unicode target_path:
Link target
:param unicode link_path:
Fullpath to link name
:param bool override:
If True and `link_path` already exists as a link, that link is overridden.
'''
_AssertIsLocal(target_path)
_AssertIsLocal(link_path)
if override and IsLink(link_path):
DeleteLink(link_path)
# Create directories leading up to link
dirname = os.path.dirname(link_path)
if dirname:
CreateDirectory(dirname)
if sys.platform != 'win32':
return os.symlink(target_path, link_path) # @UndefinedVariable
else:
#import ntfsutils.junction
#return ntfsutils.junction.create(target_path, link_path)
import jaraco.windows.filesystem
return jaraco.windows.filesystem.symlink(target_path, link_path)
#===================================================================================================
# IsLink
#===================================================================================================
def IsLink(path):
'''
:param unicode path:
Path being tested
:returns bool:
True if `path` is a link
'''
_AssertIsLocal(path)
if sys.platform != 'win32':
return os.path.islink(path)
import jaraco.windows.filesystem
return jaraco.windows.filesystem.islink(path)
#===================================================================================================
# ReadLink
#===================================================================================================
def ReadLink(path):
'''
Read the target of the symbolic link at `path`.
:param unicode path:
Path to a symbolic link
:returns unicode:
Target of a symbolic link
'''
_AssertIsLocal(path)
if sys.platform != 'win32':
return os.readlink(path) # @UndefinedVariable
if not IsLink(path):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(path)
import jaraco.windows.filesystem
result = jaraco.windows.filesystem.readlink(path)
if '\\??\\' in result:
result = result.split('\\??\\')[1]
return result
#===================================================================================================
# Internal functions
#===================================================================================================
def _UrlIsLocal(directory_url):
'''
:param ParseResult directory_url:
A parsed url as returned by urlparse.urlparse.
:rtype: bool
:returns:
Returns whether the given url refers to a local path.
.. note:: The "directory_url.scheme" is the drive letter for a local path on Windows and an empty string
for a local path on Linux. The other possible values are "http", "ftp", etc. So, checking if
the length is less than 2 characters long checks that the url is local.
'''
return len(directory_url.scheme) < 2
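# Illustrative behaviour of _UrlIsLocal (a sketch with hypothetical paths, not a doctest):
#   _UrlIsLocal(urlparse('x:/dir/file.txt'))  -> True   (scheme is the drive letter 'x')
#   _UrlIsLocal(urlparse('/usr/local/file'))  -> True   (empty scheme)
#   _UrlIsLocal(urlparse('ftp://host/file'))  -> False  (scheme 'ftp')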
def _AssertIsLocal(path):
'''
Checks if a given path is local, raise an exception if not.
This is used in filesystem functions that do not support remote operations yet.
:param unicode path:
:raises NotImplementedForRemotePathError:
If the given path is not local
'''
from six.moves.urllib.parse import urlparse
if not _UrlIsLocal(urlparse(path)):
from ._exceptions import NotImplementedForRemotePathError
raise NotImplementedForRemotePathError
def _HandleContentsEol(contents, eol_style):
'''
Replaces eol on each line by the given eol_style.
:param unicode contents:
:type eol_style: EOL_STYLE_XXX constant
:param eol_style:
'''
if eol_style == EOL_STYLE_NONE:
return contents
if eol_style == EOL_STYLE_UNIX:
return contents.replace('\r\n', eol_style).replace('\r', eol_style)
if eol_style == EOL_STYLE_MAC:
return contents.replace('\r\n', eol_style).replace('\n', eol_style)
if eol_style == EOL_STYLE_WINDOWS:
return contents.replace('\r\n', '\n').replace('\r', '\n').replace('\n', EOL_STYLE_WINDOWS)
raise ValueError('Unexpected eol style: %r' % (eol_style,))
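# Illustrative behaviour, assuming EOL_STYLE_WINDOWS == '\r\n' (a sketch, not a doctest):
#   _HandleContentsEol('a\nb\r\nc\r', EOL_STYLE_WINDOWS)  ->  'a\r\nb\r\nc\r\n'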
def _CallWindowsNetCommand(parameters):
'''
Call Windows NET command, used to acquire/configure network services settings.
:param parameters: list of command line parameters
:return: command output
'''
import subprocess
popen = subprocess.Popen(["net"] + parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
if stderrdata:
raise OSError("Failed on call net.exe: %s" % stderrdata)
return stdoutdata
#===================================================================================================
# ExtendedPathMask
#===================================================================================================
class ExtendedPathMask(object):
'''
This class is a place-holder for functions that handle the extended path mask.
Extended Path Mask
------------------
The extended path mask is a file search path description used to find files based on the filename.
This extended path mask includes the following features:
- Recursive search (prefix with a "+" sign)
- The possibility of adding more than one filter to match files (separated by ";")
    - The possibility of negating a mask (prefix the mask with "!").
The extended path mask has the following syntax:
[+|-]<path>/<filter>(;<filter>)*
Where:
+ : recursive and copy-tree flag
- : recursive and copy-flat flag (copy files to the target directory with no tree structure)
<path> : a usual path, using '/' as separator
<filter> : A filename filter, as used in dir command:
Ex:
*.zip;*.rar
units.txt;*.ini
*.txt;!*-002.txt
'''
@classmethod
def Split(cls, extended_path_mask):
'''
Splits the given path into their components: recursive, dirname, in_filters and out_filters
        :param str extended_path_mask:
The "extended path mask" to split
:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
Returns the extended path 5 components:
- The tree-recurse flag
- The flat-recurse flag
- The actual path
- A list of masks to include
- A list of masks to exclude
'''
import os.path
r_tree_recurse = extended_path_mask[0] in '+-'
r_flat_recurse = extended_path_mask[0] in '-'
r_dirname, r_filters = os.path.split(extended_path_mask)
if r_tree_recurse:
r_dirname = r_dirname[1:]
filters = r_filters.split(';')
r_in_filters = [i for i in filters if not i.startswith('!')]
r_out_filters = [i[1:] for i in filters if i.startswith('!')]
return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters
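    # Illustrative split of a hypothetical mask (a sketch, not a doctest):
    #   ExtendedPathMask.Split('+source/python/*.py;!*_test.py')
    #   -> (True, False, 'source/python', ['*.py'], ['*_test.py'])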
#===================================================================================================
# CheckForUpdate
#===================================================================================================
def CheckForUpdate(source, target):
'''
Checks if the given target filename should be re-generated because the source has changed.
:param source: the source filename.
:param target: the target filename.
:return bool:
True if the target is out-dated, False otherwise.
'''
return \
not os.path.isfile(target) or \
os.path.getmtime(source) > os.path.getmtime(target)
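# Illustrative usage (the filenames and the regeneration step are hypothetical):
#   if CheckForUpdate('input.txt', 'output.txt'):
#       CreateFile('output.txt', GetFileContents('input.txt').upper())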
#===================================================================================================
# MatchMasks
#===================================================================================================
def MatchMasks(filename, masks):
'''
    Verifies whether a filename matches any of the given patterns.
:param str filename: The filename to match.
:param list(str) masks: The patterns to search in the filename.
:return bool:
        True if the filename matched at least one pattern, False otherwise.
'''
import fnmatch
if not isinstance(masks, (list, tuple)):
masks = [masks]
for i_mask in masks:
if fnmatch.fnmatch(filename, i_mask):
return True
return False
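# Illustrative behaviour (a sketch, not a doctest):
#   MatchMasks('module_test.py', ['*.py'])            -> True
#   MatchMasks('module_test.py', ['*.txt', '*.rst'])  -> False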
#===================================================================================================
# FindFiles
#===================================================================================================
def FindFiles(dir_, in_filters=None, out_filters=None, recursive=True, include_root_dir=True, standard_paths=False):
'''
Searches for files in a given directory that match with the given patterns.
:param str dir_: the directory root, to search the files.
:param list(str) in_filters: a list with patterns to match (default = all). E.g.: ['*.py']
    :param list(str) out_filters: a list with patterns to ignore (default = none). E.g.: ['*.pyc']
:param bool recursive: if True search in subdirectories, otherwise, just in the root.
:param bool include_root_dir: if True, includes the directory being searched in the returned paths
:param bool standard_paths: if True, always uses unix path separators "/"
:return list(str):
A list of strings with the files that matched (with the full path in the filesystem).
'''
# all files
if in_filters is None:
in_filters = ['*']
if out_filters is None:
out_filters = []
result = []
    # Walk through all directories under dir_, keeping only the names that match in_filters
    # and do not match out_filters.
for dir_root, directories, filenames in os.walk(dir_):
for i_directory in directories[:]:
if MatchMasks(i_directory, out_filters):
directories.remove(i_directory)
for filename in directories + filenames:
if MatchMasks(filename, in_filters) and not MatchMasks(filename, out_filters):
result.append(os.path.join(dir_root, filename))
if not recursive:
break
if not include_root_dir:
# Remove root dir from all paths
dir_prefix = len(dir_) + 1
        result = [i_file[dir_prefix:] for i_file in result]
if standard_paths:
        result = list(map(StandardizePath, result))
return result
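# Illustrative usage (the directory layout is hypothetical):
#   FindFiles('src', in_filters=['*.py'], out_filters=['*_test.py'])
#   -> ['src/setup.py', 'src/package/module.py']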
#===================================================================================================
# ExpandUser
#===================================================================================================
def ExpandUser(path):
'''
os.path.expanduser wrapper, necessary because it cannot handle unicode strings properly.
This is not necessary in Python 3.
:param path:
.. seealso:: os.path.expanduser
'''
if six.PY2:
encoding = sys.getfilesystemencoding()
path = path.encode(encoding)
result = os.path.expanduser(path)
if six.PY2:
result = result.decode(encoding)
return result
#===================================================================================================
# DumpDirHashToStringIO
#===================================================================================================
def DumpDirHashToStringIO(directory, stringio, base='', exclude=None, include=None):
'''
Helper to iterate over the files in a directory putting those in the passed StringIO in ini
format.
:param unicode directory:
The directory for which the hash should be done.
:param StringIO stringio:
The string to which the dump should be put.
:param unicode base:
If provided should be added (along with a '/') before the name=hash of file.
:param unicode exclude:
Pattern to match files to exclude from the hashing. E.g.: *.gz
:param unicode include:
Pattern to match files to include in the hashing. E.g.: *.zip
'''
import fnmatch
import os
files = [(os.path.join(directory, i), i) for i in os.listdir(directory)]
files = [i for i in files if os.path.isfile(i[0])]
for fullname, filename in files:
if include is not None:
if not fnmatch.fnmatch(fullname, include):
continue
if exclude is not None:
if fnmatch.fnmatch(fullname, exclude):
continue
md5 = Md5Hex(fullname)
if base:
stringio.write('%s/%s=%s\n' % (base, filename, md5))
else:
stringio.write('%s=%s\n' % (filename, md5))
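# Illustrative usage (the directory name is hypothetical):
#   from io import StringIO
#   stream = StringIO()
#   DumpDirHashToStringIO('reports', stream, base='reports')
#   # stream.getvalue()  ->  'reports/a.txt=<md5-of-a>\nreports/b.txt=<md5-of-b>\n'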
#===================================================================================================
# Md5Hex
#===================================================================================================
def Md5Hex(filename=None, contents=None):
'''
:param unicode filename:
The file from which the md5 should be calculated. If the filename is given, the contents
should NOT be given.
    :param bytes contents:
The contents for which the md5 should be calculated. If the contents are given, the filename
should NOT be given.
:rtype: unicode
:returns:
Returns a string with the hex digest of the stream.
'''
import io
import hashlib
md5 = hashlib.md5()
if filename:
stream = io.open(filename, 'rb')
try:
while True:
data = stream.read(md5.block_size * 128)
if not data:
break
md5.update(data)
finally:
stream.close()
else:
md5.update(contents)
return six.text_type(md5.hexdigest())
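# Illustrative behaviour (a sketch, not a doctest):
#   Md5Hex(contents=b'hello')  ->  u'5d41402abc4b2a76b9719d911017c592'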
#===================================================================================================
# GetRandomHash
#===================================================================================================
def GetRandomHash(length=7):
'''
:param length:
Length of hash returned.
:return unicode:
A random hexadecimal hash of the given length
'''
import random
return ('%0' + six.text_type(length) + 'x') % random.randrange(16 ** length)
#===================================================================================================
# IterHashes
#===================================================================================================
def IterHashes(iterator_size, hash_length=7):
'''
Iterator for random hexadecimal hashes
:param iterator_size:
Amount of hashes return before this iterator stops.
Goes on forever if `iterator_size` is negative.
:param int hash_length:
Size of each hash returned.
:return generator(unicode):
'''
if not isinstance(iterator_size, int):
raise TypeError('iterator_size must be integer.')
count = 0
while count != iterator_size:
count += 1
yield GetRandomHash(hash_length)
#===================================================================================================
# PushPopItem
#===================================================================================================
@contextlib.contextmanager
def PushPopItem(obj, key, value):
'''
A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
        with PushPopItem(sys.modules, 'alpha', None):
            with pytest.raises(ImportError):
                import alpha
'''
if key in obj:
old_value = obj[key]
obj[key] = value
yield value
obj[key] = old_value
else:
obj[key] = value
yield value
del obj[key]
# class Kernel(object):
#
# @classmethod
# def get_file_attributes(cls, path):
# import ctypes
# func = ctypes.windll.kernel32.GetFileAttributesW
# func.argtypes = [ctypes.c_wchar_p]
# func.restype = ctypes.wintypes.DWORD
# return func(path)
#
# @classmethod
# def create_file(cls, path):
# import win32file
# import winioctlcon
#
# handle = win32file.CreateFile(
# path, # fileName
# win32file.GENERIC_READ, # desiredAccess
# 0, # shareMode
# None, # attributes
# win32file.OPEN_EXISTING, # creationDisposition
# win32file.FILE_FLAG_OPEN_REPARSE_POINT | win32file.FILE_FLAG_BACKUP_SEMANTICS, # flagsAndAttributes
# None # hTemplateFile
# )
#
# def create_file(cls, path):
# import ctypes
# func = ctypes.windll.kernel32.GetFileAttributesW
# func.argtypes = [ctypes.c_wchar_p]
# func.restype = ctypes.wintypes.DWORD
# return func(path)
#
# try:
# buf = win32file.DeviceIoControl(
# handle, # hFile
# winioctlcon.FSCTL_GET_REPARSE_POINT, # dwIoControlCode
# None, # data
# 1024, # readSize
# )
# buf = buf[20::2].encode(sys.getfilesystemencoding(), errors='replace')
# if '\\??\\' in buf:
# return StandardizePath(buf.split('\\??\\')[0])
# else:
# return StandardizePath(buf[:len(buf) / 2])
# finally:
# handle.Close() | zerotk.easyfs | /zerotk.easyfs-1.0.3.tar.gz/zerotk.easyfs-1.0.3/zerotk/easyfs/_easyfs.py | _easyfs.py |
import pytest
class _EmbedDataFixture(object):
def __init__(self, request):
from zerotk.easyfs import StandardizePath
module_name = request.module.__name__.split('.')[-1]
# source directory: same name as the name of the test's module
self._source_dir = request.fspath.dirname + '/' + module_name
# data-dir directory: same name as the name of the test's module
data_dir_basename = module_name.replace('test_', 'data_')
data_dir_basename = data_dir_basename.replace('_test', '_data')
self._data_dir = StandardizePath(request.fspath.dirname + '/' + data_dir_basename + '-' + request.function.__name__)
def create_data_dir(self):
from zerotk.easyfs import CopyDirectory, IsDir, CreateDirectory
if IsDir(self._source_dir):
CopyDirectory(self._source_dir, self._data_dir, override=False)
else:
CreateDirectory(self._data_dir)
def delete_data_dir(self):
from zerotk.easyfs import IsDir, DeleteDirectory
if IsDir(self._data_dir):
DeleteDirectory(self._data_dir)
def get_data_dir(self):
'''
:rtype: unicode
:returns:
            Returns the absolute path of the data directory to use, standardized by StandardizePath.
        @remarks:
            The data directory itself is created by the fixture setup, not by this method.
'''
return self._data_dir
def get_filename(self, *parts):
'''
Returns an absolute filename in the data-directory (standardized by StandardizePath).
        :param list(unicode) parts:
            Path parts. Each part is joined to form a path.
:rtype: unicode
:returns:
The full path prefixed with the data-directory.
@remarks:
            The data directory itself is created by the fixture setup, not by this method.
'''
from zerotk.easyfs import StandardizePath
result = [self._data_dir] + list(parts)
result = '/'.join(result)
return StandardizePath(result)
def __getitem__(self, index):
return self.get_filename(index)
def assert_equal_files(self, obtained_fn, expected_fn, fix_callback=lambda x:x, binary=False, encoding=None):
'''
Compare two files contents. If the files differ, show the diff and write a nice HTML
diff file into the data directory.
Searches for the filenames both inside and outside the data directory (in that order).
        :param unicode obtained_fn: basename of the obtained file inside the data directory, or a full path.
        :param unicode expected_fn: basename of the expected file inside the data directory, or a full path.
        :param bool binary:
            Treat both files as binary files.
            .. seealso:: zerotk.easyfs.GetFileContents
        :param unicode encoding:
            File's encoding. If not None, contents obtained from file will be decoded using this
            `encoding`.
        :param callable fix_callback:
            A callback to "fix" the contents of the obtained (first) file.
            This callback receives a list of strings (lines) and must also return a list of lines,
            changed as needed.
            The resulting lines will be used to compare with the contents of expected_fn.
'''
import os
from zerotk.easyfs import GetFileContents, GetFileLines
__tracebackhide__ = True
import io
def FindFile(filename):
# See if this path exists in the data dir
data_filename = self.get_filename(filename)
if os.path.isfile(data_filename):
return data_filename
# If not, we might have already received a full path
if os.path.isfile(filename):
return filename
# If we didn't find anything, raise an error
from ._exceptions import MultipleFilesNotFound
raise MultipleFilesNotFound([filename, data_filename])
obtained_fn = FindFile(obtained_fn)
expected_fn = FindFile(expected_fn)
if binary:
obtained_lines = GetFileContents(obtained_fn, binary=True)
expected_lines = GetFileContents(expected_fn, binary=True)
assert obtained_lines == expected_lines
else:
obtained_lines = fix_callback(GetFileLines(obtained_fn, encoding=encoding))
expected_lines = GetFileLines(expected_fn, encoding=encoding)
if obtained_lines != expected_lines:
html_fn = os.path.splitext(obtained_fn)[0] + '.diff.html'
html_diff = self._generate_html_diff(
expected_fn, expected_lines, obtained_fn, obtained_lines)
with io.open(html_fn, 'w') as f:
f.write(html_diff)
import difflib
diff = ['FILES DIFFER:', obtained_fn, expected_fn]
diff += ['HTML DIFF: %s' % html_fn]
diff += difflib.context_diff(obtained_lines, expected_lines)
raise AssertionError('\n'.join(diff) + '\n')
def _generate_html_diff(self, expected_fn, expected_lines, obtained_fn, obtained_lines):
"""
Returns a nice side-by-side diff of the given files, as a string.
"""
import difflib
differ = difflib.HtmlDiff()
return differ.make_file(
fromlines=expected_lines,
fromdesc=expected_fn,
tolines=obtained_lines,
todesc=obtained_fn,
)
@pytest.yield_fixture
def embed_data(request):
"""
Create a temporary directory with input data for the test.
    The directory contents are copied from a directory with the same name as the test module,
    located in the same directory as the test module.
"""
result = _EmbedDataFixture(request)
result.delete_data_dir()
result.create_data_dir()
yield result
result.delete_data_dir() | zerotk.easyfs | /zerotk.easyfs-1.0.3.tar.gz/zerotk.easyfs-1.0.3/zerotk/easyfs/fixtures.py | fixtures.py |
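# Illustrative usage of the `embed_data` fixture (module/function names are hypothetical).
# For a test function `test_report` in a module `test_example.py`, the data directory is
# copied from `test_example/` into `data_example-test_report/`:
#
#   def test_report(embed_data):
#       output_filename = embed_data['obtained_report.txt']
#       # ... write the report to output_filename ...
#       embed_data.assert_equal_files('obtained_report.txt', 'expected_report.txt')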
## jenkins-job-builder-pipeline
A plugin for [jenkins-job-builder](http://docs.openstack.org/infra/jenkins-job-builder) to support [pipeline](https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+Plugin) job generation.
Build Status: [](https://travis-ci.org/rusty-dev/jenkins-job-builder-pipeline)
#### Usage:
The plugin adds a new project type, `pipeline`, and a job definition field, `pipeline`.
There are two distinct styles of job definition.
Create a pipeline job with a DSL script:
```yaml
- job:
name: example-script
project-type: pipeline
pipeline:
script: |
# Your dsl script goes here.
node {
echo 'Hello world'
}
sandbox: true # Use groovy sandbox, false by default.
```
Create a pipeline job loading pipeline script from SCM.
```yaml
- job:
name: example-scm-script
project-type: pipeline
pipeline:
script-path: subdir/Jenkinsfile # path to pipeline script definition, "Jenkinsfile" by default.
scm: # normal scm definitions
- git:
branches:
          - '*/master'
url: '[email protected]:github-username/repository-name.git'
basedir: 'subdir'
skip-tag: true
wipe-workspace: false
```
The definition type is chosen automatically by detecting the presence of the `scm` field.
| zerotk.jenkins-job-builder-pipeline | /zerotk.jenkins-job-builder-pipeline-0.1.1.tar.gz/zerotk.jenkins-job-builder-pipeline-0.1.1/README.md | README.md |
README
======
Jenkins Job Builder takes simple descriptions of Jenkins_ jobs in YAML_ or JSON_
format and uses them to configure Jenkins. You can keep your job descriptions in
human readable text format in a version control system to make changes and
auditing easier. It also has a flexible template system, so creating many
similarly configured jobs is easy.
To install::
$ pip install --user jenkins-job-builder
Online documentation:
* http://docs.openstack.org/infra/jenkins-job-builder/
Developers
----------
Bug reports:
* https://storyboard.openstack.org/#!/project/723
Repository:
* https://git.openstack.org/cgit/openstack-infra/jenkins-job-builder
Cloning::
git clone https://git.openstack.org/openstack-infra/jenkins-job-builder
A virtual environment is recommended for development. For example, Jenkins
Job Builder may be installed from the top level directory::
$ virtualenv .venv
$ source .venv/bin/activate
$ pip install -r test-requirements.txt -e .
Patches are submitted via Gerrit at:
* https://review.openstack.org/
Please do not submit GitHub pull requests, they will be automatically closed.
More details on how you can contribute is available on our wiki at:
* http://docs.openstack.org/infra/manual/developers.html
Writing a patch
---------------
We ask that all code submissions be pep8_ and pyflakes_ clean. The
easiest way to do that is to run tox_ before submitting code for
review in Gerrit. It will run ``pep8`` and ``pyflakes`` in the same
manner as the automated test suite that will run on proposed
patchsets.
When creating new YAML components, please observe the following style
conventions:
* All YAML identifiers (including component names and arguments)
should be lower-case and multiple word identifiers should use
hyphens. E.g., "build-trigger".
* The Python functions that implement components should have the same
name as the YAML keyword, but should use underscores instead of
hyphens. E.g., "build_trigger".
This consistency will help users avoid simple mistakes when writing
YAML, as well as developers when matching YAML components to Python
implementation.
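
For instance, as an illustrative sketch (the exact Python signature may
differ)::

    # YAML component and argument names use hyphens:
    - build-trigger:
        project: 'other-job'

    # ...while the implementing Python function uses underscores:
    def build_trigger(registry, xml_parent, data):
        ...
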
Unit Tests
----------
Unit tests have been included and are in the ``tests`` folder. Many unit
tests samples are included as examples in our documentation to ensure that
examples are kept current with existing behaviour. To run the unit tests,
execute the command::
tox -e py34,py27
* Note: View ``tox.ini`` to run tests on other versions of Python,
generating the documentation and additionally for any special notes
on running the test to validate documentation external URLs from behind
proxies.
Installing without setup.py
---------------------------
For YAML support, you will need libyaml_ installed.
Mac OS X::
$ brew install libyaml
Then install the required python packages using pip_::
$ sudo pip install PyYAML python-jenkins
.. _Jenkins: https://jenkins.io/
.. _YAML: http://www.yaml.org/
.. _JSON: http://json.org/
.. _pep8: https://pypi.python.org/pypi/pep8
.. _pyflakes: https://pypi.python.org/pypi/pyflakes
.. _tox: https://testrun.org/tox
.. _libyaml: http://pyyaml.org/wiki/LibYAML
.. _pip: https://pypi.python.org/pypi/pip
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/README.rst | README.rst |
Job Definitions
===============
The job definitions for Jenkins Job Builder are kept in any number of
YAML or JSON files, in whatever way you would like to organize them. When you
invoke ``jenkins-jobs`` you may specify either the path of a single
YAML file, or a directory. If you choose a directory, all of
the .yaml/.yml or .json files in that directory will be read, and all the
jobs they define will be created or updated.
Definitions
-----------
Jenkins Job Builder understands a few basic object types which are
described in the next sections.
.. _job:
Job
^^^
The most straightforward way to create a job is simply to define a
Job in YAML. It looks like this::
- job:
name: job-name
That's not very useful, so you'll want to add some actions such as
:ref:`builders`, and perhaps :ref:`publishers`. Those are described
later.
.. automodule:: jenkins_jobs.modules.general
.. _job-template:
Job Template
^^^^^^^^^^^^
If you need several jobs defined that are nearly identical, except
perhaps in their names, SCP targets, etc., then you may use a Job
Template to specify the particulars of the job, and then use a
`Project`_ to realize the job with appropriate variable substitution.
Any variables not specified at the project level will be inherited from
the `Defaults`_.
A Job Template has the same syntax as a `Job`_, but you may add
variables anywhere in the definition. Variables are indicated by
enclosing them in braces, e.g., ``{name}`` will substitute the
variable `name`. When using a variable in a string field, it is good
practice to wrap the entire string in quotes, even if the rules of
YAML syntax don't require it because the value of the variable may
require quotes after substitution. In the rare situation that you must
encode braces within literals inside a template (for example a shell
function definition in a builder), doubling the braces will prevent
them from being interpreted as a template variable.
You must include a variable in the ``name`` field of a Job Template
(otherwise, every instance would have the same name). For example::
- job-template:
name: '{name}-unit-tests'
Will not cause any job to be created in Jenkins, however, it will
define a template that you can use to create jobs with a `Project`_
definition. Its name will depend on what is supplied to the
`Project`_.
If you use the variable ``{template-name}``, the name of the template
itself (e.g. ``{name}-unit-tests`` in the above example) will be
substituted in. This is useful in cases where you need to trace a job
back to its template.
Sometimes it is useful to have the same job name format used even
where the template contents may vary. `Ids`_ provide a mechanism to
support such use cases in addition to simplifying referencing
templates when the name contains the more complex substitution with
default values.
Default Values for Template Variables
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To facilitate reuse of templates with many variables that can be
substituted, but where in most cases the same or no value is needed,
it is possible to specify defaults for the variables within the
templates themselves.
This can be used to provide common settings for particular templates.
For example:
.. literalinclude::
/../../tests/yamlparser/fixtures/template_default_variables.yaml
:language: yaml
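For reference, a minimal sketch of the same idea (the job and variable
names here are illustrative, not taken from the fixture)::

    - job-template:
        name: '{name}-{pyver}-tests'
        # default value, used unless the project supplies its own
        pyver: 27
        builders:
            - shell: 'tox -e py{pyver}'

    - project:
        name: project-name
        jobs:
            - '{name}-{pyver}-tests'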
To use a default value for a variable used in the name would be
uncommon unless it was in addition to another variable. However, you
can use `Ids`_ to simplify such use cases.
.. _project:
Project
^^^^^^^
The purpose of a project is to collect related jobs together, and
provide values for the variables in a `Job Template`_. It looks like
this::
- project:
name: project-name
jobs:
- '{name}-unit-tests'
Any number of arbitrarily named additional fields may be specified,
and they will be available for variable substitution in the job
template. Any job templates listed under ``jobs:`` will be realized
with those values. The example above would create the job called
'project-name-unit-tests' in Jenkins.
The ``jobs:`` list can also allow for specifying job-specific
substitutions as follows::
- project:
name: project-name
jobs:
- '{name}-unit-tests':
mail-to: [email protected]
- '{name}-perf-tests':
mail-to: [email protected]
If a variable is a list, the job template will be realized with the
variable set to each value in the list. Multiple lists will lead to
the template being realized with the cartesian product of those
values. Example::
- project:
name: project-name
pyver:
- 26
- 27
jobs:
- '{name}-{pyver}'
If there are templates being realized that differ only in the variable
used for their name (thus not a use case for job-specific substitutions),
additional variables can be specified for the project variables. Example:
.. literalinclude:: /../../tests/yamlparser/fixtures/templates002.yaml
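A sketch of the pattern (variable names illustrative)::

    - job-template:
        name: '{name}-{pyver}-tests'
        builders:
            - shell: 'tox -e py{pyver} {extra_args}'

    - project:
        name: project-name
        pyver:
            - 26:
                extra_args: '--legacy'
            - 27:
                extra_args: ''
        jobs:
            - '{name}-{pyver}-tests'

Here both realized jobs share the naming variable ``pyver``, while each
list entry carries an additional variable of its own.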
You can also specify some variable combinations to exclude from the matrix with
the ``exclude`` keyword, to avoid generating jobs for those combinations. You
can specify all the variables of the combination or only a subset; if you
specify a subset, any value of the omitted variable will match:
.. literalinclude:: /../../tests/yamlparser/fixtures/template_exclude.yaml
The above example will omit the jobs:
* build-axe1val1-axe2val1-axe3val2
* build-axe1val1-axe2val2-axe3val1
* build-axe1val2-axe2val2-axe3val1
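For readers without the fixture at hand, a sketch along these lines
(axis names and values assumed) would produce exactly the omissions
listed above; note how the second, partial entry matches both values of
the omitted ``axe1``::

    - project:
        name: build
        axe1:
            - axe1val1
            - axe1val2
        axe2:
            - axe2val1
            - axe2val2
        axe3:
            - axe3val1
            - axe3val2
        exclude:
            - axe1: axe1val1
              axe2: axe2val1
              axe3: axe3val2
            - axe2: axe2val2
              axe3: axe3val1
        jobs:
            - '{name}-{axe1}-{axe2}-{axe3}'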
To achieve the same without the ``exclude`` tag one would have to do something
a bit more complicated, and it gets more complicated with each dimension in the
combination. For the previous example, the counterpart would be:
.. literalinclude::
/../../tests/yamlparser/fixtures/template_without_exclude.yaml
Job Group
^^^^^^^^^
If you have several Job Templates that should all be realized
together, you can define a Job Group to collect them. Simply use the
Job Group where you would normally use a `Job Template`_ and all of
the Job Templates in the Job Group will be realized. For example:
.. literalinclude:: /../../tests/yamlparser/fixtures/templates001.yaml
Would cause the jobs `project-name-unit-tests` and `project-name-perf-tests` to be created
in Jenkins.
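In sketch form (assuming the two job templates are defined elsewhere),
that arrangement looks like::

    - job-group:
        name: test-group
        jobs:
            - '{name}-unit-tests'
            - '{name}-perf-tests'

    - project:
        name: project-name
        jobs:
            - test-group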
.. _views:
Views
^^^^^
A view is a particular way of displaying a specific set of jobs. To
create a view, you must define a view in a YAML file and have a
variable called view-type with a valid value. It looks like this::
- view:
name: view-name
view-type: list
Views are processed differently than Jobs and therefore will not work
within a `Project`_ or a `Job Template`_.
.. _macro:
Macro
^^^^^
Many of the actions of a `Job`_, such as builders or publishers, can
be defined as a Macro, and then that Macro used in the `Job`_
description. Builders are described later, but let's introduce a
simple one now to illustrate the Macro functionality. This snippet
will instruct Jenkins to execute "make test" as part of the job::
- job:
name: foo-test
builders:
- shell: 'make test'
If you wanted to define a macro (which won't save much typing in this
case, but could still be useful to centralize the definition of a
commonly repeated task), the configuration would look like::
- builder:
name: make-test
builders:
- shell: 'make test'
- job:
name: foo-test
builders:
- make-test
This allows you to create complex actions (and even sequences of
actions) in YAML that look like first-class Jenkins Job Builder
actions. Not every attribute supports Macros; check the documentation
for the action before you try to use a Macro for it.
Macros can take parameters, letting you define a generic macro and more
specific ones without having to duplicate code::
# The 'add' macro takes a 'number' parameter and will create a
# job which prints 'Adding ' followed by the 'number' parameter:
- builder:
name: add
builders:
- shell: "echo Adding {number}"
# A specialized macro 'addtwo' reusing the 'add' macro but with
# a 'number' parameter hardcoded to 'two':
- builder:
name: addtwo
builders:
- add:
number: "two"
# Glue to have Jenkins Job Builder expand this YAML example:
- job:
name: "testingjob"
builders:
# The specialized macro:
- addtwo
# Generic macro call with a parameter
- add:
number: "ZERO"
# Generic macro called without a parameter. Never do this!
# See below for the resulting wrong output :(
- add
The ``<builders />`` section of the generated job then shows up as::
<builders>
<hudson.tasks.Shell>
<command>echo Adding two</command>
</hudson.tasks.Shell>
<hudson.tasks.Shell>
<command>echo Adding ZERO</command>
</hudson.tasks.Shell>
<hudson.tasks.Shell>
<command>echo Adding {number}</command>
</hudson.tasks.Shell>
</builders>
As you can see, the specialized macro ``addtwo`` reused the definition from
the generic macro ``add``.
Macro Notes
~~~~~~~~~~~
If a macro is not passed any parameters it will not have any expansion
performed on it. Thus if you forget to provide `any` parameters to a
macro that expects some, the parameter-templates (``{foo}``) will be
left as is in the resulting output; this is almost certainly not what
you want. Note if you provide an invalid parameter, the expansion
will fail; the expansion will only be skipped if you provide `no`
parameters at all.
Macros are expanded using Python string substitution rules. This can
especially cause confusion with shell snippets that use ``{`` as part
of their syntax. As described, if a macro has `no` parameters, no
expansion will be performed and thus it is correct to write the script
with no escaping, e.g.::
- builder:
name: a_builder
builders:
- shell: |
VARIABLE=${VARIABLE:-bar}
function foo {
echo "my shell function"
}
However, if the macro `has` parameters, you must escape the ``{`` you
wish to make it through to the output, e.g.::
- builder:
name: a_builder
builders:
- shell: |
PARAMETER={parameter}
VARIABLE=${{VARIABLE:-bar}}
function foo {{
echo "my shell function"
}}
Note that a ``job-template`` will have parameters by definition (at
least a ``name``). Thus embedded-shell within a ``job-template`` should
always use ``{{`` to achieve a literal ``{``. A generic builder will need
to consider the correct quoting based on its use of parameters.
.. _ids:
Item ID's
^^^^^^^^^
It's possible to assign an `id` to any of the blocks and then use that
to reference it instead of the name. This has two primary functions:
* A unique identifier where you wish to use the same naming format for
  multiple templates. This allows you to follow a naming scheme while
  still using multiple templates to handle subtle variations in job
  requirements.
* Provides a simpler name for a `job-template` where you have multiple
variables including default values in the name and don't wish to have
  to include this information in every use. This also makes it possible
  to change the template output name without impacting references.
Example:
.. literalinclude:: /../../tests/yamlparser/fixtures/template_ids.yaml
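A bare-bones sketch of the mechanism (not the fixture itself)::

    - job-template:
        id: simple-tests
        name: '{name}-{suffix}-tests'
        suffix: unit
        builders:
            - shell: 'make test'

    - project:
        name: project-name
        jobs:
            - simple-tests

The project references the template by its short ``id``, so the
template's output name can change without touching the reference.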
.. _raw:
Raw config
^^^^^^^^^^
It is possible, but not recommended, to use `raw` within a module to
inject raw xml into the job configs.
This is relevant in case there is no appropriate module for a
Jenkins plugin, or the module does not behave as you expect.
For example:
.. literalinclude:: /../../tests/wrappers/fixtures/raw001.yaml
Is the raw way of adding support for the `xvnc` wrapper.
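In sketch form (the XML body here is abbreviated and illustrative)::

    - wrapper:
        name: xvnc
        wrappers:
            - raw:
                xml: |
                    <hudson.plugins.xvnc.Xvnc>
                        <takeScreenshot>false</takeScreenshot>
                    </hudson.plugins.xvnc.Xvnc>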
To get the appropriate xml to use you would need to create/edit a job
in Jenkins and grab the relevant raw xml segment from the
`config.xml`.
The xml string can refer to variables just like anything else, and as
such can be parameterized in the same way.
You can use `raw` in most locations; the following example shows them
with arbitrary xml-data:
.. literalinclude::
/../../tests/yamlparser/fixtures/complete-raw001.yaml
Note: If you have a need to use `raw`, please consider submitting a patch to
add or fix the relevant module, which will remove your need to use `raw`.
.. _defaults:
Defaults
^^^^^^^^
Defaults collect job attributes (including actions) and will supply
those values when the job is created, unless superseded by a value in
the `Job`_ definition. If a set of Defaults is specified with the
name ``global``, that will be used by all `Job`_ (and `Job Template`_)
definitions unless they specify a different Default object with the
``defaults`` attribute. For example::
- defaults:
name: global
description: 'Do not edit this job through the web!'
Will set the job description for every job created.
You can define variables that will be realized in a `Job Template`_.
.. literalinclude:: /../../tests/yamlparser/fixtures/template_honor_defaults.yaml
Would create jobs ``build-i386`` and ``build-amd64``.
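One way this could look (a sketch; the actual fixture may differ)::

    - defaults:
        name: global
        arch:
            - i386
            - amd64

    - job-template:
        name: 'build-{arch}'
        builders:
            - shell: 'make ARCH={arch}'

    - project:
        name: build-project
        jobs:
            - 'build-{arch}'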
.. _variable_references:
Variable References
^^^^^^^^^^^^^^^^^^^
If you want to pass an object (boolean, list or dict) to templates you can
use an ``{obj:key}`` variable in the job template. This triggers the use
of code that retains the original object type.
For example:
.. literalinclude:: /../../tests/yamlparser/fixtures/custom_distri.yaml
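A sketch of where this matters: a matrix axis needs a real list rather
than a string, so the template receives it via ``{obj:...}`` (names
illustrative)::

    - job-template:
        name: '{name}-matrix'
        project-type: matrix
        axes:
            - axis:
                type: user-defined
                name: distro
                values: '{obj:distros}'
        builders:
            - shell: 'echo building on $distro'

    - project:
        name: demo
        distros:
            - precise
            - trusty
        jobs:
            - '{name}-matrix'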
JJB also supports interpolation of parameters within parameters. This allows a
little more flexibility when ordering template jobs as components in different
projects and job groups.
For example:
.. literalinclude:: /../../tests/yamlparser/fixtures/second_order_parameter_interpolation002.yaml
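In sketch form (names illustrative), one parameter's value can refer to
another parameter, and both are resolved when the template is realized::

    - job-template:
        name: '{name}-test'
        builders:
            - shell: 'echo {greeting}'

    - project:
        name: demo
        message: 'hello'
        greeting: '{message}, world'
        jobs:
            - '{name}-test'

The realized job ``demo-test`` would echo ``hello, world``.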
By default JJB will fail if it tries to interpolate a variable that was not
defined, but you can change that behavior and allow empty variables with the
allow_empty_variables configuration option.
For example, having a configuration file with that option enabled:
.. literalinclude:: /../../tests/yamlparser/fixtures/allow_empty_variables.conf
Will prevent JJB from failing if any uninitialized variables are used,
replacing them with the empty string instead.
Yaml Anchors & Aliases
^^^^^^^^^^^^^^^^^^^^^^
The yaml specification supports `anchors and aliases`_ which means
that JJB definitions allow references to variables in templates.
For example:
.. literalinclude:: /../../tests/yamlparser/fixtures/yaml_anchor.yaml
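At its simplest, the mechanism looks like this (an illustration, not
the fixture itself)::

    - job:
        name: anchored-job
        builders: &common_builders
            - shell: 'make test'

    - job:
        name: aliased-job
        builders: *common_builders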
The `anchors and aliases`_ are expanded internally within JJB's yaml loading
calls and are not limited to individual documents. That means you can't use
the same anchor name in included files without collisions.
A simple example can be seen in the specs `full length example`_ with the
following being more representative of usage within JJB:
.. literalinclude:: /../../tests/localyaml/fixtures/anchors_aliases.iyaml
Which will be expanded to the following yaml before being processed:
.. literalinclude:: /../../tests/localyaml/fixtures/anchors_aliases.oyaml
.. _full length example: http://www.yaml.org/spec/1.2/spec.html#id2761803
.. _anchors and aliases: http://www.yaml.org/spec/1.2/spec.html#id2765878
Custom Yaml Tags
----------------
.. automodule:: jenkins_jobs.local_yaml
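As a quick taste of what the module provides, a builder can pull a
script in from disk with the ``!include-raw:`` tag (the path here is
illustrative)::

    - job:
        name: include-example
        builders:
            - shell:
                !include-raw: scripts/build.sh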
Modules
-------
The bulk of the job definitions come from the following modules.
.. toctree::
:maxdepth: 2
:glob:
project_*
builders
hipchat
metadata
notifications
parameters
properties
publishers
reporters
scm
triggers
wrappers
zuul
Module Execution
----------------
The jenkins job builder modules are executed in sequence.
Generally the sequence is:
#. parameters/properties
#. scm
#. triggers
#. wrappers
#. prebuilders (maven only, configured like :ref:`builders`)
#. builders (maven, freestyle, matrix, etc..)
#. postbuilders (maven only, configured like :ref:`builders`)
#. publishers/reporters/notifications
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/doc/source/definition.rst | definition.rst |
Installation
============
To install Jenkins Job Builder, run::
pip install --user jenkins-job-builder
A virtual environment is recommended for development. For example, Jenkins
Job Builder may be installed from the top level directory::
$ virtualenv .venv
$ source .venv/bin/activate
$ pip install -r test-requirements.txt -e .
Alternatively, the current release can be installed from pypi::
sudo pip install jenkins-job-builder
The OpenStack project uses Puppet to manage its infrastructure
systems, including Jenkins. If you use Puppet, you can use the
`OpenStack Jenkins module`__ to install Jenkins Job Builder.
__ https://git.openstack.org/cgit/openstack-infra/puppet-jenkins/tree/
Documentation
-------------
Documentation is included in the ``doc`` folder. To generate docs
locally execute the command::
tox -e docs
The generated documentation is then available under
``doc/build/html/index.html``.
As URLs change or become stale over time, there is also a testenv available
to verify any links added. To run it locally, execute the command::
tox -e docs-linkcheck
* Note: When behind a proxy it is necessary to use ``TOX_TESTENV_PASSENV``
to pass any proxy settings for this test to be able to check links are
valid.
Unit Tests
----------
Unit tests have been included and are in the ``tests`` folder. We recently
started including unit tests as examples in our documentation, so to keep the
examples up to date it is very important that we include unit tests for
every module. To run the unit tests, execute the command::
tox -e py27
* Note: View ``tox.ini`` to run tests on other versions of Python.
Test Coverage
-------------
To measure test coverage, execute the command::
tox -e cover
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/doc/source/installation.rst | installation.rst |
.. _quick-start-guide:
Quick Start Guide
=================
This guide was made with the impatient in mind so explanation is sparse.
It will guide users through a set of typical use cases for JJB using the same
job definitions we use to test JJB.
#. Clone the repository_ to get the JJB job definition examples_
#. The :doc:`installation` can be either from pypi_ (released version) or from the clone (master).
Usage of the commands below assumes that you are at the root of the cloned directory.
.. _repository: http://git.openstack.org/cgit/openstack-infra/jenkins-job-builder/
.. _pypi: https://pypi.python.org/pypi/jenkins-job-builder/
.. _examples: http://git.openstack.org/cgit/openstack-infra/jenkins-job-builder/tree/tests
.. _use-case-1:
Use Case 1: Test a job definition
---------------------------------
JJB creates a Jenkins XML configuration file from a YAML/JSON definition file
and just uploads it to Jenkins. JJB provides a convenient ``test`` command to allow
you to validate the XML before you attempt to upload it to Jenkins.
Test a YAML job definition::
jenkins-jobs test tests/yamlparser/fixtures/templates002.yaml
The above command prints the generated Jenkins XML to the console. If you
prefer to send it to a directory::
jenkins-jobs test -o output tests/yamlparser/fixtures/templates002.yaml
The `output` directory will contain files with the XML configurations.
.. _use-case-2:
Use Case 2: Updating Jenkins Jobs
---------------------------------
Once you've tested your job definition and are happy with it, you can use the
``update`` command to deploy the job to Jenkins. The ``update`` command requires a
configuration file. An example file is supplied in the ``etc`` folder; you should
update it to match your Jenkins master::
jenkins-jobs --conf etc/jenkins_jobs.ini-sample update tests/yamlparser/fixtures/templates002.yaml
The above command will update your Jenkins master with the generated jobs.
**Caution**: JJB caches Jenkins job information locally. Changes
made using the Jenkins UI will not update that cache, which may
lead to confusion. See :ref:`updating-jobs` for more information.
.. _use-case-3:
Use Case 3: Working with JSON job definitions
---------------------------------------------
You can also define your jobs in json instead of yaml::
jenkins-jobs --conf etc/jenkins_jobs.ini-sample update tests/jsonparser/fixtures/simple.json
The above command just uses a simple job definition. You can also convert any
of the YAML examples to JSON and feed that to JJB.
.. _use-case-4:
Use Case 4: Deleting a job
--------------------------
To delete a job::
jenkins-jobs --conf etc/jenkins_jobs.ini-sample delete simple
The above command deletes the job `simple` from the Jenkins master.
Please refer to the jenkins-jobs :ref:`command-reference` and the
:doc:`definition` pages for more details.
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/doc/source/quick-start.rst | quick-start.rst |
Configuration File
------------------
After installation, you will need to create a configuration file. By
default, ``jenkins-jobs`` looks for ``~/.config/jenkins_jobs/jenkins_jobs.ini``,
``<script directory>/jenkins_jobs.ini`` or ``/etc/jenkins_jobs/jenkins_jobs.ini``
(in that order), but you may specify an alternative location when running
``jenkins-jobs``. The file should have the following format:
.. literalinclude:: ../../etc/jenkins_jobs.ini-sample
:language: ini
job_builder section
^^^^^^^^^^^^^^^^^^^
**ignore_cache**
(Optional) If set to True, Jenkins Job Builder won't use any cache.
**keep_descriptions**
By default `jenkins-jobs` will overwrite the job descriptions even if no
description has been defined explicitly.
When this option is set to True, that behavior changes and it will only
overwrite the description if you specified it in the yaml. False by default.
**include_path**
(Optional) Can be set to a ':' delimited list of paths, in which jenkins
job builder will search for any files specified by the custom application
yaml tags 'include', 'include-raw' and 'include-raw-escaped'.
**recursive**
(Optional) If set to True, jenkins job builder will search for job
definition files recursively.
**exclude**
(Optional) If set to a list of values separated by ':', these paths will be
excluded from the list of paths to be processed when searching recursively.
Values containing no ``/`` will be matched against directory names at all
levels, those starting with ``/`` will be considered absolute, while others
containing a ``/`` somewhere other than the start of the value will be
considered relative to the starting path.
**allow_duplicates**
(Optional) By default `jenkins-jobs` will abort when a duplicate macro,
template, job-group or job name is encountered as it cannot establish the
correct one to use. When this option is set to True, only a warning is
emitted.
**allow_empty_variables**
(Optional) When expanding strings, by default `jenkins-jobs` will raise an
exception if there's a key in the string, that has not been declared in the
input YAML files. Setting this option to True will replace it with the empty
string, allowing you to use those strings without having to define all the
keys it might be using.
jenkins section
^^^^^^^^^^^^^^^
**user**
This should be the name of a user previously defined in Jenkins.
Appropriate user permissions must be set under the Jenkins security
matrix: under the ``Global`` group of permissions, check ``Read``,
then under the ``Job`` group of permissions, check ``Create``,
``Delete``, ``Configure`` and finally ``Read``.
**password**
The API token for the user specified. You can get this through the
Jenkins management interface under ``People`` -> username ->
``Configure`` and then click the ``Show API Token`` button.
**url**
The base URL for your Jenkins installation.
**timeout**
(Optional) The connection timeout (in seconds) to the Jenkins server.
By default this is set to the system configured socket timeout.
**query_plugins_info**
Whether to query the Jenkins instance for plugin info. If no configuration
files are found (either in the default paths or given through the
command-line), `jenkins-jobs` will skip querying for plugin information. True
by default.
hipchat section
^^^^^^^^^^^^^^^
**send-as**
This is the hipchat user name that will be used when sending notifications.
**authtoken**
The API token necessary to send messages to hipchat. This can be generated in
the hipchat web interface by a user with administrative access for your
organization. This authtoken is set for each job individually; the
JJB Hipchat Plugin does not currently support setting different tokens for
different projects, so the token you use will have to be scoped such that it
can be used for any room your jobs might be configured to notify. For more
information on this topic, please see the `Hipchat API Documentation`__
__ https://www.hipchat.com/docs/apiv2/auth
stash section
^^^^^^^^^^^^^^^^^^^^^^^
**username**
This is the stash user name that will be used to connect to stash
when using the stash publisher plugin without defining it in the
yaml part.
**password**
This is the related password that will be used with the stash username
when using the stash publisher plugin without defining it in the
yaml part.
__future__ section
^^^^^^^^^^^^^^^^^^
This section controls the enabling of beta features or behaviour changes that
deviate from previously released behaviour in ways that may require effort to
convert existing JJB configs. It essentially acts as a way to share these new
behaviours while they are under active development, so they can still be
changed ahead of releases.
**param_order_from_yaml**
Used to switch on using the order in which the parameters are defined in yaml
to control the order of the corresponding XML elements being written out. This
is intended as a global flag and can affect multiple modules.
Running
-------
After it's installed and configured, you can invoke Jenkins Job
Builder by running ``jenkins-jobs``. You won't be able to do
anything useful just yet without a configuration; that is
discussed in the section above.
Test Mode
^^^^^^^^^
Once you have a configuration defined, you can run the job builder in test mode.
If you want to run a simple test with just a single YAML job definition file
and see the XML output on stdout::
jenkins-jobs test /path/to/foo.yaml
You can also pass JJB a directory containing multiple job definition files::
jenkins-jobs test /path/to/defs -o /path/to/output
which will write XML files to the output directory for all of the jobs
defined in the defs directory.
.. _updating-jobs:
Updating Jobs
^^^^^^^^^^^^^
When you're satisfied with the generated XML from the test, you can run::
jenkins-jobs update /path/to/defs
which will upload the job and view definitions to Jenkins if needed. Jenkins
Job Builder maintains, for each host, a cache [#f1]_ of previously configured
jobs and views, so that you can run that command as often as you like, and it
will only update the job configurations in Jenkins if the definitions
have changed since the last time it was run. Note: if you modify a job
directly in Jenkins, jenkins-jobs will not know about it and will not
update it.
To update a specific list of jobs/views, simply pass the job/view names as
additional arguments after the job definition path. To update Foo1 and Foo2
run::
jenkins-jobs update /path/to/defs Foo1 Foo2
You can also enable the parallel execution of the program by passing the
workers option with a value of 0, 2, or higher. Use 0 to run as many workers as cores
in the host that runs it, and 2 or higher to specify the number of workers to
use::
jenkins-jobs update --workers 0 /path/to/defs
Passing Multiple Paths
^^^^^^^^^^^^^^^^^^^^^^
It is possible to pass multiple paths to JJB using colons as a path separator on
\*nix systems and semi-colons on Windows systems. For example::
jenkins-jobs test /path/to/global:/path/to/instance:/path/to/instance/project
This helps when structuring directory layouts as you may selectively include
directories in different ways to suit different needs. If you maintain multiple
Jenkins instances suited to various needs you may want to share configuration
between those instances (global). Furthermore, there may be various ways you
would like to structure jobs within a given instance.
Recursive Searching of Paths
----------------------------
In addition to passing multiple paths to JJB it is also possible to enable
recursive searching to process all yaml files in the tree beneath each path.
For example::
For a tree:
/path/
to/
defs/
ci_jobs/
release_jobs/
globals/
macros/
templates/
jenkins-jobs update -r /path/to/defs:/path/to/globals
JJB will search defs/ci_jobs, defs/release_jobs, globals/macros and
globals/templates in addition to the defs and globals trees.
Excluding Paths
---------------
To allow a complex tree of jobs where some jobs are managed differently without
needing to explicitly provide each path, the recursive path processing supports
excluding paths based on absolute paths, relative paths and patterns. For
example::
For a tree:
/path/
to/
defs/
ci_jobs/
manual/
release_jobs/
manual/
qa_jobs/
globals/
macros/
templates/
special/
jenkins-jobs update -r -x man*:./qa_jobs -x /path/to/defs/globals/special \
/path/to/defs:/path/to/globals
JJB will search the given paths, ignoring the directories qa_jobs,
ci_jobs/manual, release_jobs/manual, and globals/special when
building the list of yaml files to be processed. Absolute paths
are denoted by starting from the root, relative by containing
the path separator, and patterns by having neither.
Patterns use simple shell globbing to match directories.
Deleting Jobs/Views
^^^^^^^^^^^^^^^^^^^
Jenkins Job Builder supports deleting jobs and views from Jenkins.
To delete a specific job::
jenkins-jobs delete Foo1
To delete a list of jobs or views, simply pass them as additional
arguments after the command::
jenkins-jobs delete Foo1 Foo2
To delete only views or only jobs, simply add the argument
--views-only or --jobs-only after the command::
jenkins-jobs delete --views-only Foo1
jenkins-jobs delete --jobs-only Foo1
The ``update`` command includes a ``delete-old`` option to remove obsolete
jobs::
jenkins-jobs update --delete-old /path/to/defs
Obsolete jobs are jobs once managed by JJB (as distinguished by a special
comment that JJB appends to their description) that were not generated in this
JJB run.
There is also a command to delete **all** jobs and/or views.
**WARNING**: Use with caution.
To delete **all** jobs and views::
jenkins-jobs delete-all
To delete **all** jobs::
jenkins-jobs delete-all --jobs-only
To delete **all** views::
jenkins-jobs delete-all --views-only
Globbed Parameters
^^^^^^^^^^^^^^^^^^
Jenkins job builder supports globbed parameters to identify jobs from a set of
definition files. This feature only supports JJB managed jobs.
To update jobs/views that only have 'foo' in their name::
jenkins-jobs update ./myjobs \*foo\*
To delete jobs/views that only have 'foo' in their name::
jenkins-jobs delete --path ./myjobs \*foo\*
.. _command-reference:
Command Reference
^^^^^^^^^^^^^^^^^
.. program-output:: jenkins-jobs --help
.. program-output:: jenkins-jobs test --help
.. program-output:: jenkins-jobs update --help
.. program-output:: jenkins-jobs delete-all --help
.. program-output:: jenkins-jobs delete --help
.. rubric:: Footnotes
.. [#f1] The cache default location is at ``~/.cache/jenkins_jobs``, which
can be overridden by setting the ``XDG_CACHE_HOME`` environment
variable.
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/doc/source/execution.rst | execution.rst |
.. _extending:
Extending
=========
Jenkins Job Builder is quite modular. It is easy to add new
attributes to existing components, add a new module to support a Jenkins
plugin, or include locally defined methods to deal with an
idiosyncratic build system.
The Builder
-----------
The ``Builder`` class manages Jenkins jobs. It's responsible for
creating/deleting/updating jobs and can be called from your application. You
can pass it a filename or an open file-like object that represents your YAML
configuration. See the ``jenkins_jobs/builder.py`` file for more details.
XML Processing
--------------
Most of the work of building XML from the YAML configuration file is
handled by individual functions that implement a single
characteristic. For example, see the
``jenkins_jobs/modules/builders.py`` file for the Python module that
implements the standard Jenkins builders. The ``shell`` function at
the top of the file implements the standard `Execute a shell` build
step. All of the YAML to XML functions in Jenkins Job Builder have
the same signature:
.. _component_interface:
.. py:function:: component(parser, xml_parent, data)
:noindex:
:arg YAMLParser parser: the jenkins jobs YAML parser
:arg Element xml_parent: this attribute's parent XML element
:arg dict data: the YAML data structure for this attribute and below
The function is expected to examine the YAML data structure and create
new XML nodes and attach them to the xml_parent element. This general
pattern is applied throughout the included modules.
.. _module:
Modules
-------
Nearly all of Jenkins Job Builder is implemented in modules. The main
program has no concept of builders, publishers, properties, or any
other aspects of job definition. Each of those building blocks is
defined in a module, and due to the use of setuptools entry points,
most modules are easily extensible with new components.
To add a new module, define a class that inherits from
:py:class:`jenkins_jobs.modules.base.Base`, and add it to the
``jenkins_jobs.modules`` entry point in your setup.py.
.. autoclass:: jenkins_jobs.modules.base.Base
:members:
:undoc-members:
:private-members:
.. _component:
Components
----------
Most of the standard modules supply a number of components, and it's
easy to provide your own components for use by those modules. For
instance, the Builders module provides several builders, such as the
`shell` builder as well as the `trigger_builds` builder. If you
wanted to add a new builder, all you need to do is write a function
that conforms to the :ref:`Component Interface <component_interface>`,
and then add that function to the appropriate entry point (via a
setup.py file).
.. _module_registry:
Module Registry
---------------
All modules and their associated components are registered in the
module registry. It can be accessed either from modules via the registry
field, or via the parser parameter of components.
.. autoclass:: jenkins_jobs.registry.ModuleRegistry
:members:
| zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/doc/source/extending.rst | extending.rst |
rm -fr .test
mkdir -p .test/run-conf/config
CONFIGS_DIR=$(dirname ${0})/configs
CONFIGS=$(ls -1 ${CONFIGS_DIR}/*.conf 2>/dev/null)
cd .test
if [ -e /usr/zuul-env/bin/zuul-cloner ];
then
/usr/zuul-env/bin/zuul-cloner -m ../tools/run-compare-clonemap.yaml --cache-dir /opt/git git://git.openstack.org openstack-infra/project-config
else
git clone --depth=1 git://git.openstack.org/openstack-infra/project-config
fi
# setup a default configuration to compare results against
cp -r project-config/jenkins/jobs/* run-conf/config
cd ..
mkdir -p .test/run-conf/default/out
tox -e compare-xml-config
echo "############################################################"
echo "Starting processing configs"
for conf_file in ${CONFIGS}
do
echo "============================================================"
echo "Processing non-default config ${conf_file}"
conf_name=$(basename ${conf_file%%.conf})
mkdir -p .test/run-conf/${conf_name}/out
tox -e compare-xml-config -- --conf ${conf_file} test -o .test/run-conf/${conf_name}/out/ .test/run-conf/config
echo "------------------------------------------------------------"
done
echo "############################################################"
echo "Comparing differences from default to alternative configs"
for conf_file in ${CONFIGS}
do
echo "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
echo "Comparing config ${conf_file}"
conf_name=$(basename ${conf_file%%.conf})
CHANGED=0
for x in `(cd .test/run-conf/default/out/ && find -type f)`
do
differences=$(diff -u .test/run-conf/default/out/${x} .test/run-conf/${conf_name}/out/${x} 2>&1)
if [ $? -ne 0 ]
then
CHANGED=1
echo "============================================================"
echo ${x}
echo "------------------------------------------------------------"
echo "${differences}"
fi
done
if [ "${CHANGED}" -eq "0" ]
then
echo "No differences between default and ${conf_name} configs"
fi
done
# the diff commands above may leave a non-zero exit status; always exit successfully
exit 0 | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/tools/run-compare-configs.sh | run-compare-configs.sh |
# Copyright (c) 2012, AT&T Labs, Yun Mao <[email protected]>
# All Rights Reserved.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
rm -fr .test
mkdir -p .test/old/config
mkdir -p .test/old/out
mkdir -p .test/new/config
mkdir -p .test/new/out
cd .test
if [ -e /usr/zuul-env/bin/zuul-cloner ];
then
/usr/zuul-env/bin/zuul-cloner -m ../tools/run-compare-clonemap.yaml --cache-dir /opt/git git://git.openstack.org openstack-infra/project-config
else
git clone --depth=1 git://git.openstack.org/openstack-infra/project-config
fi
cp -r project-config/jenkins/jobs/* old/config
cp -r project-config/jenkins/jobs/* new/config
cd ..
GITHEAD=`git rev-parse HEAD`
# First generate output from HEAD~1
git checkout HEAD~1
tox -e compare-xml-old
# Then use that as a reference to compare against HEAD
git checkout $GITHEAD
tox -e compare-xml-new
CHANGED=0
for x in `(cd .test/old/out && find -type f)`
do
if ! diff -u .test/old/out/$x .test/new/out/$x >/dev/null 2>&1
then
CHANGED=1
echo "============================================================"
echo $x
echo "------------------------------------------------------------"
fi
diff -u .test/old/out/$x .test/new/out/$x || /bin/true
done
echo
echo "You are in detached HEAD mode. If you are a developer"
echo "and not very familiar with git, you might want to do"
echo "'git checkout branch-name' to go back to your branch."
if [ "$CHANGED" -eq "1" ]; then
exit 1
fi
exit 0 | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/tools/run-compare-xml.sh | run-compare-xml.sh |
# Manage interpolation of JJB variables into template strings.
import logging
from pprint import pformat
import re
from string import Formatter
from jenkins_jobs.errors import JenkinsJobsException
logger = logging.getLogger(__name__)
def deep_format(obj, paramdict, allow_empty=False):
"""Apply the paramdict via str.format() to all string objects found within
the supplied obj. Lists and dicts are traversed recursively."""
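    # Illustrative example (not part of the original source):
    #   deep_format({'cmd': 'echo {msg}'}, {'msg': 'hi'})
    #   returns {'cmd': 'echo hi'}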
# YAML serialisation was originally used to achieve this, but that places
# limitations on the values in paramdict - the post-format result must
# still be valid YAML (so substituting-in a string containing quotes, for
# example, is problematic).
if hasattr(obj, 'format'):
try:
ret = CustomFormatter(allow_empty).format(obj, **paramdict)
except KeyError as exc:
missing_key = exc.args[0]
desc = "%s parameter missing to format %s\nGiven:\n%s" % (
missing_key, obj, pformat(paramdict))
raise JenkinsJobsException(desc)
elif isinstance(obj, list):
ret = type(obj)()
for item in obj:
ret.append(deep_format(item, paramdict, allow_empty))
elif isinstance(obj, dict):
ret = type(obj)()
for item in obj:
try:
ret[CustomFormatter(allow_empty).format(item, **paramdict)] = \
deep_format(obj[item], paramdict, allow_empty)
except KeyError as exc:
missing_key = exc.args[0]
desc = "%s parameter missing to format %s\nGiven:\n%s" % (
missing_key, obj, pformat(paramdict))
raise JenkinsJobsException(desc)
else:
ret = obj
return ret
class CustomFormatter(Formatter):
"""
Custom formatter to allow non-existing key references when formatting a
string
"""
    _expr = r'{({{)*(?:obj:)?(?P<key>\w+)(?:\|(?P<default>[\w\s]*))?}(}})*'
def __init__(self, allow_empty=False):
super(CustomFormatter, self).__init__()
self.allow_empty = allow_empty
def vformat(self, format_string, args, kwargs):
matcher = re.compile(self._expr)
# special case of returning the object if the entire string
# matches a single parameter
try:
result = re.match('^%s$' % self._expr, format_string)
except TypeError:
return format_string.format(**kwargs)
if result is not None:
try:
return kwargs[result.group("key")]
except KeyError:
pass
# handle multiple fields within string via a callback to re.sub()
def re_replace(match):
key = match.group("key")
default = match.group("default")
if default is not None:
if key not in kwargs:
return default
else:
return "{%s}" % key
return match.group(0)
format_string = matcher.sub(re_replace, format_string)
return Formatter.vformat(self, format_string, args, kwargs)
def get_value(self, key, args, kwargs):
try:
return Formatter.get_value(self, key, args, kwargs)
except KeyError:
if self.allow_empty:
logger.debug(
'Found uninitialized key %s, replaced with empty string',
key
)
return ''
raise | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/formatter.py | formatter.py |
# Concurrent execution helper functions and classes
from functools import wraps
import logging
from multiprocessing import cpu_count
import threading
import traceback
try:
import Queue as queue
except ImportError:
import queue
logger = logging.getLogger(__name__)
class TaskFunc(dict):
"""
Simple class to wrap around the information needed to run a function.
"""
def __init__(self, n_ord, func, args=None, kwargs=None):
self['func'] = func
self['args'] = args or []
self['kwargs'] = kwargs or {}
self['ord'] = n_ord
class Worker(threading.Thread):
"""
Class that actually does the work, gets a TaskFunc through the queue,
runs its function with the passed parameters and returns the result
If the string 'done' is passed instead of a TaskFunc instance, the thread
will end.
"""
def __init__(self, in_queue, out_queue):
threading.Thread.__init__(self)
self.in_queue = in_queue
self.out_queue = out_queue
def run(self):
while True:
task = self.in_queue.get()
if task == 'done':
return
try:
res = task['func'](*task['args'],
**task['kwargs'])
except Exception as exc:
res = exc
traceback.print_exc()
self.out_queue.put((task['ord'], res))
def concurrent(func):
@wraps(func)
def concurrentized(*args, **kwargs):
"""
This function will spawn workers and run the decorated function
concurrently on the workers. It will not ensure the thread safety of
the decorated function (the decorated function should be thread safe by
itself). It accepts two special parameters:
        :arg list concurrent: list of the keyword arguments to pass to each
            of the runs; the results of each run will be returned in the
            same order.
        :arg int n_workers: number of workers to use. By default, and if
            '0' is passed, the number of cores is autodetected and used; if
            '1' is passed, no workers are spawned and the function simply
            runs as if it were not concurrentized.
Example:
> @concurrent
> def sample(param1, param2, param3):
> return param1 + param2 + param3
>
> sample('param1', param2='val2',
> concurrent=[
> {'param3': 'val3'},
> {'param3': 'val4'},
> {'param3': 'val5'},
> ])
>
['param1val2val3', 'param1val2val4', 'param1val2val5']
        This will run the function `sample` 3 times, concurrently (depending
        on the number of detected cores), and return an array with the
        results of the executions in the same order the parameters were
        passed.
"""
n_workers = kwargs.pop('n_workers', 0)
p_kwargs = kwargs.pop('concurrent', [])
# if only one parameter is passed inside the concurrent dict, run the
# original function as is, no need for pools
if len(p_kwargs) == 1:
kwargs.update(p_kwargs[0])
if len(p_kwargs) in (1, 0):
return func(*args, **kwargs)
# prepare the workers
# If no number of workers passed or passed 0
if not n_workers:
n_workers = cpu_count()
logging.debug("Running concurrent %d workers", n_workers)
worker_pool = []
in_queue = queue.Queue()
out_queue = queue.Queue()
for n_worker in range(n_workers):
new_worker = Worker(in_queue, out_queue)
new_worker.setDaemon(True)
logging.debug("Spawning worker %d", n_worker)
new_worker.start()
worker_pool.append(new_worker)
# Feed the workers
n_ord = 0
for f_kwargs in p_kwargs:
f_kwargs.update(kwargs)
in_queue.put(TaskFunc(n_ord, func, args, f_kwargs))
n_ord += 1
for _ in range(n_workers):
in_queue.put('done')
# Wait for the results
logging.debug("Waiting for workers to finish processing")
results = []
for _ in p_kwargs:
new_res = out_queue.get()
results.append(new_res)
# cleanup
for worker in worker_pool:
worker.join()
# Reorder the results
results = [r[1] for r in sorted(results)]
logging.debug("Concurrent task finished")
return results
return concurrentized | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/parallel.py | parallel.py |
# Manage Jenkins plugin module registry.
import logging
import operator
import pkg_resources
import re
import types
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
__all__ = [
"ModuleRegistry"
]
logger = logging.getLogger(__name__)
class ModuleRegistry(object):
entry_points_cache = {}
def __init__(self, jjb_config, plugins_list=None):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.jjb_config = jjb_config
self.masked_warned = {}
if plugins_list is None:
self.plugins_dict = {}
else:
self.plugins_dict = self._get_plugins_info_dict(plugins_list)
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(key=operator.attrgetter('sequence'))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = entrypoint
@staticmethod
def _get_plugins_info_dict(plugins_list):
def mutate_plugin_info(plugin_info):
"""
We perform mutations on a single member of plugin_info here, then
return a dictionary with the longName and shortName of the plugin
mapped to its plugin info dictionary.
"""
version = plugin_info.get('version', '0')
plugin_info['version'] = re.sub(r'(.*)-(?:SNAPSHOT|BETA)',
r'\g<1>.preview', version)
aliases = []
for key in ['longName', 'shortName']:
value = plugin_info.get(key, None)
if value is not None:
aliases.append(value)
plugin_info_dict = {}
for name in aliases:
plugin_info_dict[name] = plugin_info
return plugin_info_dict
list_of_dicts = [mutate_plugin_info(v) for v in plugins_list]
plugins_info_dict = {}
for d in list_of_dicts:
plugins_info_dict.update(d)
return plugins_info_dict
def get_plugin_info(self, plugin_name):
""" This method is intended to provide information about plugins within
a given module's implementation of Base.gen_xml. The return value is a
dictionary with data obtained directly from a running Jenkins instance.
This allows module authors to differentiate generated XML output based
on information such as specific plugin versions.
:arg string plugin_name: Either the shortName or longName of a plugin
as see in a query that looks like:
``http://<jenkins-hostname>/pluginManager/api/json?pretty&depth=2``
During a 'test' run, it is possible to override JJB's query to a live
Jenkins instance by passing it a path to a file containing a YAML list
of dictionaries that mimics the plugin properties you want your test
output to reflect::
jenkins-jobs test -p /path/to/plugins-info.yaml
Below is example YAML that might be included in
/path/to/plugins-info.yaml.
.. literalinclude:: /../../tests/cmd/fixtures/plugins-info.yaml
"""
return self.plugins_dict.get(plugin_name, {})
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
@property
def parser_data(self):
return self.__parser_data
def set_parser_data(self, parser_data):
self.__parser_data = parser_data
def dispatch(self, component_type, xml_parent,
component, template_data={}):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
        :arg Element xml_parent: the parent XML element
        :arg component: the component definition: either a plain name
            string, or a singleton dict mapping the name to its arguments
:arg dict template_data: values that should be interpolated into
the component definition
See :py:class:`jenkins_jobs.modules.base.Base` for how to register
components of a module.
See the Publishers module for a simple example of how to use
this method.
"""
if component_type not in self.modules_by_component_type:
raise JenkinsJobsException("Unknown component type: "
"'{0}'.".format(component_type))
entry_point = self.modules_by_component_type[component_type]
component_list_type = entry_point.load().component_list_type
if isinstance(component, dict):
# The component is a singleton dictionary of name: dict(args)
name, component_data = next(iter(component.items()))
if template_data:
# Template data contains values that should be interpolated
# into the component definition
component_data = deep_format(
component_data, template_data,
self.jjb_config.yamlparser['allow_empty_variables'])
else:
# The component is a simple string name, eg "run-tests"
name = component
component_data = {}
# Look for a component function defined in an entry point
eps = ModuleRegistry.entry_points_cache.get(component_list_type)
if eps is None:
module_eps = []
# auto build entry points by inferring from base component_types
mod = pkg_resources.EntryPoint(
"__all__", entry_point.module_name, dist=entry_point.dist)
Mod = mod.load()
func_eps = [Mod.__dict__.get(a) for a in dir(Mod)
if isinstance(Mod.__dict__.get(a),
types.FunctionType)]
for func_ep in func_eps:
try:
# extract entry point based on docstring
name_line = func_ep.__doc__.split('\n')
if not name_line[0].startswith('yaml:'):
logger.debug("Ignoring '%s' as an entry point" %
name_line)
continue
ep_name = name_line[0].split(' ')[1]
except (AttributeError, IndexError):
# AttributeError by docstring not being defined as
# a string to have split called on it.
# IndexError raised by name_line not containing anything
# after the 'yaml:' string.
logger.debug("Not including func '%s' as an entry point"
% func_ep.__name__)
continue
module_eps.append(
pkg_resources.EntryPoint(
ep_name, entry_point.module_name,
dist=entry_point.dist, attrs=(func_ep.__name__,)))
logger.debug(
"Adding auto EP '%s=%s:%s'" %
(ep_name, entry_point.module_name, func_ep.__name__))
# load from explicitly defined entry points
module_eps.extend(list(pkg_resources.iter_entry_points(
group='jenkins_jobs.{0}'.format(component_list_type))))
eps = {}
for module_ep in module_eps:
if module_ep.name in eps:
                    raise JenkinsJobsException(
                        "Duplicate entry point found for component type: "
                        "'{0}', name: '{1}'".format(component_type, name))
eps[module_ep.name] = module_ep
# cache both sets of entry points
ModuleRegistry.entry_points_cache[component_list_type] = eps
logger.debug("Cached entry point group %s = %s",
component_list_type, eps)
# check for macro first
component = self.parser_data.get(component_type, {}).get(name)
if component:
if name in eps and name not in self.masked_warned:
self.masked_warned[name] = True
logger.warning(
"You have a macro ('%s') defined for '%s' "
"component type that is masking an inbuilt "
"definition" % (name, component_type))
for b in component[component_list_type]:
# Pass component_data in as template data to this function
# so that if the macro is invoked with arguments,
# the arguments are interpolated into the real defn.
self.dispatch(component_type, xml_parent, b, component_data)
elif name in eps:
func = eps[name].load()
func(self, xml_parent, component_data)
else:
raise JenkinsJobsException("Unknown entry point or macro '{0}' "
"for component type: '{1}'.".
format(name, component_type)) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/registry.py | registry.py |
# Manage JJB Configuration sources, defaults, and access.
from collections import defaultdict
import io
import logging
import os
from six.moves import configparser, StringIO
from six import PY2
from jenkins_jobs import builder
from jenkins_jobs.errors import JJBConfigException
from jenkins_jobs.errors import JenkinsJobsException
__all__ = [
"JJBConfig"
]
logger = logging.getLogger(__name__)
DEFAULT_CONF = """
[job_builder]
keep_descriptions=False
ignore_cache=False
recursive=False
exclude=.*
allow_duplicates=False
allow_empty_variables=False
[jenkins]
url=http://localhost:8080/
query_plugins_info=True
[hipchat]
authtoken=dummy
send-as=Jenkins
"""
CONFIG_REQUIRED_MESSAGE = ("A valid configuration file is required. "
"No configuration file passed.")
class JJBConfig(object):
def __init__(self, config_filename=None, config_file_required=False):
"""
The JJBConfig class is intended to encapsulate and resolve priority
between all sources of configuration for the JJB library. This allows
the various sources of configuration to provide a consistent accessor
interface regardless of where they are used.
It also allows users of JJB-as-an-API to create minimally valid
configuration and easily make minor modifications to default values
without strictly adhering to the confusing setup (see the _setup
method, the behavior of which largely lived in the cmd.execute method
previously) necessary for the jenkins-jobs command line tool.
:arg str config_filename: Name of configuration file on which to base
this config object.
:arg bool config_file_required: Allows users of the JJBConfig class to
decide whether or not it's really necessary for a config file to be
passed in when creating an instance. This has two effects on the
behavior of JJBConfig initialization:
* It determines whether or not we try "local" and "global" config
files.
* It determines whether or not failure to read some config file
will raise an exception or simply print a warning message
indicating that no config file was found.
"""
config_parser = self._init_defaults()
global_conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
user_conf = os.path.join(os.path.expanduser('~'), '.config',
'jenkins_jobs', 'jenkins_jobs.ini')
local_conf = os.path.join(os.path.dirname(__file__),
'jenkins_jobs.ini')
conf = None
if config_filename is not None:
conf = config_filename
else:
if os.path.isfile(local_conf):
conf = local_conf
elif os.path.isfile(user_conf):
conf = user_conf
else:
conf = global_conf
if config_file_required and conf is None:
raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)
config_fp = None
if conf is not None:
try:
config_fp = self._read_config_file(conf)
except JJBConfigException:
if config_file_required:
raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)
else:
logger.warning("Config file, {0}, not found. Using "
"default config values.".format(conf))
if config_fp is not None:
if PY2:
config_parser.readfp(config_fp)
else:
config_parser.read_file(config_fp)
self.config_parser = config_parser
self.ignore_cache = False
self.flush_cache = False
self.user = None
self.password = None
self.plugins_info = None
self.timeout = builder._DEFAULT_TIMEOUT
self.allow_empty_variables = None
self.jenkins = defaultdict(None)
self.builder = defaultdict(None)
self.yamlparser = defaultdict(None)
self.hipchat = defaultdict(None)
self._setup()
def _init_defaults(self):
""" Initialize default configuration values using DEFAULT_CONF
"""
config = configparser.ConfigParser()
# Load default config always
if PY2:
config.readfp(StringIO(DEFAULT_CONF))
else:
config.read_file(StringIO(DEFAULT_CONF))
return config
def _read_config_file(self, config_filename):
""" Given path to configuration file, read it in as a ConfigParser
object and return that object.
"""
if os.path.isfile(config_filename):
self.__config_file = config_filename # remember file we read from
logger.debug("Reading config from {0}".format(config_filename))
config_fp = io.open(config_filename, 'r', encoding='utf-8')
else:
raise JJBConfigException(
"A valid configuration file is required. "
"\n{0} is not valid.".format(config_filename))
return config_fp
def _setup(self):
config = self.config_parser
logger.debug("Config: {0}".format(config))
# check the ignore_cache setting
if config.has_option('jenkins', 'ignore_cache'):
logging.warning("ignore_cache option should be moved to the "
"[job_builder] section in the config file, the "
"one specified in the [jenkins] section will be "
"ignored in the future")
self.ignore_cache = config.getboolean('jenkins', 'ignore_cache')
elif config.has_option('job_builder', 'ignore_cache'):
self.ignore_cache = config.getboolean('job_builder',
'ignore_cache')
# check the flush_cache setting
if config.has_option('job_builder', 'flush_cache'):
self.flush_cache = config.getboolean('job_builder', 'flush_cache')
        # Jenkins supports access as an anonymous user, which can be used to
        # ensure read-only behaviour when querying the version of plugins
        # installed for test mode to generate XML output matching what will
        # be uploaded. To enable this, pass 'None' as the value for user and
        # password to python-jenkins.
#
# catching 'TypeError' is a workaround for python 2.6 interpolation
# error
# https://bugs.launchpad.net/openstack-ci/+bug/1259631
try:
self.user = config.get('jenkins', 'user')
except (TypeError, configparser.NoOptionError):
pass
try:
self.password = config.get('jenkins', 'password')
except (TypeError, configparser.NoOptionError):
pass
# None -- no timeout, blocking mode; same as setblocking(True)
# 0.0 -- non-blocking mode; same as setblocking(False) <--- default
# > 0 -- timeout mode; operations time out after timeout seconds
# < 0 -- illegal; raises an exception
        # to retain the default, either use
        # "timeout=jenkins_jobs.builder._DEFAULT_TIMEOUT" or do not set
        # timeout at all.
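        # For example, an illustrative ini snippet setting a 60 second
        # timeout (this maps to the getfloat() call below):
        #   [jenkins]
        #   timeout = 60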
try:
self.timeout = config.getfloat('jenkins', 'timeout')
        except ValueError:
raise JenkinsJobsException("Jenkins timeout config is invalid")
except (TypeError, configparser.NoOptionError):
pass
if not config.getboolean("jenkins", "query_plugins_info"):
logger.debug("Skipping plugin info retrieval")
self.plugins_info = []
self.recursive = config.getboolean('job_builder', 'recursive')
self.excludes = config.get('job_builder', 'exclude').split(os.pathsep)
# The way we want to do things moving forward:
self.jenkins['url'] = config.get('jenkins', 'url')
self.jenkins['user'] = self.user
self.jenkins['password'] = self.password
self.jenkins['timeout'] = self.timeout
self.builder['ignore_cache'] = self.ignore_cache
self.builder['flush_cache'] = self.flush_cache
self.builder['plugins_info'] = self.plugins_info
        # keep descriptions? (used by yamlparser)
keep_desc = False
if (config and config.has_section('job_builder') and
config.has_option('job_builder', 'keep_descriptions')):
keep_desc = config.getboolean('job_builder',
'keep_descriptions')
self.yamlparser['keep_descriptions'] = keep_desc
# figure out the include path (used by yamlparser)
path = ["."]
if (config and config.has_section('job_builder') and
config.has_option('job_builder', 'include_path')):
path = config.get('job_builder',
'include_path').split(':')
self.yamlparser['include_path'] = path
# allow duplicates?
allow_duplicates = False
if config and config.has_option('job_builder', 'allow_duplicates'):
allow_duplicates = config.getboolean('job_builder',
'allow_duplicates')
self.yamlparser['allow_duplicates'] = allow_duplicates
# allow empty variables?
self.yamlparser['allow_empty_variables'] = (
self.allow_empty_variables or
config and config.has_section('job_builder') and
config.has_option('job_builder', 'allow_empty_variables') and
config.getboolean('job_builder', 'allow_empty_variables'))
def validate(self):
config = self.config_parser
# Inform the user as to what is likely to happen, as they may specify
# a real jenkins instance in test mode to get the plugin info to check
# the XML generated.
if self.jenkins['user'] is None and self.jenkins['password'] is None:
logger.info("Will use anonymous access to Jenkins if needed.")
elif ((self.jenkins['user'] is not None and
self.jenkins['password'] is None) or
(self.jenkins['user'] is None and
self.jenkins['password'] is not None)):
raise JenkinsJobsException(
"Cannot authenticate to Jenkins with only one of User and "
"Password provided, please check your configuration."
)
if (self.builder['plugins_info'] is not None and
not isinstance(self.builder['plugins_info'], list)):
raise JenkinsJobsException("plugins_info must contain a list!")
# Temporary until yamlparser is refactored to query config object
if self.yamlparser['allow_empty_variables'] is not None:
config.set('job_builder',
'allow_empty_variables',
str(self.yamlparser['allow_empty_variables']))
def get_module_config(self, section, key):
""" Given a section name and a key value, return the value assigned to
the key in the JJB .ini file if it exists, otherwise emit a warning
indicating that the value is not set. Default value returned if no
value is set in the file will be a blank string.
"""
result = ''
try:
result = self.config_parser.get(
section, key
)
except (configparser.NoSectionError, configparser.NoOptionError,
JenkinsJobsException) as e:
logger.warning("You didn't set a " + key +
" neither in the yaml job definition nor in" +
" the " + section + " section, blank default" +
" value will be applied:\n{0}".format(e))
return result
def get_plugin_config(self, plugin, key):
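        """Look up ``key`` for ``plugin``, preferring the new-style
        ``[plugin "<name>"]`` section and falling back to the deprecated
        ``[<name>]`` section.

        Illustrative ini snippet (section and key names here are examples
        only, not guaranteed defaults)::

            [plugin "hipchat"]
            authtoken = abc123
        """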
value = self.get_module_config('plugin "{}"'.format(plugin), key)
# Backwards compatibility for users who have not switched to the new
# plugin configuration format in their config. This code should be
# removed in future versions of JJB after 2.0.
if not value:
value = self.get_module_config(plugin, key)
logger.warning(
"Defining plugin configuration using [" + plugin + "] is"
" deprecated. The recommended way to define plugins now is by"
" configuring [plugin \"" + plugin + "\"]")
        return value

# ---- end of jenkins_jobs/config.py ----
# Manage jobs in Jenkins server
import errno
import io
import logging
import os
import re
import tempfile
import yaml
logger = logging.getLogger(__name__)
class JobCache(object):
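    """Cache of job name to config md5, persisted per Jenkins URL.

    A minimal usage sketch (the URL and names are illustrative)::

        cache = JobCache('http://localhost:8080/')
        if cache.has_changed('my-job', md5):
            # ... push the new config, then record it
            cache.set('my-job', md5)
        cache.save()
    """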
# ensure each instance of the class has a reference to the required
# modules so that they are available to be used when the destructor
# is being called since python will not guarantee that it won't have
# removed global module references during teardown.
_logger = logger
_os = os
_tempfile = tempfile
_yaml = yaml
def __init__(self, jenkins_url, flush=False):
cache_dir = self.get_cache_dir()
# One cache per remote Jenkins URL:
        host_vary = re.sub(r'[^A-Za-z0-9\-\~]', '_', jenkins_url)
self.cachefilename = os.path.join(
cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
if flush or not os.path.isfile(self.cachefilename):
self.data = {}
else:
with io.open(self.cachefilename, 'r', encoding='utf-8') as yfile:
                self.data = yaml.safe_load(yfile)
logger.debug("Using cache: '{0}'".format(self.cachefilename))
@staticmethod
def get_cache_dir():
home = os.path.expanduser('~')
if home == '~':
raise OSError('Could not locate home folder')
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(home, '.cache')
path = os.path.join(xdg_cache_home, 'jenkins_jobs')
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as ose:
# it could happen that two jjb instances are running at the
# same time and that the other instance created the directory
# after we made the check, in which case there is no error
if ose.errno != errno.EEXIST:
raise ose
return path
def set(self, job, md5):
self.data[job] = md5
def clear(self):
self.data.clear()
def is_cached(self, job):
if job in self.data:
return True
return False
def has_changed(self, job, md5):
if job in self.data and self.data[job] == md5:
return False
return True
def save(self):
# use self references to required modules in case called via __del__
# write to tempfile under same directory and then replace to avoid
# issues around corruption such the process be killed
tfile = self._tempfile.NamedTemporaryFile(dir=self.get_cache_dir(),
delete=False)
tfile.write(self._yaml.dump(self.data).encode('utf-8'))
# force contents to be synced on disk before overwriting cachefile
tfile.flush()
self._os.fsync(tfile.fileno())
tfile.close()
try:
self._os.rename(tfile.name, self.cachefilename)
except OSError:
# On Windows, if dst already exists, OSError will be raised even if
# it is a file. Remove the file first in that case and try again.
self._os.remove(self.cachefilename)
self._os.rename(tfile.name, self.cachefilename)
self._logger.debug("Cache written out to '%s'" % self.cachefilename)
def __del__(self):
# check we initialized sufficiently in case called
# due to an exception occurring in the __init__
if getattr(self, 'data', None) is not None:
try:
self.save()
except Exception as e:
self._logger.error("Failed to write to cache file '%s' on "
"exit: %s" % (self.cachefilename, e)) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/cache.py | cache.py |
import inspect
def is_sequence(arg):
return (not hasattr(arg, "strip") and
(hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__")))
class JenkinsJobsException(Exception):
pass
class ModuleError(JenkinsJobsException):
def get_module_name(self):
frame = inspect.currentframe()
co_name = frame.f_code.co_name
module_name = '<unresolved>'
while frame and co_name != 'run':
# XML generation called via dispatch
if co_name == 'dispatch':
data = frame.f_locals
module_name = "%s.%s" % (data['component_type'], data['name'])
break
# XML generation done directly by class using gen_xml or root_xml
if co_name == 'gen_xml' or co_name == 'root_xml':
data = frame.f_locals['data']
module_name = next(iter(data.keys()))
break
frame = frame.f_back
co_name = frame.f_code.co_name
return module_name
class InvalidAttributeError(ModuleError):
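    """Raised when a module attribute is given a disallowed value.

    Illustrative raise (a sketch; the module name is resolved from the
    call stack by ModuleError.get_module_name)::

        raise InvalidAttributeError('strategy', value, ['shallow', 'deep'])
    """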
def __init__(self, attribute_name, value, valid_values=None):
message = "'{0}' is an invalid value for attribute {1}.{2}".format(
value, self.get_module_name(), attribute_name)
if is_sequence(valid_values):
message += "\nValid values include: {0}".format(
', '.join("'{0}'".format(value)
for value in valid_values))
super(InvalidAttributeError, self).__init__(message)
class MissingAttributeError(ModuleError):
def __init__(self, missing_attribute, module_name=None):
module = module_name or self.get_module_name()
if is_sequence(missing_attribute):
message = "One of {0} must be present in '{1}'".format(
', '.join("'{0}'".format(value)
for value in missing_attribute), module)
else:
message = "Missing {0} from an instance of '{1}'".format(
missing_attribute, module)
super(MissingAttributeError, self).__init__(message)
class YAMLFormatError(JenkinsJobsException):
pass
class JJBConfigException(JenkinsJobsException):
    pass

# ---- end of jenkins_jobs/errors.py ----
# Provides local yaml parsing classes and extends the yaml module
"""Custom application-specific yaml tags are supported to provide
enhancements when reading yaml configuration.
These allow inclusion of arbitrary files as a method of having blocks of data
managed separately from the yaml job configurations. A specific usage of this
is inlining scripts contained in separate files, although such tags may also
be used to simplify usage of macros or job templates.
The tag ``!include:`` will treat the following string as file which should be
parsed as yaml configuration data.
Example:
.. literalinclude:: /../../tests/localyaml/fixtures/include001.yaml
contents of include001.yaml.inc:
.. literalinclude:: /../../tests/yamlparser/fixtures/include001.yaml.inc
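For reference, a minimal inline sketch of the tag in use (the file name
here is illustrative)::

    - job:
        name: example-include
        builders:
            !include: include001.yaml.inc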
The tag ``!include-raw:`` will treat the given string or list of strings as
filenames to be opened as one or more data blobs, which should be read into
the calling yaml construct without any further parsing. Any data in a file
included through this tag will be treated as string data.
Examples:
.. literalinclude:: /../../tests/localyaml/fixtures/include-raw001.yaml
contents of include-raw001-hello-world.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-hello-world.sh
contents of include-raw001-vars.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-vars.sh
using a list of files:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-multi001.yaml
The tag ``!include-raw-escape:`` treats the given string or list of strings as
filenames to be opened as one or more data blobs, which should be escaped
before being read in as string data. This allows job-templates to use this tag
to include scripts from files without needing to escape braces in the original
file.
Examples:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-escaped001.yaml
contents of include-raw001-hello-world.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-hello-world.sh
contents of include-raw001-vars.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-vars.sh
using a list of files:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-escaped-multi001.yaml
For all the multi file includes, the files are simply appended using a newline
character.
To allow for job templates to perform substitution on the path names, when a
filename containing a python format placeholder is encountered, lazy loading
support is enabled, where instead of returning the contents back during yaml
parsing, it is delayed until the variable substitution is performed.
Example:
.. literalinclude:: /../../tests/yamlparser/fixtures/lazy-load-jobs001.yaml
using a list of files:
.. literalinclude::
/../../tests/yamlparser/fixtures/lazy-load-jobs-multi001.yaml
.. note::
Because lazy-loading involves performing the substitution on the file
name, it means that jenkins-job-builder can not call the variable
substitution on the contents of the file. This means that the
``!include-raw:`` tag will behave as though ``!include-raw-escape:`` tag
was used instead whenever name substitution on the filename is to be
performed.
Given the behaviour described above, when substitution is to be performed
on any filename passed via ``!include-raw-escape:`` the tag will be
automatically converted to ``!include-raw:`` and no escaping will be
performed.
"""
import functools
import io
import logging
import os
import re
import yaml
from yaml.constructor import BaseConstructor
from yaml.representer import BaseRepresenter
from yaml import YAMLObject
from collections import OrderedDict
logger = logging.getLogger(__name__)
class OrderedConstructor(BaseConstructor):
"""The default constructor class for PyYAML loading uses standard python
dictionaries which can have randomized ordering enabled (default in
CPython from version 3.3). The order of the XML elements being outputted
is both important for tests and for ensuring predictable generation based
on the source. This subclass overrides this behaviour to ensure that all
dict's created make use of OrderedDict to have iteration of keys to always
follow the order in which the keys were inserted/created.
"""
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None, None,
'expected a mapping node, but found %s' % node.id,
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=False)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping', node.start_mark,
'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=False)
mapping[key] = value
data.update(mapping)
class OrderedRepresenter(BaseRepresenter):
def represent_yaml_mapping(self, mapping, flow_style=None):
tag = u'tag:yaml.org,2002:map'
node = self.represent_mapping(tag, mapping, flow_style=flow_style)
return node
class LocalAnchorLoader(yaml.Loader):
"""Subclass for yaml.Loader which keeps Alias between calls"""
anchors = {}
def __init__(self, *args, **kwargs):
super(LocalAnchorLoader, self).__init__(*args, **kwargs)
self.anchors = LocalAnchorLoader.anchors
@classmethod
def reset_anchors(cls):
cls.anchors = {}
# override the default composer to skip resetting the anchors at the
# end of the current document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
return node
class LocalLoader(OrderedConstructor, LocalAnchorLoader):
"""Subclass for yaml.Loader which handles storing the search_path and
escape_callback functions for use by the custom YAML objects to find files
and escape the content where required.
    The constructor accepts a list of search paths to look under for the
    given file following each tag, taking the first match found. The search
    path will by default include the same directory as the yaml file and
    the current working directory.
Loading::
# use the load function provided in this module
import local_yaml
data = local_yaml.load(io.open(fn, 'r', encoding='utf-8'))
# Loading by providing the alternate class to the default yaml load
from local_yaml import LocalLoader
data = yaml.load(io.open(fn, 'r', encoding='utf-8'), LocalLoader)
# Loading with a search path
from local_yaml import LocalLoader
import functools
data = yaml.load(io.open(fn, 'r', encoding='utf-8'),
functools.partial(LocalLoader, search_path=['path']))
"""
def __init__(self, *args, **kwargs):
# make sure to pop off any local settings before passing to
# the parent constructor as any unknown args may cause errors.
self.search_path = list()
if 'search_path' in kwargs:
for p in kwargs.pop('search_path'):
logger.debug("Adding '{0}' to search path for include tags"
.format(p))
self.search_path.append(os.path.normpath(p))
if 'escape_callback' in kwargs:
self.escape_callback = kwargs.pop('escape_callback')
else:
self.escape_callback = self._escape
super(LocalLoader, self).__init__(*args, **kwargs)
# constructor to preserve order of maps and ensure that the order of
# keys returned is consistent across multiple python versions
self.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
type(self).construct_yaml_map)
if hasattr(self.stream, 'name'):
self.search_path.append(os.path.normpath(
os.path.dirname(self.stream.name)))
self.search_path.append(os.path.normpath(os.path.curdir))
def _escape(self, data):
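        # e.g. _escape('echo {x}') returns 'echo {{x}}', so literal braces
        # survive a later str.format() pass during template expansion.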
return re.sub(r'({|})', r'\1\1', data)
class LocalDumper(OrderedRepresenter, yaml.Dumper):
def __init__(self, *args, **kwargs):
super(LocalDumper, self).__init__(*args, **kwargs)
# representer to ensure conversion back looks like normal
# mapping and hides that we use OrderedDict internally
self.add_representer(OrderedDict,
type(self).represent_yaml_mapping)
# convert any tuples to lists as the JJB input is generally
# in list format
self.add_representer(tuple,
type(self).represent_list)
class BaseYAMLObject(YAMLObject):
yaml_loader = LocalLoader
yaml_dumper = LocalDumper
class YamlInclude(BaseYAMLObject):
yaml_tag = u'!include:'
@classmethod
def _find_file(cls, filename, search_path):
for dirname in search_path:
candidate = os.path.expanduser(os.path.join(dirname, filename))
if os.path.isfile(candidate):
logger.info("Including file '{0}' from path '{1}'"
.format(filename, dirname))
return candidate
return filename
@classmethod
def _open_file(cls, loader, node):
node_str = loader.construct_yaml_str(node)
try:
node_str.format()
except KeyError:
return cls._lazy_load(loader, cls.yaml_tag, node)
filename = cls._find_file(node_str, loader.search_path)
try:
with io.open(filename, 'r', encoding='utf-8') as f:
return f.read()
        except Exception:
logger.error("Failed to include file using search path: '{0}'"
.format(':'.join(loader.search_path)))
raise
@classmethod
def _from_file(cls, loader, node):
contents = cls._open_file(loader, node)
if isinstance(contents, LazyLoader):
return contents
data = yaml.load(contents,
functools.partial(cls.yaml_loader,
search_path=loader.search_path))
return data
@classmethod
def _lazy_load(cls, loader, tag, node_str):
logger.info("Lazy loading of file template '{0}' enabled"
.format(node_str))
return LazyLoader((cls, loader, node_str))
@classmethod
def from_yaml(cls, loader, node):
if isinstance(node, yaml.ScalarNode):
return cls._from_file(loader, node)
elif isinstance(node, yaml.SequenceNode):
contents = [cls._from_file(loader, scalar_node)
for scalar_node in node.value]
if any(isinstance(s, LazyLoader) for s in contents):
return LazyLoaderCollection(contents)
return u'\n'.join(contents)
else:
raise yaml.constructor.ConstructorError(
None, None, "expected either a sequence or scalar node, but "
"found %s" % node.id, node.start_mark)
class YamlIncludeRaw(YamlInclude):
yaml_tag = u'!include-raw:'
@classmethod
def _from_file(cls, loader, node):
return cls._open_file(loader, node)
class YamlIncludeRawEscape(YamlIncludeRaw):
yaml_tag = u'!include-raw-escape:'
@classmethod
def from_yaml(cls, loader, node):
data = YamlIncludeRaw.from_yaml(loader, node)
if isinstance(data, LazyLoader):
logger.warning("Replacing %s tag with %s since lazy loading means "
"file contents will not be deep formatted for "
"variable substitution.", cls.yaml_tag,
YamlIncludeRaw.yaml_tag)
return data
else:
return loader.escape_callback(data)
class DeprecatedTag(BaseYAMLObject):
@classmethod
def from_yaml(cls, loader, node):
logger.warning("tag '%s' is deprecated, switch to using '%s'",
cls.yaml_tag, cls._new.yaml_tag)
return cls._new.from_yaml(loader, node)
class YamlIncludeDeprecated(DeprecatedTag):
yaml_tag = u'!include'
_new = YamlInclude
class YamlIncludeRawDeprecated(DeprecatedTag):
yaml_tag = u'!include-raw'
_new = YamlIncludeRaw
class YamlIncludeRawEscapeDeprecated(DeprecatedTag):
yaml_tag = u'!include-raw-escape'
_new = YamlIncludeRawEscape
class LazyLoaderCollection(object):
"""Helper class to format a collection of LazyLoader objects"""
def __init__(self, sequence):
self._data = sequence
def format(self, *args, **kwargs):
return u'\n'.join(item.format(*args, **kwargs) for item in self._data)
class LazyLoader(object):
"""Helper class to provide lazy loading of files included using !include*
tags where the path to the given file contains unresolved placeholders.
"""
def __init__(self, data):
# str subclasses can only have one argument, so assume it is a tuple
# being passed and unpack as needed
self._cls, self._loader, self._node = data
def __str__(self):
return "%s %s" % (self._cls.yaml_tag, self._node.value)
def __repr__(self):
return "%s %s" % (self._cls.yaml_tag, self._node.value)
def format(self, *args, **kwargs):
self._node.value = self._node.value.format(*args, **kwargs)
return self._cls.from_yaml(self._loader, self._node)
def load(stream, **kwargs):
LocalAnchorLoader.reset_anchors()
return yaml.load(stream, functools.partial(LocalLoader, **kwargs))
def dump(data, stream=None, **kwargs):
    return yaml.dump(data, stream, Dumper=LocalDumper, **kwargs)

# ---- end of jenkins_jobs/local_yaml.py ----
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import os
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs import utils
__all__ = [
"YamlParser"
]
logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
"""
Checks if the given string, ``what``, matches any of the glob patterns in
the iterable, ``glob_patterns``
:arg str what: String that we want to test if it matches a pattern
:arg iterable glob_patterns: glob patterns to match (list, tuple, set,
etc.)
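
    Example (a doctest-style sketch, not part of the original docs)::

        >>> matches('foo-build', ['*-build', '*-deploy'])
        True
        >>> matches('foo-test', ['*-build'])
        False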
"""
return any(fnmatch.fnmatch(what, glob_pattern)
for glob_pattern in glob_patterns)
def combination_matches(combination, match_combinations):
"""
    Checks if the given combination matches any of the given combination
    globs, each glob being a combination where a missing key is considered
    to match. For example,
    (key1=2, key2=3)
    would match the combination glob:
    (key2=3)
    but not:
    (key1=2, key2=2)
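
    The same cases as a doctest-style sketch::

        >>> combination_matches({'key1': 2, 'key2': 3}, [{'key2': 3}])
        True
        >>> combination_matches({'key1': 2, 'key2': 3},
        ...                     [{'key1': 2, 'key2': 2}])
        False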
"""
for cmatch in match_combinations:
for key, val in combination.items():
if cmatch.get(key, val) != val:
break
else:
return True
return False
class YamlParser(object):
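    """Expand JJB yaml definitions into job and view dictionaries.

    A minimal sketch of the intended flow (a pre-built ``jjb_config`` and
    module ``registry`` are assumed here)::

        parser = YamlParser(jjb_config)
        parser.load_files(['path/to/jobs'])
        jobs, views = parser.expandYaml(registry)
    """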
def __init__(self, jjb_config=None):
self.data = {}
self.jobs = []
self.views = []
self.jjb_config = jjb_config
self.keep_desc = jjb_config.yamlparser['keep_descriptions']
self.path = jjb_config.yamlparser['include_path']
def load_files(self, fn):
# handle deprecated behavior, and check that it's not a file like
# object as these may implement the '__iter__' attribute.
if not hasattr(fn, '__iter__') or hasattr(fn, 'read'):
logger.warning(
'Passing single elements for the `fn` argument in '
'Builder.load_files is deprecated. Please update your code '
'to use a list as support for automatic conversion will be '
'removed in a future version.')
fn = [fn]
files_to_process = []
for path in fn:
if not hasattr(path, 'read') and os.path.isdir(path):
files_to_process.extend([os.path.join(path, f)
for f in os.listdir(path)
if (f.endswith('.yml')
or f.endswith('.yaml'))])
else:
files_to_process.append(path)
# symlinks used to allow loading of sub-dirs can result in duplicate
# definitions of macros and templates when loading all from top-level
unique_files = []
for f in files_to_process:
if hasattr(f, 'read'):
unique_files.append(f)
continue
rpf = os.path.realpath(f)
if rpf not in unique_files:
unique_files.append(rpf)
else:
logger.warning("File '%s' already added as '%s', ignoring "
"reference to avoid duplicating yaml "
"definitions." % (f, rpf))
for in_file in unique_files:
            # use of ask-for-permission instead of ask-for-forgiveness
            # performs better here, where most inputs are plain file paths.
if hasattr(in_file, 'name'):
fname = in_file.name
else:
fname = in_file
logger.debug("Parsing YAML file {0}".format(fname))
if hasattr(in_file, 'read'):
self._parse_fp(in_file)
else:
self.parse(in_file)
def _parse_fp(self, fp):
# wrap provided file streams to ensure correct encoding used
data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=getattr(fp, 'name', fp),
cls=type(data)))
for item in data:
cls, dfn = next(iter(item.items()))
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
# allow any entry to specify an id that can also be used
_id = dfn.get('id', dfn['name'])
if _id in group:
                    self._handle_dups(
                        "Duplicate entry found in '{0}': '{1}' already "
                        "defined".format(fp.name, _id))
group[_id] = dfn
self.data[cls] = group
def parse(self, fn):
with io.open(fn, 'r', encoding='utf-8') as fp:
self._parse_fp(fp)
def _handle_dups(self, message):
if not self.jjb_config.yamlparser['allow_duplicates']:
logger.error(message)
raise JenkinsJobsException(message)
else:
logger.warning(message)
def _getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def _getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _applyDefaults(self, data, override_dict=None):
if override_dict is None:
override_dict = {}
whichdefaults = data.get('defaults', 'global')
defaults = copy.deepcopy(self.data.get('defaults',
{}).get(whichdefaults, {}))
if defaults == {} and whichdefaults != 'global':
raise JenkinsJobsException("Unknown defaults set: '{0}'"
.format(whichdefaults))
for key in override_dict.keys():
if key in defaults.keys():
defaults[key] = override_dict[key]
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
def _formatDescription(self, job):
if self.keep_desc:
description = job.get("description", None)
else:
description = job.get("description", '')
if description is not None:
job["description"] = description + \
self._get_managed_string().lstrip()
def expandYaml(self, registry, jobs_glob=None):
changed = True
while changed:
changed = False
for module in registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self.data):
changed = True
for job in self.data.get('job', {}).values():
if jobs_glob and not matches(job['name'], jobs_glob):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("Expanding job '{0}'".format(job['name']))
job = self._applyDefaults(job)
self._formatDescription(job)
self.jobs.append(job)
for view in self.data.get('view', {}).values():
logger.debug("Expanding view '{0}'".format(view['name']))
self._formatDescription(view)
self.views.append(view)
for project in self.data.get('project', {}).values():
logger.debug("Expanding project '{0}'".format(project['name']))
# use a set to check for duplicate job references in projects
seen = set()
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = next(iter(jobspec.items()))
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self._getJob(jobname)
if job:
# Just naming an existing defined job
if jobname in seen:
self._handle_dups("Duplicate job '{0}' specified "
"for project '{1}'"
.format(jobname, project['name']))
seen.add(jobname)
continue
# see if it's a job group
group = self._getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
next(iter(group_jobspec.items()))
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self._getJob(group_jobname)
if job:
if group_jobname in seen:
self._handle_dups(
"Duplicate job '{0}' specified for "
"project '{1}'".format(group_jobname,
project['name']))
seen.add(group_jobname)
continue
template = self._getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = type(project)(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self._expandYamlForTemplateJob(d, template,
jobs_glob)
continue
# see if it's a template
template = self._getJobTemplate(jobname)
if template:
d = type(project)(project)
d.update(jobparams)
self._expandYamlForTemplateJob(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
# check for duplicate generated jobs
seen = set()
# walk the list in reverse so that last definition wins
for job in self.jobs[::-1]:
if job['name'] in seen:
self._handle_dups("Duplicate definitions for job '{0}' "
"specified".format(job['name']))
self.jobs.remove(job)
seen.add(job['name'])
return self.jobs, self.views
def _expandYamlForTemplateJob(self, project, template, jobs_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['jobs']:
project.pop(k)
excludes = project.pop('exclude', [])
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
continue
            if isinstance(v, list):
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self._applyDefaults(params, template)
try:
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
except TypeError:
project_name = project.pop('name')
logger.error(
"Exception thrown while expanding template '%s' for "
"project '%s', with expansion arguments of:\n%s\n"
"Original project input variables for template:\n%s\n"
"Most likely the inputs have items indented incorrectly "
"to describe how they should be applied.\n\nNote yaml "
"'null' is mapped to python's 'None'", template_name,
project_name,
"".join(local_yaml.dump({k: v}, default_flow_style=False)
for (k, v) in values),
local_yaml.dump(project, default_flow_style=False))
raise
params.update(expanded_values)
params = deep_format(params, params)
if combination_matches(params, excludes):
logger.debug('Excluding combination %s', str(params))
continue
for key in template.keys():
if key not in params:
params[key] = template[key]
params['template-name'] = template_name
expanded = deep_format(
template, params,
self.jjb_config.yamlparser['allow_empty_variables'])
job_name = expanded.get('name')
if jobs_glob and not matches(job_name, jobs_glob):
continue
self._formatDescription(expanded)
self.jobs.append(expanded)
def _get_managed_string(self):
# The \n\n is not hard coded, because they get stripped if the
# project does not otherwise have a description.
return "\n\n" + MAGIC_MANAGE_STRING | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/parser.py | parser.py |
# Manage Jenkins XML config file output.
import hashlib
import pkg_resources
from xml.dom import minidom
import xml.etree.ElementTree as XML
from jenkins_jobs import errors
__all__ = [
"XmlJobGenerator",
"XmlJob"
]
def remove_ignorable_whitespace(node):
"""Remove insignificant whitespace from XML nodes
It should only remove whitespace in between elements and sub elements.
    This should be safe for Jenkins due to how its XML serialization works,
    but it may not be valid for other XML documents. So use this method with
caution outside of this specific library.
"""
# strip tail whitespace if it's not significant
if node.tail and node.tail.strip() == "":
node.tail = None
for child in node:
# only strip whitespace from the text node if there are subelement
# nodes as this means we are removing leading whitespace before such
# sub elements. Otherwise risk removing whitespace from an element
# that only contains whitespace
if node.text and node.text.strip() == "":
node.text = None
remove_ignorable_whitespace(child)
class XmlJob(object):
def __init__(self, xml, name):
self.xml = xml
self.name = name
def md5(self):
return hashlib.md5(self.output()).hexdigest()
def output(self):
out = minidom.parseString(XML.tostring(self.xml, encoding='UTF-8'))
return out.toprettyxml(indent=' ', encoding='utf-8')
class XmlJobGenerator(object):
""" This class is responsible for generating Jenkins Configuration XML from
a compatible intermediate representation of Jenkins Jobs.
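
    A minimal sketch of the expected flow (``registry`` is a module
    registry built elsewhere in this package; ``job_dicts`` is the list
    of expanded job dictionaries)::

        gen = XmlJobGenerator(registry)
        xml_jobs = gen.generateXML(job_dicts)
        for job in xml_jobs:
            print(job.output())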
"""
def __init__(self, registry):
self.registry = registry
def generateXML(self, jobdict_list):
xml_jobs = []
for job in jobdict_list:
xml_jobs.append(self._getXMLForJob(job))
return xml_jobs
def _getXMLForJob(self, data):
kind = data.get('project-type', 'freestyle')
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.projects', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self._gen_xml(xml, data)
job = XmlJob(xml, data['name'])
return job
raise errors.JenkinsJobsException("Unrecognized project type: '%s'"
% kind)
def _gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
module.gen_xml(xml, data)
class XmlViewGenerator(object):
""" This class is responsible for generating Jenkins Configuration XML from
a compatible intermediate representation of Jenkins Views.
"""
def __init__(self, registry):
self.registry = registry
def generateXML(self, viewdict_list):
xml_views = []
for view in viewdict_list:
xml_views.append(self._getXMLForView(view))
return xml_views
def _getXMLForView(self, data):
kind = data.get('view-type', 'list')
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.views', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self._gen_xml(xml, data)
view = XmlJob(xml, data['name'])
return view
def _gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
                module.gen_xml(xml, data)

# ---- end of jenkins_jobs/xml_config.py ----
# Manage jobs in Jenkins server
import errno
import hashlib
import io
import logging
import operator
import os
from pprint import pformat
import re
import time
import xml.etree.ElementTree as XML
import jenkins
from jenkins_jobs.cache import JobCache
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.parallel import concurrent
from jenkins_jobs import utils
__all__ = [
"JenkinsManager"
]
logger = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = object()
class JenkinsManager(object):
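    """Apply generated XML to a Jenkins master, using a local cache to
    skip unchanged items.

    A minimal usage sketch (``jjb_config`` as produced by JJBConfig,
    ``xml_jobs`` as produced by XmlJobGenerator)::

        mgr = JenkinsManager(jjb_config)
        jobs, num_updated = mgr.update_jobs(xml_jobs)
    """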
def __init__(self, jjb_config):
url = jjb_config.jenkins['url']
user = jjb_config.jenkins['user']
password = jjb_config.jenkins['password']
timeout = jjb_config.jenkins['timeout']
if timeout != _DEFAULT_TIMEOUT:
self.jenkins = jenkins.Jenkins(url, user, password, timeout)
else:
self.jenkins = jenkins.Jenkins(url, user, password)
self.cache = JobCache(jjb_config.jenkins['url'],
flush=jjb_config.builder['flush_cache'])
self._plugins_list = jjb_config.builder['plugins_info']
self._jobs = None
self._job_list = None
self._views = None
self._view_list = None
self._jjb_config = jjb_config
@property
def jobs(self):
if self._jobs is None:
# populate jobs
self._jobs = self.jenkins.get_jobs()
return self._jobs
@property
def job_list(self):
if self._job_list is None:
self._job_list = set(job['name'] for job in self.jobs)
return self._job_list
def update_job(self, job_name, xml):
if self.is_job(job_name):
logger.info("Reconfiguring jenkins job {0}".format(job_name))
self.jenkins.reconfig_job(job_name, xml)
else:
logger.info("Creating jenkins job {0}".format(job_name))
self.jenkins.create_job(job_name, xml)
def is_job(self, job_name):
# first use cache
if job_name in self.job_list:
return True
# if not exists, use jenkins
return self.jenkins.job_exists(job_name)
def get_job_md5(self, job_name):
xml = self.jenkins.get_job_config(job_name)
return hashlib.md5(xml.encode('utf-8')).hexdigest()
def delete_job(self, job_name):
if self.is_job(job_name):
logger.info("Deleting jenkins job {0}".format(job_name))
self.jenkins.delete_job(job_name)
def get_plugins_info(self):
""" Return a list of plugin_info dicts, one for each plugin on the
Jenkins instance.
"""
try:
plugins_list = self.jenkins.get_plugins().values()
except jenkins.JenkinsException as e:
if re.search("Connection refused", str(e)):
logger.warning(
"Unable to retrieve Jenkins Plugin Info from {0},"
" using default empty plugins info list.".format(
self.jenkins.server))
plugins_list = [{'shortName': '',
'version': '',
'longName': ''}]
else:
raise e
logger.debug("Jenkins Plugin Info {0}".format(pformat(plugins_list)))
return plugins_list
def get_jobs(self, cache=True):
if not cache:
self._jobs = None
self._job_list = None
return self.jobs
def is_managed(self, job_name):
xml = self.jenkins.get_job_config(job_name)
try:
out = XML.fromstring(xml)
description = out.find(".//description").text
return description.endswith(MAGIC_MANAGE_STRING)
except (TypeError, AttributeError):
pass
return False
@property
def plugins_list(self):
if self._plugins_list is None:
self._plugins_list = self.get_plugins_info()
return self._plugins_list
def delete_old_managed(self, keep=None):
jobs = self.get_jobs()
deleted_jobs = 0
if keep is None:
keep = []
for job in jobs:
if job['name'] not in keep:
if self.is_managed(job['name']):
logger.info("Removing obsolete jenkins job {0}"
.format(job['name']))
self.delete_job(job['name'])
deleted_jobs += 1
else:
logger.info("Not deleting unmanaged jenkins job %s",
job['name'])
else:
logger.debug("Keeping job %s", job['name'])
return deleted_jobs
def delete_jobs(self, jobs):
if jobs is not None:
logger.info("Removing jenkins job(s): %s" % ", ".join(jobs))
for job in jobs:
self.delete_job(job)
                if self.cache.is_cached(job):
self.cache.set(job, '')
self.cache.save()
def delete_all_jobs(self):
jobs = self.get_jobs()
logger.info("Number of jobs to delete: %d", len(jobs))
script = ('for(job in jenkins.model.Jenkins.theInstance.getAllItems())'
' { job.delete(); }')
self.jenkins.run_script(script)
# Need to clear the JJB cache after deletion
self.cache.clear()
def changed(self, job):
md5 = job.md5()
changed = (self._jjb_config.builder['ignore_cache'] or
self.cache.has_changed(job.name, md5))
if not changed:
logger.debug("'{0}' has not changed".format(job.name))
return changed
def update_jobs(self, xml_jobs, output=None, n_workers=None):
orig = time.time()
logger.info("Number of jobs generated: %d", len(xml_jobs))
xml_jobs.sort(key=operator.attrgetter('name'))
if (output and not hasattr(output, 'write') and
not os.path.isdir(output)):
logger.info("Creating directory %s" % output)
try:
os.makedirs(output)
except OSError:
if not os.path.isdir(output):
raise
if output:
# ensure only wrapped once
if hasattr(output, 'write'):
output = utils.wrap_stream(output)
for job in xml_jobs:
if hasattr(output, 'write'):
# `output` is a file-like object
logger.info("Job name: %s", job.name)
logger.debug("Writing XML to '{0}'".format(output))
try:
output.write(job.output())
except IOError as exc:
if exc.errno == errno.EPIPE:
# EPIPE could happen if piping output to something
# that doesn't read the whole input (e.g.: the UNIX
# `head` command)
return
raise
continue
output_fn = os.path.join(output, job.name)
logger.debug("Writing XML to '{0}'".format(output_fn))
with io.open(output_fn, 'w', encoding='utf-8') as f:
f.write(job.output().decode('utf-8'))
return xml_jobs, len(xml_jobs)
# Filter out the jobs that did not change
        logger.debug('Filtering %d jobs for changed jobs',
                     len(xml_jobs))
step = time.time()
jobs = [job for job in xml_jobs
if self.changed(job)]
logging.debug("Filtered for changed jobs in %ss",
(time.time() - step))
if not jobs:
return [], 0
# Update the jobs
        logger.debug('Updating jobs')
step = time.time()
p_params = [{'job': job} for job in jobs]
results = self.parallel_update_job(
n_workers=n_workers,
concurrent=p_params)
logging.debug("Parsing results")
# generalize the result parsing, as a concurrent job always returns a
# list
if len(p_params) in (1, 0):
results = [results]
for result in results:
if isinstance(result, Exception):
raise result
else:
# update in-memory cache
j_name, j_md5 = result
self.cache.set(j_name, j_md5)
# write cache to disk
self.cache.save()
logging.debug("Updated %d jobs in %ss",
len(jobs),
time.time() - step)
logging.debug("Total run took %ss", (time.time() - orig))
return jobs, len(jobs)
@concurrent
def parallel_update_job(self, job):
self.update_job(job.name, job.output().decode('utf-8'))
return (job.name, job.md5())
################
# View related #
################
@property
def views(self):
if self._views is None:
# populate views
self._views = self.jenkins.get_views()
return self._views
@property
def view_list(self):
if self._view_list is None:
self._view_list = set(view['name'] for view in self.views)
return self._view_list
def get_views(self, cache=True):
if not cache:
self._views = None
self._view_list = None
return self.views
def is_view(self, view_name):
# first use cache
if view_name in self.view_list:
return True
# if not exists, use jenkins
return self.jenkins.view_exists(view_name)
def delete_view(self, view_name):
if self.is_view(view_name):
logger.info("Deleting jenkins view {}".format(view_name))
self.jenkins.delete_view(view_name)
def delete_views(self, views):
if views is not None:
logger.info("Removing jenkins view(s): %s" % ", ".join(views))
for view in views:
self.delete_view(view)
if self.cache.is_cached(view):
self.cache.set(view, '')
self.cache.save()
def delete_all_views(self):
views = self.get_views()
# Jenkins requires at least one view present. Don't remove the first
# view as it is likely the default view.
views.pop(0)
logger.info("Number of views to delete: %d", len(views))
for view in views:
self.delete_view(view['name'])
# Need to clear the JJB cache after deletion
self.cache.clear()
def update_view(self, view_name, xml):
if self.is_view(view_name):
logger.info("Reconfiguring jenkins view {0}".format(view_name))
self.jenkins.reconfig_view(view_name, xml)
else:
logger.info("Creating jenkins view {0}".format(view_name))
self.jenkins.create_view(view_name, xml)
def update_views(self, xml_views, output=None, n_workers=None):
orig = time.time()
logger.info("Number of views generated: %d", len(xml_views))
xml_views.sort(key=operator.attrgetter('name'))
if output:
# ensure only wrapped once
if hasattr(output, 'write'):
output = utils.wrap_stream(output)
for view in xml_views:
if hasattr(output, 'write'):
# `output` is a file-like object
logger.info("View name: %s", view.name)
logger.debug("Writing XML to '{0}'".format(output))
try:
output.write(view.output())
except IOError as exc:
if exc.errno == errno.EPIPE:
# EPIPE could happen if piping output to something
# that doesn't read the whole input (e.g.: the UNIX
# `head` command)
return
raise
continue
output_fn = os.path.join(output, view.name)
logger.debug("Writing XML to '{0}'".format(output_fn))
with io.open(output_fn, 'w', encoding='utf-8') as f:
f.write(view.output().decode('utf-8'))
return xml_views, len(xml_views)
# Filter out the views that did not change
        logger.debug('Filtering %d views for changed views',
                     len(xml_views))
step = time.time()
views = [view for view in xml_views
if self.changed(view)]
logging.debug("Filtered for changed views in %ss",
(time.time() - step))
if not views:
return [], 0
# Update the views
        logger.debug('Updating views')
step = time.time()
p_params = [{'view': view} for view in views]
results = self.parallel_update_view(
n_workers=n_workers,
concurrent=p_params)
logging.debug("Parsing results")
# generalize the result parsing, as a concurrent view always returns a
# list
if len(p_params) in (1, 0):
results = [results]
for result in results:
if isinstance(result, Exception):
raise result
else:
# update in-memory cache
v_name, v_md5 = result
self.cache.set(v_name, v_md5)
# write cache to disk
self.cache.save()
logging.debug("Updated %d views in %ss",
len(views),
time.time() - step)
logging.debug("Total run took %ss", (time.time() - orig))
return views, len(views)
@concurrent
def parallel_update_view(self, view):
self.update_view(view.name, view.output().decode('utf-8'))
        return (view.name, view.md5())

# ---- end of jenkins_jobs/builder.py ----
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
from jenkins_jobs.errors import InvalidAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules.helpers import copyartifact_build_selector
def base_param(registry, xml_parent, data, do_default, ptype):
pdef = XML.SubElement(xml_parent, ptype)
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
if do_default:
default = data.get('default', None)
if default:
XML.SubElement(pdef, 'defaultValue').text = default
else:
XML.SubElement(pdef, 'defaultValue')
return pdef
def string_param(registry, xml_parent, data):
"""yaml: string
A string parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(registry, xml_parent, data, True,
'hudson.model.StringParameterDefinition')
def promoted_param(registry, xml_parent, data):
"""yaml: promoted build
A promoted build parameter.
Requires the Jenkins :jenkins-wiki:`Promoted Builds Plugin
<Promoted+Builds+Plugin>`.
:arg str name: the name of the parameter (required)
:arg str project-name: the job from which the user can pick runs (required)
:arg str promotion-name: promotion process to choose from (optional)
:arg str description: a description of the parameter (optional)
Example:
.. literalinclude::
/../../tests/parameters/fixtures/promoted-build-param001.yaml
:language: yaml
"""
pdef = base_param(registry, xml_parent, data, False,
'hudson.plugins.promoted__builds.parameters.'
'PromotedBuildParameterDefinition')
try:
XML.SubElement(pdef, 'projectName').text = data['project-name']
except KeyError:
raise MissingAttributeError('project-name')
XML.SubElement(pdef, 'promotionProcessName').text = data.get(
'promotion-name', None)
def password_param(registry, xml_parent, data):
"""yaml: password
A password parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- password:
name: FOO
default: 1HSC0Ts6E161FysGf+e1xasgsHkgleLh09JUTYnipPvw=
description: "A parameter named FOO."
"""
base_param(registry, xml_parent, data, True,
'hudson.model.PasswordParameterDefinition')
def bool_param(registry, xml_parent, data):
"""yaml: bool
A boolean parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- bool:
name: FOO
default: false
description: "A parameter named FOO, defaults to 'false'."
"""
data['default'] = str(data.get('default', False)).lower()
base_param(registry, xml_parent, data, True,
'hudson.model.BooleanParameterDefinition')
def file_param(registry, xml_parent, data):
"""yaml: file
A file parameter.
:arg str name: the target location for the file upload
:arg str description: a description of the parameter (optional)
Example::
parameters:
- file:
name: test.txt
description: "Upload test.txt."
"""
base_param(registry, xml_parent, data, False,
'hudson.model.FileParameterDefinition')
def text_param(registry, xml_parent, data):
"""yaml: text
A text parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- text:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(registry, xml_parent, data, True,
'hudson.model.TextParameterDefinition')
def label_param(registry, xml_parent, data):
"""yaml: label
A node label parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- label:
name: node
default: precise
description: "The node on which to run the job"
"""
base_param(registry, xml_parent, data, True,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'LabelParameterDefinition')
def node_param(registry, xml_parent, data):
"""yaml: node
    Defines a list of nodes on which this job could potentially be executed.
    If you're using a node or label parameter to run your job on a particular
    node, you should not use the option "Restrict where this project can be
    run" in the job configuration - it will no longer have any effect on the
    selection of your node!
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg list default-slaves: The nodes used when job gets triggered
by anything else other than manually
:arg list allowed-slaves: The nodes available for selection
when job gets triggered manually. Empty means 'All'.
:arg bool ignore-offline-nodes: Ignore nodes not online or not having
executors (default false)
:arg bool allowed-multiselect: Allow multi node selection for concurrent
builds - this option only makes sense (and must be selected!) in
case the job is configured with: "Execute concurrent builds if
necessary". With this configuration the build will be executed on all
the selected nodes in parallel. (default false)
Example:
.. literalinclude:: /../../tests/parameters/fixtures/node-param001.yaml
:language: yaml
"""
pdef = base_param(registry, xml_parent, data, False,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'NodeParameterDefinition')
default = XML.SubElement(pdef, 'defaultSlaves')
if 'default-slaves' in data:
for slave in data['default-slaves']:
XML.SubElement(default, 'string').text = slave
allowed = XML.SubElement(pdef, 'allowedSlaves')
if 'allowed-slaves' in data:
for slave in data['allowed-slaves']:
XML.SubElement(allowed, 'string').text = slave
XML.SubElement(pdef, 'ignoreOfflineNodes').text = str(
data.get('ignore-offline-nodes', False)).lower()
if data.get('allowed-multiselect', False):
XML.SubElement(pdef, 'triggerIfResult').text = \
'allowMultiSelectionForConcurrentBuilds'
else:
XML.SubElement(pdef, 'triggerIfResult').text = \
'multiSelectionDisallowed'
XML.SubElement(pdef, 'allowMultiNodeSelection').text = str(
data.get('allowed-multiselect', False)).lower()
XML.SubElement(pdef, 'triggerConcurrentBuilds').text = str(
data.get('allowed-multiselect', False)).lower()
def choice_param(registry, xml_parent, data):
"""yaml: choice
A single selection parameter.
:arg str name: the name of the parameter
:arg list choices: the available choices
:arg str description: a description of the parameter (optional)
Example::
parameters:
- choice:
name: project
choices:
- nova
- glance
description: "On which project to run?"
"""
pdef = base_param(registry, xml_parent, data, False,
'hudson.model.ChoiceParameterDefinition')
choices = XML.SubElement(pdef, 'choices',
{'class': 'java.util.Arrays$ArrayList'})
a = XML.SubElement(choices, 'a', {'class': 'string-array'})
for choice in data['choices']:
XML.SubElement(a, 'string').text = choice
def run_param(registry, xml_parent, data):
"""yaml: run
A run parameter.
:arg str name: the name of the parameter
:arg str project-name: the name of job from which the user can pick runs
:arg str description: a description of the parameter (optional)
Example:
.. literalinclude:: /../../tests/parameters/fixtures/run-param001.yaml
:language: yaml
"""
pdef = base_param(registry, xml_parent, data, False,
'hudson.model.RunParameterDefinition')
XML.SubElement(pdef, 'projectName').text = data['project-name']
def extended_choice_param(registry, xml_parent, data):
"""yaml: extended-choice
Creates an extended choice parameter where values can be read from a file
Requires the Jenkins :jenkins-wiki:`Extended Choice Parameter Plugin
<Extended+Choice+Parameter+plugin>`.
:arg str name: name of the parameter
:arg str description: description of the parameter
(optional, default '')
:arg str property-file: location of property file to read from
(optional, default '')
:arg str property-key: key for the property-file (optional, default '')
:arg bool quote-value: whether to put quotes around the property
when passing to Jenkins (optional, default false)
:arg str visible-items: number of items to show in the list
(optional, default 5)
:arg str type: type of select, can be single-select, multi-select,
radio, checkbox or textbox (optional, default single-select)
:arg str value: comma separated list of values for the single select
or multi-select box (optional, default '')
:arg str default-value: used to set the initial selection of the
single-select or multi-select box (optional, default '')
:arg str value-description: comma separated list of value descriptions
for the single select or multi-select box (optional, default '')
:arg str default-property-file: location of property file when default
value needs to come from a property file (optional, default '')
:arg str default-property-key: key for the default property file
(optional, default '')
:arg str description-property-file: location of property file when value
description needs to come from a property file (optional, default '')
:arg str description-property-key: key for the value description
property file (optional, default '')
:arg str multi-select-delimiter: value between selections when the
        parameter is a multi-select (optional, default ',')
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/extended-choice-param001.yaml
:language: yaml
"""
pdef = base_param(registry, xml_parent, data, False,
'com.cwctravel.hudson.plugins.'
'extended__choice__parameter.'
'ExtendedChoiceParameterDefinition')
XML.SubElement(pdef, 'value').text = data.get('value', '')
XML.SubElement(pdef, 'visibleItemCount').text = str(data.get(
'visible-items', data.get('visible-item-count', 5)))
XML.SubElement(pdef, 'multiSelectDelimiter').text = data.get(
'multi-select-delimiter', ',')
XML.SubElement(pdef, 'quoteValue').text = str(data.get('quote-value',
False)).lower()
XML.SubElement(pdef, 'defaultValue').text = data.get(
'default-value', '')
XML.SubElement(pdef, 'descriptionPropertyValue').text = data.get(
'value-description', '')
choice = data.get('type', 'single-select')
choicedict = {'single-select': 'PT_SINGLE_SELECT',
'multi-select': 'PT_MULTI_SELECT',
'radio': 'PT_RADIO',
'checkbox': 'PT_CHECKBOX',
'textbox': 'PT_TEXTBOX',
'PT_SINGLE_SELECT': 'PT_SINGLE_SELECT',
'PT_MULTI_SELECT': 'PT_MULTI_SELECT',
'PT_RADIO': 'PT_RADIO',
'PT_CHECKBOX': 'PT_CHECKBOX',
'PT_TEXTBOX': 'PT_TEXTBOX'}
if choice in choicedict:
XML.SubElement(pdef, 'type').text = choicedict[choice]
else:
raise JenkinsJobsException("Type entered is not valid, must be one "
"of: single-select, multi-select, radio, "
"textbox or checkbox")
XML.SubElement(pdef, 'propertyFile').text = data.get('property-file', '')
XML.SubElement(pdef, 'propertyKey').text = data.get('property-key', '')
XML.SubElement(pdef, 'defaultPropertyFile').text = data.get(
'default-property-file', '')
XML.SubElement(pdef, 'defaultPropertyKey').text = data.get(
'default-property-key', '')
XML.SubElement(pdef, 'descriptionPropertyFile').text = data.get(
'description-property-file', '')
XML.SubElement(pdef, 'descriptionPropertyKey').text = data.get(
'description-property-key', '')
def validating_string_param(registry, xml_parent, data):
"""yaml: validating-string
A validating string parameter
Requires the Jenkins :jenkins-wiki:`Validating String Plugin
<Validating+String+Parameter+Plugin>`.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str regex: a regular expression to validate the string
:arg str msg: a message to display upon failed validation
Example::
parameters:
- validating-string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
            regex: "[A-Za-z]*"
msg: Your entered value failed validation
"""
pdef = base_param(registry, xml_parent, data, True,
'hudson.plugins.validating__string__parameter.'
'ValidatingStringParameterDefinition')
XML.SubElement(pdef, 'regex').text = data['regex']
XML.SubElement(pdef, 'failedValidationMessage').text = data['msg']
def svn_tags_param(registry, xml_parent, data):
"""yaml: svn-tags
    An svn tag parameter.
    Requires the Jenkins :jenkins-wiki:`Subversion Plugin
    <Subversion+Plugin>`.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str url: the url to list tags from
:arg str filter: the regular expression to filter tags
Example::
parameters:
- svn-tags:
name: BRANCH_NAME
default: release
            description: "A parameter named BRANCH_NAME, defaults to 'release'."
            url: http://svn.example.com/repo
            filter: "[A-Za-z0-9]*"
"""
pdef = base_param(registry, xml_parent, data, True,
'hudson.scm.listtagsparameter.'
'ListSubversionTagsParameterDefinition')
XML.SubElement(pdef, 'tagsDir').text = data['url']
XML.SubElement(pdef, 'tagsFilter').text = data.get('filter', None)
XML.SubElement(pdef, 'reverseByDate').text = "true"
XML.SubElement(pdef, 'reverseByName').text = "false"
XML.SubElement(pdef, 'maxTags').text = "100"
XML.SubElement(pdef, 'uuid').text = "1-1-1-1-1"
def dynamic_choice_param(registry, xml_parent, data):
"""yaml: dynamic-choice
Dynamic Choice Parameter
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices.
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice:
name: OPTIONS
description: "Available options"
script: "['optionA', 'optionB']"
remote: false
read-only: false
"""
dynamic_param_common(registry, xml_parent, data,
'ChoiceParameterDefinition')
def dynamic_string_param(registry, xml_parent, data):
"""yaml: dynamic-string
Dynamic Parameter
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script: "bar"
remote: false
read-only: false
"""
dynamic_param_common(registry, xml_parent, data,
'StringParameterDefinition')
def dynamic_choice_scriptler_param(registry, xml_parent, data):
"""yaml: dynamic-choice-scriptler
Dynamic Choice Parameter (Scriptler)
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice-scriptler:
name: OPTIONS
description: "Available options"
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(registry, xml_parent, data,
'ScriptlerChoiceParameterDefinition')
def dynamic_string_scriptler_param(registry, xml_parent, data):
"""yaml: dynamic-string-scriptler
Dynamic Parameter (Scriptler)
Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
<Dynamic+Parameter+Plug-in>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string-scriptler:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(registry, xml_parent, data,
'ScriptlerStringParameterDefinition')
def dynamic_param_common(registry, xml_parent, data, ptype):
pdef = base_param(registry, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
+ ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__script').text = data.get('script', None)
localBaseDir = XML.SubElement(pdef, '__localBaseDirectory',
{'serialization': 'custom'})
filePath = XML.SubElement(localBaseDir, 'hudson.FilePath')
default = XML.SubElement(filePath, 'default')
XML.SubElement(filePath, 'boolean').text = "true"
XML.SubElement(default, 'remote').text = \
"/var/lib/jenkins/dynamic_parameter/classpath"
XML.SubElement(pdef, '__remoteBaseDirectory').text = \
"dynamic_parameter_classpath"
XML.SubElement(pdef, '__classPath').text = data.get('classpath', None)
XML.SubElement(pdef, 'readonlyInputField').text = str(
data.get('read-only', False)).lower()
def dynamic_scriptler_param_common(registry, xml_parent, data, ptype):
pdef = base_param(registry, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
'scriptler.' + ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__scriptlerScriptId').text = data.get(
'script-id', None)
parametersXML = XML.SubElement(pdef, '__parameters')
parameters = data.get('parameters', [])
if parameters:
for parameter in parameters:
parameterXML = XML.SubElement(parametersXML,
'com.seitenbau.jenkins.plugins.'
'dynamicparameter.scriptler.'
'ScriptlerParameterDefinition_'
'-ScriptParameter')
XML.SubElement(parameterXML, 'name').text = parameter['name']
XML.SubElement(parameterXML, 'value').text = parameter['value']
XML.SubElement(pdef, 'readonlyInputField').text = str(data.get(
'read-only', False)).lower()
def matrix_combinations_param(registry, xml_parent, data):
"""yaml: matrix-combinations
Matrix combinations parameter
Requires the Jenkins :jenkins-wiki:`Matrix Combinations Plugin
<Matrix+Combinations+Plugin>`.
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str filter: Groovy expression to use filter the combination by
default (optional)
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/matrix-combinations-param001.yaml
:language: yaml
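
    A minimal inline sketch (the filter value is an illustrative Groovy
    expression)::

        parameters:
          - matrix-combinations:
              name: COMBINATIONS
              description: "Select the matrix combinations to build"
              filter: 'label == "linux"'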
"""
element_name = 'hudson.plugins.matrix__configuration__parameter.' \
'MatrixCombinationsParameterDefinition'
pdef = XML.SubElement(xml_parent, element_name)
if 'name' not in data:
raise JenkinsJobsException('matrix-combinations must have a name '
'parameter.')
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
combination_filter = data.get('filter')
if combination_filter:
XML.SubElement(pdef, 'defaultCombinationFilter').text = \
combination_filter
return pdef
def copyartifact_build_selector_param(registry, xml_parent, data):
"""yaml: copyartifact-build-selector
    Control, via a build parameter, which build the copyartifact plugin
    should copy when it is configured to use 'build-param'. Requires the
    Jenkins
:jenkins-wiki:`Copy Artifact plugin <Copy+Artifact+Plugin>`.
:arg str name: name of the build parameter to store the selection in
:arg str description: a description of the parameter (optional)
    :arg str which-build: which build to provide as the default value in
        the UI. See the ``which-build`` param of
        :py:mod:`~builders.copyartifact` in the builders module for the
        available values and the options that control additional
        behaviour for the selected value.
Example:
.. literalinclude::
/../../tests/parameters/fixtures/copyartifact-build-selector001.yaml
:language: yaml
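
    A minimal inline sketch (``last-successful`` is assumed here to be
    one of the ``which-build`` values from the builders module)::

        parameters:
          - copyartifact-build-selector:
              name: BUILD_SELECTOR
              description: "Which build to copy artifacts from"
              which-build: last-successful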
"""
t = XML.SubElement(xml_parent, 'hudson.plugins.copyartifact.'
'BuildSelectorParameter')
try:
name = data['name']
except KeyError:
raise MissingAttributeError('name')
XML.SubElement(t, 'name').text = name
XML.SubElement(t, 'description').text = data.get('description', '')
copyartifact_build_selector(t, data, 'defaultSelector')
def maven_metadata_param(registry, xml_parent, data):
"""yaml: maven-metadata
This parameter allows the resolution of maven artifact versions
by contacting the repository and reading the maven-metadata.xml.
Requires the Jenkins :jenkins-wiki:`Maven Metadata Plugin
<Maven+Metadata+Plugin>`.
:arg str name: Name of the parameter
:arg str description: Description of the parameter (optional)
:arg str repository-base-url: URL from where you retrieve your artifacts
(default '')
:arg str repository-username: Repository's username if authentication is
required. (default '')
:arg str repository-password: Repository's password if authentication is
required. (default '')
:arg str artifact-group-id: Unique project identifier (default '')
:arg str artifact-id: Name of the artifact without version (default '')
    :arg str packaging: Artifact packaging option. Could be something such
        as jar, zip or pom. (default '')
:arg str versions-filter: Specify a regular expression which will be used
to filter the versions which are actually displayed when triggering a
new build. (default '')
:arg str default-value: For features such as SVN polling a default value
is required. If job will only be started manually, this field is not
necessary. (default '')
:arg str maximum-versions-to-display: The maximum number of versions to
display in the drop-down. Any non-number value as well as 0 or negative
values will default to all. (default 10)
:arg str sorting-order: ascending or descending
(default descending)
Example:
.. literalinclude::
/../../tests/parameters/fixtures/maven-metadata-param001.yaml
:language: yaml
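
    A minimal inline sketch (repository URL and artifact coordinates are
    illustrative)::

        parameters:
          - maven-metadata:
              name: ARTIFACT_VERSION
              repository-base-url: 'http://repo.example.com/maven2'
              artifact-group-id: com.example
              artifact-id: example-app
              packaging: jar
              sorting-order: descending
              maximum-versions-to-display: 5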
"""
pdef = base_param(registry, xml_parent, data, False,
'eu.markov.jenkins.plugin.mvnmeta.'
'MavenMetadataParameterDefinition')
XML.SubElement(pdef, 'repoBaseUrl').text = data.get('repository-base-url',
'')
XML.SubElement(pdef, 'groupId').text = data.get('artifact-group-id', '')
XML.SubElement(pdef, 'artifactId').text = data.get('artifact-id', '')
XML.SubElement(pdef, 'packaging').text = data.get('packaging', '')
XML.SubElement(pdef, 'defaultValue').text = data.get('default-value', '')
XML.SubElement(pdef, 'versionFilter').text = data.get('versions-filter',
'')
sort_order = data.get('sorting-order', 'descending').lower()
sort_dict = {'descending': 'DESC',
'ascending': 'ASC'}
if sort_order not in sort_dict:
        raise InvalidAttributeError('sorting-order', sort_order,
                                    sort_dict.keys())
XML.SubElement(pdef, 'sortOrder').text = sort_dict[sort_order]
XML.SubElement(pdef, 'maxVersions').text = str(data.get(
'maximum-versions-to-display', 10))
XML.SubElement(pdef, 'username').text = data.get('repository-username', '')
XML.SubElement(pdef, 'password').text = data.get('repository-password', '')
def hidden_param(registry, xml_parent, data):
"""yaml: hidden
    Allows you to use parameters that are hidden from the 'Build with
    Parameters' page.
Requires the Jenkins :jenkins-wiki:`Hidden Parameter Plugin
<Hidden+Parameter+Plugin>`.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example:
.. literalinclude::
/../../tests/parameters/fixtures/hidden-param001.yaml
:language: yaml
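
    A minimal inline sketch (values are illustrative)::

        parameters:
          - hidden:
              name: FOO
              default: bar
              description: "A hidden parameter named FOO"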
"""
    base_param(registry, xml_parent, data, True,
               'com.wangyin.parameter.WHideParameterDefinition')
def random_string_param(registry, xml_parent, data):
"""yaml: random-string
This parameter generates a random string and passes it to the
build, preventing Jenkins from combining queued builds.
Requires the Jenkins :jenkins-wiki:`Random String Parameter Plugin
<Random+String+Parameter+Plugin>`.
:arg str name: Name of the parameter
:arg str description: Description of the parameter (default '')
:arg str failed-validation-message: Failure message to display for invalid
input (default '')
Example:
.. literalinclude::
/../../tests/parameters/fixtures/random-string-param001.yaml
:language: yaml
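
    A minimal inline sketch (the name is illustrative)::

        parameters:
          - random-string:
              name: RANDOM_SEED
              description: "Random value to keep queued builds distinct"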
"""
pdef = XML.SubElement(xml_parent,
'hudson.plugins.random__string__parameter.'
'RandomStringParameterDefinition')
if 'name' not in data:
raise JenkinsJobsException('random-string must have a name parameter.')
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
XML.SubElement(pdef, 'failedValidationMessage').text = data.get(
'failed-validation-message', '')
class Parameters(jenkins_jobs.modules.base.Base):
sequence = 21
component_type = 'parameter'
component_list_type = 'parameters'
def gen_xml(self, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
parameters = data.get('parameters', [])
hmodel = 'hudson.model.'
if parameters:
# The conditionals here are to work around the extended_choice
# parameter also being definable in the properties module. This
# usage has been deprecated but not removed. Because it may have
# added these elements before us, we need to check if they already
# exist, and only add them if they're missing.
pdefp = properties.find(hmodel + 'ParametersDefinitionProperty')
if pdefp is None:
pdefp = XML.SubElement(properties,
hmodel + 'ParametersDefinitionProperty')
pdefs = pdefp.find('parameterDefinitions')
if pdefs is None:
pdefs = XML.SubElement(pdefp, 'parameterDefinitions')
for param in parameters:
                self.registry.dispatch('parameter', pdefs, param)


# --- end of file: jenkins_jobs/modules/parameters.py ---
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.modules.base
def http_endpoint(registry, xml_parent, data):
"""yaml: http
Defines an HTTP notification endpoint.
Requires the Jenkins :jenkins-wiki:`Notification Plugin
<Notification+Plugin>`.
:arg str format: notification payload format, JSON (default) or XML
:arg str event: job events that trigger notifications: started,
completed, finalized or all (default)
:arg str url: URL of the endpoint
:arg str timeout: Timeout in milliseconds for sending notification
request (30 seconds by default)
    :arg str log: Number of lines of log messages to send (0 by default).
        Use -1 for all (use with caution).
Example:
.. literalinclude:: \
/../../tests/notifications/fixtures/http-endpoint002.yaml
:language: yaml
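
    A minimal inline sketch (the endpoint URL is illustrative)::

        notifications:
          - http:
              url: 'http://example.com/jenkins-endpoint'
              format: JSON
              event: completed
              timeout: 30000
              log: 0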
"""
endpoint_element = XML.SubElement(xml_parent,
'com.tikal.hudson.plugins.notification.'
'Endpoint')
supported_formats = ['JSON', 'XML']
fmt = data.get('format', 'JSON').upper()
if fmt not in supported_formats:
raise JenkinsJobsException(
"format must be one of %s" %
", ".join(supported_formats))
else:
XML.SubElement(endpoint_element, 'format').text = fmt
XML.SubElement(endpoint_element, 'protocol').text = 'HTTP'
supported_events = ['started', 'completed', 'finalized', 'all']
event = data.get('event', 'all').lower()
if event not in supported_events:
raise JenkinsJobsException(
"event must be one of %s" %
", ".join(supported_events))
else:
XML.SubElement(endpoint_element, 'event').text = event
XML.SubElement(endpoint_element, 'timeout').text = str(data.get('timeout',
30000))
XML.SubElement(endpoint_element, 'url').text = data['url']
XML.SubElement(endpoint_element, 'loglines').text = str(data.get('log', 0))
class Notifications(jenkins_jobs.modules.base.Base):
sequence = 22
component_type = 'notification'
component_list_type = 'notifications'
def gen_xml(self, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
notifications = data.get('notifications', [])
if notifications:
notify_element = XML.SubElement(properties,
'com.tikal.hudson.plugins.'
'notification.'
'HudsonNotificationProperty')
endpoints_element = XML.SubElement(notify_element, 'endpoints')
for endpoint in notifications:
self.registry.dispatch('notification',
                                       endpoints_element, endpoint)


# --- end of file: jenkins_jobs/modules/notifications.py ---
# Enabling hipchat notifications on a job requires specifying the hipchat
# config in job properties, and adding the hipchat notifier to the job's
# publishers list.
# The publisher configuration contains extra details not specified per job:
# - the hipchat authorisation token.
# - the jenkins server url.
# - a default room name/id.
# This complicates matters somewhat since the sensible place to store these
# details is in the global config file.
# The global config object is therefore passed down to the registry object,
# and this object is passed to the HipChat() class initialiser.
import logging
import pkg_resources
import sys
import xml.etree.ElementTree as XML
from six.moves import configparser
import jenkins_jobs.errors
import jenkins_jobs.modules.base
logger = logging.getLogger(__name__)
class HipChat(jenkins_jobs.modules.base.Base):
sequence = 80
def __init__(self, registry):
self.authToken = None
self.jenkinsUrl = None
self.registry = registry
def _load_global_data(self):
"""Load data from the global config object.
This is done lazily to avoid looking up the '[hipchat]' section
unless actually required.
"""
jjb_config = self.registry.jjb_config
if not self.authToken:
try:
self.authToken = jjb_config.get_plugin_config('hipchat',
'authtoken')
# Require that the authtoken is non-null
if self.authToken == '':
raise jenkins_jobs.errors.JenkinsJobsException(
"Hipchat authtoken must not be a blank string")
except (configparser.NoSectionError,
jenkins_jobs.errors.JenkinsJobsException) as e:
logger.fatal("The configuration file needs a hipchat section" +
" containing authtoken:\n{0}".format(e))
sys.exit(1)
self.jenkinsUrl = jjb_config.jenkins['url']
self.sendAs = jjb_config.get_plugin_config('hipchat', 'send-as')
def gen_xml(self, xml_parent, data):
hipchat = data.get('hipchat')
if not hipchat or not hipchat.get('enabled', True):
return
self._load_global_data()
# convert for compatibility before dispatch
if 'room' in hipchat:
if 'rooms' in hipchat:
logger.warning("Ignoring deprecated 'room' as 'rooms' also "
"defined.")
else:
logger.warning("'room' is deprecated, please use 'rooms'")
hipchat['rooms'] = [hipchat['room']]
plugin_info = self.registry.get_plugin_info("Jenkins HipChat Plugin")
version = pkg_resources.parse_version(plugin_info.get('version', '0'))
if version >= pkg_resources.parse_version("0.1.9"):
publishers = xml_parent.find('publishers')
if publishers is None:
publishers = XML.SubElement(xml_parent, 'publishers')
            logger.warning(
                "'hipchat' module supports the old plugin versions <0.1.9, "
                "newer versions are supported via the 'publishers' module. "
                "Please upgrade your job definition")
return self.registry.dispatch('publisher', publishers, data)
else:
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
pdefhip = XML.SubElement(properties,
'jenkins.plugins.hipchat.'
'HipChatNotifier_-HipChatJobProperty')
room = XML.SubElement(pdefhip, 'room')
if 'rooms' in hipchat:
room.text = ",".join(hipchat['rooms'])
            # Handle backwards compatibility for 'start-notify', but also
            # add an element of standardization with notify-*
if hipchat.get('start-notify'):
logger.warning("'start-notify' is deprecated, please use "
"'notify-start'")
XML.SubElement(pdefhip, 'startNotification').text = str(
hipchat.get('notify-start', hipchat.get('start-notify',
False))).lower()
if version >= pkg_resources.parse_version("0.1.5"):
XML.SubElement(pdefhip, 'notifySuccess').text = str(
hipchat.get('notify-success', False)).lower()
XML.SubElement(pdefhip, 'notifyAborted').text = str(
hipchat.get('notify-aborted', False)).lower()
XML.SubElement(pdefhip, 'notifyNotBuilt').text = str(
hipchat.get('notify-not-built', False)).lower()
XML.SubElement(pdefhip, 'notifyUnstable').text = str(
hipchat.get('notify-unstable', False)).lower()
XML.SubElement(pdefhip, 'notifyFailure').text = str(
hipchat.get('notify-failure', False)).lower()
XML.SubElement(pdefhip, 'notifyBackToNormal').text = str(
hipchat.get('notify-back-to-normal', False)).lower()
publishers = xml_parent.find('publishers')
if publishers is None:
publishers = XML.SubElement(xml_parent, 'publishers')
hippub = XML.SubElement(publishers,
'jenkins.plugins.hipchat.HipChatNotifier')
if version >= pkg_resources.parse_version("0.1.8"):
XML.SubElement(hippub, 'buildServerUrl').text = self.jenkinsUrl
XML.SubElement(hippub, 'sendAs').text = self.sendAs
else:
XML.SubElement(hippub, 'jenkinsUrl').text = self.jenkinsUrl
XML.SubElement(hippub, 'authToken').text = self.authToken
# The room specified here is the default room. The default is
# redundant in this case since a room must be specified. Leave empty.
            XML.SubElement(hippub, 'room').text = ''


# --- end of file: jenkins_jobs/modules/hipchat_notif.py ---
# Base class for a jenkins_jobs module
import xml.etree.ElementTree as XML
def add_nonblank_xml_subelement(parent, tag, value):
"""
Adds an XML SubElement with the name tag to parent if value is a non-empty
string
"""
if value is not None and value != '':
XML.SubElement(parent, tag).text = value
class Base(object):
"""
A base class for a Jenkins Job Builder Module.
The module is initialized before any YAML is parsed.
:arg ModuleRegistry registry: the global module registry.
"""
#: The sequence number for the module. Modules are invoked in the
#: order of their sequence number in order to produce consistently
#: ordered XML output.
sequence = 10
#: The component type for components of this module. This will be
#: used to look for macros (they are defined singularly, and should
#: not be plural).
#: Set both component_type and component_list_type to None if module
#: doesn't have components.
component_type = None
#: The component list type will be used to look up possible
#: implementations of the component type via entry points (entry
#: points provide a list of components, so it should be plural).
#: Set both component_type and component_list_type to None if module
#: doesn't have components.
component_list_type = None
def __init__(self, registry):
self.registry = registry
def handle_data(self, job_data):
"""This method is called before any XML is generated. By
overriding this method, a module may arbitrarily modify a data
structure which will probably be the JJB YamlParser's intermediate data
representation. If it has changed the data structure at all, it must
return ``True``, otherwise, it must return ``False``.
:arg dict job_data: the intermediate representation of job data
loaded from JJB Yaml files without variables interpolation or other
yaml expansions.
:rtype: boolean
"""
return False
def gen_xml(self, xml_parent, data):
"""Update the XML element tree based on YAML data. Override
this method to add elements to the XML output. Create new
Element objects and add them to the xml_parent. The YAML data
structure must not be modified.
        :arg Element xml_parent: the parent XML element
:arg dict data: the YAML data structure
"""
        pass


# --- end of file: jenkins_jobs/modules/base.py ---
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
"""
The view pipeline module handles creating Jenkins Build Pipeline views.
To create a pipeline view specify ``pipeline`` in the ``view-type``
attribute of the :ref:`View-pipeline` definition.
Requires the Jenkins
:jenkins-wiki:`Build Pipeline Plugin <build+pipeline+plugin>`.
:View Parameters:
* **name** (`str`): The name of the view.
* **view-type** (`str`): The type of view.
* **description** (`str`): A description of the view. (optional)
* **filter-executors** (`bool`): Show only executors that can
execute the included views. (default false)
* **filter-queue** (`bool`): Show only included jobs in builder
queue. (default false)
* **first-job** (`str`): Parent Job in the view.
* **no-of-displayed-builds** (`str`): Number of builds to display.
(default 1)
* **title** (`str`): Build view title. (optional)
    * **link-style** (`str`): Console output link style. Can be
      'Lightbox' or 'New Window'. (default Lightbox)
    * **css-Url** (`str`): URL for custom CSS files (optional)
    * **latest-job-only** (`bool`): Trigger only latest job.
      (default false)
    * **manual-trigger** (`bool`): Always allow manual trigger.
      (default false)
    * **show-parameters** (`bool`): Show pipeline parameters.
      (default false)
    * **parameters-in-headers** (`bool`): Show pipeline parameters in
      headers. (default false)
    * **start-with-parameters** (`bool`): Use the 'Starts with
      parameters' option. (default false)
    * **refresh-frequency** (`str`): Frequency to refresh in seconds.
      (default '3')
    * **definition-header** (`bool`): Show pipeline definition header.
      (default false)
Example:
.. literalinclude::
/../../tests/views/fixtures/pipeline_view001.yaml
Example:
.. literalinclude::
/../../tests/views/fixtures/pipeline_view002.yaml
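
A minimal inline sketch (job and view names are illustrative, assuming
the standard top-level ``view`` definition)::

    - view:
        name: pipeline-view
        view-type: pipeline
        first-job: parent-job
        no-of-displayed-builds: 5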
"""
class Pipeline(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
linktypes = ['Lightbox', 'New Window']
root = XML.Element('au.com.centrumsystems.hudson.'
'plugin.buildpipeline.BuildPipelineView',
{'plugin': 'build-pipeline-plugin'})
XML.SubElement(root, 'name').text = data['name']
desc_text = data.get('description', None)
if desc_text is not None:
XML.SubElement(root, 'description').text = desc_text
filterExecutors = data.get('filter-executors', False)
FE_element = XML.SubElement(root, 'filterExecutors')
FE_element.text = 'true' if filterExecutors else 'false'
filterQueue = data.get('filter-queue', False)
FQ_element = XML.SubElement(root, 'filterQueue')
FQ_element.text = 'true' if filterQueue else 'false'
XML.SubElement(root, 'properties',
{'class': 'hudson.model.View$PropertyList'})
GBurl = ('au.com.centrumsystems.hudson.plugin.buildpipeline.'
'DownstreamProjectGridBuilder')
gridBuilder = XML.SubElement(root, 'gridBuilder', {'class': GBurl})
jobname = data.get('first-job', '')
XML.SubElement(gridBuilder, 'firstJob').text = jobname
builds = str(data.get('no-of-displayed-builds', 1))
XML.SubElement(root, 'noOfDisplayedBuilds').text = builds
title = data.get('title', None)
BVT_element = XML.SubElement(root, 'buildViewTitle')
if title is not None:
BVT_element.text = title
linkStyle = data.get('link-style', 'Lightbox')
LS_element = XML.SubElement(root, 'consoleOutputLinkStyle')
if linkStyle in linktypes:
LS_element.text = linkStyle
else:
LS_element.text = 'Lightbox'
cssUrl = data.get('css-Url', None)
CU_element = XML.SubElement(root, 'cssUrl')
if cssUrl is not None:
CU_element.text = cssUrl
latest_job_only = data.get('latest-job-only', False)
OLJ_element = XML.SubElement(root, 'triggerOnlyLatestJob')
OLJ_element.text = 'true' if latest_job_only else 'false'
manual_trigger = data.get('manual-trigger', False)
AMT_element = XML.SubElement(root, 'alwaysAllowManualTrigger')
AMT_element.text = 'true' if manual_trigger else 'false'
show_parameters = data.get('show-parameters', False)
PP_element = XML.SubElement(root, 'showPipelineParameters')
PP_element.text = 'true' if show_parameters else 'false'
parameters_in_headers = data.get('parameters-in-headers', False)
PIH_element = XML.SubElement(root, 'showPipelineParametersInHeaders')
PIH_element.text = 'true' if parameters_in_headers else 'false'
start_with_parameters = data.get('start-with-parameters', False)
SWP_element = XML.SubElement(root, 'startsWithParameters')
SWP_element.text = 'true' if start_with_parameters else 'false'
refresh_frequency = str(data.get('refresh-frequency', 3))
XML.SubElement(root, 'refreshFrequency').text = refresh_frequency
headers = data.get('definition-header', False)
DH_element = XML.SubElement(root, 'showPipelineDefinitionHeader')
DH_element.text = 'true' if headers else 'false'
        return root


# --- end of file: jenkins_jobs/modules/view_pipeline.py ---
import logging
import pkg_resources
import re
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
logger = logging.getLogger(__name__)
def gerrit_handle_legacy_configuration(data):
hyphenizer = re.compile("[A-Z]")
def hyphenize(attr):
"""Convert strings like triggerOn to trigger-on.
"""
return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(),
attr)
def convert_dict(d, old_keys):
for old_key in old_keys:
if old_key in d:
new_key = hyphenize(old_key)
logger.warning(
"'%s' is deprecated and will be removed after "
"1.0.0, please use '%s' instead", old_key, new_key)
d[new_key] = d[old_key]
del d[old_key]
convert_dict(data, [
'triggerOnPatchsetUploadedEvent',
'triggerOnChangeAbandonedEvent',
'triggerOnChangeMergedEvent',
'triggerOnChangeRestoredEvent',
'triggerOnCommentAddedEvent',
'triggerOnDraftPublishedEvent',
'triggerOnRefUpdatedEvent',
'triggerApprovalCategory',
'triggerApprovalValue',
'overrideVotes',
'gerritBuildSuccessfulVerifiedValue',
'gerritBuildFailedVerifiedValue',
'failureMessage',
'skipVote',
])
for project in data.get('projects', []):
convert_dict(project, [
'projectCompareType',
'projectPattern',
'branchCompareType',
'branchPattern',
])
mapping_obj_type = type(data)
old_format_events = mapping_obj_type(
(key, should_register) for key, should_register in six.iteritems(data)
if key.startswith('trigger-on-'))
trigger_on = data.setdefault('trigger-on', [])
if old_format_events:
logger.warning(
"The events: %s; which you used is/are deprecated. "
"Please use 'trigger-on' instead.",
', '.join(old_format_events))
if old_format_events and trigger_on:
raise JenkinsJobsException(
'Both, the new format (trigger-on) and old format (trigger-on-*) '
'gerrit events format found. Please use either the new or the old '
'format of trigger events definition.')
trigger_on.extend(event_name[len('trigger-on-'):]
for event_name, should_register
in six.iteritems(old_format_events) if should_register)
for idx, event in enumerate(trigger_on):
if event == 'comment-added-event':
trigger_on[idx] = events = mapping_obj_type()
try:
events['comment-added-event'] = mapping_obj_type((
('approval-category', data['trigger-approval-category']),
('approval-value', data['trigger-approval-value'])
))
except KeyError:
raise JenkinsJobsException(
'The comment-added-event trigger requires which approval '
'category and value you want to trigger the job. '
'It should be specified by the approval-category '
'and approval-value properties.')
def build_gerrit_triggers(xml_parent, data):
available_simple_triggers = {
'change-abandoned-event': 'PluginChangeAbandonedEvent',
'change-merged-event': 'PluginChangeMergedEvent',
'change-restored-event': 'PluginChangeRestoredEvent',
'draft-published-event': 'PluginDraftPublishedEvent',
'patchset-uploaded-event': 'PluginPatchsetCreatedEvent',
'patchset-created-event': 'PluginPatchsetCreatedEvent',
'ref-updated-event': 'PluginRefUpdatedEvent',
}
tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \
'hudsontrigger.events'
trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents')
for event in data.get('trigger-on', []):
if isinstance(event, six.string_types):
tag_name = available_simple_triggers.get(event)
if event == 'patchset-uploaded-event':
logger.warning(
"'%s' is deprecated. Use 'patchset-created-event' "
"format instead.", event)
if not tag_name:
                known = ', '.join(list(available_simple_triggers.keys())
                                  + ['comment-added-event',
                                     'comment-added-contains-event'])
msg = ("The event '%s' under 'trigger-on' is not one of the "
"known: %s.") % (event, known)
raise JenkinsJobsException(msg)
XML.SubElement(trigger_on_events,
'%s.%s' % (tag_namespace, tag_name))
else:
if 'patchset-created-event' in event.keys():
pce = event['patchset-created-event']
pc = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent'))
XML.SubElement(pc, 'excludeDrafts').text = str(
pce.get('exclude-drafts', False)).lower()
XML.SubElement(pc, 'excludeTrivialRebase').text = str(
pce.get('exclude-trivial-rebase', False)).lower()
XML.SubElement(pc, 'excludeNoCodeChange').text = str(
pce.get('exclude-no-code-change', False)).lower()
if 'comment-added-event' in event.keys():
comment_added_event = event['comment-added-event']
cadded = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace, 'PluginCommentAddedEvent'))
XML.SubElement(cadded, 'verdictCategory').text = \
comment_added_event['approval-category']
XML.SubElement(
cadded,
'commentAddedTriggerApprovalValue').text = \
str(comment_added_event['approval-value'])
if 'comment-added-contains-event' in event.keys():
comment_added_event = event['comment-added-contains-event']
caddedc = XML.SubElement(
trigger_on_events,
'%s.%s' % (tag_namespace,
'PluginCommentAddedContainsEvent'))
XML.SubElement(caddedc, 'commentAddedCommentContains').text = \
comment_added_event['comment-contains-value']
def build_gerrit_skip_votes(xml_parent, data):
outcomes = [('successful', 'onSuccessful'),
('failed', 'onFailed'),
('unstable', 'onUnstable'),
('notbuilt', 'onNotBuilt')]
skip_vote_node = XML.SubElement(xml_parent, 'skipVote')
skip_vote = data.get('skip-vote', {})
for result_kind, tag_name in outcomes:
if skip_vote.get(result_kind, False):
XML.SubElement(skip_vote_node, tag_name).text = 'true'
else:
XML.SubElement(skip_vote_node, tag_name).text = 'false'
def gerrit(registry, xml_parent, data):
"""yaml: gerrit
Trigger on a Gerrit event.
Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>`
version >= 2.6.0.
:arg list trigger-on: Events to react on. Please use either the new
**trigger-on**, or the old **trigger-on-*** events definitions. You
cannot use both at once.
.. _trigger_on:
:Trigger on:
* **patchset-created-event** (`dict`) -- Trigger upon patchset
creation.
:Patchset created:
* **exclude-drafts** (`bool`) -- exclude drafts (default false)
* **exclude-trivial-rebase** (`bool`) -- exclude trivial rebase
(default false)
* **exclude-no-code-change** (`bool`) -- exclude no code change
(default false)
              Excluding drafts, trivial rebases and no-code changes
              requires Gerrit Trigger Plugin version >= 2.12.0
* **patchset-uploaded-event** -- Trigger upon patchset creation
            (this is an alias for `patchset-created-event`).
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
* **change-abandoned-event** -- Trigger on patchset abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0.
* **change-merged-event** -- Trigger on change merged
* **change-restored-event** -- Trigger on change restored. Requires
Gerrit Trigger Plugin version >= 2.8.0
* **draft-published-event** -- Trigger on draft published event.
* **ref-updated-event** -- Trigger on ref-updated.
* **comment-added-event** (`dict`) -- Trigger on comment added.
:Comment added:
* **approval-category** (`str`) -- Approval (verdict) category
(for example 'APRV', 'CRVW', 'VRIF' -- see `Gerrit access
control
<http://gerrit.googlecode.com/svn/documentation/2.1/
access-control.html#categories>`_
* **approval-value** -- Approval value for the comment added.
* **comment-added-contains-event** (`dict`) -- Trigger on comment
added contains Regular Expression.
:Comment added contains:
* **comment-contains-value** (`str`) -- Comment contains
Regular Expression value.
:arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload.
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-abandoned-event: Trigger on change abandoned.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-merged-event: Trigger on change merged
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-change-restored-event: Trigger on change restored.
Requires Gerrit Trigger Plugin version >= 2.8.0
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-comment-added-event: Trigger on comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-draft-published-event: Trigger on draft published
event
.. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.
:arg bool trigger-on-ref-updated-event: Trigger on ref-updated
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg str trigger-approval-category: Approval category for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg int trigger-approval-value: Approval value for comment added
.. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.
:arg bool override-votes: Override default vote values
:arg int gerrit-build-started-verified-value: Started ''Verified'' value
:arg int gerrit-build-successful-verified-value: Successful ''Verified''
value
:arg int gerrit-build-failed-verified-value: Failed ''Verified'' value
:arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value
:arg int gerrit-build-notbuilt-verified-value: Not built ''Verified''
value
:arg int gerrit-build-started-codereview-value: Started ''CodeReview''
value
:arg int gerrit-build-successful-codereview-value: Successful
''CodeReview'' value
:arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value
:arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview''
value
:arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview''
value
:arg str failure-message: Message to leave on failure (default '')
:arg str successful-message: Message to leave on success (default '')
:arg str unstable-message: Message to leave when unstable (default '')
:arg str notbuilt-message: Message to leave when not built (default '')
:arg str failure-message-file: Sets the filename within the workspace from
which to retrieve the unsuccessful review message. (optional)
:arg list projects: list of projects to match
:Project: * **project-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP''
* **project-pattern** (`str`) -- Project name pattern to match
* **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP'' (not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
* **branch-pattern** (`str`) -- Branch name pattern to match
(not used if `branches` list is specified)
.. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.
.. _branches:
* **branches** (`list`) -- List of branches to match
(optional)
:Branch: * **branch-compare-type** (`str`) -- ''PLAIN'',
''ANT'' or ''REG_EXP'' (optional) (default
''PLAIN'')
* **branch-pattern** (`str`) -- Branch name pattern
to match
* **file-paths** (`list`) -- List of file paths to match
(optional)
:File Path: * **compare-type** (`str`) -- ''PLAIN'', ''ANT''
or ''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- File path pattern to
match
* **forbidden-file-paths** (`list`) -- List of file paths to
skip triggering (optional)
:Forbidden File Path: * **compare-type** (`str`) --
''PLAIN'', ''ANT'' or ''REG_EXP'' (optional)
(default ''PLAIN'')
* **pattern** (`str`) -- File path pattern to
match
* **topics** (`list`) -- List of topics to match
(optional)
:Topic: * **compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
''REG_EXP'' (optional) (default ''PLAIN'')
* **pattern** (`str`) -- Topic name pattern to
match
* **disable-strict-forbidden-file-verification** (`bool`) --
Enabling this option will allow an event to trigger a
build if the event contains BOTH one or more wanted file
paths AND one or more forbidden file paths. In other
words, with this option, the build will not get
triggered if the change contains only forbidden files,
otherwise it will get triggered. Requires plugin
version >= 2.16.0 (default false)
:arg dict skip-vote: map of build outcomes for which Jenkins must skip
vote. Requires Gerrit Trigger Plugin version >= 2.7.0
:Outcome: * **successful** (`bool`)
* **failed** (`bool`)
* **unstable** (`bool`)
* **notbuilt** (`bool`)
:arg bool silent: When silent mode is on there will be no communication
back to Gerrit, i.e. no build started/failed/successful approve
messages etc. If other non-silent jobs are triggered by the same
Gerrit event as this job, the result of this job's build will not be
counted in the end result of the other jobs. (default false)
:arg bool silent-start: Sets silent start mode to on or off. When silent
start mode is on there will be no 'build started' messages sent back
to Gerrit. (default false)
:arg bool escape-quotes: escape quotes in the values of Gerrit change
parameters (default true)
:arg bool no-name-and-email: Do not pass compound 'name and email'
parameters (default false)
:arg bool readable-message: If parameters regarding multiline text,
e.g. commit message, should be as human readable or not. If false,
those parameters are Base64 encoded to keep environment variables
clean. (default false)
:arg str dependency-jobs: All jobs on which this job depends. If a commit
should trigger both a dependency and this job, the dependency will be
built first. Use commas to separate job names. Beware of cyclic
dependencies. (optional)
:arg str notification-level: Defines to whom email notifications should be
sent. This can either be nobody ('NONE'), the change owner ('OWNER'),
reviewers and change owner ('OWNER_REVIEWERS'), all interested users
i.e. owning, reviewing, watching, and starring ('ALL') or server
default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT')
:arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger
(default false)
:arg str dynamic-trigger-url: if you specify this option, the Gerrit
trigger configuration will be fetched from there on a regular interval
:arg bool trigger-for-unreviewed-patches: trigger patchset-created events
for changes that were uploaded while connection to Gerrit was down
(default false). Requires Gerrit Trigger Plugin version >= 2.11.0
:arg str custom-url: Custom URL for a message sent to Gerrit. Build
details URL will be used if empty. (default '')
:arg str server-name: Name of the server to trigger on, or ''__ANY__'' to
trigger on any configured Gerrit server (default '__ANY__'). Requires
Gerrit Trigger Plugin version >= 2.11.0
You may select one or more Gerrit events upon which to trigger.
You must also supply at least one project and branch, optionally
more. If you select the comment-added trigger, you should also
indicate which approval category and value you want to trigger the
job.
Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to
configure Gerrit Trigger Plugin, instead of hyphenated-keys. While still
    supported, camelCase keys are deprecated and should not be used. Support
for this will be removed after 1.0.0 is released.
Example:
.. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml
:language: yaml
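
    A minimal inline sketch (project and branch patterns are
    illustrative; see the fixture above for a fuller example)::

        triggers:
          - gerrit:
              trigger-on:
                - patchset-created-event:
                    exclude-drafts: true
              projects:
                - project-compare-type: 'PLAIN'
                  project-pattern: 'myproject'
                  branches:
                    - branch-compare-type: 'ANT'
                      branch-pattern: '**'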
"""
def get_compare_type(xml_tag, compare_type):
valid_compare_types = ['PLAIN',
'ANT',
'REG_EXP']
if compare_type not in valid_compare_types:
raise InvalidAttributeError(xml_tag, compare_type,
valid_compare_types)
return compare_type
gerrit_handle_legacy_configuration(data)
projects = data.get('projects', [])
gtrig = XML.SubElement(xml_parent,
'com.sonyericsson.hudson.plugins.gerrit.trigger.'
'hudsontrigger.GerritTrigger')
XML.SubElement(gtrig, 'spec')
gprojects = XML.SubElement(gtrig, 'gerritProjects')
for project in projects:
gproj = XML.SubElement(gprojects,
'com.sonyericsson.hudson.plugins.gerrit.'
'trigger.hudsontrigger.data.GerritProject')
XML.SubElement(gproj, 'compareType').text = get_compare_type(
'project-compare-type', project.get(
'project-compare-type', 'PLAIN'))
XML.SubElement(gproj, 'pattern').text = project['project-pattern']
branches = XML.SubElement(gproj, 'branches')
project_branches = project.get('branches', [])
if 'branch-compare-type' in project and 'branch-pattern' in project:
warning = 'branch-compare-type and branch-pattern at project ' \
'level are deprecated and support will be removed ' \
'in a later version of Jenkins Job Builder; '
if project_branches:
warning += 'discarding values and using values from ' \
'branches section'
else:
warning += 'please use branches section instead'
logger.warning(warning)
if not project_branches:
project_branches = [
{'branch-compare-type': project.get(
'branch-compare-type', 'PLAIN'),
'branch-pattern': project['branch-pattern']}]
for branch in project_branches:
gbranch = XML.SubElement(
branches, 'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.Branch')
XML.SubElement(gbranch, 'compareType').text = get_compare_type(
'branch-compare-type', branch.get(
'branch-compare-type', 'PLAIN'))
XML.SubElement(gbranch, 'pattern').text = branch['branch-pattern']
project_file_paths = project.get('file-paths', [])
if project_file_paths:
fps_tag = XML.SubElement(gproj, 'filePaths')
for file_path in project_file_paths:
fp_tag = XML.SubElement(fps_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'FilePath')
XML.SubElement(fp_tag, 'compareType').text = get_compare_type(
'compare-type', file_path.get('compare-type', 'PLAIN'))
XML.SubElement(fp_tag, 'pattern').text = file_path['pattern']
project_forbidden_file_paths = project.get('forbidden-file-paths', [])
if project_forbidden_file_paths:
ffps_tag = XML.SubElement(gproj, 'forbiddenFilePaths')
for forbidden_file_path in project_forbidden_file_paths:
ffp_tag = XML.SubElement(ffps_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'FilePath')
XML.SubElement(ffp_tag, 'compareType').text = get_compare_type(
'compare-type', forbidden_file_path.get('compare-type',
'PLAIN'))
XML.SubElement(ffp_tag, 'pattern').text = \
forbidden_file_path['pattern']
topics = project.get('topics', [])
if topics:
topics_tag = XML.SubElement(gproj, 'topics')
for topic in topics:
topic_tag = XML.SubElement(topics_tag,
'com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.data.'
'Topic')
XML.SubElement(topic_tag, 'compareType').text = \
get_compare_type('compare-type', topic.get('compare-type',
'PLAIN'))
XML.SubElement(topic_tag, 'pattern').text = topic['pattern']
XML.SubElement(gproj,
'disableStrictForbiddenFileVerification').text = str(
project.get('disable-strict-forbidden-file-verification',
False)).lower()
build_gerrit_skip_votes(gtrig, data)
general_mappings = [
('silent', 'silentMode', False),
('silent-start', 'silentStartMode', False),
('escape-quotes', 'escapeQuotes', True),
('no-name-and-email', 'noNameAndEmailParameters', False),
('readable-message', 'readableMessage', False),
('dependency-jobs', 'dependencyJobsNames', ''),
]
convert_mapping_to_xml(gtrig, data, general_mappings, fail_required=True)
notification_levels = ['NONE', 'OWNER', 'OWNER_REVIEWERS', 'ALL',
'SERVER_DEFAULT']
notification_level = data.get('notification-level', 'SERVER_DEFAULT')
if notification_level not in notification_levels:
raise InvalidAttributeError('notification-level', notification_level,
notification_levels)
if notification_level == 'SERVER_DEFAULT':
XML.SubElement(gtrig, 'notificationLevel').text = ''
else:
XML.SubElement(gtrig, 'notificationLevel').text = notification_level
XML.SubElement(gtrig, 'dynamicTriggerConfiguration').text = str(
data.get('dynamic-trigger-enabled', False))
XML.SubElement(gtrig, 'triggerConfigURL').text = str(
data.get('dynamic-trigger-url', ''))
XML.SubElement(gtrig, 'allowTriggeringUnreviewedPatches').text = str(
data.get('trigger-for-unreviewed-patches', False)).lower()
build_gerrit_triggers(gtrig, data)
override = str(data.get('override-votes', False)).lower()
if override == 'true':
for yamlkey, xmlkey in [('gerrit-build-started-verified-value',
'gerritBuildStartedVerifiedValue'),
('gerrit-build-successful-verified-value',
'gerritBuildSuccessfulVerifiedValue'),
('gerrit-build-failed-verified-value',
'gerritBuildFailedVerifiedValue'),
('gerrit-build-unstable-verified-value',
'gerritBuildUnstableVerifiedValue'),
('gerrit-build-notbuilt-verified-value',
'gerritBuildNotBuiltVerifiedValue'),
('gerrit-build-started-codereview-value',
'gerritBuildStartedCodeReviewValue'),
('gerrit-build-successful-codereview-value',
'gerritBuildSuccessfulCodeReviewValue'),
('gerrit-build-failed-codereview-value',
'gerritBuildFailedCodeReviewValue'),
('gerrit-build-unstable-codereview-value',
'gerritBuildUnstableCodeReviewValue'),
('gerrit-build-notbuilt-codereview-value',
'gerritBuildNotBuiltCodeReviewValue')]:
if data.get(yamlkey) is not None:
# str(int(x)) makes input values like '+1' work
XML.SubElement(gtrig, xmlkey).text = str(
int(data.get(yamlkey)))
message_mappings = [
('start-message', 'buildStartMessage', ''),
('failure-message', 'buildFailureMessage', ''),
('successful-message', 'buildSuccessfulMessage', ''),
('unstable-message', 'buildUnstableMessage', ''),
('notbuilt-message', 'buildNotBuiltMessage', ''),
('failure-message-file', 'buildUnsuccessfulFilepath', ''),
('custom-url', 'customUrl', ''),
('server-name', 'serverName', '__ANY__'),
]
convert_mapping_to_xml(gtrig, data, message_mappings, fail_required=True)
def pollscm(registry, xml_parent, data):
"""yaml: pollscm
Poll the SCM to determine if there has been a change.
:Parameter: the polling interval (cron syntax)
.. deprecated:: 1.3.0. Please use :ref:`cron <cron>`.
.. _cron:
:arg string cron: the polling interval (cron syntax, required)
:arg bool ignore-post-commit-hooks: Ignore changes notified by SCM
post-commit hooks. The subversion-plugin supports this since
version 1.44. (default false)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/pollscm002.yaml
:language: yaml
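
    A minimal inline sketch using the new syntax (the cron value is
    illustrative)::

        triggers:
          - pollscm:
              cron: 'H/15 * * * *'
              ignore-post-commit-hooks: true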
"""
try:
cron = data['cron']
ipch = str(data.get('ignore-post-commit-hooks', False)).lower()
except KeyError as e:
# ensure specific error on the attribute not being set is raised
# for new format
raise MissingAttributeError(e)
except TypeError:
# To keep backward compatibility
logger.warning(
"Your pollscm usage is deprecated, please use"
" the syntax described in the documentation"
" instead")
cron = data
ipch = 'false'
if not cron and cron != '':
raise InvalidAttributeError('cron', cron)
scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.SCMTrigger')
XML.SubElement(scmtrig, 'spec').text = cron
XML.SubElement(scmtrig, 'ignorePostCommitHooks').text = ipch
def build_content_type(xml_parent, entries, namespace, collection_suffix,
entry_suffix, prefix, collection_name, element_name):
content_type = XML.SubElement(
xml_parent, '{0}.{1}{2}'.format(namespace, prefix, collection_suffix))
if entries:
collection = XML.SubElement(content_type, collection_name)
for entry in entries:
content_entry = XML.SubElement(
collection, '{0}.{1}{2}'.format(namespace, prefix,
entry_suffix))
XML.SubElement(content_entry, element_name).text = entry
def pollurl(registry, xml_parent, data):
"""yaml: pollurl
Trigger when the HTTP response from a URL changes.
Requires the Jenkins :jenkins-wiki:`URLTrigger Plugin <URLTrigger+Plugin>`.
:arg string cron: cron syntax of when to run (default '')
:arg string polling-node: Restrict where the polling should run.
(optional)
:arg list urls: List of URLs to monitor
:URL: * **url** (`str`) -- URL to monitor for changes (required)
* **proxy** (`bool`) -- Activate the Jenkins proxy (default false)
* **timeout** (`int`) -- Connect/read timeout in seconds
(default 300)
* **username** (`string`) -- User name for basic authentication
(optional)
* **password** (`string`) -- Password for basic authentication
(optional)
* **check-status** (`int`) -- Check for a specific HTTP status
code (optional)
* **check-etag** (`bool`) -- Check the HTTP ETag for changes
(default false)
* **check-date** (`bool`) -- Check the last modification date of
the URL (default false)
* **check-content** (`list`) -- List of content type changes to
monitor
:Content Type: * **simple** (`bool`) -- Trigger on any change to
the content of the URL (default false)
* **json** (`list`) -- Trigger on any change to
the listed JSON paths
* **text** (`list`) -- Trigger on any change to
the listed regular expressions
* **xml** (`list`) -- Trigger on any change to
the listed XPath expressions
Example:
.. literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml
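
    A minimal inline sketch (URL and schedule are illustrative)::

        triggers:
          - pollurl:
              cron: 'H/10 * * * *'
              urls:
                - url: 'http://example.com/status'
                  check-date: true
                  check-content:
                    - simple: true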
"""
namespace = 'org.jenkinsci.plugins.urltrigger.'
valid_content_types = {
'simple': ['Simple', '', '', []],
'json': ['JSON', 'jsonPaths', 'jsonPath', None],
'text': ['TEXT', 'regExElements', 'regEx', None],
'xml': ['XML', 'xPaths', 'xPath', None]
}
urltrig = XML.SubElement(xml_parent,
namespace + 'URLTrigger')
node = data.get('polling-node')
XML.SubElement(urltrig, 'spec').text = data.get('cron', '')
XML.SubElement(urltrig, 'labelRestriction').text = str(bool(node)).lower()
if node:
XML.SubElement(urltrig, 'triggerLabel').text = node
entries = XML.SubElement(urltrig, 'entries')
urls = data.get('urls', [])
if not urls:
raise JenkinsJobsException('At least one url must be provided')
for url in urls:
entry = XML.SubElement(entries, namespace + 'URLTriggerEntry')
XML.SubElement(entry, 'url').text = url['url']
XML.SubElement(entry, 'proxyActivated').text = \
str(url.get('proxy', False)).lower()
if 'username' in url:
XML.SubElement(entry, 'username').text = url['username']
if 'password' in url:
XML.SubElement(entry, 'password').text = url['password']
if 'check-status' in url:
XML.SubElement(entry, 'checkStatus').text = 'true'
XML.SubElement(entry, 'statusCode').text = \
str(url.get('check-status'))
else:
XML.SubElement(entry, 'checkStatus').text = 'false'
XML.SubElement(entry, 'statusCode').text = '200'
XML.SubElement(entry, 'timeout').text = \
str(url.get('timeout', 300))
XML.SubElement(entry, 'checkETag').text = \
str(url.get('check-etag', False)).lower()
XML.SubElement(entry, 'checkLastModificationDate').text = \
str(url.get('check-date', False)).lower()
check_content = url.get('check-content', [])
XML.SubElement(entry, 'inspectingContent').text = \
str(bool(check_content)).lower()
content_types = XML.SubElement(entry, 'contentTypes')
        for content in check_content:
            type_name = next(iter(content.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException('check-content must be one of: %s'
                                           % ', '.join(valid_content_types.
                                                       keys()))
            content_type = valid_content_types.get(type_name)
            if content[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = content[type_name]
build_content_type(content_types, sub_entries,
namespace + 'content', 'ContentType',
'ContentEntry', *content_type[0:3])
def timed(registry, xml_parent, data):
"""yaml: timed
Trigger builds at certain times.
:Parameter: when to run the job (cron syntax)
Example::
triggers:
- timed: "@midnight"
"""
scmtrig = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger')
XML.SubElement(scmtrig, 'spec').text = data
def bitbucket(registry, xml_parent, data):
"""yaml: bitbucket
Trigger a job when bitbucket repository is pushed to.
Requires the Jenkins :jenkins-wiki:`BitBucket Plugin
<BitBucket+Plugin>`.
Example:
.. literalinclude:: /../../tests/triggers/fixtures/bitbucket.yaml
"""
bbtrig = XML.SubElement(xml_parent, 'com.cloudbees.jenkins.'
'plugins.BitBucketTrigger')
XML.SubElement(bbtrig, 'spec').text = ''
def github(registry, xml_parent, data):
"""yaml: github
Trigger a job when github repository is pushed to.
Requires the Jenkins :jenkins-wiki:`GitHub Plugin <GitHub+Plugin>`.
Example::
triggers:
- github
"""
ghtrig = XML.SubElement(xml_parent, 'com.cloudbees.jenkins.'
'GitHubPushTrigger')
XML.SubElement(ghtrig, 'spec').text = ''
def github_pull_request(registry, xml_parent, data):
"""yaml: github-pull-request
Build pull requests in github and report results.
Requires the Jenkins :jenkins-wiki:`GitHub Pull Request Builder Plugin
<GitHub+pull+request+builder+plugin>`.
:arg list admin-list: the users with admin rights (optional)
:arg list white-list: users whose pull requests build (optional)
:arg list org-list: orgs whose users should be white listed (optional)
:arg bool allow-whitelist-orgs-as-admins: members of white listed orgs
will have admin rights. (default false)
:arg string cron: cron syntax of when to run (optional)
:arg string trigger-phrase: when filled, commenting this phrase
in the pull request will trigger a build (optional)
:arg bool only-trigger-phrase: only commenting the trigger phrase
in the pull request will trigger a build (default false)
:arg bool github-hooks: use github hook (default false)
:arg bool permit-all: build every pull request automatically
without asking (default false)
:arg bool auto-close-on-fail: close failed pull request automatically
(default false)
:arg list white-list-target-branches: Adding branches to this whitelist
allows you to selectively test pull requests destined for these
branches only. Supports regular expressions (e.g. 'master',
'feature-.*'). (optional)
:arg string auth-id: the auth id to use (optional)
:arg string build-desc-template: the template for build descriptions in
jenkins (optional)
:arg string status-context: the context to include on PR status comments
(optional)
:arg string triggered-status: the status message to set when the build has
been triggered (optional)
    :arg string started-status: the status message to set when the build has
been started (optional)
:arg string status-url: the status URL to set (optional)
:arg bool status-add-test-results: add test result one-liner to status
message (optional)
:arg string success-status: the status message to set if the job succeeds
(optional)
:arg string failure-status: the status message to set if the job fails
(optional)
:arg string error-status: the status message to set if the job errors
(optional)
:arg string success-comment: comment to add to the PR on a successful job
(optional)
:arg string failure-comment: comment to add to the PR on a failed job
(optional)
:arg string error-comment: comment to add to the PR on an errored job
(optional)
:arg bool cancel-builds-on-update: cancel existing builds when a PR is
updated (optional)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/github-pull-request.yaml
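
    A minimal inline sketch of the status options (all values below are
    illustrative only)::

        triggers:
          - github-pull-request:
              admin-list:
                - octocat
              cron: '* * * * *'
              status-context: 'ci/jenkins'
              success-status: 'Build succeeded'
              failure-status: 'Build failed'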
"""
ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.'
'GhprbTrigger')
XML.SubElement(ghprb, 'spec').text = data.get('cron', '')
admin_string = "\n".join(data.get('admin-list', []))
XML.SubElement(ghprb, 'adminlist').text = admin_string
XML.SubElement(ghprb, 'allowMembersOfWhitelistedOrgsAsAdmin').text = str(
data.get('allow-whitelist-orgs-as-admins', False)).lower()
white_string = "\n".join(data.get('white-list', []))
XML.SubElement(ghprb, 'whitelist').text = white_string
org_string = "\n".join(data.get('org-list', []))
XML.SubElement(ghprb, 'orgslist').text = org_string
XML.SubElement(ghprb, 'cron').text = data.get('cron', '')
build_desc_template = data.get('build-desc-template', '')
if build_desc_template:
XML.SubElement(ghprb, 'buildDescTemplate').text = str(
build_desc_template)
XML.SubElement(ghprb, 'triggerPhrase').text = \
data.get('trigger-phrase', '')
XML.SubElement(ghprb, 'onlyTriggerPhrase').text = str(
data.get('only-trigger-phrase', False)).lower()
XML.SubElement(ghprb, 'useGitHubHooks').text = str(
data.get('github-hooks', False)).lower()
XML.SubElement(ghprb, 'permitAll').text = str(
data.get('permit-all', False)).lower()
XML.SubElement(ghprb, 'autoCloseFailedPullRequests').text = str(
data.get('auto-close-on-fail', False)).lower()
white_list_target_branches = data.get('white-list-target-branches', [])
if white_list_target_branches:
ghprb_wltb = XML.SubElement(ghprb, 'whiteListTargetBranches')
for branch in white_list_target_branches:
be = XML.SubElement(ghprb_wltb, 'org.jenkinsci.plugins.'
'ghprb.GhprbBranch')
XML.SubElement(be, 'branch').text = str(branch)
auth_id = data.get('auth-id', '')
if auth_id:
XML.SubElement(ghprb, 'gitHubAuthId').text = str(auth_id)
# PR status update fields
status_context = data.get('status-context', '')
triggered_status = data.get('triggered-status', '')
started_status = data.get('started-status', '')
status_url = data.get('status-url', '')
status_add_test_results = data.get('status-add-test-results', '')
success_status = data.get('success-status', '')
failure_status = data.get('failure-status', '')
error_status = data.get('error-status', '')
    # is status handling required?
requires_status = (
status_context or
triggered_status or
started_status or
status_url or
status_add_test_results or
success_status or
failure_status or
error_status
)
# is status message handling required?
requires_status_message = (
success_status or
failure_status or
error_status
)
# is comment handling required?
success_comment = data.get('success-comment', '')
failure_comment = data.get('failure-comment', '')
error_comment = data.get('error-comment', '')
requires_job_comment = (
success_comment or
failure_comment or
error_comment
)
cancel_builds_on_update = data.get('cancel-builds-on-update', False)
# We want to have only one 'extensions' subelement, even if status
# handling, comment handling and other extensions are enabled.
if requires_status or requires_job_comment or cancel_builds_on_update:
extensions = XML.SubElement(ghprb, 'extensions')
# Both comment and status elements have this same type. Using a const is
# much easier to read than repeating the tokens for this class each time
# it's used
comment_type = 'org.jenkinsci.plugins.ghprb.extensions.comments.'
comment_type = comment_type + 'GhprbBuildResultMessage'
if requires_status:
simple_status = XML.SubElement(extensions,
'org.jenkinsci.plugins'
'.ghprb.extensions.status.'
'GhprbSimpleStatus')
if status_context:
XML.SubElement(simple_status, 'commitStatusContext').text = str(
status_context)
if triggered_status:
XML.SubElement(simple_status, 'triggeredStatus').text = str(
triggered_status)
if started_status:
XML.SubElement(simple_status, 'startedStatus').text = str(
started_status)
if status_url:
XML.SubElement(simple_status, 'statusUrl').text = str(
status_url)
if status_add_test_results:
XML.SubElement(simple_status, 'addTestResults').text = str(
status_add_test_results).lower()
if requires_status_message:
completed_elem = XML.SubElement(simple_status, 'completedStatus')
if success_status:
success_elem = XML.SubElement(completed_elem, comment_type)
XML.SubElement(success_elem, 'message').text = str(
success_status)
XML.SubElement(success_elem, 'result').text = 'SUCCESS'
if failure_status:
failure_elem = XML.SubElement(completed_elem, comment_type)
XML.SubElement(failure_elem, 'message').text = str(
failure_status)
XML.SubElement(failure_elem, 'result').text = 'FAILURE'
if error_status:
error_elem = XML.SubElement(completed_elem, comment_type)
XML.SubElement(error_elem, 'message').text = str(error_status)
XML.SubElement(error_elem, 'result').text = 'ERROR'
# job comment handling
if requires_job_comment:
build_status = XML.SubElement(extensions,
'org.jenkinsci.plugins.ghprb.extensions'
'.comments.'
'GhprbBuildStatus')
messages_elem = XML.SubElement(build_status, 'messages')
if success_comment:
success_comment_elem = XML.SubElement(messages_elem, comment_type)
XML.SubElement(success_comment_elem, 'message').text = str(
success_comment)
XML.SubElement(success_comment_elem, 'result').text = 'SUCCESS'
if failure_comment:
failure_comment_elem = XML.SubElement(messages_elem, comment_type)
XML.SubElement(failure_comment_elem, 'message').text = str(
failure_comment)
XML.SubElement(failure_comment_elem, 'result').text = 'FAILURE'
if error_comment:
error_comment_elem = XML.SubElement(messages_elem, comment_type)
XML.SubElement(error_comment_elem, 'message').text = str(
error_comment)
XML.SubElement(error_comment_elem, 'result').text = 'ERROR'
if cancel_builds_on_update:
XML.SubElement(extensions,
'org.jenkinsci.plugins.ghprb.extensions.'
'build.GhprbCancelBuildsOnUpdate')
def gitlab_merge_request(registry, xml_parent, data):
"""yaml: gitlab-merge-request
Build merge requests in gitlab and report results.
Requires the Jenkins :jenkins-wiki:`Gitlab MergeRequest Builder Plugin.
<Gitlab+Merge+Request+Builder+Plugin>`.
:arg string cron: cron syntax of when to run (required)
:arg string project-path: gitlab-relative path to project (required)
Example:
.. literalinclude:: \
/../../tests/triggers/fixtures/gitlab-merge-request.yaml
"""
ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.'
'GitlabBuildTrigger')
if not data.get('cron', None):
raise jenkins_jobs.errors.JenkinsJobsException(
'gitlab-merge-request is missing "cron"')
if not data.get('project-path', None):
raise jenkins_jobs.errors.JenkinsJobsException(
'gitlab-merge-request is missing "project-path"')
# Because of a design limitation in the GitlabBuildTrigger Jenkins plugin
# both 'spec' and '__cron' have to be set to the same value to have them
# take effect. Also, cron and projectPath are prefixed with underscores
# in the plugin, but spec is not.
XML.SubElement(ghprb, 'spec').text = data.get('cron')
XML.SubElement(ghprb, '__cron').text = data.get('cron')
XML.SubElement(ghprb, '__projectPath').text = data.get('project-path')
def gitlab(registry, xml_parent, data):
"""yaml: gitlab
Makes Jenkins act like a GitLab CI server.
Requires the Jenkins :jenkins-wiki:`GitLab Plugin
<GitLab+Plugin>`.
:arg bool trigger-push: Build on Push Events (default true)
:arg bool trigger-merge-request: Build on Merge Request Events (default
true)
:arg str trigger-open-merge-request-push: Rebuild open Merge Requests
on Push Events.
:trigger-open-merge-request-push values (< 1.1.26):
* **true** (default)
* **false**
:trigger-open-merge-request-push values (>= 1.1.26):
* **never** (default)
* **source**
* **both**
:arg bool trigger-note: Build when comment is added with defined phrase
(>= 1.2.4) (default true)
:arg str note-regex: Phrase that triggers the build (>= 1.2.4) (default
'Jenkins please retry a build')
:arg bool ci-skip: Enable skipping builds of commits that contain
[ci-skip] in the commit message (default true)
:arg bool wip-skip: Enable skipping builds of WIP Merge Requests (>= 1.2.4)
(default false)
:arg bool set-build-description: Set build description to build cause
        (e.g. Merge Request or Git Push) (default true)
:arg bool add-note-merge-request: Add note with build status on
merge requests (default true)
:arg bool add-vote-merge-request: Vote added to note with build status
on merge requests (>= 1.1.27) (default true)
:arg bool accept-merge-request-on-success: Automatically accept the Merge
Request if the build is successful (>= 1.1.27) (default false)
:arg bool add-ci-message: Add CI build status (1.1.28 - 1.2.0) (default
false)
:arg bool allow-all-branches: Allow all branches (Ignoring Filtered
Branches) (< 1.1.29) (default false)
:arg str branch-filter-type: Filter branches that can trigger a build.
Valid values and their additional attributes are described in the
`branch filter type`_ table (>= 1.1.29) (default 'All').
:arg list include-branches: Defined list of branches to include
(default [])
:arg list exclude-branches: Defined list of branches to exclude
(default [])
:arg str target-branch-regex: Regular expression to select branches
.. _`branch filter type`:
================== ====================================================
Branch filter type Description
================== ====================================================
All All branches are allowed to trigger this job.
NameBasedFilter Filter branches by name.
List source branches that are allowed to trigger a
build from a Push event or a Merge Request event. If
both fields are left empty, all branches are allowed
to trigger this job. For Merge Request events only
the target branch name is filtered out by the
**include-branches** and **exclude-branches** lists.
RegexBasedFilter Filter branches by regex
                       The target branch regex allows you to limit the
execution of this job to certain branches. Any
branch matching the specified pattern in
**target-branch-regex** triggers the job. No
filtering is performed if the field is left empty.
================== ====================================================
Example (version < 1.1.26):
.. literalinclude:: /../../tests/triggers/fixtures/gitlab001.yaml
:language: yaml
Minimal example (version >= 1.1.26):
.. literalinclude:: /../../tests/triggers/fixtures/gitlab005.yaml
:language: yaml
Full example (version >= 1.1.26):
.. literalinclude:: /../../tests/triggers/fixtures/gitlab004.yaml
:language: yaml
"""
def _add_xml(elem, name, value):
XML.SubElement(elem, name).text = value
gitlab = XML.SubElement(
xml_parent, 'com.dabsquared.gitlabjenkins.GitLabPushTrigger'
)
plugin_info = registry.get_plugin_info('GitLab Plugin')
plugin_ver = pkg_resources.parse_version(plugin_info.get('version', "0"))
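    # The GitLab plugin renamed some options and changed their accepted
    # values across releases, so pick the XML tag and valid values that
    # match the installed plugin version.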
valid_merge_request = ['never', 'source', 'both']
if plugin_ver >= pkg_resources.parse_version("1.1.26"):
mapping = [
('trigger-open-merge-request-push',
'triggerOpenMergeRequestOnPush', 'never', valid_merge_request)]
convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
else:
mapping = [
('trigger-open-merge-request-push',
'triggerOpenMergeRequestOnPush', True)]
convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
if plugin_ver < pkg_resources.parse_version('1.2.0'):
if data.get('branch-filter-type', '') == 'All':
data['branch-filter-type'] = ''
valid_filters = ['', 'NameBasedFilter', 'RegexBasedFilter']
mapping = [
('branch-filter-type', 'branchFilterName', '', valid_filters)]
convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
else:
valid_filters = ['All', 'NameBasedFilter', 'RegexBasedFilter']
mapping = [
('branch-filter-type', 'branchFilterType', 'All', valid_filters)]
convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
XML.SubElement(gitlab, 'spec').text = ''
mapping = [
('trigger-push', 'triggerOnPush', True),
('trigger-merge-request', 'triggerOnMergeRequest', True),
('trigger-note', 'triggerOnNoteRequest', True),
('note-regex', 'noteRegex', 'Jenkins please retry a build'),
('ci-skip', 'ciSkip', True),
('wip-skip', 'skipWorkInProgressMergeRequest', True),
('set-build-description', 'setBuildDescription', True),
('add-note-merge-request', 'addNoteOnMergeRequest', True),
('add-vote-merge-request', 'addVoteOnMergeRequest', True),
('accept-merge-request-on-success', 'acceptMergeRequestOnSuccess',
False),
('add-ci-message', 'addCiMessage', False),
('allow-all-branches', 'allowAllBranches', False),
('target-branch-regex', 'targetBranchRegex', '')
]
list_mapping = (
('include-branches', 'includeBranchesSpec', []),
('exclude-branches', 'excludeBranchesSpec', []),
)
convert_mapping_to_xml(gitlab, data, mapping, fail_required=True)
for yaml_name, xml_name, default_val in list_mapping:
value = ', '.join(data.get(yaml_name, default_val))
_add_xml(gitlab, xml_name, value)
def build_result(registry, xml_parent, data):
"""yaml: build-result
Configure jobB to monitor jobA build result. A build is scheduled if there
is a new build result that matches your criteria (unstable, failure, ...).
Requires the Jenkins :jenkins-wiki:`BuildResultTrigger Plugin
<BuildResultTrigger+Plugin>`.
:arg list groups: List groups of jobs and results to monitor for
:arg list jobs: The jobs to monitor (required)
:arg list results: Build results to monitor for (default success)
:arg bool combine: Combine all job information. A build will be
scheduled only if all conditions are met (default false)
:arg str cron: The cron syntax with which to poll the jobs for the
supplied result (default '')
Example::
triggers:
- build-result:
combine: true
cron: '* * * * *'
groups:
- jobs:
- foo
- example
results:
- unstable
- jobs:
- foo2
results:
- not-built
- aborted
"""
brt = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.'
'buildresulttrigger.BuildResultTrigger')
XML.SubElement(brt, 'spec').text = data.get('cron', '')
XML.SubElement(brt, 'combinedJobs').text = str(
data.get('combine', False)).lower()
jobs_info = XML.SubElement(brt, 'jobsInfo')
result_dict = {'success': 'SUCCESS',
'unstable': 'UNSTABLE',
'failure': 'FAILURE',
'not-built': 'NOT_BUILT',
'aborted': 'ABORTED'}
for group in data['groups']:
brti = XML.SubElement(jobs_info, 'org.jenkinsci.plugins.'
'buildresulttrigger.model.'
'BuildResultTriggerInfo')
if not group.get('jobs', []):
            raise jenkins_jobs.errors.\
                JenkinsJobsException('jobs is missing and is a required'
                                     ' element')
jobs_string = ",".join(group['jobs'])
XML.SubElement(brti, 'jobNames').text = jobs_string
checked_results = XML.SubElement(brti, 'checkedResults')
for result in group.get('results', ['success']):
if result not in result_dict:
raise jenkins_jobs.errors.\
JenkinsJobsException('Result entered is not valid,'
' must be one of: '
+ ', '.join(result_dict.keys()))
model_checked = XML.SubElement(checked_results, 'org.jenkinsci.'
'plugins.buildresulttrigger.model.'
'CheckedResult')
XML.SubElement(model_checked, 'checked').text = result_dict[result]
def reverse(registry, xml_parent, data):
"""yaml: reverse
This trigger can be configured in the UI using the checkbox with the
following text: 'Build after other projects are built'.
Set up a trigger so that when some other projects finish building, a new
build is scheduled for this project. This is convenient for running an
extensive test after a build is complete, for example.
This configuration complements the "Build other projects" section in the
"Post-build Actions" of an upstream project, but is preferable when you
want to configure the downstream project.
:arg str jobs: List of jobs to watch. Can be either a comma separated
list or a list.
:arg str result: Build results to monitor for between the following
options: success, unstable and failure. (default 'success').
Example:
.. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml
Example List:
.. literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml
"""
    reverseBuildTrigger = XML.SubElement(
        xml_parent, 'jenkins.triggers.ReverseBuildTrigger')
    supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
    XML.SubElement(reverseBuildTrigger, 'spec').text = ''
    jobs = data.get('jobs')
    if isinstance(jobs, list):
        jobs = ",".join(jobs)
    XML.SubElement(reverseBuildTrigger, 'upstreamProjects').text = jobs
    threshold = XML.SubElement(reverseBuildTrigger, 'threshold')
result = str(data.get('result', 'success')).upper()
if result not in supported_thresholds:
raise jenkins_jobs.errors.JenkinsJobsException(
"Choice should be one of the following options: %s." %
", ".join(supported_thresholds))
XML.SubElement(threshold, 'name').text = \
hudson_model.THRESHOLDS[result]['name']
XML.SubElement(threshold, 'ordinal').text = \
hudson_model.THRESHOLDS[result]['ordinal']
XML.SubElement(threshold, 'color').text = \
hudson_model.THRESHOLDS[result]['color']
XML.SubElement(threshold, 'completeBuild').text = \
str(hudson_model.THRESHOLDS[result]['complete']).lower()
def monitor_folders(registry, xml_parent, data):
"""yaml: monitor-folders
Configure Jenkins to monitor folders.
Requires the Jenkins :jenkins-wiki:`Filesystem Trigger Plugin
<FSTrigger+Plugin>`.
:arg str path: Folder path to poll. (default '')
    :arg list includes: Fileset includes setting that specifies the list of
        files to include. Basedir of the fileset is relative to the workspace
        root. If no value is set, all files are used. (default '')
    :arg str excludes: The 'excludes' pattern. A file that matches this mask
        will not be polled even if it matches the mask specified in the
        'includes' section. (default '')
:arg bool check-modification-date: Check last modification date.
(default true)
:arg bool check-content: Check content. (default true)
:arg bool check-fewer: Check fewer files (default true)
:arg str cron: cron syntax of when to run (default '')
Full Example:
.. literalinclude::
/../../tests/triggers/fixtures/monitor-folders-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/triggers/fixtures/monitor-folders-minimal.yaml
:language: yaml
"""
ft = XML.SubElement(xml_parent, ('org.jenkinsci.plugins.fstrigger.'
'triggers.FolderContentTrigger'))
ft.set('plugin', 'fstrigger')
mappings = [
('path', 'path', ''),
('cron', 'spec', ''),
]
convert_mapping_to_xml(ft, data, mappings, fail_required=True)
includes = data.get('includes', '')
XML.SubElement(ft, 'includes').text = ",".join(includes)
XML.SubElement(ft, 'excludes').text = data.get('excludes', '')
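    # The plugin stores these options as 'exclude' flags, so the positive
    # yaml options are negated when serialized.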
XML.SubElement(ft, 'excludeCheckLastModificationDate').text = str(
not data.get('check-modification-date', True)).lower()
XML.SubElement(ft, 'excludeCheckContent').text = str(
not data.get('check-content', True)).lower()
XML.SubElement(ft, 'excludeCheckFewerOrMoreFiles').text = str(
not data.get('check-fewer', True)).lower()
def monitor_files(registry, xml_parent, data):
"""yaml: monitor-files
Configure Jenkins to monitor files.
Requires the Jenkins :jenkins-wiki:`Filesystem Trigger Plugin
<FSTrigger+Plugin>`.
:arg list files: List of files to monitor
:File:
* **path** (`str`) -- File path to monitor. You can use a pattern
              that specifies a set of files if you don't know the real file
path. (required)
* **strategy** (`str`) -- Choose your strategy if there is more
than one matching file. Can be one of Ignore file ('IGNORE') or
Use the most recent ('LATEST'). (default 'LATEST')
* **check-content** (`list`) -- List of content changes of the
file to monitor
:Content Nature:
* **simple** (`bool`) -- Trigger on change in content of
the specified file (whatever the type file).
(default false)
* **jar** (`bool`) -- Trigger on change in content of the
specified JAR file. (default false)
* **tar** (`bool`) -- Trigger on change in content of the
specified Tar file. (default false)
* **zip** (`bool`) -- Trigger on change in content of the
specified ZIP file. (default false)
* **source-manifest** (`list`) -- Trigger on change to
MANIFEST files.
:MANIFEST File:
* **keys** (`list`) -- List of keys to inspect.
(optional)
* **all-keys** (`bool`) -- If true, take into
account all keys. (default true)
* **jar-manifest** (`list`) -- Trigger on change to
MANIFEST files (contained in jar files).
:MANIFEST File:
* **keys** (`list`) -- List of keys to inspect.
(optional)
* **all-keys** (`bool`) -- If true, take into
account all keys. (default true)
* **properties** (`list`) -- Monitor the contents of the
properties file.
:Properties File:
* **keys** (`list`) -- List of keys to inspect.
(optional)
* **all-keys** (`bool`) -- If true, take into
account all keys. (default true)
* **xml** (`list str`) -- Trigger on change to the listed
XPath expressions.
* **text** (`list str`) -- Trigger on change to the listed
regular expressions.
* **ignore-modificaton-date** (`bool`) -- If true, ignore the file
modification date. Only valid when content changes of the file
are being monitored. (default true)
:arg str cron: cron syntax of when to run (default '')
Example:
.. literalinclude:: /../../tests/triggers/fixtures/monitor-files001.yaml
:language: yaml
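
    A minimal inline sketch (the path and key below are illustrative only)::

        triggers:
          - monitor-files:
              cron: '@midnight'
              files:
                - path: '/tmp/build.properties'
                  check-content:
                    - properties:
                        - keys:
                            - 'version'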
"""
ft_prefix = 'org.jenkinsci.plugins.fstrigger.triggers.'
valid_strategies = ['LATEST', 'IGNORE']
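    # Maps each content nature to [class-name prefix, collection tag, entry
    # tag, static sub-entries]. Single-element entries mark the manifest and
    # properties style types that take 'keys'/'all-keys' options; a None
    # sub-entries value means the entries come from the user-supplied list.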
valid_content_types = {
'simple': ['Simple', '', '', []],
'jar': ['JAR', '', '', []],
'tar': ['Tar', '', '', []],
'zip': ['ZIP', '', '', []],
'source-manifest': ['SourceManifest'],
'jar-manifest': ['JARManifest'],
'properties': ['Properties'],
'xml': ['XML', 'expressions', 'expression', None],
'text': ['Text', 'regexElements', 'regex', None]
}
ft = XML.SubElement(xml_parent, ft_prefix + 'FileNameTrigger')
XML.SubElement(ft, 'spec').text = str(data.get('cron', ''))
files = data.get('files', [])
if not files:
raise JenkinsJobsException('At least one file must be provided')
files_tag = XML.SubElement(ft, 'fileInfo')
for file_info in files:
file_tag = XML.SubElement(files_tag, ft_prefix + 'FileNameTriggerInfo')
try:
XML.SubElement(file_tag,
'filePathPattern').text = file_info['path']
except KeyError:
raise MissingAttributeError('path')
strategy = file_info.get('strategy', 'LATEST')
if strategy not in valid_strategies:
raise InvalidAttributeError('strategy', strategy, valid_strategies)
XML.SubElement(file_tag, 'strategy').text = strategy
check_content = file_info.get('check-content', [])
XML.SubElement(file_tag, 'inspectingContentFile').text = str(
bool(check_content)).lower()
base_content_tag = XML.SubElement(file_tag, 'contentFileTypes')
for content in check_content:
type_name = next(iter(content.keys()))
if type_name not in valid_content_types:
raise InvalidAttributeError('check-content', type_name,
valid_content_types.keys())
content_type = valid_content_types.get(type_name)
if len(content_type) == 1:
class_name = '{0}filecontent.{1}FileContent'.format(
ft_prefix, content_type[0])
content_data = content.get(type_name)
if not content_data:
raise JenkinsJobsException("Need to specify something "
"under " + type_name)
for entry in content_data:
content_tag = XML.SubElement(base_content_tag, class_name)
keys = entry.get('keys', [])
if keys:
XML.SubElement(content_tag, 'keys2Inspect'
).text = ",".join(keys)
XML.SubElement(content_tag, 'allKeys').text = str(
entry.get('all-keys', True)).lower()
else:
if content[type_name]:
sub_entries = content_type[3]
if sub_entries is None:
sub_entries = content[type_name]
build_content_type(base_content_tag, sub_entries,
ft_prefix + 'filecontent',
'FileContent', 'FileContentEntry',
*content_type[0:3])
if bool(check_content):
XML.SubElement(file_tag,
'doNotCheckLastModificationDate').text = str(
file_info.get('ignore-modificaton-date', True)).lower()
def ivy(registry, xml_parent, data):
"""yaml: ivy
Poll with an Ivy script
Requires the Jenkins :jenkins-wiki:`IvyTrigger Plugin
<IvyTrigger+Plugin>`.
:arg str path: Path of the ivy file. (optional)
:arg str settings-path: Ivy Settings Path. (optional)
:arg list str properties-file: List of properties file path. Properties
will be injected as variables in the ivy settings file. (optional)
:arg str properties-content: Properties content. Properties will be
injected as variables in the ivy settings file. (optional)
:arg bool debug: Active debug mode on artifacts resolution. (default false)
    :arg bool download-artifacts: Download artifacts for dependencies to see
        if they have changed. (default true)
:arg bool enable-concurrent: Enable Concurrent Build. (default false)
:arg str label: Restrict where the polling should run. (default '')
:arg str cron: cron syntax of when to run (default '')
Example:
.. literalinclude:: /../../tests/triggers/fixtures/ivy.yaml
"""
it = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.ivytrigger.IvyTrigger')
mappings = [('path', 'ivyPath', None),
('settings-path', 'ivySettingsPath', None),
('properties-file', 'propertiesFilePath', None),
('properties-content', 'propertiesContent', None),
('debug', 'debug', False),
('download-artifacts', 'downloadArtifacts', True),
('enable-concurrent', 'enableConcurrentBuild', False),
('cron', 'spec', '')]
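    # Serialize booleans as lowercase strings and join list values with ';'
    # before writing them into the trigger configuration.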
for prop in mappings:
opt, xmlopt, default_val = prop[:3]
val = data.get(opt, default_val)
if val is not None:
if type(val) == bool:
val = str(val).lower()
if type(val) == list:
val = ";".join(val)
XML.SubElement(it, xmlopt).text = val
label = data.get('label')
XML.SubElement(it, 'labelRestriction').text = str(bool(label)).lower()
if label:
XML.SubElement(it, 'triggerLabel').text = label
def script(registry, xml_parent, data):
"""yaml: script
Triggers the job using shell or batch script.
Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
<ScriptTrigger+Plugin>`.
:arg str label: Restrict where the polling should run. (default '')
:arg str script: A shell or batch script. (default '')
:arg str script-file-path: A shell or batch script path. (default '')
:arg str cron: cron syntax of when to run (default '')
:arg bool enable-concurrent: Enables triggering concurrent builds.
(default false)
:arg int exit-code: If the exit code of the script execution returns this
expected exit code, a build is scheduled. (default 0)
Full Example:
.. literalinclude:: /../../tests/triggers/fixtures/script-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/triggers/fixtures/script-minimal.yaml
:language: yaml
"""
st = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scripttrigger.ScriptTrigger'
)
st.set('plugin', 'scripttrigger')
label = data.get('label')
mappings = [
('script', 'script', ''),
('script-file-path', 'scriptFilePath', ''),
('cron', 'spec', ''),
('enable-concurrent', 'enableConcurrentBuild', False),
('exit-code', 'exitCode', 0)
]
convert_mapping_to_xml(st, data, mappings, fail_required=True)
XML.SubElement(st, 'labelRestriction').text = str(bool(label)).lower()
if label:
XML.SubElement(st, 'triggerLabel').text = label
def groovy_script(registry, xml_parent, data):
"""yaml: groovy-script
Triggers the job using a groovy script.
Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
<ScriptTrigger+Plugin>`.
    :arg bool system-script: If true, run the groovy script as a system
        script; the script will have access to the same variables as the
        Groovy Console. If false, run the groovy script on the executor
        node; the script will not have access to the hudson or job model.
        (default false)
:arg str script: Content of the groovy script. If the script result is
evaluated to true, a build is scheduled. (default '')
:arg str script-file-path: Groovy script path. (default '')
:arg str property-file-path: Property file path. All properties will be set
as parameters for the triggered build. (default '')
:arg bool enable-concurrent: Enable concurrent build. (default false)
:arg str label: Restrict where the polling should run. (default '')
:arg str cron: cron syntax of when to run (default '')
Full Example:
.. literalinclude:: /../../tests/triggers/fixtures/groovy-script-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/triggers/fixtures/groovy-script-minimal.yaml
:language: yaml
"""
gst = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger'
)
gst.set('plugin', 'scripttrigger')
mappings = [
('system-script', 'groovySystemScript', False),
('script', 'groovyExpression', ''),
('script-file-path', 'groovyFilePath', ''),
('property-file-path', 'propertiesFilePath', ''),
('enable-concurrent', 'enableConcurrentBuild', False),
('cron', 'spec', ''),
]
convert_mapping_to_xml(gst, data, mappings, fail_required=True)
label = data.get('label')
XML.SubElement(gst, 'labelRestriction').text = str(bool(label)).lower()
if label:
XML.SubElement(gst, 'triggerLabel').text = label
def rabbitmq(registry, xml_parent, data):
"""yaml: rabbitmq
    Triggers a build using a remote build message in a RabbitMQ queue.
Requires the Jenkins :jenkins-wiki:`RabbitMQ Build Trigger Plugin
<RabbitMQ+Build+Trigger+Plugin>`.
:arg str token: the build token expected in the message queue (required)
Example:
.. literalinclude:: /../../tests/triggers/fixtures/rabbitmq.yaml
:language: yaml
"""
rabbitmq = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.rabbitmqbuildtrigger.'
'RemoteBuildTrigger')
XML.SubElement(rabbitmq, 'spec').text = ''
try:
        XML.SubElement(rabbitmq, 'remoteBuildToken').text = str(
            data['token'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
def parameterized_timer(parser, xml_parent, data):
"""yaml: parameterized-timer
Trigger builds with parameters at certain times.
Requires the Jenkins :jenkins-wiki:`Parameterized Scheduler Plugin
<Parameterized+Scheduler+Plugin>`.
:arg str cron: cron syntax of when to run and with which parameters
(required)
Example:
.. literalinclude::
/../../tests/triggers/fixtures/parameterized-timer001.yaml
:language: yaml
"""
param_timer = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.parameterizedscheduler.'
'ParameterizedTimerTrigger')
XML.SubElement(param_timer, 'spec').text = ''
try:
        XML.SubElement(param_timer, 'parameterizedSpecification').text = str(
            data['cron'])
except KeyError as e:
raise MissingAttributeError(e)
class Triggers(jenkins_jobs.modules.base.Base):
sequence = 50
component_type = 'trigger'
component_list_type = 'triggers'
def gen_xml(self, xml_parent, data):
triggers = data.get('triggers', [])
if not triggers:
return
trig_e = XML.SubElement(xml_parent, 'triggers', {'class': 'vector'})
for trigger in triggers:
self.registry.dispatch('trigger', trig_e, trigger) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/triggers.py | triggers.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The view list module handles creating Jenkins List views.

To create a list view specify ``list`` in the ``view-type`` attribute
to the :ref:`View-list` definition.

:View Parameters:
    * **name** (`str`): The name of the view.
    * **view-type** (`str`): The type of view.
    * **description** (`str`): A description of the view. (optional)
    * **filter-executors** (`bool`): Show only executors that can
      execute the included views. (default false)
    * **filter-queue** (`bool`): Show only included jobs in builder
      queue. (default false)
    * **job-name** (`list`): List of jobs to be included.
    * **columns** (`list`): List of columns to be shown in view.
    * **regex** (`str`): Regular expression for selecting jobs
      (optional)
    * **recurse** (`bool`): Recurse in subfolders. (default false)
    * **status-filter** (`bool`): Filter job list by enabled/disabled
      status. (optional)
"""

import xml.etree.ElementTree as XML

import jenkins_jobs.modules.base
COLUMN_DICT = {
'status': 'hudson.views.StatusColumn',
'weather': 'hudson.views.WeatherColumn',
'job': 'hudson.views.JobColumn',
'last-success': 'hudson.views.LastSuccessColumn',
'last-failure': 'hudson.views.LastFailureColumn',
'last-duration': 'hudson.views.LastDurationColumn',
'build-button': 'hudson.views.BuildButtonColumn',
'last-stable': 'hudson.views.LastStableColumn',
'configure-project': dict(
tag='jenkins.plugins.extracolumns.ConfigureProjectColumn',
attrib=dict(plugin='[email protected]'),
),
'last-build-console': dict(
tag='jenkins.plugins.extracolumns.LastBuildConsoleColumn',
attrib=dict(plugin='[email protected]'),
),
    'job-name-color': dict(
        tag='com.robestone.hudson.compactcolumns.JobNameColorColumn',
        attrib=dict(plugin='[email protected]'),
        elements=dict(colorblindHint='nohint', showColor='false',
                      showDescription='false', showLastBuild='false')
    ),
'test-result': dict(
tag='jenkins.plugins.extracolumns.TestResultColumn',
attrib=dict(plugin='[email protected]'),
elements=dict(testResultFormat='1')
),
'coverage': dict(
tag='hudson.plugins.cobertura.CoverageColumn',
attrib=dict(plugin='[email protected]'),
elements=dict(type='both')
),
    'all-statuses': dict(
        tag='com.robestone.hudson.compactcolumns.AllStatusesColumn',
        attrib=dict(plugin='[email protected]'),
        elements=dict(colorblindHint='nohint', timeAgoTypeString='DIFF',
                      onlyShowLastStatus='false', hideDays=0)
    ),
}
class List(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
root = XML.Element('hudson.model.ListView')
XML.SubElement(root, 'name').text = data['name']
desc_text = data.get('description', None)
if desc_text is not None:
XML.SubElement(root, 'description').text = desc_text
filterExecutors = data.get('filter-executors', False)
FE_element = XML.SubElement(root, 'filterExecutors')
FE_element.text = 'true' if filterExecutors else 'false'
filterQueue = data.get('filter-queue', False)
FQ_element = XML.SubElement(root, 'filterQueue')
FQ_element.text = 'true' if filterQueue else 'false'
XML.SubElement(root, 'properties',
{'class': 'hudson.model.View$PropertyList'})
jn_xml = XML.SubElement(root, 'jobNames')
jobnames = data.get('job-name', None)
XML.SubElement(jn_xml, 'comparator', {'class':
'hudson.util.CaseInsensitiveComparator'})
if jobnames is not None:
for jobname in jobnames:
XML.SubElement(jn_xml, 'string').text = str(jobname)
XML.SubElement(root, 'jobFilters')
c_xml = XML.SubElement(root, 'columns')
columns = data.get('columns', [])
for column in columns:
            if column in COLUMN_DICT:
                column_tag = COLUMN_DICT[column]
                column_attrib = {}
                column_elements = {}
                if isinstance(column_tag, dict):
                    column_attrib = column_tag.get('attrib', {})
                    column_elements = column_tag.get('elements', {})
                    column_tag = column_tag['tag']
                column_xml = XML.SubElement(c_xml, column_tag,
                                            attrib=column_attrib)
                for i_sub_tag_name, i_sub_tag_value in \
                        column_elements.items():
                    XML.SubElement(column_xml, i_sub_tag_name).text = str(
                        i_sub_tag_value)
regex = data.get('regex', None)
if regex is not None:
XML.SubElement(root, 'includeRegex').text = regex
recurse = data.get('recurse', False)
R_element = XML.SubElement(root, 'recurse')
R_element.text = 'true' if recurse else 'false'
statusfilter = data.get('status-filter', None)
if statusfilter is not None:
SF_element = XML.SubElement(root, 'statusFilter')
SF_element.text = 'true' if statusfilter else 'false'
return root | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/view_list.py | view_list.py |
import logging
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
def git(registry, xml_parent, data):
"""yaml: git
Specifies the git SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Git Plugin <Git+Plugin>`.
:arg str url: URL of the git repository
    :arg str credentials-id: ID of credential to use to connect, which is the
        last field (a 32-digit hexadecimal code) of the path of the URL
        visible after you click the credential under Jenkins Global
        credentials. (optional)
:arg str refspec: refspec to fetch (default
'+refs/heads/\*:refs/remotes/remoteName/\*')
:arg str name: name to fetch (default 'origin')
:arg list(str) remotes: list of remotes to set up (optional, only needed if
multiple remotes need to be set up)
:Remote:
* **url** (`string`) - url of remote repo
* **refspec** (`string`) - refspec to fetch (optional)
            * **credentials-id** - ID of credential to use to connect, which
              is the last field (a 32-digit hexadecimal code) of the path of
              the URL visible after you click the credential under Jenkins
              Global credentials. (optional)
:arg list(str) branches: list of branch specifiers to build (default '**')
:arg bool skip-tag: Skip tagging (default true)
.. deprecated:: 1.6.0. Please use per-build-tag extension, which has
the inverse meaning.
:arg bool clean: Clean after checkout (default false)
.. deprecated:: 1.1.1. Please use clean extension format.
:arg bool fastpoll: Use fast remote polling (default false)
:arg bool disable-submodules: Disable submodules (default false)
.. deprecated:: 1.1.1. Please use submodule extension.
:arg bool recursive-submodules: Recursively update submodules (default
false)
.. deprecated:: 1.1.1. Please use submodule extension.
:arg str git-tool: The name of the Git installation to use (default
'Default')
:arg str reference-repo: Path of the reference repo to use during clone
(optional)
:arg str browser: what repository browser to use.
:browsers supported:
* **auto** - (default)
* **assemblaweb** - https://www.assembla.com/home
* **bitbucketweb** - https://bitbucket.org/
* **cgit** - https://git.zx2c4.com/cgit/about/
* **fisheye** - https://www.atlassian.com/software/fisheye
* **gitblit** - http://gitblit.com/
* **githubweb** - https://github.com/
* **gitiles** - https://code.google.com/p/gitiles/
* **gitlab** - https://about.gitlab.com/
* **gitlist** - http://gitlist.org/
* **gitoriousweb** - https://gitorious.org/
* **gitweb** - https://git-scm.com/docs/gitweb
* **kiln** - https://www.fogcreek.com/kiln/
* **microsoft\-tfs\-2013** - |tfs_2013|
* **phabricator** - http://phabricator.org/
* **redmineweb** - http://www.redmine.org/
* **rhodecode** - https://rhodecode.com/
* **stash** - https://www.atlassian.com/software/bitbucket/server
* **viewgit** - http://viewgit.fealdia.org/
:arg str browser-url: url for the repository browser (required if browser
is not 'auto', no default)
:arg str browser-version: version of the repository browser (GitLab only,
default '0.0')
:arg str project-name: project name in Gitblit and ViewGit repobrowser
(optional)
:arg str repo-name: repository name in phabricator repobrowser (optional)
:arg str git-config-name: Configure name for Git clone (optional)
:arg str git-config-email: Configure email for Git clone (optional)
:extensions:
* **basedir** (`string`) - Location relative to the workspace root to
clone to (default workspace)
* **changelog-against** (`dict`)
* **remote** (`string`) - name of repo that contains branch to
create changelog against (default 'origin')
* **branch** (`string`) - name of the branch to create changelog
against (default 'master')
        * **choosing-strategy**: (`string`) - Jenkins class for selecting what
          to build. Can be one of `default`, `inverse`, or `gerrit`
          (default 'default')
* **clean** (`dict`)
* **after** (`bool`) - Clean the workspace after checkout
* **before** (`bool`) - Clean the workspace before checkout
* **excluded-users**: (`list(string)`) - list of users to ignore
revisions from when polling for changes.
(if polling is enabled, optional)
* **included-regions**: (`list(string)`) - list of file/folders to
include (optional)
* **excluded-regions**: (`list(string)`) - list of file/folders to
exclude (optional)
* **ignore-commits-with-messages** (`list(str)`) - Revisions committed
with messages matching these patterns will be ignored. (optional)
* **ignore-notify**: (`bool`) - Ignore notifyCommit URL accesses
(default false)
* **force-polling-using-workspace** (`bool`) - Force polling using
workspace (default false)
* **local-branch** (`string`) - Checkout/merge to local branch
(optional)
* **merge** (`dict`)
* **remote** (`string`) - name of repo that contains branch to
merge to (default 'origin')
* **branch** (`string`) - name of the branch to merge to
* **strategy** (`string`) - merge strategy. Can be one of
'default', 'resolve', 'recursive', 'octopus', 'ours',
'subtree'. (default 'default')
* **fast-forward-mode** (`string`) - merge fast-forward mode.
Can be one of 'FF', 'FF_ONLY' or 'NO_FF'. (default 'FF')
* **per-build-tag** (`bool`) - Create a tag in the workspace for every
build. (default is inverse of skip-tag if set, otherwise false)
* **prune** (`bool`) - Prune remote branches (default false)
* **scm-name** (`string`) - The unique scm name for this Git SCM
(optional)
* **shallow-clone** (`bool`) - Perform shallow clone (default false)
* **sparse-checkout** (`dict`)
* **paths** (`list`) - List of paths to sparse checkout. (optional)
* **submodule** (`dict`)
* **disable** (`bool`) - By disabling support for submodules you
can still keep using basic git plugin functionality and just have
Jenkins to ignore submodules completely as if they didn't exist.
* **recursive** (`bool`) - Retrieve all submodules recursively
(uses '--recursive' option which requires git>=1.6.5)
* **tracking** (`bool`) - Retrieve the tip of the configured
branch in .gitmodules (Uses '\-\-remote' option which requires
git>=1.8.2)
* **reference-repo** (`str`) - Path of the reference repo to use
during clone (optional)
* **timeout** (`int`) - Specify a timeout (in minutes) for
submodules operations (default 10).
* **timeout** (`str`) - Timeout for git commands in minutes (optional)
        * **use-author** (`bool`): Use author rather than committer in the
          Jenkins build changeset (default false)
* **wipe-workspace** (`bool`) - Wipe out workspace before build
(default true)
Example:
.. literalinclude:: /../../tests/scm/fixtures/git001.yaml
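
    A minimal inline sketch (the url is illustrative only)::

        scm:
          - git:
              url: 'https://example.com/project.git'
              branches:
                - master
              wipe-workspace: false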
.. |tfs_2013| replace::
https://www.visualstudio.com/en-us/products/tfs-overview-vs.aspx
"""
logger = logging.getLogger("%s:git" % __name__)
# XXX somebody should write the docs for those with option name =
# None so we have a sensible name/key for it.
mapping = [
# option, xml name, default value (text), attributes (hard coded)
("disable-submodules", 'disableSubmodules', False),
("recursive-submodules", 'recursiveSubmodules', False),
(None, 'doGenerateSubmoduleConfigurations', False),
# XXX is this the same as force-polling-using-workspace?
("fastpoll", 'remotePoll', False),
# XXX does this option still exist?
("git-tool", 'gitTool', "Default"),
(None, 'submoduleCfg', '', {'class': 'list'}),
('reference-repo', 'reference', ''),
("git-config-name", 'gitConfigName', ''),
("git-config-email", 'gitConfigEmail', ''),
]
choosing_strategies = {
'default': 'hudson.plugins.git.util.DefaultBuildChooser',
'gerrit': ('com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.GerritTriggerBuildChooser'),
'inverse': 'hudson.plugins.git.util.InverseBuildChooser',
}
scm = XML.SubElement(xml_parent,
'scm', {'class': 'hudson.plugins.git.GitSCM'})
XML.SubElement(scm, 'configVersion').text = '2'
user = XML.SubElement(scm, 'userRemoteConfigs')
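    # With no explicit 'remotes' list, treat the top-level url/refspec/
    # credentials-id settings as a single remote named after 'name'
    # (default 'origin').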
if 'remotes' not in data:
data['remotes'] = [{data.get('name', 'origin'): data.copy()}]
for remoteData in data['remotes']:
huser = XML.SubElement(user, 'hudson.plugins.git.UserRemoteConfig')
remoteName = next(iter(remoteData.keys()))
XML.SubElement(huser, 'name').text = remoteName
remoteParams = next(iter(remoteData.values()))
if 'refspec' in remoteParams:
refspec = remoteParams['refspec']
else:
refspec = '+refs/heads/*:refs/remotes/' + remoteName + '/*'
XML.SubElement(huser, 'refspec').text = refspec
if 'url' in remoteParams:
remoteURL = remoteParams['url']
else:
            raise JenkinsJobsException(
                'Must specify a url for git remote "' + remoteName + '"')
XML.SubElement(huser, 'url').text = remoteURL
if 'credentials-id' in remoteParams:
credentialsId = remoteParams['credentials-id']
XML.SubElement(huser, 'credentialsId').text = credentialsId
xml_branches = XML.SubElement(scm, 'branches')
branches = data.get('branches', ['**'])
for branch in branches:
bspec = XML.SubElement(xml_branches, 'hudson.plugins.git.BranchSpec')
XML.SubElement(bspec, 'name').text = branch
for elem in mapping:
(optname, xmlname, val) = elem[:3]
# Throw warning for deprecated settings and skip if the 'submodule' key
# is available.
submodule_cfgs = ['disable-submodules', 'recursive-submodules']
if optname in submodule_cfgs:
if optname in data:
logger.warning(
"'{0}' is deprecated, please convert to use the "
"'submodule' section instead as support for this "
"top level option will be removed in a future "
"release.".format(optname))
if 'submodule' in data:
continue
attrs = {}
if len(elem) >= 4:
attrs = elem[3]
xe = XML.SubElement(scm, xmlname, attrs)
if optname and optname in data:
val = data[optname]
if type(val) == bool:
xe.text = str(val).lower()
else:
xe.text = val
exts_node = XML.SubElement(scm, 'extensions')
impl_prefix = 'hudson.plugins.git.extensions.impl.'
if 'basedir' in data:
ext = XML.SubElement(exts_node,
impl_prefix + 'RelativeTargetDirectory')
XML.SubElement(ext, 'relativeTargetDir').text = data['basedir']
if 'changelog-against' in data:
ext_name = impl_prefix + 'ChangelogToBranch'
ext = XML.SubElement(exts_node, ext_name)
opts = XML.SubElement(ext, 'options')
change_remote = data['changelog-against'].get('remote', 'origin')
change_branch = data['changelog-against'].get('branch', 'master')
XML.SubElement(opts, 'compareRemote').text = change_remote
XML.SubElement(opts, 'compareTarget').text = change_branch
if 'choosing-strategy' in data:
try:
choosing_strategy = choosing_strategies[
data.get('choosing-strategy')]
except KeyError:
raise ValueError('Invalid choosing-strategy %r' %
data.get('choosing-strategy'))
ext = XML.SubElement(exts_node, impl_prefix + 'BuildChooserSetting')
XML.SubElement(ext, 'buildChooser', {'class': choosing_strategy})
if 'clean' in data:
# Keep support for old format 'clean' configuration by checking
# if 'clean' is boolean. Else we're using the new extensions style.
if isinstance(data['clean'], bool):
clean_after = data['clean']
clean_before = False
logger.warning(
"'clean: bool' configuration format is deprecated, "
"please use the extension style format to configure "
"this option.")
else:
clean_after = data['clean'].get('after', False)
clean_before = data['clean'].get('before', False)
if clean_after:
ext_name = impl_prefix + 'CleanCheckout'
ext = XML.SubElement(exts_node, ext_name)
if clean_before:
ext_name = impl_prefix + 'CleanBeforeCheckout'
ext = XML.SubElement(exts_node, ext_name)
if 'excluded-users' in data:
excluded_users = '\n'.join(data['excluded-users'])
ext = XML.SubElement(exts_node, impl_prefix + 'UserExclusion')
XML.SubElement(ext, 'excludedUsers').text = excluded_users
if 'included-regions' in data or 'excluded-regions' in data:
ext = XML.SubElement(exts_node,
'hudson.plugins.git.extensions.impl.'
'PathRestriction')
if 'included-regions' in data:
include_string = '\n'.join(data['included-regions'])
XML.SubElement(ext, 'includedRegions').text = include_string
if 'excluded-regions' in data:
exclude_string = '\n'.join(data['excluded-regions'])
XML.SubElement(ext, 'excludedRegions').text = exclude_string
if 'ignore-commits-with-messages' in data:
for msg in data['ignore-commits-with-messages']:
ext_name = impl_prefix + 'MessageExclusion'
ext = XML.SubElement(exts_node, ext_name)
XML.SubElement(ext, 'excludedMessage').text = msg
if 'local-branch' in data:
ext = XML.SubElement(exts_node, impl_prefix + 'LocalBranch')
XML.SubElement(ext, 'localBranch').text = str(data['local-branch'])
if 'merge' in data:
merge = data['merge']
merge_strategies = ['default', 'resolve', 'recursive', 'octopus',
'ours', 'subtree']
fast_forward_modes = ['FF', 'FF_ONLY', 'NO_FF']
name = merge.get('remote', 'origin')
branch = merge['branch']
ext = XML.SubElement(exts_node, impl_prefix + 'PreBuildMerge')
merge_opts = XML.SubElement(ext, 'options')
XML.SubElement(merge_opts, 'mergeRemote').text = name
XML.SubElement(merge_opts, 'mergeTarget').text = branch
strategy = merge.get('strategy', 'default')
if strategy not in merge_strategies:
raise InvalidAttributeError('strategy', strategy, merge_strategies)
XML.SubElement(merge_opts, 'mergeStrategy').text = strategy
fast_forward_mode = merge.get('fast-forward-mode', 'FF')
if fast_forward_mode not in fast_forward_modes:
raise InvalidAttributeError('fast-forward-mode', fast_forward_mode,
fast_forward_modes)
XML.SubElement(merge_opts, 'fastForwardMode').text = fast_forward_mode
if 'scm-name' in data:
ext = XML.SubElement(exts_node, impl_prefix + 'ScmName')
XML.SubElement(ext, 'name').text = str(data['scm-name'])
if 'shallow-clone' in data or 'timeout' in data:
clo = XML.SubElement(exts_node, impl_prefix + 'CloneOption')
XML.SubElement(clo, 'shallow').text = str(
data.get('shallow-clone', False)).lower()
if 'timeout' in data:
XML.SubElement(clo, 'timeout').text = str(data['timeout'])
if 'sparse-checkout' in data:
ext_name = impl_prefix + 'SparseCheckoutPaths'
ext = XML.SubElement(exts_node, ext_name)
sparse_co = XML.SubElement(ext, 'sparseCheckoutPaths')
sparse_paths = data['sparse-checkout'].get('paths')
if sparse_paths is not None:
path_tagname = impl_prefix + 'SparseCheckoutPath'
for path in sparse_paths:
path_tag = XML.SubElement(sparse_co, path_tagname)
XML.SubElement(path_tag, 'path').text = path
if 'submodule' in data:
ext_name = impl_prefix + 'SubmoduleOption'
ext = XML.SubElement(exts_node, ext_name)
XML.SubElement(ext, 'disableSubmodules').text = str(
data['submodule'].get('disable', False)).lower()
XML.SubElement(ext, 'recursiveSubmodules').text = str(
data['submodule'].get('recursive', False)).lower()
XML.SubElement(ext, 'trackingSubmodules').text = str(
data['submodule'].get('tracking', False)).lower()
XML.SubElement(ext, 'reference').text = str(
data['submodule'].get('reference-repo', ''))
XML.SubElement(ext, 'timeout').text = str(
data['submodule'].get('timeout', 10))
if 'timeout' in data:
co = XML.SubElement(exts_node, impl_prefix + 'CheckoutOption')
XML.SubElement(co, 'timeout').text = str(data['timeout'])
polling_using_workspace = str(data.get('force-polling-using-workspace',
False)).lower()
if polling_using_workspace == 'true':
ext_name = impl_prefix + 'DisableRemotePoll'
ext = XML.SubElement(exts_node, ext_name)
if 'per-build-tag' in data or 'skip-tag' in data:
# We want to support both skip-tag (the old option) and per-build-tag
# (the new option), with the new one overriding the old one.
# Unfortunately they have inverse meanings, so we have to be careful.
# The default value of per-build-tag is False if skip-tag is not set,
# so we set the default value of skip-tag to True.
per_build_tag_default = False
        if str(data.get('skip-tag', True)).lower() == 'false':
per_build_tag_default = True
if str(data.get('per-build-tag',
per_build_tag_default)).lower() == 'true':
XML.SubElement(exts_node, impl_prefix + 'PerBuildTag')
prune = str(data.get('prune', False)).lower()
if prune == 'true':
XML.SubElement(exts_node, impl_prefix + 'PruneStaleBranch')
ignore_notify_commits = str(data.get('ignore-notify', False)).lower()
if ignore_notify_commits == 'true':
XML.SubElement(exts_node, impl_prefix + 'IgnoreNotifyCommit')
# By default we wipe the workspace
wipe_workspace = str(data.get('wipe-workspace', True)).lower()
if wipe_workspace == 'true':
ext_name = impl_prefix + 'WipeWorkspace'
ext = XML.SubElement(exts_node, ext_name)
use_author = str(data.get('use-author', False)).lower()
if use_author == 'true':
XML.SubElement(exts_node, impl_prefix + 'AuthorInChangelog')
browser = data.get('browser', 'auto')
browserdict = {'auto': 'auto',
'assemblaweb': 'AssemblaWeb',
'bitbucketweb': 'BitbucketWeb',
'cgit': 'CGit',
'fisheye': 'FisheyeGitRepositoryBrowser',
'gitblit': 'GitBlitRepositoryBrowser',
'githubweb': 'GithubWeb',
'gitiles': 'Gitiles',
'gitlab': 'GitLab',
'gitlist': 'GitList',
'gitoriousweb': 'GitoriousWeb',
'gitweb': 'GitWeb',
'kiln': 'KilnGit',
'microsoft-tfs-2013': 'TFS2013GitRepositoryBrowser',
'phabricator': 'Phabricator',
'redmineweb': 'RedmineWeb',
'rhodecode': 'RhodeCode',
'stash': 'Stash',
'viewgit': 'ViewGitWeb'}
if browser not in browserdict:
valid = sorted(browserdict.keys())
raise JenkinsJobsException("Browser entered is not valid must be one "
"of: %s or %s." % (", ".join(valid[:-1]),
valid[-1]))
if browser != 'auto':
bc = XML.SubElement(scm, 'browser', {'class':
'hudson.plugins.git.browser.' +
browserdict[browser]})
XML.SubElement(bc, 'url').text = data['browser-url']
if browser in ['gitblit', 'viewgit']:
XML.SubElement(bc, 'projectName').text = str(
data.get('project-name', ''))
if browser == 'gitlab':
XML.SubElement(bc, 'version').text = str(
data.get('browser-version', '0.0'))
if browser == 'phabricator':
XML.SubElement(bc, 'repo').text = str(
data.get('repo-name', ''))
def cvs(registry, xml_parent, data):
"""yaml: cvs
Specifies the CVS SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`CVS Plugin <CVS+Plugin>`.
:arg list repos: List of CVS repositories. (required)
:Repos:
* **root** (`str`) -- The CVS connection string Jenkins uses to
connect to the server. The format is :protocol:user@host:path
(required)
* **locations** (`list`) -- List of locations. (required)
:Locations:
* **type** (`str`) -- Type of location.
:supported values:
* **HEAD** - (default)
* **BRANCH**
* **TAG**
* **name** (`str`) -- Name of location. Only valid in case
of 'BRANCH' or 'TAG' location type. (default '')
* **use-head** (`bool`) -- Use Head if not found. Only
valid in case of 'BRANCH' or 'TAG' location type.
(default false)
* **modules** (`list`) -- List of modules. (required)
:Modules:
* **remote** -- The name of the module in the
repository at CVSROOT. (required)
* **local-name** -- The name to be applied to
this module in the local workspace. If blank,
the remote module name will be used.
(default '')
* **excluded-regions** (`list str`) -- Patterns for excluding
regions. (optional)
* **compression-level** (`int`) -- Compression level. Must be a
number between -1 and 9 inclusive. Choose -1 for System Default.
(default -1)
:arg bool use-update: If true, Jenkins will use 'cvs update' whenever
possible for builds. This makes a build faster. But this also causes the
artifacts from the previous build to remain in the file system when a
new build starts, making it not a true clean build. (default true)
:arg bool prune-empty: Remove empty directories after checkout using the
CVS '-P' option. (default true)
:arg bool skip-changelog: Prevent the changelog being generated after
checkout has completed. (default false)
:arg bool show-all-output: Instructs CVS to show all logging output. CVS
normally runs in quiet mode but this option disables that.
(default false)
:arg bool clean-checkout: Perform clean checkout on failed update.
(default false)
:arg bool clean-copy: Force clean copy for locally modified files.
(default false)
Example
.. literalinclude:: /../../tests/scm/fixtures/cvs001.yaml
:language: yaml
.. literalinclude:: /../../tests/scm/fixtures/cvs002.yaml
:language: yaml
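
    A minimal inline sketch (all values are illustrative; the fixtures
    above remain the canonical, tested examples)::

        scm:
          - cvs:
              repos:
                - root: ":pserver:anonymous@cvs.example.org:/cvsroot"
                  locations:
                    - type: HEAD
                      modules:
                        - remote: mymodule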
"""
prefix = 'hudson.scm.'
valid_loc_types = {'HEAD': 'Head', 'TAG': 'Tag', 'BRANCH': 'Branch'}
cvs = XML.SubElement(xml_parent, 'scm', {'class': prefix + 'CVSSCM'})
repos = data.get('repos')
if not repos:
raise JenkinsJobsException("'repos' empty or missing")
repos_tag = XML.SubElement(cvs, 'repositories')
for repo in repos:
repo_tag = XML.SubElement(repos_tag, prefix + 'CvsRepository')
try:
XML.SubElement(repo_tag, 'cvsRoot').text = repo['root']
except KeyError:
raise MissingAttributeError('root')
items_tag = XML.SubElement(repo_tag, 'repositoryItems')
locations = repo.get('locations')
if not locations:
raise JenkinsJobsException("'locations' empty or missing")
for location in locations:
item_tag = XML.SubElement(items_tag, prefix + 'CvsRepositoryItem')
loc_type = location.get('type', 'HEAD')
if loc_type not in valid_loc_types:
raise InvalidAttributeError('type', loc_type, valid_loc_types)
loc_class = ('{0}CvsRepositoryLocation${1}Repository'
'Location').format(prefix, valid_loc_types[loc_type])
loc_tag = XML.SubElement(item_tag, 'location',
{'class': loc_class})
XML.SubElement(loc_tag, 'locationType').text = loc_type
if loc_type == 'TAG' or loc_type == 'BRANCH':
XML.SubElement(loc_tag, 'locationName').text = location.get(
'name', '')
XML.SubElement(loc_tag, 'useHeadIfNotFound').text = str(
location.get('use-head', False)).lower()
modules = location.get('modules')
if not modules:
raise JenkinsJobsException("'modules' empty or missing")
modules_tag = XML.SubElement(item_tag, 'modules')
for module in modules:
module_tag = XML.SubElement(modules_tag, prefix + 'CvsModule')
try:
XML.SubElement(module_tag, 'remoteName'
).text = module['remote']
except KeyError:
raise MissingAttributeError('remote')
XML.SubElement(module_tag, 'localName').text = module.get(
'local-name', '')
excluded = repo.get('excluded-regions', [])
excluded_tag = XML.SubElement(repo_tag, 'excludedRegions')
for pattern in excluded:
pattern_tag = XML.SubElement(excluded_tag,
prefix + 'ExcludedRegion')
XML.SubElement(pattern_tag, 'pattern').text = pattern
compression_level = repo.get('compression-level', '-1')
if int(compression_level) not in range(-1, 10):
raise InvalidAttributeError('compression-level',
compression_level, range(-1, 10))
XML.SubElement(repo_tag, 'compressionLevel').text = compression_level
mappings = [
('use-update', 'canUseUpdate', True),
('prune-empty', 'pruneEmptyDirectories', True),
('skip-changelog', 'skipChangeLog', False),
('show-all-output', 'disableCvsQuiet', False),
('clean-checkout', 'cleanOnFailedUpdate', False),
('clean-copy', 'forceCleanCopy', False)]
convert_mapping_to_xml(cvs, data, mappings, fail_required=True)
def repo(registry, xml_parent, data):
"""yaml: repo
Specifies the repo SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Repo Plugin <Repo+Plugin>`.
:arg str manifest-url: URL of the repo manifest (required)
:arg str manifest-branch: The branch of the manifest to use (optional)
:arg str manifest-file: Initial manifest file to use when initialising
(optional)
:arg str manifest-group: Only retrieve those projects in the manifest
tagged with the provided group name (optional)
    :arg list(str) ignore-projects: a list of projects in which changes would
        not be considered to trigger a build when polling (optional)
:arg str destination-dir: Location relative to the workspace root to clone
under (optional)
:arg str repo-url: custom url to retrieve the repo application (optional)
:arg str mirror-dir: Path to mirror directory to reference when
initialising (optional)
:arg int jobs: Number of projects to fetch simultaneously (default 0)
:arg int depth: Specify the depth in history to sync from the source. The
default is to sync all of the history. Use 1 to just sync the most
recent commit (default 0)
:arg bool current-branch: Fetch only the current branch from the server
(default true)
:arg bool reset-first: Remove any commits that are not on the repositories
by running the following command before anything else (default false):
``repo forall -c "git reset --hard"``
:arg bool quiet: Make repo more quiet
(default true)
:arg bool force-sync: Continue sync even if a project fails to sync
(default false)
:arg bool no-tags: Don't fetch tags (default false)
:arg bool trace: Trace git command execution into the build logs. (default
false)
:arg bool show-all-changes: When this is checked --first-parent is no
longer passed to git log when determining changesets (default false)
:arg str local-manifest: Contents of .repo/local_manifest.xml, written
prior to calling sync (optional)
Example:
.. literalinclude:: /../../tests/scm/fixtures/repo001.yaml
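
    A minimal inline sketch (the manifest URL is illustrative; the fixture
    above is the canonical, tested example)::

        scm:
          - repo:
              manifest-url: https://gerrit.example.org/manifest.git
              manifest-branch: master
              jobs: 4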
"""
scm = XML.SubElement(xml_parent,
'scm', {'class': 'hudson.plugins.repo.RepoScm'})
mapping = [
# option, xml name, default value
('manifest-url', 'manifestRepositoryUrl', None),
('jobs', 'jobs', 0),
('depth', 'depth', 0),
('current-branch', 'currentBranch', True),
('reset-first', 'resetFirst', False),
('quiet', 'quiet', True),
('force-sync', 'forceSync', False),
('no-tags', 'noTags', False),
('trace', 'trace', False),
('show-all-changes', 'showAllChanges', False),
]
convert_mapping_to_xml(scm, data, mapping, fail_required=True)
optional_mapping = [
# option, xml name, default value
('manifest-branch', 'manifestBranch', None),
('manifest-file', 'manifestFile', None),
('manifest-group', 'manifestGroup', None),
('destination-dir', 'destinationDir', None),
('repo-url', 'repoUrl', None),
('mirror-dir', 'mirrorDir', None),
('local-manifest', 'localManifest', None),
]
convert_mapping_to_xml(scm, data, optional_mapping, fail_required=False)
    # ignore-projects does not follow the same pattern as the other
    # parameters, so process it here:
ip = XML.SubElement(scm, 'ignoreProjects', {'class': 'linked-hash-set'})
ignored_projects = data.get('ignore-projects', [''])
for ignored_project in ignored_projects:
XML.SubElement(ip, 'string').text = str(ignored_project)
def store(registry, xml_parent, data):
"""yaml: store
Specifies the Visualworks Smalltalk Store repository for this job.
Requires the Jenkins :jenkins-wiki:`Visualworks Smalltalk Store Plugin
<Visualworks+Smalltalk+Store+Plugin>`.
:arg str script: name of the Store script to run
:arg str repository: name of the Store repository
:arg str version-regex: regular expression that specifies which pundle
versions should be considered (optional)
:arg str minimum-blessing: minimum blessing level to consider (optional)
:arg str parcel-builder-file: name of the file to generate as input to
a later parcel building step (optional - if not specified, then no
parcel builder file will be generated)
:arg list pundles:
:(package or bundle): (`dict`): A package or bundle to check
Example:
.. literalinclude:: /../../tests/scm/fixtures/store001.yaml
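
    A minimal inline sketch (script, repository and package names are
    illustrative; the fixture above is the canonical, tested example)::

        scm:
          - store:
              script: mystore-script
              repository: myrepo
              pundles:
                - package: MyPackage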
"""
namespace = 'org.jenkinsci.plugins.visualworks_store'
scm = XML.SubElement(xml_parent, 'scm',
{'class': '{0}.StoreSCM'.format(namespace)})
if 'script' in data:
XML.SubElement(scm, 'scriptName').text = data['script']
else:
raise JenkinsJobsException("Must specify a script name")
if 'repository' in data:
XML.SubElement(scm, 'repositoryName').text = data['repository']
else:
raise JenkinsJobsException("Must specify a repository name")
pundle_specs = data.get('pundles', [])
if not pundle_specs:
raise JenkinsJobsException("At least one pundle must be specified")
valid_pundle_types = ['package', 'bundle']
pundles = XML.SubElement(scm, 'pundles')
for pundle_spec in pundle_specs:
pundle = XML.SubElement(pundles, '{0}.PundleSpec'.format(namespace))
pundle_type = next(iter(pundle_spec))
pundle_name = pundle_spec[pundle_type]
if pundle_type not in valid_pundle_types:
            raise JenkinsJobsException(
                'pundle type must be one of: '
                + ', '.join(valid_pundle_types))
else:
XML.SubElement(pundle, 'name').text = pundle_name
XML.SubElement(pundle, 'pundleType').text = pundle_type.upper()
if 'version-regex' in data:
XML.SubElement(scm, 'versionRegex').text = data['version-regex']
if 'minimum-blessing' in data:
XML.SubElement(scm, 'minimumBlessingLevel').text = \
data['minimum-blessing']
if 'parcel-builder-file' in data:
XML.SubElement(scm, 'generateParcelBuilderInputFile').text = 'true'
XML.SubElement(scm, 'parcelBuilderInputFilename').text = \
data['parcel-builder-file']
else:
XML.SubElement(scm, 'generateParcelBuilderInputFile').text = 'false'
def svn(registry, xml_parent, data):
"""yaml: svn
Specifies the svn SCM repository for this job.
:arg str url: URL of the svn repository
:arg str basedir: location relative to the workspace root to checkout to
(default '.')
:arg str credentials-id: optional argument to specify the ID of credentials
to use
:arg str repo-depth: Repository depth. Can be one of 'infinity', 'empty',
'files', 'immediates' or 'unknown'. (default 'infinity')
:arg bool ignore-externals: Ignore Externals. (default false)
    :arg str workspaceupdater: optional argument to specify how to update the
        workspace (default wipeworkspace)
:supported values:
* **wipeworkspace** - deletes the workspace before checking out
* **revertupdate** - do an svn revert then an svn update
* **emulateclean** - delete unversioned/ignored files then update
* **update** - do an svn update as much as possible
:arg list(str) excluded-users: list of users to ignore revisions from
when polling for changes (if polling is enabled; parameter is optional)
:arg list(str) included-regions: list of file/folders to include
(optional)
:arg list(str) excluded-regions: list of file/folders to exclude (optional)
:arg list(str) excluded-commit-messages: list of commit messages to exclude
(optional)
:arg str exclusion-revprop-name: revision svn-property to ignore (optional)
:arg bool ignore-property-changes-on-directories: ignore svn-property only
changes of directories (default false)
:arg bool filter-changelog: If set Jenkins will apply the same inclusion
and exclusion patterns for displaying changelog entries as it does for
polling for changes (default false)
:arg list repos: list of repositories to checkout (optional)
:arg str viewvc-url: URL of the svn web interface (optional)
:Repo:
* **url** (`str`) -- URL for the repository
* **basedir** (`str`) -- Location relative to the workspace root
to checkout to (default '.')
* **credentials-id** - optional ID of credentials to use
* **repo-depth** - Repository depth. Can be one of 'infinity',
'empty', 'files', 'immediates' or 'unknown'. (default 'infinity')
* **ignore-externals** - Ignore Externals. (default false)
Multiple repos example:
.. literalinclude:: /../../tests/scm/fixtures/svn-multiple-repos-001.yaml
Advanced commit filtering example:
.. literalinclude:: /../../tests/scm/fixtures/svn-regions-001.yaml
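
    A minimal single-repository sketch (the URL is illustrative; the
    fixtures above remain the canonical, tested examples)::

        scm:
          - svn:
              url: http://svn.example.com/repo/trunk
              workspaceupdater: update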
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.scm.SubversionSCM'})
if 'viewvc-url' in data:
browser = XML.SubElement(
scm, 'browser', {'class': 'hudson.scm.browsers.ViewSVN'})
XML.SubElement(browser, 'url').text = data['viewvc-url']
locations = XML.SubElement(scm, 'locations')
def populate_repo_xml(parent, data):
module = XML.SubElement(parent,
'hudson.scm.SubversionSCM_-ModuleLocation')
XML.SubElement(module, 'remote').text = data['url']
XML.SubElement(module, 'local').text = data.get('basedir', '.')
if 'credentials-id' in data:
XML.SubElement(module, 'credentialsId').text = data[
'credentials-id']
repo_depths = ['infinity', 'empty', 'files', 'immediates', 'unknown']
repo_depth = data.get('repo-depth', 'infinity')
if repo_depth not in repo_depths:
            raise InvalidAttributeError('repo-depth', repo_depth, repo_depths)
XML.SubElement(module, 'depthOption').text = repo_depth
XML.SubElement(module, 'ignoreExternalsOption').text = str(
data.get('ignore-externals', False)).lower()
if 'repos' in data:
repos = data['repos']
for repo in repos:
populate_repo_xml(locations, repo)
elif 'url' in data:
populate_repo_xml(locations, data)
else:
raise JenkinsJobsException("A top level url or repos list must exist")
    updater = data.get('workspaceupdater', 'wipeworkspace')
    updaterdict = {'wipeworkspace': 'CheckoutUpdater',
                   'revertupdate': 'UpdateWithRevertUpdater',
                   'emulateclean': 'UpdateWithCleanUpdater',
                   'update': 'UpdateUpdater'}
    if updater not in updaterdict:
        raise InvalidAttributeError('workspaceupdater', updater,
                                    updaterdict.keys())
    updaterclass = updaterdict[updater]
XML.SubElement(scm, 'workspaceUpdater', {'class':
'hudson.scm.subversion.' + updaterclass})
mapping = [
# option, xml name, default value
("excluded-regions", 'excludedRegions', []),
("included-regions", 'includedRegions', []),
("excluded-users", 'excludedUsers', []),
("exclusion-revprop-name", 'excludedRevprop', ''),
("excluded-commit-messages", 'excludedCommitMessages', []),
("ignore-property-changes-on-directories", 'ignoreDirPropChanges',
False),
("filter-changelog", 'filterChangelog', False),
]
for optname, xmlname, defvalue in mapping:
if isinstance(defvalue, list):
val = '\n'.join(data.get(optname, defvalue))
else:
val = data.get(optname, defvalue)
# Skip adding xml entry if default is empty and no value given
if not val and (defvalue in ['', []]):
continue
xe = XML.SubElement(scm, xmlname)
if isinstance(defvalue, bool):
xe.text = str(val).lower()
else:
xe.text = str(val)
def tfs(registry, xml_parent, data):
"""yaml: tfs
Specifies the Team Foundation Server repository for this job.
Requires the Jenkins :jenkins-wiki:`Team Foundation Server Plugin
<Team+Foundation+Server+Plugin>`.
**NOTE**: TFS Password must be entered manually on the project if a
user name is specified. The password will be overwritten with an empty
value every time the job is rebuilt with Jenkins Job Builder.
:arg str server-url: The name or URL of the team foundation server.
If the server has been registered on the machine then it is only
necessary to enter the name.
:arg str project-path: The name of the project as it is registered on the
server.
:arg str login: The user name that is registered on the server. The user
name must contain the name and the domain name. Entered as
domain\\\\user or user\@domain (optional).
**NOTE**: You must enter in at least two slashes for the
domain\\\\user format in JJB YAML. It will be rendered normally.
    :arg bool use-update: If true, Hudson will not delete the workspace at end
        of each build. This causes the artifacts from the previous build to
        remain when a new build starts. (default true)
:arg str local-path: The folder where all files will be retrieved into.
The folder name is a relative path, under the workspace of the current
job. (default .)
:arg str workspace: The name of the workspace under which the source
should be retrieved. This workspace is created at the start of a
download, and deleted at the end. You can normally omit the property
unless you want to name a workspace to avoid conflicts on the server
(i.e. when you have multiple projects on one server talking to a
Team Foundation Server). (default Hudson-${JOB_NAME}-${NODE_NAME})
The TFS plugin supports the following macros that are replaced in the
workspace name:
* ${JOB_NAME} - The name of the job.
* ${USER_NAME} - The user name that the Hudson server or slave is
running as.
* ${NODE_NAME} - The name of the node/slave that the plugin currently
is executed on. Note that this is not the hostname, this value is
the Hudson configured name of the slave/node.
* ${ENV} - The environment variable that is set on the master or slave.
:arg dict web-access: Adds links in "changes" views within Jenkins to an
external system for browsing the details of those changes. The "Auto"
selection attempts to infer the repository browser from other jobs,
if supported by the SCM and a job with matching SCM details can be
found. (optional, default Auto).
:web-access value:
* **web-url** -- Enter the URL to the TSWA server. The plugin will
strip the last path (if any) of the URL when building URLs for
change set pages and other pages. (optional, default
uses server-url)
Examples:
.. literalinclude:: /../../tests/scm/fixtures/tfs-001.yaml
.. literalinclude:: /../../tests/scm/fixtures/tfs-002.yaml
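
    A minimal inline sketch (server and project values are illustrative;
    the fixtures above remain the canonical, tested examples)::

        scm:
          - tfs:
              server-url: tfs.example.com
              project-path: "$/myproject"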
"""
tfs = XML.SubElement(xml_parent, 'scm',
{'class': 'hudson.plugins.tfs.'
'TeamFoundationServerScm'})
XML.SubElement(tfs, 'serverUrl').text = str(
data.get('server-url', ''))
XML.SubElement(tfs, 'projectPath').text = str(
data.get('project-path', ''))
XML.SubElement(tfs, 'localPath').text = str(
data.get('local-path', '.'))
XML.SubElement(tfs, 'workspaceName').text = str(
data.get('workspace', 'Hudson-${JOB_NAME}-${NODE_NAME}'))
# TODO: In the future, it would be nice to have a place that can pull
# passwords into JJB without having to commit them in plaintext. This
# could also integrate nicely with global configuration options.
XML.SubElement(tfs, 'userPassword')
XML.SubElement(tfs, 'userName').text = str(
data.get('login', ''))
XML.SubElement(tfs, 'useUpdate').text = str(
data.get('use-update', True))
store = data.get('web-access', None)
if 'web-access' in data and isinstance(store, list):
web = XML.SubElement(tfs, 'repositoryBrowser',
{'class': 'hudson.plugins.tfs.browsers.'
'TeamSystemWebAccessBrowser'})
XML.SubElement(web, 'url').text = str(store[0].get('web-url', None))
elif 'web-access' in data and store is None:
XML.SubElement(tfs, 'repositoryBrowser', {'class': 'hudson.'
'plugins.tfs.browsers.'
'TeamSystemWebAccess'
'Browser'})
def workspace(registry, xml_parent, data):
"""yaml: workspace
Specifies the cloned workspace for this job to use as a SCM source.
Requires the Jenkins :jenkins-wiki:`Clone Workspace SCM Plugin
<Clone+Workspace+SCM+Plugin>`.
The job the workspace is cloned from must be configured with an
clone-workspace publisher
:arg str parent-job: The name of the parent job to clone the
workspace from.
:arg str criteria: Set the criteria to determine what build of the parent
project to use. Can be one of 'Any', 'Not Failed' or 'Successful'.
(default Any)
Example:
.. literalinclude:: /../../tests/scm/fixtures/workspace001.yaml
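
    A minimal inline sketch (the parent job name is illustrative; the
    fixture above is the canonical, tested example)::

        scm:
          - workspace:
              parent-job: my-upstream-job
              criteria: Successful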
"""
workspace = XML.SubElement(xml_parent, 'scm', {'class': 'hudson.plugins.'
'cloneworkspace.CloneWorkspaceSCM'})
XML.SubElement(workspace, 'parentJobName').text = str(
data.get('parent-job', ''))
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(workspace, 'criteria').text = criteria
def hg(registry, xml_parent, data):
"""yaml: hg
Specifies the mercurial SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Mercurial Plugin <Mercurial+Plugin>`.
:arg str url: URL of the hg repository
:arg str credentials-id: ID of credentials to use to connect (optional)
:arg str revision-type: revision type to use (default 'branch')
:arg str revision: the branch or tag name you would like to track
(default 'default')
:arg list(str) modules: reduce unnecessary builds by specifying a list of
"modules" within the repository. A module is a directory name within
the repository that this project lives in. (default '')
:arg bool clean: wipe any local modifications or untracked files in the
repository checkout (default false)
:arg str subdir: check out the Mercurial repository into this
subdirectory of the job's workspace (optional)
:arg bool disable-changelog: do not calculate the Mercurial changelog
for each build (default false)
:arg str browser: what repository browser to use
:browsers supported:
* **auto** - (default)
* **bitbucketweb** - https://bitbucket.org/
* **fisheye** - https://www.atlassian.com/software/fisheye
* **googlecode** - https://code.google.com/
* **hgweb** - https://www.selenic.com/hg/help/hgweb
* **kilnhg** - https://www.fogcreek.com/kiln/
* **rhodecode** - https://rhodecode.com/ (versions >= 1.2)
* **rhodecode-pre-1.2.0** - https://rhodecode.com/ (versions < 1.2)
:arg str browser-url: url for the repository browser
(required if browser is set)
Example:
.. literalinclude:: ../../tests/scm/fixtures/hg02.yaml
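
    A minimal inline sketch (the URL is illustrative; the fixture above is
    the canonical, tested example)::

        scm:
          - hg:
              url: https://hg.example.org/myrepo
              revision-type: branch
              revision: default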
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.plugins.mercurial.MercurialSCM'})
if 'url' in data:
XML.SubElement(scm, 'source').text = data['url']
else:
raise JenkinsJobsException("A top level url must exist")
if 'credentials-id' in data:
XML.SubElement(scm, 'credentialsId').text = data['credentials-id']
revision_type_dict = {
'branch': 'BRANCH',
'tag': 'TAG',
}
try:
revision_type = revision_type_dict[data.get('revision-type', 'branch')]
except KeyError:
raise JenkinsJobsException('Invalid revision-type %r' %
data.get('revision-type'))
XML.SubElement(scm, 'revisionType').text = revision_type
XML.SubElement(scm, 'revision').text = data.get('revision', 'default')
if 'subdir' in data:
XML.SubElement(scm, 'subdir').text = data['subdir']
xc = XML.SubElement(scm, 'clean')
xc.text = str(data.get('clean', False)).lower()
modules = data.get('modules', '')
if isinstance(modules, list):
modules = " ".join(modules)
XML.SubElement(scm, 'modules').text = modules
xd = XML.SubElement(scm, 'disableChangeLog')
xd.text = str(data.get('disable-changelog', False)).lower()
browser = data.get('browser', 'auto')
browserdict = {
'auto': '',
'bitbucket': 'BitBucket',
'fisheye': 'FishEye',
'googlecode': 'GoogleCode',
'hgweb': 'HgWeb',
'kilnhg': 'KilnHG',
'rhodecode': 'RhodeCode',
'rhodecode-pre-1.2.0': 'RhodeCodeLegacy'
}
if browser not in browserdict:
raise JenkinsJobsException("Browser entered is not valid must be one "
"of: %s" % ", ".join(browserdict.keys()))
if browser != 'auto':
bc = XML.SubElement(scm, 'browser',
{'class': 'hudson.plugins.mercurial.browser.' +
browserdict[browser]})
if 'browser-url' in data:
XML.SubElement(bc, 'url').text = data['browser-url']
else:
raise JenkinsJobsException("A browser-url must be specified along "
"with browser.")
def openshift_img_streams(registry, xml_parent, data):
"""yaml: openshift-img-streams
Rather than a Build step extension plugin, this is an extension of the
Jenkins SCM plugin, where this baked-in polling mechanism provided by
Jenkins is leveraged by exposing some of the common semantics between
OpenShift ImageStreams (which are abstractions of Docker repositories)
    and SCMs - versions / commit IDs of related artifacts
    (images vs. programmatic files).
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str image-stream-name: The name of the ImageStream is what shows up
        in the NAME column if you dump all the ImageStreams with the
        `oc get is` command invocation. (default nodejs-010-centos7)
:arg str tag: The specific image tag within the ImageStream to monitor.
(default latest)
:arg str api-url: This would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default \https://openshift.default.svc.cluster.local\)
    :arg str namespace: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run
        a Build on. (default test)
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/scm/fixtures/openshift-img-streams001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/scm/fixtures/openshift-img-streams002.yaml
:language: yaml
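
    An inline sketch of the same minimal case (values are illustrative;
    the fixtures above remain the canonical, tested examples)::

        scm:
          - openshift-img-streams:
              image-stream-name: my-app
              tag: latest
              namespace: test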
"""
scm = XML.SubElement(xml_parent,
'scm', {'class':
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftImageStreams'})
mapping = [
# option, xml name, default value
("image-stream-name", 'imageStreamName', 'nodejs-010-centos7'),
("tag", 'tag', 'latest'),
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(scm, data, mapping, fail_required=True)
def bzr(registry, xml_parent, data):
"""yaml: bzr
Specifies the bzr SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Bazaar Plugin <Bazaar+Plugin>`.
:arg str url: URL of the bzr branch (required)
:arg bool clean-tree: Clean up the workspace (using bzr) before pulling
the branch (default false)
:arg bool lightweight-checkout: Use a lightweight checkout instead of a
full branch (default false)
:arg str browser: The repository browser to use.
:browsers supported:
* **auto** - (default)
* **loggerhead** - as used by Launchpad
* **opengrok** - https://opengrok.github.io/OpenGrok/
:arg str browser-url:
URL for the repository browser (required if browser is set).
:arg str opengrok-root-module:
Root module for OpenGrok (required if browser is opengrok).
Example:
.. literalinclude:: /../../tests/scm/fixtures/bzr001.yaml
:language: yaml
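
    A minimal inline sketch (the branch URL is illustrative; the fixture
    above is the canonical, tested example)::

        scm:
          - bzr:
              url: lp:mybranch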
"""
mapping = [
# option, xml name, default value (text), attributes (hard coded)
('url', 'source', None),
('clean-tree', 'cleantree', False),
('lightweight-checkout', 'checkout', False),
]
scm_element = XML.SubElement(
xml_parent, 'scm', {'class': 'hudson.plugins.bazaar.BazaarSCM'})
convert_mapping_to_xml(scm_element, data, mapping, fail_required=True)
browser_name_to_class = {
'loggerhead': 'Loggerhead',
'opengrok': 'OpenGrok',
}
browser = data.get('browser', 'auto')
if browser == 'auto':
return
if browser not in browser_name_to_class:
raise InvalidAttributeError('browser', browser,
browser_name_to_class.keys())
browser_element = XML.SubElement(
scm_element,
'browser',
{'class': 'hudson.plugins.bazaar.browsers.{0}'.format(
browser_name_to_class[browser])})
XML.SubElement(browser_element, 'url').text = data['browser-url']
if browser == 'opengrok':
XML.SubElement(browser_element, 'rootModule').text = (
data['opengrok-root-module'])
def url(registry, xml_parent, data):
"""yaml: url
Watch for changes in, and download an artifact from a particular url.
Requires the Jenkins :jenkins-wiki:`URL SCM <URL+SCM>`.
:arg list url-list: List of URLs to watch. (required)
:arg bool clear-workspace: If set to true, clear the workspace before
downloading the artifact(s) specified in url-list. (default false)
Examples:
.. literalinclude:: ../../tests/scm/fixtures/url001.yaml
:language: yaml
.. literalinclude:: ../../tests/scm/fixtures/url002.yaml
:language: yaml
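
    A minimal inline sketch (the URL is illustrative; the fixtures above
    remain the canonical, tested examples)::

        scm:
          - url:
              url-list:
                - http://example.org/build/artifact.tar.gz
              clear-workspace: true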
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.plugins.URLSCM.URLSCM'})
urls = XML.SubElement(scm, 'urls')
try:
for data_url in data['url-list']:
url_tuple = XML.SubElement(
urls, 'hudson.plugins.URLSCM.URLSCM_-URLTuple')
XML.SubElement(url_tuple, 'urlString').text = data_url
except KeyError as e:
raise MissingAttributeError(e.args[0])
XML.SubElement(scm, 'clearWorkspace').text = str(
data.get('clear-workspace', False)).lower()
def dimensions(registry, xml_parent, data):
"""yaml: dimensions
Specifies the Dimensions SCM repository for this job.
Requires Jenkins :jenkins-wiki:`Dimensions Plugin <Dimensions+Plugin>`.
:arg str project: Project name of format PRODUCT_ID:PROJECT_NAME (required)
:arg str permissions: Default Permissions for updated files
(default: DEFAULT)
:Permissions:
* **DEFAULT**
* **READONLY**
* **WRITABLE**
:arg str eol: End of line (default: DEFAULT)
:End of line:
* **DEFAULT**
* **UNIX**
* **WINDOWS**
* **UNCHANGED**
:arg list folders: Folders to monitor (default /)
:arg list exclude: Paths to exclude from monitor
:arg str username: Repository username for this job
:arg str password: Repository password for this job
:arg str server: Dimensions server for this job
:arg str database: Dimensions database for this job.
Format must be database@dsn
:arg bool update: Use update (default false)
:arg bool clear-workspace: Clear workspace prior to build (default false)
:arg bool force-build: Force build even if the repository SCM checkout
operation fails (default false)
    :arg bool overwrite-modified: Overwrite files in workspace from
        repository files (default false)
:arg bool expand-vars: Expand substitution variables (default false)
:arg bool no-metadata: Checkout files with no metadata (default false)
:arg bool maintain-timestamp: Maintain file timestamp from Dimensions
(default false)
:arg bool slave-checkout: Force slave based checkout (default false)
:arg str timezone: Server timezone
:arg str web-url: Dimensions Web URL
Examples:
.. literalinclude:: /../../tests/scm/fixtures/dimensions-minimal.yaml
:language: yaml
.. literalinclude:: /../../tests/scm/fixtures/dimensions-full.yaml
:language: yaml
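
    An inline sketch of the minimal case (the project id is illustrative;
    the fixtures above remain the canonical, tested examples)::

        scm:
          - dimensions:
              project: "PRODUCT_ID:PROJECT_NAME"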
"""
scm = XML.SubElement(
xml_parent,
'scm', {'class': 'hudson.plugins.dimensionsscm.DimensionsSCM'})
# List to check against for valid permission
perm = ['DEFAULT', 'READONLY', 'WRITABLE']
# List to check against for valid end of line
eol = ['DEFAULT', 'UNIX', 'WINDOWS', 'UNCHANGED']
mapping = [
# option, xml name, default value (text), attributes (hard coded)
('project', 'project', None),
('permissions', 'permissions', 'DEFAULT', perm),
('eol', 'eol', 'DEFAULT', eol),
('update', 'canJobUpdate', False),
('clear-workspace', 'canJobDelete', False),
('force-build', 'canJobForce', False),
('overwrite-modified', 'canJobRevert', False),
('expand-vars', 'canJobExpand', False),
('no-metadata', 'canJobNoMetadata', False),
('maintain-timestamp', 'canJobNoTouch', False),
('slave-checkout', 'forceAsSlave', False),
]
convert_mapping_to_xml(scm, data, mapping, fail_required=True)
# Folders to monitor. Default '/'
folders = XML.SubElement(scm, 'folders')
if 'folders' in data:
for folder in data['folders']:
XML.SubElement(folders, 'string').text = folder
else:
XML.SubElement(folders, 'string').text = '/'
# Excluded paths
exclude = XML.SubElement(scm, 'pathsToExclude')
if 'exclude' in data:
for exc in data['exclude']:
XML.SubElement(exclude, 'string').text = exc
optional_mapping = [
# option, xml name, default value (text), attributes (hard coded)
('username', 'jobUserName', None),
('password', 'jobPasswd', None),
('server', 'jobServer', None),
('database', 'jobDatabase', None),
('timezone', 'jobTimeZone', None),
('web-url', 'jobWebUrl', None),
]
convert_mapping_to_xml(scm, data, optional_mapping, fail_required=False)
class SCM(jenkins_jobs.modules.base.Base):
sequence = 30
component_type = 'scm'
component_list_type = 'scm'
def gen_xml(self, xml_parent, data):
scms_parent = XML.Element('scms')
for scm in data.get('scm', []):
self.registry.dispatch('scm', scms_parent, scm)
scms_count = len(scms_parent)
if scms_count == 0:
XML.SubElement(xml_parent, 'scm', {'class': 'hudson.scm.NullSCM'})
elif scms_count == 1:
xml_parent.append(scms_parent[0])
else:
class_name = 'org.jenkinsci.plugins.multiplescms.MultiSCM'
xml_attribs = {'class': class_name}
xml_parent = XML.SubElement(xml_parent, 'scm', xml_attribs)
for scms_child in scms_parent:
try:
scms_child.tag = scms_child.attrib['class']
                    del scms_child.attrib['class']
except KeyError:
pass
xml_parent.append(scms_parent)
class PipelineSCM(jenkins_jobs.modules.base.Base):
sequence = 30
component_type = 'pipeline-scm'
component_list_type = 'pipeline-scm'
def gen_xml(self, xml_parent, data):
definition_parent = xml_parent.find('definition')
pipeline_dict = data.get(self.component_type, {})
scms = pipeline_dict.get('scm')
        if scms:
            # 'if scms' already guarantees a non-empty list here, so only
            # the single-SCM and multi-SCM cases remain.
            if len(scms) == 1:
                self.registry.dispatch('scm', definition_parent, scms[0])
                XML.SubElement(definition_parent, 'scriptPath'
                               ).text = pipeline_dict.get('script-path',
                                                          'Jenkinsfile')
            else:
                raise JenkinsJobsException('Only one SCM can be specified '
                                           'as pipeline-scm')
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
class Matrix(jenkins_jobs.modules.base.Base):
sequence = 0
# List the supported Axis names in our configuration
# and map them to the Jenkins XML element name.
supported_axis = {
'label-expression': 'hudson.matrix.LabelExpAxis',
'user-defined': 'hudson.matrix.TextAxis',
'slave': 'hudson.matrix.LabelAxis',
'jdk': 'hudson.matrix.JDKAxis',
'dynamic': 'ca.silvermaplesolutions.jenkins.plugins.daxis.DynamicAxis',
'python': 'jenkins.plugins.shiningpanda.matrix.PythonAxis',
'tox': 'jenkins.plugins.shiningpanda.matrix.ToxAxis',
'groovy': 'org.jenkinsci.plugins.GroovyAxis',
'yaml': 'org.jenkinsci.plugins.yamlaxis.YamlAxis',
}
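    # A sketch of the YAML this class consumes (values are illustrative):
    #
    #   axes:
    #     - axis:
    #         type: user-defined
    #         name: PYTHON
    #         values: ['2.7', '3.5']
    #     - axis:
    #         type: slave
    #         name: label
    #         values: ['precise', 'trusty']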
def root_xml(self, data):
root = XML.Element('matrix-project')
ex_r = XML.SubElement(root, 'executionStrategy',
{'class': 'hudson.matrix.'
'DefaultMatrixExecutionStrategyImpl'})
ex_d = data.get('execution-strategy', {})
XML.SubElement(root, 'combinationFilter').text = \
str(ex_d.get('combination-filter', '')).rstrip()
XML.SubElement(ex_r, 'runSequentially').text = \
str(ex_d.get('sequential', False)).lower()
if 'touchstone' in ex_d:
XML.SubElement(ex_r, 'touchStoneCombinationFilter').text = \
str(ex_d['touchstone'].get('expr', ''))
t_r = XML.SubElement(ex_r, 'touchStoneResultCondition')
n = ex_d['touchstone'].get('result', 'stable').upper()
if n not in ('STABLE', 'UNSTABLE'):
raise ValueError('Required result must be stable or unstable')
XML.SubElement(t_r, 'name').text = n
if n == "STABLE":
XML.SubElement(t_r, 'ordinal').text = '0'
XML.SubElement(t_r, 'color').text = 'BLUE'
else:
XML.SubElement(t_r, 'ordinal').text = '1'
XML.SubElement(t_r, 'color').text = 'YELLOW'
ax_root = XML.SubElement(root, 'axes')
for axis_ in data.get('axes', []):
axis = axis_['axis']
axis_type = axis['type']
if axis_type not in self.supported_axis:
                raise ValueError('Only %s axes types are supported'
                                 % ', '.join(sorted(self.supported_axis)))
axis_name = self.supported_axis.get(axis_type)
lbl_root = XML.SubElement(ax_root, axis_name)
name, values = axis.get('name', ''), axis.get('values', [''])
if axis_type == 'jdk':
XML.SubElement(lbl_root, 'name').text = 'jdk'
elif axis_type == 'python':
XML.SubElement(lbl_root, 'name').text = 'PYTHON'
elif axis_type == 'tox':
XML.SubElement(lbl_root, 'name').text = 'TOXENV'
else:
XML.SubElement(lbl_root, 'name').text = str(name)
if axis_type != "groovy":
v_root = XML.SubElement(lbl_root, 'values')
if axis_type == "dynamic":
XML.SubElement(v_root, 'string').text = str(values[0])
XML.SubElement(lbl_root, 'varName').text = str(values[0])
v_root = XML.SubElement(lbl_root, 'axisValues')
XML.SubElement(v_root, 'string').text = 'default'
elif axis_type == "groovy":
command = XML.SubElement(lbl_root, 'groovyString')
command.text = axis.get('command')
XML.SubElement(lbl_root, 'computedValues').text = ''
elif axis_type == "yaml":
XML.SubElement(v_root, 'string').text = axis.get('filename')
else:
for v in values:
XML.SubElement(v_root, 'string').text = str(v)
return root | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/project_matrix.py | project_matrix.py |
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.xml_config import remove_ignorable_whitespace
class General(jenkins_jobs.modules.base.Base):
sequence = 10
logrotate_warn_issued = False
def gen_xml(self, xml, data):
jdk = data.get('jdk', None)
if jdk:
XML.SubElement(xml, 'jdk').text = jdk
XML.SubElement(xml, 'actions')
desc_text = data.get('description', None)
if desc_text is not None:
description = XML.SubElement(xml, 'description')
description.text = desc_text
XML.SubElement(xml, 'keepDependencies').text = 'false'
# Need to ensure we support the None parameter to allow disabled to
# remain the last setting if the user purposely adds and then removes
# the disabled parameter.
# See: http://lists.openstack.org/pipermail/openstack-infra/2016-March/003980.html # noqa
disabled = data.get('disabled', None)
if disabled is not None:
XML.SubElement(xml, 'disabled').text = str(disabled).lower()
if 'display-name' in data:
XML.SubElement(xml, 'displayName').text = data['display-name']
if data.get('block-downstream'):
XML.SubElement(xml,
'blockBuildWhenDownstreamBuilding').text = 'true'
else:
XML.SubElement(xml,
'blockBuildWhenDownstreamBuilding').text = 'false'
if data.get('block-upstream'):
XML.SubElement(xml,
'blockBuildWhenUpstreamBuilding').text = 'true'
else:
XML.SubElement(xml,
'blockBuildWhenUpstreamBuilding').text = 'false'
if 'auth-token' in data:
XML.SubElement(xml, 'authToken').text = data['auth-token']
if data.get('concurrent'):
XML.SubElement(xml, 'concurrentBuild').text = 'true'
else:
XML.SubElement(xml, 'concurrentBuild').text = 'false'
if 'workspace' in data:
XML.SubElement(xml, 'customWorkspace').text = \
str(data['workspace'])
if (xml.tag == 'matrix-project') and ('child-workspace' in data):
XML.SubElement(xml, 'childCustomWorkspace').text = \
str(data['child-workspace'])
if 'quiet-period' in data:
XML.SubElement(xml, 'quietPeriod').text = str(data['quiet-period'])
node = data.get('node', None)
if node:
XML.SubElement(xml, 'assignedNode').text = node
XML.SubElement(xml, 'canRoam').text = 'false'
else:
XML.SubElement(xml, 'canRoam').text = 'true'
if 'retry-count' in data:
XML.SubElement(xml, 'scmCheckoutRetryCount').text = \
str(data['retry-count'])
if 'logrotate' in data:
if not self.logrotate_warn_issued:
                logging.warning('logrotate is deprecated on jenkins>=1.637,'
                                ' use the build-discarder property on newer'
                                ' jenkins instead')
self.logrotate_warn_issued = True
lr_xml = XML.SubElement(xml, 'logRotator')
logrotate = data['logrotate']
lr_days = XML.SubElement(lr_xml, 'daysToKeep')
lr_days.text = str(logrotate.get('daysToKeep', -1))
lr_num = XML.SubElement(lr_xml, 'numToKeep')
lr_num.text = str(logrotate.get('numToKeep', -1))
lr_adays = XML.SubElement(lr_xml, 'artifactDaysToKeep')
lr_adays.text = str(logrotate.get('artifactDaysToKeep', -1))
lr_anum = XML.SubElement(lr_xml, 'artifactNumToKeep')
lr_anum.text = str(logrotate.get('artifactNumToKeep', -1))
if 'raw' in data:
raw(self.registry, xml, data['raw'])
def raw(registry, xml_parent, data):
    # Documented in definition.rst, since the includes directive does not
    # work well for cross-cutting methods like this.
root = XML.fromstring(data.get('xml'))
remove_ignorable_whitespace(root)
xml_parent.append(root) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/general.py | general.py |
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
def build_trends_publisher(plugin_name, xml_element, data):
"""Helper to create various trend publishers.
"""
def append_thresholds(element, data, only_totals):
"""Appends the status thresholds.
"""
for status in ['unstable', 'failed']:
status_data = data.get(status, {})
limits = [
('total-all', 'TotalAll'),
('total-high', 'TotalHigh'),
('total-normal', 'TotalNormal'),
('total-low', 'TotalLow')]
if only_totals is False:
limits.extend([
('new-all', 'NewAll'),
('new-high', 'NewHigh'),
('new-normal', 'NewNormal'),
('new-low', 'NewLow')])
for key, tag_suffix in limits:
tag_name = status + tag_suffix
XML.SubElement(element, tag_name).text = str(
status_data.get(key, ''))
# Tuples containing: setting name, tag name, default value
settings = [
('healthy', 'healthy', ''),
('unhealthy', 'unHealthy', ''),
('health-threshold', 'thresholdLimit', 'low'),
('plugin-name', 'pluginName', plugin_name),
('default-encoding', 'defaultEncoding', ''),
('can-run-on-failed', 'canRunOnFailed', False),
('use-stable-build-as-reference', 'useStableBuildAsReference', False),
('use-previous-build-as-reference',
'usePreviousBuildAsReference', False),
('use-delta-values', 'useDeltaValues', False),
('thresholds', 'thresholds', {}),
('should-detect-modules', 'shouldDetectModules', False),
('dont-compute-new', 'dontComputeNew', True),
('do-not-resolve-relative-paths', 'doNotResolveRelativePaths', False),
('pattern', 'pattern', '')]
thresholds = ['low', 'normal', 'high']
for key, tag_name, default in settings:
xml_config = XML.SubElement(xml_element, tag_name)
config_value = data.get(key, default)
if key == 'thresholds':
append_thresholds(
xml_config,
config_value,
data.get('dont-compute-new', True))
elif key == 'health-threshold' and config_value not in thresholds:
raise JenkinsJobsException("health-threshold must be one of %s" %
", ".join(thresholds))
else:
if isinstance(default, bool):
xml_config.text = str(config_value).lower()
else:
xml_config.text = str(config_value)
def config_file_provider_builder(xml_parent, data):
"""Builder / Wrapper helper"""
xml_files = XML.SubElement(xml_parent, 'managedFiles')
files = data.get('files', [])
for file in files:
xml_file = XML.SubElement(xml_files, 'org.jenkinsci.plugins.'
'configfiles.buildwrapper.ManagedFile')
mapping = [
('file-id', 'fileId', None),
('target', 'targetLocation', ''),
('variable', 'variable', ''),
]
convert_mapping_to_xml(xml_file, file, mapping, fail_required=True)
def config_file_provider_settings(xml_parent, data):
SETTINGS_TYPES = ['file', 'cfp']
settings = {
'default-settings':
'jenkins.mvn.DefaultSettingsProvider',
'settings':
'jenkins.mvn.FilePathSettingsProvider',
'config-file-provider-settings':
'org.jenkinsci.plugins.configfiles.maven.job.MvnSettingsProvider',
'default-global-settings':
'jenkins.mvn.DefaultGlobalSettingsProvider',
'global-settings':
'jenkins.mvn.FilePathGlobalSettingsProvider',
'config-file-provider-global-settings':
'org.jenkinsci.plugins.configfiles.maven.job.'
'MvnGlobalSettingsProvider',
}
if 'settings' in data:
# Support for Config File Provider
settings_file = str(data['settings'])
settings_type = data.get('settings-type', 'file')
# For cfp versions <2.10.0 we are able to detect cfp via the config
# settings name.
text = 'org.jenkinsci.plugins.configfiles.maven.MavenSettingsConfig'
if settings_file.startswith(text):
settings_type = 'cfp'
if settings_type == 'file':
lsettings = XML.SubElement(
xml_parent, 'settings',
{'class': settings['settings']})
XML.SubElement(lsettings, 'path').text = settings_file
elif settings_type == 'cfp':
lsettings = XML.SubElement(
xml_parent, 'settings',
{'class': settings['config-file-provider-settings']})
XML.SubElement(lsettings, 'settingsConfigId').text = settings_file
else:
raise InvalidAttributeError(
'settings-type', settings_type, SETTINGS_TYPES)
else:
XML.SubElement(xml_parent, 'settings',
{'class': settings['default-settings']})
if 'global-settings' in data:
# Support for Config File Provider
global_settings_file = str(data['global-settings'])
global_settings_type = data.get('settings-type', 'file')
# For cfp versions <2.10.0 we are able to detect cfp via the config
# settings name.
text = ('org.jenkinsci.plugins.configfiles.maven.'
'GlobalMavenSettingsConfig')
if global_settings_file.startswith(text):
global_settings_type = 'cfp'
if global_settings_type == 'file':
gsettings = XML.SubElement(xml_parent, 'globalSettings',
{'class': settings['global-settings']})
XML.SubElement(gsettings, 'path').text = global_settings_file
elif global_settings_type == 'cfp':
gsettings = XML.SubElement(
xml_parent, 'globalSettings',
{'class': settings['config-file-provider-global-settings']})
XML.SubElement(
gsettings,
'settingsConfigId').text = global_settings_file
else:
raise InvalidAttributeError(
'settings-type', global_settings_type, SETTINGS_TYPES)
else:
XML.SubElement(xml_parent, 'globalSettings',
{'class': settings['default-global-settings']})
def copyartifact_build_selector(xml_parent, data, select_tag='selector'):
select = data.get('which-build', 'last-successful')
selectdict = {'last-successful': 'StatusBuildSelector',
'last-completed': 'LastCompletedBuildSelector',
'specific-build': 'SpecificBuildSelector',
'last-saved': 'SavedBuildSelector',
'upstream-build': 'TriggeredBuildSelector',
'permalink': 'PermalinkBuildSelector',
'workspace-latest': 'WorkspaceSelector',
'build-param': 'ParameterizedBuildSelector',
'downstream-build': 'DownstreamBuildSelector',
'multijob-build': 'MultiJobBuildSelector'}
if select not in selectdict:
raise InvalidAttributeError('which-build',
select,
selectdict.keys())
permalink = data.get('permalink', 'last')
permalinkdict = {'last': 'lastBuild',
'last-stable': 'lastStableBuild',
'last-successful': 'lastSuccessfulBuild',
'last-failed': 'lastFailedBuild',
'last-unstable': 'lastUnstableBuild',
'last-unsuccessful': 'lastUnsuccessfulBuild'}
if permalink not in permalinkdict:
raise InvalidAttributeError('permalink',
permalink,
permalinkdict.keys())
if select == 'multijob-build':
selector = XML.SubElement(xml_parent, select_tag,
{'class':
'com.tikal.jenkins.plugins.multijob.' +
selectdict[select]})
else:
selector = XML.SubElement(xml_parent, select_tag,
{'class':
'hudson.plugins.copyartifact.' +
selectdict[select]})
if select == 'specific-build':
XML.SubElement(selector, 'buildNumber').text = data['build-number']
if select == 'last-successful':
XML.SubElement(selector, 'stable').text = str(
data.get('stable', False)).lower()
if select == 'upstream-build':
XML.SubElement(selector, 'fallbackToLastSuccessful').text = str(
data.get('fallback-to-last-successful', False)).lower()
if select == 'permalink':
XML.SubElement(selector, 'id').text = permalinkdict[permalink]
if select == 'build-param':
XML.SubElement(selector, 'parameterName').text = data['param']
if select == 'downstream-build':
XML.SubElement(selector, 'upstreamProjectName').text = (
data['upstream-project-name'])
XML.SubElement(selector, 'upstreamBuildNumber').text = (
data['upstream-build-number'])
def findbugs_settings(xml_parent, data):
# General Options
mapping = [
('rank-priority', 'isRankActivated', False),
('include-files', 'includePattern', ''),
('exclude-files', 'excludePattern', ''),
]
convert_mapping_to_xml(xml_parent, data, mapping, fail_required=True)
def get_value_from_yaml_or_config_file(key, section, data, jjb_config):
result = data.get(key, '')
if result == '':
result = jjb_config.get_plugin_config(section, key)
return result
def cloudformation_region_dict():
region_dict = {'us-east-1': 'US_East_Northern_Virginia',
'us-west-1': 'US_WEST_Northern_California',
'us-west-2': 'US_WEST_Oregon',
'eu-central-1': 'EU_Frankfurt',
'eu-west-1': 'EU_Ireland',
'ap-southeast-1': 'Asia_Pacific_Singapore',
'ap-southeast-2': 'Asia_Pacific_Sydney',
'ap-northeast-1': 'Asia_Pacific_Tokyo',
'sa-east-1': 'South_America_Sao_Paulo'}
return region_dict
def cloudformation_init(xml_parent, data, xml_tag):
cloudformation = XML.SubElement(
xml_parent, 'com.syncapse.jenkinsci.'
'plugins.awscloudformationwrapper.' + xml_tag)
return XML.SubElement(cloudformation, 'stacks')
def cloudformation_stack(xml_parent, stack, xml_tag, stacks, region_dict):
if 'name' not in stack or stack['name'] == '':
raise MissingAttributeError('name')
step = XML.SubElement(
stacks, 'com.syncapse.jenkinsci.plugins.'
'awscloudformationwrapper.' + xml_tag)
try:
XML.SubElement(step, 'stackName').text = stack['name']
XML.SubElement(step, 'awsAccessKey').text = stack['access-key']
XML.SubElement(step, 'awsSecretKey').text = stack['secret-key']
region = stack['region']
except KeyError as e:
raise MissingAttributeError(e.args[0])
if region not in region_dict:
raise InvalidAttributeError('region', region, region_dict.keys())
XML.SubElement(step, 'awsRegion').text = region_dict.get(region)
if xml_tag == 'SimpleStackBean':
prefix = str(stack.get('prefix', False)).lower()
XML.SubElement(step, 'isPrefixSelected').text = prefix
else:
XML.SubElement(step, 'description').text = stack.get('description', '')
XML.SubElement(step, 'parameters').text = ','.join(
stack.get('parameters', []))
XML.SubElement(step, 'timeout').text = str(stack.get('timeout', '0'))
XML.SubElement(step, 'sleep').text = str(stack.get('sleep', '0'))
try:
XML.SubElement(step, 'cloudFormationRecipe').text = stack['recipe']
except KeyError as e:
raise MissingAttributeError(e.args[0])
def include_exclude_patterns(xml_parent, data, yaml_prefix,
xml_elem_name):
xml_element = XML.SubElement(xml_parent, xml_elem_name)
XML.SubElement(xml_element, 'includePatterns').text = ','.join(
data.get(yaml_prefix + '-include-patterns', []))
XML.SubElement(xml_element, 'excludePatterns').text = ','.join(
data.get(yaml_prefix + '-exclude-patterns', []))
def artifactory_deployment_patterns(xml_parent, data):
include_exclude_patterns(xml_parent, data, 'deployment',
'artifactDeploymentPatterns')
def artifactory_env_vars_patterns(xml_parent, data):
include_exclude_patterns(xml_parent, data, 'env-vars',
'envVarsPatterns')
def artifactory_optional_props(xml_parent, data, target):
optional_str_props = [
('scopes', 'scopes'),
('violationRecipients', 'violation-recipients'),
('blackDuckAppName', 'black-duck-app-name'),
('blackDuckAppVersion', 'black-duck-app-version'),
('blackDuckReportRecipients', 'black-duck-report-recipients'),
('blackDuckScopes', 'black-duck-scopes')
]
for (xml_prop, yaml_prop) in optional_str_props:
XML.SubElement(xml_parent, xml_prop).text = data.get(
yaml_prop, '')
common_bool_props = [
# yaml property name, xml property name, default value
('deploy-artifacts', 'deployArtifacts', True),
('discard-old-builds', 'discardOldBuilds', False),
('discard-build-artifacts', 'discardBuildArtifacts', False),
('publish-build-info', 'deployBuildInfo', False),
('env-vars-include', 'includeEnvVars', False),
('run-checks', 'runChecks', False),
('include-publish-artifacts', 'includePublishArtifacts', False),
('license-auto-discovery', 'licenseAutoDiscovery', True),
('enable-issue-tracker-integration', 'enableIssueTrackerIntegration',
False),
('aggregate-build-issues', 'aggregateBuildIssues', False),
('black-duck-run-checks', 'blackDuckRunChecks', False),
('black-duck-include-published-artifacts',
'blackDuckIncludePublishedArtifacts', False),
('auto-create-missing-component-requests',
'autoCreateMissingComponentRequests', True),
('auto-discard-stale-component-requests',
'autoDiscardStaleComponentRequests', True),
('filter-excluded-artifacts-from-build',
'filterExcludedArtifactsFromBuild', False)
]
convert_mapping_to_xml(
xml_parent, data, common_bool_props, fail_required=True)
if 'wrappers' in target:
wrapper_bool_props = [
('enable-resolve-artifacts', 'enableResolveArtifacts', False),
('disable-license-auto-discovery',
'disableLicenseAutoDiscovery', False),
('record-all-dependencies',
'recordAllDependencies', False)
]
convert_mapping_to_xml(
xml_parent, data, wrapper_bool_props, fail_required=True)
if 'publishers' in target:
publisher_bool_props = [
('even-if-unstable', 'evenIfUnstable', False),
('pass-identified-downstream', 'passIdentifiedDownstream', False),
('allow-promotion-of-non-staged-builds',
'allowPromotionOfNonStagedBuilds', False)
]
convert_mapping_to_xml(
xml_parent, data, publisher_bool_props, fail_required=True)
def artifactory_common_details(details, data):
XML.SubElement(details, 'artifactoryName').text = data.get('name', '')
XML.SubElement(details, 'artifactoryUrl').text = data.get('url', '')
def artifactory_repository(xml_parent, data, target):
if 'release' in target:
XML.SubElement(xml_parent, 'keyFromText').text = data.get(
'deploy-release-repo-key', '')
XML.SubElement(xml_parent, 'keyFromSelect').text = data.get(
'deploy-release-repo-key', '')
XML.SubElement(xml_parent, 'dynamicMode').text = str(
data.get('deploy-dynamic-mode', False)).lower()
if 'snapshot' in target:
XML.SubElement(xml_parent, 'keyFromText').text = data.get(
'deploy-snapshot-repo-key', '')
XML.SubElement(xml_parent, 'keyFromSelect').text = data.get(
'deploy-snapshot-repo-key', '')
XML.SubElement(xml_parent, 'dynamicMode').text = str(
data.get('deploy-dynamic-mode', False)).lower()
def append_git_revision_config(parent, config_def):
params = XML.SubElement(
parent, 'hudson.plugins.git.GitRevisionBuildParameters')
try:
# If git-revision is a boolean, the get() will
# throw an AttributeError
combine_commits = str(
config_def.get('combine-queued-commits', False)).lower()
except AttributeError:
combine_commits = 'false'
XML.SubElement(params, 'combineQueuedCommits').text = combine_commits
def test_fairy_common(xml_element, data):
xml_element.set('plugin', 'TestFairy')
valid_max_duration = ['10m', '60m', '300m', '1440m']
valid_interval = [1, 2, 5]
valid_video_quality = ['high', 'medium', 'low']
mappings = [
# General
('apikey', 'apiKey', None),
('appfile', 'appFile', None),
('tester-groups', 'testersGroups', ''),
('notify-testers', 'notifyTesters', True),
('autoupdate', 'autoUpdate', True),
# Session
('max-duration', 'maxDuration', '10m', valid_max_duration),
('record-on-background', 'recordOnBackground', False),
('data-only-wifi', 'dataOnlyWifi', False),
# Video
('video-enabled', 'isVideoEnabled', True),
('screenshot-interval', 'screenshotInterval', 1, valid_interval),
('video-quality', 'videoQuality', 'high', valid_video_quality),
# Metrics
('cpu', 'cpu', True),
('memory', 'memory', True),
('logs', 'logs', True),
('network', 'network', False),
('phone-signal', 'phoneSignal', False),
('wifi', 'wifi', False),
('gps', 'gps', False),
('battery', 'battery', False),
('opengl', 'openGl', False),
# Advanced options
('advanced-options', 'advancedOptions', '')
]
convert_mapping_to_xml(xml_element, data, mappings, fail_required=True)
def convert_mapping_to_xml(parent, data, mapping, fail_required=False):
"""Convert mapping to XML
    fail_required affects the last parameter of a mapping entry when that
    parameter is set to 'None'. When fail_required is True, a 'None' value
    represents a required configuration, and a MissingAttributeError is
    raised if the user does not provide the configuration.

    If fail_required is False, the parameter is treated as optional and the
    logic will skip configuring the XML tag for it. We recommend that new
    plugins set fail_required=True and, instead of using optional
    parameters, provide a default value for every parameter that is not
    required.

    valid_options provides a way to check whether the value the user input
    is in a list of available options. When the user passes a value that is
    not in the list, an InvalidAttributeError is raised.

    valid_dict provides a way to set options through their key and value. If
    the user input corresponds to a key, the XML tag will use the key's
    value for its element. When the user passes a value that matches no
    key, an InvalidAttributeError is raised.
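
    A minimal usage sketch (the option names here are hypothetical)::

        mapping = [
            # option name in YAML, XML tag name, default value
            ('enabled', 'isEnabled', False),
            # an optional fourth element lists the valid options
            ('mode', 'mode', 'fast', ['fast', 'slow']),
        ]
        convert_mapping_to_xml(xml_parent, data, mapping, fail_required=True)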
"""
for elem in mapping:
(optname, xmlname, val) = elem[:3]
val = data.get(optname, val)
valid_options = []
valid_dict = {}
if len(elem) == 4:
if type(elem[3]) is list:
valid_options = elem[3]
if type(elem[3]) is dict:
valid_dict = elem[3]
# Use fail_required setting to allow support for optional parameters
# we will phase this out in the future as we rework plugins so that
# optional parameters use a default setting instead.
if val is None and fail_required is True:
raise MissingAttributeError(optname)
# (Deprecated) in the future we will default to fail_required True
# if no value is provided then continue else leave it
# up to the user if they want to use an empty XML tag
if val is None and fail_required is False:
continue
if valid_dict:
if val not in valid_dict:
raise InvalidAttributeError(optname, val, valid_dict.keys())
if valid_options:
if val not in valid_options:
raise InvalidAttributeError(optname, val, valid_options)
if type(val) == bool:
val = str(val).lower()
if val in valid_dict:
XML.SubElement(parent, xmlname).text = str(valid_dict[val])
else:
XML.SubElement(parent, xmlname).text = str(val) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/helpers.py | helpers.py |
import logging
import pkg_resources
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules.builders import create_builders
from jenkins_jobs.modules.helpers import artifactory_common_details
from jenkins_jobs.modules.helpers import artifactory_deployment_patterns
from jenkins_jobs.modules.helpers import artifactory_env_vars_patterns
from jenkins_jobs.modules.helpers import artifactory_optional_props
from jenkins_jobs.modules.helpers import artifactory_repository
from jenkins_jobs.modules.helpers import config_file_provider_builder
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
logger = logging.getLogger(__name__)
MIN_TO_SEC = 60
def docker_custom_build_env(registry, xml_parent, data):
"""yaml: docker-custom-build-env
Allows the definition of a build environment for a job using a Docker
container.
Requires the Jenkins :jenkins-wiki:`CloudBees Docker Custom Build
Environment Plugin<CloudBees+Docker+Custom+Build+Environment+Plugin>`.
:arg str image-type: Docker image type. Valid values and their
additional attributes described in the image_types_ table
:arg str docker-tool: The name of the docker installation to use
(default 'Default')
:arg str host: URI to the docker host you are using
:arg str credentials-id: Argument to specify the ID of credentials to use
for docker host (optional)
:arg str registry-credentials-id: Argument to specify the ID of
credentials to use for docker registry (optional)
    :arg list volumes: Volumes to bind mount from slave host into container
:volume: * **host-path** (`str`) Path on host
* **path** (`str`) Path inside container
:arg bool verbose: Log docker commands executed by plugin on build log
(default false)
:arg bool privileged: Run in privileged mode (default false)
:arg bool force-pull: Force pull (default false)
    :arg str group: The user/group to run the build as; it has to be the
        same as the Jenkins slave user so that files created in the
        workspace have adequate ownership and permissions
:arg str command: Container start command (default '/bin/cat')
:arg str net: Network bridge (default 'bridge')
.. _image_types:
================== ====================================================
Image Type Description
================== ====================================================
dockerfile Build docker image from a Dockerfile in project
workspace. With this option, project can define the
build environment as a Dockerfile stored in SCM with
project source code
:context-path: (str) Path to docker context
(default '.')
:dockerfile: (str) Use an alternate Dockerfile to
build the container hosting this build
(default 'Dockerfile')
pull Pull specified docker image from Docker repository
:image: (str) Image id/tag
================== ====================================================
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/docker-custom-build-env001.yaml
:language: yaml
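
    A minimal inline sketch (paths and values are illustrative, not taken
    from the fixture file)::

        wrappers:
          - docker-custom-build-env:
              image-type: dockerfile    # build image from workspace Dockerfile
              context-path: docker      # illustrative context directory
              verbose: true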
"""
core_prefix = 'com.cloudbees.jenkins.plugins.okidocki.'
entry_xml = XML.SubElement(
xml_parent, core_prefix + 'DockerBuildWrapper')
entry_xml.set('plugin', 'docker-custom-build-environment')
selectorobj = XML.SubElement(entry_xml, 'selector')
image_type = data['image-type']
if image_type == 'dockerfile':
selectorobj.set('class', core_prefix + 'DockerfileImageSelector')
XML.SubElement(selectorobj, 'contextPath').text = data.get(
'context-path', '.')
XML.SubElement(selectorobj, 'dockerfile').text = data.get(
'dockerfile', 'Dockerfile')
elif image_type == 'pull':
selectorobj.set('class', core_prefix + 'PullDockerImageSelector')
XML.SubElement(selectorobj, 'image').text = data.get(
'image', '')
XML.SubElement(entry_xml, 'dockerInstallation').text = data.get(
'docker-tool', 'Default')
host = XML.SubElement(entry_xml, 'dockerHost')
host.set('plugin', 'docker-commons')
if data.get('host'):
XML.SubElement(host, 'uri').text = data['host']
if data.get('credentials-id'):
XML.SubElement(host, 'credentialsId').text = data['credentials-id']
XML.SubElement(entry_xml, 'dockerRegistryCredentials').text = data.get(
'registry-credentials-id', '')
volumesobj = XML.SubElement(entry_xml, 'volumes')
volumes = data.get('volumes', [])
if not volumes:
volumesobj.set('class', 'empty-list')
else:
for volume in volumes:
volumeobj = XML.SubElement(
volumesobj, 'com.cloudbees.jenkins.plugins.okidocki.Volume')
XML.SubElement(volumeobj, 'hostPath').text = volume['volume'].get(
'host-path', '')
XML.SubElement(volumeobj, 'path').text = volume['volume'].get(
'path', '')
XML.SubElement(entry_xml, 'forcePull').text = str(data.get(
'force-pull', False)).lower()
XML.SubElement(entry_xml, 'privileged').text = str(data.get(
'privileged', False)).lower()
XML.SubElement(entry_xml, 'verbose').text = str(data.get(
'verbose', False)).lower()
XML.SubElement(entry_xml, 'group').text = data.get('group', '')
XML.SubElement(entry_xml, 'command').text = data.get('command', '/bin/cat')
XML.SubElement(entry_xml, 'net').text = data.get('net', 'bridge')
def ci_skip(registry, xml_parent, data):
"""yaml: ci-skip
    Skip making a build for certain pushes.
    Just add [ci skip] to your commit message to let Jenkins know
    that you do not want to perform a build for the next push.
Requires the Jenkins :jenkins-wiki:`Ci Skip Plugin <Ci+Skip+Plugin>`.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ci-skip001.yaml
"""
rpobj = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(rpobj, 'ruby-object', attrib={
'pluginid': 'ci-skip',
'ruby-class': 'Jenkins::Tasks::BuildWrapperProxy'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'ci-skip', 'ruby-class': 'String'
})
pluginid.text = 'ci-skip'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'CiSkipWrapper', 'pluginid': 'ci-skip'
})
XML.SubElement(obj, 'ci__skip', {
'pluginid': 'ci-skip', 'ruby-class': 'NilClass'
})
def config_file_provider(registry, xml_parent, data):
"""yaml: config-file-provider
Provide configuration files (i.e., settings.xml for maven etc.)
which will be copied to the job's workspace.
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`.
:arg list files: List of managed config files made up of three
parameters
:files: * **file-id** (`str`) -- The identifier for the managed config
file
* **target** (`str`) -- Define where the file should be created
(default '')
* **variable** (`str`) -- Define an environment variable to be
used (default '')
Example:
.. literalinclude:: \
/../../tests/wrappers/fixtures/config-file-provider003.yaml
:language: yaml
"""
cfp = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.configfiles.'
'buildwrapper.ConfigFileBuildWrapper')
cfp.set('plugin', 'config-file-provider')
config_file_provider_builder(cfp, data)
def logfilesize(registry, xml_parent, data):
"""yaml: logfilesize
Abort the build if its logfile becomes too big.
Requires the Jenkins :jenkins-wiki:`Logfilesizechecker Plugin
<Logfilesizechecker+Plugin>`.
:arg bool set-own: Use job specific maximum log size instead of global
config value (default false).
:arg bool fail: Make builds aborted by this wrapper be marked as "failed"
(default false).
:arg int size: Abort the build if logfile size is bigger than this
value (in MiB, default 128). Only applies if set-own is true.
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize-full.yaml
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize-minimal.yaml
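
    A minimal inline sketch (the size value is illustrative)::

        wrappers:
          - logfilesize:
              set-own: true
              size: 256    # abort when the log exceeds 256 MiB
              fail: true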
"""
lfswrapper = XML.SubElement(xml_parent,
'hudson.plugins.logfilesizechecker.'
'LogfilesizecheckerWrapper')
lfswrapper.set("plugin", "logfilesizechecker")
mapping = [
('set-own', 'setOwn', False),
('size', 'maxLogSize', 128),
('fail', 'failBuild', False),
]
convert_mapping_to_xml(lfswrapper, data, mapping, fail_required=True)
def timeout(registry, xml_parent, data):
"""yaml: timeout
Abort the build if it runs too long.
Requires the Jenkins :jenkins-wiki:`Build Timeout Plugin
<Build-timeout+Plugin>`.
:arg bool fail: Mark the build as failed (default false)
:arg bool abort: Mark the build as aborted (default false)
:arg bool write-description: Write a message in the description
(default false)
:arg int timeout: Abort the build after this number of minutes (default 3)
:arg str timeout-var: Export an environment variable to reference the
timeout value (optional)
:arg str type: Timeout type to use (default absolute)
:type values:
* **likely-stuck**
* **no-activity**
* **elastic**
* **absolute**
* **deadline**
    :arg int elastic-percentage: Percentage of the average duration of the
        most recent builds after which to declare a timeout, only applies
        to **elastic** type. (default 0)
    :arg int elastic-number-builds: Number of builds to consider when
        computing the average duration, only applies to **elastic** type.
        (default 0)
:arg int elastic-default-timeout: Timeout to use if there were no previous
builds, only applies to **elastic** type. (default 3)
:arg str deadline-time: Build terminate automatically at next deadline time
(HH:MM:SS), only applies to **deadline** type. (default 0:00:00)
:arg int deadline-tolerance: Period in minutes after deadline when a job
should be immediately aborted, only applies to **deadline** type.
(default 1)
Example (Version < 1.14):
.. literalinclude:: /../../tests/wrappers/fixtures/timeout/timeout001.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout/timeout002.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout/timeout003.yaml
Example (Version >= 1.14):
.. literalinclude::
/../../tests/wrappers/fixtures/timeout/version-1.14/absolute001.yaml
.. literalinclude::
/../../tests/wrappers/fixtures/timeout/version-1.14/no-activity001.yaml
.. literalinclude::
/../../tests/wrappers/fixtures/timeout/version-1.14/likely-stuck001.yaml
.. literalinclude::
/../../tests/wrappers/fixtures/timeout/version-1.14/elastic001.yaml
.. literalinclude::
/../../tests/wrappers/fixtures/timeout/version-1.15/deadline001.yaml
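
    A minimal inline sketch for the ``no-activity`` type (values are
    illustrative; requires plugin version >= 1.14)::

        wrappers:
          - timeout:
              type: no-activity
              timeout: 5    # abort after 5 minutes without log activity
              abort: true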
"""
prefix = 'hudson.plugins.build__timeout.'
twrapper = XML.SubElement(xml_parent, prefix + 'BuildTimeoutWrapper')
plugin_info = registry.get_plugin_info(
"Jenkins build timeout plugin")
version = pkg_resources.parse_version(plugin_info.get("version", "0"))
valid_strategies = ['absolute', 'no-activity', 'likely-stuck', 'elastic',
'deadline']
if version >= pkg_resources.parse_version("1.14"):
strategy = data.get('type', 'absolute')
        if strategy not in valid_strategies:
            raise InvalidAttributeError('type', strategy, valid_strategies)
if strategy == "absolute":
strategy_element = XML.SubElement(
twrapper, 'strategy',
{'class': "hudson.plugins.build_timeout."
"impl.AbsoluteTimeOutStrategy"})
XML.SubElement(strategy_element, 'timeoutMinutes'
).text = str(data.get('timeout', 3))
elif strategy == "no-activity":
strategy_element = XML.SubElement(
twrapper, 'strategy',
{'class': "hudson.plugins.build_timeout."
"impl.NoActivityTimeOutStrategy"})
timeout_sec = int(data.get('timeout', 3)) * MIN_TO_SEC
XML.SubElement(strategy_element,
'timeoutSecondsString').text = str(timeout_sec)
elif strategy == "likely-stuck":
strategy_element = XML.SubElement(
twrapper, 'strategy',
{'class': "hudson.plugins.build_timeout."
"impl.LikelyStuckTimeOutStrategy"})
XML.SubElement(strategy_element,
'timeoutMinutes').text = str(data.get('timeout', 3))
elif strategy == "elastic":
strategy_element = XML.SubElement(
twrapper, 'strategy',
{'class': "hudson.plugins.build_timeout."
"impl.ElasticTimeOutStrategy"})
XML.SubElement(strategy_element, 'timeoutPercentage'
).text = str(data.get('elastic-percentage', 0))
XML.SubElement(strategy_element, 'numberOfBuilds'
).text = str(data.get('elastic-number-builds', 0))
XML.SubElement(strategy_element, 'timeoutMinutesElasticDefault'
).text = str(data.get('elastic-default-timeout', 3))
elif strategy == "deadline":
strategy_element = XML.SubElement(
twrapper, 'strategy',
{'class': "hudson.plugins.build_timeout."
"impl.DeadlineTimeOutStrategy"})
deadline_time = str(data.get('deadline-time', '0:00:00'))
XML.SubElement(strategy_element,
'deadlineTime').text = str(deadline_time)
deadline_tolerance = int(data.get('deadline-tolerance', 1))
XML.SubElement(strategy_element, 'deadlineToleranceInMinutes'
).text = str(deadline_tolerance)
actions = []
for action in ['fail', 'abort']:
if str(data.get(action, 'false')).lower() == 'true':
actions.append(action)
# Set the default action to "abort"
if len(actions) == 0:
actions.append("abort")
description = data.get('write-description', None)
if description is not None:
actions.append('write-description')
operation_list = XML.SubElement(twrapper, 'operationList')
for action in actions:
fmt_str = prefix + "operations.{0}Operation"
if action == "abort":
XML.SubElement(operation_list, fmt_str.format("Abort"))
elif action == "fail":
XML.SubElement(operation_list, fmt_str.format("Fail"))
elif action == "write-description":
write_description = XML.SubElement(
operation_list, fmt_str.format("WriteDescription"))
XML.SubElement(write_description, "description"
).text = description
else:
raise JenkinsJobsException("Unsupported BuiltTimeoutWrapper "
"plugin action: {0}".format(action))
timeout_env_var = data.get('timeout-var')
if timeout_env_var:
XML.SubElement(twrapper,
'timeoutEnvVar').text = str(timeout_env_var)
else:
XML.SubElement(twrapper,
'timeoutMinutes').text = str(data.get('timeout', 3))
timeout_env_var = data.get('timeout-var')
if timeout_env_var:
XML.SubElement(twrapper,
'timeoutEnvVar').text = str(timeout_env_var)
XML.SubElement(twrapper, 'failBuild'
).text = str(data.get('fail', 'false')).lower()
XML.SubElement(twrapper, 'writingDescription'
).text = str(data.get('write-description', 'false')
).lower()
XML.SubElement(twrapper, 'timeoutPercentage'
).text = str(data.get('elastic-percentage', 0))
XML.SubElement(twrapper, 'timeoutMinutesElasticDefault'
).text = str(data.get('elastic-default-timeout', 3))
tout_type = str(data.get('type', 'absolute')).lower()
if tout_type == 'likely-stuck':
tout_type = 'likelyStuck'
XML.SubElement(twrapper, 'timeoutType').text = tout_type
def timestamps(registry, xml_parent, data):
"""yaml: timestamps
Add timestamps to the console log.
Requires the Jenkins :jenkins-wiki:`Timestamper Plugin <Timestamper>`.
Example::
wrappers:
- timestamps
"""
XML.SubElement(xml_parent,
'hudson.plugins.timestamper.TimestamperBuildWrapper')
def ansicolor(registry, xml_parent, data):
"""yaml: ansicolor
Translate ANSI color codes to HTML in the console log.
Requires the Jenkins :jenkins-wiki:`Ansi Color Plugin <AnsiColor+Plugin>`.
:arg string colormap: (optional) color mapping to use
Examples::
wrappers:
- ansicolor
# Explicitly setting the colormap
wrappers:
- ansicolor:
colormap: vga
"""
cwrapper = XML.SubElement(
xml_parent,
'hudson.plugins.ansicolor.AnsiColorBuildWrapper')
# Optional colormap
colormap = data.get('colormap')
if colormap:
XML.SubElement(cwrapper, 'colorMapName').text = colormap
def build_keeper(registry, xml_parent, data):
"""yaml: build-keeper
    Keep builds based on a specific policy.
Requires the Jenkins :jenkins-wiki:`Build Keeper Plugin
<Build+Keeper+Plugin>`.
:arg str policy: Policy to keep builds.
:policy values:
* **by-day**
* **keep-since**
* **build-number**
* **keep-first-failed**
:arg int build-period: Number argument to calculate build to keep,
depends on the policy. (default 0)
:arg bool dont-keep-failed: Flag to indicate if to keep failed builds.
(default false)
:arg int number-of-fails: number of consecutive failed builds in order
to mark first as keep forever, only applies to keep-first-failed
policy (default 0)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/build-keeper0001.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/build-keeper0002.yaml
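
    A minimal inline sketch for the ``keep-first-failed`` policy (values
    are illustrative)::

        wrappers:
          - build-keeper:
              policy: keep-first-failed
              number-of-fails: 2    # keep the first of 2 consecutive failures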
"""
root = XML.SubElement(xml_parent,
'org.jenkins__ci.plugins.build__keeper.BuildKeeper')
valid_policies = ('by-day', 'keep-since', 'build-number',
'keep-first-failed')
policy = data.get('policy')
build_period = str(data.get('build-period', 0))
dont_keep_failed = str(data.get('dont-keep-failed', False)).lower()
if policy == 'by-day':
policy_element = XML.SubElement(root,
'policy',
{'class': 'org.jenkins_ci.plugins.'
'build_keeper.ByDayPolicy'})
XML.SubElement(policy_element, 'buildPeriod').text = build_period
XML.SubElement(policy_element,
'dontKeepFailed').text = dont_keep_failed
elif policy == 'keep-since':
policy_element = XML.SubElement(root,
'policy',
{'class': 'org.jenkins_ci.plugins.'
'build_keeper.KeepSincePolicy'})
XML.SubElement(policy_element, 'buildPeriod').text = build_period
XML.SubElement(policy_element,
'dontKeepFailed').text = dont_keep_failed
elif policy == 'build-number':
policy_element = XML.SubElement(root,
'policy',
{'class': 'org.jenkins_ci.plugins.'
'build_keeper.BuildNumberPolicy'})
XML.SubElement(policy_element, 'buildPeriod').text = build_period
XML.SubElement(policy_element,
'dontKeepFailed').text = dont_keep_failed
elif policy == 'keep-first-failed':
policy_element = XML.SubElement(root,
'policy',
{'class': 'org.jenkins_ci.plugins.'
'build_keeper.KeepFirstFailedPolicy'})
XML.SubElement(policy_element, 'numberOfFails').text = str(
data.get('number-of-fails', 0))
    else:
        raise InvalidAttributeError('policy', policy, valid_policies)
def live_screenshot(registry, xml_parent, data):
"""yaml: live-screenshot
Show live screenshots of running jobs in the job list.
Requires the Jenkins :jenkins-wiki:`Live-Screenshot Plugin
<LiveScreenshot+Plugin>`.
:arg str full-size: name of screenshot file (default 'screenshot.png')
:arg str thumbnail: name of thumbnail file (default 'screenshot-thumb.png')
File type must be .png and they must be located inside the $WORKDIR.
Full Example:
.. literalinclude::
/../../tests/wrappers/fixtures/live-screenshot-full.yaml
Minimal Example:
.. literalinclude::
/../../tests/wrappers/fixtures/live-screenshot-minimal.yaml
"""
live = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.livescreenshot.LiveScreenshotBuildWrapper')
live.set('plugin', 'livescreenshot')
mapping = [
('full-size', 'fullscreenFilename', 'screenshot.png'),
('thumbnail', 'thumbnailFilename', 'screenshot-thumb.png'),
]
convert_mapping_to_xml(live, data, mapping, fail_required=True)
def mask_passwords(registry, xml_parent, data):
"""yaml: mask-passwords
Hide passwords in the console log.
Requires the Jenkins :jenkins-wiki:`Mask Passwords Plugin
<Mask+Passwords+Plugin>`.
Example::
wrappers:
- mask-passwords
"""
XML.SubElement(xml_parent,
'com.michelin.cio.hudson.plugins.maskpasswords.'
'MaskPasswordsBuildWrapper')
def workspace_cleanup(registry, xml_parent, data):
"""yaml: workspace-cleanup (pre-build)
Requires the Jenkins :jenkins-wiki:`Workspace Cleanup Plugin
<Workspace+Cleanup+Plugin>`.
The post-build workspace-cleanup is available as a publisher.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too (default false)
:arg str check-parameter: boolean environment variable to check to
determine whether to actually clean up
:arg str external-deletion-command: external deletion command to run
against files and directories
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/workspace-cleanup001.yaml
:language: yaml
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.PreBuildCleanup')
p.set("plugin", "ws-cleanup")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
deldirs = XML.SubElement(p, 'deleteDirs')
deldirs.text = str(data.get("dirmatch", False)).lower()
XML.SubElement(p, 'cleanupParameter').text = str(
data.get('check-parameter', ''))
XML.SubElement(p, 'externalDelete').text = str(
data.get('external-deletion-command', ''))
def m2_repository_cleanup(registry, xml_parent, data):
"""yaml: m2-repository-cleanup
Configure M2 Repository Cleanup
Requires the Jenkins :jenkins-wiki:`M2 Repository Cleanup
<M2+Repository+Cleanup+Plugin>`.
:arg list patterns: List of patterns for artifacts to cleanup before
building. (optional)
This plugin allows you to configure a maven2 job to clean some or all of
the artifacts from the repository before it runs.
Example:
.. literalinclude:: \
../../tests/wrappers/fixtures/m2-repository-cleanup001.yaml
"""
m2repo = XML.SubElement(
xml_parent,
'hudson.plugins.m2__repo__reaper.M2RepoReaperWrapper')
m2repo.set("plugin", "m2-repo-reaper")
patterns = data.get("patterns", [])
XML.SubElement(m2repo, 'artifactPatterns').text = ",".join(patterns)
p = XML.SubElement(m2repo, 'patterns')
for pattern in patterns:
XML.SubElement(p, 'string').text = pattern
def rvm_env(registry, xml_parent, data):
"""yaml: rvm-env
Set the RVM implementation
Requires the Jenkins :jenkins-wiki:`Rvm Plugin <RVM+Plugin>`.
:arg str implementation: Type of implementation. Syntax is RUBY[@GEMSET],
such as '1.9.3' or 'jruby@foo'.
Example::
wrappers:
- rvm-env:
implementation: 1.9.3
"""
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Plugin::Proxies::BuildWrapper"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rvm'})
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RvmWrapper',
'pluginid': 'rvm'})
XML.SubElement(o,
'impl',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = data['implementation']
XML.SubElement(ro,
'pluginid',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = "rvm"
def rbenv(registry, xml_parent, data):
"""yaml: rbenv
Set the rbenv implementation.
Requires the Jenkins :jenkins-wiki:`rbenv plugin <rbenv+plugin>`.
All parameters are optional.
:arg str ruby-version: Version of Ruby to use (default 1.9.3-p484)
:arg bool ignore-local-version: If true, ignore local Ruby
version (defined in the ".ruby-version" file in workspace) even if it
has been defined (default false)
:arg str preinstall-gem-list: List of gems to install
(default 'bundler,rake')
:arg str rbenv-root: RBENV_ROOT (default $HOME/.rbenv)
:arg str rbenv-repo: Which repo to clone rbenv from
(default https://github.com/rbenv/rbenv)
:arg str rbenv-branch: Which branch to clone rbenv from (default master)
:arg str ruby-build-repo: Which repo to clone ruby-build from
(default https://github.com/rbenv/ruby-build)
:arg str ruby-build-branch: Which branch to clone ruby-build from
(default master)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/rbenv003.yaml
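
    A minimal inline sketch (the version value is illustrative)::

        wrappers:
          - rbenv:
              ruby-version: 2.1.5
              ignore-local-version: true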
"""
mapping = [
# option, xml name, default value (text), attributes (hard coded)
("preinstall-gem-list", 'gem__list', 'bundler,rake'),
("rbenv-root", 'rbenv__root', '$HOME/.rbenv'),
("rbenv-repo", 'rbenv__repository',
'https://github.com/rbenv/rbenv'),
("rbenv-branch", 'rbenv__revision', 'master'),
("ruby-build-repo", 'ruby__build__repository',
'https://github.com/rbenv/ruby-build'),
("ruby-build-branch", 'ruby__build__revision', 'master'),
("ruby-version", 'version', '1.9.3-p484'),
]
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Tasks::BuildWrapperProxy"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rbenv'})
XML.SubElement(ro,
'pluginid',
{'pluginid': "rbenv",
'ruby-class': "String"}).text = "rbenv"
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RbenvWrapper',
'pluginid': 'rbenv'})
for elem in mapping:
(optname, xmlname, val) = elem[:3]
xe = XML.SubElement(o,
xmlname,
{'ruby-class': "String",
'pluginid': "rbenv"})
if optname and optname in data:
val = data[optname]
if type(val) == bool:
xe.text = str(val).lower()
else:
xe.text = val
ignore_local_class = 'FalseClass'
if 'ignore-local-version' in data:
ignore_local_string = str(data['ignore-local-version']).lower()
if ignore_local_string == 'true':
ignore_local_class = 'TrueClass'
XML.SubElement(o,
'ignore__local__version',
{'ruby-class': ignore_local_class,
'pluginid': 'rbenv'})
def build_name(registry, xml_parent, data):
"""yaml: build-name
Set the name of the build
Requires the Jenkins :jenkins-wiki:`Build Name Setter Plugin
<Build+Name+Setter+Plugin>`.
:arg str name: Name for the build. Typically you would use a variable
from Jenkins in the name. The syntax would be ${FOO} for
the FOO variable.
Example::
wrappers:
- build-name:
name: Build-${FOO}
"""
bsetter = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.buildnamesetter.'
'BuildNameSetter')
XML.SubElement(bsetter, 'template').text = data['name']
def port_allocator(registry, xml_parent, data):
"""yaml: port-allocator
Assign unique TCP port numbers
Requires the Jenkins :jenkins-wiki:`Port Allocator Plugin
<Port+Allocator+Plugin>`.
:arg str name: Deprecated, use names instead
    :arg list names: List of port variable names or specific port numbers
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/port-allocator002.yaml
"""
pa = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.port__allocator.'
'PortAllocator')
ports = XML.SubElement(pa, 'ports')
names = data.get('names')
if not names:
logger = logging.getLogger(__name__)
logger.warning(
            'port_allocator name is deprecated, use a names list instead')
names = [data['name']]
for name in names:
dpt = XML.SubElement(ports,
'org.jvnet.hudson.plugins.port__allocator.'
'DefaultPortType')
XML.SubElement(dpt, 'name').text = name
def locks(registry, xml_parent, data):
"""yaml: locks
Control parallel execution of jobs.
Requires the Jenkins :jenkins-wiki:`Locks and Latches Plugin
<Locks+and+Latches+plugin>`.
    :arg: List of lock names to use
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/locks002.yaml
:language: yaml
"""
locks = data
if locks:
lw = XML.SubElement(xml_parent,
'hudson.plugins.locksandlatches.LockWrapper')
locktop = XML.SubElement(lw, 'locks')
for lock in locks:
lockwrapper = XML.SubElement(locktop,
'hudson.plugins.locksandlatches.'
'LockWrapper_-LockWaitConfig')
XML.SubElement(lockwrapper, 'name').text = lock
def copy_to_slave(registry, xml_parent, data):
"""yaml: copy-to-slave
Copy files to slave before build
Requires the Jenkins :jenkins-wiki:`Copy To Slave Plugin
<Copy+To+Slave+Plugin>`.
:arg list includes: list of file patterns to copy (optional)
:arg list excludes: list of file patterns to exclude (optional)
:arg bool flatten: flatten directory structure (default false)
:arg str relative-to: base location of includes/excludes, must be home
($JENKINS_HOME), somewhereElse ($JENKINS_HOME/copyToSlave),
userContent ($JENKINS_HOME/userContent) or workspace
(default userContent)
:arg bool include-ant-excludes: exclude ant's default excludes
(default false)
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/copy-to-slave001.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/copy-to-slave002.yaml
:language: yaml
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToSlaveBuildWrapper'
cs = XML.SubElement(xml_parent, p)
XML.SubElement(cs, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cs, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cs, 'flatten').text = \
str(data.get('flatten', False)).lower()
XML.SubElement(cs, 'includeAntExcludes').text = \
str(data.get('include-ant-excludes', False)).lower()
rel = str(data.get('relative-to', 'userContent'))
opt = ('home', 'somewhereElse', 'userContent', 'workspace')
if rel not in opt:
raise ValueError('relative-to must be one of %r' % opt)
XML.SubElement(cs, 'relativeTo').text = rel
# seems to always be false, can't find it in source code
XML.SubElement(cs, 'hudsonHomeRelative').text = 'false'
def inject(registry, xml_parent, data):
"""yaml: inject
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg str properties-file: path to the properties file (default '')
:arg str properties-content: key value pair of properties (default '')
:arg str script-file: path to the script file (default '')
:arg str script-content: contents of a script (default '')
Example::
wrappers:
- inject:
properties-file: /usr/local/foo
properties-content: PATH=/foo/bar
script-file: /usr/local/foo.sh
script-content: echo $PATH
"""
eib = XML.SubElement(xml_parent, 'EnvInjectBuildWrapper')
info = XML.SubElement(eib, 'info')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesFilePath', data.get('properties-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesContent', data.get('properties-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptFilePath', data.get('script-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptContent', data.get('script-content'))
XML.SubElement(info, 'loadFilesFromMaster').text = 'false'
def inject_ownership_variables(registry, xml_parent, data):
"""yaml: inject-ownership-variables
Inject ownership variables to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`
and Jenkins :jenkins-wiki:`Ownership plugin <Ownership+Plugin>`.
:arg bool job-variables: inject job ownership variables to the job
(default false)
:arg bool node-variables: inject node ownership variables to the job
(default false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ownership001.yaml
"""
ownership = XML.SubElement(xml_parent, 'com.synopsys.arc.jenkins.plugins.'
'ownership.wrappers.OwnershipBuildWrapper')
XML.SubElement(ownership, 'injectNodeOwnership').text = \
str(data.get('node-variables', False)).lower()
XML.SubElement(ownership, 'injectJobOwnership').text = \
str(data.get('job-variables', False)).lower()
def inject_passwords(registry, xml_parent, data):
"""yaml: inject-passwords
Inject passwords to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg bool global: inject global passwords to the job
:arg bool mask-password-params: mask password parameters
:arg list job-passwords: key value pair of job passwords
:Parameter: * **name** (`str`) Name of password
* **password** (`str`) Encrypted password
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/passwords001.yaml
"""
eib = XML.SubElement(xml_parent, 'EnvInjectPasswordWrapper')
XML.SubElement(eib, 'injectGlobalPasswords').text = \
str(data.get('global', False)).lower()
XML.SubElement(eib, 'maskPasswordParameters').text = \
str(data.get('mask-password-params', False)).lower()
entries = XML.SubElement(eib, 'passwordEntries')
passwords = data.get('job-passwords', [])
if passwords:
for password in passwords:
entry = XML.SubElement(entries, 'EnvInjectPasswordEntry')
XML.SubElement(entry, 'name').text = password['name']
XML.SubElement(entry, 'value').text = password['password']
def env_file(registry, xml_parent, data):
"""yaml: env-file
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`Environment File Plugin
<Envfile+Plugin>`.
:arg str properties-file: path to the properties file (default '')
Example::
wrappers:
- env-file:
properties-file: ${WORKSPACE}/foo
"""
eib = XML.SubElement(xml_parent,
'hudson.plugins.envfile.EnvFileBuildWrapper')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
eib, 'filePath', data.get('properties-file'))
def env_script(registry, xml_parent, data):
"""yaml: env-script
Add or override environment variables to the whole build process.
Requires the Jenkins :jenkins-wiki:`Environment Script Plugin
<Environment+Script+Plugin>`.
    :arg str script-content: The script to run (default '')
:arg str script-type: The script type.
:script-types supported:
* **unix-script** (default)
* **power-shell**
* **batch-script**
    :arg bool only-run-on-parent: Only applicable for Matrix Jobs. If true,
        run only on the matrix parent job (default false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/env-script001.yaml
"""
el = XML.SubElement(xml_parent, 'com.lookout.jenkins.EnvironmentScript')
XML.SubElement(el, 'script').text = data.get('script-content', '')
valid_script_types = {
'unix-script': 'unixScript',
'power-shell': 'powerShell',
'batch-script': 'batchScript',
}
script_type = data.get('script-type', 'unix-script')
if script_type not in valid_script_types:
raise InvalidAttributeError('script-type', script_type,
valid_script_types)
XML.SubElement(el, 'scriptType').text = valid_script_types[script_type]
only_on_parent = str(data.get('only-run-on-parent', False)).lower()
XML.SubElement(el, 'onlyRunOnParent').text = only_on_parent
def jclouds(registry, xml_parent, data):
"""yaml: jclouds
Uses JClouds to provide slave launching on most of the currently
usable Cloud infrastructures.
Requires the Jenkins :jenkins-wiki:`JClouds Plugin <JClouds+Plugin>`.
:arg bool single-use: Whether or not to terminate the slave after use
(default false).
:arg list instances: The name of the jclouds template to create an
instance from, and its parameters.
:arg str cloud-name: The name of the jclouds profile containing the
specified template.
:arg int count: How many instances to create (default 1).
:arg bool stop-on-terminate: Whether or not to suspend instead of terminate
the instance (default false).
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/jclouds001.yaml
:language: yaml
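
    A minimal inline sketch (template and cloud names are illustrative)::

        wrappers:
          - jclouds:
              single-use: true
              instances:
                - jenkins-slave:          # illustrative template name
                    cloud-name: mycloud   # illustrative profile name
                    count: 1
                    stop-on-terminate: false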
"""
if 'instances' in data:
buildWrapper = XML.SubElement(
xml_parent, 'jenkins.plugins.jclouds.compute.JCloudsBuildWrapper')
instances = XML.SubElement(buildWrapper, 'instancesToRun')
for foo in data['instances']:
for template, params in foo.items():
instance = XML.SubElement(instances,
'jenkins.plugins.jclouds.compute.'
'InstancesToRun')
XML.SubElement(instance, 'templateName').text = template
XML.SubElement(instance, 'cloudName').text = \
params.get('cloud-name', '')
XML.SubElement(instance, 'count').text = \
str(params.get('count', 1))
XML.SubElement(instance, 'suspendOrTerminate').text = \
str(params.get('stop-on-terminate', False)).lower()
if data.get('single-use'):
XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.compute.'
'JCloudsOneOffSlave')
def openstack(registry, xml_parent, data):
"""yaml: openstack
Provision slaves from OpenStack on demand. Requires the Jenkins
:jenkins-wiki:`Openstack Cloud Plugin <Openstack+Cloud+Plugin>`.
:arg list instances: List of instances to be launched at the beginning of
the build.
:instances:
* **cloud-name** (`str`) -- The name of the cloud profile which
contains the specified cloud instance template (required).
* **template-name** (`str`) -- The name of the cloud instance
template to create an instance from(required).
            * **manual-template** (`bool`) -- If true, the instance template
              name is put in the 'Specify Template Name as String' option;
              otherwise it is put in the 'Select Template from List' option.
              Set this to true to use parameter replacement. (default false)
* **count** (`int`) -- How many instances to create (default 1).
:arg bool single-use: Whether or not to terminate the slave after use
(default false).
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/openstack001.yaml
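
    A minimal inline sketch (cloud and template names are illustrative)::

        wrappers:
          - openstack:
              instances:
                - cloud-name: mycloud        # illustrative profile name
                  template-name: centos7     # illustrative template name
                  count: 2
              single-use: true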
"""
tag_prefix = 'jenkins.plugins.openstack.compute.'
if 'instances' in data:
clouds_build_wrapper = XML.SubElement(
xml_parent, tag_prefix + 'JCloudsBuildWrapper')
instances_wrapper = XML.SubElement(
clouds_build_wrapper, 'instancesToRun')
for instance in data['instances']:
instances_to_run = XML.SubElement(
instances_wrapper, tag_prefix + 'InstancesToRun')
try:
cloud_name = instance['cloud-name']
template_name = instance['template-name']
except KeyError as exception:
raise MissingAttributeError(exception.args[0])
XML.SubElement(instances_to_run, 'cloudName').text = cloud_name
if instance.get('manual-template', False):
XML.SubElement(instances_to_run,
'manualTemplateName').text = template_name
else:
XML.SubElement(instances_to_run,
'templateName').text = template_name
XML.SubElement(instances_to_run, 'count').text = str(
instance.get('count', 1))
if data.get('single-use', False):
XML.SubElement(xml_parent, tag_prefix + 'JCloudsOneOffSlave')
def build_user_vars(registry, xml_parent, data):
"""yaml: build-user-vars
Set environment variables to the value of the user that started the build.
Requires the Jenkins :jenkins-wiki:`Build User Vars Plugin
<Build+User+Vars+Plugin>`.
Example::
wrappers:
- build-user-vars
"""
XML.SubElement(xml_parent, 'org.jenkinsci.plugins.builduser.BuildUser')
def release(registry, xml_parent, data):
"""yaml: release
Add release build configuration
Requires the Jenkins :jenkins-wiki:`Release Plugin <Release+Plugin>`.
:arg bool keep-forever: Keep build forever (default true)
:arg bool override-build-parameters: Enable build-parameter override
(default false)
:arg string version-template: Release version template (default '')
:arg list parameters: Release parameters (see the :ref:`Parameters` module)
:arg list pre-build: Pre-build steps (see the :ref:`Builders` module)
:arg list post-build: Post-build steps (see :ref:`Builders`)
:arg list post-success: Post successful-build steps (see :ref:`Builders`)
:arg list post-failed: Post failed-build steps (see :ref:`Builders`)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/release001.yaml
"""
relwrap = XML.SubElement(xml_parent,
'hudson.plugins.release.ReleaseWrapper')
# For 'keep-forever', the sense of the XML flag is the opposite of
# the YAML flag.
no_keep_forever = 'false'
if str(data.get('keep-forever', True)).lower() == 'false':
no_keep_forever = 'true'
XML.SubElement(relwrap, 'doNotKeepLog').text = no_keep_forever
XML.SubElement(relwrap, 'overrideBuildParameters').text = str(
data.get('override-build-parameters', False)).lower()
XML.SubElement(relwrap, 'releaseVersionTemplate').text = data.get(
'version-template', '')
parameters = data.get('parameters', [])
if parameters:
pdef = XML.SubElement(relwrap, 'parameterDefinitions')
for param in parameters:
registry.dispatch('parameter', pdef, param)
builder_steps = {
'pre-build': 'preBuildSteps',
'post-build': 'postBuildSteps',
'post-success': 'postSuccessfulBuildSteps',
'post-fail': 'postFailedBuildSteps',
}
for step in builder_steps.keys():
for builder in data.get(step, []):
registry.dispatch('builder',
XML.SubElement(relwrap, builder_steps[step]),
builder)
def sauce_ondemand(registry, xml_parent, data):
"""yaml: sauce-ondemand
Allows you to integrate Sauce OnDemand with Jenkins. You can
automate the setup and tear down of Sauce Connect and integrate
the Sauce OnDemand results videos per test. Requires the Jenkins
:jenkins-wiki:`Sauce OnDemand Plugin <Sauce+OnDemand+Plugin>`.
    :arg bool enable-sauce-connect: launches an SSH tunnel from their cloud
        to your private network (default false)
:arg str sauce-host: The name of the selenium host to be used. For
tests run using Sauce Connect, this should be localhost.
        ondemand.saucelabs.com can also be used to connect directly to
        Sauce OnDemand. The value of the host will be stored in the
SAUCE_ONDEMAND_HOST environment variable. (default '')
:arg str sauce-port: The name of the Selenium Port to be used. For
tests run using Sauce Connect, this should be 4445. If using
ondemand.saucelabs.com for the Selenium Host, then use 4444.
The value of the port will be stored in the SAUCE_ONDEMAND_PORT
environment variable. (default '')
:arg str override-username: If set then api-access-key must be set.
Overrides the username from the global config. (default '')
:arg str override-api-access-key: If set then username must be set.
Overrides the api-access-key set in the global config. (default '')
:arg str starting-url: The value set here will be stored in the
        SELENIUM_STARTING_URL environment variable. Only used when type
is selenium. (default '')
:arg str type: Type of test to run (default selenium)
:type values:
* **selenium**
* **webdriver**
:arg list platforms: The platforms to run the tests on. Platforms
supported are dynamically retrieved from sauce labs. The format of
the values has only the first letter capitalized, no spaces, underscore
between os and version, underscore in internet_explorer, everything
else is run together. If there are not multiple version of the browser
then just the first version number is used.
Examples: Mac_10.8iphone5.1 or Windows_2003firefox10
or Windows_2012internet_explorer10 (default '')
:arg bool launch-sauce-connect-on-slave: Whether to launch sauce connect
on the slave. (default false)
:arg str https-protocol: The https protocol to use (default '')
:arg str sauce-connect-options: Options to pass to sauce connect
(default '')
Example::
wrappers:
- sauce-ondemand:
enable-sauce-connect: true
sauce-host: foo
sauce-port: 8080
override-username: foo
override-api-access-key: 123lkj123kh123l;k12323
type: webdriver
platforms:
- Linuxandroid4
- Linuxfirefox10
- Linuxfirefox11
launch-sauce-connect-on-slave: true
"""
sauce = XML.SubElement(xml_parent, 'hudson.plugins.sauce__ondemand.'
'SauceOnDemandBuildWrapper')
XML.SubElement(sauce, 'enableSauceConnect').text = str(data.get(
'enable-sauce-connect', False)).lower()
host = data.get('sauce-host', '')
XML.SubElement(sauce, 'seleniumHost').text = host
port = data.get('sauce-port', '')
XML.SubElement(sauce, 'seleniumPort').text = port
# Optional override global authentication
username = data.get('override-username')
key = data.get('override-api-access-key')
if username and key:
cred = XML.SubElement(sauce, 'credentials')
XML.SubElement(cred, 'username').text = username
XML.SubElement(cred, 'apiKey').text = key
atype = data.get('type', 'selenium')
info = XML.SubElement(sauce, 'seleniumInformation')
if atype == 'selenium':
url = data.get('starting-url', '')
XML.SubElement(info, 'startingURL').text = url
browsers = XML.SubElement(info, 'seleniumBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'false'
XML.SubElement(sauce, 'seleniumBrowsers',
{'reference': '../seleniumInformation/'
'seleniumBrowsers'})
if atype == 'webdriver':
browsers = XML.SubElement(info, 'webDriverBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'true'
XML.SubElement(sauce, 'webDriverBrowsers',
{'reference': '../seleniumInformation/'
'webDriverBrowsers'})
XML.SubElement(sauce, 'launchSauceConnectOnSlave').text = str(data.get(
'launch-sauce-connect-on-slave', False)).lower()
protocol = data.get('https-protocol', '')
XML.SubElement(sauce, 'httpsProtocol').text = protocol
options = data.get('sauce-connect-options', '')
XML.SubElement(sauce, 'options').text = options
def sonar(registry, xml_parent, data):
"""yaml: sonar
Wrapper for SonarQube Plugin
Requires :jenkins-wiki:`SonarQube plugin <SonarQube+plugin>`
    :arg str install-name: Name of the SonarQube installation to use
        (default '')
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/sonar-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/sonar-full.yaml
:language: yaml
"""
sonar = XML.SubElement(
xml_parent, 'hudson.plugins.sonar.SonarBuildWrapper')
sonar.set('plugin', 'sonar')
if data.get('install-name'):
mapping = [
('install-name', 'installationName', ''),
]
convert_mapping_to_xml(sonar, data, mapping, fail_required=True)
def pathignore(registry, xml_parent, data):
"""yaml: pathignore
This plugin allows SCM-triggered jobs to ignore
build requests if only certain paths have changed.
Requires the Jenkins :jenkins-wiki:`Pathignore Plugin <Pathignore+Plugin>`.
:arg str ignored: A set of patterns to define ignored changes
Example::
wrappers:
- pathignore:
ignored: "docs, tests"
"""
ruby = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(ruby, 'ruby-object', attrib={
'pluginid': 'pathignore',
'ruby-class': 'Jenkins::Plugin::Proxies::BuildWrapper'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
pluginid.text = 'pathignore'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'PathignoreWrapper', 'pluginid': 'pathignore'
})
ignored = XML.SubElement(obj, 'ignored__paths', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
ignored.text = data.get('ignored', '')
XML.SubElement(obj, 'invert__ignore', {
'ruby-class': 'FalseClass', 'pluginid': 'pathignore'
})
def pre_scm_buildstep(registry, xml_parent, data):
"""yaml: pre-scm-buildstep
Execute a Build Step before running the SCM
Requires the Jenkins :jenkins-wiki:`pre-scm-buildstep <pre-scm-buildstep>`.
:arg list buildsteps: List of build steps to execute
:Buildstep: Any acceptable builder, as seen in the example
Example::
wrappers:
- pre-scm-buildstep:
- shell: |
#!/bin/bash
echo "Doing somethiung cool"
- shell: |
#!/bin/zsh
echo "Doing somethin cool with zsh"
- ant: "target1 target2"
ant-name: "Standard Ant"
- inject:
properties-file: example.prop
properties-content: EXAMPLE=foo-bar
"""
bsp = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.preSCMbuildstep.'
'PreSCMBuildStepsWrapper')
bs = XML.SubElement(bsp, 'buildSteps')
for step in data:
for edited_node in create_builders(registry, step):
bs.append(edited_node)
def logstash(registry, xml_parent, data):
"""yaml: logstash build wrapper
Dump the Jenkins console output to Logstash
Requires the Jenkins :jenkins-wiki:`logstash plugin <Logstash+Plugin>`.
    :arg bool use-redis: Use Redis (default true)
:arg redis: Redis config params
:Parameter: * **host** (`str`) Redis hostname\
(default 'localhost')
        :Parameter: * **port** (`int`) Redis port number (default 6379)
:Parameter: * **database-number** (`int`)\
Redis database number (default 0)
:Parameter: * **database-password** (`str`)\
Redis database password (default '')
:Parameter: * **data-type** (`str`)\
Redis database type (default 'list')
:Parameter: * **key** (`str`) Redis key (default 'logstash')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/logstash001.yaml
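
    A minimal inline sketch (the host value is illustrative)::

        wrappers:
          - logstash:
              use-redis: true
              redis:
                host: redis.example.com    # illustrative hostname
                key: logstash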
"""
logstash = XML.SubElement(xml_parent,
'jenkins.plugins.logstash.'
'LogstashBuildWrapper')
logstash.set('plugin', '[email protected]')
redis_bool = XML.SubElement(logstash, 'useRedis')
redis_bool.text = str(data.get('use-redis', True)).lower()
    if data.get('use-redis', True):
redis_config = data.get('redis', {})
redis_sub_element = XML.SubElement(logstash, 'redis')
host_sub_element = XML.SubElement(redis_sub_element, 'host')
host_sub_element.text = str(
redis_config.get('host', 'localhost'))
port_sub_element = XML.SubElement(redis_sub_element, 'port')
port_sub_element.text = str(redis_config.get('port', '6379'))
database_numb_sub_element = XML.SubElement(redis_sub_element, 'numb')
database_numb_sub_element.text = \
str(redis_config.get('database-number', '0'))
database_pass_sub_element = XML.SubElement(redis_sub_element, 'pass')
database_pass_sub_element.text = \
str(redis_config.get('database-password', ''))
data_type_sub_element = XML.SubElement(redis_sub_element, 'dataType')
data_type_sub_element.text = \
str(redis_config.get('data-type', 'list'))
key_sub_element = XML.SubElement(redis_sub_element, 'key')
key_sub_element.text = str(redis_config.get('key', 'logstash'))
def mongo_db(registry, xml_parent, data):
"""yaml: mongo-db build wrapper
    Initializes a MongoDB database while running the build.
Requires the Jenkins :jenkins-wiki:`MongoDB plugin <MongoDB+Plugin>`.
:arg str name: The name of the MongoDB install to use (required)
:arg str data-directory: Data directory for the server (default '')
:arg int port: Port for the server (default '')
:arg str startup-params: Startup parameters for the server (default '')
:arg int start-timeout: How long to wait for the server to start in
milliseconds. 0 means no timeout. (default 0)
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/mongo-db-full.yaml
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/mongo-db-minimal.yaml
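
    A minimal inline sketch (install name and port are illustrative)::

        wrappers:
          - mongo-db:
              name: mongo-3.2    # must match a configured MongoDB install
              port: 27017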
"""
mongodb = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.mongodb.'
'MongoBuildWrapper')
mongodb.set('plugin', 'mongodb')
mapping = [
('name', 'mongodbName', None),
('port', 'port', ''),
('data-directory', 'dbpath', ''),
('startup-params', 'parameters', ''),
('start-timeout', 'startTimeout', 0),
]
convert_mapping_to_xml(mongodb, data, mapping, fail_required=True)
def delivery_pipeline(registry, xml_parent, data):
"""yaml: delivery-pipeline
If enabled the job will create a version based on the template.
The version will be set to the environment variable PIPELINE_VERSION and
will also be set in the downstream jobs.
Requires the Jenkins :jenkins-wiki:`Delivery Pipeline Plugin
<Delivery+Pipeline+Plugin>`.
    :arg str version-template: Template for the generated version, e.g.
        1.0.${BUILD_NUMBER} (default '')
:arg bool set-display-name: Set the generated version as the display name
for the build (default false)
Minimal Example:
.. literalinclude::
/../../tests/wrappers/fixtures/delivery-pipeline-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/wrappers/fixtures/delivery-pipeline-full.yaml
:language: yaml
"""
pvc = XML.SubElement(
xml_parent, 'se.diabol.jenkins.pipeline.PipelineVersionContributor')
pvc.set('plugin', 'delivery-pipeline-plugin')
mapping = [
('version-template', 'versionTemplate', ''),
('set-display-name', 'updateDisplayName', False),
]
convert_mapping_to_xml(pvc, data, mapping, fail_required=True)
def matrix_tie_parent(registry, xml_parent, data):
"""yaml: matrix-tie-parent
Tie parent to a node.
Requires the Jenkins :jenkins-wiki:`Matrix Tie Parent Plugin
<Matrix+Tie+Parent+Plugin>`.
Note that from Jenkins version 1.532 this plugin's functionality is
available under the "advanced" option of the matrix project configuration.
You can use the top level ``node`` parameter to control where the parent
job is tied in Jenkins 1.532 and higher.
:arg str node: Name of the node.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/matrix-tie-parent.yaml
"""
mtp = XML.SubElement(xml_parent, 'matrixtieparent.BuildWrapperMtp')
XML.SubElement(mtp, 'labelName').text = data['node']
def exclusion(registry, xml_parent, data):
"""yaml: exclusion
Add a resource to use for critical sections to establish a mutex on. If
another job specifies the same resource, the second job will wait for the
blocked resource to become available.
Requires the Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
:arg list resources: List of resources to add for exclusion
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/exclusion002.yaml
"""
exl = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.exclusion.IdAllocator')
exl.set('plugin', 'Exclusion')
ids = XML.SubElement(exl, 'ids')
resources = data.get('resources', [])
for resource in resources:
dit = \
XML.SubElement(ids,
'org.jvnet.hudson.plugins.exclusion.DefaultIdType')
XML.SubElement(dit, 'name').text = str(resource).upper()
def ssh_agent_credentials(registry, xml_parent, data):
"""yaml: ssh-agent-credentials
Sets up the user for the ssh agent plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`SSH-Agent Plugin <SSH+Agent+Plugin>`.
:arg list users: A list of Jenkins users credential IDs (required)
:arg str user: The user id of the jenkins user credentials (deprecated)
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials002.yaml
    If both **users** and **user** parameters are specified, **users** will
    be preferred and **user** will be ignored.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials003.yaml
The **users** with one value in list equals to the **user**. In this
case old style XML will be generated. Use this format if you use
SSH-Agent plugin < 1.5.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials004.yaml
equals to:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials001.yaml
"""
logger = logging.getLogger(__name__)
entry_xml = XML.SubElement(
xml_parent,
'com.cloudbees.jenkins.plugins.sshagent.SSHAgentBuildWrapper')
xml_key = 'user'
user_list = list()
if 'users' in data:
user_list += data['users']
if len(user_list) > 1:
entry_xml = XML.SubElement(entry_xml, 'credentialIds')
xml_key = 'string'
if 'user' in data:
logger.warning(
"Both 'users' and 'user' parameters specified for "
"ssh-agent-credentials. 'users' is used, 'user' is "
"ignored.")
elif 'user' in data:
logger.warning("The 'user' param has been deprecated, "
"use the 'users' param instead.")
user_list.append(data['user'])
else:
raise JenkinsJobsException("Missing 'user' or 'users' parameter "
"for ssh-agent-credentials")
for user in user_list:
XML.SubElement(entry_xml, xml_key).text = user
def credentials_binding(registry, xml_parent, data):
"""yaml: credentials-binding
Binds credentials to environment variables using the credentials binding
plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`Credentials Binding Plugin
<Credentials+Binding+Plugin>` version 1.1 or greater.
    :arg list binding-type: List of bindings to create. Bindings may be
        of type `zip-file`, `file`, `username-password`, `text`,
        `username-password-separated` or `amazon-web-services`.
username-password sets a variable to the username and password given in
the credentials, separated by a colon.
username-password-separated sets one variable to the username and one
variable to the password given in the credentials.
amazon-web-services sets one variable to the access key and one
variable to the secret access key. Requires the
        :jenkins-wiki:`AWS Credentials Plugin <CloudBees+AWS+Credentials+Plugin>`.
:Parameters: * **credential-id** (`str`) UUID of the credential being
referenced
* **variable** (`str`) Environment variable where the
credential will be stored
* **username** (`str`) Environment variable for the
username (Required for binding-type
username-password-separated)
* **password** (`str`) Environment variable for the
password (Required for binding-type
username-password-separated)
* **access-key** (`str`) Environment variable for the
access key (Required for binding-type
amazon-web-services)
* **secret-key** (`str`) Environment variable for the
access secret key (Required for binding-type
amazon-web-services)
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/credentials_binding.yaml
:language: yaml
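
    A minimal inline sketch (the credential ID and variable name are
    illustrative)::

        wrappers:
          - credentials-binding:
              - text:
                  credential-id: my-credential-id    # illustrative ID
                  variable: MY_SECRET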
"""
entry_xml = xml_parent.find(
'org.jenkinsci.plugins.credentialsbinding.impl.SecretBuildWrapper')
if entry_xml is None:
entry_xml = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.credentialsbinding.impl.SecretBuildWrapper')
bindings_xml = entry_xml.find('bindings')
if bindings_xml is None:
bindings_xml = XML.SubElement(entry_xml, 'bindings')
binding_types = {
'zip-file': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'ZipFileBinding',
'file': 'org.jenkinsci.plugins.credentialsbinding.impl.FileBinding',
'username-password': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'UsernamePasswordBinding',
'username-password-separated': 'org.jenkinsci.plugins.'
'credentialsbinding.impl.'
'UsernamePasswordMultiBinding',
'text': 'org.jenkinsci.plugins.credentialsbinding.impl.StringBinding',
'amazon-web-services':
'com.cloudbees.jenkins.plugins.awscredentials'
'.AmazonWebServicesCredentialsBinding'
}
if not data:
raise JenkinsJobsException('At least one binding-type must be '
'specified for the credentials-binding '
'element')
for binding in data:
for binding_type, params in binding.items():
if binding_type not in binding_types.keys():
raise JenkinsJobsException('binding-type must be one of %r' %
binding_types.keys())
binding_xml = XML.SubElement(bindings_xml,
binding_types[binding_type])
if binding_type == 'username-password-separated':
try:
XML.SubElement(binding_xml, 'usernameVariable'
).text = params['username']
XML.SubElement(binding_xml, 'passwordVariable'
).text = params['password']
except KeyError as e:
raise MissingAttributeError(e.args[0])
elif binding_type == 'amazon-web-services':
try:
XML.SubElement(binding_xml, 'accessKeyVariable'
).text = params['access-key']
XML.SubElement(binding_xml, 'secretKeyVariable'
).text = params['secret-key']
except KeyError as e:
raise MissingAttributeError(e.args[0])
else:
variable_xml = XML.SubElement(binding_xml, 'variable')
variable_xml.text = params.get('variable')
credential_xml = XML.SubElement(binding_xml, 'credentialsId')
credential_xml.text = params.get('credential-id')
def custom_tools(registry, xml_parent, data):
"""yaml: custom-tools
Requires the Jenkins :jenkins-wiki:`Custom Tools Plugin
<Custom+Tools+Plugin>`.
:arg list tools: List of custom tools to add
(optional)
:arg bool skip-master-install: skips the install in top level matrix job
(default false)
:arg bool convert-homes-to-upper: Converts the home env vars to uppercase
(default false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/custom-tools001.yaml
"""
base = 'com.cloudbees.jenkins.plugins.customtools'
wrapper = XML.SubElement(xml_parent,
base + ".CustomToolInstallWrapper")
wrapper_tools = XML.SubElement(wrapper, 'selectedTools')
tools = data.get('tools', [])
tool_node = base + '.CustomToolInstallWrapper_-SelectedTool'
for tool in tools:
tool_wrapper = XML.SubElement(wrapper_tools, tool_node)
XML.SubElement(tool_wrapper, 'name').text = str(tool)
opts = XML.SubElement(wrapper,
'multiconfigOptions')
skip_install = str(data.get('skip-master-install', 'false'))
XML.SubElement(opts,
'skipMasterInstallation').text = skip_install
convert_home = str(data.get('convert-homes-to-upper', 'false'))
XML.SubElement(wrapper,
'convertHomesToUppercase').text = convert_home
def nodejs_installator(registry, xml_parent, data):
"""yaml: nodejs-installator
Requires the Jenkins :jenkins-wiki:`NodeJS Plugin
<NodeJS+Plugin>`.
:arg str name: nodejs installation name
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/nodejs-installator001.yaml
"""
npm_node = XML.SubElement(xml_parent,
'jenkins.plugins.nodejs.tools.'
'NpmPackagesBuildWrapper')
try:
XML.SubElement(npm_node, 'nodeJSInstallationName').text = data['name']
except KeyError as e:
raise MissingAttributeError(e.args[0])
def xvnc(registry, xml_parent, data):
"""yaml: xvnc
Enable xvnc during the build.
Requires the Jenkins :jenkins-wiki:`xvnc plugin <Xvnc+Plugin>`.
:arg bool screenshot: Take screenshot upon build completion (default false)
:arg bool xauthority: Create a dedicated Xauthority file per build (default
true)
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvnc-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvnc-minimal.yaml
:language: yaml
"""
xwrapper = XML.SubElement(xml_parent,
'hudson.plugins.xvnc.Xvnc')
xwrapper.set('plugin', 'xvnc')
mapping = [
('screenshot', 'takeScreenshot', False),
('xauthority', 'useXauthority', True),
]
convert_mapping_to_xml(xwrapper, data, mapping, fail_required=True)
def job_log_logger(registry, xml_parent, data):
"""yaml: job-log-logger
Enable writing the job log to the underlying logging system.
Requires the Jenkins :jenkins-wiki:`Job Log Logger plugin
<Job+Log+Logger+Plugin>`.
:arg bool suppress-empty: Suppress empty log messages
(default true)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/job-log-logger001.yaml
"""
top = XML.SubElement(xml_parent,
'org.jenkins.ci.plugins.jobloglogger.'
'JobLogLoggerBuildWrapper')
XML.SubElement(top, 'suppressEmpty').text = str(
data.get('suppress-empty', True)).lower()
def xvfb(registry, xml_parent, data):
"""yaml: xvfb
Enable xvfb during the build.
Requires the Jenkins :jenkins-wiki:`Xvfb Plugin <Xvfb+Plugin>`.
:arg str installation-name: The name of the Xvfb tool installation (default
'default')
:arg bool auto-display-name: Uses the -displayfd option of Xvfb by which it
chooses its own display name (default false)
:arg str display-name: Ordinal of the display Xvfb will be running on; if
left empty, chosen based on current build executor number (default '')
:arg str assigned-labels: If you want to start Xvfb only on specific nodes
specify its name or label (default '')
:arg bool parallel-build: When running multiple Jenkins nodes on the same
machine this setting influences the display number generation (default
false)
:arg int timeout: A timeout, in seconds, to wait before returning
control to the job (default 0)
:arg str screen: Resolution and color depth. (default '1024x768x24')
:arg int display-name-offset: Offset for display names. (default 1)
:arg str additional-options: Additional options to be added with the
options above to the Xvfb command line (default '')
:arg bool debug: If Xvfb output should appear in console log of this job
(default false)
:arg bool shutdown-with-build: Should the display be kept until the whole
job ends (default false)
Full Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvfb-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvfb-minimal.yaml
:language: yaml
"""
xwrapper = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.xvfb.XvfbBuildWrapper')
mapping = [
('installation-name', 'installationName', 'default'),
('auto-display-name', 'autoDisplayName', False),
('display-name', 'displayName', ''),
('assigned-labels', 'assignedLabels', ''),
('parallel-build', 'parallelBuild', False),
('timeout', 'timeout', 0),
('screen', 'screen', '1024x768x24'),
('display-name-offset', 'displayNameOffset', 1),
('additional-options', 'additionalOptions', ''),
('debug', 'debug', False),
('shutdown-with-build', 'shutdownWithBuild', False),
]
convert_mapping_to_xml(xwrapper, data, mapping, fail_required=True)
def android_emulator(registry, xml_parent, data):
"""yaml: android-emulator
Automates many Android development tasks including SDK installation,
build file generation, emulator creation and launch,
APK (un)installation...
Requires the Jenkins :jenkins-wiki:`Android Emulator Plugin
<Android+Emulator+Plugin>`.
:arg str avd: Enter the name of an existing Android emulator configuration.
This option is mutually exclusive with the 'os' arg.
:arg str os: Can be an OS version, target name or SDK add-on
:arg str screen-density: Density in dots-per-inch (dpi) or as an alias,
e.g. "160" or "mdpi". (default mdpi)
:arg str screen-resolution: Can be either a named resolution or explicit
size, e.g. "WVGA" or "480x800". (default WVGA)
:arg str locale: Language and country pair. (default en_US)
:arg str target-abi: Name of the ABI / system image to be used. (optional)
:arg str sd-card: sd-card size e.g. "32M" or "10240K". (optional)
:arg bool wipe: if true, the emulator will have its user data reset at
start-up (default false)
:arg bool show-window: if true, the Android emulator user interface will
be displayed on screen during the build. (default false)
:arg bool snapshot: Start emulator from stored state (default false)
:arg bool delete: Delete Android emulator at the end of build
(default false)
:arg int startup-delay: Wait this many seconds before attempting
to start the emulator (default 0)
:arg str commandline-options: Command-line options to pass when starting the
Android emulator executable (optional)
:arg str exe: The emulator executable. (optional)
:arg list hardware-properties: Dictionary of hardware properties. Allows
you to override the default values for an AVD. (optional)
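A minimal sketch (the OS target and hardware property values are
illustrative):

.. code-block:: yaml

    wrappers:
      - android-emulator:
          os: android-24
          screen-density: xhdpi
          sd-card: 32M
          hardware-properties:
            hw.ramSize: '2048'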
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/android003.yaml
"""
root = XML.SubElement(xml_parent,
'hudson.plugins.android__emulator.AndroidEmulator')
if data.get('avd') and data.get('os'):
raise JenkinsJobsException("'avd' and 'os' options are "
"exclusive, please pick one only")
if not data.get('avd') and not data.get('os'):
raise JenkinsJobsException("AndroidEmulator requires an AVD name or"
"OS version to run: specify 'os' or 'avd'")
if data.get('avd'):
XML.SubElement(root, 'avdName').text = str(data['avd'])
if data.get('os'):
XML.SubElement(root, 'osVersion').text = str(data['os'])
XML.SubElement(root, 'screenDensity').text = str(
data.get('screen-density', 'mdpi'))
XML.SubElement(root, 'screenResolution').text = str(
data.get('screen-resolution', 'WVGA'))
XML.SubElement(root, 'deviceLocale').text = str(
data.get('locale', 'en_US'))
XML.SubElement(root, 'targetAbi').text = str(
data.get('target-abi', ''))
XML.SubElement(root, 'sdCardSize').text = str(data.get('sd-card', ''))
hardware = XML.SubElement(root, 'hardwareProperties')
for prop_name, prop_val in data.get('hardware-properties', {}).items():
prop_node = XML.SubElement(hardware,
'hudson.plugins.android__emulator'
'.AndroidEmulator_-HardwareProperty')
XML.SubElement(prop_node, 'key').text = str(prop_name)
XML.SubElement(prop_node, 'value').text = str(prop_val)
XML.SubElement(root, 'wipeData').text = str(
data.get('wipe', False)).lower()
XML.SubElement(root, 'showWindow').text = str(
data.get('show-window', False)).lower()
XML.SubElement(root, 'useSnapshots').text = str(
data.get('snapshot', False)).lower()
XML.SubElement(root, 'deleteAfterBuild').text = str(
data.get('delete', False)).lower()
XML.SubElement(root, 'startupDelay').text = str(
data.get('startup-delay', 0))
XML.SubElement(root, 'commandLineOptions').text = str(
data.get('commandline-options', ''))
XML.SubElement(root, 'executable').text = str(data.get('exe', ''))
def artifactory_maven(registry, xml_parent, data):
"""yaml: artifactory-maven
Wrapper for non-Maven projects. Requires the
:jenkins-wiki:`Artifactory Plugin <Artifactory+Plugin>`
:arg str url: URL of the Artifactory server. e.g.
https://www.jfrog.com/artifactory/ (default '')
:arg str name: Artifactory user with permissions to use for
connecting to the selected Artifactory Server
(default '')
:arg str repo-key: Name of the repository to search for
artifact dependencies. Provide a single repo-key or provide
separate release-repo-key and snapshot-repo-key.
:arg str release-repo-key: Release repository name. Value of
repo-key takes priority over release-repo-key if provided.
:arg str snapshot-repo-key: Snapshots repository name. Value of
repo-key takes priority over snapshot-repo-key if provided.
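A minimal sketch (server URL, user and repository names are
illustrative):

.. code-block:: yaml

    wrappers:
      - artifactory-maven:
          url: https://example.com/artifactory
          name: artifactory-user
          repo-key: libs-release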
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/artifactory001.yaml
:language: yaml
"""
artifactory = XML.SubElement(
xml_parent,
'org.jfrog.hudson.maven3.ArtifactoryMaven3NativeConfigurator')
# details
details = XML.SubElement(artifactory, 'details')
artifactory_common_details(details, data)
if 'repo-key' in data:
XML.SubElement(
details, 'downloadRepositoryKey').text = data['repo-key']
else:
XML.SubElement(
details, 'downloadSnapshotRepositoryKey').text = data.get(
'snapshot-repo-key', '')
XML.SubElement(
details, 'downloadReleaseRepositoryKey').text = data.get(
'release-repo-key', '')
def artifactory_generic(registry, xml_parent, data):
"""yaml: artifactory-generic
Wrapper for non-Maven projects. Requires the
:jenkins-wiki:`Artifactory Plugin <Artifactory+Plugin>`
:arg str url: URL of the Artifactory server. e.g.
https://www.jfrog.com/artifactory/ (default '')
:arg str name: Artifactory user with permissions to use for
connecting to the selected Artifactory Server
(default '')
:arg str repo-key: Release repository name (plugin < 2.3.0) (default '')
:arg str snapshot-repo-key: Snapshots repository name (plugin < 2.3.0)
(default '')
:arg str key-from-select: Repository key to use (plugin >= 2.3.0)
(default '')
:arg str key-from-text: Repository key to use that can be configured
dynamically using Jenkins variables (plugin >= 2.3.0) (default '')
:arg list deploy-pattern: List of patterns for mapping
build artifacts to published artifacts. Supports Ant-style wildcards
mapping to target directories. E.g.: */*.zip=>dir (default [])
:arg list resolve-pattern: List of references to other
artifacts that this build should use as dependencies. (default [])
:arg list matrix-params: List of properties to attach to all deployed
artifacts in addition to the default ones: build.name, build.number,
and vcs.revision (default [])
:arg bool deploy-build-info: Deploy Jenkins build metadata with
artifacts to Artifactory (default false)
:arg bool env-vars-include: Include environment variables accessible by
the build process. Jenkins-specific env variables are always included.
Use the env-vars-include-patterns and env-vars-exclude-patterns to
filter the environment variables published to artifactory.
(default false)
:arg list env-vars-include-patterns: List of environment variable patterns
for including env vars as part of the published build info. Environment
variables may contain the * and the ? wildcards (default [])
:arg list env-vars-exclude-patterns: List of environment variable patterns
that determine the env vars excluded from the published build info
(default [])
:arg bool discard-old-builds:
Remove older build info from Artifactory (default false)
:arg bool discard-build-artifacts:
Remove older build artifacts from Artifactory (default true)
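A minimal sketch for plugin >= 2.3.0 (server and repository names are
illustrative):

.. code-block:: yaml

    wrappers:
      - artifactory-generic:
          url: https://example.com/artifactory
          name: artifactory-user
          key-from-select: libs-snapshot-local
          deploy-pattern:
            - 'dist/*.tar.gz=>releases'
          deploy-build-info: true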
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/artifactory002.yaml
:language: yaml
"""
artifactory = XML.SubElement(
xml_parent,
'org.jfrog.hudson.generic.ArtifactoryGenericConfigurator')
# details
details = XML.SubElement(artifactory, 'details')
artifactory_common_details(details, data)
# Get plugin information to maintain backwards compatibility
info = registry.get_plugin_info('artifactory')
version = pkg_resources.parse_version(info.get('version', '0'))
if version >= pkg_resources.parse_version('2.3.0'):
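# Plugin >= 2.3.0 stores the repository key in a nested
# <deployReleaseRepository> element; dynamicMode is true when the key
# comes from the free-text field rather than the select box.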
deployReleaseRepo = XML.SubElement(details, 'deployReleaseRepository')
XML.SubElement(deployReleaseRepo, 'keyFromText').text = data.get(
'key-from-text', '')
XML.SubElement(deployReleaseRepo, 'keyFromSelect').text = data.get(
'key-from-select', '')
XML.SubElement(deployReleaseRepo, 'dynamicMode').text = str(
'key-from-text' in data.keys()).lower()
else:
XML.SubElement(details, 'repositoryKey').text = data.get(
'repo-key', '')
XML.SubElement(details, 'snapshotsRepositoryKey').text = data.get(
'snapshot-repo-key', '')
XML.SubElement(artifactory, 'deployPattern').text = ','.join(data.get(
'deploy-pattern', []))
XML.SubElement(artifactory, 'resolvePattern').text = ','.join(
data.get('resolve-pattern', []))
XML.SubElement(artifactory, 'matrixParams').text = ','.join(
data.get('matrix-params', []))
XML.SubElement(artifactory, 'deployBuildInfo').text = str(
data.get('deploy-build-info', False)).lower()
XML.SubElement(artifactory, 'includeEnvVars').text = str(
data.get('env-vars-include', False)).lower()
XML.SubElement(artifactory, 'discardOldBuilds').text = str(
data.get('discard-old-builds', False)).lower()
XML.SubElement(artifactory, 'discardBuildArtifacts').text = str(
data.get('discard-build-artifacts', True)).lower()
# envVarsPatterns
artifactory_env_vars_patterns(artifactory, data)
def artifactory_maven_freestyle(registry, xml_parent, data):
"""yaml: artifactory-maven-freestyle
Wrapper for Free Style projects. Requires the
:jenkins-wiki:`Artifactory Plugin <Artifactory+Plugin>`.
:arg str url: URL of the Artifactory server. e.g.
https://www.jfrog.com/artifactory/ (default '')
:arg str name: Artifactory user with permissions to use for
connecting to the selected Artifactory Server (default '')
:arg str release-repo-key: Release repository name (default '')
:arg str snapshot-repo-key: Snapshots repository name (default '')
:arg bool publish-build-info: Push build metadata with artifacts
(default false)
:arg bool discard-old-builds:
Remove older build info from Artifactory (default true)
:arg bool discard-build-artifacts:
Remove older build artifacts from Artifactory (default false)
:arg bool include-env-vars: Include all environment variables
accessible by the build process. Jenkins-specific env variables
are always included (default false)
:arg bool run-checks: Run automatic license scanning check after the
build is complete (default false)
:arg bool include-publish-artifacts: Include the build's published
module artifacts in the license violation checks if they are
also used as dependencies for other modules in this build
(default false)
:arg bool license-auto-discovery: Tells Artifactory not to try
and automatically analyze and tag the build's dependencies
with license information upon deployment (default true)
:arg bool enable-issue-tracker-integration: When the Jenkins
JIRA plugin is enabled, synchronize information about JIRA
issues to Artifactory and attach issue information to build
artifacts (default false)
:arg bool aggregate-build-issues: When the Jenkins JIRA plugin
is enabled, include all issues from previous builds up to the
latest build status defined in "Aggregation Build Status"
(default false)
:arg bool filter-excluded-artifacts-from-build: Add the excluded
files to the excludedArtifacts list and remove them from the
artifacts list in the build info (default false)
:arg str scopes: A list of dependency scopes/configurations to run
license violation checks on. If left empty all dependencies from
all scopes will be checked (default '')
:arg str violation-recipients: Recipients that need to be notified
of license violations in the build info (default '')
:arg list matrix-params: List of properties to attach to all
deployed artifacts in addition to the default ones:
build.name, build.number, and vcs.revision (default [])
:arg str black-duck-app-name: The existing Black Duck Code Center
application name (default '')
:arg str black-duck-app-version: The existing Black Duck Code Center
application version (default '')
:arg str black-duck-report-recipients: Recipients that will be emailed
a report after the automatic Black Duck Code Center compliance checks
finished (default '')
:arg str black-duck-scopes: A list of dependency scopes/configurations
to run Black Duck Code Center compliance checks on. If left empty
all dependencies from all scopes will be checked (default '')
:arg bool black-duck-run-checks: Automatic Black Duck Code Center
compliance checks will occur after the build completes
(default false)
:arg bool black-duck-include-published-artifacts: Include the build's
published module artifacts in the license violation checks if they
are also used as dependencies for other modules in this build
(default false)
:arg bool auto-create-missing-component-requests: Auto create
missing components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool auto-discard-stale-component-requests: Auto discard
stale components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool deploy-artifacts: Push artifacts to the Artifactory
Server. The specific artifacts to push are controlled using
the deployment-include-patterns and deployment-exclude-patterns.
(default true)
:arg list deployment-include-patterns: List of patterns for including
build artifacts to publish to artifactory. (default [])
:arg list deployment-exclude-patterns: List of patterns
for excluding artifacts from deployment to Artifactory
(default [])
:arg bool env-vars-include: Include environment variables
accessible by the build process. Jenkins-specific env variables
are always included. Environment variables can be filtered using
the env-vars-include-patterns and env-vars-exclude-patterns.
(default false)
:arg list env-vars-include-patterns: List of environment variable patterns
that will be included as part of the published build info. Environment
variables may contain the * and the ? wildcards (default [])
:arg list env-vars-exclude-patterns: List of environment variable patterns
that will be excluded from the published build info
(default [])
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/artifactory003.yaml
:language: yaml
"""
artifactory = XML.SubElement(
xml_parent,
'org.jfrog.hudson.maven3.ArtifactoryMaven3Configurator')
# details
details = XML.SubElement(artifactory, 'details')
artifactory_common_details(details, data)
deploy_release = XML.SubElement(details, 'deployReleaseRepository')
artifactory_repository(deploy_release, data, 'release')
deploy_snapshot = XML.SubElement(details, 'deploySnapshotRepository')
artifactory_repository(deploy_snapshot, data, 'snapshot')
XML.SubElement(details, 'stagingPlugin').text = data.get(
'resolve-staging-plugin', '')
# resolverDetails
resolver = XML.SubElement(artifactory, 'resolverDetails')
artifactory_common_details(resolver, data)
resolve_snapshot = XML.SubElement(resolver, 'resolveSnapshotRepository')
artifactory_repository(resolve_snapshot, data, 'snapshot')
deploy_release = XML.SubElement(resolver, 'resolveReleaseRepository')
artifactory_repository(deploy_release, data, 'release')
XML.SubElement(resolver, 'stagingPlugin').text = data.get(
'resolve-staging-plugin', '')
# artifactDeploymentPatterns
artifactory_deployment_patterns(artifactory, data)
# envVarsPatterns
artifactory_env_vars_patterns(artifactory, data)
XML.SubElement(artifactory, 'matrixParams').text = ','.join(
data.get('matrix-params', []))
# optional__props
artifactory_optional_props(artifactory, data, 'wrappers')
def maven_release(registry, xml_parent, data):
"""yaml: maven-release
Wrapper for Maven projects
Requires :jenkins-wiki:`M2 Release Plugin <M2+Release+Plugin>`
:arg str release-goals: Release goals and options (default '')
:arg str dry-run-goals: DryRun goals and options (default '')
:arg int num-successful-builds: Number of successful release builds to keep
(default 1)
:arg bool select-custom-scm-comment-prefix: Preselect 'Specify custom SCM
comment prefix' (default false)
:arg bool select-append-jenkins-username: Preselect 'Append Jenkins
Username' (default false)
:arg bool select-scm-credentials: Preselect 'Specify SCM login/password'
(default false)
:arg str release-env-var: Release environment variable (default '')
:arg str scm-user-env-var: SCM username environment variable (default '')
:arg str scm-password-env-var: SCM password environment variable
(default '')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/maven-release001.yaml
:language: yaml
"""
mvn_release = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.m2release.'
'M2ReleaseBuildWrapper')
mapping = [
('release-goals', 'releaseGoals', ''),
('dry-run-goals', 'dryRunGoals', ''),
('num-successful-builds', 'numberOfReleaseBuildsToKeep', 1),
('select-custom-scm-comment-prefix', 'selectCustomScmCommentPrefix',
False),
('select-append-jenkins-username', 'selectAppendHudsonUsername',
False),
('select-scm-credentials', 'selectScmCredentials', False),
('release-env-var', 'releaseEnvVar', ''),
('scm-user-env-var', 'scmUserEnvVar', ''),
('scm-password-env-var', 'scmPasswordEnvVar', ''),
]
convert_mapping_to_xml(mvn_release, data, mapping, fail_required=True)
def version_number(parser, xml_parent, data):
"""yaml: version-number
Generate a version number for the build using a format string. See the
wiki page for more detailed descriptions of options.
Requires the Jenkins :jenkins-wiki:`version number plugin
<Version+Number+Plugin>`.
:arg str variable-name: Name of environment variable to assign version
number to (required)
:arg str format-string: Format string used to generate version number
(required)
:arg bool skip-failed-builds: If the build fails, DO NOT increment any
auto-incrementing component of the version number (default false)
:arg bool display-name: Use the version number for the build display
name (default false)
:arg str start-date: The date the project began as a UTC timestamp
(default 1970-1-1 00:00:00.0 UTC)
:arg int builds-today: The number of builds that have been executed
today (optional)
:arg int builds-this-month: The number of builds that have been executed
since the start of the month (optional)
:arg int builds-this-year: The number of builds that have been executed
since the start of the year (optional)
:arg int builds-all-time: The number of builds that have been executed
since the start of the project (optional)
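A minimal sketch (the variable name and format tokens are illustrative;
see the plugin's wiki for the full token list):

.. code-block:: yaml

    wrappers:
      - version-number:
          variable-name: RELEASE_VERSION
          format-string: '1.0.${BUILDS_ALL_TIME}'
          display-name: true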
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/version-number001.yaml
:language: yaml
"""
version_number = XML.SubElement(
xml_parent, 'org.jvnet.hudson.tools.versionnumber.VersionNumberBuilder'
)
mapping = [
# option, xml name, default value
("variable-name", 'environmentVariableName', None),
("format-string", 'versionNumberString', None),
("skip-failed-builds", 'skipFailedBuilds', False),
("display-name", 'useAsBuildDisplayName', False),
("start-date", 'projectStartDate', '1970-1-1 00:00:00.0 UTC'),
("builds-today", 'oBuildsToday', '-1'),
("builds-this-month", 'oBuildsThisMonth', '-1'),
("builds-this-year", 'oBuildsThisYear', '-1'),
("builds-all-time", 'oBuildsAllTime', '-1'),
]
convert_mapping_to_xml(version_number, data, mapping, fail_required=True)
class Wrappers(jenkins_jobs.modules.base.Base):
sequence = 80
component_type = 'wrapper'
component_list_type = 'wrappers'
def gen_xml(self, xml_parent, data):
wrappers = XML.SubElement(xml_parent, 'buildWrappers')
for wrap in data.get('wrappers', []):
self.registry.dispatch('wrapper', wrappers, wrap) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/wrappers.py | wrappers.py |
import pkg_resources
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules.helpers import config_file_provider_settings
from jenkins_jobs.modules import hudson_model
class Maven(jenkins_jobs.modules.base.Base):
sequence = 0
choices_private_repo = {
'default':
'hudson.maven.local_repo.DefaultLocalRepositoryLocator',
'local-to-workspace':
'hudson.maven.local_repo.PerJobLocalRepositoryLocator',
'local-to-executor':
'hudson.maven.local_repo.PerExecutorLocalRepositoryLocator',
}
def root_xml(self, data):
xml_parent = XML.Element('maven2-moduleset')
if 'maven' not in data:
return xml_parent
# determine version of plugin
plugin_info = self.registry.get_plugin_info("Maven Integration plugin")
version = pkg_resources.parse_version(plugin_info.get('version', '0'))
if 'root-module' in data['maven']:
root_module = XML.SubElement(xml_parent, 'rootModule')
XML.SubElement(root_module, 'groupId').text = \
data['maven']['root-module']['group-id']
XML.SubElement(root_module, 'artifactId').text = \
data['maven']['root-module']['artifact-id']
XML.SubElement(xml_parent, 'goals').text = data['maven']['goals']
maven_opts = data['maven'].get('maven-opts')
if maven_opts:
XML.SubElement(xml_parent, 'mavenOpts').text = maven_opts
maven_name = data['maven'].get('maven-name')
if maven_name:
XML.SubElement(xml_parent, 'mavenName').text = maven_name
private_repo = data['maven'].get('private-repository')
if private_repo:
if private_repo not in self.choices_private_repo.keys():
raise ValueError('Not a valid private-repository "%s", '
'must be one of "%s"' %
(private_repo,
", ".join(self.choices_private_repo.keys())))
XML.SubElement(xml_parent,
'localRepository',
attrib={'class':
self.choices_private_repo[private_repo]})
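# Note: 'ignoreUpstremChanges' (sic) matches the misspelled element
# name used by the Maven Integration plugin itself.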
XML.SubElement(xml_parent, 'ignoreUpstremChanges').text = str(
data['maven'].get('ignore-upstream-changes', True)).lower()
XML.SubElement(xml_parent, 'rootPOM').text = \
data['maven'].get('root-pom', 'pom.xml')
XML.SubElement(xml_parent, 'aggregatorStyleBuild').text = str(
not data['maven'].get('parallel-build-modules', False)).lower()
XML.SubElement(xml_parent, 'incrementalBuild').text = str(
data['maven'].get('incremental-build', False)).lower()
XML.SubElement(xml_parent, 'siteArchivingDisabled').text = str(
not data['maven'].get('automatic-site-archiving', True)).lower()
XML.SubElement(xml_parent, 'fingerprintingDisabled').text = str(
not data['maven'].get('automatic-fingerprinting', True)).lower()
if (version > pkg_resources.parse_version('0') and
version < pkg_resources.parse_version('2.0.1')):
XML.SubElement(xml_parent, 'perModuleEmail').text = str(
data.get('per-module-email', True)).lower()
XML.SubElement(xml_parent, 'archivingDisabled').text = str(
not data['maven'].get('automatic-archiving', True)).lower()
XML.SubElement(xml_parent, 'resolveDependencies').text = str(
data['maven'].get('resolve-dependencies', False)).lower()
XML.SubElement(xml_parent, 'processPlugins').text = str(
data['maven'].get('process-plugins', False)).lower()
XML.SubElement(xml_parent, 'mavenValidationLevel').text = '-1'
XML.SubElement(xml_parent, 'runHeadless').text = str(
data['maven'].get('run-headless', False)).lower()
if 'custom-workspace' in data['maven']:
XML.SubElement(xml_parent, 'customWorkspace').text = str(
data['maven'].get('custom-workspace'))
config_file_provider_settings(xml_parent, data['maven'])
run_post_steps = XML.SubElement(xml_parent, 'runPostStepsIfResult')
run_conditions = ['SUCCESS', 'UNSTABLE', 'FAILURE']
run_condition = data['maven'].get('post-step-run-condition', 'FAILURE')
if run_condition not in run_conditions:
raise InvalidAttributeError('post-step-run-condition',
run_condition, run_conditions)
cond_dict = hudson_model.THRESHOLDS[run_condition]
XML.SubElement(run_post_steps, 'name').text = cond_dict['name']
XML.SubElement(run_post_steps, 'ordinal').text = cond_dict['ordinal']
XML.SubElement(run_post_steps, 'color').text = cond_dict['color']
return xml_parent | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/project_maven.py | project_maven.py |
import logging
import pkg_resources
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
def builds_chain_fingerprinter(registry, xml_parent, data):
"""yaml: builds-chain-fingerprinter
Builds chain fingerprinter.
Requires the Jenkins :jenkins-wiki:`Builds chain fingerprinter Plugin
<Builds+chain+fingerprinter>`.
:arg bool per-builds-chain: enable builds hierarchy fingerprinting
(default false)
:arg bool per-job-chain: enable jobs hierarchy fingerprinting
(default false)
Example:
.. literalinclude:: /../../tests/properties/fixtures/fingerprinter.yaml
:language: yaml
"""
fingerprinter = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.'
'buildschainfingerprinter.'
'AutomaticFingerprintJobProperty')
XML.SubElement(fingerprinter, 'isPerBuildsChainEnabled').text = str(
data.get('per-builds-chain', False)).lower()
XML.SubElement(fingerprinter, 'isPerJobsChainEnabled').text = str(
data.get('per-job-chain', False)).lower()
def ownership(registry, xml_parent, data):
"""yaml: ownership
This plugin provides explicit ownership for jobs and slave nodes.
Requires the Jenkins :jenkins-wiki:`Ownership Plugin <Ownership+Plugin>`.
:arg bool enabled: whether ownership is enabled (default true)
:arg str owner: the owner of the job
:arg list co-owners: list of job co-owners
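A minimal sketch (user IDs are illustrative):

.. code-block:: yaml

    properties:
      - ownership:
          owner: jdoe
          co-owners:
            - asmith
            - bjones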
Example:
.. literalinclude:: /../../tests/properties/fixtures/ownership.yaml
:language: yaml
"""
ownership_plugin = XML.SubElement(
xml_parent,
'com.synopsys.arc.jenkins.plugins.ownership.jobs.JobOwnerJobProperty')
ownership = XML.SubElement(ownership_plugin, 'ownership')
owner = str(data.get('enabled', True)).lower()
XML.SubElement(ownership, 'ownershipEnabled').text = owner
XML.SubElement(ownership, 'primaryOwnerId').text = data.get('owner')
coownersIds = XML.SubElement(ownership, 'coownersIds')
for coowner in data.get('co-owners', []):
XML.SubElement(coownersIds, 'string').text = coowner
def promoted_build(registry, xml_parent, data):
"""yaml: promoted-build
Marks a build for promotion. A promotion process with an identical
name must be created via the web interface in the job in order for the job
promotion to persist. Promotion processes themselves cannot be configured
by jenkins-jobs due to the separate storage of plugin configuration files.
Requires the Jenkins :jenkins-wiki:`Promoted Builds Plugin
<Promoted+Builds+Plugin>`.
:arg list names: the promoted build names (optional)
Example:
.. literalinclude:: /../../tests/properties/fixtures/promoted_build.yaml
:language: yaml
"""
promoted = XML.SubElement(xml_parent, 'hudson.plugins.promoted__builds.'
'JobPropertyImpl')
names = data.get('names', [])
if names:
active_processes = XML.SubElement(promoted, 'activeProcessNames')
for n in names:
XML.SubElement(active_processes, 'string').text = str(n)
def gitbucket(parser, xml_parent, data):
"""yaml: gitbucket
Integrate GitBucket with Jenkins.
Requires the Jenkins :jenkins-wiki:`GitBucket Plugin <GitBucket+Plugin>`.
:arg str url: GitBucket URL to issue (required)
:arg bool link-enabled: Enable hyperlink to issue (default false)
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/gitbucket-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/gitbucket-full.yaml
:language: yaml
"""
gitbucket = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.gitbucket.GitBucketProjectProperty')
gitbucket.set('plugin', 'gitbucket')
mapping = [
('url', 'url', None),
('link-enabled', 'linkEnabled', False),
]
helpers.convert_mapping_to_xml(
gitbucket, data, mapping, fail_required=True)
def github(registry, xml_parent, data):
"""yaml: github
Sets the GitHub URL for the project.
:arg str url: the GitHub URL (required)
:arg str display-name: This value will be used as the context name for
commit status if a status builder or status publisher is defined for this
project. (>= 1.14.1) (default '')
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/github-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/github-full.yaml
:language: yaml
"""
github = XML.SubElement(
xml_parent, 'com.coravy.hudson.plugins.github.GithubProjectProperty')
github.set('plugin', 'github')
mapping = [
('url', 'projectUrl', None),
('display-name', 'displayName', ''),
]
helpers.convert_mapping_to_xml(github, data, mapping, fail_required=True)
def gitlab(registry, xml_parent, data):
"""yaml: gitlab
Sets the GitLab connection for the project. Configured via Jenkins Global
Configuration.
Requires the Jenkins :jenkins-wiki:`GitLab Plugin <GitLab+Plugin>`.
:arg str connection: the GitLab connection name (required)
Example:
.. literalinclude:: /../../tests/properties/fixtures/gitlab.yaml
:language: yaml
"""
gitlab = XML.SubElement(xml_parent,
'com.dabsquared.gitlabjenkins.connection.'
'GitLabConnectionProperty')
try:
XML.SubElement(gitlab, 'gitLabConnection').text = data['connection']
except KeyError as e:
raise MissingAttributeError(e)
def least_load(registry, xml_parent, data):
"""yaml: least-load
Enables the Least Load Plugin.
Requires the Jenkins :jenkins-wiki:`Least Load Plugin <Least+Load+Plugin>`.
:arg bool disabled: whether or not leastload is disabled (default true)
Example:
.. literalinclude:: /../../tests/properties/fixtures/least-load002.yaml
:language: yaml
"""
least = XML.SubElement(xml_parent,
'org.bstick12.jenkinsci.plugins.leastload.'
'LeastLoadDisabledProperty')
XML.SubElement(least, 'leastLoadDisabled').text = str(
data.get('disabled', True)).lower()
def throttle(registry, xml_parent, data):
"""yaml: throttle
Throttles the number of builds for this job.
Requires the Jenkins :jenkins-wiki:`Throttle Concurrent Builds Plugin
<Throttle+Concurrent+Builds+Plugin>`.
:arg str option: throttle `project` (throttle the project alone)
or `category` (throttle the project as part of one or more categories)
:arg int max-per-node: max concurrent builds per node (default 0)
:arg int max-total: max concurrent builds (default 0)
:arg bool enabled: whether throttling is enabled (default true)
:arg list categories: multiproject throttle categories
:arg bool matrix-builds: throttle matrix master builds (default true)
:arg bool matrix-configs: throttle matrix config builds (default false)
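For instance, throttling a job as part of a category might look like
the following sketch (category name is illustrative):

.. code-block:: yaml

    properties:
      - throttle:
          option: category
          categories:
            - integration-tests
          max-per-node: 1
          max-total: 2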
Example:
.. literalinclude:: /../../tests/properties/fixtures/throttle001.yaml
:language: yaml
"""
throttle = XML.SubElement(xml_parent,
'hudson.plugins.throttleconcurrents.'
'ThrottleJobProperty')
XML.SubElement(throttle, 'maxConcurrentPerNode').text = str(
data.get('max-per-node', '0'))
XML.SubElement(throttle, 'maxConcurrentTotal').text = str(
data.get('max-total', '0'))
XML.SubElement(throttle, 'throttleEnabled').text = str(
data.get('enabled', True)).lower()
cat = data.get('categories', [])
if cat:
cn = XML.SubElement(throttle, 'categories')
for c in cat:
XML.SubElement(cn, 'string').text = str(c)
options_list = ('category', 'project')
option = data.get('option')
if option not in options_list:
raise InvalidAttributeError('option', option, options_list)
XML.SubElement(throttle, 'throttleOption').text = option
XML.SubElement(throttle, 'configVersion').text = '1'
matrixopt = XML.SubElement(throttle, 'matrixOptions')
XML.SubElement(matrixopt, 'throttleMatrixBuilds').text = str(
data.get('matrix-builds', True)).lower()
XML.SubElement(matrixopt, 'throttleMatrixConfigurations').text = str(
data.get('matrix-configs', False)).lower()
def sidebar(registry, xml_parent, data):
"""yaml: sidebar
Allows you to add links in the sidebar.
Requires the Jenkins :jenkins-wiki:`Sidebar-Link Plugin
<Sidebar-Link+Plugin>`.
:arg str url: url to link to (optional)
:arg str text: text for the link (optional)
:arg str icon: path to icon (optional)
Example:
.. literalinclude:: /../../tests/properties/fixtures/sidebar02.yaml
:language: yaml
"""
sidebar = xml_parent.find('hudson.plugins.sidebar__link.ProjectLinks')
if sidebar is None:
sidebar = XML.SubElement(xml_parent,
'hudson.plugins.sidebar__link.ProjectLinks')
links = XML.SubElement(sidebar, 'links')
else:
links = sidebar.find('links')
action = XML.SubElement(links, 'hudson.plugins.sidebar__link.LinkAction')
XML.SubElement(action, 'url').text = str(data.get('url', ''))
XML.SubElement(action, 'text').text = str(data.get('text', ''))
XML.SubElement(action, 'icon').text = str(data.get('icon', ''))
def inject(registry, xml_parent, data):
"""yaml: inject
Allows you to inject environment variables into the build.
Requires the Jenkins :jenkins-wiki:`Env Inject Plugin <EnvInject+Plugin>`.
:arg str properties-file: file to read with properties (optional)
:arg str properties-content: key=value properties (optional)
:arg str script-file: file with script to run (optional)
:arg str script-content: script to run (optional)
:arg str groovy-content: groovy script to run (optional)
:arg bool load-from-master: load files from master (default false)
:arg bool enabled: injection enabled (default true)
:arg bool keep-system-variables: keep system variables (default true)
:arg bool keep-build-variables: keep build variables (default true)
:arg bool override-build-parameters: override build parameters
(default false)
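A minimal sketch (property names and values are illustrative):

.. code-block:: yaml

    properties:
      - inject:
          properties-content: |
            REGION=us-east-1
            STAGE=ci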
Example:
.. literalinclude:: /../../tests/properties/fixtures/inject001.yaml
:language: yaml
"""
inject = XML.SubElement(xml_parent,
'EnvInjectJobProperty')
info = XML.SubElement(inject, 'info')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesFilePath', data.get('properties-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesContent', data.get('properties-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptFilePath', data.get('script-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptContent', data.get('script-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'groovyScriptContent', data.get('groovy-content'))
XML.SubElement(info, 'loadFilesFromMaster').text = str(
data.get('load-from-master', False)).lower()
XML.SubElement(inject, 'on').text = str(
data.get('enabled', True)).lower()
XML.SubElement(inject, 'keepJenkinsSystemVariables').text = str(
data.get('keep-system-variables', True)).lower()
XML.SubElement(inject, 'keepBuildVariables').text = str(
data.get('keep-build-variables', True)).lower()
XML.SubElement(inject, 'overrideBuildParameters').text = str(
data.get('override-build-parameters', False)).lower()
def authenticated_build(registry, xml_parent, data):
"""yaml: authenticated-build
Specifies an authorization matrix where only authenticated users
may trigger a build.
.. deprecated:: 0.1.0. Please use :ref:`authorization <authorization>`.
Example:
.. literalinclude::
/../../tests/properties/fixtures/authenticated_build.yaml
:language: yaml
"""
# TODO: generalize this
security = XML.SubElement(xml_parent,
'hudson.security.'
'AuthorizationMatrixProperty')
XML.SubElement(security, 'permission').text = (
'hudson.model.Item.Build:authenticated')
def authorization(registry, xml_parent, data):
"""yaml: authorization
Specifies an authorization matrix
.. _authorization:
:arg list <name>: `<name>` is the name of the group or user; its value is
the list of rights to grant (see the sketch after this list).
:<name> rights:
* **credentials-create**
* **credentials-delete**
* **credentials-manage-domains**
* **credentials-update**
* **credentials-view**
* **job-build**
* **job-cancel**
* **job-configure**
* **job-delete**
* **job-discover**
* **job-extended-read**
* **job-move**
* **job-read**
* **job-status**
* **job-workspace**
* **ownership-jobs**
* **run-delete**
* **run-update**
* **scm-tag**
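For instance, the sketch below grants read access to anonymous users
and build rights to a group (names are illustrative):

.. code-block:: yaml

    properties:
      - authorization:
          anonymous:
            - job-read
            - job-status
          deploy-team:
            - job-build
            - job-cancel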
Example:
.. literalinclude:: /../../tests/properties/fixtures/authorization.yaml
:language: yaml
"""
credentials = 'com.cloudbees.plugins.credentials.CredentialsProvider.'
ownership = 'com.synopsys.arc.jenkins.plugins.ownership.OwnershipPlugin.'
mapping = {
'credentials-create': ''.join((credentials, 'Create')),
'credentials-delete': ''.join((credentials, 'Delete')),
'credentials-manage-domains': ''.join((credentials, 'ManageDomains')),
'credentials-update': ''.join((credentials, 'Update')),
'credentials-view': ''.join((credentials, 'View')),
'job-build': 'hudson.model.Item.Build',
'job-cancel': 'hudson.model.Item.Cancel',
'job-configure': 'hudson.model.Item.Configure',
'job-delete': 'hudson.model.Item.Delete',
'job-discover': 'hudson.model.Item.Discover',
'job-extended-read': 'hudson.model.Item.ExtendedRead',
'job-move': 'hudson.model.Item.Move',
'job-read': 'hudson.model.Item.Read',
'job-status': 'hudson.model.Item.ViewStatus',
'job-workspace': 'hudson.model.Item.Workspace',
'ownership-jobs': ''.join((ownership, 'Jobs')),
'run-delete': 'hudson.model.Run.Delete',
'run-update': 'hudson.model.Run.Update',
'scm-tag': 'hudson.scm.SCM.Tag',
}
if data:
matrix = XML.SubElement(xml_parent,
'hudson.security.AuthorizationMatrixProperty')
for (username, perms) in data.items():
for perm in perms:
pe = XML.SubElement(matrix, 'permission')
try:
pe.text = "{0}:{1}".format(mapping[perm], username)
except KeyError:
raise InvalidAttributeError(username, perm, mapping.keys())
def priority_sorter(registry, xml_parent, data):
"""yaml: priority-sorter
Allows simple ordering of builds, using a configurable job priority.
Requires the Jenkins :jenkins-wiki:`Priority Sorter Plugin
<Priority+Sorter+Plugin>`.
:arg int priority: Priority of the job. Higher value means higher
priority, with 100 as the standard priority. (required)
Example:
.. literalinclude:: /../../tests/properties/fixtures/priority_sorter.yaml
:language: yaml
"""
priority_sorter_tag = XML.SubElement(xml_parent,
'hudson.queueSorter.'
'PrioritySorterJobProperty')
try:
XML.SubElement(priority_sorter_tag, 'priority').text = str(
data['priority'])
except KeyError as e:
raise MissingAttributeError(e)
def build_blocker(registry, xml_parent, data):
"""yaml: build-blocker
This plugin keeps the job in the queue
if the name of at least one currently running job
matches one of the given regular expressions.
Requires the Jenkins :jenkins-wiki:`Build Blocker Plugin
<Build+Blocker+Plugin>`.
:arg bool use-build-blocker: Enable or disable build blocker (default true)
:arg list blocking-jobs: One regular expression per line to select
blocking jobs by their names. (required)
:arg str block-level: block build globally ('GLOBAL') or per node ('NODE')
(default 'GLOBAL')
:arg str queue-scanning: scan build queue for all builds ('ALL') or only
buildable builds ('BUILDABLE') (default 'DISABLED')
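For instance, blocking on deploy jobs might look like the following
sketch (the expressions are illustrative):

.. code-block:: yaml

    properties:
      - build-blocker:
          use-build-blocker: true
          blocking-jobs:
            - '.*-deploy'
          block-level: 'NODE'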
Example:
.. literalinclude::
/../../tests/properties/fixtures/build-blocker01.yaml
:language: yaml
"""
blocker = XML.SubElement(xml_parent,
'hudson.plugins.'
'buildblocker.BuildBlockerProperty')
if data is None or 'blocking-jobs' not in data:
raise JenkinsJobsException('blocking-jobs field is missing')
elif data.get('blocking-jobs', None) is None:
raise JenkinsJobsException('blocking-jobs list must not be empty')
XML.SubElement(blocker, 'useBuildBlocker').text = str(
data.get('use-build-blocker', True)).lower()
jobs = '\n'.join(data['blocking-jobs']) + '\n'
XML.SubElement(blocker, 'blockingJobs').text = jobs
block_level_list = ('GLOBAL', 'NODE')
block_level = data.get('block-level', 'GLOBAL')
if block_level not in block_level_list:
raise InvalidAttributeError('block-level',
block_level,
block_level_list)
XML.SubElement(blocker, 'blockLevel').text = block_level
queue_scanning_list = ('DISABLED', 'ALL', 'BUILDABLE')
queue_scanning = data.get('queue-scanning', 'DISABLED')
if queue_scanning not in queue_scanning_list:
raise InvalidAttributeError('queue-scanning',
queue_scanning,
queue_scanning_list)
XML.SubElement(blocker, 'scanQueueFor').text = queue_scanning
def copyartifact(registry, xml_parent, data):
"""yaml: copyartifact
Specify a list of projects that have access to copy the artifacts of
this project.
Requires the Jenkins :jenkins-wiki:`Copy Artifact plugin
<Copy+Artifact+Plugin>`.
:arg str projects: comma separated list of projects that can copy
artifacts of this project. Wild card character '*' is available.
Example:
.. literalinclude::
/../../tests/properties/fixtures/copyartifact.yaml
:language: yaml
"""
copyartifact = XML.SubElement(xml_parent,
'hudson.plugins.'
'copyartifact.'
'CopyArtifactPermissionProperty',
plugin='copyartifact')
if not data or not data.get('projects', None):
raise JenkinsJobsException("projects string must exist and "
"not be empty")
projectlist = XML.SubElement(copyartifact, 'projectNameList')
for project in str(data.get('projects')).split(','):
XML.SubElement(projectlist, 'string').text = project
def batch_tasks(registry, xml_parent, data):
"""yaml: batch-tasks
Batch tasks can be tasks for events like releases, integration, archiving,
etc. In this way, anyone in the project team can execute them in a way that
leaves a record.
A batch task consists of a shell script and a name. When you execute
a build, the shell script gets run on the workspace, just like a build.
Batch tasks and builds "lock" the workspace, so when one of those
activities is in progress, all the others will block in the queue.
Requires the Jenkins :jenkins-wiki:`Batch Task Plugin <Batch+Task+Plugin>`.
:arg list batch-tasks: Batch tasks.
:Tasks:
* **name** (`str`) Task name.
* **script** (`str`) Task script.
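A minimal sketch (task name and script are illustrative):

.. code-block:: yaml

    properties:
      - batch-tasks:
          - name: release
            script: |
              make release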
Example:
.. literalinclude:: /../../tests/properties/fixtures/batch-task.yaml
:language: yaml
"""
pdef = XML.SubElement(xml_parent,
'hudson.plugins.batch__task.BatchTaskProperty')
tasks = XML.SubElement(pdef, 'tasks')
for task in data:
batch_task = XML.SubElement(tasks,
'hudson.plugins.batch__task.BatchTask')
XML.SubElement(batch_task, 'name').text = task['name']
XML.SubElement(batch_task, 'script').text = task['script']
def heavy_job(registry, xml_parent, data):
"""yaml: heavy-job
This plugin allows you to define a "weight" for each job,
making each job consume that many executors.
Requires the Jenkins :jenkins-wiki:`Heavy Job Plugin <Heavy+Job+Plugin>`.
:arg int weight: Specify the total number of executors
that this job should occupy (default 1)
Example:
.. literalinclude:: /../../tests/properties/fixtures/heavy-job.yaml
:language: yaml
"""
heavyjob = XML.SubElement(xml_parent,
'hudson.plugins.'
'heavy__job.HeavyJobProperty')
XML.SubElement(heavyjob, 'weight').text = str(
data.get('weight', 1))
def slave_utilization(registry, xml_parent, data):
"""yaml: slave-utilization
This plugin allows you to specify the percentage of a slave's capacity a
job wants to use.
Requires the Jenkins :jenkins-wiki:`Slave Utilization Plugin
<Slave+Utilization+Plugin>`.
:arg int slave-percentage: Specify the percentage of a slave's execution
slots that this job should occupy (default 0)
:arg bool single-instance-per-slave: Control whether concurrent instances
of this job will be permitted to run in parallel on a single slave
(default false)
Example:
.. literalinclude::
/../../tests/properties/fixtures/slave-utilization1.yaml
:language: yaml
"""
utilization = XML.SubElement(
xml_parent, 'com.suryagaddipati.jenkins.SlaveUtilizationProperty')
percent = int(data.get('slave-percentage', 0))
XML.SubElement(utilization, 'needsExclusiveAccessToNode'
).text = 'true' if percent else 'false'
XML.SubElement(utilization, 'slaveUtilizationPercentage'
).text = str(percent)
XML.SubElement(utilization, 'singleInstancePerSlave').text = str(
data.get('single-instance-per-slave', False)).lower()
def delivery_pipeline(registry, xml_parent, data):
"""yaml: delivery-pipeline
Requires the Jenkins :jenkins-wiki:`Delivery Pipeline Plugin
<Delivery+Pipeline+Plugin>`.
:arg str stage: Name of the stage for this job (default '')
:arg str task: Name of the task for this job (default '')
:arg str description: task description template for this job
(default '')
Minimal Example:
.. literalinclude::
/../../tests/properties/fixtures/delivery-pipeline-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/properties/fixtures/delivery-pipeline-full.yaml
:language: yaml
"""
pipeline = XML.SubElement(
xml_parent, 'se.diabol.jenkins.pipeline.PipelineProperty')
pipeline.set('plugin', 'delivery-pipeline-plugin')
mapping = [
('stage', 'stageName', ''),
('task', 'taskName', ''),
('description', 'descriptionTemplate', ''),
]
helpers.convert_mapping_to_xml(pipeline, data, mapping, fail_required=True)
def zeromq_event(registry, xml_parent, data):
"""yaml: zeromq-event
This is a Jenkins plugin that will publish Jenkins Job run events
(start, complete, finish) to a ZMQ PUB socket.
Requires the Jenkins `ZMQ Event Publisher
<https://git.openstack.org/cgit/openstack-infra/zmq-event-publisher>`_.
Example:
.. literalinclude::
/../../tests/properties/fixtures/zeromq-event.yaml
:language: yaml
"""
zmq_event = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.'
'ZMQEventPublisher.HudsonNotificationProperty')
XML.SubElement(zmq_event, 'enabled').text = 'true'
def slack(registry, xml_parent, data):
"""yaml: slack
Requires the Jenkins :jenkins-wiki:`Slack Plugin <Slack+Plugin>`
When using Slack Plugin version < 2.0, the Slack Plugin requires both a
publisher and properties, so please note that you have to add the
publisher to your job configuration as well. When using Slack Plugin
version >= 2.0, you should only configure the publisher.
:arg bool notify-start: Send notification when the job starts
(default false)
:arg bool notify-success: Send notification on success. (default false)
:arg bool notify-aborted: Send notification when job is aborted. (
default false)
:arg bool notify-not-built: Send notification when job set to NOT_BUILT
status. (default false)
:arg bool notify-unstable: Send notification when job becomes unstable.
(default false)
:arg bool notify-failure: Send notification when job fails.
(default false)
:arg bool notify-back-to-normal: Send notification when job is
succeeding again after being unstable or failed. (default false)
:arg bool notify-repeated-failure: Send notification when job is
still failing after last failure. (default false)
:arg bool include-test-summary: Include the test summary. (default
false)
:arg bool include-custom-message: Include a custom message into the
notification. (default false)
:arg str custom-message: Custom message to be included. (default '')
:arg str room: A comma separated list of rooms / channels to send
the notifications to. (default '')
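A minimal sketch (channel name is illustrative):

.. code-block:: yaml

    properties:
      - slack:
          notify-failure: true
          notify-back-to-normal: true
          room: '#ci-alerts'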
Example:
.. literalinclude::
/../../tests/properties/fixtures/slack001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
plugin_info = registry.get_plugin_info('Slack Notification Plugin')
plugin_ver = pkg_resources.parse_version(plugin_info.get('version', "0"))
if plugin_ver >= pkg_resources.parse_version("2.0"):
logger.warning(
"properties section is not used with plugin version >= 2.0",
)
mapping = (
('notify-start', 'startNotification', False),
('notify-success', 'notifySuccess', False),
('notify-aborted', 'notifyAborted', False),
('notify-not-built', 'notifyNotBuilt', False),
('notify-unstable', 'notifyUnstable', False),
('notify-failure', 'notifyFailure', False),
('notify-back-to-normal', 'notifyBackToNormal', False),
('notify-repeated-failure', 'notifyRepeatedFailure', False),
('include-test-summary', 'includeTestSummary', False),
('include-custom-message', 'includeCustomMessage', False),
('custom-message', 'customMessage', ''),
('room', 'room', ''),
)
slack = XML.SubElement(
xml_parent,
'jenkins.plugins.slack.SlackNotifier_-SlackJobProperty',
)
# Ensure that custom-message is set when include-custom-message is set
# to true.
if data.get('include-custom-message', False):
if not data.get('custom-message', ''):
raise MissingAttributeError('custom-message')
helpers.convert_mapping_to_xml(slack, data, mapping, fail_required=True)
def rebuild(registry, xml_parent, data):
"""yaml: rebuild
This plug-in allows the user to rebuild a parameterized build without
entering the parameters again. It will also allow the user to edit the
parameters before rebuilding.
Requires the Jenkins :jenkins-wiki:`Rebuild Plugin <Rebuild+Plugin>`.
:arg bool auto-rebuild: Rebuild without asking for parameters
(default false)
:arg bool rebuild-disabled: Disable rebuilding for this job
(default false)
Minimal Example:
.. literalinclude:: /../../tests/properties/fixtures/rebuild-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/properties/fixtures/rebuild-full.yaml
:language: yaml
"""
sub_element = XML.SubElement(xml_parent,
'com.sonyericsson.rebuild.RebuildSettings')
sub_element.set('plugin', 'rebuild')
mapping = [
('auto-rebuild', 'autoRebuild', False),
('rebuild-disabled', 'rebuildDisabled', False),
]
helpers.convert_mapping_to_xml(
sub_element, data, mapping, fail_required=True)
def build_discarder(registry, xml_parent, data):
"""yaml: build-discarder
:arg int days-to-keep: Number of days to keep builds for (default -1)
:arg int num-to-keep: Number of builds to keep (default -1)
:arg int artifact-days-to-keep: Number of days to keep builds with
artifacts (default -1)
:arg int artifact-num-to-keep: Number of builds with artifacts to keep
(default -1)
Example:
.. literalinclude::
/../../tests/properties/fixtures/build-discarder-001.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/build-discarder-002.yaml
:language: yaml
"""
base_sub = XML.SubElement(xml_parent,
'jenkins.model.BuildDiscarderProperty')
strategy = XML.SubElement(base_sub, 'strategy')
strategy.set('class', 'hudson.tasks.LogRotator')
mappings = [
('days-to-keep', 'daysToKeep', -1),
('num-to-keep', 'numToKeep', -1),
('artifact-days-to-keep', 'artifactDaysToKeep', -1),
('artifact-num-to-keep', 'artifactNumToKeep', -1),
]
helpers.convert_mapping_to_xml(
strategy, data, mappings, fail_required=True)
def slave_prerequisites(registry, xml_parent, data):
"""yaml: slave-prerequisites
This plugin allows checking prerequisites on a slave before
a job can run a build on it.
Requires the Jenkins :jenkins-wiki:`Slave Prerequisites Plugin
<Slave+Prerequisites+Plugin>`.
:arg str script: A script to be executed on the slave node.
If it returns a non-zero status, the node will be vetoed from hosting
the build. (required)
:arg str interpreter: Command line interpreter to be used for executing
the prerequisite script - either `shell` for Unix shell or `cmd` for
Windows batch script. (default shell)
Example:
.. literalinclude::
/../../tests/properties/fixtures/slave-prerequisites-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/properties/fixtures/slave-prerequisites-full.yaml
:language: yaml
"""
prereqs = XML.SubElement(xml_parent,
'com.cloudbees.plugins.JobPrerequisites')
mappings = [
('script', 'script', None),
('interpreter', 'interpreter', 'shell', {
'cmd': 'windows batch command',
'shell': 'shell script'}),
]
helpers.convert_mapping_to_xml(
prereqs, data, mappings, fail_required=True)
class Properties(jenkins_jobs.modules.base.Base):
sequence = 20
component_type = 'property'
component_list_type = 'properties'
def gen_xml(self, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
for prop in data.get('properties', []):
self.registry.dispatch('property', properties, prop) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/properties.py | properties.py |
import logging
import pkg_resources
import random
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
import jenkins_jobs.modules.helpers as helpers
def archive(registry, xml_parent, data):
"""yaml: archive
Archive build artifacts
:arg str artifacts: path specifier for artifacts to archive
:arg str excludes: path specifier for artifacts to exclude (optional)
:arg bool latest-only: only keep the artifacts from the latest
successful build
:arg bool allow-empty: pass the build if no artifacts are
found (default false)
:arg bool only-if-success: archive artifacts only if build is successful
(default false)
:arg bool fingerprint: fingerprint all archived artifacts (default false)
:arg bool default-excludes: This option enables or disables the
default Ant exclusions. (default true)
:arg bool case-sensitive: Treat include and exclude patterns as case
sensitive. (default true)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/archive001.yaml
:language: yaml
"""
logger = logging.getLogger("%s:archive" % __name__)
archiver = XML.SubElement(xml_parent, 'hudson.tasks.ArtifactArchiver')
artifacts = XML.SubElement(archiver, 'artifacts')
artifacts.text = data['artifacts']
if 'excludes' in data:
excludes = XML.SubElement(archiver, 'excludes')
excludes.text = data['excludes']
latest = XML.SubElement(archiver, 'latestOnly')
# backward compatibility
latest_only = data.get('latest_only', False)
if 'latest_only' in data:
logger.warning('latest_only is deprecated please use latest-only')
if 'latest-only' in data:
latest_only = data['latest-only']
if latest_only:
latest.text = 'true'
else:
latest.text = 'false'
if 'allow-empty' in data:
empty = XML.SubElement(archiver, 'allowEmptyArchive')
# Default behavior is to fail the build.
empty.text = str(data.get('allow-empty', False)).lower()
if 'only-if-success' in data:
success = XML.SubElement(archiver, 'onlyIfSuccessful')
success.text = str(data.get('only-if-success', False)).lower()
if 'fingerprint' in data:
fingerprint = XML.SubElement(archiver, 'fingerprint')
fingerprint.text = str(data.get('fingerprint', False)).lower()
default_excludes = XML.SubElement(archiver, 'defaultExcludes')
default_excludes.text = str(data.get('default-excludes', True)).lower()
if 'case-sensitive' in data:
case_sensitive = XML.SubElement(archiver, 'caseSensitive')
case_sensitive.text = str(data.get('case-sensitive', True)).lower()
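# Illustrative sketch only (paths are hypothetical): 'artifacts' is the
# single required key; every other key shown maps onto one optional XML
# element handled above.
#
#   publishers:
#     - archive:
#         artifacts: 'build/libs/*.jar'
#         excludes: 'build/libs/*-sources.jar'
#         latest-only: false
#         allow-empty: true
#         fingerprint: true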
def blame_upstream(registry, xml_parent, data):
"""yaml: blame-upstream
Notify upstream committers when the build fails
Requires the Jenkins :jenkins-wiki:`Blame Upstream Committers Plugin
<Blame+Upstream+Committers+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/blame001.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.blame__upstream__commiters.'
'BlameUpstreamCommitersPublisher')
def jclouds(registry, xml_parent, data):
"""yaml: jclouds
JClouds Cloud Storage Settings provides a way to store artifacts on
JClouds supported storage providers. Requires the Jenkins
:jenkins-wiki:`JClouds Plugin <JClouds+Plugin>`.
JClouds Cloud Storage Settings must be configured for the Jenkins instance.
:arg str profile: preconfigured storage profile (required)
:arg str files: files to upload (regex) (required)
:arg str basedir: the source file path (relative to workspace, Optional)
:arg str container: the destination container name (required)
:arg bool hierarchy: keep hierarchy (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jclouds001.yaml
"""
deployer = XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.blobstore.'
'BlobStorePublisher')
if 'profile' not in data:
raise JenkinsJobsException('profile parameter is missing')
XML.SubElement(deployer, 'profileName').text = data.get('profile')
entries = XML.SubElement(deployer, 'entries')
deployer_entry = XML.SubElement(entries,
'jenkins.plugins.jclouds.blobstore.'
'BlobStoreEntry')
try:
XML.SubElement(deployer_entry, 'container').text = data['container']
XML.SubElement(deployer_entry, 'path').text = data.get('basedir', '')
XML.SubElement(deployer_entry, 'sourceFile').text = data['files']
except KeyError as e:
raise JenkinsJobsException("blobstore requires '%s' to be set"
% e.args[0])
XML.SubElement(deployer_entry, 'keepHierarchy').text = str(
data.get('hierarchy', False)).lower()
def javadoc(registry, xml_parent, data):
"""yaml: javadoc
Publish Javadoc
Requires the Jenkins :jenkins-wiki:`Javadoc Plugin <Javadoc+Plugin>`.
:arg str directory: Directory relative to the root of the workspace,
such as 'myproject/build/javadoc' (optional)
:arg bool keep-all-successful: When true, it will retain Javadoc for each
successful build. This allows you to browse Javadoc for older builds,
at the expense of additional disk space requirement. If false, it will
only keep the latest Javadoc, so older Javadoc will be overwritten as
new builds succeed. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/javadoc001.yaml
:language: yaml
"""
root = XML.SubElement(xml_parent, 'hudson.tasks.JavadocArchiver')
if 'directory' in data:
XML.SubElement(root, 'javadocDir').text = data.get('directory', '')
XML.SubElement(root, 'keepAll').text = str(data.get(
'keep-all-successful', False)).lower()
def jdepend(registry, xml_parent, data):
"""yaml: jdepend
Publish jdepend report
Requires the :jenkins-wiki:`JDepend Plugin <JDepend+Plugin>`.
:arg str file: path to jdepend file (required)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jdepend001.yaml
:language: yaml
"""
jdepend = XML.SubElement(
xml_parent,
'hudson.plugins.jdepend.JDependRecorder')
mapping = [('file', 'configuredJDependFile', None)]
helpers.convert_mapping_to_xml(jdepend, data, mapping, fail_required=True)
def hue_light(registry, xml_parent, data):
"""yaml: hue-light
This plugin shows the state of your builds using the awesome Philips hue
lights.
Requires the Jenkins :jenkins-wiki:`hue-light Plugin
<hue-light+Plugin>`.
:arg int light-id: ID of light. Define multiple lights by a comma as a
separator (required)
:arg string pre-build: Colour of building state (default 'blue')
:arg string good-build: Colour of successful state (default 'green')
:arg string unstable-build: Colour of unstable state (default 'yellow')
:arg string bad-build: Colour of unsuccessful state (default 'red')
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/hue-light-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/hue-light-minimal.yaml
:language: yaml
"""
hue_light = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.hue__light.LightNotifier')
hue_light.set('plugin', 'hue-light')
lightId = XML.SubElement(hue_light, 'lightId')
id_mapping = [('light-id', 'string', None)]
helpers.convert_mapping_to_xml(
lightId, data, id_mapping, fail_required=True)
build_mapping = [
('pre-build', 'preBuild', 'blue'),
('good-build', 'goodBuild', 'green'),
('unstable-build', 'unstableBuild', 'yellow'),
('bad-build', 'badBuild', 'red'),
]
helpers.convert_mapping_to_xml(
hue_light, data, build_mapping, fail_required=True)
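# Illustrative sketch only (light IDs are hypothetical): 'light-id' is
# required and may hold several comma-separated IDs; the four colour
# keys fall back to the defaults listed in build_mapping above.
#
#   publishers:
#     - hue-light:
#         light-id: '1,2'
#         good-build: green
#         bad-build: red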
def campfire(registry, xml_parent, data):
"""yaml: campfire
Send build notifications to Campfire rooms.
Requires the Jenkins :jenkins-wiki:`Campfire Plugin <Campfire+Plugin>`.
Campfire notifications global default values must be configured for
the Jenkins instance. Default values will be used if no specific
values are specified for each job, so all config params are optional.
:arg str subdomain: override the default campfire subdomain
:arg str token: override the default API token
:arg bool ssl: override the default 'use SSL'
:arg str room: override the default room name
Example:
.. literalinclude:: /../../tests/publishers/fixtures/campfire001.yaml
:language: yaml
"""
root = XML.SubElement(xml_parent,
'hudson.plugins.campfire.'
'CampfireNotifier')
campfire = XML.SubElement(root, 'campfire')
if ('subdomain' in data and data['subdomain']):
subdomain = XML.SubElement(campfire, 'subdomain')
subdomain.text = data['subdomain']
if ('token' in data and data['token']):
token = XML.SubElement(campfire, 'token')
token.text = data['token']
if ('ssl' in data):
ssl = XML.SubElement(campfire, 'ssl')
ssl.text = str(data['ssl']).lower()
if ('room' in data and data['room']):
room = XML.SubElement(root, 'room')
name = XML.SubElement(room, 'name')
name.text = data['room']
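# NOTE: the attribute is smuggled in via the tag name on purpose; when
# serialized this yields <campfire reference="../../campfire" />, an
# XStream back-reference to the <campfire> element created above.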
XML.SubElement(room, 'campfire reference="../../campfire"')
def mqtt(registry, xml_parent, data):
"""yaml: mqtt
This plugin lets you send build notifications to an MQTT message queue.
Requires the :jenkins-wiki:`MQTT Notification Plugin
<MQTT+Notification+Plugin>`.
:arg str broker-url: the broker URL, as protocol://address:port (required)
:arg str credentials-id: credentials to use to connect to the broker
(optional)
:arg str topic: the message topic (default "jenkins/$PROJECT_URL")
:arg str message: the message itself (default "$BUILD_RESULT")
:arg str qos: one of AT_MOST_ONCE, AT_LEAST_ONCE, or EXACTLY_ONCE
(default AT_MOST_ONCE)
:arg bool retain-message: whether to resend message or not when a new
client connects (default false)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/mqtt-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/mqtt-full.yaml
:language: yaml
"""
mqtt = XML.SubElement(xml_parent,
'jenkins.plugins.mqttnotification.MqttNotifier')
mqtt.set('plugin', 'mqtt-notification-plugin')
mqtt_mapping = [
('broker-url', 'brokerUrl', None), ]
helpers.convert_mapping_to_xml(mqtt, data, mqtt_mapping,
fail_required=True)
mqtt_mapping = [
('credentials-id', 'credentialsId', None),
('topic', 'topic', 'jenkins/$PROJECT_URL'),
('message', 'message', '$BUILD_RESULT'),
('qos', 'qos', 'AT_MOST_ONCE', {'AT_MOST_ONCE': '0',
'AT_LEAST_ONCE': '1',
'EXACTLY_ONCE': '2'}),
('retain-message', 'retainMessage', False)
]
helpers.convert_mapping_to_xml(mqtt, data, mqtt_mapping)
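# Illustrative sketch only (the broker address is hypothetical): per the
# qos map above, AT_LEAST_ONCE is stored as '1' in the generated XML.
#
#   publishers:
#     - mqtt:
#         broker-url: tcp://localhost:1883
#         topic: jenkins/$PROJECT_URL
#         qos: AT_LEAST_ONCE
#         retain-message: false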
def codecover(registry, xml_parent, data):
"""yaml: codecover
This plugin allows you to capture code coverage reports from CodeCover.
Jenkins will generate the coverage trend report.
Requires the Jenkins :jenkins-wiki:`CodeCover Plugin <CodeCover+Plugin>`.
:arg str include: Specify the path to the CodeCover HTML report file,
relative to the workspace root (default '')
:arg int min-statement: Minimum statement threshold (default 0)
:arg int max-statement: Maximum statement threshold (default 90)
:arg int min-branch: Minimum branch threshold (default 0)
:arg int max-branch: Maximum branch threshold (default 80)
:arg int min-loop: Minimum loop threshold (default 0)
:arg int max-loop: Maximum loop threshold (default 50)
:arg int min-condition: Minimum condition threshold (default 0)
:arg int max-condition: Maximum condition threshold (default 50)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/codecover-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/codecover-full.yaml
:language: yaml
"""
codecover = XML.SubElement(
xml_parent, 'hudson.plugins.codecover.CodeCoverPublisher')
codecover.set('plugin', 'codecover')
XML.SubElement(codecover, 'includes').text = str(data.get('include', ''))
health_report = XML.SubElement(codecover, 'healthReports')
mapping = [
('min-statement', 'minStatement', 0),
('max-statement', 'maxStatement', 90),
('min-branch', 'minBranch', 0),
('max-branch', 'maxBranch', 80),
('min-loop', 'minLoop', 0),
('max-loop', 'maxLoop', 50),
('min-condition', 'minCondition', 0),
('max-condition', 'maxCondition', 50),
]
helpers.convert_mapping_to_xml(
health_report, data, mapping, fail_required=True)
def emotional_jenkins(registry, xml_parent, data):
"""yaml: emotional-jenkins
Emotional Jenkins. This funny plugin changes the expression of Mr. Jenkins
in the background when your builds fail.
Requires the Jenkins :jenkins-wiki:`Emotional Jenkins Plugin
<Emotional+Jenkins+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/emotional-jenkins.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'org.jenkinsci.plugins.emotional__jenkins.'
'EmotionalJenkinsPublisher')
def trigger_parameterized_builds(registry, xml_parent, data):
"""yaml: trigger-parameterized-builds
Trigger parameterized builds of other jobs.
Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
<Parameterized+Trigger+Plugin>`.
Use of the `node-label-name` or `node-label` parameters
requires the Jenkins :jenkins-wiki:`NodeLabel Parameter Plugin
<NodeLabel+Parameter+Plugin>`.
Note: 'node-parameters' overrides the Node that the triggered
project is tied to.
:arg list project: list of the jobs to trigger; a comma-separated
string containing the named jobs will be generated.
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
:arg bool node-parameters: Use the same Node for the triggered builds
that was used for this build. (optional)
:arg bool svn-revision: Pass svn revision to the triggered job (optional)
:arg bool include-upstream: Include/pass through Upstream SVN Revisions.
Only valid when 'svn-revision' is true. (default false)
:arg dict git-revision: Passes git revision to the triggered job
(optional).
* **combine-queued-commits** (bool): Whether to combine queued git
hashes or not (default false)
:arg bool combine-queued-commits: Combine Queued git hashes. Only valid
when 'git-revision' is true. (default false)
.. deprecated:: 1.5.0 Please use `combine-queued-commits` under the
`git-revision` argument instead.
:arg dict boolean-parameters: Pass boolean parameters to the downstream
jobs. Specify the name and boolean value mapping of the parameters.
(optional)
:arg str condition: when to trigger the other job. Can be: 'SUCCESS',
'UNSTABLE', 'FAILED_OR_BETTER', 'UNSTABLE_OR_BETTER',
'UNSTABLE_OR_WORSE', 'FAILED', 'ALWAYS'. (default 'ALWAYS')
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the property files are not found in the workspace.
Only valid when 'property-file' is specified.
(default 'False')
:arg bool use-matrix-child-files: Use files in workspaces of child
builds (default 'False')
:arg str matrix-child-combination-filter: A Groovy expression to filter
the child builds to look in for files
:arg bool only-exact-matrix-child-runs: Use only child builds triggered
exactly by the parent.
:arg str file-encoding: Encoding of contents of the files. If not
specified, default encoding of the platform is used. Only valid when
'property-file' is specified. (optional)
:arg bool trigger-with-no-params: Trigger a build even when there are
currently no parameters defined (default 'False')
:arg str restrict-matrix-project: Filter that restricts the subset
of the combinations that the downstream project will run (optional)
:arg str node-label-name: Specify the Name for the NodeLabel parameter.
(optional)
:arg str node-label: Specify the Node for the NodeLabel parameter.
(optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/trigger_parameterized_builds001.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/trigger_parameterized_builds003.yaml
:language: yaml
"""
logger = logging.getLogger("%s:trigger-parameterized-builds" % __name__)
pt_prefix = 'hudson.plugins.parameterizedtrigger.'
tbuilder = XML.SubElement(xml_parent, pt_prefix + 'BuildTrigger')
configs = XML.SubElement(tbuilder, 'configs')
# original order
orig_order = [
'predefined-parameters',
'git-revision',
'property-file',
'current-parameters',
'node-parameters',
'svn-revision',
'restrict-matrix-project',
'node-label-name',
'node-label',
'boolean-parameters',
]
try:
if registry.jjb_config.config_parser.getboolean(
'__future__', 'param_order_from_yaml'):
orig_order = None
except six.moves.configparser.NoSectionError:
pass
if orig_order:
logger.warning(
"Using deprecated order for parameter sets in "
"triggered-parameterized-builds. This will be changed in a future "
"release to inherit the order from the user defined yaml. To "
"enable this behaviour immediately, set the config option "
"'__future__.param_order_from_yaml' to 'true' and change the "
"input job configuration to use the desired order")
for project_def in data:
tconfig = XML.SubElement(configs, pt_prefix + 'BuildTriggerConfig')
tconfigs = XML.SubElement(tconfig, 'configs')
if orig_order:
parameters = orig_order
else:
parameters = project_def.keys()
for param_type in parameters:
param_value = project_def.get(param_type)
if param_value is None:
continue
if param_type == 'predefined-parameters':
params = XML.SubElement(tconfigs, pt_prefix +
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = param_value
elif param_type == 'git-revision' and param_value:
if 'combine-queued-commits' in project_def:
logger.warning(
"'combine-queued-commit' has moved to reside under "
"'git-revision' configuration, please update your "
"configs as support for this will be removed."
)
git_revision = {
'combine-queued-commits':
project_def['combine-queued-commits']
}
else:
git_revision = project_def['git-revision']
helpers.append_git_revision_config(tconfigs, git_revision)
elif param_type == 'property-file':
params = XML.SubElement(tconfigs,
pt_prefix + 'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = project_def['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(project_def.get('fail-on-missing',
False)).lower()
if 'file-encoding' in project_def:
XML.SubElement(params, 'encoding'
).text = project_def['file-encoding']
if 'use-matrix-child-files' in project_def:
# TODO: These parameters only affect execution in
# publishers of matrix projects; we should warn if they are
# used in other contexts.
XML.SubElement(params, "useMatrixChild").text = (
str(project_def['use-matrix-child-files']).lower())
XML.SubElement(params, "combinationFilter").text = (
project_def.get('matrix-child-combination-filter', ''))
XML.SubElement(params, "onlyExactRuns").text = (
str(project_def.get('only-exact-matrix-child-runs',
False)).lower())
elif param_type == 'current-parameters' and param_value:
XML.SubElement(tconfigs, pt_prefix + 'CurrentBuildParameters')
elif param_type == 'node-parameters' and param_value:
XML.SubElement(tconfigs, pt_prefix + 'NodeParameters')
elif param_type == 'svn-revision' and param_value:
param = XML.SubElement(tconfigs, pt_prefix +
'SubversionRevisionBuildParameters')
XML.SubElement(param, 'includeUpstreamParameters').text = str(
project_def.get('include-upstream', False)).lower()
elif param_type == 'restrict-matrix-project' and param_value:
subset = XML.SubElement(tconfigs, pt_prefix +
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(subset, 'filter').text = \
project_def['restrict-matrix-project']
elif (param_type == 'node-label-name' or
param_type == 'node-label'):
tag_name = ('org.jvnet.jenkins.plugins.nodelabelparameter.'
'parameterizedtrigger.NodeLabelBuildParameter')
if tconfigs.find(tag_name) is not None:
# already processed and can only have one
continue
params = XML.SubElement(tconfigs, tag_name)
name = XML.SubElement(params, 'name')
if 'node-label-name' in project_def:
name.text = project_def['node-label-name']
label = XML.SubElement(params, 'nodeLabel')
if 'node-label' in project_def:
label.text = project_def['node-label']
elif param_type == 'boolean-parameters' and param_value:
params = XML.SubElement(tconfigs,
pt_prefix + 'BooleanParameters')
config_tag = XML.SubElement(params, 'configs')
param_tag_text = pt_prefix + 'BooleanParameterConfig'
params_list = param_value
for name, value in params_list.items():
param_tag = XML.SubElement(config_tag, param_tag_text)
XML.SubElement(param_tag, 'name').text = name
XML.SubElement(param_tag, 'value').text = str(
value or False).lower()
if not list(tconfigs):
# no child parameter tags were added
tconfigs.set('class', 'java.util.Collections$EmptyList')
projects = XML.SubElement(tconfig, 'projects')
if isinstance(project_def['project'], list):
projects.text = ",".join(project_def['project'])
else:
projects.text = project_def['project']
condition = XML.SubElement(tconfig, 'condition')
condition.text = project_def.get('condition', 'ALWAYS')
trigger_with_no_params = XML.SubElement(tconfig,
'triggerWithNoParameters')
trigger_with_no_params.text = str(
project_def.get('trigger-with-no-params', False)).lower()
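# Illustrative sketch only (job names are hypothetical): the publisher
# takes a list of trigger configs; 'project' may be a single name or a
# list, and each parameter-source key becomes one child of <configs>.
#
#   publishers:
#     - trigger-parameterized-builds:
#         - project: 'deploy-job'
#           condition: SUCCESS
#           current-parameters: true
#           predefined-parameters: |
#             ENVIRONMENT=staging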
def trigger(registry, xml_parent, data):
"""yaml: trigger
Trigger non-parametrised builds of other jobs.
:arg str project: name of the job to trigger
:arg str threshold: when to trigger the other job (default 'SUCCESS'),
alternatives: SUCCESS, UNSTABLE, FAILURE
Example:
.. literalinclude:: /../../tests/publishers/fixtures/trigger_success.yaml
:language: yaml
"""
tconfig = XML.SubElement(xml_parent, 'hudson.tasks.BuildTrigger')
childProjects = XML.SubElement(tconfig, 'childProjects')
childProjects.text = data['project']
tthreshold = XML.SubElement(tconfig, 'threshold')
threshold = data.get('threshold', 'SUCCESS')
supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
if threshold not in supported_thresholds:
raise JenkinsJobsException("threshold must be one of %s" %
", ".join(supported_thresholds))
tname = XML.SubElement(tthreshold, 'name')
tname.text = hudson_model.THRESHOLDS[threshold]['name']
tordinal = XML.SubElement(tthreshold, 'ordinal')
tordinal.text = hudson_model.THRESHOLDS[threshold]['ordinal']
tcolor = XML.SubElement(tthreshold, 'color')
tcolor.text = hudson_model.THRESHOLDS[threshold]['color']
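# Illustrative sketch only (the job name is hypothetical): 'threshold'
# must be one of the supported_thresholds checked above.
#
#   publishers:
#     - trigger:
#         project: 'downstream-job'
#         threshold: UNSTABLE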
def clone_workspace(registry, xml_parent, data):
"""yaml: clone-workspace
Archive the workspace from builds of one project and reuse them as the SCM
source for another project.
Requires the Jenkins :jenkins-wiki:`Clone Workspace SCM Plugin
<Clone+Workspace+SCM+Plugin>`.
:arg str workspace-glob: Files to include in cloned workspace (default '')
:arg str workspace-exclude-glob: Files to exclude from cloned workspace
:arg str criteria: Criteria for build to be archived. Can be 'any',
'not failed', or 'successful'. (default 'any')
:arg str archive-method: Choose the method to use for archiving the
workspace. Can be 'tar' or 'zip'. (default 'tar')
:arg bool override-default-excludes: Override default ant excludes.
(default false)
Minimal example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace001.yaml
:language: yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace002.yaml
:language: yaml
"""
cloneworkspace = XML.SubElement(
xml_parent,
'hudson.plugins.cloneworkspace.CloneWorkspacePublisher')
cloneworkspace.set('plugin', 'clone-workspace-scm')
mappings = [
('workspace-glob', 'workspaceGlob', ''),
('override-default-excludes', 'overrideDefaultExcludes', False),
]
helpers.convert_mapping_to_xml(
cloneworkspace, data, mappings, fail_required=True)
if 'workspace-exclude-glob' in data:
XML.SubElement(
cloneworkspace,
'workspaceExcludeGlob').text = data['workspace-exclude-glob']
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(cloneworkspace, 'criteria').text = criteria
archive_list = ['TAR', 'ZIP']
archive_method = data.get('archive-method', 'TAR').upper()
if 'archive-method' in data and archive_method not in archive_list:
raise JenkinsJobsException(
'clone-workspace archive-method must be one of: '
+ ', '.join(archive_list))
else:
XML.SubElement(cloneworkspace, 'archiveMethod').text = archive_method
def cloverphp(registry, xml_parent, data):
"""yaml: cloverphp
Capture code coverage reports from PHPUnit
Requires the Jenkins :jenkins-wiki:`Clover PHP Plugin <Clover+PHP+Plugin>`.
Your job definition should pass to PHPUnit the --coverage-clover option
pointing to a file in the workspace (ex: clover-coverage.xml). The filename
has to be filled in the `xml-location` field.
:arg str xml-location: Path to the coverage XML file generated by PHPUnit
using --coverage-clover. Relative to workspace. (required)
:arg dict html: When present, whether the plugin should generate an HTML
report. Note that PHPUnit already provides an HTML report via its
--coverage-html option, which can be set in your builder (optional):
* **dir** (str): Directory where HTML report will be generated relative
to workspace. (required in `html` dict).
* **archive** (bool): Whether to archive HTML reports (default true).
:arg list metric-targets: List of metric targets to reach, must be one of
**healthy**, **unhealthy** and **failing**. Each metric target can
take two parameters:
* **method** Target for method coverage
* **statement** Target for statements coverage
Whenever a metric target is not filled in, the Jenkins plugin can fill
in defaults for you (as of v0.3.3 of the plugin the healthy target will
have method: 70 and statement: 80 if both are left empty). Jenkins Job
Builder will mimic that feature to ensure clean configuration diff.
Minimal example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp002.yaml
:language: yaml
"""
cloverphp = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.cloverphp.CloverPHPPublisher')
cloverphp.set('plugin', 'cloverphp')
# The plugin requires clover XML file to parse
if 'xml-location' not in data:
raise JenkinsJobsException('xml-location must be set')
# Whether HTML publishing has been checked
html_publish = False
# By default, disableArchiving = false. Note that we use
# reversed logic.
html_archive = True
if 'html' in data:
html_publish = True
html_dir = data['html'].get('dir', None)
html_archive = data['html'].get('archive', html_archive)
if html_dir is None:
# No point in going further, the plugin would not work
raise JenkinsJobsException("'dir' is required in the html block")
XML.SubElement(cloverphp, 'publishHtmlReport').text = str(
html_publish).lower()
if html_publish:
XML.SubElement(cloverphp, 'reportDir').text = html_dir
XML.SubElement(cloverphp, 'xmlLocation').text = data.get('xml-location')
XML.SubElement(cloverphp, 'disableArchiving').text = str(
not html_archive).lower()
# Handle targets
# Plugin v0.3.3 will fill defaults for us whenever healthy targets are both
# blanks.
default_metrics = {
'healthy': {'method': 70, 'statement': 80}
}
allowed_metrics = ['healthy', 'unhealthy', 'failing']
metrics = data.get('metric-targets', [])
# list of dicts to dict
metrics = dict(kv for m in metrics for kv in m.items())
# Populate defaults whenever nothing has been filled by user.
for default in default_metrics.keys():
if metrics.get(default, None) is None:
metrics[default] = default_metrics[default]
# The plugin would at least define empty targets so make sure
# we output them all in the XML regardless of what the user
# has or has not entered.
for target in allowed_metrics:
cur_target = XML.SubElement(cloverphp, target + 'Target')
for t_type in ['method', 'statement']:
val = metrics.get(target, {}).get(t_type)
if val is None or type(val) != int:
continue
if val < 0 or val > 100:
raise JenkinsJobsException(
"Publisher cloverphp metric target %s:%s = %s "
"is not in valid range 0-100." % (target, t_type, val))
XML.SubElement(cur_target, t_type + 'Coverage').text = str(val)
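# Illustrative sketch only (paths are hypothetical): 'xml-location' is
# required; leaving the healthy target out of 'metric-targets' lets the
# defaulting logic above fill in method: 70 / statement: 80.
#
#   publishers:
#     - cloverphp:
#         xml-location: 'build/clover-coverage.xml'
#         html:
#           dir: 'build/coverage-html'
#         metric-targets:
#           - failing:
#               method: 10
#               statement: 10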
def coverage(registry, xml_parent, data):
"""yaml: coverage
WARNING: The coverage function is deprecated. Instead, use the
cobertura function to generate a cobertura coverage report.
Requires the Jenkins :jenkins-wiki:`Cobertura Coverage Plugin
<Cobertura+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/coverage001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
logger.warning("Coverage function is deprecated. Switch to cobertura.")
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = '**/coverage.xml'
XML.SubElement(cobertura, 'onlyStable').text = 'false'
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '70'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '80'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '80'
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
XML.SubElement(cobertura, 'sourceEncoding').text = 'ASCII'
def cobertura(registry, xml_parent, data):
"""yaml: cobertura
Generate a cobertura coverage report.
Requires the Jenkins :jenkins-wiki:`Cobertura Coverage Plugin
<Cobertura+Plugin>`.
:arg str report-file: This is a file name pattern that can be used
to locate the cobertura xml report files (optional)
:arg bool only-stable: Include only stable builds (default false)
:arg bool fail-no-reports: fail builds if no coverage reports are found
(default false)
:arg bool fail-unhealthy: Unhealthy projects will be failed (default false)
:arg bool fail-unstable: Unstable projects will be failed (default false)
:arg bool health-auto-update: Auto update threshold for health on
successful build (default false)
:arg bool stability-auto-update: Auto update threshold for stability on
successful build (default false)
:arg bool zoom-coverage-chart: Zoom the coverage chart and crop area below
the minimum and above the maximum coverage of the past reports
(default false)
:arg str source-encoding: Override the source encoding (default ASCII)
:arg dict targets:
:targets: (packages, files, classes, method, line, conditional)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
* **failing** (`int`): Failing threshold (default 0)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cobertura001.yaml
:language: yaml
"""
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
mapping = [
('report-file', 'coberturaReportFile', '**/coverage.xml'),
('only-stable', 'onlyStable', False),
('fail-unhealthy', 'failUnhealthy', False),
('fail-unstable', 'failUnstable', False),
('health-auto-update', 'autoUpdateHealth', False),
('stability-auto-update', 'autoUpdateStability', False),
('zoom-coverage-chart', 'zoomCoverageChart', False),
('fail-no-reports', 'failNoReports', False),
]
helpers.convert_mapping_to_xml(
cobertura, data, mapping, fail_required=True)
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry,
'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('healthy', 0))
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('unhealthy',
0))
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = next(iter(item.keys()))
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('failing', 0))
XML.SubElement(cobertura, 'sourceEncoding').text = data.get(
'source-encoding', 'ASCII')
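# Illustrative sketch only (numbers are hypothetical): 'targets' is a
# list of single-key dicts; each key is one CoverageMetric (packages,
# files, classes, method, line, conditional) and supplies the healthy /
# unhealthy / failing thresholds read in the three loops above.
#
#   publishers:
#     - cobertura:
#         report-file: '**/coverage.xml'
#         targets:
#           - line:
#               healthy: 80
#               unhealthy: 60
#               failing: 40
#           - conditional:
#               healthy: 70
#               unhealthy: 50
#               failing: 30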
def jacoco(registry, xml_parent, data):
"""yaml: jacoco
Generate a JaCoCo coverage report.
Requires the Jenkins :jenkins-wiki:`JaCoCo Plugin <JaCoCo+Plugin>`.
:arg str exec-pattern: This is a file name pattern that can be used to
locate the jacoco report files (default
``**/**.exec``)
:arg str class-pattern: This is a file name pattern that can be used
to locate class files (default ``**/classes``)
:arg str source-pattern: This is a file name pattern that can be used
to locate source files (default ``**/src/main/java``)
:arg bool update-build-status: Update the build according to the results
(default false)
:arg str inclusion-pattern: This is a file name pattern that can be used
to include certain class files (optional)
:arg str exclusion-pattern: This is a file name pattern that can be used
to exclude certain class files (optional)
:arg dict targets:
:targets: (instruction, branch, complexity, line, method, class)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jacoco001.yaml
:language: yaml
"""
jacoco = XML.SubElement(xml_parent,
'hudson.plugins.jacoco.JacocoPublisher')
XML.SubElement(jacoco, 'execPattern').text = data.get(
'exec-pattern', '**/**.exec')
XML.SubElement(jacoco, 'classPattern').text = data.get(
'class-pattern', '**/classes')
XML.SubElement(jacoco, 'sourcePattern').text = data.get(
'source-pattern', '**/src/main/java')
XML.SubElement(jacoco, 'changeBuildStatus').text = str(data.get(
'update-build-status', False)).lower()
XML.SubElement(jacoco, 'inclusionPattern').text = data.get(
'inclusion-pattern', '')
XML.SubElement(jacoco, 'exclusionPattern').text = data.get(
'exclusion-pattern', '')
itemsList = ['instruction',
'branch',
'complexity',
'line',
'method',
'class']
for item in data['targets']:
item_name = next(iter(item.keys()))
if item_name not in itemsList:
raise JenkinsJobsException("item entered is not valid must be "
"one of: %s" % ",".join(itemsList))
item_values = item.get(item_name, 0)
XML.SubElement(jacoco,
'maximum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('healthy', 0))
XML.SubElement(jacoco,
'minimum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('unhealthy', 0))
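# Illustrative sketch only (numbers are hypothetical): each entry in
# 'targets' must use one of the names in itemsList above and is turned
# into a maximum<Item>Coverage / minimum<Item>Coverage element pair.
#
#   publishers:
#     - jacoco:
#         exec-pattern: '**/**.exec'
#         targets:
#           - branch:
#               healthy: 10
#               unhealthy: 2
#           - method:
#               healthy: 50
#               unhealthy: 40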
def ftp(registry, xml_parent, data):
"""yaml: ftp
Upload files via FTP.
Requires the Jenkins :jenkins-wiki:`Publish over FTP Plugin
<Publish+Over+FTP+Plugin>`.
:arg str site: name of the ftp site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false).
:arg bool flatten: only create files on the server, don't create
directories (default false).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ftp001.yaml
:language: yaml
"""
console_prefix = 'FTP: '
plugin_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ftp.BapFtpTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ftp.' \
'BapFtpPublisherPlugin'
(_, transfer_node) = base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
XML.SubElement(transfer_node, 'asciiMode').text = 'false'
def ftp_publisher(registry, xml_parent, data):
"""yaml: ftp-publisher
This plugin can be used to upload project artifacts and whole directories
to an ftp server.
Requires the Jenkins :jenkins-wiki:`FTP-Publisher Plugin
<FTP-Publisher+Plugin>`.
:arg list uploads: List of files to upload
:uploads:
* **file-path** ('str') -- Destination folder. It will be created
if it doesn't exist. Created relative to the ftp root directory.
(default '')
* **source-file** ('str') -- Source files which will be uploaded
(default '')
:arg str site-name: Name of FTP server to upload to (required)
:arg bool use-timestamps: Use timestamps in the FTP directory path (default
false)
:arg bool flatten-files: Flatten files on the FTP host (default false)
:arg bool skip-publishing: Skip publishing (default false)
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/ftp-publisher-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/ftp-publisher-full.yaml
:language: yaml
"""
ftp = XML.SubElement(xml_parent, 'com.zanox.hudson.plugins.FTPPublisher')
ftp.set('plugin', 'ftppublisher')
entries = XML.SubElement(ftp, 'entries')
if 'uploads' in data:
upload_mapping = [
('file-path', 'filePath', ''),
('source-file', 'sourceFile', ''),
]
for upload in data['uploads']:
entry = XML.SubElement(entries, 'com.zanox.hudson.plugins.Entry')
helpers.convert_mapping_to_xml(
entry, upload, upload_mapping, fail_required=True)
mapping = [
('site-name', 'siteName', None),
('use-timestamps', 'useTimestamps', False),
('flatten-files', 'flatten', False),
('skip-publishing', 'skip', False),
]
helpers.convert_mapping_to_xml(ftp, data, mapping, fail_required=True)
def junit(registry, xml_parent, data):
"""yaml: junit
Publish JUnit test results.
:arg str results: results filename (required)
:arg bool keep-long-stdio: Retain long standard output/error in test
results (default true).
:arg float health-scale-factor: Amplification factor to apply to test
failures when computing the test result contribution to the build
health score. (default 1.0)
:arg bool allow-empty-results: Do not fail the build on empty test results
(default false)
:arg bool test-stability: Add historical information about test
results stability (default false).
Requires the Jenkins :jenkins-wiki:`Test stability Plugin
<Test+stability+plugin>`.
:arg bool claim-build: Allow claiming of failed tests (default false)
Requires the Jenkins :jenkins-wiki:`Claim Plugin <Claim+plugin>`.
:arg bool measurement-plots: Create measurement plots (default false)
Requires the Jenkins :jenkins-wiki:`Measurement Plots Plugin
<Measurement+Plots+Plugin>`.
:arg bool flaky-test-reports: Publish flaky test reports (default false).
Requires the Jenkins :jenkins-wiki:`Flaky Test Handler Plugin
<Flaky+Test+Handler+Plugin>`.
:arg bool junit-attachments: Publish test attachments (default false).
Requires the Jenkins :jenkins-wiki:`JUnit Attachments Plugin
<JUnit+Attachments+Plugin>`.
Minimal example using defaults:
.. literalinclude:: /../../tests/publishers/fixtures/junit001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/junit002.yaml
:language: yaml
"""
junitresult = XML.SubElement(xml_parent,
'hudson.tasks.junit.JUnitResultArchiver')
junitresult.set('plugin', 'junit')
mapping = [
('results', 'testResults', None),
('keep-long-stdio', 'keepLongStdio', True),
('health-scale-factor', 'healthScaleFactor', '1.0'),
('allow-empty-results', 'allowEmptyResults', False),
]
helpers.convert_mapping_to_xml(
junitresult, data, mapping, fail_required=True)
datapublisher = XML.SubElement(junitresult, 'testDataPublishers')
if str(data.get('test-stability', False)).lower() == 'true':
XML.SubElement(datapublisher,
'de.esailors.jenkins.teststability'
'.StabilityTestDataPublisher')
if str(data.get('claim-build', False)).lower() == 'true':
XML.SubElement(datapublisher,
'hudson.plugins.claim.ClaimTestDataPublisher')
if str(data.get('measurement-plots', False)).lower() == 'true':
XML.SubElement(datapublisher,
'hudson.plugins.measurement__plots.TestDataPublisher')
if str(data.get('flaky-test-reports', False)).lower() == 'true':
XML.SubElement(datapublisher,
'com.google.jenkins.flakyTestHandler.plugin'
'.JUnitFlakyTestDataPublisher')
if str(data.get('junit-attachments', False)).lower() == 'true':
XML.SubElement(datapublisher,
'hudson.plugins.junitattachments.AttachmentPublisher')
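# Illustrative sketch only (the pattern is hypothetical): 'results' is
# the only required key; each boolean below gates one extra
# testDataPublishers child and needs the matching plugin installed.
#
#   publishers:
#     - junit:
#         results: 'target/surefire-reports/*.xml'
#         keep-long-stdio: false
#         test-stability: true
#         claim-build: true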
def cucumber_reports(registry, xml_parent, data):
"""yaml: cucumber-reports
This plugin creates pretty cucumber-jvm html reports on jenkins.
Requires the Jenkins :jenkins-wiki:`cucumber reports
<Cucumber+Reports+Plugin>`.
:arg str json-reports-path: The path relative to the workspace of
the json reports generated by cucumber-jvm e.g. target - leave
empty to scan the whole workspace (default '')
:arg str file-include-pattern: Include pattern (default '')
:arg str file-exclude-pattern: Exclude pattern (default '')
:arg str plugin-url-path: The path to the jenkins user content url
e.g. :samp:`http://host:port[/jenkins/]plugin` - leave empty if jenkins
url root is host:port (default '')
:arg bool skipped-fails: Skipped steps to cause the build to fail
(default false)
:arg bool pending-fails: Pending steps to cause the build to fail
(default false)
:arg bool undefined-fails: Undefined steps to cause the build to fail
(default false)
:arg bool missing-fails: Missing steps to cause the build to fail
(default false)
:arg bool no-flash-charts: Use javascript charts instead of flash charts
(default false)
:arg bool ignore-failed-tests: Entire build to fail when these tests fail
(default false)
:arg bool parallel-testing: Run same test in parallel for multiple devices
(default false)
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber-reports-complete.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber-reports-minimal.yaml
:language: yaml
"""
cucumber_reports = XML.SubElement(xml_parent,
'net.masterthought.jenkins.'
'CucumberReportPublisher')
cucumber_reports.set('plugin', 'cucumber-reports')
mappings = [
('json-reports-path', 'jsonReportDirectory', ''),
('plugin-url-path', 'pluginUrlPath', ''),
('file-include-pattern', 'fileIncludePattern', ''),
('file-exclude-pattern', 'fileExcludePattern', ''),
('skipped-fails', 'skippedFails', False),
('pending-fails', 'pendingFails', False),
('undefined-fails', 'undefinedFails', False),
('missing-fails', 'missingFails', False),
('no-flash-charts', 'noFlashCharts', False),
('ignore-failed-tests', 'ignoreFailedTests', False),
('parallel-testing', 'parallelTesting', False)
]
helpers.convert_mapping_to_xml(
cucumber_reports, data, mappings, fail_required=True)
def cucumber_testresult(registry, xml_parent, data):
"""yaml: cucumber-testresult
Publish cucumber test results.
Requires the Jenkins :jenkins-wiki:`cucumber testresult
<Cucumber+Test+Result+Plugin>`.
:arg str results: Results filename (required)
:arg bool ignore-bad-steps: Ignore non-existent step results (default false)
Minimal example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber-testresult-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/cucumber-testresult-complete.yaml
:language: yaml
"""
cucumber_result = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cucumber.'
'jsontestsupport.'
'CucumberTestResultArchiver')
cucumber_result.set('plugin', 'cucumber-testresult-plugin')
mappings = [
('results', 'testResults', None),
('ignore-bad-steps', 'ignoreBadSteps', False)
]
helpers.convert_mapping_to_xml(
cucumber_result, data, mappings, fail_required=True)
def xunit(registry, xml_parent, data):
"""yaml: xunit
Publish tests results. Requires the Jenkins :jenkins-wiki:`xUnit Plugin
<xUnit+Plugin>`.
:arg str thresholdmode: Whether thresholds represents an absolute number
of tests or a percentage. Either 'number' or 'percent'. (default
'number')
:arg list thresholds: Thresholds for both 'failed' and 'skipped' tests.
:threshold (`dict`): Threshold values to set; where missing, xUnit
should default to an internal value of 0. Each test threshold
should contain the following:
* **unstable** (`int`)
* **unstablenew** (`int`)
* **failure** (`int`)
* **failurenew** (`int`)
:arg int test-time-margin: Time margin, in ms, allowed when checking
whether a report file is new; a stale report fails the build when the
option **requireupdate** is set for the configured framework.
(default 3000)
:arg list types: Frameworks to configure, and options. Supports the
following: ``aunit``, ``boosttest``, ``checktype``, ``cpptest``,
``cppunit``, ``ctest``, ``dotnettest``, ``embunit``, ``fpcunit``,
``gtest``, ``junit``, ``mstest``, ``nunit``, ``phpunit``, ``tusar``,
``unittest``, and ``valgrind``.
The 'custom' type is not supported.
:type (`dict`): each type can be configured using the following:
* **pattern** (`str`): An Ant pattern to look for Junit result
files, relative to the workspace root (default '')
* **requireupdate** (`bool`): fail the build whenever fresh test
results have not been found (default true).
* **deleteoutput** (`bool`): delete temporary JUnit files
(default true).
* **skip-if-no-test-files** (`bool`): Skip parsing this xUnit type
report if there are no test reports files (default false).
* **stoponerror** (`bool`): Fail the build whenever an error occurs
during the processing of a result file (default true).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/xunit001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
xunit = XML.SubElement(xml_parent, 'xunit')
xunit.set('plugin', 'xunit')
# Map our internal types to the XML element names used by Jenkins plugin
types_to_plugin_types = {
'aunit': 'AUnitJunitHudsonTestType',
'boosttest': 'BoostTestJunitHudsonTestType',
'checktype': 'CheckType',
'cpptest': 'CppTestJunitHudsonTestType',
'cppunit': 'CppUnitJunitHudsonTestType',
'ctest': 'CTestType',
'dotnettest': 'XUnitDotNetTestType', # since plugin v1.93
'embunit': 'EmbUnitType', # since plugin v1.84
'fpcunit': 'FPCUnitJunitHudsonTestType',
'gtest': 'GoogleTestType',
'junit': 'JUnitType',
'mstest': 'MSTestJunitHudsonTestType',
'nunit': 'NUnitJunitHudsonTestType',
'phpunit': 'PHPUnitJunitHudsonTestType',
'tusar': 'TUSARJunitHudsonTestType',
'unittest': 'UnitTestJunitHudsonTestType',
'valgrind': 'ValgrindJunitHudsonTestType',
# FIXME should implement the 'custom' type
}
implemented_types = types_to_plugin_types.keys() # shortcut
# Unit framework we are going to generate xml for
supported_types = []
for configured_type in data['types']:
type_name = next(iter(configured_type.keys()))
if type_name not in implemented_types:
logger.warning("Requested xUnit type '%s' is not yet supported",
type_name)
else:
# Append for generation
supported_types.append(configured_type)
# Generate XML for each of the supported framework types
xmltypes = XML.SubElement(xunit, 'types')
for supported_type in supported_types:
framework_name = next(iter(supported_type.keys()))
xmlframework = XML.SubElement(xmltypes,
types_to_plugin_types[framework_name])
mappings = [
('pattern', 'pattern', ''),
('requireupdate', 'failIfNotNew', True),
('deleteoutput', 'deleteOutputFiles', True),
('skip-if-no-test-files', 'skipNoTestFiles', False),
('stoponerror', 'stopProcessingIfError', True),
]
helpers.convert_mapping_to_xml(xmlframework,
supported_type[framework_name],
mappings,
fail_required=True)
xmlthresholds = XML.SubElement(xunit, 'thresholds')
for t in data.get('thresholds', []):
if not ('failed' in t or 'skipped' in t):
logger.warning(
"Unrecognized threshold, should be 'failed' or 'skipped'")
continue
elname = ("org.jenkinsci.plugins.xunit.threshold.%sThreshold" %
next(iter(t.keys())).title())
el = XML.SubElement(xmlthresholds, elname)
for threshold_name, threshold_value in next(iter(t.values())).items():
# Normalize and craft the element name for this threshold
elname = "%sThreshold" % threshold_name.lower().replace(
'new', 'New')
XML.SubElement(el, elname).text = str(threshold_value)
# Whether to use percent of exact number of tests.
# Thresholdmode is either:
# - 1 : absolute (number of tests), default.
# - 2 : relative (percentage of tests)
thresholdmode = '1'
if 'percent' == data.get('thresholdmode', 'number'):
thresholdmode = '2'
XML.SubElement(xunit, 'thresholdMode').text = thresholdmode
extra_config = XML.SubElement(xunit, 'extraConfiguration')
XML.SubElement(extra_config, 'testTimeMargin').text = str(
data.get('test-time-margin', '3000'))
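# Illustrative sketch only (patterns and numbers are hypothetical):
# 'types' entries are keyed by the framework names supported in
# types_to_plugin_types above, and 'thresholds' entries by 'failed' or
# 'skipped'; with thresholdmode 'percent' the values are stored as
# mode '2' (relative) instead of '1' (absolute).
#
#   publishers:
#     - xunit:
#         thresholdmode: number
#         thresholds:
#           - failed:
#               unstable: 3
#               failure: 10
#         types:
#           - junit:
#               pattern: 'reports/*.xml'
#               skip-if-no-test-files: true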
def _violations_add_entry(xml_parent, name, data):
vmin = data.get('min', 10)
vmax = data.get('max', 999)
vunstable = data.get('unstable', 999)
pattern = data.get('pattern', None)
entry = XML.SubElement(xml_parent, 'entry')
XML.SubElement(entry, 'string').text = name
tconfig = XML.SubElement(entry, 'hudson.plugins.violations.TypeConfig')
XML.SubElement(tconfig, 'type').text = name
XML.SubElement(tconfig, 'min').text = str(vmin)
XML.SubElement(tconfig, 'max').text = str(vmax)
XML.SubElement(tconfig, 'unstable').text = str(vunstable)
XML.SubElement(tconfig, 'usePattern').text = 'false'
if pattern:
XML.SubElement(tconfig, 'pattern').text = pattern
else:
XML.SubElement(tconfig, 'pattern')
def violations(registry, xml_parent, data):
"""yaml: violations
Publish code style violations.
Requires the Jenkins :jenkins-wiki:`Violations Plugin <Violations>`.
The violations component accepts any number of dictionaries keyed
by the name of the violations system. The dictionary has the
following values:
:arg int min: sunny threshold
:arg int max: stormy threshold
:arg int unstable: unstable threshold
:arg str pattern: report filename pattern
Any system without a dictionary provided will use default values.
Valid systems are:
checkstyle, codenarc, cpd, cpplint, csslint, findbugs, fxcop,
gendarme, jcreport, jslint, pep8, perlcritic, pmd, pylint,
simian, stylecop
Example:
.. literalinclude:: /../../tests/publishers/fixtures/violations001.yaml
:language: yaml
"""
violations = XML.SubElement(xml_parent,
'hudson.plugins.violations.'
'ViolationsPublisher')
config = XML.SubElement(violations, 'config')
suppressions = XML.SubElement(config, 'suppressions',
{'class': 'tree-set'})
XML.SubElement(suppressions, 'no-comparator')
configs = XML.SubElement(config, 'typeConfigs')
XML.SubElement(configs, 'no-comparator')
for name in ['checkstyle',
'codenarc',
'cpd',
'cpplint',
'csslint',
'findbugs',
'fxcop',
'gendarme',
'jcreport',
'jslint',
'pep8',
'perlcritic',
'pmd',
'pylint',
'simian',
'stylecop']:
_violations_add_entry(configs, name, data.get(name, {}))
XML.SubElement(config, 'limit').text = '100'
XML.SubElement(config, 'sourcePathPattern')
XML.SubElement(config, 'fauxProjectPath')
XML.SubElement(config, 'encoding').text = 'default'
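# Illustrative sketch only (the pattern is hypothetical): only the
# systems you want to tune need an entry; every other name in the list
# above still gets a TypeConfig with the defaults from
# _violations_add_entry.
#
#   publishers:
#     - violations:
#         pep8:
#           min: 0
#           max: 100
#           unstable: 50
#           pattern: '**/pep8.txt'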
def findbugs(registry, xml_parent, data):
"""yaml: findbugs
FindBugs reporting for builds
Requires the Jenkins :jenkins-wiki:`FindBugs Plugin
<FindBugs+Plugin>`.
:arg str pattern: specifies the generated raw FindBugs XML report files,
such as \*\*/findbugs.xml or \*\*/findbugsXml.xml. (default '')
:arg bool rank-priority: Use rank as priority (default false)
:arg str include-files: Comma separated list of files to include.
(default '')
:arg str exclude-files: Comma separated list of files to exclude.
(default '')
:arg bool can-run-on-failed: Whether or not to run the plug-in on
failed builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings. (default false)
:arg int healthy: Sunny threshold (default '')
:arg int unhealthy: Stormy threshold (default '')
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-delta-values: Use delta for new warnings. (default false)
:arg bool use-previous-build-as-reference: If set then the number of new
warnings will always be calculated based on the previous build.
Otherwise the reference build. (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg dict thresholds:
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/findbugs-minimal.yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/findbugs-full.yaml
"""
findbugs = XML.SubElement(xml_parent,
'hudson.plugins.findbugs.FindBugsPublisher')
findbugs.set('plugin', 'findbugs')
helpers.findbugs_settings(findbugs, data)
helpers.build_trends_publisher('[FINDBUGS] ', findbugs, data)
def checkstyle(registry, xml_parent, data):
"""yaml: checkstyle
Publish trend reports with Checkstyle.
Requires the Jenkins :jenkins-wiki:`Checkstyle Plugin <Checkstyle+Plugin>`.
The checkstyle component accepts a dictionary with the
following values:
:arg str pattern: Report filename pattern (default '')
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (default '')
:arg int unhealthy: Stormy threshold (default '')
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high') (default 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files
(default '')
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/checkstyle004.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/checkstyle006.yaml
:language: yaml
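An inline sketch combining common options (illustrative values only;
old camelCase keys such as ``unHealthy`` are converted automatically,
as the code below shows):

.. code-block:: yaml

    publishers:
      - checkstyle:
          pattern: '**/checkstyle-result.xml'
          can-run-on-failed: true
          healthy: 0
          unhealthy: 100
          health-threshold: 'high'
          thresholds:
            unstable:
              total-high: 10
            failed:
              total-high: 50
              new-high: 5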
"""
def convert_settings(lookup, data):
"""Helper to convert settings from one key to another
"""
for old_key in list(data.keys()):
if old_key in lookup:
data.setdefault(lookup[old_key], data[old_key])
del data[old_key]
checkstyle = XML.SubElement(xml_parent,
'hudson.plugins.checkstyle.'
'CheckStylePublisher')
checkstyle.set('plugin', 'checkstyle')
# Convert old style yaml to new style
convert_settings({
'unHealthy': 'unhealthy',
'healthThreshold': 'health-threshold',
'defaultEncoding': 'default-encoding',
'canRunOnFailed': 'can-run-on-failed',
'shouldDetectModules': 'should-detect-modules'
}, data)
threshold_data = data.get('thresholds', {})
for threshold in ['unstable', 'failed']:
convert_settings({
'totalAll': 'total-all',
'totalHigh': 'total-high',
'totalNormal': 'total-normal',
'totalLow': 'total-low'
}, threshold_data.get(threshold, {}))
helpers.build_trends_publisher('[CHECKSTYLE] ', checkstyle, data)
def scp(registry, xml_parent, data):
"""yaml: scp
Upload files via SCP
Requires the Jenkins :jenkins-wiki:`SCP Plugin <SCP+plugin>`.
When writing a publisher macro, it is important to keep in mind that
Jenkins uses Ant's `SCP Task
<https://ant.apache.org/manual/Tasks/scp.html>`_ via the Jenkins
:jenkins-wiki:`SCP Plugin <SCP+plugin>` which relies on `FileSet
<https://ant.apache.org/manual/Types/fileset.html>`_
and `DirSet <https://ant.apache.org/manual/Types/dirset.html>`_ patterns.
The relevant piece of documentation is excerpted below:
Source points to files which will be uploaded. You can use ant
includes syntax, e.g. ``folder/dist/*.jar``. Path is constructed from
workspace root. Note that you cannot point files outside the workspace
directory. For example providing: ``../myfile.txt`` won't work...
Destination points to destination folder on remote site. It will be
created if it doesn't exist and is relative to the root repository
path. You can define multiple blocks of source/destination pairs.
This means that absolute paths, e.g., ``/var/log/**`` will not work and
will fail to compile. All paths need to be relative to the directory that
the publisher runs and the paths have to be contained inside of that
directory. The relative working directory is usually::
/home/jenkins/workspace/${JOB_NAME}
:arg str site: name of the scp site (required)
:arg list files: list of files to upload (required); each entry accepts:

    :files:
        * **target** (`str`) -- destination directory (required)
        * **source** (`str`) -- source path specifier (default '')
        * **keep-hierarchy** (`bool`) -- keep the file hierarchy when
          uploading (default false)
        * **copy-after-failure** (`bool`) -- copy files even if the job
          fails (default false)
        * **copy-console** (`bool`) -- copy the console log (default
          false); if specified, omit 'source'
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scp001.yaml
:language: yaml
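A minimal inline sketch of the ``files`` list the implementation
expects (site name and paths are illustrative):

.. code-block:: yaml

    publishers:
      - scp:
          site: 'example-scp-site'
          files:
            - target: 'dist'
              source: 'build/*.tar.gz'
              keep-hierarchy: false
            - target: 'logs'
              copy-console: true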
"""
scp = XML.SubElement(xml_parent,
'be.certipost.hudson.plugin.SCPRepositoryPublisher')
scp.set('plugin', 'scp')
mappings = [
('site', 'siteName', None),
]
helpers.convert_mapping_to_xml(scp, data, mappings, fail_required=True)
entries = XML.SubElement(scp, 'entries')
for entry in data['files']:
entry_e = XML.SubElement(entries, 'be.certipost.hudson.plugin.Entry')
mappings = [
('target', 'filePath', None),
('source', 'sourceFile', ''),
('keep-hierarchy', 'keepHierarchy', False),
('copy-console', 'copyConsoleLog', False),
('copy-after-failure', 'copyAfterFailure', False),
]
helpers.convert_mapping_to_xml(
entry_e, entry, mappings, fail_required=True)
def ssh(registry, xml_parent, data):
"""yaml: ssh
Upload files via SCP.
Requires the Jenkins :jenkins-wiki:`Publish over SSH Plugin
<Publish+Over+SSH+Plugin>`.
:arg str site: name of the ssh site (required)
:arg str target: destination directory (required)
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier (required)
:arg str command: a command to execute on the remote server (optional)
:arg int timeout: timeout in milliseconds for the Exec command (optional)
:arg bool use-pty: run the exec command in pseudo TTY (default false)
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false).
:arg bool always-publish-from-master: transfer the files through the master
before being sent to the remote server (default false)
:arg bool flatten: only create files on the server, don't create
directories (default false).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ssh001.yaml
:language: yaml
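A minimal inline sketch (host and paths are illustrative):

.. code-block:: yaml

    publishers:
      - ssh:
          site: 'server.example.com'
          target: 'remote/dir'
          source: 'dist/**/*.jar'
          remove-prefix: 'dist'
          command: 'ls -la'
          timeout: 1800000
          fail-on-error: true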
"""
console_prefix = 'SSH: '
tag_prefix = 'jenkins.plugins.publish'
publisher_tag = '%s__over__ssh.BapSshPublisher' % tag_prefix
transfer_tag = '%s__over__ssh.BapSshTransfer' % tag_prefix
reference_tag = '%s_over_ssh.BapSshPublisherPlugin' % tag_prefix
if xml_parent.tag == 'publishers':
plugin_tag = '%s__over__ssh.BapSshPublisherPlugin' % tag_prefix
else:
plugin_tag = '%s__over__ssh.BapSshBuilderPlugin' % tag_prefix
base_publish_over(xml_parent, data, console_prefix, plugin_tag,
publisher_tag, transfer_tag, reference_tag)
def pipeline(registry, xml_parent, data):
"""yaml: pipeline
Specify a downstream project in a pipeline.
Requires the Jenkins :jenkins-wiki:`Build Pipeline Plugin
<Build+Pipeline+Plugin>`.
:arg str project: the name of the downstream project
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the property files are not found in the workspace.
Only valid when 'property-file' is specified.
(default false)
:arg str file-encoding: Encoding of contents of the files. If not
specified, default encoding of the platform is used. Only valid when
'property-file' is specified. (optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pipeline002.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/pipeline003.yaml
:language: yaml
You can build pipeline jobs that are re-usable in different pipelines by
using a :ref:`job-template` to define the pipeline jobs,
and variable substitution to specify the name of
the downstream job in the pipeline.
Job-specific substitutions are useful here (see :ref:`project`).
See 'samples/pipeline.yaml' for an example pipeline implementation.
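A short inline sketch of a parameterized pipeline step (project name
and parameter values are illustrative):

.. code-block:: yaml

    publishers:
      - pipeline:
          project: 'deploy-{name}'
          current-parameters: true
          predefined-parameters: 'STAGE=prod'
          property-file: 'build.props'
          fail-on-missing: true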
"""
if 'project' in data and data['project'] != '':
pippub = XML.SubElement(xml_parent,
'au.com.centrumsystems.hudson.plugin.'
'buildpipeline.trigger.BuildPipelineTrigger')
configs = XML.SubElement(pippub, 'configs')
if 'predefined-parameters' in data:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = data['predefined-parameters']
if ('current-parameters' in data
and data['current-parameters']):
XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
if 'property-file' in data and data['property-file']:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = data['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(
data.get('fail-on-missing', False)).lower()
if 'file-encoding' in data:
XML.SubElement(params, 'encoding'
).text = data['file-encoding']
XML.SubElement(pippub, 'downstreamProjectNames').text = data['project']
def email(registry, xml_parent, data):
"""yaml: email
Email notifications on build failure.
Requires the Jenkins :jenkins-wiki:`Mailer Plugin
<Mailer>`.
:arg str recipients: Space separated list of recipient email addresses
(required)
:arg bool notify-every-unstable-build: Send an email for every
unstable build (default true)
:arg bool send-to-individuals: Send an email to the individual
who broke the build (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/email-minimal.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/email-complete.yaml
:language: yaml
"""
# TODO: raise exception if this is applied to a maven job
mailer = XML.SubElement(xml_parent,
'hudson.tasks.Mailer')
mailer.set('plugin', 'mailer')
mapping = [
('recipients', 'recipients', None)
]
helpers.convert_mapping_to_xml(mailer, data, mapping, fail_required=True)
# Note the logic reversal (included here to match the GUI)
if data.get('notify-every-unstable-build', True):
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'false'
else:
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'true'
XML.SubElement(mailer, 'sendToIndividuals').text = str(
data.get('send-to-individuals', False)).lower()
def claim_build(registry, xml_parent, data):
"""yaml: claim-build
Claim build failures
Requires the Jenkins :jenkins-wiki:`Claim Plugin <Claim+plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/claim-build001.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.claim.ClaimPublisher')
def base_email_ext(registry, xml_parent, data, ttype):
trigger = XML.SubElement(xml_parent,
'hudson.plugins.emailext.plugins.trigger.'
+ ttype)
email = XML.SubElement(trigger, 'email')
XML.SubElement(email, 'recipientList').text = ''
XML.SubElement(email, 'subject').text = '$PROJECT_DEFAULT_SUBJECT'
XML.SubElement(email, 'body').text = '$PROJECT_DEFAULT_CONTENT'
if 'send-to' in data:
XML.SubElement(email, 'sendToDevelopers').text = str(
'developers' in data['send-to']).lower()
XML.SubElement(email, 'sendToRequester').text = str(
'requester' in data['send-to']).lower()
XML.SubElement(email, 'includeCulprits').text = str(
'culprits' in data['send-to']).lower()
XML.SubElement(email, 'sendToRecipientList').text = str(
'recipients' in data['send-to']).lower()
else:
XML.SubElement(email, 'sendToRequester').text = 'false'
XML.SubElement(email, 'sendToDevelopers').text = 'false'
XML.SubElement(email, 'includeCulprits').text = 'false'
XML.SubElement(email, 'sendToRecipientList').text = 'true'
if ttype == 'ScriptTrigger':
XML.SubElement(trigger, 'triggerScript').text = data['trigger-script']
def email_ext(registry, xml_parent, data):
"""yaml: email-ext
Extend Jenkins' built-in email notification.
Requires the Jenkins :jenkins-wiki:`Email-ext Plugin
<Email-ext+plugin>`.
:arg bool disable-publisher: Disable the publisher, while maintaining the
settings. The usage model for this is when you want to test things out
in the build, not send out e-mails during the testing. A message will
be printed to the build log saying that the publisher is disabled.
(default false)
:arg str recipients: Comma separated list of recipient email addresses
(default '$DEFAULT_RECIPIENTS')
:arg str reply-to: Comma separated list of email addresses that should be
in the Reply-To header for this project (default '$DEFAULT_REPLYTO')
:arg str content-type: The content type of the emails sent. If not set, the
Jenkins plugin uses the value set on the main configuration page.
Possible values: 'html', 'text', 'both-html-text' or 'default'
(default 'default')
:arg str subject: Subject for the email, can include variables like
${BUILD_NUMBER} or even groovy or javascript code
(default '$DEFAULT_SUBJECT')
:arg str body: Content for the body of the email, can include variables
like ${BUILD_NUMBER}, but the real magic is using groovy or
javascript to hook into the Jenkins API itself
(default '$DEFAULT_CONTENT')
:arg bool attach-build-log: Include build log in the email (default false)
:arg bool compress-log: Compress build log in the email (default false)
:arg str attachments: pattern of files to include as attachment
(default '')
:arg bool always: Send an email for every result (default false)
:arg bool unstable: Send an email for an unstable result (default false)
:arg bool first-failure: Send an email for just the first failure
(default false)
:arg bool not-built: Send an email if not built (default false)
:arg bool aborted: Send an email if the build is aborted (default false)
:arg bool regression: Send an email if there is a regression
(default false)
:arg bool failure: Send an email if the build fails (default true)
:arg bool second-failure: Send an email for the second failure
(default false)
:arg bool improvement: Send an email if the build improves (default false)
:arg bool still-failing: Send an email if the build is still failing
(default false)
:arg bool success: Send an email for a successful build (default false)
:arg bool fixed: Send an email if the build is fixed (default false)
:arg bool still-unstable: Send an email if the build is still unstable
(default false)
:arg bool pre-build: Send an email before the build (default false)
:arg str trigger-script: A Groovy script used to determine if an email
should be sent.
:arg str presend-script: A Groovy script executed prior to sending the mail.
(default '')
:arg str postsend-script: A Groovy script executed after sending the email.
(default '')
:arg bool save-output: Save email content to workspace (default false)
:arg str matrix-trigger: If using matrix projects, when to trigger
:matrix-trigger values:
* **both**
* **only-parent**
* **only-configurations**
:arg list send-to: list of recipients from the predefined groups
:send-to values:
* **developers** (disabled by default)
* **requester** (disabled by default)
* **culprits** (disabled by default)
* **recipients** (enabled by default)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/email-ext001.yaml
:language: yaml
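An inline sketch combining the trigger flags with ``send-to`` (values
are illustrative only):

.. code-block:: yaml

    publishers:
      - email-ext:
          recipients: '[email protected]'
          content-type: html
          subject: 'Build ${BUILD_NUMBER} - $DEFAULT_SUBJECT'
          failure: true
          fixed: true
          matrix-trigger: only-parent
          send-to:
            - recipients
            - culprits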
"""
emailext = XML.SubElement(xml_parent,
'hudson.plugins.emailext.ExtendedEmailPublisher')
if 'recipients' in data:
XML.SubElement(emailext, 'recipientList').text = data['recipients']
else:
XML.SubElement(emailext, 'recipientList').text = '$DEFAULT_RECIPIENTS'
ctrigger = XML.SubElement(emailext, 'configuredTriggers')
if data.get('always', False):
base_email_ext(registry, ctrigger, data, 'AlwaysTrigger')
if data.get('unstable', False):
base_email_ext(registry, ctrigger, data, 'UnstableTrigger')
if data.get('first-failure', False):
base_email_ext(registry, ctrigger, data, 'FirstFailureTrigger')
if data.get('not-built', False):
base_email_ext(registry, ctrigger, data, 'NotBuiltTrigger')
if data.get('aborted', False):
base_email_ext(registry, ctrigger, data, 'AbortedTrigger')
if data.get('regression', False):
base_email_ext(registry, ctrigger, data, 'RegressionTrigger')
if data.get('failure', True):
base_email_ext(registry, ctrigger, data, 'FailureTrigger')
if data.get('second-failure', False):
base_email_ext(registry, ctrigger, data, 'SecondFailureTrigger')
if data.get('improvement', False):
base_email_ext(registry, ctrigger, data, 'ImprovementTrigger')
if data.get('still-failing', False):
base_email_ext(registry, ctrigger, data, 'StillFailingTrigger')
if data.get('success', False):
base_email_ext(registry, ctrigger, data, 'SuccessTrigger')
if data.get('fixed', False):
base_email_ext(registry, ctrigger, data, 'FixedTrigger')
if data.get('still-unstable', False):
base_email_ext(registry, ctrigger, data, 'StillUnstableTrigger')
if data.get('pre-build', False):
base_email_ext(registry, ctrigger, data, 'PreBuildTrigger')
if data.get('trigger-script', False):
base_email_ext(registry, ctrigger, data, 'ScriptTrigger')
content_type_mime = {
'text': 'text/plain',
'html': 'text/html',
'default': 'default',
'both-html-text': 'both',
}
ctype = data.get('content-type', 'default')
if ctype not in content_type_mime:
raise JenkinsJobsException('email-ext content type must be one of: %s'
% ', '.join(content_type_mime.keys()))
XML.SubElement(emailext, 'contentType').text = content_type_mime[ctype]
mappings = [
('subject', 'defaultSubject', '$DEFAULT_SUBJECT'),
('body', 'defaultContent', '$DEFAULT_CONTENT'),
('attachments', 'attachmentsPattern', ''),
('presend-script', 'presendScript', ''),
('postsend-script', 'postsendScript', ''),
('attach-build-log', 'attachBuildLog', False),
('compress-log', 'compressBuildLog', False),
('save-output', 'saveOutput', False),
('disable-publisher', 'disabled', False),
('reply-to', 'replyTo', '$DEFAULT_REPLYTO'),
]
helpers.convert_mapping_to_xml(
emailext, data, mappings, fail_required=True)
matrix_dict = {'both': 'BOTH',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix_trigger = data.get('matrix-trigger', None)
# If none defined, then do not create entry
if matrix_trigger is not None:
if matrix_trigger not in matrix_dict:
raise JenkinsJobsException("matrix-trigger entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(emailext, 'matrixTriggerMode').text = matrix_dict.get(
matrix_trigger)
def fingerprint(registry, xml_parent, data):
"""yaml: fingerprint
Fingerprint files to track them across builds. Requires the
Jenkins :jenkins-wiki:`Fingerprint Plugin <Fingerprint+Plugin>`.
:arg str files: files to fingerprint, follows the @includes of Ant fileset
(default '')
:arg bool record-artifacts: fingerprint all archived artifacts
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/fingerprint001.yaml
:language: yaml
"""
finger = XML.SubElement(xml_parent, 'hudson.tasks.Fingerprinter')
mappings = [
('files', 'targets', ''),
('record-artifacts', 'recordBuildArtifacts', False)
]
helpers.convert_mapping_to_xml(finger, data, mappings, fail_required=True)
def aggregate_tests(registry, xml_parent, data):
"""yaml: aggregate-tests
Aggregate downstream test results
:arg bool include-failed-builds: whether to include failed builds
Example:
.. literalinclude::
/../../tests/publishers/fixtures/aggregate-tests001.yaml
:language: yaml
"""
agg = XML.SubElement(xml_parent,
'hudson.tasks.test.AggregatedTestResultPublisher')
XML.SubElement(agg, 'includeFailedBuilds').text = str(data.get(
'include-failed-builds', False)).lower()
def aggregate_flow_tests(registry, xml_parent, data):
"""yaml: aggregate-flow-tests
Aggregate downstream test results in a Build Flow job.
Requires the Jenkins :jenkins-wiki:`Build Flow Test Aggregator Plugin
<Build+Flow+Test+Aggregator+Plugin>`.
:arg bool show-test-results-trend: whether to show test results
trend graph (default true)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/aggregate-flow-tests002.yaml
:language: yaml
"""
agg_flow = XML.SubElement(xml_parent, 'org.zeroturnaround.jenkins.'
'flowbuildtestaggregator.FlowTestAggregator')
XML.SubElement(agg_flow, 'showTestResultTrend').text = str(
data.get('show-test-results-trend', True)).lower()
def cppcheck(registry, xml_parent, data):
"""yaml: cppcheck
Cppcheck result publisher
Requires the Jenkins :jenkins-wiki:`Cppcheck Plugin <Cppcheck+Plugin>`.
:arg str pattern: File pattern for cppcheck xml report (required)
:arg bool ignoreblankfiles: Ignore blank files (default false)
:arg bool allow-no-report: Do not fail the build if the Cppcheck report
is not found (default false)
:arg dict thresholds:
:thresholds: Configure the build status and health. A build is
considered as unstable or failure if the new or total number
of issues exceeds the specified thresholds. The build health
is also determined by thresholds. If the actual number of issues
is between the provided thresholds, then the build health is
interpolated.
* **unstable** (`str`): Total number unstable threshold (default '')
* **new-unstable** (`str`): New number unstable threshold (default '')
* **failure** (`str`): Total number failure threshold (default '')
* **new-failure** (`str`): New number failure threshold (default '')
* **healthy** (`str`): Healthy threshold (default '')
* **unhealthy** (`str`): Unhealthy threshold (default '')
:arg dict severity:
:severity: Determines which severity of issues should be considered
when evaluating the build status and health, default all true
* **error** (`bool`): Severity error (default true)
* **warning** (`bool`): Severity warning (default true)
* **style** (`bool`): Severity style (default true)
* **performance** (`bool`): Severity performance (default true)
* **information** (`bool`): Severity information (default true)
* **nocategory** (`bool`): Severity nocategory (default true)
* **portability** (`bool`): Severity portability (default true)
:arg dict graph:
:graph: Graph configuration
* **xysize** (`array`): Chart width and height (default [500, 200])
* **num-builds-in-graph** (`int`): Builds number in graph (default 0)
:arg dict display:
:display: which errors to display, default only sum
* **sum** (`bool`): Display sum of all issues (default true)
* **error** (`bool`): Display errors (default false)
* **warning** (`bool`): Display warnings (default false)
* **style** (`bool`): Display style (default false)
* **performance** (`bool`): Display performance (default false)
* **information** (`bool`): Display information (default false)
* **nocategory** (`bool`): Display no category (default false)
* **portability** (`bool`): Display portability (default false)
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/cppcheck-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/cppcheck-complete.yaml
:language: yaml
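Note that, as the implementation below reads them, ``severity`` nests
under ``thresholds`` and ``display`` nests under ``graph``. An inline
sketch (illustrative values only):

.. code-block:: yaml

    publishers:
      - cppcheck:
          pattern: '**/cppcheck.xml'
          allow-no-report: true
          thresholds:
            unstable: '10'
            failure: '20'
            severity:
              warning: true
              style: false
          graph:
            xysize: [600, 300]
            num-builds-in-graph: 10
            display:
              sum: true
              error: true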
"""
cppextbase = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cppcheck.'
'CppcheckPublisher')
cppextbase.set('plugin', 'cppcheck')
cppext = XML.SubElement(cppextbase, 'cppcheckConfig')
mappings = [
('pattern', 'pattern', None),
('ignoreblankfiles', 'ignoreBlankFiles', False),
('allow-no-report', 'allowNoReport', False)
]
helpers.convert_mapping_to_xml(cppext, data, mappings, fail_required=True)
csev = XML.SubElement(cppext, 'configSeverityEvaluation')
thrsh = data.get('thresholds', {})
thrsh_mappings = [
('unstable', 'threshold', ''),
('new-unstable', 'newThreshold', ''),
('failure', 'failureThreshold', ''),
('new-failure', 'newFailureThreshold', ''),
('healthy', 'healthy', ''),
('unhealthy', 'unHealthy', '')
]
helpers.convert_mapping_to_xml(
csev, thrsh, thrsh_mappings, fail_required=True)
sev = thrsh.get('severity', {})
sev_mappings = [
('error', 'severityError', True),
('warning', 'severityWarning', True),
('style', 'severityStyle', True),
('performance', 'severityPerformance', True),
('information', 'severityInformation', True),
('nocategory', 'severityNoCategory', True),
('portability', 'severityPortability', True)
]
helpers.convert_mapping_to_xml(
csev, sev, sev_mappings, fail_required=True)
graph = data.get('graph', {})
cgraph = XML.SubElement(cppext, 'configGraph')
x, y = graph.get('xysize', [500, 200])
XML.SubElement(cgraph, 'xSize').text = str(x)
XML.SubElement(cgraph, 'ySize').text = str(y)
graph_mapping = [
('num-builds-in-graph', 'numBuildsInGraph', 0)
]
helpers.convert_mapping_to_xml(
cgraph, graph, graph_mapping, fail_required=True)
gdisplay = graph.get('display', {})
gdisplay_mappings = [
('sum', 'displayAllErrors', True),
('error', 'displayErrorSeverity', False),
('warning', 'displayWarningSeverity', False),
('style', 'displayStyleSeverity', False),
('performance', 'displayPerformanceSeverity', False),
('information', 'displayInformationSeverity', False),
('nocategory', 'displayNoCategorySeverity', False),
('portability', 'displayPortabilitySeverity', False)
]
helpers.convert_mapping_to_xml(
cgraph, gdisplay, gdisplay_mappings, fail_required=True)
def logparser(registry, xml_parent, data):
"""yaml: logparser
Requires the Jenkins :jenkins-wiki:`Log Parser Plugin <Log+Parser+Plugin>`.
:arg str parse-rules: full path to parse rules (default '')
:arg bool unstable-on-warning: mark build unstable on warning
(default false)
:arg bool fail-on-error: mark build failed on error (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/logparser001.yaml
:language: yaml
"""
clog = XML.SubElement(xml_parent,
'hudson.plugins.logparser.LogParserPublisher')
clog.set('plugin', 'log-parser')
mappings = [
('unstable-on-warning', 'unstableOnWarning', False),
('fail-on-error', 'failBuildOnError', False),
('parse-rules', 'parsingRulesPath', '')
]
helpers.convert_mapping_to_xml(clog, data, mappings, fail_required=True)
def copy_to_master(registry, xml_parent, data):
"""yaml: copy-to-master
Copy files to master from slave
Requires the Jenkins :jenkins-wiki:`Copy To Slave Plugin
<Copy+To+Slave+Plugin>`.
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg str destination: absolute path into which the files will be copied.
If left blank they will be copied into the workspace of the current job
(default '')
:arg bool run-after-result: If set, copying files back to the master will
not run until the build result is finalized. (default true)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/copy-to-master001.yaml
:language: yaml
"""
cm = XML.SubElement(xml_parent, 'com.michelin.'
'cio.hudson.plugins.copytoslave.CopyToMasterNotifier')
cm.set('plugin', 'copy-to-slave')
XML.SubElement(cm, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cm, 'excludes').text = ','.join(data.get('excludes', ['']))
mappings = [
('run-after-result', 'runAfterResultFinalised', True),
('destination', 'destinationFolder', '')
]
helpers.convert_mapping_to_xml(cm, data, mappings, fail_required=True)
if data.get('destination', ''):
XML.SubElement(cm, 'overrideDestinationFolder').text = 'true'
def jira(registry, xml_parent, data):
"""yaml: jira
Update relevant JIRA issues
Requires the Jenkins :jenkins-wiki:`JIRA Plugin <JIRA+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/jira001.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.jira.JiraIssueUpdater')
def growl(registry, xml_parent, data):
"""yaml: growl
Push notifications to growl client.
Requires the Jenkins :jenkins-wiki:`Growl Plugin <Growl+Plugin>`.
:arg str ip: IP address to send growl notifications to (required)
:arg bool notify-only-on-fail-or-recovery: send a growl only when build
fails or recovers from a failure (default false)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/growl-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/growl-full.yaml
:language: yaml
"""
growl = XML.SubElement(xml_parent, 'hudson.plugins.growl.GrowlPublisher')
growl.set('plugin', 'growl')
mapping = [
('ip', 'IP', None),
('notify-only-on-fail-or-recovery', 'onlyOnFailureOrRecovery', False),
]
helpers.convert_mapping_to_xml(growl, data, mapping, fail_required=True)
def groovy_postbuild(registry, xml_parent, data):
"""yaml: groovy-postbuild
Execute a groovy script.
Requires the Jenkins :jenkins-wiki:`Groovy Postbuild Plugin
<Groovy+Postbuild+Plugin>`.
Please pay attention to the version of the plugin you have installed.
There were incompatible changes between 1.x and 2.x. Please see the
:jenkins-wiki:`home page <Groovy+Postbuild+Plugin>` of this plugin
for full information, including the migration process.
:arg str script: The groovy script to execute
:arg list classpath: List of additional classpaths (>=1.6)
:arg str on-failure: What to do if the script fails: leave the build
as it is for 'nothing', mark the build as unstable for 'unstable',
or mark the job as failed for 'failed' (default 'nothing')
:arg bool matrix-parent: Run script for matrix parent only (>=1.9)
(default false)
:arg bool sandbox: Execute script inside of groovy sandbox (>=2.0)
(default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/groovy-postbuild001.yaml
:language: yaml
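A minimal inline sketch for plugin versions >= 2.0 (the script body and
classpath entry are illustrative):

.. code-block:: yaml

    publishers:
      - groovy-postbuild:
          script: 'manager.addShortText("built")'
          classpath:
            - 'file:/var/lib/jenkins/scripts/util.jar'
          on-failure: unstable
          sandbox: true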
"""
logger = logging.getLogger("%s:groovy-postbuild" % __name__)
# Backward compatibility with old format
if isinstance(data, six.string_types):
logger.warning(
"You are using a deprecated configuration format; please follow "
"the documentation to update it. It will not be supported in "
"future releases!"
)
data = {
'script': data,
}
# There are incompatible changes, we need to know version
info = registry.get_plugin_info('groovy-postbuild')
version = pkg_resources.parse_version(info.get('version', "0"))
# Version specific predicates
matrix_parent_support = version >= pkg_resources.parse_version("1.9")
security_plugin_support = version >= pkg_resources.parse_version("2.0")
extra_classpath_support = version >= pkg_resources.parse_version("1.6")
root_tag = (
'org.jvnet.hudson.plugins.groovypostbuild.GroovyPostbuildRecorder'
)
groovy = XML.SubElement(xml_parent, root_tag)
behavior = data.get('on-failure')
XML.SubElement(groovy, 'behavior').text = {
'unstable': '1',
'failed': '2',
}.get(behavior, '0')
if matrix_parent_support:
XML.SubElement(
groovy,
'runForMatrixParent',
).text = str(data.get('matrix-parent', False)).lower()
classpaths = data.get('classpath', list())
if security_plugin_support:
script = XML.SubElement(groovy, 'script')
XML.SubElement(script, 'script').text = data.get('script')
XML.SubElement(script, 'sandbox').text = str(
data.get('sandbox', False)
).lower()
if classpaths:
classpath = XML.SubElement(script, 'classpath')
for path in classpaths:
script_path = XML.SubElement(classpath, 'entry')
XML.SubElement(script_path, 'url').text = path
else:
XML.SubElement(groovy, 'groovyScript').text = data.get('script')
if extra_classpath_support and classpaths:
classpath = XML.SubElement(groovy, 'classpath')
for path in classpaths:
script_path = XML.SubElement(
classpath,
'org.jvnet.hudson.plugins.groovypostbuild.'
'GroovyScriptPath',
)
XML.SubElement(script_path, 'path').text = path
def base_publish_over(xml_parent, data, console_prefix,
plugin_tag, publisher_tag,
transferset_tag, reference_plugin_tag):
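# Shared scaffolding for the 'publish over X' plugin family (used by the
# ssh and cifs publishers): every variant emits the same
# delegate/publishers/transfers XML tree and differs only in the
# plugin-specific element tags passed in by the caller.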
outer = XML.SubElement(xml_parent, plugin_tag)
# 'Publish over SSH' builder has an extra top delegate element
if xml_parent.tag == 'builders':
outer = XML.SubElement(outer, 'delegate')
XML.SubElement(outer, 'consolePrefix').text = console_prefix
delegate = XML.SubElement(outer, 'delegate')
publishers = XML.SubElement(delegate, 'publishers')
inner = XML.SubElement(publishers, publisher_tag)
XML.SubElement(inner, 'configName').text = data['site']
XML.SubElement(inner, 'verbose').text = 'true'
transfers = XML.SubElement(inner, 'transfers')
transfersset = XML.SubElement(transfers, transferset_tag)
XML.SubElement(transfersset, 'remoteDirectory').text = data['target']
XML.SubElement(transfersset, 'sourceFiles').text = data['source']
XML.SubElement(transfersset, 'excludes').text = data.get('excludes', '')
XML.SubElement(transfersset, 'removePrefix').text = data.get(
'remove-prefix', '')
XML.SubElement(transfersset, 'remoteDirectorySDF').text = str(
data.get('target-is-date-format', False)).lower()
XML.SubElement(transfersset, 'flatten').text = str(
data.get('flatten', False)).lower()
XML.SubElement(transfersset, 'cleanRemote').text = str(
data.get('clean-remote', False)).lower()
if 'command' in data:
XML.SubElement(transfersset, 'execCommand').text = data['command']
if 'timeout' in data:
XML.SubElement(transfersset, 'execTimeout').text = str(data['timeout'])
if 'use-pty' in data:
XML.SubElement(transfersset, 'usePty').text = str(
data.get('use-pty', False)).lower()
XML.SubElement(inner, 'useWorkspaceInPromotion').text = 'false'
XML.SubElement(inner, 'usePromotionTimestamp').text = 'false'
XML.SubElement(delegate, 'continueOnError').text = 'false'
XML.SubElement(delegate, 'failOnError').text = str(
data.get('fail-on-error', False)).lower()
XML.SubElement(delegate, 'alwaysPublishFromMaster').text = str(
data.get('always-publish-from-master', False)).lower()
XML.SubElement(delegate, 'hostConfigurationAccess',
{'class': reference_plugin_tag, 'reference': '../..'})
return (outer, transfersset)
def cifs(registry, xml_parent, data):
"""yaml: cifs
Upload files via CIFS.
Requires the Jenkins :jenkins-wiki:`Publish over CIFS Plugin
<Publish+Over+CIFS+Plugin>`.
:arg str site: name of the cifs site/share (required)
:arg str target: destination directory (required)
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier (required)
:arg str excludes: excluded file pattern (default '')
:arg str remove-prefix: prefix to remove from uploaded file paths
(default '')
:arg bool fail-on-error: fail the build if an error occurs (default false).
:arg bool flatten: only create files on the server, don't create
directories (default false).
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cifs001.yaml
:language: yaml
"""
console_prefix = 'CIFS: '
plugin_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisher'
transfer_tag = 'jenkins.plugins.publish__over__cifs.CifsTransfer'
plugin_reference_tag = ('jenkins.plugins.publish_over_cifs.'
'CifsPublisherPlugin')
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def cigame(registry, xml_parent, data):
"""yaml: cigame
This plugin introduces a game where users get points
for improving the builds.
Requires the Jenkins :jenkins-wiki:`The Continuous Integration Game plugin
<The+Continuous+Integration+Game+plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cigame.yaml
:language: yaml
"""
XML.SubElement(xml_parent, 'hudson.plugins.cigame.GamePublisher')
def sonar(registry, xml_parent, data):
"""yaml: sonar
Sonar plugin support.
Requires the Jenkins `Sonar Plugin.
<http://docs.sonarqube.org/display/SONAR/\
Analyzing+with+SonarQube+Scanner+for+Jenkins>`_
:arg str jdk: JDK to use (inherited from the job if omitted). (optional)
:arg str branch: branch onto which the analysis will be posted (default '')
:arg str language: source code language (default '')
:arg str root-pom: Root POM (default 'pom.xml')
:arg bool private-maven-repo: If true, use private Maven repository.
(default false)
:arg str maven-opts: options given to maven (default '')
:arg str additional-properties: sonar analysis parameters (default '')
:arg dict skip-global-triggers:
:Triggers: * **skip-when-scm-change** (`bool`): skip analysis when
build triggered by scm (default false)
* **skip-when-upstream-build** (`bool`): skip analysis when
build triggered by an upstream build (default false)
* **skip-when-envvar-defined** (`str`): skip analysis when
the specified environment variable is set to true
(default '')
:arg str settings: Path to use as user settings.xml. It is possible to
provide a ConfigFileProvider settings file, see Example below.
(optional)
:arg str global-settings: Path to use as global settings.xml. It is
possible to provide a ConfigFileProvider settings file, see Example
below. (optional)
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`
for the Config File Provider "settings" and "global-settings" config.
This publisher supports the post-build action exposed by the Jenkins
Sonar Plugin, which triggers a Sonar analysis with Maven.
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/sonar-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/sonar-complete.yaml
:language: yaml
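An inline sketch of the trigger-skipping options (values are
illustrative only):

.. code-block:: yaml

    publishers:
      - sonar:
          branch: 'master'
          root-pom: 'pom.xml'
          additional-properties: '-Dsonar.verbose=true'
          skip-global-triggers:
            skip-when-scm-change: true
            skip-when-envvar-defined: 'SKIP_SONAR'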
"""
sonar = XML.SubElement(xml_parent, 'hudson.plugins.sonar.SonarPublisher')
sonar.set('plugin', 'sonar')
if 'jdk' in data:
XML.SubElement(sonar, 'jdk').text = data['jdk']
mappings = [
('branch', 'branch', ''),
('language', 'language', ''),
('root-pom', 'rootPom', 'pom.xml'),
('private-maven-repo', 'usePrivateRepository', False),
('maven-opts', 'mavenOpts', ''),
('additional-properties', 'jobAdditionalProperties', '')
]
helpers.convert_mapping_to_xml(sonar, data, mappings, fail_required=True)
if 'skip-global-triggers' in data:
data_triggers = data['skip-global-triggers']
triggers = XML.SubElement(sonar, 'triggers')
triggers_mappings = [
('skip-when-scm-change', 'skipScmCause', False),
('skip-when-upstream-build', 'skipUpstreamCause', False),
('skip-when-envvar-defined', 'envVar', '')
]
helpers.convert_mapping_to_xml(
triggers, data_triggers, triggers_mappings, fail_required=True)
helpers.config_file_provider_settings(sonar, data)
def performance(registry, xml_parent, data):
"""yaml: performance
Publish performance test results from jmeter and junit.
Requires the Jenkins :jenkins-wiki:`Performance Plugin
<Performance+Plugin>`.
:arg int failed-threshold: Specify the error percentage threshold that
marks the build as failed. A negative value means don't use this
threshold (default 0)
:arg int unstable-threshold: Specify the error percentage threshold that
marks the build as unstable. A negative value means don't use this
threshold (default 0)
:arg str unstable-response-time-threshold: Average response time threshold
(default '')
:arg float failed-threshold-positive: Maximum failed percentage for build
comparison (default 0.0)
:arg float failed-threshold-negative: Minimum failed percentage for build
comparison (default 0.0)
:arg float unstable-threshold-positive: Maximum unstable percentage for
build comparison (default 0.0)
:arg float unstable-threshold-negative: Minimum unstable percentage for
build comparison (default 0.0)
:arg int nth-build-number: Build number for build comparison (default 0)
:arg bool mode-relative-thresholds: Relative threshold mode (default false)
:arg str config-type: Compare based on (default 'ART')
:config-type values:
* **ART** -- Average Response Time
* **MRT** -- Median Response Time
* **PRT** -- Percentile Response Time
:arg bool mode-of-threshold: Mode of threshold, true for relative threshold
and false for error threshold (default false)
:arg bool fail-build: Fail build when result files are not present
(default false)
:arg bool compare-build-previous: Compare with previous build
(default false)
:arg bool mode-performance-per-test-case: Performance Per Test Case Mode
(default true)
:arg bool mode-thoughput: Show Throughput Chart (default false)
:arg dict report:
:(jmeter or junit): (`dict` or `str`): Specify a custom report file
(optional; jmeter default \**/*.jtl, junit default \**/TEST-\*.xml)
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/performance-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/performance-complete.yaml
:language: yaml
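An inline sketch of the two accepted ``report`` item forms, a bare
parser name or a name-to-glob mapping (globs are illustrative):

.. code-block:: yaml

    publishers:
      - performance:
          failed-threshold: 85
          unstable-threshold: -1
          report:
            - jmeter: '**/jmeter-results.jtl'
            - junit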
"""
perf = XML.SubElement(xml_parent, 'hudson.plugins.performance.'
'PerformancePublisher')
perf.set('plugin', 'performance')
types = ['ART', 'MRT', 'PRT']
mappings = [
('failed-threshold', 'errorFailedThreshold', 0),
('unstable-threshold', 'errorUnstableThreshold', 0),
('unstable-response-time-threshold',
'errorUnstableResponseTimeThreshold',
''),
('failed-threshold-positive',
'relativeFailedThresholdPositive',
'0.0'),
('failed-threshold-negative',
'relativeFailedThresholdNegative',
'0.0'),
('unstable-threshold-positive',
'relativeUnstableThresholdPositive',
'0.0'),
('unstable-threshold-negative',
'relativeUnstableThresholdNegative',
'0.0'),
('nth-build-number', 'nthBuildNumber', 0),
('mode-relative-thresholds', 'modeRelativeThresholds', False),
('config-type', 'configType', 'ART', types),
('mode-of-threshold', 'modeOfThreshold', False),
('fail-build', 'failBuildIfNoResultFile', False),
('compare-build-previous', 'compareBuildPrevious', False),
('mode-performance-per-test-case', 'modePerformancePerTestCase', True),
('mode-thoughput', 'modeThroughput', False)
]
helpers.convert_mapping_to_xml(perf, data, mappings, fail_required=True)
parsers = XML.SubElement(perf, 'parsers')
if 'report' in data:
for item in data['report']:
if isinstance(item, dict):
item_name = next(iter(item.keys()))
item_values = item.get(item_name, None)
if item_name == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.'
'performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = str(item_values)
elif item_name == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.'
'performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = str(item_values)
else:
raise JenkinsJobsException("You have not specified jmeter "
"or junit, or you have "
"incorrectly assigned the key "
"value.")
elif isinstance(item, str):
if item == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.'
'performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = '**/*.jtl'
elif item == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.'
'performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = '**/TEST-*.xml'
else:
raise JenkinsJobsException("You have not specified jmeter "
"or junit, or you have "
"incorrectly assigned the key "
"value.")
def join_trigger(registry, xml_parent, data):
"""yaml: join-trigger
Trigger a job after all the immediate downstream jobs have completed.
Requires the Jenkins :jenkins-wiki:`Join Plugin <Join+Plugin>`.
:arg bool even-if-unstable: if true jobs will trigger even if some
downstream jobs are marked as unstable (default false)
:arg list projects: list of projects to trigger
:arg list publishers: list of triggers from publishers module that
defines projects that need to be triggered
Example:
.. literalinclude:: /../../tests/publishers/fixtures/join-trigger001.yaml
:language: yaml
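An inline sketch (project names are illustrative; the nested
``publishers`` entry assumes a trigger-style publisher such as
``trigger`` is available):

.. code-block:: yaml

    publishers:
      - join-trigger:
          even-if-unstable: true
          projects:
            - cleanup-job
          publishers:
            - trigger:
                project: report-job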
"""
jointrigger = XML.SubElement(xml_parent, 'join.JoinTrigger')
joinProjectsText = ','.join(data.get('projects', ['']))
XML.SubElement(jointrigger, 'joinProjects').text = joinProjectsText
publishers = XML.SubElement(jointrigger, 'joinPublishers')
for pub in data.get('publishers', []):
for edited_node in create_publishers(registry, pub):
publishers.append(edited_node)
unstable = str(data.get('even-if-unstable', 'false')).lower()
XML.SubElement(jointrigger, 'evenIfDownstreamUnstable').text = unstable
def jabber(registry, xml_parent, data):
"""yaml: jabber
Integrates Jenkins with the Jabber/XMPP instant messaging protocol
Requires the Jenkins :jenkins-wiki:`Jabber Plugin <Jabber+Plugin>`.
:arg bool notify-on-build-start: Whether to send notifications
to channels when a build starts (default false)
:arg bool notify-scm-committers: Whether to send notifications
to the users that are suspected of having broken this build
(default false)
:arg bool notify-scm-culprits: Also send notifications to 'culprits'
from previous unstable/failed builds (default false)
:arg bool notify-upstream-committers: Whether to send notifications to
upstream committers if no committers were found for a broken build
(default false)
:arg bool notify-scm-fixers: Whether to send notifications to the users
that have fixed a broken build (default false)
:arg list group-targets: List of group targets to notify
:arg list individual-targets: List of individual targets to notify
:arg dict strategy: When to send notifications (default all)
:strategy values:
* **all** -- Always
* **failure** -- On any failure
* **failure-fixed** -- On failure and fixes
* **new-failure-fixed** -- On new failure and fixes
* **change** -- Only on state change
:arg dict message: Channel notification message (default summary-scm)
:message values:
* **summary-scm** -- Summary + SCM changes
* **summary** -- Just summary
* **summary-build** -- Summary and build parameters
* **summary-scm-fail** -- Summary, SCM changes, and failed tests
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/jabber-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/jabber-complete.yaml
:language: yaml
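An inline sketch (JIDs are illustrative):

.. code-block:: yaml

    publishers:
      - jabber:
          notify-on-build-start: true
          group-targets:
            - '[email protected]'
          individual-targets:
            - '[email protected]'
          strategy: failure-fixed
          message: summary-scm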
"""
j = XML.SubElement(xml_parent, 'hudson.plugins.jabber.im.transport.'
'JabberPublisher')
j.set('plugin', 'jabber')
t = XML.SubElement(j, 'targets')
if 'group-targets' in data:
for group in data['group-targets']:
gcimt = XML.SubElement(t, 'hudson.plugins.im.'
'GroupChatIMMessageTarget')
gcimt.set('plugin', 'instant-messaging')
XML.SubElement(gcimt, 'name').text = group
XML.SubElement(gcimt, 'notificationOnly').text = 'false'
if 'individual-targets' in data:
for individual in data['individual-targets']:
dimt = XML.SubElement(t, 'hudson.plugins.im.'
'DefaultIMMessageTarget')
dimt.set('plugin', 'instant-messaging')
XML.SubElement(dimt, 'value').text = individual
strategy = data.get('strategy', 'all')
strategydict = {'all': 'ALL',
'failure': 'ANY_FAILURE',
'failure-fixed': 'FAILURE_AND_FIXED',
'new-failure-fixed': 'NEW_FAILURE_AND_FIXED',
'change': 'STATECHANGE_ONLY'}
if strategy not in strategydict:
raise JenkinsJobsException("Strategy entered is not valid, must be " +
"one of: all, failure, failure-fixed, or "
"change")
XML.SubElement(j, 'strategy').text = strategydict[strategy]
mappings = [
('notify-on-build-start', 'notifyOnBuildStart', False),
('notify-scm-committers', 'notifySuspects', False),
('notify-scm-culprits', 'notifyCulprits', False),
('notify-scm-fixers', 'notifyFixers', False),
('notify-upstream-committers', 'notifyUpstreamCommitters', False)
]
helpers.convert_mapping_to_xml(j, data, mappings, fail_required=True)
message = data.get('message', 'summary-scm')
messagedict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-build': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
if message not in messagedict:
raise JenkinsJobsException("Message entered is not valid, must be one "
"of: summary-scm, summary, summary-build "
"or summary-scm-fail")
XML.SubElement(j, 'buildToChatNotifier', {
'class': 'hudson.plugins.im.build_notify.' + messagedict[message]})
XML.SubElement(j, 'matrixMultiplier').text = 'ONLY_CONFIGURATIONS'
def workspace_cleanup(registry, xml_parent, data):
"""yaml: workspace-cleanup (post-build)
Requires the Jenkins :jenkins-wiki:`Workspace Cleanup Plugin
<Workspace+Cleanup+Plugin>`.
The pre-build workspace-cleanup is available as a wrapper.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too (default false)
:arg list clean-if: clean depending on build status
:clean-if values:
* **success** (`bool`) (default true)
* **unstable** (`bool`) (default true)
* **failure** (`bool`) (default true)
* **aborted** (`bool`) (default true)
* **not-built** (`bool`) (default true)
:arg bool fail-build: Fail the build if the cleanup fails (default true)
:arg bool clean-parent: Cleanup matrix parent workspace (default false)
:arg str external-deletion-command: external deletion command to run
against files and directories
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/workspace-cleanup-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/workspace-cleanup-complete.yaml
:language: yaml
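Note that ``clean-if`` is a list of single-key mappings, as the
implementation below merges them one by one. An inline sketch
(patterns are illustrative):

.. code-block:: yaml

    publishers:
      - workspace-cleanup:
          include:
            - '*.tmp'
          dirmatch: true
          clean-if:
            - success: true
            - failure: false
          fail-build: false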
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.WsCleanup')
p.set("plugin", "ws-cleanup")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
mappings = [
('dirmatch', 'deleteDirs', False),
('clean-parent', 'cleanupMatrixParent', False),
('external-deletion-command', 'externalDelete', '')
]
helpers.convert_mapping_to_xml(p, data, mappings, fail_required=True)
mask = [('success', 'cleanWhenSuccess'),
('unstable', 'cleanWhenUnstable'),
('failure', 'cleanWhenFailure'),
('not-built', 'cleanWhenNotBuilt'),
('aborted', 'cleanWhenAborted')]
clean = data.get('clean-if', [])
cdict = dict()
for d in clean:
cdict.update(d)
for k, v in mask:
XML.SubElement(p, v).text = str(cdict.pop(k, True)).lower()
if len(cdict) > 0:
raise ValueError('clean-if must be one of: %r' % [x[0] for x in mask])
if str(data.get("fail-build", False)).lower() == 'false':
XML.SubElement(p, 'notFailBuild').text = 'true'
else:
XML.SubElement(p, 'notFailBuild').text = 'false'
def maven_deploy(registry, xml_parent, data):
"""yaml: maven-deploy
Deploy artifacts to Maven repository.
:arg str id: Repository ID
:arg str url: Repository URL (optional)
:arg bool unique-version: Assign unique versions to snapshots
(default true)
:arg bool deploy-unstable: Deploy even if the build is unstable
(default false)
:arg str release-env-var: If the given variable name is set to "true",
the deploy steps are skipped. (optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/maven-deploy001.yaml
:language: yaml
"""
p = XML.SubElement(xml_parent, 'hudson.maven.RedeployPublisher')
if 'id' in data:
XML.SubElement(p, 'id').text = data['id']
if 'url' in data:
XML.SubElement(p, 'url').text = data['url']
XML.SubElement(p, 'uniqueVersion').text = str(
data.get('unique-version', True)).lower()
XML.SubElement(p, 'evenIfUnstable').text = str(
data.get('deploy-unstable', False)).lower()
if 'release-env-var' in data:
XML.SubElement(p, 'releaseEnvVar').text = data['release-env-var']
def artifactory(registry, xml_parent, data):
"""yaml: artifactory
Deploy artifacts to an Artifactory server.
Requires the Jenkins :jenkins-wiki:`Artifactory Plugin
<Artifactory+Plugin>`.
:arg str url: Artifactory server url (default '')
:arg str name: Artifactory user with permission to connect to the
selected Artifactory server (default '')
:arg str release-repo-key: Release repository name (default '')
:arg str snapshot-repo-key: Snapshots repository name (default '')
:arg bool publish-build-info: Push build metadata with artifacts
(default false)
:arg bool discard-old-builds:
Remove older build info from Artifactory (default false)
:arg bool discard-build-artifacts:
Remove older build artifacts from Artifactory (default false)
:arg bool even-if-unstable: Deploy artifacts even when the build
is unstable (default false)
:arg bool run-checks: Run automatic license scanning check after the
build is complete (default false)
:arg bool include-publish-artifacts: Include the build's published
module artifacts in the license violation checks if they are
also used as dependencies for other modules in this build
(default false)
:arg bool pass-identified-downstream: When true, a build parameter
named ARTIFACTORY_BUILD_ROOT with a value of
${JOB_NAME}-${BUILD_NUMBER} will be sent to downstream builds
(default false)
:arg bool license-auto-discovery: Tells Artifactory not to try
and automatically analyze and tag the build's dependencies
with license information upon deployment (default true)
:arg bool enable-issue-tracker-integration: When the Jenkins
JIRA plugin is enabled, synchronize information about JIRA
issues to Artifactory and attach issue information to build
artifacts (default false)
:arg bool aggregate-build-issues: When the Jenkins JIRA plugin
is enabled, include all issues from previous builds up to the
latest build status defined in "Aggregation Build Status"
(default false)
:arg bool allow-promotion-of-non-staged-builds: The build
promotion operation will be available to all successful builds
instead of only staged ones (default false)
:arg bool filter-excluded-artifacts-from-build: Add the excluded
files to the excludedArtifacts list and remove them from the
artifacts list in the build info (default false)
:arg str scopes: A list of dependency scopes/configurations to run
license violation checks on. If left empty all dependencies from
all scopes will be checked (default '')
:arg str violation-recipients: Recipients that need to be notified
of license violations in the build info (default '')
:arg list matrix-params: list of properties to attach to all deployed
artifacts in addition to the default ones: build.name, build.number,
and vcs.revision (default [])
:arg str black-duck-app-name: The existing Black Duck Code Center
application name (default '')
:arg str black-duck-app-version: The existing Black Duck Code Center
application version (default '')
:arg str black-duck-report-recipients: Recipients that will be emailed
a report after the automatic Black Duck Code Center compliance checks
finished (default '')
:arg str black-duck-scopes: A list of dependency scopes/configurations
to run Black Duck Code Center compliance checks on. If left empty
all dependencies from all scopes will be checked (default '')
:arg bool black-duck-run-checks: Automatic Black Duck Code Center
compliance checks will occur after the build completes
(default false)
:arg bool black-duck-include-published-artifacts: Include the build's
published module artifacts in the license violation checks if they
are also used as dependencies for other modules in this build
(default false)
:arg bool auto-create-missing-component-requests: Auto create
missing components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool auto-discard-stale-component-requests: Auto discard
stale components in Black Duck Code Center application after
the build is completed and deployed in Artifactory
(default true)
:arg bool deploy-artifacts: Push artifacts to the Artifactory
Server. Use deployment-include-patterns and
deployment-exclude-patterns to filter deploy artifacts. (default true)
:arg list deployment-include-patterns: New line or comma separated mappings
of build artifacts to published artifacts. Supports Ant-style wildcards
mapping to target directories. E.g.: */*.zip=>dir (default [])
:arg list deployment-exclude-patterns: New line or comma separated patterns
for excluding artifacts from deployment to Artifactory (default [])
:arg bool env-vars-include: Include all environment variables
accessible by the build process. Jenkins-specific env variables
are always included. Use env-vars-include-patterns and
env-vars-exclude-patterns to filter variables to publish,
(default false)
:arg list env-vars-include-patterns: Comma or space-separated list of
environment variables that will be included as part of the published
build info. Environment variables may contain the * and the ? wildcards
(default [])
:arg list env-vars-exclude-patterns: Comma or space-separated list of
environment variables that will be excluded from the published
build info (default [])
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifactory01.yaml
.. literalinclude:: /../../tests/publishers/fixtures/artifactory02.yaml
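A short inline sketch of a common deployment setup (server URL and
repository keys are illustrative):

.. code-block:: yaml

    publishers:
      - artifactory:
          url: 'https://artifactory.example.com'
          name: 'artifactory'
          release-repo-key: 'libs-release-local'
          snapshot-repo-key: 'libs-snapshot-local'
          publish-build-info: true
          deploy-artifacts: true
          deployment-include-patterns:
            - '*.jar'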
"""
artifactory = XML.SubElement(
xml_parent, 'org.jfrog.hudson.ArtifactoryRedeployPublisher')
# optional_props
helpers.artifactory_optional_props(artifactory, data, 'publishers')
XML.SubElement(artifactory, 'matrixParams').text = ','.join(
data.get('matrix-params', []))
# details
details = XML.SubElement(artifactory, 'details')
helpers.artifactory_common_details(details, data)
XML.SubElement(details, 'repositoryKey').text = data.get(
'release-repo-key', '')
XML.SubElement(details, 'snapshotsRepositoryKey').text = data.get(
'snapshot-repo-key', '')
plugin = XML.SubElement(details, 'stagingPlugin')
XML.SubElement(plugin, 'pluginName').text = 'None'
# artifactDeploymentPatterns
helpers.artifactory_deployment_patterns(artifactory, data)
# envVarsPatterns
helpers.artifactory_env_vars_patterns(artifactory, data)
def test_fairy(registry, xml_parent, data):
"""yaml: test-fairy
This plugin helps you to upload Android APKs or iOS IPA files to
www.testfairy.com.
Requires the Jenkins :jenkins-wiki:`Test Fairy Plugin
<TestFairy+Plugin>`.
:arg str platform: Select platform to upload to, **android** or **ios**
(required)
Android Only:
:arg str proguard-file: Path to Proguard file. Path of mapping.txt from
your proguard output directory. (default '')
:arg str storepass: Password for the keystore (default android)
:arg str alias: alias for key (default androiddebugkey)
:arg str keypass: password for the key (default '')
:arg str keystorepath: Path to Keystore file (required)
IOS Only:
:arg str dSYM-file: Path to .dSYM.zip file (default '')
All:
:arg str apikey: TestFairy API_KEY. Find it in your TestFairy account
settings (required)
:arg str appfile: Path to App file (.apk) or (.ipa). For example:
$WORKSPACE/[YOUR_FILE_NAME].apk or full path to the apk file.
(required)
:arg str tester-groups: Tester groups to notify (default '')
:arg bool notify-testers: Send email with changelogs to testers
(default false)
:arg bool autoupdate: Automatic update (default false)
:arg str max-duration: Duration of the session (default 10m)
:max-duration values:
* **10m**
* **60m**
* **300m**
* **1440m**
:arg bool record-on-background: Record on background (default false)
:arg bool data-only-wifi: Record data only in wifi (default false)
:arg bool video-enabled: Record video (default true)
:arg int screenshot-interval: Time interval between screenshots
(default 1)
:screenshot-interval values:
* **1**
* **2**
* **5**
:arg str video-quality: Video quality (default high)
:video-quality values:
* **high**
* **medium**
* **low**
:arg bool cpu: Enable CPU metrics (default true)
:arg bool memory: Enable memory metrics (default true)
:arg bool logs: Enable logs metrics (default true)
:arg bool network: Enable network metrics (default false)
:arg bool phone-signal: Enable phone signal metrics (default false)
:arg bool wifi: Enable wifi metrics (default false)
:arg bool gps: Enable gps metrics (default false)
:arg bool battery: Enable battery metrics (default false)
:arg bool opengl: Enable opengl metrics (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-android-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-android001.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-ios-minimal.yaml
:language: yaml
.. literalinclude::
/../../tests/publishers/fixtures/test-fairy-ios001.yaml
:language: yaml
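
A quick illustrative Android configuration (API key and paths are
placeholders):

.. code-block:: yaml

    publishers:
      - test-fairy:
          platform: android
          apikey: YOUR_TESTFAIRY_API_KEY
          appfile: $WORKSPACE/app.apk
          keystorepath: /path/to/debug.keystore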
"""
platform = data.get('platform')
valid_platforms = ['android', 'ios']
if 'platform' not in data:
raise MissingAttributeError('platform')
if platform == 'android':
root = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.testfairy.TestFairyAndroidRecorder')
helpers.test_fairy_common(root, data)
mappings = [
('proguard-file', 'mappingFile', ''),
('keystorepath', 'keystorePath', None),
('storepass', 'storepass', 'android'),
('alias', 'alias', 'androiddebugkey'),
('keypass', 'keypass', '')]
helpers.convert_mapping_to_xml(
root, data, mappings, fail_required=True)
elif platform == 'ios':
root = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.testfairy.TestFairyIosRecorder')
helpers.test_fairy_common(root, data)
mappings = [('dSYM-file', 'mappingFile', '')]
helpers.convert_mapping_to_xml(
root, data, mappings, fail_required=True)
else:
raise InvalidAttributeError('platform', platform, valid_platforms)
def text_finder(registry, xml_parent, data):
"""yaml: text-finder
This plugin lets you search keywords in the files you specified and
additionally check build status
Requires the Jenkins :jenkins-wiki:`Text-finder Plugin
<Text-finder+Plugin>`.
:arg str regexp: Specify a regular expression (required)
:arg str fileset: Specify the path to search (optional)
:arg bool also-check-console-output:
Search the console output (default false)
:arg bool succeed-if-found:
Force a build to succeed if a string was found (default false)
:arg bool unstable-if-found:
Set build unstable instead of failing the build (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/text-finder001.yaml
:language: yaml
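
An illustrative sketch (regexp and path are placeholders):

.. code-block:: yaml

    publishers:
      - text-finder:
          regexp: 'ALL TESTS PASSED'
          fileset: 'logs/**'
          succeed-if-found: true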
"""
finder = XML.SubElement(xml_parent,
'hudson.plugins.textfinder.TextFinderPublisher')
finder.set('plugin', 'text-finder')
if 'fileset' in data:
XML.SubElement(finder, 'fileSet').text = data['fileset']
mappings = [
('regexp', 'regexp', None),
('also-check-console-output', 'alsoCheckConsoleOutput', False),
('succeed-if-found', 'succeedIfFound', False),
('unstable-if-found', 'unstableIfFound', False)
]
helpers.convert_mapping_to_xml(finder, data, mappings, fail_required=True)
def html_publisher(registry, xml_parent, data):
"""yaml: html-publisher
This plugin publishes HTML reports.
Requires the Jenkins :jenkins-wiki:`HTML Publisher Plugin
<HTML+Publisher+Plugin>`.
:arg str name: Report name (required)
:arg str dir: HTML directory to archive (required)
:arg str files: Specify the pages to display (required)
:arg bool keep-all: keep HTML reports for each past build (default false)
:arg bool allow-missing: Allow missing HTML reports (default false)
:arg bool link-to-last-build: If this and 'keep-all' both are true, it
publishes the link on project level even if build failed.
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/html-publisher001.yaml
:language: yaml
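
An illustrative sketch (report name and paths are placeholders):

.. code-block:: yaml

    publishers:
      - html-publisher:
          name: 'Coverage Report'
          dir: 'coverage/html'
          files: 'index.html'
          keep-all: true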
"""
reporter = XML.SubElement(xml_parent, 'htmlpublisher.HtmlPublisher')
targets = XML.SubElement(reporter, 'reportTargets')
ptarget = XML.SubElement(targets, 'htmlpublisher.HtmlPublisherTarget')
mapping = [
('name', 'reportName', None),
('dir', 'reportDir', None),
('files', 'reportFiles', None),
('link-to-last-build', 'alwaysLinkToLastBuild', False),
('keep-all', 'keepAll', False),
('allow-missing', 'allowMissing', False),
]
helpers.convert_mapping_to_xml(ptarget, data, mapping, fail_required=True)
XML.SubElement(ptarget, 'wrapperName').text = "htmlpublisher-wrapper.html"
def rich_text_publisher(registry, xml_parent, data):
"""yaml: rich-text-publisher
This plugin puts custom rich text message to the Build pages and Job main
page.
Requires the Jenkins :jenkins-wiki:`Rich Text Publisher Plugin
<Rich+Text+Publisher+Plugin>`.
:arg str stable-text: The stable text (required)
:arg str unstable-text: The unstable text if different from stable
(default '')
:arg bool unstable-as-stable: The same text block is used for stable and
unstable builds (default true)
:arg str failed-text: The failed text if different from stable (default '')
:arg bool failed-as-stable: The same text block is used for stable and
failed builds (default true)
:arg str parser-name: HTML, Confluence or WikiText (default 'WikiText')
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/richtext-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/richtext-complete.yaml
:language: yaml
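
An illustrative sketch (the message text is a placeholder):

.. code-block:: yaml

    publishers:
      - rich-text-publisher:
          stable-text: 'Build OK'
          unstable-as-stable: true
          failed-as-stable: true
          parser-name: HTML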
"""
parsers = ['HTML', 'Confluence', 'WikiText']
reporter = XML.SubElement(
xml_parent,
'org.korosoft.jenkins.plugin.rtp.RichTextPublisher')
reporter.set('plugin', 'rich-text-publisher-plugin')
mappings = [
('stable-text', 'stableText', None),
('unstable-text', 'unstableText', ''),
('failed-text', 'failedText', ''),
('unstable-as-stable', 'unstableAsStable', True),
('failed-as-stable', 'failedAsStable', True),
('parser-name', 'parserName', 'WikiText', parsers)
]
helpers.convert_mapping_to_xml(
reporter, data, mappings, fail_required=True)
def tap(registry, xml_parent, data):
"""yaml: tap
Adds support to TAP test result files
Requires the Jenkins :jenkins-wiki:`TAP Plugin <TAP+Plugin>`.
:arg str results: TAP test result files (required)
:arg bool fail-if-no-results: Fail if no result (default false)
:arg bool failed-tests-mark-build-as-failure:
Mark build as failure if test fails (default false)
:arg bool output-tap-to-console: Output tap to console (default true)
:arg bool enable-subtests: Enable subtests (default true)
:arg bool discard-old-reports: Discard old reports (default false)
:arg bool todo-is-failure: Handle TODOs as failures (default true)
:arg bool include-comment-diagnostics: Include comment diagnostics (#) in
the results table (>=1.12) (default false)
:arg bool validate-tests: Validate number of tests (>=1.13) (default false)
:arg bool plan-required: TAP plan required? (>=1.17) (default true)
:arg bool verbose: Print a message for each TAP stream file (>=1.17)
(default true)
:arg bool show-only-failures: show only test failures (>=1.17)
(default false)
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/tap-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/tap-minimal.yaml
:language: yaml
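
An illustrative sketch (the result glob is a placeholder):

.. code-block:: yaml

    publishers:
      - tap:
          results: 'reports/*.tap'
          todo-is-failure: false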
"""
tap = XML.SubElement(xml_parent, 'org.tap4j.plugin.TapPublisher')
tap.set('plugin', 'tap')
mappings = [
('results', 'testResults', None),
('fail-if-no-results', 'failIfNoResults', False),
('failed-tests-mark-build-as-failure',
'failedTestsMarkBuildAsFailure',
False),
('output-tap-to-console', 'outputTapToConsole', True),
('enable-subtests', 'enableSubtests', True),
('discard-old-reports', 'discardOldReports', False),
('todo-is-failure', 'todoIsFailure', True),
('include-comment-diagnostics', 'includeCommentDiagnostics', False),
('validate-tests', 'validateNumberOfTests', False),
('plan-required', 'planRequired', True),
('verbose', 'verbose', True),
('show-only-failures', 'showOnlyFailures', False),
]
helpers.convert_mapping_to_xml(tap, data, mappings, fail_required=True)
def post_tasks(registry, xml_parent, data):
"""yaml: post-tasks
Adds support to post build task plugin
Requires the Jenkins :jenkins-wiki:`Post Build Task plugin
<Post+build+task>`.
:arg dict task: Post build task definition
:arg list task[matches]: list of matches when to run the task
:arg dict task[matches][*]: match definition
:arg str task[matches][*][log-text]: text to match against the log
:arg str task[matches][*][operator]: operator to apply with the next match
:task[matches][*][operator] values (default 'AND'):
* **AND**
* **OR**
:arg bool task[escalate-status]: Escalate the task status to the job
    (default false)
:arg bool task[run-if-job-successful]: Run only if the job was successful
    (default false)
:arg str task[script]: Shell script to run (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/post-tasks001.yaml
:language: yaml
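
An illustrative sketch (match text and script are placeholders):

.. code-block:: yaml

    publishers:
      - post-tasks:
          - matches:
              - log-text: 'Build was aborted'
                operator: AND
            escalate-status: false
            script: 'echo "cleaning up"'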
"""
pb_xml = XML.SubElement(xml_parent,
'hudson.plugins.postbuildtask.PostbuildTask')
tasks_xml = XML.SubElement(pb_xml, 'tasks')
for task in data:
task_xml = XML.SubElement(
tasks_xml,
'hudson.plugins.postbuildtask.TaskProperties')
matches_xml = XML.SubElement(task_xml, 'logTexts')
for match in task.get('matches', []):
lt_xml = XML.SubElement(
matches_xml,
'hudson.plugins.postbuildtask.LogProperties')
XML.SubElement(lt_xml, 'logText').text = str(
match.get('log-text', False) or '')
XML.SubElement(lt_xml, 'operator').text = str(
match.get('operator', 'AND')).upper()
XML.SubElement(task_xml, 'EscalateStatus').text = str(
task.get('escalate-status', False)).lower()
XML.SubElement(task_xml, 'RunIfJobSuccessful').text = str(
task.get('run-if-job-successful', False)).lower()
XML.SubElement(task_xml, 'script').text = str(
task.get('script', ''))
def postbuildscript(registry, xml_parent, data):
"""yaml: postbuildscript
Executes additional builders, scripts or Groovy after the build is
complete.
Requires the Jenkins :jenkins-wiki:`Post Build Script plugin
<PostBuildScript+Plugin>`.
:arg list generic-script: Paths to Batch/Shell scripts
:arg list groovy-script: Paths to Groovy scripts
:arg list groovy: Inline Groovy
:arg list builders: Any supported builders, see :doc:`builders`.
:arg bool onsuccess: Deprecated, replaced with script-only-if-succeeded
:arg bool script-only-if-succeeded: Scripts and builders are run only if
the build succeeded (default true)
:arg bool onfailure: Deprecated, replaced with script-only-if-failed
:arg bool script-only-if-failed: Scripts and builders are run only if the
build failed (default false)
:arg bool mark-unstable-if-failed: Mark the build unstable if the job
    completes successfully but the publisher
    script returns a non-zero exit code
    (default false)
:arg str execute-on: For matrix projects, scripts can be run after each
axis is built (`axes`), after all axis of the matrix
are built (`matrix`) or after each axis AND the matrix
are built (`both`).
The `script-only-if-succeeded` and `script-only-if-failed` options are
confusing. If you want the post build to always run regardless of the build
status, you should set them both to `false`.
Example:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript001.yaml
:language: yaml
You can also execute :doc:`builders </builders>`:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript002.yaml
:language: yaml
Run once after the whole matrix (all axes) is built:
.. literalinclude::
/../../tests/publishers/fixtures/postbuildscript003.yaml
:language: yaml
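
An illustrative sketch combining a script file and inline Groovy
(paths are placeholders):

.. code-block:: yaml

    publishers:
      - postbuildscript:
          generic-script:
            - 'scripts/cleanup.sh'
          groovy:
            - 'println "post build"'
          script-only-if-succeeded: false
          script-only-if-failed: false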
"""
pbs_xml = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.postbuildscript.PostBuildScript')
# Shell/Groovy in a file
script_types = {
'generic-script': 'GenericScript',
'groovy-script': 'GroovyScriptFile',
}
# Assuming yaml preserves order of input data make sure
# corresponding XML steps are generated in the same order
build_scripts = [(k, v) for k, v in data.items()
if k in script_types or k in ['groovy', 'builders']]
for step, script_data in build_scripts:
if step in script_types:
scripts_xml = XML.SubElement(pbs_xml, step[:-len('-script')] +
'ScriptFileList')
for shell_script in script_data:
script_xml = XML.SubElement(
scripts_xml,
'org.jenkinsci.plugins.postbuildscript.'
+ script_types[step])
file_path_xml = XML.SubElement(script_xml, 'filePath')
file_path_xml.text = shell_script
# Inlined Groovy
if step == 'groovy':
groovy_inline_xml = XML.SubElement(pbs_xml,
'groovyScriptContentList')
for groovy in script_data:
groovy_xml = XML.SubElement(
groovy_inline_xml,
'org.jenkinsci.plugins.postbuildscript.GroovyScriptContent'
)
groovy_content = XML.SubElement(groovy_xml, 'content')
groovy_content.text = groovy
# Inject builders
if step == 'builders':
build_steps_xml = XML.SubElement(pbs_xml, 'buildSteps')
for builder in script_data:
registry.dispatch('builder', build_steps_xml, builder)
# When to run the build? Note the plugin lets one specify both options
# although they are mutually exclusive
# onsuccess and onfailure parameters are deprecated, this is to keep
# backwards compatibility
success_xml = XML.SubElement(pbs_xml, 'scriptOnlyIfSuccess')
if 'script-only-if-succeeded' in data:
success_xml.text = str(data.get('script-only-if-succeeded',
True)).lower()
else:
success_xml.text = str(data.get('onsuccess', True)).lower()
failure_xml = XML.SubElement(pbs_xml, 'scriptOnlyIfFailure')
if 'script-only-if-failed' in data:
failure_xml.text = str(data.get('script-only-if-failed',
False)).lower()
else:
failure_xml.text = str(data.get('onfailure', False)).lower()
# Mark build unstable if the publisher script returns a non-zero exit code
XML.SubElement(pbs_xml, 'markBuildUnstable').text = str(
data.get('mark-unstable-if-failed', False)).lower()
# TODO: we may want to avoid setting "execute-on" on non-matrix jobs,
# either by skipping this part or by raising an error to let the user know
# an attempt was made to set execute-on on a non-matrix job. There are
# currently no easy ways to check for this though.
if 'execute-on' in data:
valid_values = ('matrix', 'axes', 'both')
execute_on = data['execute-on'].lower()
if execute_on not in valid_values:
        raise JenkinsJobsException(
            'execute-on must be one of %s, got %s' % (
                valid_values, execute_on))
execute_on_xml = XML.SubElement(pbs_xml, 'executeOn')
execute_on_xml.text = execute_on.upper()
def xml_summary(registry, xml_parent, data):
"""yaml: xml-summary
Adds support for the Summary Display Plugin
Requires the Jenkins :jenkins-wiki:`Summary Display Plugin
<Summary+Display+Plugin>`.
:arg str files: Files to parse (required)
:arg bool shown-on-project-page: Display summary on project page
(default false)
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/xml-summary-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/xml-summary-full.yaml
:language: yaml
"""
summary = XML.SubElement(
xml_parent, 'hudson.plugins.summary__report.ACIPluginPublisher')
summary.set('plugin', 'summary_report')
mapping = [
('files', 'name', None),
('shown-on-project-page', 'shownOnProjectPage', False),
]
helpers.convert_mapping_to_xml(summary, data, mapping, fail_required=True)
def robot(registry, xml_parent, data):
"""yaml: robot
Adds support for the Robot Framework Plugin
Requires the Jenkins :jenkins-wiki:`Robot Framework Plugin
<Robot+Framework+Plugin>`.
:arg str output-path: Path to directory containing robot xml and html files
relative to build workspace. (required)
:arg str log-file-link: Name of log or report file to be linked on jobs
front page (default '')
:arg str report-html: Name of the html file containing robot test report
(default 'report.html')
:arg str log-html: Name of the html file containing detailed robot test log
(default 'log.html')
:arg str output-xml: Name of the xml file containing robot output
(default 'output.xml')
:arg str pass-threshold: Minimum percentage of passed tests to consider
the build successful (default 0.0)
:arg str unstable-threshold: Minimum percentage of passed test to
consider the build as not failed (default 0.0)
:arg bool only-critical: Take only critical tests into account when
checking the thresholds (default true)
:arg list other-files: list other files to archive (default '')
:arg bool archive-output-xml: Archive output xml file to server
(default true)
:arg bool enable-cache: Enable cache for test results (default true)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/robot-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/robot-complete.yaml
:language: yaml
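
An illustrative sketch (the path and thresholds are placeholders):

.. code-block:: yaml

    publishers:
      - robot:
          output-path: 'reports/robot'
          pass-threshold: 95.0
          unstable-threshold: 80.0
          only-critical: false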
"""
parent = XML.SubElement(xml_parent, 'hudson.plugins.robot.RobotPublisher')
parent.set('plugin', 'robot')
mappings = [
('output-path', 'outputPath', None),
('log-file-link', 'logFileLink', ''),
('report-html', 'reportFileName', 'report.html'),
('log-html', 'logFileName', 'log.html'),
('output-xml', 'outputFileName', 'output.xml'),
('pass-threshold', 'passThreshold', '0.0'),
('unstable-threshold', 'unstableThreshold', '0.0'),
('only-critical', 'onlyCritical', True),
('enable-cache', 'enableCache', True)
]
helpers.convert_mapping_to_xml(parent, data, mappings, fail_required=True)
other_files = XML.SubElement(parent, 'otherFiles')
for other_file in data.get('other-files', []):
XML.SubElement(other_files, 'string').text = str(other_file)
XML.SubElement(parent, 'disableArchiveOutput').text = str(
not data.get('archive-output-xml', True)).lower()
def warnings(registry, xml_parent, data):
"""yaml: warnings
Generate trend report for compiler warnings in the console log or
in log files. Requires the Jenkins :jenkins-wiki:`Warnings Plugin
<Warnings+Plugin>`.
:arg list console-log-parsers: The parser to use to scan the console
log (default '')
:arg list workspace-file-scanners:
:workspace-file-scanners:
* **file-pattern** (`str`) -- Fileset 'includes' setting that
specifies the files to scan for warnings (required)
* **scanner** (`str`) -- The parser to use to scan the files
provided in workspace-file-pattern (default '')
:arg str files-to-include: Comma separated list of regular
expressions that specifies the files to include in the report
(based on their absolute filename). By default all files are
included
:arg str files-to-ignore: Comma separated list of regular expressions
that specifies the files to exclude from the report (based on their
absolute filename). (default '')
:arg bool run-always: By default, this plug-in runs only for stable or
unstable builds, but not for failed builds. Set to true if the
plug-in should run even for failed builds. (default false)
:arg bool detect-modules: Determines if Ant or Maven modules should be
detected for all files that contain warnings. Activating this
option may increase your build time since the detector scans
the whole workspace for 'build.xml' or 'pom.xml' files in order
to assign the correct module names. (default false)
:arg bool resolve-relative-paths: Determines if relative paths in
warnings should be resolved using a time expensive operation that
scans the whole workspace for matching files. Deactivate this
option if you encounter performance problems. (default false)
:arg int health-threshold-high: The upper threshold for the build
health. If left empty then no health report is created. If
the actual number of warnings is between the provided
thresholds then the build health is interpolated (default '')
:arg int health-threshold-low: The lower threshold for the build
health. See health-threshold-high. (default '')
:arg str health-priorities: Determines which warning priorities
    should be considered when evaluating the build health (default
    all-priorities)
:health-priorities values:
* **priority-high** -- Only priority high
* **high-and-normal** -- Priorities high and normal
* **all-priorities** -- All priorities
:arg dict total-thresholds: If the number of total warnings is greater
than one of these thresholds then a build is considered as unstable
or failed, respectively. (default '')
:total-thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
:arg dict new-thresholds: If the specified number of new warnings exceeds
one of these thresholds then a build is considered as unstable or
failed, respectively. (default '')
:new-thresholds:
* **unstable** (`dict`)
:unstable: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
         * **new-low** (`int`)
:arg bool use-delta-for-new-warnings: If set then the number of new
warnings is calculated by subtracting the total number of warnings
of the current build from the reference build. This may lead to wrong
results if you have both fixed and new warnings in a build. If not set,
then the number of new warnings is calculated by an asymmetric set
difference of the warnings in the current and reference build. This
will find all new warnings even if the number of total warnings is
decreasing. However, sometimes false positives will be reported due
    to minor changes in a warning (refactoring of variable or method
    names, etc.) (default false)
:arg bool use-previous-build-as-reference: If set the number of new
warnings will always be computed based on the previous build, even if
that build is unstable (due to a violated warning threshold).
    Otherwise the last build that did not violate any given threshold will
    be used as reference. It is recommended to uncheck this option if the
    plug-in should ensure that all new warnings will be finally fixed in
    subsequent builds. (default false)
:arg bool only-use-stable-builds-as-reference: The number of new warnings
will be calculated based on the last stable build, allowing reverts
of unstable builds where the number of warnings was decreased.
(default false)
:arg str default-encoding: Default encoding when parsing or showing files.
    Leave empty to use the default encoding of the platform (default '')
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/warnings-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/warnings-complete.yaml
:language: yaml
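
An illustrative sketch (the parser and scanner names shown are
examples; any parser known to the Warnings plugin can be used):

.. code-block:: yaml

    publishers:
      - warnings:
          console-log-parsers:
            - FxCop
          workspace-file-scanners:
            - file-pattern: '**/*.log'
              scanner: FxCop
          total-thresholds:
            unstable:
              total-all: 100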
"""
warnings = XML.SubElement(xml_parent,
'hudson.plugins.warnings.'
'WarningsPublisher')
warnings.set('plugin', 'warnings')
console = XML.SubElement(warnings, 'consoleParsers')
for parser in data.get('console-log-parsers', []):
console_parser = XML.SubElement(console,
'hudson.plugins.warnings.'
'ConsoleParser')
XML.SubElement(console_parser, 'parserName').text = parser
workspace = XML.SubElement(warnings, 'parserConfigurations')
for wfs in data.get('workspace-file-scanners', []):
workspace_pattern = XML.SubElement(workspace,
'hudson.plugins.warnings.'
'ParserConfiguration')
workspace_pattern_mappings = [
('file-pattern', 'pattern', None),
('scanner', 'parserName', '')
]
helpers.convert_mapping_to_xml(workspace_pattern,
wfs,
workspace_pattern_mappings,
fail_required=True)
prioritiesDict = {'priority-high': 'high',
'high-and-normal': 'normal',
'all-priorities': 'low'}
warnings_mappings = [
('files-to-include', 'includePattern', ''),
('files-to-ignore', 'excludePattern', ''),
('plugin-name', 'pluginName', '[WARNINGS]'),
('run-always', 'canRunOnFailed', False),
('detect-modules', 'shouldDetectModules', False),
('health-threshold-high', 'healthy', ''),
('health-threshold-low', 'unHealthy', ''),
('health-priorities',
'thresholdLimit',
'all-priorities',
prioritiesDict),
('default-encoding', 'defaultEncoding', '')
]
helpers.convert_mapping_to_xml(
warnings, data, warnings_mappings, fail_required=True)
# Note the logic reversal (included here to match the GUI)
XML.SubElement(warnings, 'doNotResolveRelativePaths').text = str(
not data.get('resolve-relative-paths', False)).lower()
td = XML.SubElement(warnings, 'thresholds')
for base in ["total", "new"]:
thresholds = data.get("%s-thresholds" % base, {})
for status in ["unstable", "failed"]:
bystatus = thresholds.get(status, {})
for level in ["all", "high", "normal", "low"]:
val = str(bystatus.get("%s-%s" % (base, level), ''))
XML.SubElement(td, "%s%s%s" % (status,
base.capitalize(), level.capitalize())
).text = val
if data.get('new-thresholds'):
XML.SubElement(warnings, 'dontComputeNew').text = 'false'
delta = data.get('use-delta-for-new-warnings', False)
XML.SubElement(warnings, 'useDeltaValues').text = str(delta).lower()
use_previous_build = data.get('use-previous-build-as-reference', False)
XML.SubElement(warnings, 'usePreviousBuildAsReference').text = str(
use_previous_build).lower()
use_stable_builds = data.get('only-use-stable-builds-as-reference',
False)
XML.SubElement(warnings, 'useStableBuildAsReference').text = str(
use_stable_builds).lower()
else:
XML.SubElement(warnings, 'dontComputeNew').text = 'true'
XML.SubElement(warnings, 'useDeltaValues').text = 'false'
XML.SubElement(warnings, 'usePreviousBuildAsReference').text = 'false'
XML.SubElement(warnings, 'useStableBuildAsReference').text = 'false'
def sloccount(registry, xml_parent, data):
"""yaml: sloccount
Generates the trend report for SLOCCount
Requires the Jenkins :jenkins-wiki:`SLOCCount Plugin <SLOCCount+Plugin>`.
:arg str report-files: Setting that specifies the generated raw
SLOCCount report files. Be sure not to include any non-report files
into this pattern. The report files must have been generated by
sloccount using the "--wide --details" options.
(default '\*\*/sloccount.sc')
:arg str charset: The character encoding to be used to read the SLOCCount
result files. (default 'UTF-8')
:arg int builds-in-graph: Maximal number of last successful builds, that
are displayed in the trend graphs. (default 0)
:arg bool comment-is-code: This option is considered only in the cloc
report parser and is ignored in the SLOCCount one. (default false)
:arg bool ignore-build-failure: Try to process the report files even if
the build is not successful. (default false)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/sloccount-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/publishers/fixtures/sloccount-complete.yaml
:language: yaml
"""
top = XML.SubElement(xml_parent,
'hudson.plugins.sloccount.SloccountPublisher')
top.set('plugin', 'sloccount')
mappings = [
('report-files', 'pattern', '**/sloccount.sc'),
('charset', 'encoding', 'UTF-8'),
('builds-in-graph', 'numBuildsInGraph', 0),
('comment-is-code', 'commentIsCode', False),
('ignore-build-failure', 'ignoreBuildFailure', False)
]
helpers.convert_mapping_to_xml(top, data, mappings, fail_required=True)
def ircbot(registry, xml_parent, data):
"""yaml: ircbot
ircbot enables Jenkins to send build notifications via IRC and lets you
interact with Jenkins via an IRC bot.
Requires the Jenkins :jenkins-wiki:`IRC Plugin <IRC+Plugin>`.
:arg string strategy: When to send notifications
:strategy values:
* **all** always (default)
* **any-failure** on any failure
* **failure-and-fixed** on failure and fixes
* **new-failure-and-fixed** on new failure and fixes
* **statechange-only** only on state change
:arg bool notify-start: Whether to send notifications to channels when a
build starts
(default false)
:arg bool notify-committers: Whether to send notifications to the users
that are suspected of having broken this build
(default false)
:arg bool notify-culprits: Also send notifications to 'culprits' from
previous unstable/failed builds
(default false)
:arg bool notify-upstream: Whether to send notifications to upstream
committers if no committers were found for a
broken build
(default false)
:arg bool notify-fixers: Whether to send notifications to the users that
have fixed a broken build
(default false)
:arg string message-type: Channel Notification Message.
:message-type values:
* **summary-scm** for summary and SCM changes (default)
* **summary** for summary only
* **summary-params** for summary and build parameters
        * **summary-scm-fail** for summary, SCM changes and failures
:arg list channels: list of channel definitions.
    If empty, the channel is taken from the Jenkins configuration.
    (default empty)
WARNING: the IRC plugin requires the channel to be
configured in the system wide configuration or the jobs
will fail to emit notifications to the channel
:Channel: * **name** (`str`) Channel name
* **password** (`str`) Channel password (optional)
* **notify-only** (`bool`) Set to true if you want to
disallow bot commands (default false)
:arg string matrix-notifier: notify for matrix projects;
    the instant-messaging plugin injects an additional
    field in the configuration form whenever the
    project is a multi-configuration project
:matrix-notifier values:
* **all**
* **only-configurations** (default)
* **only-parent**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ircbot001.yaml
:language: yaml
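
An illustrative sketch (the channel name is a placeholder):

.. code-block:: yaml

    publishers:
      - ircbot:
          strategy: failure-and-fixed
          message-type: summary
          channels:
            - name: '#jenkins-builds'
              notify-only: true
          matrix-notifier: only-parent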
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.ircbot.IrcPublisher')
message_dict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-params': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
message = data.get('message-type', 'summary-scm')
if message not in message_dict:
raise JenkinsJobsException("message-type entered is not valid, must "
"be one of: %s" %
", ".join(message_dict.keys()))
message = "hudson.plugins.im.build_notify." + message_dict.get(message)
XML.SubElement(top, 'buildToChatNotifier', attrib={'class': message})
strategy_dict = {'all': 'ALL',
'any-failure': 'ANY_FAILURE',
'failure-and-fixed': 'FAILURE_AND_FIXED',
'new-failure-and-fixed': 'NEW_FAILURE_AND_FIXED',
'statechange-only': 'STATECHANGE_ONLY'}
strategy = data.get('strategy', 'all')
if strategy not in strategy_dict:
raise JenkinsJobsException("strategy entered is not valid, must be "
"one of: %s" %
", ".join(strategy_dict.keys()))
XML.SubElement(top, 'strategy').text = strategy_dict.get(strategy)
targets = XML.SubElement(top, 'targets')
channels = data.get('channels', [])
for channel in channels:
sub = XML.SubElement(targets,
'hudson.plugins.im.GroupChatIMMessageTarget')
XML.SubElement(sub, 'name').text = channel.get('name')
XML.SubElement(sub, 'password').text = channel.get('password')
XML.SubElement(sub, 'notificationOnly').text = str(
channel.get('notify-only', False)).lower()
XML.SubElement(top, 'notifyOnBuildStart').text = str(
data.get('notify-start', False)).lower()
XML.SubElement(top, 'notifySuspects').text = str(
data.get('notify-committers', False)).lower()
XML.SubElement(top, 'notifyCulprits').text = str(
data.get('notify-culprits', False)).lower()
XML.SubElement(top, 'notifyFixers').text = str(
data.get('notify-fixers', False)).lower()
XML.SubElement(top, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream', False)).lower()
matrix_dict = {'all': 'ALL',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix = data.get('matrix-notifier', 'only-configurations')
if matrix not in matrix_dict:
raise JenkinsJobsException("matrix-notifier entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(top, 'matrixMultiplier').text = matrix_dict.get(matrix)
def plot(registry, xml_parent, data):
"""yaml: plot
Plot provides generic plotting (or graphing).
Requires the Jenkins :jenkins-wiki:`Plot Plugin <Plot+Plugin>`.
:arg str title: title for the graph (default '')
:arg str yaxis: title of Y axis (default '')
:arg int width: the width of the plot in pixels (default 750)
:arg int height: the height of the plot in pixels (default 450)
:arg str group: name of the group to which the plot belongs (required)
:arg int num-builds: number of builds to plot across
(default plot all builds)
:arg str style: Specifies the graph style of the plot.
    Can be: area, bar, bar3d, line, line3d, stackedArea, stackedbar,
    stackedbar3d, waterfall (default 'line')
:arg bool use-description: When false, the X-axis labels are formed using
build numbers and dates, and the corresponding tooltips contain the
build descriptions. When enabled, the contents of the labels and
tooltips are swapped, with the descriptions used as X-axis labels and
the build number and date used for tooltips. (default false)
:arg bool exclude-zero-yaxis: When false, Y-axis contains the value zero
even if it is not included in the data series. When true, the value
zero is not automatically included. (default false)
:arg bool logarithmic-yaxis: When true, the Y-axis will use a logarithmic
scale. By default, the Y-axis uses a linear scale. (default false)
:arg bool keep-records: When true, show all builds up to 'Number of
builds to include'. (default false)
:arg str csv-file-name: Use for choosing the file name in which the data
    will be persisted. If none is specified, a random name is generated,
    as done in the Jenkins Plot plugin. (default: randomly generated .csv
    filename, same behaviour as the Jenkins Plot plugin)
:arg list series: list of data series definitions
    :Series: * **file** (`str`) : files to include
* **inclusion-flag** filtering mode for CSV files. Possible
values are:
* **off** (default)
* **include-by-string**
* **exclude-by-string**
* **include-by-column**
* **exclude-by-column**
* **exclude** (`str`) : exclude pattern for CSV file.
* **url** (`str`) : for 'csv' and 'xml' file types
used when you click on a point (default empty)
* **display-table** (`bool`) : for 'csv' file type
if true, original CSV will be shown above plot (default false)
* **label** (`str`) : used by 'properties' file type
Specifies the legend label for this data series.
(default empty)
        * **format** (`str`) : Type of file from which the data is read.
          Can be: properties, csv, xml
* **xpath-type** (`str`) : The result type of the expression must
          be supplied due to limitations in the javax.xml.xpath parsing.
The result can be: node, nodeset, boolean, string, or number.
Strings and numbers will be converted to double. Boolean will
be converted to 1 for true, and 0 for false. (default 'node')
* **xpath** (`str`) : used by 'xml' file type
Xpath which selects the values that should be plotted.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/plot004.yaml
:language: yaml
.. literalinclude:: /../../tests/publishers/fixtures/plot005.yaml
:language: yaml
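
An illustrative sketch of a single CSV-based plot (titles and file
names are placeholders):

.. code-block:: yaml

    publishers:
      - plot:
          - title: 'CPU usage'
            yaxis: 'percent'
            group: 'Performance'
            style: line
            series:
              - file: 'cpu.csv'
                format: csv
                inclusion-flag: 'off'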
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.plot.PlotPublisher')
plots = XML.SubElement(top, 'plots')
format_dict = {'properties': 'hudson.plugins.plot.PropertiesSeries',
'csv': 'hudson.plugins.plot.CSVSeries',
'xml': 'hudson.plugins.plot.XMLSeries'}
xpath_dict = {'nodeset': 'NODESET', 'node': 'NODE', 'string': 'STRING',
'boolean': 'BOOLEAN', 'number': 'NUMBER'}
inclusion_dict = {'off': 'OFF',
'include-by-string': 'INCLUDE_BY_STRING',
'exclude-by-string': 'EXCLUDE_BY_STRING',
'include-by-column': 'INCLUDE_BY_COLUMN',
'exclude-by-column': 'EXCLUDE_BY_COLUMN'}
for plot in data:
plugin = XML.SubElement(plots, 'hudson.plugins.plot.Plot')
XML.SubElement(plugin, 'title').text = plot.get('title', '')
    XML.SubElement(plugin, 'yaxis').text = plot.get('yaxis', '')
XML.SubElement(plugin, 'width').text = str(plot.get('width', '750'))
XML.SubElement(plugin, 'height').text = str(plot.get('height', '450'))
XML.SubElement(plugin, 'csvFileName').text = \
plot.get('csv-file-name', '%s.csv' % random.randrange(2 << 32))
topseries = XML.SubElement(plugin, 'series')
series = plot['series']
for serie in series:
format_data = serie.get('format')
if format_data not in format_dict:
raise JenkinsJobsException("format entered is not valid, must "
"be one of: %s" %
" , ".join(format_dict.keys()))
subserie = XML.SubElement(topseries, format_dict.get(format_data))
XML.SubElement(subserie, 'file').text = serie.get('file')
if format_data == 'properties':
XML.SubElement(subserie, 'label').text = serie.get('label', '')
if format_data == 'csv':
inclusion_flag = serie.get('inclusion-flag', 'off')
if inclusion_flag not in inclusion_dict:
raise JenkinsJobsException("Inclusion flag result entered "
"is not valid, must be one of: "
"%s"
% ", ".join(inclusion_dict))
XML.SubElement(subserie, 'inclusionFlag').text = \
inclusion_dict.get(inclusion_flag)
XML.SubElement(subserie, 'exclusionValues').text = \
serie.get('exclude', '')
if serie.get('exclude', ''):
exclude_strings = serie.get('exclude', '').split(',')
exclusionset = XML.SubElement(subserie, 'strExclusionSet')
for exclude_string in exclude_strings:
XML.SubElement(exclusionset, 'string').text = \
exclude_string
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'displayTableFlag').text = \
str(serie.get('display-table', False)).lower()
if format_data == 'xml':
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'xpathString').text = \
serie.get('xpath')
xpathtype = serie.get('xpath-type', 'node')
if xpathtype not in xpath_dict:
raise JenkinsJobsException("XPath result entered is not "
"valid, must be one of: %s" %
", ".join(xpath_dict))
XML.SubElement(subserie, 'nodeTypeString').text = \
xpath_dict.get(xpathtype)
XML.SubElement(subserie, 'fileType').text = serie.get('format')
mappings = [
('group', 'group', None),
('use-description', 'useDescr', False),
('exclude-zero-yaxis', 'exclZero', False),
('logarithmic-yaxis', 'logarithmic', False),
('keep-records', 'keepRecords', False),
('num-builds', 'numBuilds', '')]
helpers.convert_mapping_to_xml(
plugin, plot, mappings, fail_required=True)
style_list = ['area', 'bar', 'bar3d', 'line', 'line3d', 'stackedArea',
'stackedbar', 'stackedbar3d', 'waterfall']
style = plot.get('style', 'line')
if style not in style_list:
raise JenkinsJobsException("style entered is not valid, must be "
"one of: %s" % ", ".join(style_list))
XML.SubElement(plugin, 'style').text = style
def git(registry, xml_parent, data):
"""yaml: git
This plugin will configure the Jenkins Git plugin to
push merge results, tags, and/or branches to
remote repositories after the job completes.
Requires the Jenkins :jenkins-wiki:`Git Plugin <Git+Plugin>`.
:arg bool push-merge: push merges back to the origin specified in the
pre-build merge options (default false)
:arg bool push-only-if-success: Only push to remotes if the build succeeds
- otherwise, nothing will be pushed.
(default true)
:arg bool force-push: Add force option to git push (default false)
:arg list tags: tags to push at the completion of the build
:tag: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **name** (`str`) name of tag to push
* **message** (`str`) message content of the tag
        * **create-tag** (`bool`) whether or not to create the tag
          after the build; if false, the tag needs to
          exist locally (default false)
* **update-tag** (`bool`) whether to overwrite a remote tag
or not (default false)
:arg list branches: branches to push at the completion of the build
:branch: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **name** (`str`) name of remote branch to push to
:arg list notes: notes to push at the completion of the build
:note: * **remote** (`str`) remote repo name to push to
(default 'origin')
* **message** (`str`) content of the note
* **namespace** (`str`) namespace of the note
(default master)
* **replace-note** (`bool`) whether to overwrite a note or not
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/git001.yaml
:language: yaml
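
An illustrative sketch pushing a tag and a branch (names are
placeholders):

.. code-block:: yaml

    publishers:
      - git:
          push-only-if-success: true
          tags:
            - tag:
                remote: origin
                name: 'built-$BUILD_NUMBER'
                create-tag: true
          branches:
            - branch:
                remote: origin
                name: 'deployed'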
"""
mappings = [('push-merge', 'pushMerge', False),
('push-only-if-success', 'pushOnlyIfSuccess', True),
('force-push', 'forcePush', False)]
tag_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'tagName', None),
('message', 'tagMessage', ''),
('create-tag', 'createTag', False),
('update-tag', 'updateTag', False)]
branch_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'branchName', None)]
note_mappings = [('remote', 'targetRepoName', 'origin'),
('message', 'noteMsg', None),
('namespace', 'noteNamespace', 'master'),
('replace-note', 'noteReplace', False)]
top = XML.SubElement(xml_parent, 'hudson.plugins.git.GitPublisher')
XML.SubElement(top, 'configVersion').text = '2'
helpers.convert_mapping_to_xml(top, data, mappings, fail_required=True)
tags = data.get('tags', [])
if tags:
xml_tags = XML.SubElement(top, 'tagsToPush')
for tag in tags:
xml_tag = XML.SubElement(
xml_tags,
'hudson.plugins.git.GitPublisher_-TagToPush')
helpers.convert_mapping_to_xml(
xml_tag, tag['tag'], tag_mappings, fail_required=True)
branches = data.get('branches', [])
if branches:
xml_branches = XML.SubElement(top, 'branchesToPush')
for branch in branches:
xml_branch = XML.SubElement(
xml_branches,
'hudson.plugins.git.GitPublisher_-BranchToPush')
helpers.convert_mapping_to_xml(xml_branch,
branch['branch'],
branch_mappings,
fail_required=True)
notes = data.get('notes', [])
if notes:
xml_notes = XML.SubElement(top, 'notesToPush')
for note in notes:
xml_note = XML.SubElement(
xml_notes,
'hudson.plugins.git.GitPublisher_-NoteToPush')
helpers.convert_mapping_to_xml(
xml_note, note['note'], note_mappings, fail_required=True)
def github_notifier(registry, xml_parent, data):
"""yaml: github-notifier
Set build status on Github commit.
Requires the Jenkins :jenkins-wiki:`Github Plugin <GitHub+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/github-notifier.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'com.cloudbees.jenkins.GitHubCommitNotifier')
def gitlab_notifier(registry, xml_parent, data):
"""yaml: gitlab-notifier
Set build status on GitLab commit.
Requires the Jenkins :jenkins-wiki:`GitLab Plugin <GitLab+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/gitlab-notifier.yaml
:language: yaml
"""
XML.SubElement(
xml_parent,
'com.dabsquared.gitlabjenkins.publisher.GitLabCommitStatusPublisher')
def zulip(registry, xml_parent, data):
"""yaml: zulip
Set build status on zulip.
Requires the Jenkins :jenkins-wiki:`Humbug Plugin <Humbug+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/zulip.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.humbug.HumbugNotifier')
def build_publisher(registry, xml_parent, data):
"""yaml: build-publisher
This plugin allows records from one Jenkins to be published
on another Jenkins.
Requires the Jenkins :jenkins-wiki:`Build Publisher Plugin
<Build+Publisher+Plugin>`.
:arg bool publish-unstable-builds: publish unstable builds (default true)
:arg bool publish-failed-builds: publish failed builds (default true)
:arg int days-to-keep: days to keep when publishing results (optional)
:arg int num-to-keep: number of jobs to keep in the published results
(optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/build-publisher002.yaml
:language: yaml
"""
reporter = XML.SubElement(
xml_parent,
'hudson.plugins.build__publisher.BuildPublisher')
XML.SubElement(reporter, 'publishUnstableBuilds').text = \
str(data.get('publish-unstable-builds', True)).lower()
XML.SubElement(reporter, 'publishFailedBuilds').text = \
str(data.get('publish-failed-builds', True)).lower()
if 'days-to-keep' in data or 'num-to-keep' in data:
logrotator = XML.SubElement(reporter, 'logRotator')
XML.SubElement(logrotator, 'daysToKeep').text = \
str(data.get('days-to-keep', -1))
XML.SubElement(logrotator, 'numToKeep').text = \
str(data.get('num-to-keep', -1))
    # hardcoded to -1 to emulate what the build publisher
    # plugin seems to do.
XML.SubElement(logrotator, 'artifactDaysToKeep').text = "-1"
XML.SubElement(logrotator, 'artifactNumToKeep').text = "-1"
def stash(registry, xml_parent, data):
"""yaml: stash
This plugin will configure the Jenkins Stash Notifier plugin to
notify Atlassian Stash after the job completes.
Requires the Jenkins :jenkins-wiki:`StashNotifier Plugin
<StashNotifier+Plugin>`.
:arg string url: Base url of Stash Server (default "")
:arg string username: Username of Stash Server (default "")
:arg string password: Password of Stash Server (default "")
:arg string credentials-id: Credentials of Stash Server (optional)
:arg bool ignore-ssl: Ignore unverified SSL certificate (default false)
:arg string commit-sha1: Commit SHA1 to notify (default "")
:arg bool include-build-number: Include build number in key
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/stash001.yaml
:language: yaml
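
An illustrative sketch using stored credentials (URL and credentials
id are placeholders):

.. code-block:: yaml

    publishers:
      - stash:
          url: 'https://stash.example.com'
          credentials-id: 'stash-credentials'
          ignore-ssl: false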
"""
top = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.stashNotifier.StashNotifier')
XML.SubElement(top, 'stashServerBaseUrl').text = data.get('url', '')
if data.get('credentials-id') is not None:
XML.SubElement(top, 'credentialsId').text = str(
data.get('credentials-id'))
else:
XML.SubElement(top, 'stashUserName'
).text = helpers.get_value_from_yaml_or_config_file(
'username', 'stash', data, registry.jjb_config)
XML.SubElement(top, 'stashUserPassword'
).text = helpers.get_value_from_yaml_or_config_file(
'password', 'stash', data, registry.jjb_config)
XML.SubElement(top, 'ignoreUnverifiedSSLPeer').text = str(
data.get('ignore-ssl', False)).lower()
XML.SubElement(top, 'commitSha1').text = data.get('commit-sha1', '')
XML.SubElement(top, 'includeBuildNumberInKey').text = str(
data.get('include-build-number', False)).lower()
def dependency_check(registry, xml_parent, data):
"""yaml: dependency-check
Dependency-Check is an open source utility that identifies project
dependencies and checks if there are any known, publicly disclosed,
vulnerabilities.
Requires the Jenkins :jenkins-wiki:`OWASP Dependency-Check Plugin
<OWASP+Dependency-Check+Plugin>`.
:arg str pattern: Report filename pattern (optional)
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status:
    'low', 'normal' or 'high' (default 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build.
(default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/dependency-check001.yaml
:language: yaml
"""
dependency_check = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.DependencyCheck.DependencyCheckPublisher')
# trends
helpers.build_trends_publisher(
'[DEPENDENCYCHECK] ', dependency_check, data)
def description_setter(registry, xml_parent, data):
"""yaml: description-setter
This plugin sets the description for each build,
based upon a RegEx test of the build log file.
Requires the Jenkins :jenkins-wiki:`Description Setter Plugin
<Description+Setter+Plugin>`.
:arg str regexp: A RegEx which is used to scan the build log file
:arg str regexp-for-failed: A RegEx which is used for failed builds
(optional)
:arg str description: The description to set on the build (optional)
:arg str description-for-failed: The description to set on
the failed builds (optional)
:arg bool set-for-matrix: Also set the description on
a multi-configuration build (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/description-setter001.yaml
:language: yaml
"""
descriptionsetter = XML.SubElement(
xml_parent,
'hudson.plugins.descriptionsetter.DescriptionSetterPublisher')
XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '')
XML.SubElement(descriptionsetter, 'regexpForFailed').text = \
data.get('regexp-for-failed', '')
if 'description' in data:
XML.SubElement(descriptionsetter, 'description').text = \
data['description']
if 'description-for-failed' in data:
XML.SubElement(descriptionsetter, 'descriptionForFailed').text = \
data['description-for-failed']
for_matrix = str(data.get('set-for-matrix', False)).lower()
XML.SubElement(descriptionsetter, 'setForMatrix').text = for_matrix
def doxygen(registry, xml_parent, data):
"""yaml: doxygen
This plugin parses the Doxygen descriptor (Doxyfile) and provides a link to
the generated Doxygen documentation.
Requires the Jenkins :jenkins-wiki:`Doxygen Plugin <Doxygen+Plugin>`.
:arg str doxyfile: The doxyfile path
:arg str slave: The node or label to pull the doxygen HTML files from
:arg bool keep-all: Retain doxygen generation for each successful build
(default false)
:arg str folder: Folder where you run doxygen (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/doxygen001.yaml
:language: yaml
"""
logger = logging.getLogger(__name__)
p = XML.SubElement(xml_parent, 'hudson.plugins.doxygen.DoxygenArchiver')
if not data.get('doxyfile'):
raise JenkinsJobsException('The path to a doxyfile must be specified.')
XML.SubElement(p, 'doxyfilePath').text = str(data.get('doxyfile'))
XML.SubElement(p, 'runOnChild').text = str(data.get('slave', ''))
# backward compatibility
if 'keepall' in data:
if 'keep-all' in data:
XML.SubElement(p, 'keepAll').text = str(
data.get('keep-all', False)).lower()
logger.warning("The value of 'keepall' will be ignored "
"in preference to 'keep-all'.")
else:
XML.SubElement(p, 'keepAll').text = str(
data.get('keepall', False)).lower()
logger.warning("'keepall' is deprecated please use 'keep-all'")
else:
XML.SubElement(p, 'keepAll').text = str(
data.get('keep-all', False)).lower()
XML.SubElement(p, 'folderWhereYouRunDoxygen').text = str(
data.get('folder', ''))
def sitemonitor(registry, xml_parent, data):
"""yaml: sitemonitor
This plugin checks the availability of a URL.
It requires the :jenkins-wiki:`sitemonitor plugin <SiteMonitor+Plugin>`.
:arg list sites: List of URLs to check
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sitemonitor001.yaml
:language: yaml
"""
mon = XML.SubElement(xml_parent,
'hudson.plugins.sitemonitor.SiteMonitorRecorder')
if data.get('sites'):
sites = XML.SubElement(mon, 'mSites')
for siteurl in data.get('sites'):
site = XML.SubElement(sites,
'hudson.plugins.sitemonitor.model.Site')
XML.SubElement(site, 'mUrl').text = siteurl['url']
def testng(registry, xml_parent, data):
"""yaml: testng
This plugin publishes TestNG test reports.
Requires the Jenkins :jenkins-wiki:`TestNG Results Plugin <testng-plugin>`.
:arg str pattern: filename pattern to locate the TestNG XML report files
(required)
:arg bool escape-test-description: escapes the description string
associated with the test method while displaying test method details
(default true)
:arg bool escape-exception-msg: escapes the test method's exception
messages. (default true)
:arg bool fail-on-failed-test-config: Allows for a distinction between
failing tests and failing configuration methods (>=1.10) (default
false)
:arg bool show-failed-builds: include results from failed builds in the
trend graph (>=1.6) (default false)
:arg int unstable-skips: Build is marked UNSTABLE if the number/percentage
of skipped tests exceeds the specified threshold (>=1.11) (default 100)
:arg int unstable-fails: Build is marked UNSTABLE if the number/percentage
of failed tests exceeds the specified threshold (>=1.11) (default 0)
:arg int failed-skips: Build is marked FAILURE if the number/percentage of
skipped tests exceeds the specified threshold (>=1.11) (default 100)
:arg int failed-fails: Build is marked FAILURE if the number/percentage of
failed tests exceeds the specified threshold (>=1.11) (default 100)
:arg str threshold-mode: Interpret threshold as number of tests or
percentage of tests (>=1.11) (default percentage)
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/testng-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/testng-minimal.yaml
:language: yaml
"""
reporter = XML.SubElement(xml_parent, 'hudson.plugins.testng.Publisher')
reporter.set('plugin', 'testng-plugin')
threshold_modes = {
'number': 1,
'percentage': 2}
mappings = [
('pattern', 'reportFilenamePattern', None),
('escape-test-description', 'escapeTestDescp', True),
('escape-exception-msg', 'escapeExceptionMsg', True),
('fail-on-failed-test-config', 'failureOnFailedTestConfig', False),
('show-failed-builds', 'showFailedBuilds', False),
('unstable-skips', 'unstableSkips', 100),
('unstable-fails', 'unstableFails', 0),
('failed-skips', 'failedSkips', 100),
('failed-fails', 'failedFails', 100),
('threshold-mode', 'thresholdMode', 'percentage', threshold_modes)
]
helpers.convert_mapping_to_xml(
reporter, data, mappings, fail_required=True)
def artifact_deployer(registry, xml_parent, data):
"""yaml: artifact-deployer
This plugin makes it possible to copy artifacts to remote locations.
Requires the Jenkins :jenkins-wiki:`ArtifactDeployer Plugin
<ArtifactDeployer+Plugin>`.
:arg list entries:
:entries:
* **files** (`str`) - files to deploy
        * **basedir** (`str`) - the directory from which files are deployed
* **excludes** (`str`) - the mask to exclude files
* **remote** (`str`) - a remote output directory
* **flatten** (`bool`) - ignore the source directory structure
(default false)
* **delete-remote** (`bool`) - clean-up remote directory
before deployment (default false)
* **delete-remote-artifacts** (`bool`) - delete remote artifacts
when the build is deleted (default false)
* **fail-no-files** (`bool`) - fail build if there are no files
(default false)
* **groovy-script** (`str`) - execute a Groovy script
before a build is deleted
:arg bool deploy-if-fail: Deploy even if the build failed (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifact-dep.yaml
:language: yaml
"""
deployer = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.artifactdeployer.'
'ArtifactDeployerPublisher')
if data is None or 'entries' not in data:
    raise JenkinsJobsException('entries field is missing')
elif data.get('entries', None) is None:
entries = XML.SubElement(deployer, 'entries', {'class': 'empty-list'})
else:
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
deployer_entry = XML.SubElement(
entries,
'org.jenkinsci.plugins.artifactdeployer.ArtifactDeployerEntry')
XML.SubElement(deployer_entry, 'includes').text = \
entry.get('files')
XML.SubElement(deployer_entry, 'basedir').text = \
entry.get('basedir')
XML.SubElement(deployer_entry, 'excludes').text = \
entry.get('excludes')
XML.SubElement(deployer_entry, 'remote').text = entry.get('remote')
XML.SubElement(deployer_entry, 'flatten').text = \
str(entry.get('flatten', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemote').text = \
str(entry.get('delete-remote', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemoteArtifacts').text = \
str(entry.get('delete-remote-artifacts', False)).lower()
XML.SubElement(deployer_entry, 'failNoFilesDeploy').text = \
str(entry.get('fail-no-files', False)).lower()
XML.SubElement(deployer_entry, 'groovyExpression').text = \
entry.get('groovy-script')
deploy_if_fail = str(data.get('deploy-if-fail', False)).lower()
XML.SubElement(deployer, 'deployEvenBuildFail').text = deploy_if_fail
def s3(registry, xml_parent, data):
"""yaml: s3
Upload build artifacts to Amazon S3.
Requires the Jenkins :jenkins-wiki:`S3 plugin <S3+Plugin>`.
:arg str s3-profile: Globally-defined S3 profile to use
:arg list entries:
:entries:
* **destination-bucket** (`str`) - Destination S3 bucket
* **source-files** (`str`) - Source files (Ant glob syntax)
* **storage-class** (`str`) - S3 storage class; one of "STANDARD"
or "REDUCED_REDUNDANCY"
* **bucket-region** (`str`) - S3 bucket region (capitalized with
underscores)
* **upload-on-failure** (`bool`) - Upload files even if the build
failed (default false)
* **upload-from-slave** (`bool`) - Perform the upload directly from
the Jenkins slave rather than the master node. (default false)
* **managed-artifacts** (`bool`) - Let Jenkins fully manage the
published artifacts, similar to when artifacts are published to
the Jenkins master. (default false)
* **s3-encryption** (`bool`) - Use S3 AES-256 server side encryption
support. (default false)
* **flatten** (`bool`) - Ignore the directory structure of the
artifacts in the source project and copy all matching artifacts
directly into the specified bucket. (default false)
:arg list metadata-tags:
:metadata-tags:
* **key** Metadata key for files from this build. It will be
prefixed by "x-amz-meta-" when uploaded to S3. Can contain macros
(e.g. environment variables).
* **value** Metadata value associated with the key. Can contain macros.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/s3001.yaml
:language: yaml
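
    A minimal inline sketch (profile, bucket and region values are
    illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - s3:
              s3-profile: my-profile         # illustrative profile name
              entries:
                - destination-bucket: my-bucket
                  source-files: 'build/**'
                  storage-class: STANDARD
                  bucket-region: US_EAST_1
              metadata-tags:
                - key: build-id
                  value: ${BUILD_ID}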
"""
deployer = XML.SubElement(xml_parent,
'hudson.plugins.s3.S3BucketPublisher')
if data is None or not data.get('entries'):
raise JenkinsJobsException('No filesets defined.')
XML.SubElement(deployer, 'profileName').text = data.get('s3-profile')
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
fileset = XML.SubElement(entries, 'hudson.plugins.s3.Entry')
# xml keys -> yaml keys
settings = [('bucket', 'destination-bucket', ''),
('sourceFile', 'source-files', ''),
('storageClass', 'storage-class', ''),
('selectedRegion', 'bucket-region', ''),
('noUploadOnFailure', 'upload-on-failure', False),
('uploadFromSlave', 'upload-from-slave', False),
('managedArtifacts', 'managed-artifacts', False),
('useServerSideEncryption', 's3-encryption', False),
('flatten', 'flatten', False)]
for xml_key, yaml_key, default in settings:
xml_config = XML.SubElement(fileset, xml_key)
config_value = entry.get(yaml_key, default)
if xml_key == 'noUploadOnFailure':
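                # yaml 'upload-on-failure' is inverted into the XML
                # 'noUploadOnFailure' flag, hence the negation below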
xml_config.text = str(not config_value).lower()
elif isinstance(default, bool):
xml_config.text = str(config_value).lower()
else:
xml_config.text = str(config_value)
metadata = XML.SubElement(deployer, 'userMetadata')
for tag in data.get('metadata-tags', []):
pair = XML.SubElement(metadata, 'hudson.plugins.s3.MetadataPair')
XML.SubElement(pair, 'key').text = tag.get('key')
XML.SubElement(pair, 'value').text = tag.get('value')
def ruby_metrics(registry, xml_parent, data):
"""yaml: ruby-metrics
    The Rcov plugin parses rcov html report files and
    shows them in Jenkins with a trend graph.
Requires the Jenkins :jenkins-wiki:`Ruby metrics plugin
<RubyMetrics+plugin>`.
:arg str report-dir: Relative path to the coverage report directory
:arg dict targets:
:targets: (total-coverage, code-coverage)
* **healthy** (`int`): Healthy threshold
* **unhealthy** (`int`): Unhealthy threshold
* **unstable** (`int`): Unstable threshold
Example:
.. literalinclude:: /../../tests/publishers/fixtures/ruby-metrics.yaml
:language: yaml
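
    A minimal inline sketch (the thresholds are illustrative; note that the
    implementation above reads the singular ``target`` key):

    .. code-block:: yaml

        publishers:
          - ruby-metrics:
              report-dir: 'coverage/rcov'    # illustrative path
              target:
                - total-coverage:
                    healthy: 80
                    unhealthy: 50
                    unstable: 30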
"""
metrics = XML.SubElement(
xml_parent,
'hudson.plugins.rubyMetrics.rcov.RcovPublisher')
report_dir = data.get('report-dir', '')
XML.SubElement(metrics, 'reportDir').text = report_dir
targets = XML.SubElement(metrics, 'targets')
if 'target' in data:
for t in data['target']:
if not ('code-coverage' in t or 'total-coverage' in t):
raise JenkinsJobsException('Unrecognized target name')
el = XML.SubElement(
targets,
'hudson.plugins.rubyMetrics.rcov.model.MetricTarget')
if 'total-coverage' in t:
XML.SubElement(el, 'metric').text = 'TOTAL_COVERAGE'
else:
XML.SubElement(el, 'metric').text = 'CODE_COVERAGE'
for threshold_name, threshold_value in \
next(iter(t.values())).items():
elname = threshold_name.lower()
XML.SubElement(el, elname).text = str(threshold_value)
else:
raise JenkinsJobsException('Coverage metric targets must be set')
def fitnesse(registry, xml_parent, data):
"""yaml: fitnesse
Publish Fitnesse test results
Requires the Jenkins :jenkins-wiki:`Fitnesse plugin <Fitnesse+Plugin>`.
:arg str results: path specifier for results files
Example:
.. literalinclude:: /../../tests/publishers/fixtures/fitnesse001.yaml
:language: yaml
"""
fitnesse = XML.SubElement(
xml_parent,
'hudson.plugins.fitnesse.FitnesseResultsRecorder')
results = data.get('results', '')
XML.SubElement(fitnesse, 'fitnessePathToXmlResultsIn').text = results
def valgrind(registry, xml_parent, data):
"""yaml: valgrind
This plugin publishes Valgrind Memcheck XML results.
Requires the Jenkins :jenkins-wiki:`Valgrind Plugin <Valgrind+Plugin>`.
:arg str pattern: Filename pattern to locate the Valgrind XML report files
(required)
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. All threshold values are optional.
:thresholds:
* **unstable** (`dict`)
:unstable: * **invalid-read-write** (`int`)
* **definitely-lost** (`int`)
* **total** (`int`)
* **failed** (`dict`)
:failed: * **invalid-read-write** (`int`)
* **definitely-lost** (`int`)
* **total** (`int`)
:arg bool fail-no-reports: Fail build if no reports are found
(default false)
:arg bool fail-invalid-reports: Fail build if reports are malformed
(default false)
:arg bool publish-if-aborted: Publish results for aborted builds
(default false)
:arg bool publish-if-failed: Publish results for failed builds
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/valgrind001.yaml
:language: yaml
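
    A minimal inline sketch (the pattern and threshold numbers are
    illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - valgrind:
              pattern: '**/valgrind.xml'     # illustrative report pattern
              thresholds:
                unstable:
                  definitely-lost: 0
                failed:
                  total: 10
              fail-no-reports: true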
"""
p = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.valgrind.ValgrindPublisher')
p = XML.SubElement(p, 'valgrindPublisherConfig')
if 'pattern' not in data:
raise JenkinsJobsException("A filename pattern must be specified.")
XML.SubElement(p, 'pattern').text = data['pattern']
dthresholds = data.get('thresholds', {})
for threshold in ['unstable', 'failed']:
dthreshold = dthresholds.get(threshold, {})
threshold = threshold.replace('failed', 'fail')
XML.SubElement(p, '%sThresholdInvalidReadWrite' % threshold).text \
= str(dthreshold.get('invalid-read-write', ''))
XML.SubElement(p, '%sThresholdDefinitelyLost' % threshold).text \
= str(dthreshold.get('definitely-lost', ''))
XML.SubElement(p, '%sThresholdTotal' % threshold).text \
= str(dthreshold.get('total', ''))
XML.SubElement(p, 'failBuildOnMissingReports').text = str(
data.get('fail-no-reports', False)).lower()
XML.SubElement(p, 'failBuildOnInvalidReports').text = str(
data.get('fail-invalid-reports', False)).lower()
XML.SubElement(p, 'publishResultsForAbortedBuilds').text = str(
data.get('publish-if-aborted', False)).lower()
XML.SubElement(p, 'publishResultsForFailedBuilds').text = str(
data.get('publish-if-failed', False)).lower()
def pmd(registry, xml_parent, data):
"""yaml: pmd
Publish trend reports with PMD.
Requires the Jenkins :jenkins-wiki:`PMD Plugin <PMD+Plugin>`.
The PMD component accepts a dictionary with the following values:
:arg str pattern: Report filename pattern (optional)
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (optional)
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build.
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pmd001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/pmd002.yaml
:language: yaml
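
    A minimal inline sketch (the pattern and thresholds are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - pmd:
              pattern: '**/pmd.xml'          # illustrative report pattern
              healthy: 0
              unhealthy: 100
              health-threshold: 'high'
              thresholds:
                unstable:
                  total-high: 10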
"""
xml_element = XML.SubElement(xml_parent, 'hudson.plugins.pmd.PmdPublisher')
helpers.build_trends_publisher('[PMD] ', xml_element, data)
def scan_build(registry, xml_parent, data):
"""yaml: scan-build
Publishes results from the Clang scan-build static analyzer.
The scan-build report has to be generated in the directory
``${WORKSPACE}/clangScanBuildReports`` for the publisher to find it.
Requires the Jenkins :jenkins-wiki:`Clang Scan-Build Plugin
<Clang+Scan-Build+Plugin>`.
:arg bool mark-unstable: Mark build as unstable if the number of bugs
exceeds a threshold (default false)
:arg int threshold: Threshold for marking builds as unstable (default 0)
:arg string exclude-paths: Comma separated paths to exclude from reports
(>=1.5) (default '')
:arg string report-folder: Folder where generated reports are located
(>=1.7) (default 'clangScanBuildReports')
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/scan-build-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/scan-build-minimal.yaml
:language: yaml
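
    A minimal inline sketch (the threshold and exclude path are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - scan-build:
              mark-unstable: true
              threshold: 5                   # illustrative bug threshold
              exclude-paths: 'third_party'   # illustrative exclude path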
"""
p = XML.SubElement(
xml_parent,
'jenkins.plugins.clangscanbuild.publisher.ClangScanBuildPublisher')
p.set('plugin', 'clang-scanbuild')
mappings = [
('mark-unstable', 'markBuildUnstableWhenThresholdIsExceeded', False),
('threshold', 'bugThreshold', 0),
('exclude-paths', 'clangexcludedpaths', ''),
('report-folder', 'reportFolderName', 'clangScanBuildReports'),
]
helpers.convert_mapping_to_xml(p, data, mappings, fail_required=True)
def dry(registry, xml_parent, data):
"""yaml: dry
Publish trend reports with DRY.
Requires the Jenkins :jenkins-wiki:`DRY Plugin <DRY+Plugin>`.
The DRY component accepts a dictionary with the following values:
:arg str pattern: Report filename pattern (default '')
:arg bool can-run-on-failed: Also runs for failed builds, instead of just
stable or unstable builds (default false)
:arg bool should-detect-modules: Determines if Ant or Maven modules should
be detected for all files that contain warnings (default false)
:arg int healthy: Sunny threshold (default '')
:arg int unhealthy: Stormy threshold (default '')
:arg str health-threshold: Threshold priority for health status
('low', 'normal' or 'high', defaulted to 'low')
:arg int high-threshold: Minimum number of duplicated lines for high
priority warnings. (default 50)
:arg int normal-threshold: Minimum number of duplicated lines for normal
priority warnings. (default 25)
:arg dict thresholds: Mark build as failed or unstable if the number of
errors exceeds a threshold. (default '')
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
:arg str default-encoding: Encoding for parsing or showing files (optional)
:arg bool do-not-resolve-relative-paths: (default false)
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-previous-build-as-reference: determines whether to always
use the previous build as the reference build (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg bool use-delta-values: If set then the number of new warnings is
calculated by subtracting the total number of warnings of the current
build from the reference build. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/dry001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/dry004.yaml
:language: yaml
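
    A minimal inline sketch (the pattern and line counts are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - dry:
              pattern: '**/cpd.xml'          # illustrative report pattern
              high-threshold: 50
              normal-threshold: 25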
"""
xml_element = XML.SubElement(xml_parent, 'hudson.plugins.dry.DryPublisher')
helpers.build_trends_publisher('[DRY] ', xml_element, data)
# Add specific settings for this trends publisher
settings = [
('high-threshold', 'highThreshold', 50),
('normal-threshold', 'normalThreshold', 25)]
helpers.convert_mapping_to_xml(
xml_element, data, settings, fail_required=True)
def shining_panda(registry, xml_parent, data):
"""yaml: shining-panda
Publish coverage.py results. Requires the Jenkins
:jenkins-wiki:`ShiningPanda Plugin <ShiningPanda+Plugin>`.
:arg str html-reports-directory: path to coverage.py html results
(optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/shiningpanda001.yaml
:language: yaml
"""
shining_panda_plugin = XML.SubElement(
xml_parent,
'jenkins.plugins.shiningpanda.publishers.CoveragePublisher')
if 'html-reports-directory' in data:
XML.SubElement(shining_panda_plugin, 'htmlDir').text = str(
data['html-reports-directory'])
def downstream_ext(registry, xml_parent, data):
"""yaml: downstream-ext
Trigger multiple downstream jobs when a job is completed and
condition is met.
Requires the Jenkins :jenkins-wiki:`Downstream-Ext Plugin
<Downstream-Ext+Plugin>`.
:arg list projects: Projects to build (required)
:arg string condition: comparison condition used for the criteria.
One of 'equal-or-over', 'equal-or-under', 'equal'
(default 'equal-or-over')
:arg string criteria: Trigger downstream job if build results meets
condition. One of 'success', 'unstable', 'failure' or
'aborted' (default 'success')
:arg bool only-on-scm-change: Trigger only if downstream project
has SCM changes (default false)
:arg bool only-on-local-scm-change: Trigger only if current project
has SCM changes (default false)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/downstream-ext002.yaml
:language: yaml
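
    A minimal inline sketch (the project names are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - downstream-ext:
              projects:
                - foo-deploy                 # illustrative project name
                - foo-docs
              condition: equal-or-over
              criteria: unstable
              only-on-scm-change: true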
"""
conditions = {
"equal-or-over": "AND_HIGHER",
"equal-or-under": "AND_LOWER",
"equal": "EXACT"
}
p = XML.SubElement(xml_parent,
'hudson.plugins.downstream__ext.DownstreamTrigger')
if 'projects' not in data:
raise JenkinsJobsException("Missing list of downstream projects.")
XML.SubElement(p, 'childProjects').text = ','.join(data['projects'])
th = XML.SubElement(p, 'threshold')
criteria = data.get('criteria', 'success').upper()
if criteria not in hudson_model.THRESHOLDS:
raise JenkinsJobsException("criteria must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
wr_threshold = hudson_model.THRESHOLDS[
criteria]
XML.SubElement(th, "name").text = wr_threshold['name']
XML.SubElement(th, "ordinal").text = wr_threshold['ordinal']
XML.SubElement(th, "color").text = wr_threshold['color']
XML.SubElement(th, "completeBuild").text = str(
wr_threshold['complete']).lower()
condition = data.get('condition', 'equal-or-over')
if condition not in conditions:
raise JenkinsJobsException('condition must be one of: %s' %
", ".join(conditions))
XML.SubElement(p, 'thresholdStrategy').text = conditions[
condition]
XML.SubElement(p, 'onlyIfSCMChanges').text = str(
data.get('only-on-scm-change', False)).lower()
XML.SubElement(p, 'onlyIfLocalSCMChanges').text = str(
data.get('only-on-local-scm-change', False)).lower()
def rundeck(registry, xml_parent, data):
"""yaml: rundeck
Trigger a rundeck job when the build is complete.
Requires the Jenkins :jenkins-wiki:`RunDeck
Plugin <RunDeck+Plugin>`.
:arg str job-id: The RunDeck job identifier. (required)
This could be:
* ID example : "42"
* UUID example : "2027ce89-7924-4ecf-a963-30090ada834f"
* reference, in the format : "project:group/job"
:arg str options: List of options for the Rundeck job, in Java-Properties
format: key=value (default "")
:arg str node-filters: List of filters to optionally filter the nodes
included by the job. (default "")
:arg str tag: Used for on-demand job scheduling on rundeck: if a tag is
specified, the job will only execute if the given tag is present in the
SCM changelog. (default "")
:arg bool wait-for-rundeck: If true Jenkins will wait for the job to
complete, if false the job will be started and Jenkins will move on.
(default false)
:arg bool fail-the-build: If true a RunDeck job failure will cause the
Jenkins build to fail. (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/rundeck001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/rundeck002.yaml
:language: yaml
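
    A minimal inline sketch (the job reference and options are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - rundeck:
              job-id: 'project:group/job'    # illustrative job reference
              options: 'env=staging'         # illustrative options string
              wait-for-rundeck: true
              fail-the-build: true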
"""
p = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.rundeck.RundeckNotifier')
mappings = [
('job-id', 'jobId', None),
('options', 'options', ''),
('node-filters', 'nodeFilters', ''),
('tag', 'tag', ''),
('wait-for-rundeck', 'shouldWaitForRundeckJob', False),
('fail-the-build', 'shouldFailTheBuild', False),
]
helpers.convert_mapping_to_xml(p, data, mappings, fail_required=True)
def create_publishers(registry, action):
dummy_parent = XML.Element("dummy")
registry.dispatch('publisher', dummy_parent, action)
return list(dummy_parent)
def conditional_publisher(registry, xml_parent, data):
"""yaml: conditional-publisher
Conditionally execute some post-build steps. Requires the Jenkins
:jenkins-wiki:`Flexible Publish Plugin <Flexible+Publish+Plugin>`.
A Flexible Publish list of Conditional Actions is created in Jenkins.
:arg str condition-kind: Condition kind that must be verified before the
action is executed. Valid values and their additional attributes are
described in the conditions_ table.
:arg str on-evaluation-failure: What should be the outcome of the build
if the evaluation of the condition fails. Possible values are `fail`,
`mark-unstable`, `run-and-mark-unstable`, `run` and `dont-run`.
Default is `fail`.
:arg list action: Action to run if the condition is verified. Item
can be any publisher known by Jenkins Job Builder and supported
by the Flexible Publish Plugin.
.. _conditions:
================== ====================================================
Condition kind Description
================== ====================================================
always Condition is always verified
never Condition is never verified
boolean-expression Run the action if the expression expands to a
representation of true
:condition-expression: Expression to expand
current-status Run the action if the current build status is
within the configured range
                           :condition-worst: Accepted values are SUCCESS,
                           UNSTABLE, FAILURE, NOT_BUILT, ABORTED
                           :condition-best: Accepted values are SUCCESS,
                           UNSTABLE, FAILURE, NOT_BUILT, ABORTED
shell Run the action if the shell command succeeds
:condition-command: Shell command to execute
windows-shell Similar to shell, except that commands will be
executed by cmd, under Windows
:condition-command: Command to execute
regexp Run the action if a regular expression matches
:condition-expression: Regular Expression
:condition-searchtext: Text to match against
the regular expression
file-exists Run the action if a file exists
:condition-filename: Check existence of this file
:condition-basedir: If condition-filename is
relative, it will be considered relative to
either `workspace`, `artifact-directory`,
or `jenkins-home`. Default is `workspace`.
================== ====================================================
Single Conditional Action Example:
.. literalinclude:: \
/../../tests/publishers/fixtures/conditional-publisher001.yaml
:language: yaml
Multiple Conditional Actions Example
(includes example of multiple actions per condition which requires
v0.13 or higher of the Flexible Publish plugin):
.. literalinclude:: \
/../../tests/publishers/fixtures/conditional-publisher003.yaml
:language: yaml
:download:`Multiple Conditional Actions Example for pre-v0.13 versions
<../../tests/publishers/fixtures/conditional-publisher002.yaml>`
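
    A minimal inline sketch (the wrapped ``archive`` publisher and its
    pattern are illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - conditional-publisher:
              - condition-kind: current-status
                condition-worst: SUCCESS
                condition-best: SUCCESS
                on-evaluation-failure: dont-run
                action:
                  - archive:
                      artifacts: '*.log'     # illustrative pattern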
"""
def publish_condition(cdata):
kind = cdata['condition-kind']
ctag = XML.SubElement(cond_publisher, condition_tag)
class_pkg = 'org.jenkins_ci.plugins.run_condition'
if kind == "always":
ctag.set('class',
class_pkg + '.core.AlwaysRun')
elif kind == "never":
ctag.set('class',
class_pkg + '.core.NeverRun')
elif kind == "boolean-expression":
ctag.set('class',
class_pkg + '.core.BooleanCondition')
XML.SubElement(ctag, "token").text = cdata['condition-expression']
elif kind == "current-status":
ctag.set('class',
class_pkg + '.core.StatusCondition')
wr = XML.SubElement(ctag, 'worstResult')
wr_name = cdata['condition-worst']
if wr_name not in hudson_model.THRESHOLDS:
raise JenkinsJobsException(
"threshold must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
wr_threshold = hudson_model.THRESHOLDS[wr_name]
XML.SubElement(wr, "name").text = wr_threshold['name']
XML.SubElement(wr, "ordinal").text = wr_threshold['ordinal']
XML.SubElement(wr, "color").text = wr_threshold['color']
XML.SubElement(wr, "completeBuild").text = \
str(wr_threshold['complete']).lower()
br = XML.SubElement(ctag, 'bestResult')
br_name = cdata['condition-best']
if br_name not in hudson_model.THRESHOLDS:
raise JenkinsJobsException(
"threshold must be one of %s" %
", ".join(hudson_model.THRESHOLDS.keys()))
br_threshold = hudson_model.THRESHOLDS[br_name]
XML.SubElement(br, "name").text = br_threshold['name']
XML.SubElement(br, "ordinal").text = br_threshold['ordinal']
XML.SubElement(br, "color").text = br_threshold['color']
            XML.SubElement(br, "completeBuild").text = \
                str(br_threshold['complete']).lower()
elif kind == "shell":
ctag.set('class',
class_pkg + '.contributed.ShellCondition')
XML.SubElement(ctag, "command").text = cdata['condition-command']
elif kind == "windows-shell":
ctag.set('class',
class_pkg + '.contributed.BatchFileCondition')
XML.SubElement(ctag, "command").text = cdata['condition-command']
elif kind == "regexp":
ctag.set('class',
class_pkg + '.core.ExpressionCondition')
XML.SubElement(ctag,
"expression").text = cdata['condition-expression']
XML.SubElement(ctag, "label").text = cdata['condition-searchtext']
elif kind == "file-exists":
ctag.set('class',
class_pkg + '.core.FileExistsCondition')
XML.SubElement(ctag, "file").text = cdata['condition-filename']
basedir = cdata.get('condition-basedir', 'workspace')
basedir_tag = XML.SubElement(ctag, "baseDir")
if "workspace" == basedir:
basedir_tag.set('class',
class_pkg + '.common.BaseDirectory$Workspace')
elif "artifact-directory" == basedir:
basedir_tag.set('class',
class_pkg + '.common.'
'BaseDirectory$ArtifactsDir')
elif "jenkins-home" == basedir:
basedir_tag.set('class',
class_pkg + '.common.'
'BaseDirectory$JenkinsHome')
else:
raise JenkinsJobsException('%s is not a valid condition-kind '
'value.' % kind)
def publish_action(parent, action):
for edited_node in create_publishers(registry, action):
if not use_publisher_list:
edited_node.set('class', edited_node.tag)
edited_node.tag = 'publisher'
parent.append(edited_node)
flex_publisher_tag = 'org.jenkins__ci.plugins.flexible__publish.' \
'FlexiblePublisher'
cond_publisher_tag = 'org.jenkins__ci.plugins.flexible__publish.' \
'ConditionalPublisher'
root_tag = XML.SubElement(xml_parent, flex_publisher_tag)
publishers_tag = XML.SubElement(root_tag, "publishers")
condition_tag = "condition"
evaluation_classes_pkg = 'org.jenkins_ci.plugins.run_condition'
evaluation_classes = {
'fail': evaluation_classes_pkg + '.BuildStepRunner$Fail',
'mark-unstable': evaluation_classes_pkg +
'.BuildStepRunner$Unstable',
'run-and-mark-unstable': evaluation_classes_pkg +
'.BuildStepRunner$RunUnstable',
'run': evaluation_classes_pkg + '.BuildStepRunner$Run',
'dont-run': evaluation_classes_pkg + '.BuildStepRunner$DontRun',
}
for cond_action in data:
cond_publisher = XML.SubElement(publishers_tag, cond_publisher_tag)
publish_condition(cond_action)
evaluation_flag = cond_action.get('on-evaluation-failure', 'fail')
if evaluation_flag not in evaluation_classes.keys():
raise JenkinsJobsException('on-evaluation-failure value '
'specified is not valid. Must be one '
'of: %s' % evaluation_classes.keys())
evaluation_class = evaluation_classes[evaluation_flag]
XML.SubElement(cond_publisher, "runner").set('class',
evaluation_class)
if 'action' in cond_action:
actions = cond_action['action']
action_parent = cond_publisher
plugin_info = \
registry.get_plugin_info("Flexible Publish Plugin")
version = pkg_resources.parse_version(plugin_info.get('version',
'0'))
# XML tag changed from publisher to publisherList in v0.13
# check the plugin version to determine further operations
use_publisher_list = version >= pkg_resources.parse_version("0.13")
if use_publisher_list:
action_parent = XML.SubElement(cond_publisher, 'publisherList')
else:
# Check the length of actions list for versions prior to 0.13.
# Flexible Publish will overwrite action if more than one is
# specified. Limit the action list to one element.
                if len(actions) != 1:
raise JenkinsJobsException("Only one action may be "
"specified for each condition.")
for action in actions:
publish_action(action_parent, action)
else:
raise JenkinsJobsException('action must be set for each condition')
def scoverage(registry, xml_parent, data):
"""yaml: scoverage
Publish scoverage results as a trend graph.
Requires the Jenkins :jenkins-wiki:`Scoverage Plugin <Scoverage+Plugin>`.
:arg str report-directory: This is a directory that specifies the locations
where the xml scoverage report is generated (required)
:arg str report-file: This is a file name that is given to the xml
scoverage report (required)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scoverage001.yaml
:language: yaml
"""
scoverage = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.scoverage.ScoveragePublisher')
scoverage.set('plugin', 'scoverage')
mappings = [
('report-directory', 'reportDir', None),
('report-file', 'reportFile', None),
]
helpers.convert_mapping_to_xml(
scoverage, data, mappings, fail_required=True)
def display_upstream_changes(registry, xml_parent, data):
"""yaml: display-upstream-changes
Display SCM changes of upstream jobs. Requires the Jenkins
:jenkins-wiki:`Display Upstream Changes Plugin
<Display+Upstream+Changes+Plugin>`.
Example:
.. literalinclude:: \
/../../tests/publishers/fixtures/display-upstream-changes.yaml
"""
XML.SubElement(
xml_parent,
'jenkins.plugins.displayupstreamchanges.'
'DisplayUpstreamChangesRecorder')
def gatling(registry, xml_parent, data):
"""yaml: gatling
Publish gatling results as a trend graph
Requires the Jenkins :jenkins-wiki:`Gatling Plugin <Gatling+Plugin>`.
Example:
.. literalinclude:: /../../tests/publishers/fixtures/gatling001.yaml
:language: yaml
"""
gatling = XML.SubElement(
xml_parent,
'io.gatling.jenkins.GatlingPublisher')
XML.SubElement(gatling, 'enabled').text = 'true'
def logstash(registry, xml_parent, data):
"""yaml: logstash
    Send the job's console log to Logstash for processing and analysis of
    your job data. Also stores test metrics from JUnit.
Requires the Jenkins :jenkins-wiki:`Logstash Plugin <Logstash+Plugin>`.
:arg int max-lines: The maximum number of log lines to send to Logstash.
(default 1000)
:arg bool fail-build: Mark build as failed if this step fails.
(default false)
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/logstash-min.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/logstash-full.yaml
:language: yaml
"""
logstash = XML.SubElement(xml_parent,
'jenkins.plugins.logstash.LogstashNotifier')
logstash.set('plugin', 'logstash')
mapping = [
('max-lines', 'maxLines', 1000),
('fail-build', 'failBuild', False),
]
helpers.convert_mapping_to_xml(logstash, data, mapping, fail_required=True)
def image_gallery(registry, xml_parent, data):
"""yaml: image-gallery
    Produce an image gallery using a JavaScript library. Requires the Jenkins
    :jenkins-wiki:`Image Gallery Plugin <Image+Gallery+Plugin>`.
:arg str gallery-type:
:gallery-type values:
* **archived-images-gallery** (default)
* **in-folder-comparative-gallery**
* **multiple-folder-comparative-gallery**
:arg str title: gallery title (optional)
:arg int image-width: width of the image (optional)
:arg bool unstable-if-no-artifacts: mark build as unstable
if no archived artifacts were found (default false)
:arg str includes: include pattern (valid for archived-images-gallery
gallery)
:arg str base-root-folder: base root dir (valid for comparative gallery)
:arg int image-inner-width: width of the image displayed in the inner
gallery popup (valid for comparative gallery, optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/image-gallery001.yaml
"""
def include_comparative_elements(gallery_parent_elem, gallery):
XML.SubElement(gallery_parent_elem, 'baseRootFolder').text = str(
gallery.get('base-root-folder', ''))
image_inner_width = gallery.get('image-inner-width', '')
if image_inner_width:
XML.SubElement(gallery_parent_elem, 'imageInnerWidth').text = str(
image_inner_width)
package_prefix = 'org.jenkinsci.plugins.imagegallery.'
builder = XML.SubElement(
xml_parent, package_prefix + 'ImageGalleryRecorder'
)
image_galleries = XML.SubElement(builder, 'imageGalleries')
galleries = {
'archived-images-gallery': package_prefix + 'imagegallery.'
'ArchivedImagesGallery',
'in-folder-comparative-gallery': package_prefix + 'comparative.'
'InFolderComparativeArchivedImagesGallery',
'multiple-folder-comparative-gallery': package_prefix + 'comparative.'
'MultipleFolderComparativeArchivedImagesGallery'
}
for gallery_def in data:
gallery_type = gallery_def.get('gallery-type',
'archived-images-gallery')
if gallery_type not in galleries:
raise InvalidAttributeError('gallery-type', gallery_type,
galleries.keys())
gallery_config = XML.SubElement(
image_galleries, galleries[gallery_type])
XML.SubElement(gallery_config, 'title').text = str(
gallery_def.get('title', ''))
image_width = str(gallery_def.get('image-width', ''))
if image_width:
XML.SubElement(gallery_config, 'imageWidth').text = str(
image_width)
XML.SubElement(
gallery_config,
'markBuildAsUnstableIfNoArchivesFound').text = str(gallery_def.get(
'unstable-if-no-artifacts', False))
if gallery_type == 'archived-images-gallery':
XML.SubElement(gallery_config, 'includes').text = str(
gallery_def.get('includes', ''))
if gallery_type == 'in-folder-comparative-gallery':
include_comparative_elements(gallery_config, gallery_def)
if gallery_type == 'multiple-folder-comparative-gallery':
include_comparative_elements(gallery_config, gallery_def)
def naginator(registry, xml_parent, data):
"""yaml: naginator
Automatically reschedule a build after a build failure
Requires the Jenkins :jenkins-wiki:`Naginator Plugin <Naginator+Plugin>`.
:arg bool rerun-unstable-builds: Rerun build for unstable builds as well
as failures (default false)
:arg bool rerun-matrix-part: Rerun build only for failed parts on the
matrix (>=1.12) (default false)
:arg int fixed-delay: Fixed delay before retrying build (cannot be used
with progressive-delay-increment or progressive-delay-maximum.
This is the default delay type. (default 0)
:arg int progressive-delay-increment: Progressive delay before retrying
build increment (cannot be used when fixed-delay is being used)
(default 0)
:arg int progressive-delay-maximum: Progressive delay before retrying
maximum delay (cannot be used when fixed-delay is being used)
(default 0)
:arg int max-failed-builds: Maximum number of successive failed builds
(default 0)
:arg str regular-expression: Only rerun build if regular expression is
found in output (default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/naginator001.yaml
:language: yaml
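
    A minimal inline sketch (the delay, retry count and expression are
    illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - naginator:
              rerun-unstable-builds: true
              fixed-delay: 60                # illustrative delay in seconds
              max-failed-builds: 3
              regular-expression: 'Timeout'  # illustrative rerun trigger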
"""
naginator = XML.SubElement(
xml_parent,
'com.chikli.hudson.plugin.naginator.NaginatorPublisher')
XML.SubElement(naginator, 'regexpForRerun').text = str(
data.get('regular-expression', ''))
XML.SubElement(naginator, 'checkRegexp').text = str(
'regular-expression' in data).lower()
XML.SubElement(naginator, 'rerunIfUnstable').text = str(
data.get('rerun-unstable-builds', False)).lower()
XML.SubElement(naginator, 'rerunMatrixPart').text = str(
data.get('rerun-matrix-part', False)).lower()
progressive_delay = ('progressive-delay-increment' in data or
'progressive-delay-maximum' in data)
if 'fixed-delay' in data and progressive_delay:
raise JenkinsJobsException("You cannot specify both fixed "
"and progressive delays")
if not progressive_delay:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.FixedDelay'})
XML.SubElement(delay, 'delay').text = str(
data.get('fixed-delay', '0'))
else:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.ProgressiveDelay'})
XML.SubElement(delay, 'increment').text = str(
data.get('progressive-delay-increment', '0'))
XML.SubElement(delay, 'max').text = str(
data.get('progressive-delay-maximum', '0'))
XML.SubElement(naginator, 'maxSchedule').text = str(
data.get('max-failed-builds', '0'))
def disable_failed_job(registry, xml_parent, data):
"""yaml: disable-failed-job
Automatically disable failed jobs.
Requires the Jenkins :jenkins-wiki:`Disable Failed Job Plugin
<Disable+Failed+Job+Plugin>`.
:arg str when-to-disable: The condition to disable the job. (required)
Possible values are
        * **Only Failure**
        * **Failure and Unstable**
        * **Only Unstable**
:arg int no-of-failures: Number of consecutive failures to disable the
job. (optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/disable-failed-job001.yaml
:language: yaml
"""
xml_element = XML.SubElement(xml_parent, 'disableFailedJob.'
'disableFailedJob.DisableFailedJob',
{'plugin': 'disable-failed-job'})
valid_conditions = ['Only Failure',
'Failure and Unstable',
'Only Unstable']
mapping = [('when-to-disable', 'whenDisable', None, valid_conditions)]
helpers.convert_mapping_to_xml(
xml_element, data, mapping, fail_required=True)
if 'no-of-failures' in data:
XML.SubElement(xml_element, 'failureTimes').text = str(data.get(
'no-of-failures'))
XML.SubElement(xml_element, 'optionalBrockChecked').text = 'true'
else:
XML.SubElement(xml_element, 'optionalBrockChecked').text = 'false'
def google_cloud_storage(registry, xml_parent, data):
"""yaml: google-cloud-storage
Upload build artifacts to Google Cloud Storage. Requires the
Jenkins :jenkins-wiki:`Google Cloud Storage plugin
<Google+Cloud+Storage+Plugin>`.
    Apart from the Google Cloud Storage Plugin itself, installation of the
    Google OAuth Credentials plugin and addition of the required credentials
    to Jenkins are also required.
:arg str credentials-id: The set of Google credentials registered with
the Jenkins Credential Manager for authenticating
with your project. (required)
:arg list uploads:
:uploads:
* **expiring-elements** (`dict`)
:params:
* **bucket-name** (`str`) bucket name to upload artifacts
(required)
* **days-to-retain** (`int`) days to keep artifacts
(required)
* **build-log** (`dict`)
:params:
                    * **log-name** (`str`) name to give the uploaded Jenkins
                      console log file (required)
* **storage-location** (`str`) bucket name to upload
artifacts (required)
                    * **share-publicly** (`bool`) whether to share uploaded
                      artifacts with everyone (default false)
* **upload-for-failed-jobs** (`bool`) whether to upload
artifacts even if the build fails (default false)
* **show-inline** (`bool`) whether to show uploaded build
log inline in web browsers, rather than forcing it to be
downloaded (default true)
* **strip-prefix** (`str`) strip this prefix off the
file names (default not set)
* **classic** (`dict`)
:params:
* **file-pattern** (`str`) ant style globs to match the
files to upload (required)
* **storage-location** (`str`) bucket name to upload
artifacts (required)
                    * **share-publicly** (`bool`) whether to share uploaded
                      artifacts with everyone (default false)
* **upload-for-failed-jobs** (`bool`) whether to upload
artifacts even if the build fails (default false)
* **show-inline** (`bool`) whether to show uploaded
artifacts inline in web browsers, rather than forcing
them to be downloaded (default false)
* **strip-prefix** (`str`) strip this prefix off the
file names (default not set)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/google_cloud_storage001.yaml
:language: yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/google_cloud_storage002.yaml
:language: yaml
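
    A minimal inline sketch (the credentials id and bucket names are
    illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - google-cloud-storage:
              credentials-id: my-gcs-creds   # illustrative credentials id
              uploads:
                - build-log:
                    log-name: console.log
                    storage-location: my-bucket
                - classic:
                    file-pattern: 'dist/**'
                    storage-location: my-bucket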
"""
def expiring_elements(properties, upload_element, types):
"""Handle expiring elements upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.'
'jenkins.plugins.storage.'
'ExpiringBucketLifecycleManager')
if 'bucket-name' not in properties:
raise MissingAttributeError('bucket-name')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['bucket-name'])
XML.SubElement(xml_element, 'sharedPublicly').text = 'false'
XML.SubElement(xml_element, 'forFailedJobs').text = 'false'
if types.count('expiring-elements') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.ExpiringBucketLifecycleManager/module'})
else:
XML.SubElement(xml_element, 'module')
if 'days-to-retain' not in properties:
raise MissingAttributeError('days-to-retain')
XML.SubElement(xml_element, 'bucketObjectTTL').text = str(
properties['days-to-retain'])
def build_log(properties, upload_element, types):
"""Handle build log upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.jenkins.'
'plugins.storage.StdoutUpload')
if 'storage-location' not in properties:
raise MissingAttributeError('storage-location')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['storage-location'])
XML.SubElement(xml_element, 'sharedPublicly').text = str(
properties.get('share-publicly', False)).lower()
XML.SubElement(xml_element, 'forFailedJobs').text = str(
properties.get('upload-for-failed-jobs', False)).lower()
XML.SubElement(xml_element, 'showInline').text = str(
properties.get('show-inline', True)).lower()
XML.SubElement(xml_element, 'pathPrefix').text = str(
properties.get('strip-prefix', ''))
if types.count('build-log') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.StdoutUpload/module'})
else:
XML.SubElement(xml_element, 'module')
if 'log-name' not in properties:
raise MissingAttributeError('log-name')
XML.SubElement(xml_element, 'logName').text = str(
properties['log-name'])
def classic(properties, upload_element, types):
"""Handle classic upload action
"""
xml_element = XML.SubElement(upload_element, 'com.google.jenkins.'
'plugins.storage.ClassicUpload')
if 'storage-location' not in properties:
raise MissingAttributeError('storage-location')
XML.SubElement(xml_element, 'bucketNameWithVars').text = str(
properties['storage-location'])
XML.SubElement(xml_element, 'sharedPublicly').text = str(
properties.get('share-publicly', False)).lower()
XML.SubElement(xml_element, 'forFailedJobs').text = str(
properties.get('upload-for-failed-jobs', False)).lower()
XML.SubElement(xml_element, 'showInline').text = str(
properties.get('show-inline', False)).lower()
XML.SubElement(xml_element, 'pathPrefix').text = str(
properties.get('strip-prefix', ''))
if types.count('classic') > 1:
XML.SubElement(xml_element, 'module',
{'reference': '../../com.google.jenkins.plugins.'
'storage.ClassicUpload/module'})
else:
XML.SubElement(xml_element, 'module')
if 'file-pattern' not in properties:
raise MissingAttributeError('file-pattern')
XML.SubElement(xml_element, 'sourceGlobWithVars').text = str(
properties['file-pattern'])
uploader = XML.SubElement(xml_parent,
'com.google.jenkins.plugins.storage.'
'GoogleCloudStorageUploader',
{'plugin': 'google-storage-plugin'})
try:
credentials_id = str(data['credentials-id'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
XML.SubElement(uploader, 'credentialsId').text = credentials_id
valid_upload_types = ['expiring-elements',
'build-log',
'classic']
types = []
upload_element = XML.SubElement(uploader, 'uploads')
uploads = data['uploads']
for upload in uploads:
for upload_type, properties in upload.items():
types.append(upload_type)
if upload_type not in valid_upload_types:
raise InvalidAttributeError('uploads', upload_type,
valid_upload_types)
else:
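                # dispatch to the matching nested handler defined above,
                # e.g. 'build-log' -> build_log()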
locals()[upload_type.replace('-', '_')](
properties, upload_element, types)
def flowdock(registry, xml_parent, data):
"""yaml: flowdock
This plugin publishes job build results to a Flowdock flow.
Requires the Jenkins :jenkins-wiki:`Flowdock Plugin
<Flowdock+Plugin>`.
:arg str token: API token for the targeted flow.
(required)
    :arg str tags: Comma-separated list of tags to include in message
(default "")
:arg bool chat-notification: Send chat notification when build fails
(default true)
:arg bool notify-success: Send notification on build success
(default true)
:arg bool notify-failure: Send notification on build failure
(default true)
:arg bool notify-fixed: Send notification when build is fixed
(default true)
:arg bool notify-unstable: Send notification when build is unstable
(default false)
:arg bool notify-aborted: Send notification when build was aborted
(default false)
:arg bool notify-notbuilt: Send notification when build did not occur
(default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/flowdock001.yaml
:language: yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/flowdock002.yaml
:language: yaml
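
    A minimal inline sketch (the token is an illustrative placeholder):

    .. code-block:: yaml

        publishers:
          - flowdock:
              token: 0123456789abcdef        # illustrative API token
              tags: jenkins,ci
              notify-unstable: true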
"""
def gen_notification_entry(data_item, default, text):
e = XML.SubElement(nm, 'entry')
XML.SubElement(e, 'com.flowdock.jenkins.BuildResult').text = text
XML.SubElement(e, 'boolean').text = str(
data.get(data_item, default)).lower()
def gen_setting(item, default):
XML.SubElement(parent, 'notify%s' % item).text = str(
data.get('notify-%s' % item.lower(), default)).lower()
# Raise exception if token was not specified
if 'token' not in data:
raise MissingAttributeError('token')
parent = XML.SubElement(xml_parent,
'com.flowdock.jenkins.FlowdockNotifier')
XML.SubElement(parent, 'flowToken').text = data['token']
XML.SubElement(parent, 'notificationTags').text = data.get('tags', '')
XML.SubElement(parent, 'chatNotification').text = str(
data.get('chat-notification', True)).lower()
nm = XML.SubElement(parent, 'notifyMap')
# notification entries
gen_notification_entry('notify-success', True, 'SUCCESS')
gen_notification_entry('notify-failure', True, 'FAILURE')
gen_notification_entry('notify-fixed', True, 'FIXED')
gen_notification_entry('notify-unstable', False, 'UNSTABLE')
gen_notification_entry('notify-aborted', False, 'ABORTED')
gen_notification_entry('notify-notbuilt', False, 'NOT_BUILT')
# notification settings
gen_setting('Success', True)
gen_setting('Failure', True)
gen_setting('Fixed', True)
gen_setting('Unstable', False)
gen_setting('Aborted', False)
gen_setting('NotBuilt', False)
def clamav(registry, xml_parent, data):
"""yaml: clamav
Check files with ClamAV, an open source antivirus engine.
Requires the Jenkins :jenkins-wiki:`ClamAV Plugin <ClamAV+Plugin>`.
    :arg str includes: Comma separated list of files that should be scanned.
        Must be set for ClamAV to check for artifacts. (default '')
    :arg str excludes: Comma separated list of files that should be ignored
        (default '')
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/clamav-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: /../../tests/publishers/fixtures/clamav-minimal.yaml
:language: yaml
"""
clamav = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.clamav.ClamAvRecorder')
clamav.set('plugin', 'clamav')
mappings = [
('includes', 'includes', ''),
('excludes', 'excludes', ''),
]
helpers.convert_mapping_to_xml(clamav, data, mappings, fail_required=True)
def testselector(registry, xml_parent, data):
"""yaml: testselector
This plugin allows you to choose specific tests you want to run.
Requires the Jenkins :jenkins-wiki:`Tests Selector Plugin
<Tests+Selector+Plugin>`.
:arg str name: Environment variable in which selected tests are saved
(required)
    :arg str description: Description
        (default "")
    :arg str properties-file: Properties file containing all your tests
        (required)
    :arg str enable-field: Field indicating whether the test is enabled
        (default "")
    :arg str groupby: Field by which the plugin will group the tests
        (default "")
    :arg str field-separator: Separator between the fields in the tests tree
        (default "")
    :arg str show-fields: Fields shown in the tests tree
        (default "")
    :arg str multiplicity-field: Number of times the test should run
        (default "")
Example:
.. literalinclude:: /../../tests/publishers/fixtures/testselector001.yaml
:language: yaml
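
    A minimal inline sketch (the variable and file names are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - testselector:
              name: SELECTED_TESTS           # illustrative env variable
              properties-file: tests.properties
              groupby: group
              show-fields: name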
"""
testselector = XML.SubElement(xml_parent, 'il.ac.technion.jenkins.plugins'
'TestExecuter')
try:
name = str(data['name'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
try:
propertiesfile = str(data['properties-file'])
except KeyError as e:
raise MissingAttributeError(e.args[0])
XML.SubElement(testselector, 'name').text = name
XML.SubElement(testselector, 'description').text = data.get(
'description', '')
XML.SubElement(testselector, 'propertiesFilePath').text = propertiesfile
XML.SubElement(testselector, 'enableField').text = data.get(
'enable-field', '')
XML.SubElement(testselector, 'groupBy').text = data.get(
'groupby', '')
XML.SubElement(testselector, 'fieldSeparator').text = data.get(
'field-separator', '')
XML.SubElement(testselector, 'showFields').text = data.get(
'show-fields', '')
XML.SubElement(testselector, 'multiplicityField').text = data.get(
'multiplicity-field', '')
def cloudformation(registry, xml_parent, data):
"""yaml: cloudformation
Create cloudformation stacks before running a build and optionally
delete them at the end. Requires the Jenkins :jenkins-wiki:`AWS
Cloudformation Plugin <AWS+Cloudformation+Plugin>`.
:arg list create-stacks: List of stacks to create
:create-stacks attributes:
* **arg str name** - The name of the stack (Required)
* **arg str description** - Description of the stack (Optional)
* **arg str recipe** - The cloudformation recipe file (Required)
* **arg list parameters** - A list of key/value pairs, will be
joined together into a comma separated string (Optional)
* **arg int timeout** - Number of seconds to wait before giving up
creating a stack (default 0)
* **arg str access-key** - The Amazon API Access Key (Required)
* **arg str secret-key** - The Amazon API Secret Key (Required)
* **arg int sleep** - Number of seconds to wait before continuing
to the next step (default 0)
* **arg array region** - The region to run cloudformation in.
(Required)
:region values:
* **us-east-1**
* **us-west-1**
* **us-west-2**
* **eu-central-1**
* **eu-west-1**
* **ap-southeast-1**
* **ap-southeast-2**
* **ap-northeast-1**
* **sa-east-1**
:arg list delete-stacks: List of stacks to delete
:delete-stacks attributes:
* **arg list name** - The names of the stacks to delete (Required)
* **arg str access-key** - The Amazon API Access Key (Required)
* **arg str secret-key** - The Amazon API Secret Key (Required)
            * **arg bool prefix** - If selected the tear down process will
              look for the stack that starts with the stack name with the
              oldest creation date and will delete it. (default false)
* **arg array region** - The region to run cloudformation in.
(Required)
:region values:
* **us-east-1**
* **us-west-1**
* **us-west-2**
* **eu-central-1**
* **eu-west-1**
* **ap-southeast-1**
* **ap-southeast-2**
* **ap-northeast-1**
* **sa-east-1**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/cloudformation.yaml
:language: yaml
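
    A minimal inline sketch (stack name, recipe and key references are
    illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - cloudformation:
              create-stacks:
                - name: my-stack             # illustrative stack name
                  recipe: stack.json         # illustrative recipe file
                  access-key: '{AWS_ACCESS}'
                  secret-key: '{AWS_SECRET}'
                  region: us-east-1
              delete-stacks:
                - name: my-stack
                  access-key: '{AWS_ACCESS}'
                  secret-key: '{AWS_SECRET}'
                  region: us-east-1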
"""
region_dict = helpers.cloudformation_region_dict()
stacks = helpers.cloudformation_init(
xml_parent, data, 'CloudFormationPostBuildNotifier')
for stack in data.get('create-stacks', []):
helpers.cloudformation_stack(xml_parent, stack, 'PostBuildStackBean',
stacks, region_dict)
delete_stacks = helpers.cloudformation_init(
xml_parent, data, 'CloudFormationNotifier')
for delete_stack in data.get('delete-stacks', []):
helpers.cloudformation_stack(xml_parent, delete_stack,
'SimpleStackBean', delete_stacks,
region_dict)
def whitesource(registry, xml_parent, data):
"""yaml: whitesource
This plugin brings automatic open source management to Jenkins users.
Requires the Jenkins :jenkins-wiki:`Whitesource Plugin
<Whitesource+Plugin>`.
:arg str product-token: Product name or token to update (default '')
:arg str version: Product version (default '')
:arg str override-token: Override the api token from the global config
(default '')
:arg str project-token: Token uniquely identifying the project to update
(default '')
:arg list includes: list of libraries to include (default '[]')
:arg list excludes: list of libraries to exclude (default '[]')
:arg str policies: Whether to override the global settings. Valid values:
global, enable, disable (default 'global')
:arg str requester-email: Email of the WhiteSource user that requests to
update WhiteSource (>=1.5.1) (default '')
Full Example:
.. literalinclude:: /../../tests/publishers/fixtures/whitesource-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/publishers/fixtures/whitesource-minimal.yaml
:language: yaml
"""
whitesource = XML.SubElement(xml_parent, 'org.whitesource.jenkins.'
'WhiteSourcePublisher')
whitesource.set('plugin', 'whitesource')
policies = ['global', 'enable', 'disable']
mappings = [
('policies', 'jobCheckPolicies', 'global', policies),
('override-token', 'jobApiToken', ''),
('product-token', 'product', ''),
('version', 'productVersion', ''),
('project-token', 'projectToken', ''),
('requester-email', 'requesterEmail', ''),
]
helpers.convert_mapping_to_xml(
whitesource, data, mappings, fail_required=True)
XML.SubElement(whitesource, 'libIncludes').text = ' '.join(
data.get('includes', []))
XML.SubElement(whitesource, 'libExcludes').text = ' '.join(
data.get('excludes', []))
XML.SubElement(whitesource, 'ignorePomModules').text = 'false'
def hipchat(registry, xml_parent, data):
"""yaml: hipchat
Publisher that sends hipchat notifications on job events
Requires the Jenkins :jenkins-wiki:`Hipchat Plugin
    <Hipchat+Plugin>` version >=1.9.
    Please see the documentation for older plugin versions:
    http://docs.openstack.org/infra/jenkins-job-builder/hipchat.html
:arg str token: This will override the default auth token (optional)
:arg list rooms: list of HipChat rooms to post messages to, overrides
global default (optional)
:arg bool notify-start: post messages about build start event
(default false)
:arg bool notify-success: post messages about successful build event
(default false)
:arg bool notify-aborted: post messages about aborted build event
(default false)
    :arg bool notify-not-built: post messages about build set to NOT_BUILT.
        This status code is used in a multi-stage build where a problem in an
        earlier stage prevented later stages from building. (default false)
:arg bool notify-unstable: post messages about unstable build event
(default false)
:arg bool notify-failure: post messages about build failure event
(default false)
:arg bool notify-back-to-normal: post messages about build being back to
normal after being unstable or failed (default false)
:arg str start-message: This will override the default start message
(optional)
:arg str complete-message: This will override the default complete message
(optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/hipchat001.yaml
:language: yaml
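
    A minimal inline sketch (the token and room name are illustrative
    placeholders):

    .. code-block:: yaml

        publishers:
          - hipchat:
              token: abc123                  # illustrative auth token
              rooms:
                - my-team-room               # illustrative room name
              notify-start: true
              notify-failure: true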
"""
hipchat = XML.SubElement(
xml_parent,
'jenkins.plugins.hipchat.HipChatNotifier')
XML.SubElement(hipchat, 'token').text = str(
data.get('token', ''))
if 'rooms' in data:
XML.SubElement(hipchat, 'room').text = str(
",".join(data['rooms']))
mapping = [
('notify-start', 'startNotification', False),
('notify-success', 'notifySuccess', False),
('notify-aborted', 'notifyAborted', False),
('notify-not-built', 'notifyNotBuilt', False),
('notify-unstable', 'notifyUnstable', False),
('notify-failure', 'notifyFailure', False),
('notify-back-to-normal', 'notifyBackToNormal', False),
]
helpers.convert_mapping_to_xml(hipchat, data, mapping, fail_required=True)
# optional settings, so only add XML in if set.
if 'start-message' in data:
XML.SubElement(hipchat, 'startJobMessage').text = str(
data['start-message'])
if 'complete-message' in data:
XML.SubElement(hipchat, 'completeJobMessage').text = str(
data['complete-message'])
def slack(registry, xml_parent, data):
"""yaml: slack
Publisher that sends slack notifications on job events.
Requires the Jenkins :jenkins-wiki:`Slack Plugin <Slack+Plugin>`
    When using Slack Plugin version < 2.0, the plugin requires both a
    publisher and properties; please note that you have to create those too.
    When using Slack Plugin version >= 2.0, you should only configure the
    publisher.
:arg str team-domain: Your team's domain at slack. (default '')
:arg str auth-token: The integration token to be used when sending
notifications. (default '')
:arg str build-server-url: Specify the URL for your server installation.
(default '/')
    :arg str room: A comma separated list of rooms / channels to post the
        notifications to. (default '')
:arg bool notify-start: Send notification when the job starts (>=2.0).
(default false)
:arg bool notify-success: Send notification on success (>=2.0).
(default false)
:arg bool notify-aborted: Send notification when job is aborted (>=2.0).
(default false)
:arg bool notify-not-built: Send notification when job set to NOT_BUILT
status (>=2.0). (default false)
:arg bool notify-unstable: Send notification when job becomes unstable
(>=2.0). (default false)
:arg bool notify-failure: Send notification when job fails for the first
time (previous build was a success) (>=2.0). (default false)
    :arg bool notify-back-to-normal: Send notification when job is succeeding
        again after being unstable or failed (>=2.0). (default false)
:arg bool notify-repeated-failure: Send notification when job fails
successively (previous build was also a failure) (>=2.0).
(default false)
:arg bool include-test-summary: Include the test summary (>=2.0).
(default false)
:arg str commit-info-choice: What commit information to include into
notification message, "NONE" includes nothing about commits, "AUTHORS"
includes commit list with authors only, and "AUTHORS_AND_TITLES"
includes commit list with authors and titles (>=2.0). (default "NONE")
:arg bool include-custom-message: Include a custom message into the
notification (>=2.0). (default false)
:arg str custom-message: Custom message to be included (>=2.0).
(default '')
Example (version < 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack001.yaml
:language: yaml
Minimal example (version >= 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack003.yaml
:language: yaml
Full example (version >= 2.0):
.. literalinclude::
/../../tests/publishers/fixtures/slack004.yaml
:language: yaml
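
    A minimal inline sketch for plugin version >= 2.0 (the domain, token and
    channel are illustrative placeholders):

    .. code-block:: yaml

        publishers:
          - slack:
              team-domain: example           # illustrative team domain
              auth-token: secret-token       # illustrative token
              room: '#ci'                    # illustrative channel
              notify-failure: true
              notify-back-to-normal: true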
"""
def _add_xml(elem, name, value=''):
if isinstance(value, bool):
value = str(value).lower()
XML.SubElement(elem, name).text = value
logger = logging.getLogger(__name__)
plugin_info = registry.get_plugin_info('Slack Notification Plugin')
plugin_ver = pkg_resources.parse_version(plugin_info.get('version', "0"))
mapping = (
('team-domain', 'teamDomain', ''),
('auth-token', 'authToken', ''),
('build-server-url', 'buildServerUrl', '/'),
('room', 'room', ''),
)
mapping_20 = (
('notify-start', 'startNotification', False),
('notify-success', 'notifySuccess', False),
('notify-aborted', 'notifyAborted', False),
('notify-not-built', 'notifyNotBuilt', False),
('notify-unstable', 'notifyUnstable', False),
('notify-failure', 'notifyFailure', False),
('notify-back-to-normal', 'notifyBackToNormal', False),
('notify-repeated-failure', 'notifyRepeatedFailure', False),
('include-test-summary', 'includeTestSummary', False),
('commit-info-choice', 'commitInfoChoice', 'NONE'),
('include-custom-message', 'includeCustomMessage', False),
('custom-message', 'customMessage', ''),
)
commit_info_choices = ['NONE', 'AUTHORS', 'AUTHORS_AND_TITLES']
slack = XML.SubElement(
xml_parent,
'jenkins.plugins.slack.SlackNotifier',
)
if plugin_ver >= pkg_resources.parse_version("2.0"):
mapping = mapping + mapping_20
if plugin_ver < pkg_resources.parse_version("2.0"):
for yaml_name, _, default_value in mapping:
# All arguments that don't have a default value are mandatory for
# the plugin to work as intended.
if not data.get(yaml_name, default_value):
raise MissingAttributeError(yaml_name)
for yaml_name, _, _ in mapping_20:
if yaml_name in data:
logger.warning(
"'%s' is invalid with plugin version < 2.0, ignored",
yaml_name,
)
for yaml_name, xml_name, default_value in mapping:
value = data.get(yaml_name, default_value)
# 'commit-info-choice' is enumerated type
if yaml_name == 'commit-info-choice':
if value not in commit_info_choices:
raise InvalidAttributeError(
yaml_name, value, commit_info_choices,
)
# Ensure that custom-message is set when include-custom-message is set
# to true.
        if yaml_name == 'include-custom-message' and value:
if not data.get('custom-message', ''):
raise MissingAttributeError('custom-message')
_add_xml(slack, xml_name, value)
def phabricator(registry, xml_parent, data):
"""yaml: phabricator
Integrate with `Phabricator <http://phabricator.org/>`_
Requires the Jenkins :jenkins-wiki:`Phabricator Plugin
<Phabricator+Differential+Plugin>`.
:arg bool comment-on-success: Post a *comment* when the build
succeeds. (optional)
:arg bool uberalls-enabled: Integrate with uberalls. (optional)
:arg str comment-file: Include contents of given file if
commenting is enabled. (optional)
:arg int comment-size: Maximum comment character length. (optional)
:arg bool comment-with-console-link-on-failure: Post a *comment*
when the build fails. (optional)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/phabricator001.yaml
:language: yaml
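
    A hypothetical snippet using some of the optional arguments above
    (values are illustrative)::

        publishers:
          - phabricator:
              comment-on-success: true
              comment-file: .phabricator-comment
              comment-size: 1000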
"""
root = XML.SubElement(xml_parent,
'com.uber.jenkins.phabricator.PhabricatorNotifier')
if 'comment-on-success' in data:
XML.SubElement(root, 'commentOnSuccess').text = str(
data.get('comment-on-success')).lower()
if 'uberalls-enabled' in data:
XML.SubElement(root, 'uberallsEnabled').text = str(
data.get('uberalls-enabled')).lower()
if 'comment-file' in data:
XML.SubElement(root, 'commentFile').text = data.get('comment-file')
if 'comment-size' in data:
XML.SubElement(root, 'commentSize').text = str(
data.get('comment-size'))
if 'comment-with-console-link-on-failure' in data:
XML.SubElement(root, 'commentWithConsoleLinkOnFailure').text = str(
data.get('comment-with-console-link-on-failure')).lower()
def openshift_build_canceller(registry, xml_parent, data):
"""yaml: openshift-build-canceller
    This action is intended to provide cleanup for a Jenkins job which failed
    because a build is hung (instead of terminating with a failure code).
    This step allows you to perform the equivalent of an `oc cancel-build`
    for the provided build config; any builds under that build config which
    were not previously terminated (either successfully or unsuccessfully)
    or cancelled will be cancelled.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
:arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you
want to run a Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-build-canceller001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-build-canceller002.yaml
:language: yaml
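
    For orientation, a hypothetical snippet overriding the defaults above
    (the server URL and names are illustrative)::

        publishers:
          - openshift-build-canceller:
              api-url: https://openshift.example.local
              bld-cfg: frontend
              namespace: test
              verbose: true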
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftBuildCanceller')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("bld-cfg", 'bldCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_deploy_canceller(registry, xml_parent, data):
"""yaml: openshift-deploy-canceller
    This action is intended to provide cleanup for any OpenShift deployments
    left running when the job completes; this step allows you to perform the
    equivalent of an `oc deploy --cancel` for the provided deployment config.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the DeploymentConfig you want to
        run a deployment on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-deploy-canceller001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/openshift-deploy-canceller002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftDeployCanceller')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("dep-cfg", 'depCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def github_pull_request_merge(registry, xml_parent, data):
"""yaml: github-pull-request-merge
This action merges the pull request that triggered the build (see the
github pull request trigger)
Requires the Jenkins :jenkins-wiki:`GitHub pull request builder plugin
<GitHub+pull+request+builder+plugin>`.
    :arg bool only-admins-merge: if `true` only administrators can merge the
        pull request. (default false)
    :arg bool disallow-own-code: if `true` the author of the pull request
        cannot trigger its merge; someone else must trigger it.
        (default false)
:arg str merge-comment: Comment to set on the merge commit (default '')
:arg bool fail-on-non-merge: fail the job if the merge was unsuccessful
(default false)
:arg bool delete-on-merge: Delete the branch of the pull request on
successful merge (default false)
Full Example:
.. literalinclude::
../../tests/publishers/fixtures/github-pull-request-merge001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/publishers/fixtures/github-pull-request-merge002.yaml
:language: yaml
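
    A hypothetical snippet exercising a few of the options above (values are
    illustrative)::

        publishers:
          - github-pull-request-merge:
              only-admins-merge: true
              merge-comment: 'Merged by Jenkins'
              delete-on-merge: true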
"""
osb = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.ghprb.GhprbPullRequestMerge')
mapping = [
# option, xml name, default value
("only-admins-merge", 'onlyAdminsMerge', 'false'),
("disallow-own-code", 'disallowOwnCode', 'false'),
("merge-comment", 'mergeComment', ''),
("fail-on-non-merge", 'failOnNonMerge', 'false'),
("delete-on-merge", 'deleteOnMerge', 'false'),
]
helpers.convert_mapping_to_xml(osb, data, mapping, fail_required=True)
class Publishers(jenkins_jobs.modules.base.Base):
sequence = 70
component_type = 'publisher'
component_list_type = 'publishers'
def gen_xml(self, xml_parent, data):
publishers = XML.SubElement(xml_parent, 'publishers')
for action in data.get('publishers', []):
self.registry.dispatch('publisher', publishers, action) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/publishers.py | publishers.py |
import itertools

import jenkins_jobs.modules.base
def zuul():
"""yaml: zuul
Configure this job to be triggered by Zuul.
Adds parameters describing the change triggering the build such as the
branch name, change number and patchset.
See parameters `expected by Zuul`_.
Example::
triggers:
- zuul
"""
def zuul_post():
"""yaml: zuul-post
Configure this post-merge job to be triggered by Zuul.
    Adds parameters describing the reference update triggering the build,
    which are the previous and next revisions in full (40-character
    hexadecimal SHA-1) and short form.
See parameters `expected by Zuul`_.
Example::
triggers:
- zuul-post
"""
ZUUL_PARAMETERS = [
{'string':
{'description': 'Zuul provided key to link builds with Gerrit events',
'name': 'ZUUL_UUID'}},
{'string':
{'description': 'Zuul provided key to link builds with Gerrit'
' events (deprecated use ZUUL_UUID instead)',
'name': 'UUID'}},
{'string':
{'description': 'Zuul pipeline triggering this job',
'name': 'ZUUL_PIPELINE'}},
{'string':
{'description': 'URL of Zuul\'s git repos accessible to workers',
'name': 'ZUUL_URL'}},
{'string':
     {'description': 'Name of triggering project',
'name': 'ZUUL_PROJECT'}},
{'string':
{'description': 'Branch name of triggering change',
'name': 'ZUUL_BRANCH'}},
{'string':
{'description': 'List of dependent changes to merge',
'name': 'ZUUL_CHANGES'}},
{'string':
{'description': 'Reference for the merged commit(s) to use',
'name': 'ZUUL_REF'}},
{'string':
{'description': 'The commit SHA1 at the head of ZUUL_REF',
'name': 'ZUUL_COMMIT'}},
{'string':
{'description': 'List of included changes',
'name': 'ZUUL_CHANGE_IDS'}},
{'string':
{'description': 'ID of triggering change',
'name': 'ZUUL_CHANGE'}},
{'string':
{'description': 'Patchset of triggering change',
'name': 'ZUUL_PATCHSET'}},
{'string':
{'description': 'Zuul considered this job voting or not',
'name': 'ZUUL_VOTING'}},
]
ZUUL_POST_PARAMETERS = [
{'string':
{'description': 'Zuul provided key to link builds with Gerrit events',
'name': 'ZUUL_UUID'}},
{'string':
{'description': 'Zuul provided key to link builds with Gerrit'
' events (deprecated use ZUUL_UUID instead)',
'name': 'UUID'}},
{'string':
{'description': 'Zuul pipeline triggering this job',
'name': 'ZUUL_PIPELINE'}},
{'string':
{'description': 'URL of Zuul\'s git repos accessible to workers',
'name': 'ZUUL_URL'}},
{'string':
     {'description': 'Name of triggering project',
'name': 'ZUUL_PROJECT'}},
{'string':
{'description': 'Name of updated reference triggering this job',
'name': 'ZUUL_REF'}},
{'string':
{'description': 'Name of updated reference triggering this job',
'name': 'ZUUL_REFNAME'}},
{'string':
{'description': 'Old SHA at this reference',
'name': 'ZUUL_OLDREV'}},
{'string':
{'description': 'New SHA at this reference',
'name': 'ZUUL_NEWREV'}},
{'string':
{'description': 'Shortened new SHA at this reference',
'name': 'ZUUL_SHORT_NEWREV'}},
]
class Zuul(jenkins_jobs.modules.base.Base):
sequence = 0
def handle_data(self, job_data):
changed = False
jobs = itertools.chain(
job_data.get('job', {}).values(),
job_data.get('job-template', {}).values())
for job in jobs:
triggers = job.get('triggers')
if not triggers:
continue
if ('zuul' not in job.get('triggers', []) and
'zuul-post' not in job.get('triggers', [])):
continue
if 'parameters' not in job:
job['parameters'] = []
if 'zuul' in job.get('triggers', []):
job['parameters'].extend(ZUUL_PARAMETERS)
job['triggers'].remove('zuul')
if 'zuul-post' in job.get('triggers', []):
job['parameters'].extend(ZUUL_POST_PARAMETERS)
job['triggers'].remove('zuul-post')
changed = True
return changed | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/zuul.py | zuul.py |
import logging
import xml.etree.ElementTree as XML

import pkg_resources

from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.modules.helpers import append_git_revision_config
from jenkins_jobs.modules.helpers import cloudformation_init
from jenkins_jobs.modules.helpers import cloudformation_region_dict
from jenkins_jobs.modules.helpers import cloudformation_stack
from jenkins_jobs.modules.helpers import config_file_provider_builder
from jenkins_jobs.modules.helpers import config_file_provider_settings
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
from jenkins_jobs.modules.helpers import copyartifact_build_selector
from jenkins_jobs.modules.publishers import ssh
logger = logging.getLogger(__name__)
def shell(registry, xml_parent, data):
"""yaml: shell
Execute a shell command.
:arg str parameter: the shell command to execute
Example:
.. literalinclude:: /../../tests/builders/fixtures/shell.yaml
:language: yaml
"""
shell = XML.SubElement(xml_parent, 'hudson.tasks.Shell')
XML.SubElement(shell, 'command').text = data
def python(registry, xml_parent, data):
"""yaml: python
Execute a python command. Requires the Jenkins :jenkins-wiki:`Python plugin
<Python+Plugin>`.
:arg str parameter: the python command to execute
Example:
.. literalinclude:: /../../tests/builders/fixtures/python.yaml
:language: yaml
"""
python = XML.SubElement(xml_parent, 'hudson.plugins.python.Python')
XML.SubElement(python, 'command').text = data
def copyartifact(registry, xml_parent, data):
"""yaml: copyartifact
Copy artifact from another project. Requires the :jenkins-wiki:`Copy
Artifact plugin <Copy+Artifact+Plugin>`.
    Please note that using `multijob-build` for the `which-build` argument
    requires the :jenkins-wiki:`Multijob plugin <Multijob+Plugin>`.
:arg str project: Project to copy from
:arg str filter: what files to copy
:arg str target: Target base directory for copy, blank means use workspace
:arg bool flatten: Flatten directories (default false)
:arg bool optional: If the artifact is missing (for any reason) and
optional is true, the build won't fail because of this builder
(default false)
:arg bool do-not-fingerprint: Disable automatic fingerprinting of copied
artifacts (default false)
:arg str which-build: which build to get artifacts from
(optional, default last-successful)
:which-build values:
* **last-successful**
* **last-completed**
* **specific-build**
* **last-saved**
* **upstream-build**
* **permalink**
* **workspace-latest**
* **build-param**
* **downstream-build**
* **multijob-build**
    :arg str build-number: specifies the build number to get when
        specific-build is specified as which-build
:arg str permalink: specifies the permalink to get when
permalink is specified as which-build
:permalink values:
* **last**
* **last-stable**
* **last-successful**
* **last-failed**
* **last-unstable**
* **last-unsuccessful**
:arg bool stable: specifies to get only last stable build when
last-successful is specified as which-build
:arg bool fallback-to-last-successful: specifies to fallback to
last successful build when upstream-build is specified as which-build
:arg string param: specifies to use a build parameter to get the build when
build-param is specified as which-build
:arg str upstream-project-name: specifies the project name of downstream
when downstream-build is specified as which-build
:arg str upstream-build-number: specifies the number of the build to
find its downstream build when downstream-build is specified as
which-build
:arg string parameter-filters: Filter matching jobs based on these
parameters (optional)
Example:
.. literalinclude:: ../../tests/builders/fixtures/copy-artifact001.yaml
:language: yaml
Multijob Example:
.. literalinclude:: ../../tests/builders/fixtures/copy-artifact004.yaml
:language: yaml
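
    A hypothetical snippet copying artifacts from the last stable build of an
    illustrative upstream job::

        builders:
          - copyartifact:
              project: upstream-job
              filter: '*.tar.gz'
              target: artifacts/
              which-build: last-successful
              stable: true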
"""
t = XML.SubElement(xml_parent, 'hudson.plugins.copyartifact.CopyArtifact')
mappings = [
# Warning: this only works with copy artifact version 1.26+,
# for copy artifact version 1.25- the 'projectName' element needs
# to be used instead of 'project'
('project', 'project', None),
('filter', 'filter', ''),
('target', 'target', ''),
('flatten', 'flatten', False),
('optional', 'optional', False),
('do-not-fingerprint', 'doNotFingerprintArtifacts', False),
('parameter-filters', 'parameters', '')
]
convert_mapping_to_xml(t, data, mappings, fail_required=True)
copyartifact_build_selector(t, data)
def change_assembly_version(registry, xml_parent, data):
"""yaml: change-assembly-version
Change the assembly version.
    Requires the Jenkins :jenkins-wiki:`Change Assembly Version Plugin
    <Change+Assembly+Version>`.
    :arg str version: Set the new version number for replace (default 1.0.0)
    :arg str assembly-file: The file name to search (default AssemblyInfo.cs)
Example:
.. literalinclude::
/../../tests/builders/fixtures/changeassemblyversion001.yaml
:language: yaml
"""
cav_builder_tag = ('org.jenkinsci.plugins.changeassemblyversion.'
'ChangeAssemblyVersion')
cav = XML.SubElement(xml_parent, cav_builder_tag)
XML.SubElement(cav, 'task').text = data.get('version', '1.0.0')
XML.SubElement(cav, 'assemblyFile').text = str(
data.get('assembly-file', 'AssemblyInfo.cs'))
def fingerprint(registry, xml_parent, data):
"""yaml: fingerprint
Adds the ability to generate fingerprints as build steps instead of waiting
for a build to complete. Requires the Jenkins :jenkins-wiki:`Fingerprint
Plugin <Fingerprint+Plugin>`.
:arg str targets: Files to fingerprint (default '')
Full Example:
.. literalinclude::
/../../tests/builders/fixtures/fingerprint-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/builders/fixtures/fingerprint-minimal.yaml
:language: yaml
"""
fingerprint = XML.SubElement(
xml_parent, 'hudson.plugins.createfingerprint.CreateFingerprint')
fingerprint.set('plugin', 'create-fingerprint')
mapping = [('targets', 'targets', '')]
convert_mapping_to_xml(fingerprint, data, mapping, fail_required=True)
def ant(registry, xml_parent, data):
"""yaml: ant
Execute an ant target. Requires the Jenkins :jenkins-wiki:`Ant Plugin
<Ant+Plugin>`.
To setup this builder you can either reference the list of targets
or use named parameters. Below is a description of both forms:
*1) Listing targets:*
    After the ant directive, simply pass as argument a space-separated list
    of targets to build.
    :Parameter: space-separated list of Ant targets
Example to call two Ant targets:
.. literalinclude:: ../../tests/builders/fixtures/ant001.yaml
:language: yaml
    The build file would be whatever the Jenkins Ant Plugin is set to use
    by default (i.e. build.xml in the workspace root).
*2) Using named parameters:*
    :arg str targets: the space-separated list of Ant targets.
    :arg str buildfile: the path to the Ant build file.
    :arg dict properties: dict of properties, passed to the ant script using
        -Dkey=value (optional)
:arg str ant-name: the name of the ant installation,
(default 'default') (optional)
:arg str java-opts: java options for ant, can have multiples,
must be in quotes (optional)
Example specifying the build file too and several targets:
.. literalinclude:: ../../tests/builders/fixtures/ant002.yaml
:language: yaml
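
    A hypothetical named-parameter snippet (values are illustrative)::

        builders:
          - ant:
              targets: 'clean test'
              buildfile: 'build.xml'
              properties:
                builddir: '/tmp/'
              ant-name: 'default'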
"""
ant = XML.SubElement(xml_parent, 'hudson.tasks.Ant')
    if isinstance(data, str):
# Support for short form: -ant: "target"
data = {'targets': data}
for setting, value in sorted(data.items()):
if setting == 'targets':
targets = XML.SubElement(ant, 'targets')
targets.text = value
if setting == 'buildfile':
buildfile = XML.SubElement(ant, 'buildFile')
buildfile.text = value
if setting == 'properties':
properties = data['properties']
prop_string = ''
for prop, val in properties.items():
prop_string += "%s=%s\n" % (prop, val)
prop_element = XML.SubElement(ant, 'properties')
prop_element.text = prop_string
if setting == 'java-opts':
javaopts = data['java-opts']
jopt_string = ' '.join(javaopts)
jopt_element = XML.SubElement(ant, 'antOpts')
jopt_element.text = jopt_string
XML.SubElement(ant, 'antName').text = data.get('ant-name', 'default')
def trigger_remote(registry, xml_parent, data):
"""yaml: trigger-remote
Trigger build of job on remote Jenkins instance.
:jenkins-wiki:`Parameterized Remote Trigger Plugin
<Parameterized+Remote+Trigger+Plugin>`
    Please note that this plugin requires system configuration on the Jenkins
    master that is unavailable from individual job views; specifically, one
    must add remote Jenkins servers whose 'Display Name' values are the valid
    values for the `remote-jenkins-name` attribute below.
:arg str remote-jenkins-name: the remote Jenkins server (required)
:arg str job: the Jenkins project to trigger on the remote Jenkins server
(required)
:arg bool should-not-fail-build: if true, remote job failure will not lead
current job to fail (default false)
:arg bool prevent-remote-build-queue: if true, wait to trigger remote
builds until no other builds (default false)
:arg bool block: whether to wait for the trigger jobs to finish or not
(default true)
    :arg str poll-interval: polling interval in seconds for checking the
        status of the triggered remote job, only necessary if the current job
        is configured to block (default 10)
:arg str connection-retry-limit: number of connection attempts to remote
Jenkins server before giving up. (default 5)
:arg str predefined-parameters: predefined parameters to send to the remote
job when triggering it (optional)
:arg str property-file: file in workspace of current job containing
additional parameters to be set on remote job
(optional)
Example:
.. literalinclude::
/../../tests/builders/fixtures/trigger-remote/trigger-remote001.yaml
:language: yaml
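
    A hypothetical minimal snippet (the server name must match a remote
    Jenkins configured on the master; all values are illustrative)::

        builders:
          - trigger-remote:
              remote-jenkins-name: remote-ci
              job: downstream-job
              predefined-parameters: FOO=bar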
"""
triggerr = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.'
'ParameterizedRemoteTrigger.'
'RemoteBuildConfiguration')
XML.SubElement(triggerr,
'remoteJenkinsName').text = data.get('remote-jenkins-name')
XML.SubElement(triggerr, 'token').text = data.get('token', '')
for attribute in ['job', 'remote-jenkins-name']:
if attribute not in data:
raise MissingAttributeError(attribute, "builders.trigger-remote")
if data[attribute] == '':
raise InvalidAttributeError(attribute,
data[attribute],
"builders.trigger-remote")
XML.SubElement(triggerr, 'job').text = data.get('job')
XML.SubElement(triggerr, 'shouldNotFailBuild').text = str(
data.get('should-not-fail-build', False)).lower()
XML.SubElement(triggerr,
'pollInterval').text = str(data.get('poll-interval', 10))
XML.SubElement(triggerr, 'connectionRetryLimit').text = str(
data.get('connection-retry-limit', 5))
XML.SubElement(triggerr, 'preventRemoteBuildQueue').text = str(
data.get('prevent-remote-build-queue', False)).lower()
XML.SubElement(triggerr, 'blockBuildUntilComplete').text = str(
data.get('block', True)).lower()
if 'predefined-parameters' in data:
parameters = XML.SubElement(triggerr, 'parameters')
parameters.text = data.get('predefined-parameters', '')
params_list = parameters.text.split("\n")
parameter_list = XML.SubElement(triggerr, 'parameterList')
for param in params_list:
if param == '':
continue
tmp = XML.SubElement(parameter_list, 'string')
tmp.text = param
if 'property-file' in data and data['property-file'] != '':
XML.SubElement(triggerr, 'loadParamsFromFile').text = 'true'
XML.SubElement(triggerr,
'parameterFile').text = data.get('property-file')
else:
XML.SubElement(triggerr, 'loadParamsFromFile').text = 'false'
XML.SubElement(triggerr, 'overrideAuth').text = "false"
def trigger_builds(registry, xml_parent, data):
"""yaml: trigger-builds
Trigger builds of other jobs.
Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
<Parameterized+Trigger+Plugin>`.
:arg list project: the Jenkins project to trigger
:arg str predefined-parameters: key/value pairs to be passed to the job
(optional)
:arg list bool-parameters:
:Bool:
* **name** (`str`) -- Parameter name
* **value** (`bool`) -- Value to set (default false)
:arg str property-file:
Pass properties from file to the other job (optional)
:arg bool property-file-fail-on-missing:
Don't trigger if any files are missing (default true)
    :arg bool current-parameters: Whether to include the parameters passed
        to the current build in the triggered job.
:arg str node-label-name: Define a name for the NodeLabel parameter to be
set. Used in conjunction with node-label. Requires NodeLabel Parameter
Plugin (optional)
:arg str node-label: Label of the nodes where build should be triggered.
Used in conjunction with node-label-name. Requires NodeLabel Parameter
Plugin (optional)
:arg str restrict-matrix-project: Filter that restricts the subset
of the combinations that the triggered job will run (optional)
:arg bool svn-revision: Whether to pass the svn revision to the triggered
job (optional)
:arg dict git-revision: Passes git revision to the triggered job
(optional).
* **combine-queued-commits** (bool): Whether to combine queued git
hashes or not (default false)
:arg bool block: whether to wait for the triggered jobs to finish or not
(default false)
:arg dict block-thresholds: Fail builds and/or mark as failed or unstable
based on thresholds. Only apply if block parameter is true (optional)
:block-thresholds:
* **build-step-failure-threshold** (`str`) - build step failure
threshold, valid values are 'never', 'SUCCESS', 'UNSTABLE', or
'FAILURE'. (default 'FAILURE')
* **unstable-threshold** (`str`) - unstable threshold, valid
values are 'never', 'SUCCESS', 'UNSTABLE', or 'FAILURE'.
(default 'UNSTABLE')
* **failure-threshold** (`str`) - overall failure threshold, valid
values are 'never', 'SUCCESS', 'UNSTABLE', or 'FAILURE'.
(default 'FAILURE')
:arg bool same-node: Use the same node for the triggered builds that was
used for this build (optional)
:arg list parameter-factories: list of parameter factories
:Factory:
* **factory** (`str`) **filebuild** -- For every property file,
invoke one build
* **file-pattern** (`str`) -- File wildcard pattern
* **no-files-found-action** (`str`) -- Action to perform when
no files found. Valid values 'FAIL', 'SKIP', or 'NOPARMS'.
(default 'SKIP')
:Factory:
* **factory** (`str`) **binaryfile** -- For every matching
file, invoke one build
            * **parameter-name** (`str`) -- Name of the parameter to set
              with each matched file (required)
            * **file-pattern** (`str`) -- File wildcard pattern
* **no-files-found-action** (`str`) -- Action to perform when
no files found. Valid values 'FAIL', 'SKIP', or 'NOPARMS'.
(default 'SKIP')
:Factory:
* **factory** (`str`) **counterbuild** -- Invoke i=0...N builds
            * **from** (`int`) -- Starting value of the counter
            * **to** (`int`) -- Ending value of the counter
            * **step** (`int`) -- Increment of the counter
* **parameters** (`str`) -- KEY=value pairs, one per line
(default '')
* **validation-fail** (`str`) -- Action to perform when
stepping validation fails. Valid values 'FAIL', 'SKIP', or
'NOPARMS'. (default 'FAIL')
:Factory:
* **factory** (`str`) **allnodesforlabel** -- Trigger a build
on all nodes having specific label. Requires NodeLabel
Parameter Plugin (optional)
* **name** (`str`) -- Name of the parameter to set (optional)
* **node-label** (`str`) -- Label of the nodes where build
should be triggered
* **ignore-offline-nodes** (`bool`) -- Don't trigger build on
offline nodes (default true)
:Factory:
* **factory** (`str`) **allonlinenodes** -- Trigger a build on
every online node. Requires NodeLabel Parameter Plugin (optional)
Examples:
Basic usage with yaml list of projects.
.. literalinclude::
/../../tests/builders/fixtures/trigger-builds/project-list.yaml
:language: yaml
Basic usage with passing svn revision through.
.. literalinclude:: /../../tests/builders/fixtures/trigger-builds001.yaml
:language: yaml
Basic usage with passing git revision through.
.. literalinclude:: /../../tests/builders/fixtures/trigger-builds006.yaml
:language: yaml
Example with all supported parameter factories.
.. literalinclude::
/../../tests/builders/fixtures/trigger-builds-configfactory-multi.yaml
:language: yaml
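
    A hypothetical snippet passing predefined parameters and blocking until
    the triggered job finishes (values are illustrative)::

        builders:
          - trigger-builds:
              - project: 'downstream-job'
                predefined-parameters: FOO=bar
                block: true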
"""
tbuilder = XML.SubElement(xml_parent,
'hudson.plugins.parameterizedtrigger.'
'TriggerBuilder')
configs = XML.SubElement(tbuilder, 'configs')
for project_def in data:
if 'project' not in project_def or project_def['project'] == '':
logger.debug("No project specified - skipping trigger-build")
continue
tconfig = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'BlockableBuildTriggerConfig')
tconfigs = XML.SubElement(tconfig, 'configs')
        if project_def.get('current-parameters'):
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
        if project_def.get('svn-revision'):
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'SubversionRevisionBuildParameters')
        if project_def.get('git-revision'):
append_git_revision_config(tconfigs, project_def['git-revision'])
        if project_def.get('same-node'):
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'NodeParameters')
if 'property-file' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
propertiesFile = XML.SubElement(params, 'propertiesFile')
propertiesFile.text = project_def['property-file']
failTriggerOnMissing = XML.SubElement(params,
'failTriggerOnMissing')
failTriggerOnMissing.text = str(project_def.get(
'property-file-fail-on-missing', True)).lower()
if 'predefined-parameters' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = project_def['predefined-parameters']
if 'bool-parameters' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'BooleanParameters')
configs = XML.SubElement(params, 'configs')
for bool_param in project_def['bool-parameters']:
param = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'BooleanParameterConfig')
XML.SubElement(param, 'name').text = str(bool_param['name'])
XML.SubElement(param, 'value').text = str(
bool_param.get('value', False)).lower()
if 'node-label-name' in project_def and 'node-label' in project_def:
node = XML.SubElement(tconfigs, 'org.jvnet.jenkins.plugins.'
'nodelabelparameter.parameterizedtrigger.'
'NodeLabelBuildParameter')
XML.SubElement(node, 'name').text = project_def['node-label-name']
XML.SubElement(node, 'nodeLabel').text = project_def['node-label']
if 'restrict-matrix-project' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(params, 'filter').text = project_def[
'restrict-matrix-project']
        if len(list(tconfigs)) == 0:
tconfigs.set('class', 'java.util.Collections$EmptyList')
if 'parameter-factories' in project_def:
fconfigs = XML.SubElement(tconfig, 'configFactories')
supported_factories = ['filebuild',
'binaryfile',
'counterbuild',
'allnodesforlabel',
'allonlinenodes']
supported_actions = ['SKIP', 'NOPARMS', 'FAIL']
for factory in project_def['parameter-factories']:
if factory['factory'] not in supported_factories:
raise InvalidAttributeError('factory',
factory['factory'],
supported_factories)
if factory['factory'] == 'filebuild':
params = XML.SubElement(
fconfigs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameterFactory')
if factory['factory'] == 'binaryfile':
params = XML.SubElement(
fconfigs,
'hudson.plugins.parameterizedtrigger.'
'BinaryFileParameterFactory')
parameterName = XML.SubElement(params, 'parameterName')
parameterName.text = factory['parameter-name']
if (factory['factory'] == 'filebuild' or
factory['factory'] == 'binaryfile'):
filePattern = XML.SubElement(params, 'filePattern')
filePattern.text = factory['file-pattern']
noFilesFoundAction = XML.SubElement(
params,
'noFilesFoundAction')
noFilesFoundActionValue = str(factory.get(
'no-files-found-action', 'SKIP'))
if noFilesFoundActionValue not in supported_actions:
raise InvalidAttributeError('no-files-found-action',
noFilesFoundActionValue,
supported_actions)
noFilesFoundAction.text = noFilesFoundActionValue
if factory['factory'] == 'counterbuild':
params = XML.SubElement(
fconfigs,
'hudson.plugins.parameterizedtrigger.'
'CounterBuildParameterFactory')
fromProperty = XML.SubElement(params, 'from')
fromProperty.text = str(factory['from'])
toProperty = XML.SubElement(params, 'to')
toProperty.text = str(factory['to'])
stepProperty = XML.SubElement(params, 'step')
stepProperty.text = str(factory['step'])
paramExpr = XML.SubElement(params, 'paramExpr')
paramExpr.text = str(factory.get(
'parameters', ''))
validationFail = XML.SubElement(params, 'validationFail')
validationFailValue = str(factory.get(
'validation-fail', 'FAIL'))
if validationFailValue not in supported_actions:
raise InvalidAttributeError('validation-fail',
validationFailValue,
supported_actions)
validationFail.text = validationFailValue
if factory['factory'] == 'allnodesforlabel':
params = XML.SubElement(
fconfigs,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'parameterizedtrigger.'
'AllNodesForLabelBuildParameterFactory')
nameProperty = XML.SubElement(params, 'name')
nameProperty.text = str(factory.get(
'name', ''))
nodeLabel = XML.SubElement(params, 'nodeLabel')
nodeLabel.text = str(factory['node-label'])
ignoreOfflineNodes = XML.SubElement(
params,
'ignoreOfflineNodes')
ignoreOfflineNodes.text = str(factory.get(
'ignore-offline-nodes', True)).lower()
if factory['factory'] == 'allonlinenodes':
params = XML.SubElement(
fconfigs,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'parameterizedtrigger.'
'AllNodesBuildParameterFactory')
projects = XML.SubElement(tconfig, 'projects')
if isinstance(project_def['project'], list):
projects.text = ",".join(project_def['project'])
else:
projects.text = project_def['project']
condition = XML.SubElement(tconfig, 'condition')
condition.text = 'ALWAYS'
trigger_with_no_params = XML.SubElement(tconfig,
'triggerWithNoParameters')
trigger_with_no_params.text = 'false'
build_all_nodes_with_label = XML.SubElement(tconfig,
'buildAllNodesWithLabel')
build_all_nodes_with_label.text = 'false'
block = project_def.get('block', False)
if block:
block = XML.SubElement(tconfig, 'block')
supported_thresholds = [['build-step-failure-threshold',
'buildStepFailureThreshold',
'FAILURE'],
['unstable-threshold',
'unstableThreshold',
'UNSTABLE'],
['failure-threshold',
'failureThreshold',
'FAILURE']]
supported_threshold_values = ['never',
hudson_model.SUCCESS['name'],
hudson_model.UNSTABLE['name'],
hudson_model.FAILURE['name']]
thrsh = project_def.get('block-thresholds', False)
for toptname, txmltag, tvalue in supported_thresholds:
if thrsh:
tvalue = thrsh.get(toptname, tvalue)
if tvalue.lower() == supported_threshold_values[0]:
continue
if tvalue.upper() not in supported_threshold_values:
raise InvalidAttributeError(toptname,
tvalue,
supported_threshold_values)
th = XML.SubElement(block, txmltag)
XML.SubElement(th, 'name').text = hudson_model.THRESHOLDS[
tvalue.upper()]['name']
XML.SubElement(th, 'ordinal').text = hudson_model.THRESHOLDS[
tvalue.upper()]['ordinal']
XML.SubElement(th, 'color').text = hudson_model.THRESHOLDS[
tvalue.upper()]['color']
XML.SubElement(th, 'completeBuild').text = "true"
# If configs is empty, remove the entire tbuilder tree.
    if len(configs) == 0:
logger.debug("Pruning empty TriggerBuilder tree.")
xml_parent.remove(tbuilder)
def builders_from(registry, xml_parent, data):
"""yaml: builders-from
Use builders from another project.
Requires the Jenkins :jenkins-wiki:`Template Project Plugin
<Template+Project+Plugin>`.
:arg str projectName: the name of the other project
Example:
.. literalinclude:: ../../tests/builders/fixtures/builders-from.yaml
:language: yaml
"""
pbs = XML.SubElement(xml_parent,
'hudson.plugins.templateproject.ProxyBuilder')
XML.SubElement(pbs, 'projectName').text = data
def http_request(registry, xml_parent, data):
"""yaml: http-request
    This plugin sends an HTTP request to a URL with some parameters.
Requires the Jenkins :jenkins-wiki:`HTTP Request Plugin
<HTTP+Request+Plugin>`.
    :arg str url: Specify a URL to be requested (required)
:arg str mode: The http mode of the request (default GET)
:mode values:
* **GET**
* **POST**
* **PUT**
* **DELETE**
* **HEAD**
:arg str content-type: Add 'Content-type: foo' HTTP request headers
where foo is the http content-type the request is using.
(default NOT_SET)
:arg str accept-type: Add 'Accept: foo' HTTP request headers
where foo is the http content-type to accept (default NOT_SET)
:content-type and accept-type values:
* **NOT_SET**
* **TEXT_HTML**
* **APPLICATION_JSON**
* **APPLICATION_TAR**
* **APPLICATION_ZIP**
* **APPLICATION_OCTETSTREAM**
:arg str output-file: Name of the file in which to write response data
(default '')
:arg int time-out: Specify a timeout value in seconds (default 0)
    :arg bool console-log: Write the response body to the console log
        (default false)
:arg bool pass-build: Should build parameters be passed to the URL
being called (default false)
    :arg str valid-response-codes: Configure response codes to mark an
        execution as success. You can configure a single code such as "200"
        or multiple codes separated by commas (','), e.g. "200,404,500".
        An interval of codes should be in the format From:To, e.g. "100:399".
        The default (when empty) is to fail on 4xx and 5xx, i.e. success
        means codes from 100 to 399 ("100:399").
        To ignore any response code use "100:599". (default '')
:arg str valid-response-content: If set response must contain this string
to mark an execution as success (default '')
:arg str authentication-key: Authentication that will be used before this
request. Authentications are created in global configuration under a
key name that is selected here.
:arg list custom-headers: list of header parameters
:custom-header:
* **name** (`str`) -- Name of the header
* **value** (`str`) -- Value of the header
Example:
.. literalinclude:: ../../tests/builders/fixtures/http-request-minimal.yaml
:language: yaml
.. literalinclude::
../../tests/builders/fixtures/http-request-complete.yaml
:language: yaml
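
    A hypothetical snippet posting to an illustrative URL::

        builders:
          - http-request:
              url: https://example.com/api/trigger
              mode: POST
              content-type: APPLICATION_JSON
              valid-response-codes: '200:299'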
"""
http_request = XML.SubElement(xml_parent,
'jenkins.plugins.http__request.HttpRequest')
http_request.set('plugin', 'http_request')
valid_modes = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD']
valid_types = ['NOT_SET', 'TEXT_HTML', 'APPLICATION_JSON',
'APPLICATION_TAR', 'APPLICATION_ZIP',
'APPLICATION_OCTETSTREAM']
mappings = [
('url', 'url', None),
('mode', 'httpMode', 'GET', valid_modes),
('content-type', 'contentType', 'NOT_SET', valid_types),
('accept-type', 'acceptType', 'NOT_SET', valid_types),
('output-file', 'outputFile', ''),
('console-log', 'consoleLogResponseBody', False),
('pass-build', 'passBuildParameters', False),
('time-out', 'timeout', 0),
('valid-response-codes', 'validResponseCodes', ''),
('valid-response-content', 'validResponseContent', '')]
convert_mapping_to_xml(http_request, data, mappings, fail_required=True)
if 'authentication-key' in data:
XML.SubElement(
http_request, 'authentication').text = data['authentication-key']
if 'custom-headers' in data:
customHeader = XML.SubElement(http_request, 'customHeaders')
header_mappings = [
('name', 'name', None),
('value', 'value', None)
]
for customhead in data['custom-headers']:
pair = XML.SubElement(customHeader, 'pair')
convert_mapping_to_xml(pair,
customhead,
header_mappings,
fail_required=True)
def inject(registry, xml_parent, data):
"""yaml: inject
Inject an environment for the job.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin
<EnvInject+Plugin>`.
:arg str properties-file: the name of the property file (optional)
:arg str properties-content: the properties content (optional)
:arg str script-file: the name of a script file to run (optional)
:arg str script-content: the script content (optional)
Example:
.. literalinclude:: ../../tests/builders/fixtures/inject.yaml
:language: yaml
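
    A hypothetical snippet injecting both a properties file and literal
    properties (values are illustrative)::

        builders:
          - inject:
              properties-file: build.props
              properties-content: FOO=bar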
"""
eib = XML.SubElement(xml_parent, 'EnvInjectBuilder')
info = XML.SubElement(eib, 'info')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesFilePath', data.get('properties-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesContent', data.get('properties-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptFilePath', data.get('script-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptContent', data.get('script-content'))
def kmap(registry, xml_parent, data):
"""yaml: kmap
Publish mobile applications to your Keivox KMAP Private Mobile App Store.
Requires the Jenkins :jenkins-wiki:`Keivox KMAP Private Mobile App Store
Plugin <Keivox+KMAP+Private+Mobile+App+Store+Plugin>`.
:arg str username: KMAP's user email with permissions to upload/publish
applications to KMAP (required)
:arg str password: Password for the KMAP user uploading/publishing
applications (required)
:arg str url: KMAP's url. This url must always end with "/kmap-client/".
For example: http://testing.keivox.com/kmap-client/ (required)
:arg str categories: Categories' names. If you want to add the application
to more than one category, write the categories between commas.
(required)
:arg str file-path: Path to the application's file (required)
:arg str app-name: KMAP's application name (required)
    :arg str bundle: Bundle identifier (default '')
:arg str version: Application's version (required)
:arg str description: Application's description (default '')
:arg str icon-path: Path to the application's icon (default '')
:arg bool publish-optional: Publish application after it has been uploaded
to KMAP (default false)
:publish-optional:
* **groups** ('str') -- groups' names to publish the application
(default '')
* **users** ('str') -- users' names to publish the application
(default '')
* **notify-users** ('bool') -- Send notifications to the users and
groups when publishing the application (default false)
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/kmap-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/kmap-full.yaml
:language: yaml
"""
kmap = XML.SubElement(
xml_parent, 'org.jenkinsci.plugins.KmapJenkinsBuilder')
kmap.set('plugin', 'kmap-jenkins')
publish = data.get('publish-optional', False)
mapping = [
('username', 'username', None),
('password', 'password', None),
('url', 'kmapClient', None),
('categories', 'categories', None),
('file-path', 'filePath', None),
('app-name', 'appName', None),
('bundle', 'bundle', ''),
('version', 'version', None),
('description', 'description', ''),
('icon-path', 'iconPath', ''),
]
convert_mapping_to_xml(kmap, data, mapping, fail_required=True)
if publish is True:
publish_optional = XML.SubElement(kmap, 'publishOptional')
publish_mapping = [
('groups', 'teams', ''),
('users', 'users', ''),
('notify-users', 'sendNotifications', False),
]
convert_mapping_to_xml(
publish_optional, data, publish_mapping, fail_required=True)
def artifact_resolver(registry, xml_parent, data):
"""yaml: artifact-resolver
    Allows one to resolve artifacts from a Maven repository like Nexus
    (without having Maven installed).
Requires the Jenkins :jenkins-wiki:`Repository Connector Plugin
<Repository+Connector+Plugin>`.
:arg bool fail-on-error: Whether to fail the build on error (default false)
:arg bool repository-logging: Enable repository logging (default false)
:arg str target-directory: Where to resolve artifacts to
:arg list artifacts: list of artifacts to resolve
:Artifact:
* **group-id** (`str`) -- Group ID of the artifact
* **artifact-id** (`str`) -- Artifact ID of the artifact
* **version** (`str`) -- Version of the artifact
* **classifier** (`str`) -- Classifier of the artifact (default '')
* **extension** (`str`) -- Extension of the artifact
(default 'jar')
* **target-file-name** (`str`) -- What to name the artifact
(default '')
Example:
.. literalinclude:: ../../tests/builders/fixtures/artifact-resolver.yaml
:language: yaml
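
    A hypothetical snippet resolving a single artifact (coordinates are
    illustrative)::

        builders:
          - artifact-resolver:
              fail-on-error: true
              target-directory: lib/
              artifacts:
                - group-id: org.example
                  artifact-id: example-lib
                  version: '1.2.3'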
"""
ar = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.repositoryconnector.'
'ArtifactResolver')
XML.SubElement(ar, 'targetDirectory').text = data['target-directory']
artifacttop = XML.SubElement(ar, 'artifacts')
artifacts = data['artifacts']
for artifact in artifacts:
rcartifact = XML.SubElement(artifacttop,
'org.jvnet.hudson.plugins.'
'repositoryconnector.Artifact')
XML.SubElement(rcartifact, 'groupId').text = artifact['group-id']
XML.SubElement(rcartifact, 'artifactId').text = artifact['artifact-id']
XML.SubElement(rcartifact, 'classifier').text = artifact.get(
'classifier', '')
XML.SubElement(rcartifact, 'version').text = artifact['version']
XML.SubElement(rcartifact, 'extension').text = artifact.get(
'extension', 'jar')
XML.SubElement(rcartifact, 'targetFileName').text = artifact.get(
'target-file-name', '')
XML.SubElement(ar, 'failOnError').text = str(data.get(
'fail-on-error', False)).lower()
XML.SubElement(ar, 'enableRepoLogging').text = str(data.get(
'repository-logging', False)).lower()
XML.SubElement(ar, 'snapshotUpdatePolicy').text = 'never'
XML.SubElement(ar, 'releaseUpdatePolicy').text = 'never'
XML.SubElement(ar, 'snapshotChecksumPolicy').text = 'warn'
XML.SubElement(ar, 'releaseChecksumPolicy').text = 'warn'
def doxygen(registry, xml_parent, data):
"""yaml: doxygen
Builds doxygen HTML documentation. Requires the Jenkins
:jenkins-wiki:`Doxygen plugin <Doxygen+Plugin>`.
:arg str doxyfile: The doxyfile path (required)
:arg str install: The doxygen installation to use (required)
:arg bool ignore-failure: Keep executing build even on doxygen generation
failure (default false)
:arg bool unstable-warning: Mark the build as unstable if warnings are
generated (default false)
Example:
.. literalinclude:: /../../tests/builders/fixtures/doxygen001.yaml
:language: yaml
"""
doxygen = XML.SubElement(xml_parent,
'hudson.plugins.doxygen.DoxygenBuilder')
mappings = [
('doxyfile', 'doxyfilePath', None),
('install', 'installationName', None),
('ignore-failure', 'continueOnBuildFailure', False),
('unstable-warning', 'unstableIfWarnings', False)
]
convert_mapping_to_xml(doxygen, data, mappings, fail_required=True)
def gradle(registry, xml_parent, data):
"""yaml: gradle
Execute gradle tasks. Requires the Jenkins :jenkins-wiki:`Gradle Plugin
<Gradle+Plugin>`.
:arg str tasks: List of tasks to execute
:arg str gradle-name: Use a custom gradle name (optional)
:arg bool wrapper: use gradle wrapper (default false)
:arg bool executable: make gradlew executable (default false)
:arg list switches: Switches for gradle, can have multiples
:arg bool use-root-dir: Whether to run the gradle script from the
top level directory or from a different location (default false)
:arg str root-build-script-dir: If your workspace has the
top-level build.gradle in somewhere other than the module
root directory, specify the path (relative to the module
root) here, such as ${workspace}/parent/ instead of just
${workspace}.
Example:
.. literalinclude:: ../../tests/builders/fixtures/gradle.yaml
:language: yaml
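
    A hypothetical snippet running tasks through the gradle wrapper (values
    are illustrative)::

        builders:
          - gradle:
              tasks: 'clean build'
              wrapper: true
              executable: true
              switches:
                - '-PenvName=ci'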
"""
gradle = XML.SubElement(xml_parent, 'hudson.plugins.gradle.Gradle')
XML.SubElement(gradle, 'description').text = ''
XML.SubElement(gradle, 'tasks').text = data['tasks']
XML.SubElement(gradle, 'buildFile').text = ''
XML.SubElement(gradle, 'rootBuildScriptDir').text = data.get(
'root-build-script-dir', '')
XML.SubElement(gradle, 'gradleName').text = data.get(
'gradle-name', '')
XML.SubElement(gradle, 'useWrapper').text = str(data.get(
'wrapper', False)).lower()
XML.SubElement(gradle, 'makeExecutable').text = str(data.get(
'executable', False)).lower()
switch_string = '\n'.join(data.get('switches', []))
XML.SubElement(gradle, 'switches').text = switch_string
XML.SubElement(gradle, 'fromRootBuildScriptDir').text = str(data.get(
'use-root-dir', False)).lower()
def _groovy_common_scriptSource(data):
"""Helper function to generate the XML element common to groovy builders
"""
scriptSource = XML.Element("scriptSource")
if 'command' in data and 'file' in data:
raise JenkinsJobsException("Use just one of 'command' or 'file'")
if 'command' in data:
command = XML.SubElement(scriptSource, 'command')
command.text = str(data['command'])
scriptSource.set('class', 'hudson.plugins.groovy.StringScriptSource')
elif 'file' in data:
scriptFile = XML.SubElement(scriptSource, 'scriptFile')
scriptFile.text = str(data['file'])
scriptSource.set('class', 'hudson.plugins.groovy.FileScriptSource')
else:
raise JenkinsJobsException("A groovy command or file is required")
return scriptSource
def groovy(registry, xml_parent, data):
"""yaml: groovy
Execute a groovy script or command.
Requires the Jenkins :jenkins-wiki:`Groovy Plugin <Groovy+plugin>`.
    :arg str file: Groovy file to run. (Alternative: you can choose a command
        instead)
    :arg str command: Groovy command to run. (Alternative: you can choose a
        script file instead)
:arg str version: Groovy version to use. (default '(Default)')
:arg str parameters: Parameters for the Groovy executable. (default '')
:arg str script-parameters: These parameters will be passed to the script.
(default '')
:arg str properties: Instead of passing properties using the -D parameter
you can define them here. (default '')
    :arg str java-opts: Direct access to JAVA_OPTS. Properties allows only
        -D properties, while sometimes other properties like -XX need to be
        set up as well. It can be done here. This line is appended at the
        end of the JAVA_OPTS string. (default '')
:arg str class-path: Specify script classpath here. Each line is one
class path item. (default '')
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/groovy-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/groovy-full.yaml
:language: yaml
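
    A hypothetical command-form snippet (values are illustrative)::

        builders:
          - groovy:
              command: 'println "hello from groovy"'
              version: '(Default)'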
"""
root_tag = 'hudson.plugins.groovy.Groovy'
groovy = XML.SubElement(xml_parent, root_tag)
groovy.append(_groovy_common_scriptSource(data))
mappings = [
('version', 'groovyName', '(Default)'),
('parameters', 'parameters', ''),
('script-parameters', 'scriptParameters', ''),
('properties', 'properties', ''),
('java-opts', 'javaOpts', ''),
('class-path', 'classPath', '')
]
convert_mapping_to_xml(groovy, data, mappings, fail_required=True)
def system_groovy(registry, xml_parent, data):
"""yaml: system-groovy
Execute a system groovy script or command.
Requires the Jenkins :jenkins-wiki:`Groovy Plugin <Groovy+plugin>`.
    :arg str file: Groovy file to run. (Alternative: you can choose a command
        instead)
    :arg str command: Groovy command to run. (Alternative: you can choose a
        script file instead)
:arg str bindings: Define variable bindings (in the properties file
format). Specified variables can be addressed from the script.
(optional)
:arg str class-path: Specify script classpath here. Each line is one class
path item. (optional)
Examples:
.. literalinclude:: ../../tests/builders/fixtures/system-groovy001.yaml
:language: yaml
.. literalinclude:: ../../tests/builders/fixtures/system-groovy002.yaml
:language: yaml
"""
root_tag = 'hudson.plugins.groovy.SystemGroovy'
sysgroovy = XML.SubElement(xml_parent, root_tag)
sysgroovy.append(_groovy_common_scriptSource(data))
XML.SubElement(sysgroovy, 'bindings').text = str(data.get('bindings', ""))
XML.SubElement(sysgroovy, 'classpath').text = str(
data.get('class-path', ""))
def batch(registry, xml_parent, data):
"""yaml: batch
Execute a batch command.
:Parameter: the batch command to execute
Example:
.. literalinclude:: ../../tests/builders/fixtures/batch.yaml
:language: yaml
"""
batch = XML.SubElement(xml_parent, 'hudson.tasks.BatchFile')
XML.SubElement(batch, 'command').text = data
def powershell(registry, xml_parent, data):
"""yaml: powershell
Execute a powershell command. Requires the :jenkins-wiki:`Powershell Plugin
<PowerShell+Plugin>`.
:Parameter: the powershell command to execute
Example:
.. literalinclude:: ../../tests/builders/fixtures/powershell.yaml
:language: yaml
"""
ps = XML.SubElement(xml_parent, 'hudson.plugins.powershell.PowerShell')
XML.SubElement(ps, 'command').text = data
def msbuild(registry, xml_parent, data):
"""yaml: msbuild
Build .NET project using msbuild. Requires the :jenkins-wiki:`Jenkins
MSBuild Plugin <MSBuild+Plugin>`.
:arg str msbuild-version: which msbuild configured in Jenkins to use
(default '(Default)')
:arg str solution-file: location of the solution file to build (required)
:arg str extra-parameters: extra parameters to pass to msbuild (default '')
:arg bool pass-build-variables: should build variables be passed
to msbuild (default true)
:arg bool continue-on-build-failure: should the build continue if
msbuild returns an error (default false)
:arg bool unstable-if-warnings: If set to true and warnings on compilation,
the build will be unstable (>=1.20) (default false)
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/msbuild-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/msbuild-minimal.yaml
:language: yaml
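
    A hypothetical snippet (solution path and parameters are illustrative)::

        builders:
          - msbuild:
              solution-file: 'src/Example.sln'
              extra-parameters: '/p:Configuration=Release'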
"""
msbuilder = XML.SubElement(xml_parent,
'hudson.plugins.msbuild.MsBuildBuilder')
msbuilder.set('plugin', 'msbuild')
mapping = [
('msbuild-version', 'msBuildName', '(Default)'),
('solution-file', 'msBuildFile', None),
('extra-parameters', 'cmdLineArgs', ''),
('pass-build-variables', 'buildVariablesAsProperties', True),
('continue-on-build-failure', 'continueOnBuildFailure', False),
('unstable-if-warnings', 'unstableIfWarnings', False)
]
convert_mapping_to_xml(msbuilder, data, mapping, fail_required=True)
def create_builders(registry, step):
dummy_parent = XML.Element("dummy")
registry.dispatch('builder', dummy_parent, step)
return list(dummy_parent)
def conditional_step(registry, xml_parent, data):
"""yaml: conditional-step
Conditionally execute some build steps. Requires the Jenkins
:jenkins-wiki:`Conditional BuildStep Plugin
<Conditional+BuildStep+Plugin>`.
Depending on the number of declared steps, a `Conditional step (single)`
or a `Conditional steps (multiple)` is created in Jenkins.
:arg str condition-kind: Condition kind that must be verified before the
steps are executed. Valid values and their additional attributes are
described in the conditions_ table.
:arg str on-evaluation-failure: What should be the outcome of the build
if the evaluation of the condition fails. Possible values are `fail`,
`mark-unstable`, `run-and-mark-unstable`, `run` and `dont-run`.
(default 'fail').
:arg list steps: List of steps to run if the condition is verified. Items
in the list can be any builder known by Jenkins Job Builder.
.. _conditions:
================== ====================================================
Condition kind Description
================== ====================================================
always Condition is always verified
never Condition is never verified
    boolean-expression Run the step if the expression expands to a
                       representation of true
:condition-expression: Expression to expand (required)
build-cause Run if the current build has a specific cause
:cause: The cause why the build was triggered.
Following causes are supported -
:USER_CAUSE: build was triggered by a manual
interaction. (default)
                             :SCM_CAUSE: build was triggered by an SCM change.
                             :TIMER_CAUSE: build was triggered by a timer.
                             :CLI_CAUSE: build was triggered via the CLI
                               interface.
:REMOTE_CAUSE: build was triggered via remote
interface.
:UPSTREAM_CAUSE: build was triggered by an upstream
project.
Following supported if XTrigger plugin installed:
:FS_CAUSE: build was triggered by a file system
change (FSTrigger Plugin).
:URL_CAUSE: build was triggered by a URL change
(URLTrigger Plugin)
                             :IVY_CAUSE: build was triggered by an Ivy
                               dependency version change (IvyTrigger Plugin)
:SCRIPT_CAUSE: build was triggered by a script
(ScriptTrigger Plugin)
                             :BUILDRESULT_CAUSE: build was triggered by the
                               result of another job (BuildResultTrigger
                               Plugin)
                           :exclusive-cause: (bool) There might be multiple
                             causes triggering a build; with this set to
                             true, the cause must be the only one causing
                             this build to be triggered. (default false)
day-of-week Only run on specific days of the week.
:day-selector: Days you want the build to run on.
Following values are supported -
:weekend: Saturday and Sunday (default).
:weekday: Monday - Friday.
:select-days: Selected days, defined by 'days'
below.
:days: True for days for which the build should
run. Definition needed only for 'select-days'
day-selector, at the same level as day-selector.
Define the days to run under this.
:SUN: Run on Sunday (default false)
:MON: Run on Monday (default false)
:TUES: Run on Tuesday (default false)
:WED: Run on Wednesday (default false)
:THURS: Run on Thursday (default false)
:FRI: Run on Friday (default false)
:SAT: Run on Saturday (default false)
                           :use-build-time: (bool) Use the build time instead
                             of the time that the condition is evaluated.
                             (default false)
execution-node Run only on selected nodes.
:nodes: (list) List of nodes to execute on. (required)
strings-match Run the step if two strings match
:condition-string1: First string (optional)
:condition-string2: Second string (optional)
:condition-case-insensitive: Case insensitive
(default false)
current-status Run the build step if the current build status is
within the configured range
:condition-worst: Accepted values are SUCCESS,
UNSTABLE, FAILURE, NOT_BUILD, ABORTED
(default SUCCESS)
:condition-best: Accepted values are SUCCESS,
UNSTABLE, FAILURE, NOT_BUILD, ABORTED
(default SUCCESS)
shell Run the step if the shell command succeed
:condition-command: Shell command to execute
(optional)
windows-shell Similar to shell, except that commands will be
executed by cmd, under Windows
:condition-command: Command to execute (optional)
file-exists Run the step if a file exists
:condition-filename: Check existence of this file
(required)
:condition-basedir: If condition-filename is
relative, it will be considered relative to
either `workspace`, `artifact-directory`,
or `jenkins-home`. (default 'workspace')
files-match Run if one or more files match the selectors.
:include-pattern: (list str) List of Includes
Patterns. Since the separator in the patterns is
hardcoded as ',', any use of ',' would need
escaping. (optional)
:exclude-pattern: (list str) List of Excludes
Patterns. Since the separator in the patterns is
hardcoded as ',', any use of ',' would need
escaping. (optional)
:condition-basedir: Accepted values are `workspace`,
`artifact-directory`, or `jenkins-home`.
(default 'workspace')
num-comp Run if the numerical comparison is true.
:lhs: Left Hand Side. Must evaluate to a number.
(required)
:rhs: Right Hand Side. Must evaluate to a number.
(required)
:comparator: Accepted values are `less-than`,
`greater-than`, `equal`, `not-equal`,
`less-than-equal`, `greater-than-equal`.
(default 'less-than')
regex-match Run if the Expression matches the Label.
:regex: The regular expression used to match the label
(optional)
:label: The label that will be tested by the regular
expression. (optional)
time Only run during a certain period of the day.
:earliest-hour: Starting hour (default "09")
:earliest-min: Starting min (default "00")
:latest-hour: Ending hour (default "17")
:latest-min: Ending min (default "30")
:use-build-time: (bool) Use the build time instead of
the time that the condition is evaluated.
(default false)
not Run the step if the inverse of the condition-operand
is true
:condition-operand: Condition to evaluate. Can be
any supported conditional-step condition. (required)
and Run the step if logical and of all conditional-operands
is true
:condition-operands: (list) Conditions to evaluate.
Can be any supported conditional-step condition.
(required)
or Run the step if logical or of all conditional-operands
is true
:condition-operands: (list) Conditions to evaluate.
Can be any supported conditional-step condition.
(required)
================== ====================================================
Example:
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-success-failure.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-not-file-exists.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-day-of-week001.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-day-of-week003.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-time.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-regex-match.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-or.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/conditional-step-and.yaml
:language: yaml
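For orientation, a condensed sketch that combines a condition with a
single step (hypothetical values, not one of the bundled fixtures):

.. code-block:: yaml

    builders:
      - conditional-step:
          condition-kind: boolean-expression
          condition-expression: '${RUN_TESTS}'   # hypothetical token
          on-evaluation-failure: dont-run
          steps:
            - shell: 'make test'                 # hypothetical command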
"""
def build_condition(cdata, cond_root_tag):
kind = cdata['condition-kind']
ctag = XML.SubElement(cond_root_tag, condition_tag)
core_prefix = 'org.jenkins_ci.plugins.run_condition.core.'
logic_prefix = 'org.jenkins_ci.plugins.run_condition.logic.'
if kind == "always":
ctag.set('class', core_prefix + 'AlwaysRun')
elif kind == "never":
ctag.set('class', core_prefix + 'NeverRun')
elif kind == "boolean-expression":
ctag.set('class', core_prefix + 'BooleanCondition')
try:
XML.SubElement(ctag, "token").text = (
cdata['condition-expression'])
except KeyError:
raise MissingAttributeError('condition-expression')
elif kind == "build-cause":
ctag.set('class', core_prefix + 'CauseCondition')
cause_list = ('USER_CAUSE', 'SCM_CAUSE', 'TIMER_CAUSE',
'CLI_CAUSE', 'REMOTE_CAUSE', 'UPSTREAM_CAUSE',
'FS_CAUSE', 'URL_CAUSE', 'IVY_CAUSE',
'SCRIPT_CAUSE', 'BUILDRESULT_CAUSE')
cause_name = cdata.get('cause', 'USER_CAUSE')
if cause_name not in cause_list:
raise InvalidAttributeError('cause', cause_name, cause_list)
XML.SubElement(ctag, "buildCause").text = cause_name
XML.SubElement(ctag, "exclusiveCause").text = str(cdata.get(
'exclusive-cause', False)).lower()
elif kind == "day-of-week":
ctag.set('class', core_prefix + 'DayCondition')
day_selector_class_prefix = core_prefix + 'DayCondition$'
day_selector_classes = {
'weekend': day_selector_class_prefix + 'Weekend',
'weekday': day_selector_class_prefix + 'Weekday',
'select-days': day_selector_class_prefix + 'SelectDays',
}
day_selector = cdata.get('day-selector', 'weekend')
if day_selector not in day_selector_classes:
raise InvalidAttributeError('day-selector', day_selector,
day_selector_classes)
day_selector_tag = XML.SubElement(ctag, "daySelector")
day_selector_tag.set('class', day_selector_classes[day_selector])
if day_selector == "select-days":
days_tag = XML.SubElement(day_selector_tag, "days")
day_tag_text = ('org.jenkins__ci.plugins.run__condition.'
'core.DayCondition_-Day')
inp_days = cdata.get('days') if cdata.get('days') else {}
days = ['SUN', 'MON', 'TUES', 'WED', 'THURS', 'FRI', 'SAT']
for day_no, day in enumerate(days, 1):
day_tag = XML.SubElement(days_tag, day_tag_text)
XML.SubElement(day_tag, "day").text = str(day_no)
XML.SubElement(day_tag, "selected").text = str(
inp_days.get(day, False)).lower()
XML.SubElement(ctag, "useBuildTime").text = str(cdata.get(
'use-build-time', False)).lower()
elif kind == "execution-node":
ctag.set('class', core_prefix + 'NodeCondition')
allowed_nodes_tag = XML.SubElement(ctag, "allowedNodes")
try:
nodes_list = cdata['nodes']
except KeyError:
raise MissingAttributeError('nodes')
for node in nodes_list:
node_tag = XML.SubElement(allowed_nodes_tag, "string")
node_tag.text = node
elif kind == "strings-match":
ctag.set('class', core_prefix + 'StringsMatchCondition')
XML.SubElement(ctag, "arg1").text = cdata.get(
'condition-string1', '')
XML.SubElement(ctag, "arg2").text = cdata.get(
'condition-string2', '')
XML.SubElement(ctag, "ignoreCase").text = str(cdata.get(
'condition-case-insensitive', False)).lower()
elif kind == "current-status":
ctag.set('class', core_prefix + 'StatusCondition')
wr = XML.SubElement(ctag, 'worstResult')
wr_name = cdata.get('condition-worst', 'SUCCESS')
if wr_name not in hudson_model.THRESHOLDS:
raise InvalidAttributeError('condition-worst', wr_name,
hudson_model.THRESHOLDS.keys())
wr_threshold = hudson_model.THRESHOLDS[wr_name]
XML.SubElement(wr, "name").text = wr_threshold['name']
XML.SubElement(wr, "ordinal").text = wr_threshold['ordinal']
XML.SubElement(wr, "color").text = wr_threshold['color']
XML.SubElement(wr, "completeBuild").text = str(
wr_threshold['complete']).lower()
br = XML.SubElement(ctag, 'bestResult')
br_name = cdata.get('condition-best', 'SUCCESS')
if br_name not in hudson_model.THRESHOLDS:
raise InvalidAttributeError('condition-best', br_name,
hudson_model.THRESHOLDS.keys())
br_threshold = hudson_model.THRESHOLDS[br_name]
XML.SubElement(br, "name").text = br_threshold['name']
XML.SubElement(br, "ordinal").text = br_threshold['ordinal']
XML.SubElement(br, "color").text = br_threshold['color']
XML.SubElement(br, "completeBuild").text = str(
wr_threshold['complete']).lower()
elif kind == "shell":
ctag.set('class',
'org.jenkins_ci.plugins.run_condition.contributed.'
'ShellCondition')
XML.SubElement(ctag, "command").text = cdata.get(
'condition-command', '')
elif kind == "windows-shell":
ctag.set('class',
'org.jenkins_ci.plugins.run_condition.contributed.'
'BatchFileCondition')
XML.SubElement(ctag, "command").text = cdata.get(
'condition-command', '')
elif kind == "file-exists" or kind == "files-match":
if kind == "file-exists":
ctag.set('class', core_prefix + 'FileExistsCondition')
try:
XML.SubElement(ctag, "file").text = (
cdata['condition-filename'])
except KeyError:
raise MissingAttributeError('condition-filename')
else:
ctag.set('class', core_prefix + 'FilesMatchCondition')
XML.SubElement(ctag, "includes").text = ",".join(cdata.get(
'include-pattern', ''))
XML.SubElement(ctag, "excludes").text = ",".join(cdata.get(
'exclude-pattern', ''))
basedir_class_prefix = ('org.jenkins_ci.plugins.run_condition.'
'common.BaseDirectory$')
basedir_classes = {
'workspace': basedir_class_prefix + 'Workspace',
'artifact-directory': basedir_class_prefix + 'ArtifactsDir',
'jenkins-home': basedir_class_prefix + 'JenkinsHome'
}
basedir = cdata.get('condition-basedir', 'workspace')
if basedir not in basedir_classes:
raise InvalidAttributeError('condition-basedir', basedir,
basedir_classes)
XML.SubElement(ctag, "baseDir").set('class',
basedir_classes[basedir])
elif kind == "num-comp":
ctag.set('class', core_prefix + 'NumericalComparisonCondition')
try:
XML.SubElement(ctag, "lhs").text = cdata['lhs']
XML.SubElement(ctag, "rhs").text = cdata['rhs']
except KeyError as e:
raise MissingAttributeError(e.args[0])
comp_class_prefix = core_prefix + 'NumericalComparisonCondition$'
comp_classes = {
'less-than': comp_class_prefix + 'LessThan',
'greater-than': comp_class_prefix + 'GreaterThan',
'equal': comp_class_prefix + 'EqualTo',
'not-equal': comp_class_prefix + 'NotEqualTo',
'less-than-equal': comp_class_prefix + 'LessThanOrEqualTo',
'greater-than-equal': comp_class_prefix +
'GreaterThanOrEqualTo'
}
comp = cdata.get('comparator', 'less-than')
if comp not in comp_classes:
raise InvalidAttributeError('comparator', comp, comp_classes)
XML.SubElement(ctag, "comparator").set('class',
comp_classes[comp])
elif kind == "regex-match":
ctag.set('class', core_prefix + 'ExpressionCondition')
XML.SubElement(ctag, "expression").text = cdata.get('regex', '')
XML.SubElement(ctag, "label").text = cdata.get('label', '')
elif kind == "time":
ctag.set('class', core_prefix + 'TimeCondition')
XML.SubElement(ctag, "earliestHours").text = cdata.get(
'earliest-hour', '09')
XML.SubElement(ctag, "earliestMinutes").text = cdata.get(
'earliest-min', '00')
XML.SubElement(ctag, "latestHours").text = cdata.get(
'latest-hour', '17')
XML.SubElement(ctag, "latestMinutes").text = cdata.get(
'latest-min', '30')
XML.SubElement(ctag, "useBuildTime").text = str(cdata.get(
'use-build-time', False)).lower()
elif kind == "not":
ctag.set('class', logic_prefix + 'Not')
try:
notcondition = cdata['condition-operand']
except KeyError:
raise MissingAttributeError('condition-operand')
build_condition(notcondition, ctag)
elif kind in ("and", "or"):
if kind == "and":
ctag.set('class', logic_prefix + 'And')
else:
ctag.set('class', logic_prefix + 'Or')
conditions_tag = XML.SubElement(ctag, "conditions")
container_tag_text = ('org.jenkins__ci.plugins.run__condition.'
'logic.ConditionContainer')
try:
conditions_list = cdata['condition-operands']
except KeyError:
raise MissingAttributeError('condition-operands')
for condition in conditions_list:
conditions_container_tag = XML.SubElement(conditions_tag,
container_tag_text)
build_condition(condition, conditions_container_tag)
def build_step(parent, step):
for edited_node in create_builders(registry, step):
if not has_multiple_steps:
edited_node.set('class', edited_node.tag)
edited_node.tag = 'buildStep'
parent.append(edited_node)
cond_builder_tag = ('org.jenkinsci.plugins.conditionalbuildstep.'
'singlestep.SingleConditionalBuilder')
cond_builders_tag = ('org.jenkinsci.plugins.conditionalbuildstep.'
'ConditionalBuilder')
steps = data['steps']
has_multiple_steps = len(steps) > 1
if has_multiple_steps:
root_tag = XML.SubElement(xml_parent, cond_builders_tag)
steps_parent = XML.SubElement(root_tag, "conditionalbuilders")
condition_tag = "runCondition"
else:
root_tag = XML.SubElement(xml_parent, cond_builder_tag)
steps_parent = root_tag
condition_tag = "condition"
build_condition(data, root_tag)
evaluation_classes_pkg = 'org.jenkins_ci.plugins.run_condition'
evaluation_classes = {
'fail': evaluation_classes_pkg + '.BuildStepRunner$Fail',
'mark-unstable': evaluation_classes_pkg + '.BuildStepRunner$Unstable',
'run-and-mark-unstable': evaluation_classes_pkg +
'.BuildStepRunner$RunUnstable',
'run': evaluation_classes_pkg + '.BuildStepRunner$Run',
'dont-run': evaluation_classes_pkg + '.BuildStepRunner$DontRun',
}
evaluation_class = evaluation_classes[data.get('on-evaluation-failure',
'fail')]
XML.SubElement(root_tag, "runner").set('class',
evaluation_class)
for step in steps:
build_step(steps_parent, step)
def maven_builder(registry, xml_parent, data):
"""yaml: maven-builder
Execute Maven3 builder
Allows your build jobs to deploy artifacts automatically to Artifactory.
Requires the Jenkins :jenkins-wiki:`Artifactory Plugin
<Artifactory+Plugin>`.
:arg str name: Name of maven installation from the configuration (required)
:arg str pom: Location of pom.xml (default 'pom.xml')
:arg str goals: Goals to execute (required)
:arg str maven-opts: Additional options for maven (default '')
Example:
.. literalinclude:: /../../tests/builders/fixtures/maven-builder001.yaml
:language: yaml
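A minimal sketch, assuming a Maven installation named 'mymaven' is
configured in Jenkins:

.. code-block:: yaml

    builders:
      - maven-builder:
          name: mymaven          # hypothetical installation name
          goals: 'clean install'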
"""
maven = XML.SubElement(xml_parent, 'org.jfrog.hudson.maven3.Maven3Builder')
mapping = [
('name', 'mavenName', None),
('goals', 'goals', None),
('pom', 'rootPom', 'pom.xml'),
('maven-opts', 'mavenOpts', ''),
]
convert_mapping_to_xml(maven, data, mapping, fail_required=True)
def maven_target(registry, xml_parent, data):
"""yaml: maven-target
Execute top-level Maven targets.
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>` for the Config File Provider "settings"
and "global-settings" config.
:arg str goals: Goals to execute
:arg str properties: Properties for maven, can have multiples
:arg str pom: Location of pom.xml (default 'pom.xml')
:arg bool private-repository: Use private maven repository for this
job (default false)
:arg str maven-version: Installation of maven which should be used
(optional)
:arg str java-opts: java options for maven, can have multiples,
must be in quotes (optional)
:arg str settings: Path to use as user settings.xml.
It is possible to provide a ConfigFileProvider settings file, as shown
in the CFP Example below. (optional)
:arg str settings-type: Type of settings file, either `file` or `cfp`.
(default 'file')
:arg str global-settings: Path to use as global settings.xml.
It is possible to provide a ConfigFileProvider settings file, as shown
in the CFP Example below. (optional)
:arg str global-settings-type: Type of settings file, either `file` or
`cfp`. (default 'file')
Example:
.. literalinclude:: /../../tests/builders/fixtures/maven-target-doc.yaml
:language: yaml
CFP Example:
.. literalinclude:: /../../tests/builders/fixtures/maven-target002.yaml
:language: yaml
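An illustrative sketch with assumed installation and property values:

.. code-block:: yaml

    builders:
      - maven-target:
          maven-version: 'mvn-3.3'    # hypothetical installation name
          goals: 'clean verify'
          properties:
            - 'skipTests=true'        # hypothetical property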
"""
maven = XML.SubElement(xml_parent, 'hudson.tasks.Maven')
XML.SubElement(maven, 'targets').text = data['goals']
prop_string = '\n'.join(data.get('properties', []))
XML.SubElement(maven, 'properties').text = prop_string
if 'maven-version' in data:
XML.SubElement(maven, 'mavenName').text = str(data['maven-version'])
if 'pom' in data:
XML.SubElement(maven, 'pom').text = str(data['pom'])
use_private = str(data.get('private-repository', False)).lower()
XML.SubElement(maven, 'usePrivateRepository').text = use_private
if 'java-opts' in data:
javaoptions = ' '.join(data.get('java-opts', []))
XML.SubElement(maven, 'jvmOptions').text = javaoptions
config_file_provider_settings(maven, data)
def multijob(registry, xml_parent, data):
"""yaml: multijob
Define a multijob phase. Requires the Jenkins
:jenkins-wiki:`Multijob Plugin <Multijob+Plugin>`.
This builder may only be used in
:py:class:`jenkins_jobs.modules.project_multijob.MultiJob` projects.
:arg str name: MultiJob phase name
:arg str condition: when to trigger the other job.
Can be: 'SUCCESSFUL', 'UNSTABLE', 'COMPLETED', 'FAILURE', 'ALWAYS'.
(default 'SUCCESSFUL')
:arg list projects: list of projects to include in the MultiJob phase
:Project:
* **name** (`str`) -- Project name
* **current-parameters** (`bool`) -- Pass current build
parameters to the other job (default false)
* **node-label-name** (`str`) -- Define a list of nodes
on which the job should be allowed to be executed.
Requires NodeLabel Parameter Plugin (optional)
* **node-label** (`str`) -- Define a label
of 'Restrict where this project can be run' on the fly.
Requires NodeLabel Parameter Plugin (optional)
* **node-parameters** (`bool`) -- Use the same Node for
the triggered builds that was used for this build. (optional)
* **git-revision** (`bool`) -- Pass current git-revision
to the other job (default false)
* **property-file** (`str`) -- Pass properties from file
to the other job (optional)
* **predefined-parameters** (`str`) -- Pass predefined
parameters to the other job (optional)
* **abort-all-job** (`bool`) -- Kill all sub jobs and the phase job
if this sub job is killed (default false)
* **enable-condition** (`str`) -- Condition to run the
job in groovy script format (optional)
* **kill-phase-on** (`str`) -- Stop the phase execution
on specific job status. Can be 'FAILURE', 'UNSTABLE',
'NEVER'. (optional)
* **restrict-matrix-project** (`str`) -- Filter that
restricts the subset of the combinations that the
downstream project will run (optional)
* **retry** (`dict`): Enable retry strategy (optional)
:retry:
* **max-retry** (`int`) -- Max number of retries
(default 0)
* **strategy-path** (`str`) -- Parsing rules path
(required)
Example:
.. literalinclude:: /../../tests/builders/fixtures/multibuild.yaml
:language: yaml
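A compact phase sketch (phase and job names are hypothetical):

.. code-block:: yaml

    builders:
      - multijob:
          name: deploy-phase          # hypothetical phase name
          condition: SUCCESSFUL
          projects:
            - name: deploy-job        # hypothetical job name
              current-parameters: true
              kill-phase-on: FAILURE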
"""
builder = XML.SubElement(xml_parent, 'com.tikal.jenkins.plugins.multijob.'
'MultiJobBuilder')
XML.SubElement(builder, 'phaseName').text = data['name']
condition = data.get('condition', 'SUCCESSFUL')
conditions_available = ('SUCCESSFUL', 'UNSTABLE', 'COMPLETED', 'FAILURE',
'ALWAYS')
if condition not in conditions_available:
raise JenkinsJobsException('Multijob condition must be one of: %s.'
% ', '.join(conditions_available))
XML.SubElement(builder, 'continuationCondition').text = condition
phaseJobs = XML.SubElement(builder, 'phaseJobs')
kill_status_list = ('FAILURE', 'UNSTABLE', 'NEVER')
for project in data.get('projects', []):
phaseJob = XML.SubElement(phaseJobs, 'com.tikal.jenkins.plugins.'
'multijob.PhaseJobsConfig')
XML.SubElement(phaseJob, 'jobName').text = project['name']
# Pass through the current build params
currParams = str(project.get('current-parameters', False)).lower()
XML.SubElement(phaseJob, 'currParams').text = currParams
# Pass through other params
configs = XML.SubElement(phaseJob, 'configs')
nodeLabelName = project.get('node-label-name')
nodeLabel = project.get('node-label')
if (nodeLabelName and nodeLabel):
node = XML.SubElement(
configs, 'org.jvnet.jenkins.plugins.nodelabelparameter.'
'parameterizedtrigger.NodeLabelBuildParameter')
XML.SubElement(node, 'name').text = nodeLabelName
XML.SubElement(node, 'nodeLabel').text = nodeLabel
# Node parameter
if project.get('node-parameters', False):
XML.SubElement(configs, 'hudson.plugins.parameterizedtrigger.'
'NodeParameters')
# Git Revision
if project.get('git-revision', False):
param = XML.SubElement(configs,
'hudson.plugins.git.'
'GitRevisionBuildParameters')
combine = XML.SubElement(param, 'combineQueuedCommits')
combine.text = 'false'
# Properties File
properties_file = project.get('property-file', False)
if properties_file:
param = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
propertiesFile = XML.SubElement(param, 'propertiesFile')
propertiesFile.text = properties_file
failOnMissing = XML.SubElement(param, 'failTriggerOnMissing')
failOnMissing.text = 'true'
# Predefined Parameters
predefined_parameters = project.get('predefined-parameters', False)
if predefined_parameters:
param = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(param, 'properties')
properties.text = predefined_parameters
# Abort all other jobs
abortAllJob = str(project.get('abort-all-job', False)).lower()
XML.SubElement(phaseJob, 'abortAllJob').text = abortAllJob
# Retry job
retry = project.get('retry', False)
if retry:
try:
rules_path = str(retry['strategy-path'])
XML.SubElement(phaseJob, 'parsingRulesPath').text = rules_path
except KeyError:
raise MissingAttributeError('strategy-path')
max_retry = retry.get('max-retry', 0)
XML.SubElement(phaseJob, 'maxRetries').text = str(int(max_retry))
XML.SubElement(phaseJob, 'enableRetryStrategy').text = 'true'
else:
XML.SubElement(phaseJob, 'enableRetryStrategy').text = 'false'
# Restrict matrix jobs to a subset
if project.get('restrict-matrix-project') is not None:
subset = XML.SubElement(
configs, 'hudson.plugins.parameterizedtrigger.'
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(
subset, 'filter').text = project['restrict-matrix-project']
# Enable Condition
enable_condition = project.get('enable-condition')
if enable_condition is not None:
XML.SubElement(
phaseJob,
'enableCondition'
).text = 'true'
XML.SubElement(
phaseJob,
'condition'
).text = enable_condition
# Kill phase on job status
kill_status = project.get('kill-phase-on')
if kill_status is not None:
kill_status = kill_status.upper()
if kill_status not in kill_status_list:
raise JenkinsJobsException(
'multijob kill-phase-on must be one of: %s'
% ','.join(kill_status_list))
XML.SubElement(
phaseJob,
'killPhaseOnJobResultCondition'
).text = kill_status
def config_file_provider(registry, xml_parent, data):
"""yaml: config-file-provider
Provide configuration files (e.g., settings.xml for Maven)
which will be copied to the job's workspace.
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`.
:arg list files: List of managed config files made up of three
parameters
:files:
* **file-id** (`str`) -- The identifier for the managed config
file
* **target** (`str`) -- Define where the file should be created
(default '')
* **variable** (`str`) -- Define an environment variable to be
used (default '')
Example:
.. literalinclude::
../../tests/builders/fixtures/config-file-provider01.yaml
:language: yaml
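An illustrative sketch; the file-id below is a placeholder, real ids
come from the Config File Provider management page:

.. code-block:: yaml

    builders:
      - config-file-provider:
          files:
            - file-id: my-settings-id    # placeholder managed-file id
              target: settings.xml
              variable: SETTINGS_PATH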
"""
cfp = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.configfiles.builder.'
'ConfigFileBuildStep')
cfp.set('plugin', 'config-file-provider')
config_file_provider_builder(cfp, data)
def grails(registry, xml_parent, data):
"""yaml: grails
Execute a grails build step. Requires the :jenkins-wiki:`Jenkins Grails
Plugin <Grails+Plugin>`.
:arg bool use-wrapper: Use a grails wrapper (default false)
:arg str name: Select a grails installation to use (default '(Default)')
:arg bool force-upgrade: Run 'grails upgrade --non-interactive'
first (default false)
:arg bool non-interactive: append --non-interactive to all build targets
(default false)
:arg str targets: Specify target(s) to run separated by spaces (required)
:arg str server-port: Specify a value for the server.port system
property (default '')
:arg str work-dir: Specify a value for the grails.work.dir system
property (default '')
:arg str project-dir: Specify a value for the grails.project.work.dir
system property (default '')
:arg str base-dir: Specify a path to the root of the Grails
project (default '')
:arg str properties: Additional system properties to set (default '')
:arg bool plain-output: append --plain-output to all build targets
(default false)
:arg bool stack-trace: append --stack-trace to all build targets
(default false)
:arg bool verbose: append --verbose to all build targets
(default false)
:arg bool refresh-dependencies: append --refresh-dependencies to all
build targets (default false)
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/grails-full.yaml
:language: yaml
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/grails-minimal.yaml
:language: yaml
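A short sketch, assuming a Grails installation named 'grails-2.4':

.. code-block:: yaml

    builders:
      - grails:
          targets: 'war'
          name: 'grails-2.4'     # hypothetical installation name
          non-interactive: true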
"""
grails = XML.SubElement(xml_parent, 'com.g2one.hudson.grails.'
'GrailsBuilder')
grails.set('plugin', 'grails')
mappings = [
('targets', 'targets', None),
('name', 'name', '(Default)'),
('work-dir', 'grailsWorkDir', ''),
('project-dir', 'projectWorkDir', ''),
('base-dir', 'projectBaseDir', ''),
('server-port', 'serverPort', ''),
('properties', 'properties', ''),
('force-upgrade', 'forceUpgrade', False),
('non-interactive', 'nonInteractive', False),
('use-wrapper', 'useWrapper', False),
('plain-output', 'plainOutput', False),
('stack-trace', 'stackTrace', False),
('verbose', 'verbose', False),
('refresh-dependencies', 'refreshDependencies', False),
]
convert_mapping_to_xml(grails, data, mappings, fail_required=True)
def sbt(registry, xml_parent, data):
"""yaml: sbt
Execute a sbt build step. Requires the Jenkins :jenkins-wiki:`Sbt Plugin
<sbt+plugin>`.
:arg str name: Select an sbt installation to use. If no name is
provided, the first in the list of defined sbt builders will be
used. (default first in list)
:arg str jvm-flags: Parameters to pass to the JVM (default '')
:arg str actions: Select the sbt tasks to execute (default '')
:arg str sbt-flags: Add flags to SBT launcher
(default '-Dsbt.log.noformat=true')
:arg str subdir-path: Path relative to workspace to run sbt in
(default '')
Example:
.. literalinclude:: ../../tests/builders/fixtures/sbt.yaml
:language: yaml
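A brief sketch with an assumed installation name:

.. code-block:: yaml

    builders:
      - sbt:
          name: 'sbt-0.13'       # hypothetical installation name
          actions: 'clean test'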
"""
sbt = XML.SubElement(xml_parent, 'org.jvnet.hudson.plugins.'
'SbtPluginBuilder')
mappings = [
('name', 'name', ''),
('jvm-flags', 'jvmFlags', ''),
('sbt-flags', 'sbtFlags', '-Dsbt.log.noformat=true'),
('actions', 'actions', ''),
('subdir-path', 'subdirPath', ''),
]
convert_mapping_to_xml(sbt, data, mappings, fail_required=True)
def critical_block_start(registry, xml_parent, data):
"""yaml: critical-block-start
Designate the start of a critical block. Must be used in conjunction with
critical-block-end.
Must also add a build wrapper (exclusion), specifying the resources that
control the critical block. Otherwise, this will have no effect.
Requires Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
Example:
.. literalinclude::
../../tests/yamlparser/fixtures/critical_block_complete001.yaml
:language: yaml
"""
cbs = XML.SubElement(
xml_parent, 'org.jvnet.hudson.plugins.exclusion.CriticalBlockStart')
cbs.set('plugin', 'Exclusion')
def critical_block_end(registry, xml_parent, data):
"""yaml: critical-block-end
Designate the end of a critical block. Must be used in conjunction with
critical-block-start.
Must also add a build wrapper (exclusion), specifying the resources that
control the critical block. Otherwise, this will have no effect.
Requires Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
Example:
.. literalinclude::
../../tests/yamlparser/fixtures/critical_block_complete001.yaml
:language: yaml
"""
cbs = XML.SubElement(
xml_parent, 'org.jvnet.hudson.plugins.exclusion.CriticalBlockEnd')
cbs.set('plugin', 'Exclusion')
def publish_over_ssh(registry, xml_parent, data):
"""yaml: publish-over-ssh
Send files or execute commands over SSH.
Requires the Jenkins :jenkins-wiki:`Publish over SSH Plugin
<Publish+Over+SSH+Plugin>`.
:arg str site: name of the ssh site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (default false)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (default false)
:arg str source: source path specifier
:arg str command: a command to execute on the remote server (optional)
:arg int timeout: timeout in milliseconds for the Exec command (optional)
:arg bool use-pty: run the exec command in pseudo TTY (default false)
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (default false)
Example:
.. literalinclude:: /../../tests/builders/fixtures/publish-over-ssh.yaml
:language: yaml
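A sketch with hypothetical site and path values:

.. code-block:: yaml

    builders:
      - publish-over-ssh:
          site: 'my-ssh-site'        # hypothetical site name
          target: 'releases'
          source: 'dist/**'
          command: 'ls releases'     # hypothetical remote command
          fail-on-error: true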
"""
ssh(registry, xml_parent, data)
def saltstack(parser, xml_parent, data):
"""yaml: saltstack
Send a message to Salt API. Requires the :jenkins-wiki:`saltstack plugin
<saltstack-plugin>`.
:arg str servername: Salt master server name (required)
:arg str authtype: Authentication type ('pam' or 'ldap', default 'pam')
:arg str credentials: Credentials ID for which to authenticate to Salt
master (required)
:arg str target: Target minions (default '')
:arg str targettype: Target type ('glob', 'pcre', 'list', 'grain',
'pillar', 'nodegroup', 'range', or 'compound', default 'glob')
:arg str function: Function to execute (default '')
:arg str arguments: Salt function arguments (default '')
:arg str kwarguments: Salt keyword arguments (default '')
:arg bool saveoutput: Save Salt return data into environment variable
(default false)
:arg str clientinterface: Client interface type ('local', 'local-batch',
or 'runner', default 'local')
:arg bool wait: Wait for completion of command (default false)
:arg str polltime: Number of seconds to wait before polling job completion
status (default '')
:arg str batchsize: Salt batch size, an absolute value or a percentage (default '100%')
:arg str mods: Mods to runner (default '')
:arg bool setpillardata: Set Pillar data (default false)
:arg str pillarkey: Pillar key (default '')
:arg str pillarvalue: Pillar value (default '')
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/saltstack-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/saltstack-full.yaml
:language: yaml
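A minimal sketch; server name and credentials id are placeholders:

.. code-block:: yaml

    builders:
      - saltstack:
          servername: 'salt.example.com'   # hypothetical master
          credentials: 'salt-api-creds'    # hypothetical credentials id
          function: 'state.apply'
          target: 'web*'
          targettype: glob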
"""
saltstack = XML.SubElement(xml_parent, 'com.waytta.SaltAPIBuilder')
supported_auth_types = ['pam', 'ldap']
supported_target_types = ['glob', 'pcre', 'list', 'grain', 'pillar',
'nodegroup', 'range', 'compound']
supported_client_interfaces = ['local', 'local-batch', 'runner']
mapping = [
('servername', 'servername', None),
('credentials', 'credentialsId', None),
('authtype', 'authtype', 'pam', supported_auth_types),
('target', 'target', ''),
('targettype', 'targettype', 'glob', supported_target_types),
('clientinterface', 'clientInterface', 'local',
supported_client_interfaces),
('function', 'function', ''),
('arguments', 'arguments', ''),
('kwarguments', 'kwarguments', ''),
('setpillardata', 'usePillar', False),
('pillarkey', 'pillarkey', ''),
('pillarvalue', 'pillarvalue', ''),
('wait', 'blockbuild', False),
('polltime', 'jobPollTime', ''),
('batchsize', 'batchSize', '100%'),
('mods', 'mods', ''),
('saveoutput', 'saveEnvVar', False)
]
helpers.convert_mapping_to_xml(saltstack, data, mapping,
fail_required=True)
clientInterface = data.get('clientinterface', 'local')
blockbuild = str(data.get('wait', False)).lower()
jobPollTime = str(data.get('polltime', ''))
batchSize = data.get('batchsize', '100%')
mods = data.get('mods', '')
usePillar = str(data.get('setpillardata', False)).lower()
# Build the clientInterfaces structure, based on the
# clientinterface setting
clientInterfaces = XML.SubElement(saltstack, 'clientInterfaces')
XML.SubElement(clientInterfaces, 'nullObject').text = 'false'
ci_attrib = {
'class': 'org.apache.commons.collections.map.ListOrderedMap',
'serialization': 'custom'
}
properties = XML.SubElement(clientInterfaces, 'properties', ci_attrib)
lomElement = 'org.apache.commons.collections.map.ListOrderedMap'
listOrderedMap = XML.SubElement(properties, lomElement)
default = XML.SubElement(listOrderedMap, 'default')
ordered_map = XML.SubElement(listOrderedMap, 'map')
insertOrder = XML.SubElement(default, 'insertOrder')
ci_config = []
if clientInterface == 'local':
ci_config = [
('blockbuild', blockbuild),
('jobPollTime', jobPollTime),
('clientInterface', clientInterface)
]
elif clientInterface == 'local-batch':
ci_config = [
('batchSize', batchSize),
('clientInterface', clientInterface)
]
elif clientInterface == 'runner':
ci_config = [
('mods', mods),
('clientInterface', clientInterface)
]
if usePillar == 'true':
ci_config.append(('usePillar', usePillar))
pillar_cfg = [
('pillarkey', data.get('pillarkey')),
('pillarvalue', data.get('pillarvalue'))
]
for emt, value in ci_config:
XML.SubElement(insertOrder, 'string').text = emt
entry = XML.SubElement(ordered_map, 'entry')
XML.SubElement(entry, 'string').text = emt
# Special handling when usePillar == true, requires additional
# structure in the builder XML
if emt != 'usePillar':
XML.SubElement(entry, 'string').text = value
else:
jsonobj = XML.SubElement(entry, 'net.sf.json.JSONObject')
XML.SubElement(jsonobj, 'nullObject').text = 'false'
pillarProps = XML.SubElement(jsonobj, 'properties', ci_attrib)
XML.SubElement(pillarProps, 'unserializable-parents')
pillarLom = XML.SubElement(pillarProps, lomElement)
pillarDefault = XML.SubElement(pillarLom, 'default')
pillarMap = XML.SubElement(pillarLom, 'map')
pillarInsertOrder = XML.SubElement(pillarDefault, 'insertOrder')
for pemt, value in pillar_cfg:
XML.SubElement(pillarInsertOrder, 'string').text = pemt
pillarEntry = XML.SubElement(pillarMap, 'entry')
XML.SubElement(pillarEntry, 'string').text = pemt
XML.SubElement(pillarEntry, 'string').text = value
class Builders(jenkins_jobs.modules.base.Base):
sequence = 60
component_type = 'builder'
component_list_type = 'builders'
def gen_xml(self, xml_parent, data):
for alias in ['prebuilders', 'builders', 'postbuilders']:
if alias in data:
builders = XML.SubElement(xml_parent, alias)
for builder in data[alias]:
self.registry.dispatch('builder', builders, builder)
# Make sure freestyle projects always have a <builders> entry
# or Jenkins v1.472 (at least) will NPE.
project_type = data.get('project-type', 'freestyle')
if project_type in ('freestyle', 'matrix') and 'builders' not in data:
XML.SubElement(xml_parent, 'builders')
def shining_panda(registry, xml_parent, data):
"""yaml: shining-panda
Execute a command inside various python environments. Requires the Jenkins
:jenkins-wiki:`ShiningPanda plugin <ShiningPanda+Plugin>`.
:arg str build-environment: Building environment to set up (required).
:build-environment values:
* **python**: Use a python installation configured in Jenkins.
* **custom**: Use a manually installed python.
* **virtualenv**: Create a virtualenv
For the **python** environment
:arg str python-version: Name of the python installation to use.
Must match one of the configured installations on server
configuration (default 'System-CPython-2.7')
For the **custom** environment:
:arg str home: path to the home folder of the custom installation
(required)
For the **virtualenv** environment:
:arg str python-version: Name of the python installation to use.
Must match one of the configured installations on server
configuration (default 'System-CPython-2.7')
:arg str name: Name of this virtualenv. Two virtualenv builders with
the same name will use the same virtualenv installation (optional)
:arg bool clear: If true, delete and recreate virtualenv on each build.
(default false)
:arg bool use-distribute: if true use distribute, if false use
setuptools. (default false)
:arg bool system-site-packages: if true, give access to the global
site-packages directory to the virtualenv. (default false)
Common to all environments:
:arg str nature: Nature of the command field. (default shell)
:nature values:
* **shell**: execute the Command contents with default shell
* **xshell**: like **shell** but performs platform conversion
first
* **python**: execute the Command contents with the Python
executable
:arg str command: The command to execute
:arg bool ignore-exit-code: do not mark the build as failed if any of
the commands exits with a non-zero exit code. (default false)
Examples:
.. literalinclude::
/../../tests/builders/fixtures/shining-panda-pythonenv.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/shining-panda-customenv.yaml
:language: yaml
.. literalinclude::
/../../tests/builders/fixtures/shining-panda-virtualenv.yaml
:language: yaml
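A virtualenv-flavoured sketch (the environment name is hypothetical):

.. code-block:: yaml

    builders:
      - shining-panda:
          build-environment: virtualenv
          python-version: System-CPython-2.7
          name: 'myenv'              # hypothetical virtualenv name
          nature: shell
          command: |
            pip install -r requirements.txt
            py.test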
"""
pluginelementpart = 'jenkins.plugins.shiningpanda.builders.'
buildenvdict = {'custom': 'CustomPythonBuilder',
'virtualenv': 'VirtualenvBuilder',
'python': 'PythonBuilder'}
envs = (buildenvdict.keys())
try:
buildenv = data['build-environment']
except KeyError:
raise MissingAttributeError('build-environment')
if buildenv not in envs:
raise InvalidAttributeError('build-environment', buildenv, envs)
t = XML.SubElement(xml_parent, '%s%s' %
(pluginelementpart, buildenvdict[buildenv]))
if buildenv in ('python', 'virtualenv'):
XML.SubElement(t, 'pythonName').text = data.get("python-version",
"System-CPython-2.7")
if buildenv == 'custom':
try:
homevalue = data["home"]
except KeyError:
raise JenkinsJobsException("'home' argument is required for the"
" 'custom' environment")
XML.SubElement(t, 'home').text = homevalue
if buildenv == 'virtualenv':
XML.SubElement(t, 'home').text = data.get("name", "")
clear = data.get("clear", False)
XML.SubElement(t, 'clear').text = str(clear).lower()
use_distribute = data.get('use-distribute', False)
XML.SubElement(t, 'useDistribute').text = str(use_distribute).lower()
system_site_packages = data.get('system-site-packages', False)
XML.SubElement(t, 'systemSitePackages').text = str(
system_site_packages).lower()
# Common arguments
nature = data.get('nature', 'shell')
naturetuple = ('shell', 'xshell', 'python')
if nature not in naturetuple:
raise InvalidAttributeError('nature', nature, naturetuple)
XML.SubElement(t, 'nature').text = nature
XML.SubElement(t, 'command').text = data.get("command", "")
ignore_exit_code = data.get('ignore-exit-code', False)
XML.SubElement(t, 'ignoreExitCode').text = str(ignore_exit_code).lower()
def tox(registry, xml_parent, data):
"""yaml: tox
Use tox to build a multi-configuration project. Requires the Jenkins
:jenkins-wiki:`ShiningPanda plugin <ShiningPanda+Plugin>`.
:arg str ini: The TOX configuration file path (default tox.ini)
:arg bool recreate: If true, create a new environment each time (default
false)
:arg str toxenv-pattern: The pattern used to build the TOXENV environment
variable. (optional)
Example:
.. literalinclude:: /../../tests/builders/fixtures/tox001.yaml
:language: yaml
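A short sketch; the TOXENV pattern shown is an assumption:

.. code-block:: yaml

    builders:
      - tox:
          ini: 'tox.ini'
          recreate: true
          toxenv-pattern: 'py$PYTHON'   # hypothetical pattern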
"""
pluginelement = 'jenkins.plugins.shiningpanda.builders.ToxBuilder'
t = XML.SubElement(xml_parent, pluginelement)
XML.SubElement(t, 'toxIni').text = data.get('ini', 'tox.ini')
XML.SubElement(t, 'recreate').text = str(
data.get('recreate', False)).lower()
pattern = data.get('toxenv-pattern')
if pattern:
XML.SubElement(t, 'toxenvPattern').text = pattern
def managed_script(registry, xml_parent, data):
"""yaml: managed-script
This step allows you to reference and execute a centrally managed
script within your build. Requires the Jenkins
:jenkins-wiki:`Managed Script Plugin <Managed+Script+Plugin>`.
:arg str script-id: Id of script to execute (required)
:arg str type: Type of managed file (default script)
:type values:
* **batch**: Execute managed windows batch
* **script**: Execute managed script
:arg list args: Arguments to be passed to referenced script
Example:
.. literalinclude:: /../../tests/builders/fixtures/managed-script.yaml
:language: yaml
.. literalinclude:: /../../tests/builders/fixtures/managed-winbatch.yaml
:language: yaml
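A sketch with a placeholder script id:

.. code-block:: yaml

    builders:
      - managed-script:
          script-id: 'my-script-id'    # placeholder managed-script id
          type: script
          args:
            - '--verbose'              # hypothetical argument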
"""
step_type = data.get('type', 'script').lower()
if step_type == 'script':
step = 'ScriptBuildStep'
script_tag = 'buildStepId'
elif step_type == 'batch':
step = 'WinBatchBuildStep'
script_tag = 'command'
else:
raise InvalidAttributeError('type', step_type, ['script', 'batch'])
ms = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.managedscripts.' + step)
try:
script_id = data['script-id']
except KeyError:
raise MissingAttributeError('script-id')
XML.SubElement(ms, script_tag).text = script_id
args = XML.SubElement(ms, 'buildStepArgs')
for arg in data.get('args', []):
XML.SubElement(args, 'string').text = arg
def cmake(registry, xml_parent, data):
"""yaml: cmake
Execute a CMake target. Requires the Jenkins :jenkins-wiki:`CMake Plugin
<CMake+Plugin>`.
This builder is compatible with both versions 2.x and 1.x of the
plugin. When specifying parameters from both versions, only the ones
from the version installed in Jenkins will be used, and the rest will
be ignored.
:arg str source-dir: the source code directory relative to the workspace
directory. (required)
:arg str build-type: Sets the "build type" option for CMake (default
"Debug").
:arg str preload-script: Path to a CMake preload script file. (optional)
:arg str other-arguments: Other arguments to be added to the CMake
call. (optional)
:arg bool clean-build-dir: If true, delete the build directory before each
build (default false).
:arg list generator: The makefile generator (default "Unix Makefiles").
:type Possible generators:
* **Borland Makefiles**
* **CodeBlocks - MinGW Makefiles**
* **CodeBlocks - Unix Makefiles**
* **Eclipse CDT4 - MinGW Makefiles**
* **Eclipse CDT4 - NMake Makefiles**
* **Eclipse CDT4 - Unix Makefiles**
* **MSYS Makefiles**
* **MinGW Makefiles**
* **NMake Makefiles**
* **Unix Makefiles**
* **Visual Studio 6**
* **Visual Studio 7 .NET 2003**
* **Visual Studio 8 2005**
* **Visual Studio 8 2005 Win64**
* **Visual Studio 9 2008**
* **Visual Studio 9 2008 Win64**
* **Watcom WMake**
:Version 2.x: Parameters available only in versions 2.x of the plugin
* **working-dir** (`str`): The directory in which the project will
be built. Relative to the workspace directory. (optional)
* **installation-name** (`str`): The CMake installation to be used on
this builder. Use one defined in your Jenkins global configuration
page (default "InSearchPath").
* **build-tool-invocations** (`list`): list of build tool invocations
that will happen during the build:
:Build tool invocations:
* **use-cmake** (`str`) -- Whether to run the actual build tool
directly (by expanding ``$CMAKE_BUILD_TOOL``) or to have
cmake run the build tool (by invoking ``cmake --build
<dir>``) (default false).
* **arguments** (`str`) -- Specify arguments to pass to the
build tool or cmake (separated by spaces). Arguments may
contain spaces if they are enclosed in double
quotes. (optional)
* **environment-variables** (`str`) -- Specify extra
environment variables to pass to the build tool as
key-value pairs here. Each entry must be on its own line,
for example:
``DESTDIR=${WORKSPACE}/artifacts/dir``
``KEY=VALUE``
:Version 1.x: Parameters available only in versions 1.x of the plugin
* **build-dir** (`str`): The directory in which the project will be
built. Relative to the workspace directory. (optional)
* **install-dir** (`str`): The directory where the project will be
installed in, relative to the workspace directory. (optional)
* **build-type** (`list`): Sets the "build type" option. A custom type
different than the default ones specified on the CMake plugin can
also be set, which will be automatically used in the "Other Build
Type" option of the plugin. (default "Debug")
:Default types present in the CMake plugin:
* **Debug**
* **Release**
* **RelWithDebInfo**
* **MinSizeRel**
* **make-command** (`str`): The make command (default "make").
* **install-command** (`str`): The install command (default "make
install").
* **custom-cmake-path** (`str`): Path to cmake executable. (optional)
* **clean-install-dir** (`bool`): If true, delete the install dir
before each build (default false).
Example (Versions 2.x):
.. literalinclude::
../../tests/builders/fixtures/cmake/version-2.0/complete-2.x.yaml
:language: yaml
Example (Versions 1.x):
.. literalinclude::
../../tests/builders/fixtures/cmake/version-1.10/complete-1.x.yaml
:language: yaml
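A version 2.x-flavoured sketch (directory names are hypothetical):

.. code-block:: yaml

    builders:
      - cmake:
          source-dir: 'src'            # hypothetical layout
          build-type: 'Release'
          working-dir: 'build'         # 2.x-only parameter
          build-tool-invocations:
            - use-cmake: true
              arguments: '--target install'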
"""
BUILD_TYPES = ['Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel']
cmake = XML.SubElement(xml_parent, 'hudson.plugins.cmake.CmakeBuilder')
source_dir = XML.SubElement(cmake, 'sourceDir')
try:
source_dir.text = data['source-dir']
except KeyError:
raise MissingAttributeError('source-dir')
XML.SubElement(cmake, 'generator').text = str(
data.get('generator', "Unix Makefiles"))
XML.SubElement(cmake, 'preloadScript').text = str(
data.get('preload-script', ''))
XML.SubElement(cmake, 'cleanBuild').text = str(
data.get('clean-build-dir', False)).lower()
plugin_info = registry.get_plugin_info("CMake plugin")
version = pkg_resources.parse_version(plugin_info.get("version", "1.0"))
# Version 2.x breaks compatibility. So parse the input data differently
# based on it:
if version >= pkg_resources.parse_version("2.0"):
XML.SubElement(cmake, 'workingDir').text = str(
data.get('working-dir', ''))
XML.SubElement(cmake, 'buildType').text = str(
data.get('build-type', 'Debug'))
XML.SubElement(cmake, 'installationName').text = str(
data.get('installation-name', 'InSearchPath'))
XML.SubElement(cmake, 'toolArgs').text = str(
data.get('other-arguments', ''))
tool_steps = XML.SubElement(cmake, 'toolSteps')
for step_data in data.get('build-tool-invocations', []):
tagname = 'hudson.plugins.cmake.BuildToolStep'
step = XML.SubElement(tool_steps, tagname)
XML.SubElement(step, 'withCmake').text = str(
step_data.get('use-cmake', False)).lower()
XML.SubElement(step, 'args').text = str(
step_data.get('arguments', ''))
XML.SubElement(step, 'vars').text = str(
step_data.get('environment-variables', ''))
else:
build_dir = XML.SubElement(cmake, 'buildDir')
build_dir.text = data.get('build-dir', '')
install_dir = XML.SubElement(cmake, 'installDir')
install_dir.text = data.get('install-dir', '')
# The options buildType and otherBuildType work together on the CMake
# plugin:
# * If the passed value is one of the predefined values, set buildType
# to it and otherBuildType to blank;
# * Otherwise, set otherBuildType to the value, and buildType to
# "Debug". The CMake plugin will ignore the buildType option.
#
# It is strange and confusing that the plugin author chose to do
# something like that instead of simply passing a string "buildType"
# option, so this was done to simplify it for the JJB user.
build_type = XML.SubElement(cmake, 'buildType')
build_type.text = data.get('build-type', BUILD_TYPES[0])
other_build_type = XML.SubElement(cmake, 'otherBuildType')
if build_type.text not in BUILD_TYPES:
other_build_type.text = build_type.text
build_type.text = BUILD_TYPES[0]
else:
other_build_type.text = ''
make_command = XML.SubElement(cmake, 'makeCommand')
make_command.text = data.get('make-command', 'make')
install_command = XML.SubElement(cmake, 'installCommand')
install_command.text = data.get('install-command', 'make install')
other_cmake_args = XML.SubElement(cmake, 'cmakeArgs')
other_cmake_args.text = data.get('other-arguments', '')
custom_cmake_path = XML.SubElement(cmake, 'projectCmakePath')
custom_cmake_path.text = data.get('custom-cmake-path', '')
clean_install_dir = XML.SubElement(cmake, 'cleanInstallDir')
clean_install_dir.text = str(data.get('clean-install-dir',
False)).lower()
# The plugin generates this tag, but there doesn't seem to be anything
# that can be configurable by it. Let's keep it to maintain
# compatibility:
XML.SubElement(cmake, 'builderImpl')
def dsl(registry, xml_parent, data):
"""yaml: dsl
Process Job DSL
Requires the Jenkins :jenkins-wiki:`Job DSL plugin <Job+DSL+Plugin>`.
:arg str script-text: dsl script which is Groovy code (Required if targets
is not specified)
:arg str targets: Newline separated list of DSL scripts, located in the
Workspace. Can use wildcards like 'jobs/\*/\*/\*.groovy' (Required
if script-text is not specified)
:arg bool ignore-existing: Ignore previously generated jobs and views
(default false)
:arg str removed-job-action: Specifies what to do when a previously
generated job is not referenced anymore, can be 'IGNORE', 'DISABLE',
or 'DELETE' (default 'IGNORE')
:arg str removed-view-action: Specifies what to do when a previously
generated view is not referenced anymore, can be 'IGNORE' or 'DELETE'.
(default 'IGNORE')
:arg str lookup-strategy: Determines how relative job names in DSL
scripts are interpreted, can be 'JENKINS_ROOT' or 'SEED_JOB'.
(default 'JENKINS_ROOT')
:arg str additional-classpath: Newline separated list of additional
classpath entries for the Job DSL scripts. All entries must be
relative to the workspace root, e.g. build/classes/main. (optional)
Example:
.. literalinclude:: /../../tests/builders/fixtures/dsl001.yaml
:language: yaml
.. literalinclude:: /../../tests/builders/fixtures/dsl002.yaml
:language: yaml
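A minimal sketch using script targets:

.. code-block:: yaml

    builders:
      - dsl:
          targets: 'jobs/**/*.groovy'
          removed-job-action: DISABLE
          lookup-strategy: SEED_JOB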
"""
dsl = XML.SubElement(xml_parent,
'javaposse.jobdsl.plugin.ExecuteDslScripts')
if 'target' in data:
if 'targets' not in data:
logger.warning("Converting from old format of 'target' to new "
"name 'targets', please update your job "
"definitions.")
data['targets'] = data['target']
else:
logger.warning("Ignoring old argument 'target' in favour of new "
"format argument 'targets', please remove old "
"format.")
if data.get('script-text'):
XML.SubElement(dsl, 'scriptText').text = data.get('script-text')
XML.SubElement(dsl, 'usingScriptText').text = 'true'
elif data.get('targets'):
XML.SubElement(dsl, 'targets').text = data.get('targets')
XML.SubElement(dsl, 'usingScriptText').text = 'false'
else:
raise MissingAttributeError(['script-text', 'targets'])
XML.SubElement(dsl, 'ignoreExisting').text = str(data.get(
'ignore-existing', False)).lower()
supportedJobActions = ['IGNORE', 'DISABLE', 'DELETE']
removedJobAction = data.get('removed-job-action',
supportedJobActions[0])
if removedJobAction not in supportedJobActions:
raise InvalidAttributeError('removed-job-action',
removedJobAction,
supportedJobActions)
XML.SubElement(dsl, 'removedJobAction').text = removedJobAction
supportedViewActions = ['IGNORE', 'DELETE']
removedViewAction = data.get('removed-view-action',
supportedViewActions[0])
if removedViewAction not in supportedViewActions:
raise InvalidAttributeError('removed-view-action',
removedViewAction,
supportedViewActions)
XML.SubElement(dsl, 'removedViewAction').text = removedViewAction
supportedLookupActions = ['JENKINS_ROOT', 'SEED_JOB']
lookupStrategy = data.get('lookup-strategy',
supportedLookupActions[0])
if lookupStrategy not in supportedLookupActions:
raise InvalidAttributeError('lookup-strategy',
lookupStrategy,
supportedLookupActions)
XML.SubElement(dsl, 'lookupStrategy').text = lookupStrategy
XML.SubElement(dsl, 'additionalClasspath').text = data.get(
'additional-classpath')
def github_notifier(registry, xml_parent, data):
"""yaml: github-notifier
Set pending build status on Github commit.
Requires the Jenkins :jenkins-wiki:`Github Plugin <GitHub+Plugin>`.
Example:
.. literalinclude:: /../../tests/builders/fixtures/github-notifier.yaml
:language: yaml
"""
XML.SubElement(xml_parent,
'com.cloudbees.jenkins.GitHubSetCommitStatusBuilder')
def scan_build(registry, xml_parent, data):
"""yaml: scan-build
This plugin allows you to configure a build step that will execute the Clang
scan-build static analysis tool against an XCode project.
The scan-build report has to be generated in the directory
``${WORKSPACE}/clangScanBuildReports`` for the publisher to find it.
Requires the Jenkins :jenkins-wiki:`Clang Scan-Build Plugin
<Clang+Scan-Build+Plugin>`.
:arg str target: Provide the exact name of the XCode target you wish to
have compiled and analyzed (required)
:arg str target-sdk: Set the simulator version of a currently installed SDK
(default iphonesimulator)
:arg str config: Provide the XCode config you wish to execute scan-build
against (default Debug)
:arg str clang-install-name: Name of clang static analyzer to use (default
'')
:arg str xcode-sub-path: Path of XCode project relative to the workspace
(default '')
:arg str workspace: Name of workspace (default '')
:arg str scheme: Name of scheme (default '')
:arg str scan-build-args: Additional arguments to clang scan-build
(default --use-analyzer Xcode)
:arg str xcode-build-args: Additional arguments to XCode (default
-derivedDataPath $WORKSPACE/build)
:arg str report-folder: Folder where generated reports are located
(>=1.7) (default clangScanBuildReports)
Full Example:
.. literalinclude:: /../../tests/builders/fixtures/scan-build-full.yaml
:language: yaml
Minimal Example:
.. literalinclude::
/../../tests/builders/fixtures/scan-build-minimal.yaml
:language: yaml
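A minimal sketch with an assumed target name:

.. code-block:: yaml

    builders:
      - scan-build:
          target: 'MyApp'          # hypothetical XCode target
          target-sdk: iphonesimulator
          config: Debug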
"""
p = XML.SubElement(
xml_parent,
'jenkins.plugins.clangscanbuild.ClangScanBuildBuilder')
p.set('plugin', 'clang-scanbuild')
mappings = [
('target', 'target', None),
('target-sdk', 'targetSdk', 'iphonesimulator'),
('config', 'config', 'Debug'),
('clang-install-name', 'clangInstallationName', ''),
('xcode-sub-path', 'xcodeProjectSubPath', 'myProj/subfolder'),
('workspace', 'workspace', ''),
('scheme', 'scheme', ''),
('scan-build-args', 'scanbuildargs', '--use-analyzer Xcode'),
('xcode-build-args',
'xcodebuildargs',
'-derivedDataPath $WORKSPACE/build'),
('report-folder', 'outputFolderName', 'clangScanBuildReports'),
]
convert_mapping_to_xml(p, data, mappings, fail_required=True)
def ssh_builder(registry, xml_parent, data):
"""yaml: ssh-builder
Executes command on remote host
Requires the Jenkins :jenkins-wiki:`SSH plugin <SSH+plugin>`.
:arg str ssh-user-ip: user@ip:ssh_port of machine that was defined
in jenkins according to SSH plugin instructions
:arg str command: command to run on remote server
Example:
.. literalinclude:: /../../tests/builders/fixtures/ssh-builder.yaml
:language: yaml
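A sketch with a placeholder site definition:

.. code-block:: yaml

    builders:
      - ssh-builder:
          ssh-user-ip: '[email protected]:22'   # hypothetical site
          command: 'uptime'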
"""
builder = XML.SubElement(
xml_parent, 'org.jvnet.hudson.plugins.SSHBuilder')
try:
XML.SubElement(builder, 'siteName').text = str(data['ssh-user-ip'])
XML.SubElement(builder, 'command').text = str(data['command'])
except KeyError as e:
raise MissingAttributeError("'%s'" % e.args[0])
def sonar(registry, xml_parent, data):
"""yaml: sonar
Invoke standalone Sonar analysis.
Requires the Jenkins `Sonar Plugin.
<http://docs.sonarqube.org/display/SCAN/\
Analyzing+with+SonarQube+Scanner+for+Jenkins\
#AnalyzingwithSonarQubeScannerforJenkins-\
AnalyzingwiththeSonarQubeScanner>`_
:arg str sonar-name: Name of the Sonar installation.
:arg str task: Task to run. (default '')
:arg str project: Path to Sonar project properties file. (default '')
:arg str properties: Sonar configuration properties. (default '')
:arg str java-opts: Java options for Sonar Runner. (default '')
:arg str additional-arguments: additional command line arguments
(default '')
:arg str jdk: JDK to use (inherited from the job if omitted). (optional)
Example:
.. literalinclude:: /../../tests/builders/fixtures/sonar.yaml
:language: yaml
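A minimal sketch (the installation name is hypothetical):

.. code-block:: yaml

    builders:
      - sonar:
          sonar-name: 'default-sonar'    # hypothetical installation name
          project: 'sonar-project.properties'
          java-opts: '-Xmx512m'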
"""
sonar = XML.SubElement(xml_parent,
'hudson.plugins.sonar.SonarRunnerBuilder')
sonar.set('plugin', 'sonar')
XML.SubElement(sonar, 'installationName').text = data['sonar-name']
mappings = [
('task', 'task', ''),
('project', 'project', ''),
('properties', 'properties', ''),
('java-opts', 'javaOpts', ''),
('additional-arguments', 'additionalArguments', ''),
]
convert_mapping_to_xml(sonar, data, mappings, fail_required=True)
if 'jdk' in data:
XML.SubElement(sonar, 'jdk').text = data['jdk']
def xcode(registry, xml_parent, data):
"""yaml: xcode
This step allows you to execute an Xcode build step. Requires the Jenkins
:jenkins-wiki:`Xcode Plugin <Xcode+Plugin>`.
:arg str developer-profile: the jenkins credential id for a
ios developer profile. (optional)
:arg bool clean-build: if true will delete the build directories
before invoking the build. (default false)
:arg bool clean-test-reports: UNKNOWN. (default false)
:arg bool archive: if true will generate an xcarchive of the specified
scheme. A workspace and scheme are also needed for archives.
(default false)
:arg str configuration: This is the name of the configuration
as defined in the Xcode project. (default 'Release')
:arg str configuration-directory: The value to use for
CONFIGURATION_BUILD_DIR setting. (default '')
:arg str target: Leave empty for all targets. (default '')
:arg str sdk: Leave empty for default SDK. (default '')
:arg str symroot: Leave empty for default SYMROOT. (default '')
:arg str project-path: Relative path within the workspace
that contains the xcode project file(s). (default '')
:arg str project-file: Only needed if there is more than one
project file in the Xcode Project Directory. (default '')
:arg str build-arguments: Extra commandline arguments provided
to the xcode builder. (default '')
:arg str schema: Only needed if you want to compile for a
specific schema instead of a target. (default '')
:arg str workspace: Only needed if you want to compile a
workspace instead of a project. (default '')
:arg str profile: The relative path to the mobileprovision to embed,
leave blank for no embedded profile. (default '')
:arg str codesign-id: Override the code signing identity specified
in the project. (default '')
:arg bool allow-failing: if true will prevent this build step from
failing if xcodebuild exits with a non-zero return code. (default
false)
:arg str version-technical: The value to use for CFBundleVersion.
Leave blank to use project's technical number. (default '')
:arg str version-marketing: The value to use for
CFBundleShortVersionString. Leave blank to use project's
marketing number. (default '')
:arg str ipa-version: A pattern for the ipa file name. You may use
${VERSION} and ${BUILD_DATE} (yyyy.MM.dd) in this string.
(default '')
:arg str ipa-output: The output directory for the .ipa file,
relative to the build directory. (default '')
:arg str keychain-name: The globally configured keychain to unlock for
this build. (default '')
:arg str keychain-path: The path of the keychain to use to sign the IPA.
(default '')
:arg str keychain-password: The password to use to unlock the keychain.
(default '')
    :arg bool keychain-unlock: Unlocks the keychain during use.
        (default false)
Example:
.. literalinclude:: /../../tests/builders/fixtures/xcode.yaml
:language: yaml
"""
if data.get('developer-profile'):
profile = XML.SubElement(xml_parent, 'au.com.rayh.'
'DeveloperProfileLoader')
XML.SubElement(profile, 'id').text = str(
data['developer-profile'])
xcode = XML.SubElement(xml_parent, 'au.com.rayh.XCodeBuilder')
XML.SubElement(xcode, 'cleanBeforeBuild').text = str(
data.get('clean-build', False)).lower()
XML.SubElement(xcode, 'cleanTestReports').text = str(
data.get('clean-test-reports', False)).lower()
XML.SubElement(xcode, 'generateArchive').text = str(
data.get('archive', False)).lower()
XML.SubElement(xcode, 'configuration').text = str(
data.get('configuration', 'Release'))
XML.SubElement(xcode, 'configurationBuildDir').text = str(
data.get('configuration-directory', ''))
XML.SubElement(xcode, 'target').text = str(data.get('target', ''))
XML.SubElement(xcode, 'sdk').text = str(data.get('sdk', ''))
XML.SubElement(xcode, 'symRoot').text = str(data.get('symroot', ''))
XML.SubElement(xcode, 'xcodeProjectPath').text = str(
data.get('project-path', ''))
XML.SubElement(xcode, 'xcodeProjectFile').text = str(
data.get('project-file', ''))
XML.SubElement(xcode, 'xcodebuildArguments').text = str(
data.get('build-arguments', ''))
XML.SubElement(xcode, 'xcodeSchema').text = str(data.get('schema', ''))
XML.SubElement(xcode, 'xcodeWorkspaceFile').text = str(
data.get('workspace', ''))
XML.SubElement(xcode, 'embeddedProfileFile').text = str(
data.get('profile', ''))
XML.SubElement(xcode, 'codeSigningIdentity').text = str(
data.get('codesign-id', ''))
XML.SubElement(xcode, 'allowFailingBuildResults').text = str(
data.get('allow-failing', False)).lower()
version = XML.SubElement(xcode, 'provideApplicationVersion')
version_technical = XML.SubElement(xcode,
'cfBundleVersionValue')
version_marketing = XML.SubElement(xcode,
'cfBundleShortVersionStringValue')
if data.get('version-technical') or data.get('version-marketing'):
version.text = 'true'
version_technical.text = data.get('version-technical', '')
version_marketing.text = data.get('version-marketing', '')
else:
version.text = 'false'
    XML.SubElement(xcode, 'buildIpa').text = str(
        bool(data.get('ipa-version'))).lower()
XML.SubElement(xcode, 'ipaName').text = data.get('ipa-version', '')
XML.SubElement(xcode, 'ipaOutputDirectory').text = str(
data.get('ipa-output', ''))
XML.SubElement(xcode, 'keychainName').text = str(
data.get('keychain-name', ''))
XML.SubElement(xcode, 'keychainPath').text = str(
data.get('keychain-path', ''))
XML.SubElement(xcode, 'keychainPwd').text = str(
data.get('keychain-password', ''))
XML.SubElement(xcode, 'unlockKeychain').text = str(
data.get('keychain-unlock', False)).lower()
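

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the payload value is an assumption.
def _xcode_example():  # pragma: no cover
    """Hedged usage sketch for the ``xcode`` builder above.

    It shows the version logic: <provideApplicationVersion> flips to
    'true' as soon as either version argument is supplied. Relies on the
    module-level ``XML`` alias for ``xml.etree.ElementTree``.
    """
    parent = XML.Element('builders')
    xcode(None, parent, {'version-technical': '42'})  # registry is unused
    return XML.tostring(parent)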
def sonatype_clm(registry, xml_parent, data):
"""yaml: sonatype-clm
Requires the Jenkins :jenkins-wiki:`Sonatype CLM Plugin
<Sonatype+CLM+%28formerly+Insight+for+CI%29>`.
:arg str value: Select CLM application from a list of available CLM
applications or specify CLM Application ID (default list)
:arg str application-name: Determines the policy elements to associate
with this build. (required)
:arg str username: Username on the Sonatype CLM server. Leave empty to
use the username configured at global level. (default '')
:arg str password: Password on the Sonatype CLM server. Leave empty to
use the password configured at global level. (default '')
:arg bool fail-on-clm-server-failure: Controls the build outcome if there
is a failure in communicating with the CLM server. (default false)
:arg str stage: Controls the stage the policy evaluation will be run
against on the CLM server. Valid stages: build, stage-release, release,
operate. (default 'build')
:arg str scan-targets: Pattern of files to include for scanning.
(default '')
:arg str module-excludes: Pattern of files to exclude. (default '')
:arg str advanced-options: Options to be set on a case-by-case basis as
advised by Sonatype Support. (default '')
Minimal Example:
.. literalinclude::
/../../tests/builders/fixtures/sonatype-clm-minimal.yaml
:language: yaml
Full Example:
.. literalinclude::
/../../tests/builders/fixtures/sonatype-clm-complete.yaml
:language: yaml
"""
clm = XML.SubElement(xml_parent,
'com.sonatype.insight.ci.hudson.PreBuildScan')
clm.set('plugin', 'sonatype-clm-ci')
SUPPORTED_VALUES = ['list', 'manual']
SUPPORTED_STAGES = ['build', 'stage-release', 'release', 'operate']
application_select = XML.SubElement(clm,
'applicationSelectType')
application_mappings = [
('value', 'value', 'list', SUPPORTED_VALUES),
('application-name', 'applicationId', None),
]
convert_mapping_to_xml(
application_select, data, application_mappings, fail_required=True)
path = XML.SubElement(clm, 'pathConfig')
path_mappings = [
('scan-targets', 'scanTargets', ''),
('module-excludes', 'moduleExcludes', ''),
('advanced-options', 'scanProperties', ''),
]
convert_mapping_to_xml(path, data, path_mappings, fail_required=True)
mappings = [
('fail-on-clm-server-failure', 'failOnClmServerFailures', False),
('stage', 'stageId', 'build', SUPPORTED_STAGES),
('username', 'username', ''),
('password', 'password', ''),
]
convert_mapping_to_xml(clm, data, mappings, fail_required=True)
def beaker(registry, xml_parent, data):
"""yaml: beaker
Execute a beaker build step. Requires the Jenkins :jenkins-wiki:`Beaker
Builder Plugin <Beaker+Builder+Plugin>`.
:arg str content: Run job from string
(Alternative: you can choose a path instead)
:arg str path: Run job from file
(Alternative: you can choose a content instead)
:arg bool download-logs: Download Beaker log files (default false)
Example:
.. literalinclude:: ../../tests/builders/fixtures/beaker-path.yaml
:language: yaml
.. literalinclude:: ../../tests/builders/fixtures/beaker-content.yaml
:language: yaml
"""
beaker = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.beakerbuilder.'
'BeakerBuilder')
jobSource = XML.SubElement(beaker, 'jobSource')
if 'content' in data and 'path' in data:
raise JenkinsJobsException("Use just one of 'content' or 'path'")
elif 'content' in data:
jobSourceClass = "org.jenkinsci.plugins.beakerbuilder.StringJobSource"
jobSource.set('class', jobSourceClass)
XML.SubElement(jobSource, 'jobContent').text = data['content']
elif 'path' in data:
jobSourceClass = "org.jenkinsci.plugins.beakerbuilder.FileJobSource"
jobSource.set('class', jobSourceClass)
XML.SubElement(jobSource, 'jobPath').text = data['path']
else:
raise JenkinsJobsException("Use one of 'content' or 'path'")
XML.SubElement(beaker, 'downloadFiles').text = str(data.get(
'download-logs', False)).lower()
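

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the file name is an assumption.
def _beaker_example():  # pragma: no cover
    """Hedged usage sketch for the ``beaker`` builder above: exactly one
    of 'content'/'path' must be given, otherwise JenkinsJobsException is
    raised.
    """
    parent = XML.Element('builders')
    beaker(None, parent, {'path': 'beaker-job.xml'})  # valid
    try:
        beaker(None, parent, {'content': '<job/>', 'path': 'beaker-job.xml'})
    except JenkinsJobsException:
        pass  # both keys given -> rejected, as documented above
    return XML.tostring(parent)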
def cloudformation(registry, xml_parent, data):
"""yaml: cloudformation
Create cloudformation stacks before running a build and optionally
delete them at the end. Requires the Jenkins :jenkins-wiki:`AWS
Cloudformation Plugin <AWS+Cloudformation+Plugin>`.
:arg list name: The names of the stacks to create (required)
:arg str description: Description of the stack (optional)
:arg str recipe: The cloudformation recipe file (required)
:arg list parameters: List of key/value pairs to pass
into the recipe, will be joined together into a comma separated
string (optional)
:arg int timeout: Number of seconds to wait before giving up creating
a stack (default 0)
:arg str access-key: The Amazon API Access Key (required)
:arg str secret-key: The Amazon API Secret Key (required)
:arg int sleep: Number of seconds to wait before continuing to the
next step (default 0)
    :arg str region: The region to run cloudformation in (required)
:region values:
* **us-east-1**
* **us-west-1**
* **us-west-2**
* **eu-central-1**
* **eu-west-1**
* **ap-southeast-1**
* **ap-southeast-2**
* **ap-northeast-1**
* **sa-east-1**
Example:
.. literalinclude:: ../../tests/builders/fixtures/cloudformation.yaml
:language: yaml
"""
region_dict = cloudformation_region_dict()
stacks = cloudformation_init(xml_parent, data, 'CloudFormationBuildStep')
for stack in data:
cloudformation_stack(xml_parent, stack, 'PostBuildStackBean', stacks,
region_dict)
def openshift_build_verify(registry, xml_parent, data):
"""yaml: openshift-build-verify
    Performs the equivalent of an `oc get builds` command invocation for the
    provided buildConfig key; once the list of builds is obtained,
    the state of the latest build is inspected for up to a minute to see if
    it has completed successfully.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you
want to run a Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-build-verify001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-build-verify002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftBuildVerifier')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("bld-cfg", 'bldCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_builder(registry, xml_parent, data):
"""yaml: openshift-builder
Perform builds in OpenShift for the job.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg str commit-ID: The value here is what you supply with the
--commit option when invoking the
OpenShift `oc start-build` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
    :arg str build-name: The value here is what you supply with the
--from-build option when invoking the
OpenShift `oc start-build` command. (default '')
:arg bool show-build-logs: Indicates whether the build logs get dumped
to the console of the Jenkins build. (default false)
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/openshift-builder001.yaml
:language: yaml
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/openshift-builder002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftBuilder')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("bld-cfg", 'bldCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("commit-ID", 'commitID', ''),
("verbose", 'verbose', False),
("build-name", 'buildName', ''),
("show-build-logs", 'showBuildLogs', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_creator(registry, xml_parent, data):
"""yaml: openshift-creator
    Performs the equivalent of an `oc create` command invocation;
this build step takes in the provided JSON or YAML text, and if it
conforms to OpenShift schema, creates whichever
OpenShift resources are specified.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
:arg str jsonyaml: The JSON or YAML formatted text that conforms to
the schema for defining the various OpenShift resources. (default '')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-creator001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-creator002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftCreator')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("jsonyaml", 'jsonyaml', ''),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_dep_verify(registry, xml_parent, data):
"""yaml: openshift-dep-verify
    Determines whether the expected set of DeploymentConfigs,
    ReplicationControllers, and active replicas are present based on prior
    use of the scaler (2) and deployer (3) steps.
Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
        Build on (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
:arg int replica-count: The value here should be whatever the number
of pods you want started for the deployment. (default 0)
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-dep-verify001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-dep-verify002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftDeploymentVerifier')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("dep-cfg", 'depCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("replica-count", 'replicaCount', 0),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_deployer(registry, xml_parent, data):
"""yaml: openshift-deployer
Start a deployment in OpenShift for the job.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-deployer001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-deployer002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftDeployer')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("dep-cfg", 'depCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_img_tagger(registry, xml_parent, data):
"""yaml: openshift-img-tagger
    Performs the equivalent of an `oc tag` command invocation in order to
    manipulate tags for images in OpenShift ImageStreams.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str test-tag: The source image stream tag, i.e. the equivalent of
        the source argument supplied to an `oc tag` command line invocation.
        (default 'origin-nodejs-sample:latest')
    :arg str prod-tag: The destination image stream tag, i.e. the equivalent
        of the destination argument supplied to an `oc tag` command line
        invocation. (default 'origin-nodejs-sample:prod')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-img-tagger001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-img-tagger002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftImageTagger')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("test-tag", 'testTag', 'origin-nodejs-sample:latest'),
("prod-tag", 'prodTag', 'origin-nodejs-sample:prod'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_scaler(registry, xml_parent, data):
"""yaml: openshift-scaler
Scale deployments in OpenShift for the job.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
Build on (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg int replica-count: The value here should be whatever the number
of pods you want started for the deployment. (default 0)
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude:: ../../tests/builders/fixtures/openshift-scaler001.yaml
:language: yaml
Minimal Example:
.. literalinclude:: ../../tests/builders/fixtures/openshift-scaler002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftScaler')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("dep-cfg", 'depCfg', 'frontend'),
("namespace", 'namespace', 'test'),
("replica-count", 'replicaCount', 0),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_svc_verify(registry, xml_parent, data):
"""yaml: openshift-svc-verify
Verify a service is up in OpenShift for the job.
Requires the Jenkins :jenkins-wiki:`OpenShift
Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
:arg str api-url: this would be the value you specify if you leverage the
--server option on the OpenShift `oc` command.
(default '\https://openshift.default.svc.cluster.local')
:arg str svc-name: The equivalent to the name supplied to a
`oc get service` command line invocation. (default 'frontend')
:arg str namespace: If you run `oc get bc` for the project listed in
"namespace", that is the value you want to put here. (default 'test')
:arg str auth-token: The value here is what you supply with the --token
option when invoking the OpenShift `oc` command. (default '')
:arg bool verbose: This flag is the toggle for
turning on or off detailed logging in this plug-in. (default false)
Full Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-svc-verify001.yaml
:language: yaml
Minimal Example:
.. literalinclude::
../../tests/builders/fixtures/openshift-svc-verify002.yaml
:language: yaml
"""
osb = XML.SubElement(xml_parent,
'com.openshift.jenkins.plugins.pipeline.'
'OpenShiftServiceVerifier')
mapping = [
# option, xml name, default value
("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
("svc-name", 'svcName', 'frontend'),
("namespace", 'namespace', 'test'),
("auth-token", 'authToken', ''),
("verbose", 'verbose', False),
]
convert_mapping_to_xml(osb, data, mapping, fail_required=True)
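

# Editor's note: the sketch below is illustrative and not part of the
# upstream module.
def _openshift_mapping_example():  # pragma: no cover
    """Hedged sketch of the pattern shared by all openshift_* builders
    above: a (yaml option, xml tag, default) mapping handed to
    ``convert_mapping_to_xml``. With an empty payload every documented
    default is emitted, which is what the "minimal" fixtures exercise.
    """
    parent = XML.Element('builders')
    openshift_svc_verify(None, parent, {})
    return XML.tostring(parent)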
def runscope(registry, xml_parent, data):
"""yaml: runscope
Execute a Runscope test.
Requires the Jenkins :jenkins-wiki:`Runscope Plugin <Runscope+Plugin>`.
:arg str test-trigger-url: Trigger URL for test. (required)
:arg str access-token: OAuth Personal Access token. (required)
:arg int timeout: Timeout for test duration in seconds. (default 60)
Minimal Example:
.. literalinclude:: /../../tests/builders/fixtures/runscope-minimal.yaml
:language: yaml
Full Example:
.. literalinclude:: /../../tests/builders/fixtures/runscope-full.yaml
:language: yaml
"""
runscope = XML.SubElement(xml_parent,
'com.runscope.jenkins.Runscope.RunscopeBuilder')
runscope.set('plugin', 'runscope')
mapping = [
('test-trigger-url', 'triggerEndPoint', None),
('access-token', 'accessToken', None),
('timeout', 'timeout', 60),
]
convert_mapping_to_xml(runscope, data, mapping, fail_required=True)
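

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the URL and token values are made up.
def _runscope_example():  # pragma: no cover
    """Hedged sketch for the ``runscope`` builder above. A ``None``
    default in the mapping marks an option as required when
    ``fail_required=True``, so both the trigger URL and the access token
    must be supplied.
    """
    parent = XML.Element('builders')
    runscope(None, parent, {
        'test-trigger-url': 'https://api.runscope.com/radar/example/trigger',
        'access-token': 'example-token',
    })
    return XML.tostring(parent)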
def description_setter(registry, xml_parent, data):
"""yaml: description-setter
This plugin sets the description for each build,
based upon a RegEx test of the build log file.
Requires the Jenkins :jenkins-wiki:`Description Setter Plugin
<Description+Setter+Plugin>`.
:arg str regexp: A RegEx which is used to scan the build log file
(default '')
:arg str description: The description to set on the build (optional)
Example:
.. literalinclude::
/../../tests/builders/fixtures/description-setter001.yaml
:language: yaml
"""
descriptionsetter = XML.SubElement(
xml_parent,
'hudson.plugins.descriptionsetter.DescriptionSetterBuilder')
XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '')
if 'description' in data:
XML.SubElement(descriptionsetter, 'description').text = data[
'description']
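

# Editor's note: the sketch below is illustrative and not part of the
# upstream module.
def _description_setter_example():  # pragma: no cover
    """Hedged sketch for the ``description_setter`` builder above:
    <regexp> is always emitted (empty by default) while <description>
    only appears when the key is present in the payload.
    """
    parent = XML.Element('builders')
    description_setter(None, parent, {'regexp': r'version=(\d+)'})
    return XML.tostring(parent)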
def docker_build_publish(parse, xml_parent, data):
"""yaml: docker-build-publish
Requires the Jenkins :jenkins-wiki:`Docker build publish Plugin
<Docker+build+publish+Plugin>`.
:arg str repo-name: Name of repository to push to.
:arg str repo-tag: Tag for image. (default '')
:arg bool no-cache: If build should be cached. (default false)
:arg bool no-force-pull: Don't update the source image before building when
it exists locally. (default false)
:arg bool skip-build: Do not build the image. (default false)
:arg bool skip-decorate: Do not decorate the build name. (default false)
:arg bool skip-tag-latest: Do not tag this build as latest. (default false)
:arg bool skip-push: Do not push. (default false)
:arg str file-path: Path of the Dockerfile. (default '')
:arg str build-context: Project root path for the build, defaults to the
workspace if not specified. (default '')
Example:
.. literalinclude:: /../../tests/builders/fixtures/docker-builder001.yaml
"""
db = XML.SubElement(xml_parent,
'com.cloudbees.dockerpublish.DockerBuilder')
db.set('plugin', 'docker-build-publish')
mapping = [
('repo-name', 'repoName', None),
('repo-tag', 'repoTag', ''),
('no-cache', 'noCache', False),
('no-force-pull', 'noForcePull', False),
('skip-build', 'skipBuild', False),
('skip-decorate', 'skipDecorate', False),
('skip-tag-latest', 'skipTagLatest', False),
('skip-push', 'skipPush', False),
('file-path', 'dockerfilePath', ''),
('build-context', 'buildContext', ''),
]
convert_mapping_to_xml(db, data, mapping, fail_required=True)
def build_name_setter(registry, xml_parent, data):
"""yaml: build-name-setter
Define Build Name Setter options which allows your build name to be
updated during the build process.
Requires the Jenkins :jenkins-wiki:`Build Name Setter Plugin
<Build+Name+Setter+Plugin>`.
:arg str name: Filename to use for Build Name Setter, only used if
file bool is true. (default 'version.txt')
:arg str template: Macro Template string, only used if macro
bool is true. (default '#${BUILD_NUMBER}')
:arg bool file: Read from named file (default false)
:arg bool macro: Read from macro template (default false)
:arg bool macro-first: Insert macro first (default false)
File Example:
.. literalinclude::
/../../tests/builders/fixtures/build-name-setter001.yaml
:language: yaml
Macro Example:
.. literalinclude::
/../../tests/builders/fixtures/build-name-setter002.yaml
:language: yaml
"""
build_name_setter = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.buildnameupdater.BuildNameUpdater')
mapping = [
('name', 'buildName', 'version.txt'),
('template', 'macroTemplate', '#${BUILD_NUMBER}'),
('file', 'fromFile', False),
('macro', 'fromMacro', False),
('macro-first', 'macroFirst', False),
]
convert_mapping_to_xml(
build_name_setter, data, mapping, fail_required=True)
def nexus_artifact_uploader(registry, xml_parent, data):
"""yaml: nexus-artifact-uploader
To upload result of a build as an artifact in Nexus without the need of
    Maven. Requires the Jenkins :jenkins-wiki:`Nexus Artifact Uploader Plugin
    <Nexus+Artifact+Uploader>`.
:arg str protocol: Protocol to use to connect to Nexus (default https)
:arg str nexus_url: Nexus url (without protocol) (default '')
:arg str nexus_user: Username to upload artifact to Nexus (default '')
:arg str nexus_password: Password to upload artifact to Nexus
(default '')
:arg str group_id: GroupId to set for the artifact to upload
(default '')
:arg str artifact_id: ArtifactId to set for the artifact to upload
(default '')
:arg str version: Version to set for the artifact to upload
(default '')
:arg str packaging: Packaging to set for the artifact to upload
(default '')
:arg str type: Type to set for the artifact to upload (default '')
:arg str classifier: Classifier to set for the artifact to upload
(default '')
:arg str repository: In which repository to upload the artifact
(default '')
:arg str file: File which will be the uploaded artifact (default '')
:arg str credentials_id: Credentials to use (instead of password)
(default '')
    Example:
.. literalinclude::
/../../tests/builders/fixtures/nexus-artifact-uploader.yaml
:language: yaml
"""
nexus_artifact_uploader = XML.SubElement(
xml_parent,
'sp.sd.nexusartifactuploader.NexusArtifactUploader')
mapping = [
('protocol', 'protocol', 'https'),
('nexus_url', 'nexusUrl', ''),
('nexus_user', 'nexusUser', ''),
('nexus_password', 'nexusPassword', ''),
('group_id', 'groupId', ''),
('artifact_id', 'artifactId', ''),
('version', 'version', ''),
('packaging', 'packaging', ''),
('type', 'type', ''),
('classifier', 'classifier', ''),
('repository', 'repository', ''),
('file', 'file', ''),
('credentials_id', 'credentialsId', ''),
]
convert_mapping_to_xml(
nexus_artifact_uploader, data, mapping, fail_required=True) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/builders.py | builders.py |
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.modules.base
from jenkins_jobs.modules.helpers import build_trends_publisher
from jenkins_jobs.modules.helpers import findbugs_settings
def email(registry, xml_parent, data):
"""yaml: email
Email notifications on build failure.
:arg str recipients: Recipient email addresses
:arg bool notify-every-unstable-build: Send an email for every
unstable build (default true)
:arg bool send-to-individuals: Send an email to the individual
who broke the build (default false)
:arg bool notify-for-each-module: Send an email for each module
(e.g. failed, unstable). (default true)
Example::
reporters:
- email:
recipients: [email protected]
"""
mailer = XML.SubElement(xml_parent,
'hudson.maven.reporters.MavenMailer')
XML.SubElement(mailer, 'recipients').text = data['recipients']
    # Note the logic reversal (included here to match the GUI)
    XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = str(
        not data.get('notify-every-unstable-build', True)).lower()
XML.SubElement(mailer, 'sendToIndividuals').text = str(
data.get('send-to-individuals', False)).lower()
    XML.SubElement(mailer, 'perModuleEmail').text = str(
        data.get('notify-for-each-module', True)).lower()
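

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the address is made up.
def _email_example():  # pragma: no cover
    """Hedged sketch for the ``email`` reporter above. It highlights the
    inverted boolean: YAML ``notify-every-unstable-build: true`` becomes
    ``<dontNotifyEveryUnstableBuild>false</dontNotifyEveryUnstableBuild>``.
    """
    parent = XML.Element('reporters')
    email(None, parent, {'recipients': '[email protected]'})
    return XML.tostring(parent)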
def findbugs(registry, xml_parent, data):
"""yaml: findbugs
FindBugs reporting for builds
Requires the Jenkins :jenkins-wiki:`FindBugs Plugin
<FindBugs+Plugin>`.
:arg bool rank-priority: Use rank as priority (default false)
:arg str include-files: Comma separated list of files to include.
(Optional)
:arg str exclude-files: Comma separated list of files to exclude.
(Optional)
    :arg bool can-run-on-failed: Whether or not to run plug-in on failed builds
        (default false)
:arg int healthy: Sunny threshold (optional)
:arg int unhealthy: Stormy threshold (optional)
:arg str health-threshold: Threshold priority for health status
        ('low', 'normal' or 'high') (default 'low')
:arg bool dont-compute-new: If set to false, computes new warnings based on
the reference build (default true)
:arg bool use-delta-values: Use delta for new warnings. (default false)
:arg bool use-previous-build-as-reference: If set then the number of new
warnings will always be calculated based on the previous build.
Otherwise the reference build. (default false)
:arg bool use-stable-build-as-reference: The number of new warnings will be
calculated based on the last stable build, allowing reverts of unstable
builds where the number of warnings was decreased. (default false)
:arg dict thresholds:
:thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
Minimal Example:
.. literalinclude:: /../../tests/reporters/fixtures/findbugs-minimal.yaml
Full Example:
.. literalinclude:: /../../tests/reporters/fixtures/findbugs01.yaml
"""
findbugs = XML.SubElement(xml_parent,
'hudson.plugins.findbugs.FindBugsReporter')
findbugs.set('plugin', 'findbugs')
findbugs_settings(findbugs, data)
build_trends_publisher('[FINDBUGS] ', findbugs, data)
class Reporters(jenkins_jobs.modules.base.Base):
sequence = 55
component_type = 'reporter'
component_list_type = 'reporters'
def gen_xml(self, xml_parent, data):
if 'reporters' not in data:
return
if xml_parent.tag != 'maven2-moduleset':
raise JenkinsJobsException("Reporters may only be used for Maven "
"modules.")
reporters = XML.SubElement(xml_parent, 'reporters')
for action in data.get('reporters', []):
self.registry.dispatch('reporter', reporters, action) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/modules/reporters.py | reporters.py |
import io
import os
import logging
import platform
import sys
from stevedore import extension
import yaml
from jenkins_jobs.cli.parser import create_parser
from jenkins_jobs.config import JJBConfig
from jenkins_jobs import utils
from jenkins_jobs import version
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def __version__():
return "Jenkins Job Builder version: %s" % \
version.version_info.version_string()
class JenkinsJobs(object):
""" This is the entry point class for the `jenkins-jobs` command line tool.
While this class can be used programmatically by external users of the JJB
API, the main goal here is to abstract the `jenkins_jobs` tool in a way
that prevents test suites from caring overly much about various
implementation details--for example, tests of subcommands must not have
access to directly modify configuration objects, instead they must provide
a fixture in the form of an .ini file that provides the configuration
necessary for testing.
External users of the JJB API may be interested in this class as an
alternative to wrapping `jenkins_jobs` with a subprocess that execs it as a
system command; instead, python scripts may be written that pass
`jenkins_jobs` args directly to this class to allow programmatic setting of
various command line parameters.
"""
def __init__(self, args=None, **kwargs):
if args is None:
args = []
self.parser = create_parser()
self.options = self.parser.parse_args(args)
self.jjb_config = JJBConfig(self.options.conf, **kwargs)
if not self.options.command:
self.parser.error("Must specify a 'command' to be performed")
if (self.options.log_level is not None):
self.options.log_level = getattr(logging,
self.options.log_level.upper(),
logger.getEffectiveLevel())
logger.setLevel(self.options.log_level)
self._parse_additional()
self.jjb_config.validate()
def _set_config(self, target, option):
"""
Sets the option in target only if the given option was explicitly set
"""
opt_val = getattr(self.options, option, None)
if opt_val is not None:
target[option] = opt_val
def _parse_additional(self):
self._set_config(self.jjb_config.builder, 'ignore_cache')
self._set_config(self.jjb_config.builder, 'flush_cache')
self._set_config(self.jjb_config.yamlparser, 'allow_empty_variables')
self._set_config(self.jjb_config.jenkins, 'user')
self._set_config(self.jjb_config.jenkins, 'password')
if getattr(self.options, 'plugins_info_path', None) is not None:
with io.open(self.options.plugins_info_path, 'r',
encoding='utf-8') as yaml_file:
                plugins_info = yaml.safe_load(yaml_file)
if not isinstance(plugins_info, list):
self.parser.error("{0} must contain a Yaml list!".format(
self.options.plugins_info_path))
self.jjb_config.builder['plugins_info'] = plugins_info
if getattr(self.options, 'path', None):
if hasattr(self.options.path, 'read'):
logger.debug("Input file is stdin")
if self.options.path.isatty():
if platform.system() == 'Windows':
key = 'CTRL+Z'
else:
key = 'CTRL+D'
logger.warning("Reading configuration from STDIN. "
"Press %s to end input.", key)
else:
# take list of paths
self.options.path = self.options.path.split(os.pathsep)
do_recurse = (getattr(self.options, 'recursive', False) or
self.jjb_config.recursive)
excludes = ([e for elist in self.options.exclude
for e in elist.split(os.pathsep)] or
self.jjb_config.excludes)
paths = []
for path in self.options.path:
if do_recurse and os.path.isdir(path):
paths.extend(utils.recurse_path(path, excludes))
else:
paths.append(path)
self.options.path = paths
def execute(self):
extension_manager = extension.ExtensionManager(
namespace='jjb.cli.subcommands',
invoke_on_load=True,)
ext = extension_manager[self.options.command]
ext.obj.execute(self.options, self.jjb_config)
def main():
argv = sys.argv[1:]
jjb = JenkinsJobs(argv)
jjb.execute() | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/cli/entry.py | entry.py |
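

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the config file name and job path are assumptions.
def _programmatic_example():  # pragma: no cover
    """Hedged sketch of the programmatic use described in the JenkinsJobs
    docstring: pass command-line style arguments straight to the class
    instead of shelling out to ``jenkins-jobs``.
    """
    jjb = JenkinsJobs(['--conf', 'jenkins_jobs.ini', 'test', 'jobs/'])
    jjb.execute()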
import argparse
import jenkins_jobs.version
from stevedore import extension
def __version__():
return "Jenkins Job Builder version: %s" % \
jenkins_jobs.version.version_info.version_string()
def create_parser():
""" Create an ArgumentParser object usable by JenkinsJobs.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--conf',
dest='conf',
help="configuration file")
parser.add_argument(
'-l',
'--log_level',
dest='log_level',
default='info',
help="log level (default: %(default)s)")
parser.add_argument(
'--ignore-cache',
action='store_true',
dest='ignore_cache',
default=None,
help="ignore the cache and update the jobs anyhow (that will "
"only flush the specified jobs cache)")
parser.add_argument(
'--flush-cache',
action='store_true',
dest='flush_cache',
default=None,
help="flush all the cache entries before updating")
parser.add_argument(
'--version',
dest='version',
action='version',
version=__version__(),
help="show version")
parser.add_argument(
'--allow-empty-variables',
action='store_true',
dest='allow_empty_variables',
default=None,
help="Don\'t fail if any of the variables inside any string are "
"not defined, replace with empty string instead.")
parser.add_argument(
'--user', '-u',
help="The Jenkins user to use for authentication. This overrides "
"the user specified in the configuration file.")
parser.add_argument(
'--password', '-p',
help="Password or API token to use for authenticating towards Jenkins."
" This overrides the password specified in the configuration file.")
subparser = parser.add_subparsers(
dest='command',
help="update, test or delete job")
extension_manager = extension.ExtensionManager(
namespace='jjb.cli.subcommands',
invoke_on_load=True,
)
def parse_subcommand_args(ext, subparser):
ext.obj.parse_args(subparser)
extension_manager.map(parse_subcommand_args, subparser)
return parser | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/cli/parser.py | parser.py |
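

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; it assumes the 'update' subcommand plugin is installed.
def _create_parser_example():  # pragma: no cover
    """Hedged sketch: the parser can be used standalone. Subcommands are
    discovered through the ``jjb.cli.subcommands`` stevedore namespace,
    so they must be registered for this parse to succeed.
    """
    parser = create_parser()
    options = parser.parse_args(['--log_level', 'debug', 'update', 'jobs/'])
    return options.command  # -> 'update'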
import logging
import sys
import time
from jenkins_jobs.builder import JenkinsManager
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.registry import ModuleRegistry
from jenkins_jobs.xml_config import XmlJobGenerator
from jenkins_jobs.xml_config import XmlViewGenerator
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.cli.subcommand.base as base
logger = logging.getLogger(__name__)
class UpdateSubCommand(base.BaseSubCommand):
def parse_arg_path(self, parser):
parser.add_argument(
'path',
nargs='?',
default=sys.stdin,
help="colon-separated list of paths to YAML files "
"or directories")
def parse_arg_names(self, parser):
parser.add_argument(
'names',
help='name(s) of job(s)', nargs='*')
def parse_args(self, subparser):
update = subparser.add_parser('update')
self.parse_option_recursive_exclude(update)
self.parse_arg_path(update)
self.parse_arg_names(update)
update.add_argument(
'--delete-old',
action='store_true',
dest='delete_old',
default=False,
help='delete obsolete jobs')
update.add_argument(
'--workers',
type=int,
default=1,
dest='n_workers',
help="number of workers to use, 0 for autodetection and 1 "
"for just one worker.")
def _generate_xmljobs(self, options, jjb_config=None):
builder = JenkinsManager(jjb_config)
logger.info("Updating jobs in {0} ({1})".format(
options.path, options.names))
orig = time.time()
# Generate XML
parser = YamlParser(jjb_config)
registry = ModuleRegistry(jjb_config, builder.plugins_list)
xml_job_generator = XmlJobGenerator(registry)
xml_view_generator = XmlViewGenerator(registry)
parser.load_files(options.path)
registry.set_parser_data(parser.data)
job_data_list, view_data_list = parser.expandYaml(
registry, options.names)
xml_jobs = xml_job_generator.generateXML(job_data_list)
xml_views = xml_view_generator.generateXML(view_data_list)
jobs = parser.jobs
step = time.time()
        logger.debug('%d XML files generated in %ss',
                     len(jobs), str(step - orig))
return builder, xml_jobs, xml_views
def execute(self, options, jjb_config):
if options.n_workers < 0:
            raise JenkinsJobsException(
                'Number of workers must be equal to or greater than 0')
builder, xml_jobs, xml_views = self._generate_xmljobs(
options, jjb_config)
jobs, num_updated_jobs = builder.update_jobs(
xml_jobs, n_workers=options.n_workers)
logger.info("Number of jobs updated: %d", num_updated_jobs)
views, num_updated_views = builder.update_views(
xml_views, n_workers=options.n_workers)
logger.info("Number of views updated: %d", num_updated_views)
keep_jobs = [job.name for job in xml_jobs]
if options.delete_old:
n = builder.delete_old_managed(keep=keep_jobs)
logger.info("Number of jobs deleted: %d", n) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/cli/subcommand/update.py | update.py |
# Most of this code originated in sphinx.domains.python and
# sphinx.ext.autodoc and has been only slightly adapted for use in
# subclasses here.
# :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
# :license: BSD, see LICENSE for details.
import re
from sphinx import addnodes
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import Documenter
from sphinx.ext.autodoc import FunctionDocumenter
from sphinx.locale import _
yaml_sig_re = re.compile(r'yaml:\s*(.*)')
class PyYAMLFunction(PyModulelevel):
def handle_signature(self, sig, signode):
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
name_prefix = None
name = sig
arglist = None
retann = None
# determine module and class name (if applicable), as well as full name
modname = self.options.get(
'module', self.env.temp_data.get('py:module'))
classname = self.env.temp_data.get('py:class')
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
_pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name_cls):
return _('%s (in module %s)') % (name_cls[0], modname)
class YAMLFunctionDocumenter(FunctionDocumenter):
priority = FunctionDocumenter.priority + 10
objtype = 'yamlfunction'
directivetype = 'yamlfunction'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
if not FunctionDocumenter.can_document_member(member, membername,
isattr, parent):
return False
if member.__doc__ is not None and yaml_sig_re.match(member.__doc__):
return True
return False
def _find_signature(self, encoding=None):
docstrings = Documenter.get_doc(self, encoding, 2)
if len(docstrings) != 1:
return
doclines = docstrings[0]
setattr(self, '__new_doclines', doclines)
if not doclines:
return
# match first line of docstring against signature RE
match = yaml_sig_re.match(doclines[0])
if not match:
return
name = match.group(1)
# ok, now jump over remaining empty lines and set the remaining
# lines as the new doclines
i = 1
while i < len(doclines) and not doclines[i].strip():
i += 1
setattr(self, '__new_doclines', doclines[i:])
return name
def get_doc(self, encoding=None, ignore=1):
lines = getattr(self, '__new_doclines', None)
if lines is not None:
return [lines]
return Documenter.get_doc(self, encoding, ignore)
def format_signature(self):
result = self._find_signature()
self._name = result
return ''
def format_name(self):
return self._name
def setup(app):
app.add_autodocumenter(YAMLFunctionDocumenter)
app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction) | zerotk.jenkins-job-builder | /zerotk.jenkins-job-builder-2.0.0.0b2.tar.gz/zerotk.jenkins-job-builder-2.0.0.0b2/jenkins_jobs/sphinx/yaml.py | yaml.py |
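

# Editor's note: the sketch below is illustrative and not part of the
# upstream module.
def _yaml_sig_example():  # pragma: no cover
    """Hedged sketch: the documenter keys off a first docstring line of
    the form ``yaml: <name>``; the captured group becomes the documented
    name shown in the generated docs.
    """
    match = yaml_sig_re.match('yaml: email')
    return match.group(1)  # -> 'email'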
from functools import lru_cache
import logging
class GitIgnored(object):
"""
Helper class to find out if a filename is being ignored by .gitignore files.
"""
def __init__(self, git_directory=".git", gitignore_filename=".gitignore"):
self.__gitignore_filename = gitignore_filename
self.__git_directory = git_directory
self._logger = logging.getLogger(__name__)
def filter(self, filenames):
"""
Filters filenames that match .gitignore files in their parent path.
:param list(Path) filenames:
:return list(Path):
"""
from pathlib import Path
filenames = map(Path, filenames)
return [i for i in filenames if not self.is_ignored(i)]
def is_ignored(self, filename):
"""
Returns whether the given filename is git-ignored considering all .gitignore files in their parent path.
:param Path filename:
:return bool:
"""
from pathspec import PathSpec
from pathspec.patterns import GitWildMatchPattern
self._logger.debug("is_ignored", filename)
patterns = self._collect_patterns(filename)
self._logger.debug("patterns", patterns)
spec = PathSpec(map(GitWildMatchPattern, patterns))
result = spec.match_file(str(filename))
return result
def _collect_patterns(self, filename):
"""
Collect patterns from all .gitignore files in the parent path.
:param Path filename:
:return list(str):
"""
from pathlib import Path
result = []
filename = Path(filename).resolve()
for i_filename in self.list(filename):
self._logger.debug("_collect_patterns", i_filename)
result += self._read_patterns(i_filename)
return result
@lru_cache()
def _read_patterns(self, filename):
"""
Returns a list of patterns from the given filename.
:param Path filename:
:return list(str):
"""
result = filename.read_text().split("\n")
result = [i.strip() for i in result]
result = [i for i in result if len(i) > 0]
self._logger.debug("_read_patterns", result)
return result
def list(self, filename):
"""
Lists all gitignore files that have influence to the given filename.
Stops when it finds the repository root directory, in other words, when it finds the directory with a .git directory
in it.
:param Path filename:
:return list(Path):
"""
from pathlib import Path
result = []
curdir = Path(filename)
        while True:
            gitignore = curdir / self.__gitignore_filename
            if gitignore.exists():
                result.append(gitignore)
            # Stop at the repository root or at the filesystem root
            # (``curdir.parent == curdir`` only holds at the root); the
            # original ``curdir != curdir.root`` compared a Path to a str
            # and could never terminate the loop.
            if self._is_git_root(curdir) or curdir.parent == curdir:
                break
            curdir = curdir.parent
return result
def _is_git_root(self, directory):
"""
Returns wheter the given directory is the root directory of a Git repository.
:param Path directory:
:return bool:
"""
git_filename = directory / self.__git_directory
return git_filename.exists() | zerotk.lib | /zerotk.lib-1.3.1.tar.gz/zerotk.lib-1.3.1/zerotk/lib/gitignored.py | gitignored.py |
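

# Editor's note: the sketch below is illustrative and not part of the
# upstream module; the candidate paths are made up.
def _gitignored_example():  # pragma: no cover
    """Hedged usage sketch for GitIgnored: keep only the candidates that
    no .gitignore along their parent path matches. Requires the
    ``pathspec`` dependency used above.
    """
    candidates = ['src/app.py', 'build/output.bin']
    return GitIgnored().filter(candidates)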
[]()
[]()
[]()
# zerotk.reraiseit
Reraise utility function. Just that!
```python
from zerotk.reraiseit import reraise
try:
raise RuntimeError('An error occurred')
except Exception as e:
    reraise(e, 'While testing reraise.')
```
This will produce the following output:
```
(... traceback ...)
RuntimeError:
While testing reraise.
An error occurred
```
As you can see, it added a message to the exception and re-raise it.
| zerotk.reraiseit | /zerotk.reraiseit-2.0.7.tar.gz/zerotk.reraiseit-2.0.7/README.md | README.md |
from __future__ import unicode_literals
'''
Inspired by http://www.thescripts.com/forum/thread46361.html
Derived from github.com/esss/ben10.
'''
import six
import locale
#===================================================================================================
# reraiseit
#===================================================================================================
def reraise(exception, message, separator='\n'):
'''
Raised the same exception given, with an additional message.
:param Exception exception:
Original exception being raised with additional messages
:param unicode message:
Message to be added to the given exception
:param unicode separator:
String separating `message` from the `exception`'s original message.
    e.g.
        try:
            raise RuntimeError('original message')
        except Exception as e:
            reraise(e, 'message')
        >>> RuntimeError:
        >>> message
        >>> original message
        try:
            raise RuntimeError('original message')
        except Exception as e:
            reraise(e, '[message]', separator=' ')
        >>> RuntimeError:
        >>> [message] original message
'''
import sys
# IMPORTANT: Do NOT use try/except mechanisms in this method or the sys.exc_info()[-1] will be invalid
if hasattr(exception, 'reraised_message'):
current_message = exception.reraised_message
else:
current_message = exception_to_unicode(exception)
# Build the new message
if not current_message.startswith(separator):
current_message = separator + current_message
message = '\n' + message + current_message
if exception.__class__ in _SPECIAL_EXCEPTION_MAP:
# Handling for special case, some exceptions have different behaviors.
exception = _SPECIAL_EXCEPTION_MAP[exception.__class__](*exception.args)
elif exception.__class__ not in _SPECIAL_EXCEPTION_MAP.values():
# In Python 2.5 overriding the exception "__str__" has no effect in "unicode()". Instead, we
# must change the "args" attribute which is used to build the string representation.
# Even though the documentation says "args" will be deprecated, it uses its first argument
# in unicode() implementation and not "message".
exception.args = (message,)
exception.message = message
# keep the already decoded message in the object in case this exception is reraised again
exception.reraised_message = message
# Reraise the exception with the EXTRA message information
if six.PY2:
six.reraise(exception, None, sys.exc_info()[-1])
else:
raise exception.with_traceback(sys.exc_info()[-1])
#===================================================================================================
# exception_to_unicode
#===================================================================================================
def exception_to_unicode(exception):
'''
Obtains unicode representation of an Exception.
This wrapper is used to circumvent Python 2.7 problems with built-in exceptions with unicode
messages.
Steps used:
* Try to obtain Exception.__unicode__
* Try to obtain Exception.__str__ and decode with utf-8
* Try to obtain Exception.__str__ and decode with locale.getpreferredencoding
* If all fails, return Exception.__str__ and decode with (ascii, errors='replace')
:param Exception exception:
:return unicode:
Unicode representation of an Exception.
'''
if six.PY2:
try:
# First, try to obtain __unicode__ as defined by the Exception
return six.text_type(exception)
except UnicodeDecodeError:
try:
# If that fails, try decoding with utf-8 which is the strictest and will complain loudly.
return bytes(exception).decode('utf-8')
except UnicodeDecodeError:
try:
# If that fails, try obtaining bytes repr and decoding with locale
return bytes(exception).decode(locale.getpreferredencoding())
except UnicodeDecodeError:
# If all failed, give up and decode with ascii replacing errors.
return bytes(exception).decode(errors='replace')
except UnicodeEncodeError:
# Some exception contain unicode messages, but try to convert them to bytes when calling
# unicode() (such as IOError). In these cases, we do our best to fix Python 2.7's poor
# handling of unicode exceptions.
assert type(exception.message) == six.text_type # This should be true if code got here.
return exception.message
else:
return str(exception)
#===================================================================================================
# SPECIAL_EXCEPTIONS
#===================================================================================================
# [[[cog
# SPECIAL_EXCEPTIONS = [
# KeyError,
# OSError,
# IOError,
# SyntaxError,
# UnicodeDecodeError,
# UnicodeEncodeError,
# ]
# from ben10.foundation.string import Dedent
# exception_map = []
# for exception_class in SPECIAL_EXCEPTIONS:
# superclass_name = exception_class.__name__
# exception_map.append('\n ' + superclass_name + ' : Reraised' + superclass_name + ',')
# cog.out(Dedent(
# '''
# class Reraised%(superclass_name)s(%(superclass_name)s):
# def __init__(self, *args):
# %(superclass_name)s.__init__(self, *args)
# self.message = None
#
# def __str__(self):
# return self.message
#
#
# '''% locals()
# ))
# cog.out(Dedent(
# '''
# _SPECIAL_EXCEPTION_MAP = {%s
# }
# ''' % ''.join(exception_map)
# ))
# ]]]
class ReraisedKeyError(KeyError):
def __init__(self, *args):
KeyError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
class ReraisedOSError(OSError):
def __init__(self, *args):
OSError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
class ReraisedOSError(OSError):
def __init__(self, *args):
OSError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
class ReraisedSyntaxError(SyntaxError):
def __init__(self, *args):
SyntaxError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
class ReraisedUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, *args):
UnicodeDecodeError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
class ReraisedUnicodeEncodeError(UnicodeEncodeError):
def __init__(self, *args):
UnicodeEncodeError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
_SPECIAL_EXCEPTION_MAP = {
KeyError : ReraisedKeyError,
OSError : ReraisedOSError,
OSError : ReraisedOSError,
SyntaxError : ReraisedSyntaxError,
UnicodeDecodeError : ReraisedUnicodeDecodeError,
UnicodeEncodeError : ReraisedUnicodeEncodeError,
}
# [[[end]]] (checksum: 896c3faa794c9a17cbe89209d38816dc)
if six.PY3:
class ReraisedFileNotFoundError(FileNotFoundError):
def __init__(self, *args):
FileNotFoundError.__init__(self, *args)
self.message = None
def __str__(self):
return self.message
_SPECIAL_EXCEPTION_MAP[FileNotFoundError] = ReraisedFileNotFoundError | zerotk.reraiseit | /zerotk.reraiseit-2.0.7.tar.gz/zerotk.reraiseit-2.0.7/zerotk/reraiseit/_reraiseit.py | _reraiseit.py |
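

# Editor's note: the sketch below is illustrative and not part of the
# upstream module.
def _reraise_example():  # pragma: no cover
    """Hedged usage sketch: the reraised exception keeps its class and
    traceback while its message grows with each ``reraise`` call.
    """
    try:
        try:
            raise RuntimeError('original message')
        except Exception as e:
            reraise(e, 'While doing something')
    except RuntimeError as e:
        return exception_to_unicode(e)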
# url2env
Turns database URLs into shell environment variables.
### Usage:
```
$ url2env psql://joebloggs:[email protected]:4433/blog
PGUSER=joebloggs
PGPASSWORD=secret
PGHOST=db.example.com
PGPORT=4433
PGDATABASE=blog
```
With all options:
```
$ url2env --engine --export --prefix=DB_ psql://joebloggs:[email protected]:4433/blog
export DB_ENGINE=psql
export DB_USER=joebloggs
export DB_PASSWORD=secret
export DB_HOST=db.example.com
export DB_PORT=4433
export DB_DATABASE=blog
```
The output could be used in conjunction with `eval`, e.g.:
```
$ eval $(url2env $DATABASE_URL)
```
### Installation
```
$ pip install zerotk.url2env
```
### Distribution
```
$ git tag <x.y>
$ make build upload
```
| zerotk.url2env | /zerotk.url2env-1.4.0b1.tar.gz/zerotk.url2env-1.4.0b1/README.md | README.md |
virtualenv-api - an API for virtualenv
======================================
|Build Status|
|Latest version|
|BSD License|
`virtualenv`_ is a tool to create isolated Python environments. Unfortunately,
it does not expose a native Python API. This package aims to provide an API in
the form of a wrapper around virtualenv.
It can be used to create and delete environments and perform package management
inside the environment.
Full support is provided for Python 2.7 and Python 3.3+.
.. _virtualenv: http://www.virtualenv.org/
.. |Build Status| image:: https://travis-ci.org/sjkingo/virtualenv-api.svg
:target: https://travis-ci.org/sjkingo/virtualenv-api
.. |Latest version| image:: https://img.shields.io/pypi/v/virtualenv-api.svg
:target: https://pypi.python.org/pypi/virtualenv-api
.. |BSD License| image:: https://img.shields.io/pypi/l/virtualenv-api.svg
:target: https://github.com/sjkingo/virtualenv-api/blob/master/LICENSE
Installation
------------
The latest stable release is available on `PyPi`_:
::
$ pip install virtualenv-api
Please note that the distribution is named ``virtualenv-api``, yet the Python
package is named ``virtualenvapi``.
Alternatively, you may fetch the latest version from git:
::
$ pip install git+https://github.com/sjkingo/virtualenv-api.git
.. _PyPi: https://pypi.python.org/pypi/virtualenv-api
Examples
--------
- To begin managing an environment (it will be created if it does not
exist):
.. code:: python
from virtualenvapi.manage import VirtualEnvironment
env = VirtualEnvironment('/path/to/environment/name')
You may also specify the Python interpreter to use in this environment by
passing the ``python`` argument to the class constructor (new in 2.1.3):
.. code:: python
env = VirtualEnvironment('/path/to/environment/name', python='python3')
If you have already activated a virtualenv and wish to operate on it, simply
call ``VirtualEnvironment`` with no arguments:
.. code:: python
env = VirtualEnvironment()
*New in 2.1.7:*
An optional argument ``readonly`` may be provided (defaults to ``False``) that
will prevent all operations that could potentially modify the environment.
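
For example, to open an existing environment without allowing modifications:

.. code:: python

    env = VirtualEnvironment('/path/to/environment/name', readonly=True)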
- Check if the ``mezzanine`` package is installed:
.. code:: python
>>> env.is_installed('mezzanine')
False
- Install the latest version of the ``mezzanine`` package:
.. code:: python
>>> env.install('mezzanine')
- A wheel of the latest version of the ``mezzanine`` package (new in
2.1.4):
.. code:: python
>>> env.wheel('mezzanine')
- Install version 1.4 of the ``django`` package (this is pip’s syntax):
.. code:: python
>>> env.install('django==1.4')
- Upgrade the ``django`` package to the latest version:
.. code:: python
>>> env.upgrade('django')
- Upgrade all packages to their latest versions (new in 2.1.7):
.. code:: python
>>> env.upgrade_all()
- Uninstall the ``mezzanine`` package:
.. code:: python
>>> env.uninstall('mezzanine')
Packages may be specified as name only (to work on the latest version), using
pip’s package syntax (e.g. ``django==1.4``) or as a tuple of ``('name',
'ver')`` (e.g. ``('django', '1.4')``).
- A package may be installed directly from a git repository (must end
with ``.git``):
.. code:: python
>>> env.install('git+git://github.com/sjkingo/cartridge-payments.git')
*New in 2.1.10:*
- A package can be installed in pip's *editable* mode by prefixing the package
name with `-e` (this is pip's syntax):
.. code:: python
>>> env.install('-e git+https://github.com/stephenmcd/cartridge.git')
- Instances of the environment provide an ``installed_packages``
property:
.. code:: python
>>> env.installed_packages
[('django', '1.5'), ('wsgiref', '0.1.2')]
- A list of package names is also available in the same manner:
.. code:: python
>>> env.installed_package_names
['django', 'wsgiref']
- Search for a package on PyPI (changed in 2.1.5: this now returns a
dictionary instead of list):
.. code:: python
>>> env.search('virtualenv-api')
{'virtualenv-api': 'An API for virtualenv/pip'}
>>> len(env.search('requests'))
231
- The old functionality (pre 2.1.5) of ``env.search`` may be used:
.. code:: python
>>> list(env.search('requests').items())
[('virtualenv-api', 'An API for virtualenv/pip')]
Verbose output from each command is available in the environment’s
``build.log`` file, which is appended to with each operation. Any errors are
logged to ``build.err``.
| zerotk.virtualenv-api | /zerotk.virtualenv-api-2.2.1.tar.gz/zerotk.virtualenv-api-2.2.1/README.rst | README.rst |
# Changes
## 2.1.14 - 2017-02-08
* Added support for the `-r` (requirements file) option
## 2.1.13 - 2016-10-26
* #31: Workaround to prevent shebang length errors when calling pip (@rmb938)
## 2.1.12 - 2016-10-22
* #29: Fix AttributeError when raising OSError from inside environment (@rmb938)
## 2.1.11 - 2016-07-14
* #28: Add support for locating pip on win32 (@eoghancunneen)
## 2.1.10 - 2016-06-03
* #27: Support installing an editable package (`-e`) (@sjkingo)
## 2.1.9 - 2016-05-01
* Move version number to library instead of `setup.py` (@sjkingo)
* Support Python 3.5 (@sjkingo)
* #24: Fixed failing test suite by adding missing dependency (@mcyprian)
## 2.1.8 - 2016-04-01
* #21: Converted `README.md` to an rST document to remove dependency on
`setuptools-markdown` (@sjkingo)
## 2.1.7 - 2015-08-10
* Added `upgrade_all()` function to upgrade all packages in an environment (@sjkingo)
* Added `readonly` argument to constructing environment that can be used to prevent
operations that could potentially modify the environment (@sjkingo)
* Added support for passing ~ to construct environment (e.g. `~user/venv`) (@sjkingo)
## 2.1.6 - 2015-06-10
* Version bump for broken PyPi release (@sjkingo)
(no new changes to code)
## 2.1.5 - 2015-06-10
* Improved search function that will return more accurate results. This
includes a breaking change where the package list returned by `env.search()`
is now a dictionary. (@sjkingo)
* Prevent pip from checking for new version of itself and polluting the output
of some commands (@sjkingo)
## 2.1.4 - 2015-06-04
* Support for creating a wheel of packages (@rmb938)
## 2.1.3 - 2015-03-28
* Support changing the interpreter path (@jlafon)
* Improve Unicode support in pip search that broke tests (@jlafon)
## 2.1.2 - 2014-11-25
* Added test builds through Travis CI (@sjkingo)
* Fixed default `options` bug introduced in 2.0.1 (@ColMcp, @sposs)
* Updated example.py (@sjkingo)
* Fix typo in logging (@yannik-ammann)
## 2.1.1 - 2014-11-19
* Fix typo that broke from 2.1.0 release (@sjkingo)
## 2.1.0 - 2014-11-19
* Python 3 support (@r1s)
* Unit tests for base functionality (@r1s)
* Better Unicode handling (@r1s)
* Tuple support for specifying package versions (@philippeowagner)
## 2.0.1 - 2014-11-14
* Support passing command line options directly to pip (@sposs)
* Misc. PEP8 fixes (@sposs)
## 2.0.0 - 2014-04-09
* Added pip search functionality
* Re-worked underlying pip processing and error handling
## 1.0.0 - 2013-03-27
* Initial release with basic install/uninstall
| zerotk.virtualenv-api | /zerotk.virtualenv-api-2.2.1.tar.gz/zerotk.virtualenv-api-2.2.1/CHANGES.md | CHANGES.md |
from os import linesep, environ
import os.path
import subprocess
import six
import sys
from virtualenvapi.util import split_package_name, to_text, get_env_path
from virtualenvapi.exceptions import *
class VirtualEnvironment(object):
def __init__(self, path=None, python=None, cache=None, readonly=False):
if path is None:
path = get_env_path()
if not path:
                raise VirtualenvPathNotFound('Path for virtualenv is not defined or virtualenv is not activated')
self.python = python
# remove trailing slash so os.path.split() behaves correctly
if path[-1] == os.path.sep:
path = path[:-1]
# Expand path so shell shortcuts may be used such as ~
self.path = os.path.abspath(os.path.expanduser(path))
self.env = environ.copy()
if cache is not None:
self.env['PIP_DOWNLOAD_CACHE'] = os.path.expanduser(os.path.expandvars(cache))
self.readonly = readonly
# True if the virtual environment has been set up through open_or_create()
self._ready = False
def __str__(self):
return six.u(self.path)
@property
def _pip_rpath(self):
"""The relative path (from environment root) to pip."""
# Windows virtualenv installation installs pip to the [Ss]cripts
        # folder. Here's a simple check to support this:
if sys.platform == 'win32':
return os.path.join('Scripts', 'pip.exe')
return os.path.join('bin', 'pip')
@property
def _python_rpath(self):
"""The relative path (from environment root) to python."""
        # Windows virtualenv installation installs python to the [Ss]cripts
        # folder. Here's a simple check to support this:
if sys.platform == 'win32':
return os.path.join('Scripts', 'python.exe')
return os.path.join('bin', 'python')
@property
def pip_version(self):
"""Version of installed pip."""
        if not self._pip_exists():
return None
if not hasattr(self, '_pip_version'):
string_version = self._execute_pip(['-V']).split()[1]
self._pip_version = tuple([int(n) for n in string_version.split('.')])
return self._pip_version
@property
def root(self):
"""The root directory that this virtual environment exists in."""
return os.path.split(self.path)[0]
@property
def name(self):
"""The name of this virtual environment (taken from its path)."""
return os.path.basename(self.path)
@property
def _logfile(self):
"""Absolute path of the log file for recording installation output."""
return os.path.join(self.path, 'build.log')
@property
def _errorfile(self):
"""Absolute path of the log file for recording installation errors."""
return os.path.join(self.path, 'build.err')
def _create(self):
"""Executes `virtualenv` to create a new environment."""
if self.readonly:
raise VirtualenvReadonlyException()
if self.python is None:
args = ['virtualenv', self.name]
else:
args = ['virtualenv', '-p', self.python, self.name]
proc = subprocess.Popen(args, cwd=self.root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _error = proc.communicate()
returncode = proc.returncode
if returncode:
raise VirtualenvCreationException((returncode, output, self.name))
self._write_to_log(output, truncate=True)
# self._write_to_error(_error, truncate=True)
def _execute_pip(self, args, raw_pip=False, log=True):
"""
Executes pip commands.
        When raw_pip is True, use the pip binary; otherwise use "python -m pip".
This is to prevent a shebang length issue documented here: https://github.com/pypa/pip/issues/1773
:param args: Arguments to pass to pip (list[str])
:param raw_pip: Use the pip binary [default: False] (boolean)
:param log: Log the output to a file [default: True] (boolean)
:return: See _execute
"""
if raw_pip:
exec_args = [self._pip_rpath, '--disable-pip-version-check']
else:
exec_args = [self._python_rpath, '-m', 'pip', '--disable-pip-version-check']
exec_args.extend(args)
return self._execute(exec_args, log=log)
def _execute(self, args, log=True):
"""Executes the given command inside the environment and returns the output."""
if not self._ready:
self.open_or_create()
output = ''
error = ''
try:
proc = subprocess.Popen(args, cwd=self.path, env=self.env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _error = proc.communicate()
returncode = proc.returncode
if returncode:
                raise subprocess.CalledProcessError(returncode, args, output)
return to_text(output)
except OSError as e:
# raise a more meaningful error with the program name
prog = args[0]
if prog[0] != os.sep:
prog = os.path.join(self.path, prog)
raise OSError('%s: %s' % (prog, six.u(str(e))))
finally:
if log:
try:
self._write_to_log(to_text(output))
# self._write_to_error(to_text(error))
except NameError:
pass # We tried
def _write_to_log(self, s, truncate=False):
"""Writes the given output to the log file, appending unless `truncate` is True."""
# if truncate is True, set write mode to truncate
with open(self._logfile, 'w' if truncate else 'a') as fp:
            fp.writelines((to_text(s),))
def _write_to_error(self, s, truncate=False):
"""Writes the given output to the error file, appending unless `truncate` is True."""
# if truncate is True, set write mode to truncate
with open(self._errorfile, 'w' if truncate else 'a') as fp:
            fp.writelines((to_text(s),))
def _pip_exists(self):
"""Returns True if pip exists inside the virtual environment. Can be
used as a naive way to verify that the environment is installed."""
return os.path.isfile(os.path.join(self.path, self._pip_rpath))
def open_or_create(self):
"""Attempts to open the virtual environment or creates it if it
doesn't exist.
XXX this should probably be expanded to do some proper checking?"""
if not self._pip_exists():
self._create()
self._ready = True
def force_create(self):
"""Forces the creation of a new virtual environment, deleting any
existing virtual environment."""
import shutil
shutil.rmtree(self.name, ignore_errors=True)
self._create()
def install(self, package, force=False, upgrade=False, options=None):
"""Installs the given package into this virtual environment, as
specified in pip's package syntax or a tuple of ('name', 'ver'),
only if it is not already installed. Some valid examples:
'Django'
'Django==1.5'
('Django', '1.5')
If `force` is True, force an installation. If `upgrade` is True,
attempt to upgrade the package in question. If both `force` and
`upgrade` are True, reinstall the package and its dependencies.
The `options` is a list of strings that can be used to pass to
pip."""
if self.readonly:
raise VirtualenvReadonlyException()
options = self._pip_options(upgrade, force, options=options)
if isinstance(package, tuple):
package = '=='.join(package)
if package.startswith('-e'):
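            # an editable requirement ('-e <uri>') must reach pip as two separate arguments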
package_args = package.split()
else:
package_args = [package]
if not (force or upgrade) and self.is_installed(package_args[-1]):
self._write_to_log('%s is already installed, skipping (use force=True to override)' % package_args[-1])
return
try:
return self._execute_pip(['install'] + package_args + options)
except subprocess.CalledProcessError as e:
raise PackageInstallationException((e.returncode, e.output, package))
def editable(self, uri, upgrade=False, force=False, options=None):
return self.install('-e {}'.format(uri), upgrade=upgrade, force=force, options=options)
def requirement(self, requirement, upgrade=False, force=False, options=None):
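        """Installs packages from the given requirements file into this
        virtual environment (pip's '-r' option)."""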
options = self._pip_options(upgrade, force, options=options)
requirement = os.path.abspath(requirement)
package_args = ['-r', requirement]
try:
return self._execute_pip(['install'] + package_args + options)
except subprocess.CalledProcessError as e:
raise RequirementInstallationException((e.returncode, e.output, requirement))
def _pip_options(self, upgrade, force, options=None):
result = options or []
if not isinstance(result, list):
raise ValueError("Options must be a list of strings.")
if upgrade:
result += ['--upgrade']
if force:
result += ['--force-reinstall']
elif force:
result += ['--ignore-installed']
return result
def uninstall(self, package):
"""Uninstalls the given package (given in pip's package syntax or a tuple of
('name', 'ver')) from this virtual environment."""
if isinstance(package, tuple):
package = '=='.join(package)
if not self.is_installed(package):
self._write_to_log('%s is not installed, skipping' % package)
return
try:
return self._execute_pip(['uninstall', '-y', package])
except subprocess.CalledProcessError as e:
raise PackageRemovalException((e.returncode, e.output, package))
def wheel(self, package, options=None):
"""Creates a wheel of the given package from this virtual environment,
as specified in pip's package syntax or a tuple of ('name', 'ver'),
only if it is not already installed. Some valid examples:
'Django'
'Django==1.5'
('Django', '1.5')
The `options` is a list of strings that can be used to pass to
pip."""
if self.readonly:
raise VirtualenvReadonlyException()
if options is None:
options = []
if isinstance(package, tuple):
package = '=='.join(package)
if not self.is_installed('wheel'):
raise PackageWheelException((0, "Wheel package must be installed in the virtual environment", package))
if not isinstance(options, list):
raise ValueError("Options must be a list of strings.")
try:
return self._execute_pip(['wheel', package] + options)
except subprocess.CalledProcessError as e:
raise PackageWheelException((e.returncode, e.output, package))
def is_installed(self, package):
"""Returns True if the given package (given in pip's package syntax or a
tuple of ('name', 'ver')) is installed in the virtual environment."""
if isinstance(package, tuple):
package = '=='.join(package)
if package.endswith('.git'):
pkg_name = os.path.split(package)[1][:-4]
return pkg_name in self.installed_package_names or \
pkg_name.replace('_', '-') in self.installed_package_names
pkg_tuple = split_package_name(package)
if pkg_tuple[1] is not None:
return pkg_tuple in self.installed_packages
else:
return pkg_tuple[0] in self.installed_package_names
def upgrade(self, package, force=False):
"""Shortcut method to upgrade a package. If `force` is set to True,
the package and all of its dependencies will be reinstalled, otherwise
if the package is up to date, this command is a no-op."""
self.install(package, upgrade=True, force=force)
def upgrade_all(self):
"""
Upgrades all installed packages to their latest versions.
"""
for pkg in self.installed_package_names:
self.install(pkg, upgrade=True)
def search(self, term):
"""
Searches the PyPi repository for the given `term` and returns a
dictionary of results.
New in 2.1.5: returns a dictionary instead of list of tuples
"""
packages = {}
results = self._execute_pip(['search', term], log=False) # Don't want to log searches
for result in results.split(linesep):
try:
name, description = result.split(six.u(' - '), 1)
except ValueError:
# '-' not in result so unable to split into tuple;
# this could be from a multi-line description
continue
else:
name = name.strip()
if len(name) == 0:
continue
packages[name] = description.split(six.u('<br'), 1)[0].strip()
return packages
def search_names(self, term):
return list(self.search(term).keys())
@property
def installed_packages(self):
"""
List of all packages that are installed in this environment in
the format [(name, ver), ..].
"""
freeze_options = ['-l', '--all'] if self.pip_version >= (8, 1, 0) else ['-l']
return list(map(split_package_name, filter(None, self._execute_pip(
['freeze'] + freeze_options).split(linesep))))
@property
def installed_package_names(self):
"""List of all package names that are installed in this environment."""
        return [name.lower() for name, _ in self.installed_packages]
| zerotk.virtualenv-api | /zerotk.virtualenv-api-2.2.1.tar.gz/zerotk.virtualenv-api-2.2.1/virtualenvapi/manage.py | manage.py
[](https://pypi.python.org/pypi/zerotk.xml_factory)
[](https://travis-ci.org/zerotk/xml_factory)
[](https://coveralls.io/github/zerotk/xml_factory)
XML Factory
===========
About
-----
XML Factory is a simple XML writer that uses dict syntax to write files.
Example
------------
```python
# Create a root tag
factory = XmlFactory('root')
'''
<root>
</root>
'''
# Add elements using dict syntax
factory['elements/alpha'] = 'Alpha'
'''
<root>
<elements>
<alpha>Alpha</alpha>
</elements>
</root>
'''
# Set tag fields using @
factory['elements@coding'] = 'utf8'
'''
<root>
<elements coding="utf8">
<alpha>Alpha</alpha>
</elements>
</root>
'''
# Values can be overridden by using the same path twice
factory['elements/alpha'] = 'Overridden Alpha'
'''
<root>
<elements coding="utf8">
<alpha>Overridden Alpha</alpha>
</elements>
</root>
'''
# New values can be added to a same path by ending a string with '+'
factory['elements/alpha+'] = 'New Alpha'
'''
<root>
<elements coding="utf8">
<alpha>Overridden Alpha</alpha>
<alpha>New Alpha</alpha>
</elements>
</root>
'''
```
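
The resulting document can be rendered or saved with `get_contents()` and `write()` from the same API:

```python
# get_contents() returns the pretty-printed XML as a string;
# write() saves it to a file. xml_header is optional in both.
print(factory.get_contents(xml_header=True))
factory.write('output.xml')
```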
Contributing
------------
This library follows Jeff Knupp's guide on Python open source projects:
http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
| zerotk.xml-factory | /zerotk.xml_factory-0.1.2.tar.gz/zerotk.xml_factory-0.1.2/README.md | README.md |
"""
Module for string manipulation functions.
"""
from __future__ import unicode_literals, absolute_import, print_function
def dedent(text, ignore_first_linebreak=True, ignore_last_linebreak=True):
"""
Heavily inspired by textwrap.dedent, with a few changes (as of python 2.7)
- No longer transforming all-whitespace lines into ''
- Options to ignore first and last linebreaks of `text`.
The last option is particularly useful because of ESSS coding standards.
For example, using the default textwrap.dedent to create a 3-line string would look like this:
textwrap.dedent(''' line1
line2
line3'''
)
With these options, you can create a better looking code with:
dedent(
'''
line1
line2
line3
'''
)
:param unicode text:
Text to be dedented (see examples above)
:param bool ignore_first_linebreak:
        If True, blank characters (\r\n\t ) up to the first '\n' are ignored
    :param bool ignore_last_linebreak:
        If True, blank characters (\r\n\t ) after the last '\n' are ignored
Original docs:
Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left edge of the display,
while still presenting them in the source code in indented form.
Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines
" hello" and "\thello" are considered to have no common leading whitespace. (This
behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs
before searching for common leading whitespace.)
"""
if ignore_first_linebreak and '\n' in text:
first, others = text.split('\n', 1)
if first.strip('\n\r\t ') == '':
text = others
if ignore_last_linebreak and '\n' in text:
others, last = text.rsplit('\n', 1)
if last.strip('\n\r\t ') == '':
text = others
import re
_leading_whitespace_re = re.compile('(^[ ]*)(?:[^ \n])', re.MULTILINE)
# Look for the longest leading string of spaces and tabs common to
# all non-empty lines.
margin = None
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
def indent(text, indent=1, indentation='    '):
"""
Indents multiple lines of text.
:param list(unicode)|unicode text:
The text to apply the indentation.
:param int indent:
Number of indentations to add. Defaults to 1.
:param unicode indentation:
The text used as indentation. Defaults to 4 spaces.
:return unicode:
Returns the text with applied indentation.
"""
indentation = indent * indentation
lines = text
if isinstance(lines, unicode):
append_eol = lines.endswith('\n')
lines = lines.splitlines()
else:
append_eol = True
result = []
for i in lines:
if i.strip():
result.append(indentation + i)
else:
result.append(i)
if result:
result = '\n'.join(result)
if append_eol:
result += '\n'
else:
result = ''
return result
def safe_split(s, sep, maxsplit=None, default='', reversed=False):
"""
Perform a string split granting the size of the resulting list.
:param unicode s: The input string.
:param unicode sep: The separator.
:param int maxsplit: The max number of splits. The len of the resulting len is granted to be maxsplit + 1
:param default: The default value for filled values in the result.
:return list(unicode):
Returns a list with fixed size of maxsplit + 1.
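
    e.g.:
        safe_split('a:b', ':', maxsplit=2) -> ['a', 'b', '']
        safe_split('a:b', ':', maxsplit=2, reversed=True) -> ['', 'a', 'b']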
"""
# NOTE: Can't import "string" module for string.split/string.rsplit because of module name
# clashing with this module.
if reversed:
def split(s, *args):
return s.rsplit(*args)
else:
def split(s, *args):
return s.split(*args)
if maxsplit is None:
result = split(s, sep)
else:
result = split(s, sep, maxsplit)
result_len = maxsplit + 1
diff_len = result_len - len(result)
if diff_len > 0:
defaults = [default] * diff_len
if reversed:
result = defaults + result
else:
result = result + defaults
return result
def to_byte_string(arg):
"""
Converts basestrings to byte string.
Does nothing if parameter is not a basestring.
:param object arg:
"""
if isinstance(arg, basestring):
return str(arg)
return arg
def format_iterable(iterable, format_expr="'%s'"):
"""
Formats an iterable into a string by applying format_expr to each item.
The resulting string is equivalent to stringifying a list, but unicode
items won't have the prefix 'u'.
Ex:
a = u'My Item'
b = [a]
        format_iterable(b)  # outputs "['My Item']", rather than "[u'My Item']"
:param object iterable:
Any iterable object.
:param unicode format_expr:
The format expression to use on each item. Defaults to "'%s'" so that the
string representation of each item is encapsulated in single quotes.
"""
items = ', '.join((format_expr % (item,) for item in iterable))
return '[%s]' % (items,)
def to_unicode(value, encoding=None, error_strategy='replace'):
"""
Converts given ``value`` to unicode.
If given encoding fails, the value will be converted to "ascii" using the given
``error_strategy``.
:param bytes value:
Value that should be converted to ``unicode``.
:type encoding: unicode | None
:param encoding:
The encoding that ``value`` is supposedly encoded.
:param unicode error_strategy:
Possible values are ``strict``, ``replace`` and ``ignore``.
Check :meth:`bytes.decode` for details.
:rtype: unicode
.. seealso:: bytes.decode
"""
import locale
if encoding is None:
encoding = locale.getpreferredencoding()
try:
return unicode(value)
except UnicodeDecodeError:
value = bytes(value)
try:
return value.decode(encoding)
except UnicodeDecodeError:
return value.decode('ascii', error_strategy)
def match_any(text, regexes):
"""
Returns whether the given text matches any of the given regular expressions.
:param unicode text: The text to check for match.
:param list(unicode) regexes: List of regular expressions.
:return boolean:
Return True if the given text matches any of the given regular expressions.
"""
import re
for i_regex in regexes:
if re.match(i_regex, text) is not None:
return True
    return False
| zerotk.xml-factory | /zerotk.xml_factory-0.1.2.tar.gz/zerotk.xml_factory-0.1.2/zerotk/string.py | string.py
from __future__ import unicode_literals
from collections import OrderedDict
from six import StringIO
from ._pretty_xml import WritePrettyXMLElement
from xml.etree import ElementTree
import six
class XmlFactory(object):
"""
Fast and easy XML creation class.
This class provides a simple a fast way of creating XML files in Python. It tries to deduce as
much information as possible, creating intermediate elements as necessary.
Example:
xml = XmlFactory('root')
xml['alpha/bravo/charlie'] # Create intermediate nodes
        xml['alpha/bravo@one'] = '1'  # Create attribute "one" on "alpha/bravo" tag
xml['alpha/delta'] = 'XXX' # Create delta tag with text
        xml.write('filename.xml')     # Always write with a pretty XML format
"""
def __init__(self, root_element):
"""
:param str|Element root_element:
"""
if isinstance(root_element, six.string_types):
self.root = ElementTree.Element(root_element, attrib=OrderedDict())
elif isinstance(root_element, ElementTree.Element):
self.root = root_element
else:
raise TypeError("Unknown root_element parameter type: %s" % type(root_element))
def __setitem__(self, name, value):
"""
Create a new element or attribute:
:param unicode name:
A XML path including or not an attribute definition
:param unicode value:
The value to associate with the element or attribute
:returns Element:
Returns the element created.
If setting an attribute value, returns the owner element.
@examples:
xml['alpha/bravo'] = 'XXX' # Create bravo tag with 'XXX' as text contents
            xml['alpha@class'] = 'CLS'  # Create alpha with the attribute class='CLS'
"""
if '@' in name:
            element_name, attr_name = name.rsplit('@', 1)
            result = self._obtain_element(element_name)
            result.attrib[attr_name] = six.text_type(value)
else:
result = self._obtain_element(name)
result.text = six.text_type(value)
return XmlFactory(result)
def __getitem__(self, name):
"""
        Creates and returns an XML element.
:param unicode name:
            An XML path (attribute definitions are not allowed here).
:rtype: Element
:returns:
Returns the element created.
"""
assert '@' not in name, 'The "at" (@) is used for attribute definitions'
result = self._obtain_element(name)
return XmlFactory(result)
def _obtain_element(self, name):
"""
        Creates and returns an XML element with the given name.

        :param unicode name:
            An XML path, with each sub-element tag separated by a slash.
If any of the parts ends with a "+" it creates a new sub-element in that part even if
it already exists.
"""
parent = self.root
if name == '':
# On Python 2.7 parent.find('') returns None instead of the parent itself
result = parent
else:
parts = name.split('/')
for i_part in parts:
if i_part.endswith('+'):
i_part = i_part[:-1]
result = ElementTree.SubElement(parent, i_part, attrib=OrderedDict())
else:
result = parent.find(i_part)
if result is None:
result = ElementTree.SubElement(parent, i_part, attrib=OrderedDict())
parent = result
return result
def print_(self, oss=None, xml_header=False):
"""
        Prints the resulting XML to stdout or to the given output stream.
:type oss: file-like object | None
:param oss:
A file-like object where to write the XML output. If None, writes the output in the
stdout.
"""
if oss is None:
import sys
oss = sys.stdout
if xml_header:
oss.write('<?xml version="1.0" ?>\n')
WritePrettyXMLElement(oss, self.root)
def write(self, filename, xml_header=False):
"""
        Writes the XML to a file with the given filename.
:param unicode filename:
A filename.
"""
with open(filename, 'w') as f:
f.write(self.get_contents(xml_header=xml_header))
def get_contents(self, xml_header=False):
"""
Returns the resulting XML.
:return unicode:
"""
oss = StringIO()
self.print_(oss, xml_header=xml_header)
return oss.getvalue()
def as_dict(self):
"""
Returns the data-structure as dict.
:return dict:
Code from: http://code.activestate.com/recipes/410469-xml-as-dictionary/
"""
def xml_to_list(aList):
result = []
for element in aList:
if element:
# treat like dict
if len(element) == 1 or element[0].tag != element[1].tag:
result.append(xml_to_dict(element))
# treat like list
elif element[0].tag == element[1].tag:
result.append(xml_to_list(element))
elif element.text:
text = element.text.strip()
if text:
result.append(text)
return result
def xml_to_dict(parent_element):
"""
Example usage:
>>> tree = ElementTree.parse('your_file.xml')
>>> root = tree.getroot()
>>> xmldict = XmlDictConfig(root)
Or, if you want to use an XML string:
>>> root = ElementTree.XML(xml_string)
>>> xmldict = XmlDictConfig(root)
And then use xmldict for what it is... a dict.
"""
def _dict(*values):
return OrderedDict(values)
result = _dict()
if parent_element.items():
result.update(dict(parent_element.items()))
for element in parent_element:
if element:
# treat like dict - we assume that if the first two tags
# in a series are different, then they are all different.
if len(element) == 1 or element[0].tag != element[1].tag:
aDict = xml_to_dict(element)
# treat like list - we assume that if the first two tags
# in a series are the same, then the rest are the same.
else:
# here, we put the list in dictionary; the key is the
# tag name the list elements all share in common, and
# the value is the list itself
aDict = _dict((element[0].tag, xml_to_list(element)))
# if the tag has attributes, add those to the dict
if element.items():
aDict.update(dict(element.items()))
result.update(_dict((element.tag, aDict)))
# this assumes that if you've got an attribute in a tag,
# you won't be having any text. This may or may not be a
# good idea -- time will tell. It works for the way we are
# currently doing XML configuration files...
elif element.items():
result.update(_dict((element.tag, OrderedDict(sorted(element.items())))))
# finally, if there are no child tags and no attributes, extract
# the text
else:
result.update(_dict((element.tag, element.text)))
return result
# return _elem2list(self.root, return_children=True)
return xml_to_dict(self.root)
def as_json(self):
"""
Returns the data-structure as a JSON.
:return unicode:
"""
import json
        return json.dumps(self.as_dict())
| zerotk.xml-factory | /zerotk.xml_factory-0.1.2.tar.gz/zerotk.xml_factory-0.1.2/zerotk/xml_factory/_xml_factory.py | _xml_factory.py
# zops
ZOPS is an extensible command-line utility intended to centralize and reuse software development solutions and
processes.
## Creating ZOPS commands in your project
Using a project named `alpha` as an example:
```
/
/alpha
__init__.py
zops.py
setup.py
```
### ./alpha/zops.py
```python
import click
@click.group(name='alpha')
def main():
pass
@main.command()
def my_command():
"""
This is my command.
$ zops alpha my_command
"""
click.echo('my command')
```
### ./setup.py
```python
# ...
setup(
# ...
entry_points="""
[zops.plugins]
alpha=alpha.zops:main
""",
)
```
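
After the package is installed (e.g. with `pip install -e .`), the command group becomes available through the `zops` entry point:

```
$ zops alpha my_command
my command
```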
## Creating a ZOPS extension library
```
/
/zops
/bravo
cli.py
setup.py
```
### ./zops/bravo/cli.py
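By analogy with the `alpha` example above, a minimal sketch of the extension's CLI module might be:

```python
import click


@click.group(name='bravo')
def main():
    pass
```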
| zerotk.zops | /zerotk.zops-0.5.3.tar.gz/zerotk.zops-0.5.3/README.md | README.md |
def setenv(name, value):
import os
os.environ[name] = value
Console.setting('{}={}'.format(name, os.environ[name]))
def add_pythonpath(value):
import sys
import os
value = os.path.normpath(value)
sys.path.insert(0, value)
Console.setting('SYS.PATH={}'.format(value))
def call_main(main_func, *argv):
import sys
old_argv = sys.argv
sys.argv = [''] + list(argv)
try:
return main_func()
except SystemExit as e:
return e.code
finally:
sys.argv = old_argv
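# Illustrative usage (hypothetical CLI module):
#   exit_code = call_main(my_cli.main, '--verbose', 'build')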
def ensure_dir(path):
import os
os.makedirs(path, exist_ok=True)
Console.setting('DIRECTORY: {}'.format(path))
class Console(object):
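    """
    Colored console output helpers built on top of click.secho.
    """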
TITLE_COLOR = 'yellow'
EXECUTION_COLOR = 'green'
SETTING_COLOR = 'blue'
OUTPUT_COLOR = 'white'
INFO_COLOR = 'white'
DEBUG_COLOR = 'red'
@classmethod
def title(cls, *args):
cls._secho(['#'] + list(args), cls.TITLE_COLOR)
@classmethod
def execution(cls, *args):
cls._secho(['$'] + list(args), cls.EXECUTION_COLOR)
@classmethod
def setting(cls, *args):
cls._secho(['!'] + list(args), cls.SETTING_COLOR)
@classmethod
def item(cls, *args, ident=0):
prefix = cls._idented('*', ident)
cls._secho([prefix] + list(args), cls.OUTPUT_COLOR)
@classmethod
def output(cls, *args):
cls._secho(args, cls.OUTPUT_COLOR)
@classmethod
def response(cls, *args):
cls._secho(['>'] + list(args), cls.OUTPUT_COLOR)
@classmethod
def info(cls, *args):
cls._secho(['\U0001F6C8'] + list(args), cls.INFO_COLOR)
@classmethod
def debug(cls, *args):
cls._secho(['***'] + list(args), cls.DEBUG_COLOR)
@classmethod
def _idented(cls, text, ident):
return ' ' * ident + text
@classmethod
def _secho(cls, args, fg, join_char=' '):
import click
message = join_char.join(args)
        message = message.rstrip('\n')
        click.secho(message, fg=fg)
| zerotk.zops | /zerotk.zops-0.5.3.tar.gz/zerotk.zops-0.5.3/zerotk/zops/__init__.py | __init__.py