setMethodS3("truncateThetaAB", "array", function(data, ...) {
dim <- dim(data);
x <- data[,1,];
idxsA <- which(x < 0);
dA <- x[idxsA];
rm(x);
x <- data[,2,];
idxsB <- which(x < 0);
dB <- x[idxsB];
rm(x);
delta <- array(0, dim=dim[-2]);
delta[idxsA] <- dA;
rm(idxsA,dA);
data[,1,] <- data[,1,] - delta;
data[,2,] <- data[,2,] + delta;
rm(delta);
delta <- array(0, dim=dim[-2]);
delta[idxsB] <- dB;
rm(idxsB,dB);
data[,1,] <- data[,1,] + delta;
data[,2,] <- data[,2,] - delta;
rm(delta);
data;
})
setMethodS3("truncateThetaAB", "matrix", function(data, ...) {
x <- data[1,];
idxsA <- which(x < 0);
dA <- x[idxsA];
rm(x);
x <- data[2,];
idxsB <- which(x < 0);
dB <- x[idxsB];
rm(x);
delta <- array(0, dim=ncol(data));
delta[idxsA] <- dA;
rm(idxsA,dA);
data[1,] <- data[1,] - delta;
data[2,] <- data[2,] + delta;
rm(delta);
delta <- array(0, dim=ncol(data));
delta[idxsB] <- dB;
rm(idxsB,dB);
data[1,] <- data[1,] + delta;
data[2,] <- data[2,] - delta;
rm(delta);
data;
})
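# A minimal usage sketch (assumes R.methodsS3, which supplies setMethodS3()
# above, is attached): negative thetaA values are truncated to zero and the
# deficit is moved into thetaB so that thetaA + thetaB is preserved, and
# symmetrically for negative thetaB.
theta <- matrix(c(-1, 3,   # column 1: thetaA = -1, thetaB = 3
                   2, 4),  # column 2: thetaA =  2, thetaB = 4
                nrow = 2)
truncateThetaAB(theta)     # column 1 becomes thetaA = 0, thetaB = 2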
wkmeans <- function(Y, r, asymm) {
imat <- F
if (length(dim(Y)) == 2) {
cat("matrix case \n")
dim(Y) <- c(dim(Y), 1)
imat <- T
}
if (imat == T & length(r) != 2) {
warning("need to input a length 2 vector for the number of clusters")
return()
}
if (imat == F & length(r) != 3) {
warning("need to input a length 3 vector for the number of clusters")
return()
}
r1 <- r[1]
r2 <- r[2]
if (imat == T) {
r3 <- 1
if (r1 != r2) {
warning("matrix case requires the same number of clusters on two modes")
return()
}
if (r1 <= 1 | r2 <= 1) {
warning("all the numbers of clusters should be larger than 1")
return()
}
if (sum(dim(Y)[1:2]) / dim(Y)[1] != 2 & asymm == F) {
warning("use asymmetric algorithm for observation with non-identical dimension on each mode")
return()
}
} else if (imat == F) {
r3 <- r[3]
if (asymm == F) {
if (r1 != r2 | r2 != r3 | r1 != r3) {
warning("symmetric case requires the same number of clusters on every mode")
return()
}
if (sum(dim(Y)) / dim(Y)[1] != 3) {
warning("use asymmetric algorithm for observation with non-identical dimension on each mode")
return()
}
}
if (r1 <= 1 | r2 <= 1 | r3 <= 1) {
warning("all the numbers of clusters should be larger than 1")
return()
}
}
Y <- as.tensor(Y)
u1 <- svd(unfold(Y, 1, c(3, 2))@data)$u[, 1:r1]
u2 <- svd(unfold(Y, 2, c(1, 3))@data)$u[, 1:r2]
if (imat == F) {
u3 <- svd(unfold(Y, 3, c(1, 2))@data)$u[, 1:r3]
} else if (imat == T) {
u3 <- as.matrix(1)
}
hu1 <- svd(unfold(ttl(Y, list(t(u2), t(u3)), ms = c(2, 3)), 1, c(3, 2))@data)$u[, 1:r1]
hu2 <- svd(unfold(ttl(Y, list(t(u1), t(u3)), ms = c(1, 3)), 2, c(1, 3))@data)$u[, 1:r2]
if (imat == F) {
hu3 <- svd(unfold(ttl(Y, list(t(u1), t(u2)), ms = c(1, 2)), 3, c(1, 2))@data)$u[, 1:r3]
} else if (imat == T) {
hu3 <- as.matrix(1)
}
X1 <- hu1 %*% t(hu1) %*% unfold(ttl(Y, list(hu2 %*% t(hu2), hu3 %*% t(hu3)), ms = c(2, 3)), 1, c(3, 2))@data
X2 <- hu2 %*% t(hu2) %*% unfold(ttl(Y, list(hu1 %*% t(hu1), hu3 %*% t(hu3)), ms = c(1, 3)), 2, c(1, 3))@data
X3 <- hu3 %*% t(hu3) %*% unfold(ttl(Y, list(hu1 %*% t(hu1), hu2 %*% t(hu2)), ms = c(1, 2)), 3, c(1, 2))@data
if (asymm == T) {
res1 <- single_wkmeans(X1, r1)
res2 <- single_wkmeans(X2, r2)
if (imat == F) {
res3 <- single_wkmeans(X3, r3)
z0 <- list(as.numeric(res1$z), as.numeric(res2$z), as.numeric(res3$z))
s0 <- list(s01 = res1$s0, s02 = res2$s0, s03 = res3$s0)
} else if (imat == T) {
z0 <- list(as.numeric(res1$z), as.numeric(res2$z))
s0 <- list(s01 = res1$s0, s02 = res2$s0)
}
return(list(z0 = z0, s0 = s0))
} else if (asymm == F) {
z <- rep(0, dim(Y)[1])
sc <- 1:length(z)
l2 <- apply(X1, 1, function(x) sqrt(sum(x^2)))
if (any(l2 == 0)) {
sc <- which(l2 != 0)
X1 <- X1[sc, ]
l2 <- l2[sc]
}
Xs <- diag(l2^(-1)) %*% X1
diss <- dist(Xs, method = "euclidean", p = 2, upper = T, diag = T)
z[sc] <- wcKMedoids(diss^2, k = r1, weights = l2^2, method = "PAMonce", cluster.only = T)
z[-sc] <- sample(unique(z[sc]), length(z[-sc]), replace = T)
s0 <- setdiff(1:length(z), sc)
z <- as.factor(z)
levels(z) <- 1:r1
z0 <- as.numeric(z)
if (imat == F) {
z0 <- list(z0, z0, z0)
s0 <- list(s0, s0, s0)
} else if (imat == T) {
z0 <- list(z0, z0)
s0 <- list(s0, s0)
}
return(list(z0 = z0, s0 = s0))
}
}
angle_iteration = function(Y, z0, max_iter, alpha1 = 0.01, asymm){
imat <- F
s_deg <- F
if (length(dim(Y)) == 2) {
cat("matrix case \n")
dim(Y) <- c(dim(Y), 1)
imat <- T
if(sum(dim(Y)[1:2])/dim(Y)[1] != 2 & asymm == F){
warning("use asymmetric algorithm for observation with non-identical dimension on each mode")
return()
}
}
if(sum(dim(Y))/dim(Y)[1] != 3 & asymm == F & imat == F){
warning("use asymmetric algorithm for observation with non-identical dimension on each mode")
return()
}
z <- lapply(z0, renumber)
if (imat == T) {
z[[3]] <- 1
}
for (iter in 1:max_iter) {
cat("iter = ", iter, "\n")
est_S <- updateS(Y, z, imat)
Y1 <- Cal_Y1(Y, z, imat)
re1 <- single_Aiteration(unfold(as.tensor(est_S), 1, c(3, 2))@data, unfold(as.tensor(Y1), 1, c(3, 2))@data, alpha1)
z1_new = re1$z
z1_new <- renumber(z1_new)
if(asymm == F){
if(imat == T){
z_new = list(z1_new, z1_new,as.vector(1))
}else if(imat == F){
z_new = list(z1_new, z1_new, z1_new)
}
if(re1$s_deg == T){
s_deg = T
}
if (identical(z_new, z)) {
break
}
z = z_new
}else if(asymm == T){
Y2 <- Cal_Y2(Y, z,imat)
re2 <- single_Aiteration(unfold(as.tensor(est_S), 2, c(1, 3))@data, unfold(as.tensor(Y2), 2, c(1, 3))@data, alpha1)
z2_new = re2$z
z2_new <- renumber(z2_new)
if (imat == T) {
z3_new <- 1
if(re1$s_deg == T | re2$s_deg == T){
s_deg = T
}
} else if (imat == F) {
Y3 <- Cal_Y3(Y, z)
re3 <- single_Aiteration(unfold(as.tensor(est_S), 3, c(1, 2))@data, unfold(as.tensor(Y3), 3, c(1, 2))@data, alpha1)
z3_new = re3$z
z3_new <- renumber(z3_new)
if(re1$s_deg == T | re2$s_deg == T|re3$s_deg == T){
s_deg = T
}
}
z_new_list = list(z1_new,z2_new, z3_new)
if (identical(z_new_list, z)) {
break
}
z <- z_new_list
}
}
if(imat == T){
z[[3]] = NULL
}
return(list(z = z, s_deg = s_deg))
}
sim_dTBM = function(seed = NA,imat = F,asymm = F, p, r,
core_control = c("random", "control"), delta = NULL, s_min = NULL, s_max =NULL,
dist = c("normal", "binary"), sigma = 1,
theta_dist = c("abs_normal", "pareto", "non"), alpha = NULL, beta = NULL){
if (is.na(seed) == FALSE) set.seed(seed)
if(imat == T){
cat("generate matrix data \n")
r = r[1:2]
p = p[1:2]
}
if(asymm == F){
if(sum(p)/p[1] != 3 & imat == F){
warning("all the modes share the same dimension in symmetric case")
return()
}
if(sum(p)/p[1] != 2 & imat == T){
warning("all the modes share the same dimension in symmetric case")
return()
}
if(sum(r)/r[1] != 3 & imat == F){
warning("all the modes share the same number of clusters in symmetric case")
return()
}
if(sum(r)/r[1] != 2 & imat == T){
warning("all the modes share the same number of clusters in symmetric case")
return()
}
}
if(core_control == "control"){
if(asymm == T){
warning("core control is only applicable for symmetric case")
return()
}
S <- sim_S(r[1], s_min, s_max, delta, imat)
}else if(core_control == "random"){
S = array(runif(prod(r)), dim = r)
}
if(asymm == F){
z <- sample(1:r[1], p[1], replace = T)
z <- renumber(z)
theta = generate_theta(p[1],theta_dist,z,alpha, beta)
if(imat == F){
X <- ttl(as.tensor(S[z, z, z]), list(diag(theta), diag(theta), diag(theta)), ms = c(1, 2, 3))@data
z = list(z,z,z)
theta = list(theta,theta,theta)
}else if(imat == T){
X <- ttl(as.tensor(S[z, z]), list(diag(theta), diag(theta)), ms = c(1, 2))@data
z = list(z,z)
theta = list(theta,theta)
}
}else if(asymm == T){
z1 = renumber( sample(1:r[1], p[1], replace = T))
z2 = renumber( sample(1:r[2], p[2], replace = T))
theta1 = generate_theta(p[1], theta_dist,z1,alpha, beta)
theta2 = generate_theta(p[2], theta_dist,z2,alpha, beta)
if(imat == F){
z3 = renumber( sample(1:r[3], p[3], replace = T))
theta3 = generate_theta(p[3], theta_dist,z3,alpha, beta)
theta = list(theta1, theta2, theta3)
z = list(z1,z2,z3)
X <- ttl(as.tensor(S[z1, z2, z3]), list(diag(theta1), diag(theta2), diag(theta3)), ms = c(1, 2, 3))@data
}else if(imat == T){
X <- ttl(as.tensor(S[z1, z2]), list(diag(theta1), diag(theta2)), ms = c(1, 2))@data
theta = list(theta1, theta2)
z = list(z1,z2)
}
}
if (dist == "normal") {
Y <- X + array(rnorm(prod(dim(X)), 0, sigma), dim = dim(X))
} else if (dist == "binary") {
X[which(X > 1)] <- 1
Y <- array(rbinom(prod(dim(X)), 1, as.vector(X)), dim = dim(X))
}
return(list(Y = Y, X = X, S = S, theta = theta, z = z))
}
select_r = function(Y,r_range,asymm = F){
imat = F
if (length(dim(Y)) == 2) {
cat("matrix case \n")
imat <- T
r_range = r_range[,1:2]
}
if(asymm == F){
if(sum(rowSums(r_range)/r_range[,1]) != 3*dim(r_range)[1] & imat == F){
warning("all the modes share the same number of clusters in symmetric case")
return()
}
}
if(sum(rowSums(r_range)/r_range[,1]) != 2*dim(r_range)[1] & imat == T){
warning("all the modes share the same number of clusters in matrix case")
return()
}
bic = rep(0,dim(r_range)[1])
p = dim(Y)
for (i in 1:dim(r_range)[1]) {
r = r_range[i,]
cat("given r = ",r, "\n")
initial = wkmeans(Y, r, asymm = asymm)
z_hat = angle_iteration(Y,initial$z0, max_iter = 20, asymm = asymm)$z
if(imat == T){
dim(Y) <- c(dim(Y), 1)
}
S_hat = updateS(Y,z_hat,imat)
theta_hat = theta_estimate(Y,z_hat, imat)
if(imat == T){
X_hat = ttl(as.tensor(S_hat[z_hat[[1]], z_hat[[2]],]), list(diag(theta_hat[[1]]),diag(theta_hat[[2]])), ms = c(1,2))@data
dim(X_hat) = c(dim(X_hat),1)
if(asymm == F){
bic[i] = p[1]^2*log(sum((X_hat - Y)^2)) +(r[1]^2 + p[1]*log(r[1]) + p[1] - r[1])*log(p[1]^2)
}else if(asymm == T){
bic[i] = prod(p)*log(sum((X_hat - Y)^2)) + (prod(r) + sum(p*log(r) + p)- sum(r))*log(prod(p))
}
}else if(imat == F){
X_hat = ttl(as.tensor(S_hat[z_hat[[1]], z_hat[[2]], z_hat[[3]]]), list(diag(theta_hat[[1]]),diag(theta_hat[[2]]), diag(theta_hat[[3]])), ms = c(1,2,3))@data
if(asymm == F){
bic[i] = p[1]^3*log(sum((X_hat - Y)^2)) +(r[1]^3 + p[1]*log(r[1]) + p[1] - r[1])*log(p[1]^3)
}else if(asymm == T){
bic[i] = prod(p)*log(sum((X_hat - Y)^2)) + (prod(r) + sum(p*log(r) + p)- sum(r))*log(prod(p))
}
}
if(imat == T){
dim(Y) = dim(Y)[1:2]
}
}
return(list(r = r_range[which.min(bic),], BIC = bic))
}
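# A minimal end-to-end sketch of the dTBM-style workflow above, assuming the
# package's internal helpers (sim_S(), generate_theta(), renumber(),
# single_wkmeans(), updateS(), theta_estimate(), ...) plus the rTensor and
# WeightedCluster imports are available; parameter values are illustrative.
sim <- sim_dTBM(seed = 1, imat = FALSE, asymm = FALSE,
                p = c(50, 50, 50), r = c(3, 3, 3),
                core_control = "random",
                dist = "normal", sigma = 1,
                theta_dist = "abs_normal")
init <- wkmeans(sim$Y, r = c(3, 3, 3), asymm = FALSE)    # weighted k-means initialization
fit <- angle_iteration(sim$Y, init$z0, max_iter = 20,    # angle-based refinement
                       asymm = FALSE)
table(fit$z[[1]], sim$z[[1]])                            # compare with the truth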
SimCiDiff.default <-
function(data, grp, resp=NULL, na.action="na.error", type="Dunnett", base=1,
ContrastMat=NULL, alternative="two.sided", covar.equal=FALSE,
conf.level=0.95, CorrMatDat=NULL, ...) {
check.out <- SimCheck(data=data, grp=grp, resp=resp, na.action=na.action, type=type, base=base,
ContrastMat=ContrastMat, Num.Contrast=NULL, Den.Contrast=NULL,
alternative=alternative, Margin=NULL, covar.equal=covar.equal,
conf.level=conf.level, CorrMatDat=CorrMatDat)
test <- if (covar.equal==TRUE) {
if (na.action=="multi.df") stop("Procedure not yet available") else "SimCiDiffHom"
} else {
if (na.action=="multi.df") stop("Procedure not yet available") else "SimCiDiffHet"
}
out <- do.call(test,
list(trlist=check.out$trlist, grp=grp, ntr=check.out$ntr, nep=check.out$nep,
ssmat=check.out$ssmat, ContrastMat=check.out$ContrastMat,
ncomp=check.out$ncomp, alternative=alternative, conf.level=conf.level,
meanmat=check.out$meanmat, CorrMatDat=check.out$CorrMatDat))
out$type <- check.out$type
out$test.class <- "differences"
out$covar.equal <- covar.equal
out$comp.names <- check.out$comp.namesDiff
out$resp <- check.out$resp
out$na.action <- na.action
rownames(out$degr.fr) <- rownames(out$lower.raw) <- rownames(out$upper.raw) <- rownames(out$lower) <-
rownames(out$upper) <- check.out$comp.namesDiff
colnames(out$degr.fr) <- colnames(out$lower.raw) <- colnames(out$upper.raw) <- colnames(out$lower) <-
colnames(out$upper) <- check.out$resp
rownames(out$CorrMatComp) <- colnames(out$CorrMatComp) <- rep(check.out$resp,times=check.out$ncomp)
if (na.action=="multi.df") {
rownames(out$estimate) <- check.out$comp.namesDiff
colnames(out$estimate) <- check.out$resp
}
if (covar.equal==FALSE) {
names(out$CovMatDat) <- check.out$tr.names
if (!inherits(check.out$CorrMatDat, "UserMatrix")) {
names(out$CorrMatDat) <- check.out$tr.names
}
}
if ( inherits(check.out$CorrMatDat, "UserMatrix") && isTRUE(covar.equal) ) {
rownames(out$CovMatDat) <- colnames(out$CovMatDat) <- check.out$resp
}
class(out) <- "SimCi"
return(out)
}
expect_similar <- function(object, expected, tolerance = .5, ...) {
testthat::expect_equal(object, expected, scale = 1, tolerance = tolerance, ...)
}
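# A minimal usage sketch (testthat must be installed): with scale = 1 the
# tolerance acts as an absolute difference, so values within 0.5 count as equal.
expect_similar(3.2, 3)                  # passes: |3.2 - 3| <= 0.5
expect_similar(10, 9, tolerance = 2)    # passes with a wider tolerance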
cachePut <- function(x, prefix, type, args=NULL, graph = NULL, ...) {
fname <- cacheName(prefix = prefix, type = type, args = args, graph = graph, mode = "put", ...)
if (!is.null(fname)) {
if (!dir.exists(dirname(fname))) {
dir.create(dirname(fname), recursive = TRUE)
}
attr(x, "cachefile") <- basename(fname)
vcat(1, " - writing cache ", basename(fname), fill = 300, show_prefix = FALSE)
saveRDS(x, file = fname, compress = getConfig("cachecompression"))
Sys.chmod(fname, mode = "0666", use_umask = FALSE)
}
}
"caladeniavalida" |
r.fgt <- function(x, weight, k, alpha){
n <- length(x)
if (is.null(weight)) weight <- rep(1, n)
rhow <- k*weighted.median(x, weight)
ind <- ifelse(x > rhow, 1, 0)
r.fgt <- sum(((((x - rhow)/rhow)*ind)^alpha*weight))/sum(weight)
return(r.fgt)
}
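# A minimal usage sketch for the relative FGT-type index above: rhow = k times
# the weighted median acts as the threshold and ind flags observations above
# it. r.fgt() relies on a weighted.median() from another package; a simple
# stand-in is defined here under that assumption.
weighted.median <- function(x, w) {
  o <- order(x)
  x <- x[o]; w <- w[o]
  x[which(cumsum(w) / sum(w) >= 0.5)[1]]
}
income <- c(5, 8, 12, 20, 35, 60, 90)
r.fgt(income, weight = NULL, k = 2, alpha = 1)  # mean relative excess over 2x the median
r.fgt(income, weight = NULL, k = 2, alpha = 2)  # squared version, weighting extremes more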
expected <- eval(parse(text="structure(list(c0 = logical(0)), .Names = \"c0\", row.names = integer(0), class = \"data.frame\")"));
test(id=0, code={
argv <- eval(parse(text="list(structure(list(c0 = structure(integer(0), .Label = character(0), class = \"factor\")), .Names = \"c0\", row.names = character(0), class = \"data.frame\"), structure(list(c0 = structure(integer(0), .Label = character(0), class = \"factor\")), .Names = \"c0\", row.names = character(0), class = \"data.frame\"))"));
do.call(`+`, argv);
}, o=expected);
MFDFAplot.fn <- function(Result,scale,q,cex.lab=1.6,cex.axis=1.6, col.points=1,
col.line = 1,lty=1,pch = 16,lwd = 2,model = TRUE,cex.legend=1){
if(model){
Coeff <- fit.model(Result$Hq,q)
Model <- Result[1:4]
Model$Hq <- (1/q)*(1-log(Coeff["a"]^q+Coeff["b"]^q)/log(2))
Model$Hq[which(q==0)] <- -log(Coeff["a"]*Coeff["b"])/(2*log(2))
Model$tau_q <- -log(Coeff["a"]^q+Coeff["b"]^q)/log(2)
Model$hq <- diff(Model$tau_q)
Model$Dq <- q[1:(length(q)-1)]*Model$hq - Model$tau_q[1:(length(q)-1)]
}
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
layout(matrix(c(1,2,3,4), 2, 2, byrow = TRUE), heights=c(4, 4))
par(mai=c(0.8,1,0.8,0.4))
xRange <- range(log2(scale))
yRange <- range(log2(Result$Fqi))
plot(xRange, yRange, type = "n", axes = FALSE, xlab = expression('log'[2]*'(Scale)'),
ylab=expression('log'[2]*'(F'[q]*')'), cex.lab=cex.lab, cex.axis=cex.axis,
main= "q-order Fluctuation function")
Index<-c(1,which(q==0),which(q==q[length(q)]))
axis(2)
axis(1)
box()
for (i in 1:3){
k<-Index[i]
points(log2(scale), log2(Result$Fqi[,k]), col=col.points+i-1,pch=pch)
lines(log2(scale),Result$line[,k], type="l", col=col.points+i-1, lwd=lwd)
}
legend("bottomright", c(paste('q','=',q[Index] , sep=' ' )),cex=cex.legend,
lwd=c(lwd,lwd,lwd),pch=c(pch,pch,pch),bty="n", col=col.points:(col.points+2))
par(mai=c(0.8,1,0.8,0.4))
plot(q, Result$Hq, col=col.points, axes= F, ylab=expression('h'[q]), pch=pch, cex.lab=cex.lab,
cex.axis=cex.axis, main="Hurst exponent", ylim=range(Result$Hq))
if(model){
lines(q,Model$Hq,col=col.line, lwd=lwd, lty=lty)
legend("topright", c("Data","Model"),cex=cex.legend,
lwd=c(-1,lwd),pch=c(pch,-1),bty="n", col=c(col.points,col.line))
}
axis(1, cex=4)
axis(2, cex=4)
box()
par(mai=c(0.8,1,0.8,0.4))
plot(q, Result$tau_q, col=col.points, axes=F,cex.lab=cex.lab, cex.axis=cex.axis,
main="Mass exponent",pch=16,ylab=expression(tau[q]))
if(model){
lines(q,Model$tau_q,col=col.line, lwd=lwd,lty=lty)
legend("bottom", c("Data","Model"),cex=cex.legend,
lwd=c(-1,lwd),pch=c(pch,-1),bty="n", col=c(col.points,col.line))
}
axis(1, cex=4)
axis(2, cex=4)
box()
par(mai=c(0.8,1,0.8,0.4))
plot(Result$hq, Result$Dq, col=col.points, axes=F, pch=16,
main="Multifractal spectrum",ylab=bquote("f ("~alpha~")"),
cex.lab=cex.lab,cex.axis=cex.axis,xlab=bquote(~alpha))
axis(1, cex=4)
axis(2, cex=4)
box()
if(model){
lines(Model$hq,Model$Dq,col=col.line, lwd=lwd,lty=lty)
legend("bottom", c("Data","Model"),cex=cex.legend,
lwd=c(-1,lwd),pch=c(pch,-1),bty="n", col=c(col.points,col.line))
}
}
setSpecies <- function(x, method=c('pamguard', 'manual', 'reassign'), value, type='id') {
if(length(method) > 1) {
methodPick <- menu(title='Please select a single assignment method.', choices = method)
if(methodPick == 0) {
pamWarning('No assignment method chosen, species cannot be assigned.')
return(x)
}
method <- method[methodPick]
}
method <- match.arg(method[1], choices = c('pamguard', 'manual', 'am', 'reassign'))
if(is.AcousticStudy(x)) {
acev <- events(x)
} else if(is.AcousticEvent(x)) {
acev <- list(x)
} else if(is.list(x)) {
acev <- x
}
switch(method,
'pamguard' = {
spCol <- c('Text_Annotation', 'eventType', 'eventLabel')
for(i in seq_along(acev)) {
sp <- sapply(detectors(acev[[i]]), function(y) {
hasCol <- spCol[spCol %in% colnames(y)]
unique(y[, hasCol])
})
sp <- unique(sp)
if(length(sp) > 1) {
spix <- menu(title = paste0('More than one species found for event ',
id(acev[[i]]), ', select one to assign:'),
choices = sp)
if(spix == 0) {
pamWarning('No species selected, assigning NA. Please fix later.')
sp <- NA_character_
} else {
sp <- sp[spix]
}
}
species(acev[[i]])[[type]] <- sp
}
},
'manual' = {
if(missing(value)) {
pamWarning('Manual mode requires a "value" to set.')
return(x)
}
if(inherits(value, 'data.frame')) {
if(!all(c('species', 'event') %in% colnames(value))) {
pamWarning('If "value" is a dataframe it must contain columns species and event.')
return(x)
}
allIds <- sapply(acev, id)
hasId <- allIds %in% value$event
if(!all(hasId)) {
message('No match found for event(s) ',
printN(allIds[!hasId], 6),
' (Event names in "value" must match exactly)')
}
for(i in which(hasId)) {
species(acev[[i]])[[type]] <- value[['species']][value$event == id(acev[[i]])]
}
} else {
if(length(value) != 1 &&
length(value) != length(acev)) {
pamWarning('Length of "value" must be either 1 or the number of events.')
return(x)
}
if(length(value) == 1) {
value <- rep(value, length(acev))
}
for(i in seq_along(acev)) {
species(acev[[i]])[[type]] <- value[i]
}
}
},
'am' = {
specDf <- bind_rows(lapply(acev, function(oneAe) {
list(event = id(oneAe),
eventType = str_trim(getClickData(oneAe)$eventLabel[1]),
comment = ancillary(oneAe)$eventComment)
}))
specDf$comment <- gsub('OFF EFF', '', specDf$comment)
specDf$comment <- gsub("[[:punct:]]", '', specDf$comment)
specDf$comment <- str_trim(specDf$comment)
specDf$species <- 'unid'
goodEvents <- c('BEAK', 'FORG')
specDf$species[specDf$eventType %in% goodEvents] <- str_split(specDf$comment[specDf$eventType %in% goodEvents],
' ', simplify=TRUE)[, 1]
specDf$species <- tolower(specDf$species)
specDf$species[specDf$species %in% c('mmme', 'mm')] <- 'unid'
specToAssign <- unique(specDf$species)
if(length(specToAssign) > 0) {
cat('Assigning unique species: ', paste0(specToAssign, collapse = ', '), '.\n', sep = '')
}
acev <- setSpecies(acev, method = 'manual', type=type, value = specDf)
},
'reassign' = {
if(missing(value)) {
pamWarning('"reassign" mode requires a "value" dataframe.')
return(x)
}
colnames(value) <- tolower(colnames(value))
if(!all(c('old', 'new') %in% colnames(value))) {
pamWarning('Data frame must have columns "old" and "new" to reassign.')
return(x)
}
unchanged <- vector('character', length=0)
for(i in seq_along(acev)) {
oldSpec <- species(acev[[i]])[[type]]
newSpec <- value[value$old == oldSpec, c('new')]
if(length(newSpec) == 0) {
unchanged <- c(unchanged, oldSpec)
newSpec <- oldSpec
}
species(acev[[i]])[[type]] <- as.character(newSpec)
}
unchanged <- unique(unchanged)
if(length(unchanged) > 0) {
message(length(unchanged), ' species (', printN(unchanged, 6), ') ',
'were not in reassignment dataframe, they have not been changed.')
}
},
pamWarning('Method ', method, ' not supported.')
)
if(is.AcousticStudy(x)) {
events(x) <- acev
x <- .addPamWarning(x)
return(x)
}
if(is.AcousticEvent(x)) {
return(acev[[1]])
}
acev
}
hsb2 <- read.csv("https://stats.idre.ucla.edu/stat/data/hsb2.csv")
str(hsb2)
hsb2$race.f <- factor(hsb2$race)
is.factor(hsb2$race.f)
summary(lm(write ~ race.f, data = hsb2))
summary(lm(write ~ factor(race), data = hsb2))
hsb2 <- within(hsb2, {
race.ct <- C(race.f, treatment)
print(attributes(race.ct))
})
hsb2 <- within(hsb2, {
race.ch <- C(race.f, helmert)
print(attributes(race.ch))
})
summary(lm(write ~ race.ct, data = hsb2))
summary(lm(write ~ race.ch, data = hsb2))
hsb2 <- within(hsb2, {
race.ch1 <- C(race.f, helmert, 3)
print(attributes(race.ch1))
})
summary(lm(write ~ race.ch1, data = hsb2))
(a <- contrasts(hsb2$race.f))
(contrasts(hsb2$race.f) <- contr.treatment(4))
summary(lm(write ~ race.f, data = hsb2))
(contrasts(hsb2$race.f) <- contr.treatment(4, base = 2))
summary(lm(write ~ race.f, data = hsb2))
summary(lm(write ~ I(race.f == 1) + I(race.f == 3) + I(race.f == 4), data = hsb2))
(contrasts(hsb2$race.f) <- contr.helmert(4))
summary(lm(write ~ race.f, data = hsb2))
context("environment")
source("utils.R")
test_that("create environment and check parameters", {
skip_if_no_azureml()
env_name <- "testenv"
env <- r_environment(env_name, version = "1", r_version = "3.5.2")
expect_equal(env$name, env_name)
expect_equal(env$version, "1")
expect_equal(env$docker$enabled, TRUE)
expect_equal(env$docker$base_dockerfile, NULL)
expect_equal(env$r$r_version, "3.5.2")
custom_docker_image_name <- "temp_image"
env <- r_environment(env_name, custom_docker_image = custom_docker_image_name)
expect_equal(env$name, env_name)
expect_equal(env$docker$enabled, TRUE)
expect_equal(env$docker$base_dockerfile, NULL)
expect_equal(env$docker$base_image, custom_docker_image_name)
cran_pkg1 <- cran_package("ggplot2")
cran_pkg2 <- cran_package("dplyr")
github_pkg1 <- github_package("Azure/azureml-sdk-for-r")
env <- r_environment(env_name, cran_packages = list(cran_pkg1, cran_pkg2),
github_packages = list(github_pkg1),
custom_url_packages = c("/some/package/dir"),
bioconductor_packages = c("a4", "BiocCheck"))
expect_equal(length(env$r$cran_packages), 2)
expect_equal(env$r$cran_packages[[1]]$name, "ggplot2")
expect_equal(env$r$cran_packages[[2]]$name, "dplyr")
expect_equal(length(env$r$github_packages), 1)
expect_equal(env$r$github_packages[[1]]$repository, "Azure/azureml-sdk-for-r")
expect_equal(length(env$r$custom_url_packages), 1)
expect_equal(env$r$custom_url_packages[[1]], "/some/package/dir")
expect_equal(length(env$r$bioconductor_packages), 2)
expect_equal(env$r$bioconductor_packages[[1]], "a4")
expect_equal(env$r$bioconductor_packages[[2]], "BiocCheck")
})
test_that("create, register, and get environment", {
skip_if_no_subscription()
ws <- existing_ws
env_name <- "testenv"
env <- r_environment(env_name, version = "1")
register_environment(env, ws)
environ <- get_environment(ws, env_name, "1")
expect_equal(env$name, environ$name)
})
designMat2Vec <-
function(desmat){
desvec = numeric(nrow(desmat))
for (j in 1:ncol(desmat)){
desvec[which(desmat[,j]>0)] = j
}
return(desvec)
}
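# A minimal usage sketch: collapse a 0/1 indicator (design) matrix back into a
# group-membership vector.
desmat <- model.matrix(~ grp - 1, data.frame(grp = factor(c("a", "b", "b", "c", "a"))))
designMat2Vec(desmat)  # 1 2 2 3 1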
sankey_drake_graph <- function(
...,
file = character(0),
selfcontained = FALSE,
build_times = "build",
digits = 3,
targets_only = FALSE,
from = NULL,
mode = c("out", "in", "all"),
order = NULL,
subset = NULL,
make_imports = TRUE,
from_scratch = FALSE,
group = NULL,
clusters = NULL,
show_output_files = TRUE,
config = NULL
) {
}
sankey_drake_graph_impl <- function(
config,
file = character(0),
selfcontained = FALSE,
build_times = "build",
digits = 3,
targets_only = FALSE,
from = NULL,
mode = c("out", "in", "all"),
order = NULL,
subset = NULL,
make_imports = TRUE,
from_scratch = FALSE,
group = NULL,
clusters = NULL,
show_output_files = TRUE
) {
assert_pkg("networkD3")
graph_info <- drake_graph_info_impl(
config = config,
from = from,
mode = mode,
order = order,
subset = subset,
build_times = build_times,
digits = digits,
targets_only = targets_only,
make_imports = make_imports,
from_scratch = from_scratch,
group = group,
clusters = clusters,
show_output_files = show_output_files
)
render_sankey_drake_graph(
graph_info,
file = file,
selfcontained = selfcontained
)
}
body(sankey_drake_graph) <- config_util_body(sankey_drake_graph_impl)
render_sankey_drake_graph <- function(
graph_info,
file = character(0),
selfcontained = FALSE,
...
) {
assert_pkg("networkD3")
nodes <- as.data.frame(graph_info$nodes)
timed <- grepl("\n", nodes$label)
nodes$label <- gsub("\n", " (", nodes$label)
nodes$label[timed] <- paste0(nodes$label[timed], ")")
nodes$status <- gsub(pattern = " ", replacement = "_", x = nodes$status)
edges <- as.data.frame(graph_info$edges)
edges$src <- as.integer(match(edges$from, table = nodes$id) - 1)
edges$target <- as.integer(match(edges$to, table = nodes$id) - 1)
edges$value <- rep(1, nrow(edges))
colordf <- nodes[, c("status", "color")]
colordf <- colordf[!duplicated(colordf), ]
domain <- paste(paste0("'", colordf$status, "'"), collapse = ", ")
range <- paste(paste0("'", colordf$color, "'"), collapse = ", ")
color <- paste0(
"d3.scaleOrdinal() .domain([", domain, "]) .range([", range, "])"
)
sankey <- networkD3::sankeyNetwork(
Links = edges,
Nodes = nodes,
NodeID = "label",
Source = "src",
Target = "target",
NodeGroup = "status",
Value = "value",
colourScale = color,
... = ...
)
sankey_render_webshot(
file = file,
graph = sankey,
selfcontained = selfcontained
)
}
sankey_render_webshot <- function(file, graph, selfcontained) {
if (!length(file)) {
return(graph)
}
file <- path.expand(file)
if (is_image_filename(file)) {
assert_pkg("webshot")
dir <- tempfile()
dir.create(dir)
url <- file.path(dir, "tmp.html")
networkD3::saveNetwork(
network = graph,
file = url,
selfcontained = FALSE
)
webshot::webshot(url = url, file = file)
} else {
networkD3::saveNetwork(
network = graph,
file = file,
selfcontained = selfcontained
)
}
invisible()
}
set_time_locale <- function(language_code) {
assert_that(is.string(language_code))
assert_that(
language_code %in% c("eng", "deu", ""),
msg = "Only 'eng' and 'deu' are supported languages. Or use '' to reset."
)
german <- c(
"de",
"de_DE",
"de_DE.UTF-8",
"de_DE.utf8",
"German"
)
english <- c(
"en",
"en_US",
"en_US.UTF-8",
"en_US.utf8",
"English"
)
language <- switch(
language_code,
"eng" = english,
"deu" = german,
""  # "" falls through to this default: reset LC_TIME to the system locale
)
new <- ""
for (lang_format in language) {
if (new != "") break
new <- suppressWarnings(Sys.setlocale("LC_TIME", lang_format))
}
if (language_code != "" && new == "") {
stop(paste0(
"Couldn't set locale to '",
language_code,
"'. Are you sure it's installed on the system?"
))
}
return(new)
}
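# A minimal usage sketch (assertthat must be attached for assert_that() and
# is.string(); whether a given locale works is system-dependent):
library(assertthat)
set_time_locale("deu")
format(as.Date("2020-01-01"), "%B")  # "Januar" under a German LC_TIME
set_time_locale("")                  # reset to the system default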
"print.turnogram" <-
function(x, ...) {
cat(x$type, "turnogram for:", x$data, "\n\n")
cat("options :", x$fun, "/", x$proba, "\n")
cat("intervals :", min(x$interval), "..", max(x$interval), x$units.text, "/ step =", x$interval[2] - x$interval[1], "\n")
cat("nbr of obs. :", max(x$n), "..", min(x$n), "\n")
maxinfo <- max(x$info)
pos <- x$info == maxinfo
cat("max. info. : ", max(x$info), " at interval ", x$interval[pos], " (P = ", 2^-abs(max(x$info)), ": ", x$turns[pos], " turning points for ", x$n[pos], " observations)", "\n", sep="")
cat("extract level: ", x$level, " (", x$units.text, ")\n\n", sep="")
invisible(x)
}
dgbinom <-
function(x,size,prob,log=FALSE){
n<-sum(size)
y<-round(x)
if (any(is.na(x) | (x < 0)) || max(abs(x - y)) > 1e-07)
stop("'x' must be nonnegative and integer")
z<-round(n)
if (length(size) != length(prob) || is.na(n) || (n < 1) || abs(n -
z) > 1e-07 || any(x > z))
stop("'size' must contain positive integers and their sum must be >= 'x'")
if (any(is.na(prob)| (prob<0) | (prob>1) ))
stop("'prob' must contain numbers between 0 und 1")
x<-x+1
theta<-c(rep(prob,size))
xi<-c(1-theta[1],theta[1])
if(n>1){
for(i in c(2:n)){
P<-matrix(c(xi,0,0,xi),i+1,2)
xi<-P%*%c(1-theta[i],theta[i])}}
if (log==TRUE){
xi<-log(xi)}
xi[x]
}
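# A minimal sanity check: dgbinom() is the pmf of a sum of independent
# binomials (a generalized/Poisson binomial); with equal success probabilities
# it must coincide with dbinom().
dgbinom(0:5, size = c(2, 3), prob = c(0.3, 0.3))
dbinom(0:5, size = 5, prob = 0.3)  # identical up to floating point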
test_that("fill_all atomic objects", {
expect_identical(fill_all(numeric(0)), numeric(0))
expect_identical(fill_all(numeric(0), nas = FALSE), numeric(0))
expect_identical(fill_all(integer(0)), integer(0))
expect_identical(fill_all(integer(0), nas = FALSE), integer(0))
expect_identical(fill_all(NA), FALSE)
expect_identical(fill_all(NA, nas = FALSE), NA)
expect_identical(fill_all(c(10L, NA)), c(0L, 0L))
expect_identical(fill_all(c(10L, NA), nas = FALSE), c(0L, NA))
expect_identical(
fill_all(matrix(c(1L, 3L, 7L, NA), nrow = 2), value = 2L),
matrix(c(2L, 2L, 2L, 2L), nrow = 2)
)
expect_identical(
fill_all(matrix(c(1L, 3L, 7L, NA), nrow = 2), nas = FALSE),
matrix(c(0L, 0L, 0L, NA), nrow = 2)
)
expect_identical(fill_all(c(10L, NA), value = 11L), c(11L, 11L))
expect_identical(fill_all(c(10L, NA),
value = 11,
nas = FALSE
), c(11L, NA))
})
test_that("fill_all.matrix", {
expect_identical(
fill_all(matrix(c(TRUE, NA, FALSE, NA), nrow = 2),
value = "TRUE"
),
matrix(c(TRUE, TRUE, TRUE, TRUE), nrow = 2)
)
expect_identical(
fill_all(matrix(c(TRUE, NA, FALSE, NA), nrow = 2),
value = "TRUE", nas = FALSE
),
matrix(c(TRUE, NA, TRUE, NA), nrow = 2)
)
expect_identical(fill_all(matrix(NA_integer_)), matrix(0L))
})
test_that("fill_all.character", {
expect_identical(fill_all(c("a", NA)), c("0", "0"))
expect_identical(fill_all(c("a", NA), nas = FALSE), c("0", NA))
})
sheet_write <- function(data,
ss = NULL,
sheet = NULL) {
data_quo <- enquo(data)
data <- eval_tidy(data_quo)
check_data_frame(data)
if (is.null(ss)) {
if (quo_is_symbol(data_quo)) {
sheet <- sheet %||% as_name(data_quo)
}
if (is.null(sheet)) {
return(gs4_create(sheets = data))
} else {
check_string(sheet)
return(gs4_create(sheets = list2(!!sheet := data)))
}
}
ssid <- as_sheets_id(ss)
maybe_sheet(sheet)
x <- gs4_get(ssid)
gs4_bullets(c(v = "Writing to {.s_sheet {x$name}}."))
if (is.null(sheet) && quo_is_symbol(data_quo)) {
candidate <- as_name(data_quo)
if (!is.null(candidate)) {
m <- match(candidate, x$sheets$name)
sheet <- if (is.na(m)) candidate else NULL
}
}
requests <- list()
s <- NULL
if (!is.null(sheet)) {
s <- tryCatch(
lookup_sheet(sheet, sheets_df = x$sheets),
googlesheets4_error_sheet_not_found = function(cnd) NULL
)
}
if (is.null(s)) {
x <- sheet_add_impl_(ssid, sheet_name = sheet)
s <- lookup_sheet(nrow(x$sheets), sheets_df = x$sheets)
} else {
requests <- c(
requests,
list(bureq_clear_sheet(s$id))
)
}
gs4_bullets(c(v = "Writing to sheet {.w_sheet {s$name}}."))
requests <- c(
requests,
prepare_df(s$id, data)
)
req <- request_generate(
"sheets.spreadsheets.batchUpdate",
params = list(
spreadsheetId = ssid,
requests = requests,
responseIncludeGridData = FALSE
)
)
resp_raw <- request_make(req)
gargle::response_process(resp_raw)
invisible(ssid)
}
write_sheet <- sheet_write
raster_to_raw_tiles <- function(input_file,
output_prefix,
side_length = 4097,
raw = TRUE) {
.Deprecated(
"make_manifest",
"terrainr",
msg = paste("'raster_to_raw_tiles' is deprecated as of terrainr 0.5.0.",
"Use 'make_manifest' instead.",
sep = "\n"
)
)
input_raster <- raster::raster(input_file)
max_raster <- raster::cellStats(input_raster, "max")
x_tiles <- ceiling(input_raster@ncols / side_length)
y_tiles <- ceiling(input_raster@nrows / side_length)
if (requireNamespace("progressr", quietly = TRUE)) {
p <- progressr::progressor(steps = x_tiles * y_tiles * 3)
}
temptiffs <- NULL
while (length(temptiffs) != x_tiles * y_tiles) {
temptiffs <- unique(vapply(
1:(x_tiles * y_tiles),
function(x) tempfile(fileext = ".tiff"),
character(1)
))
}
x_tiles <- 0:(x_tiles - 1)
x_tiles <- (x_tiles * side_length)
y_tiles <- 0:(y_tiles - 1)
y_tiles <- (y_tiles * side_length)
counter <- 1
for (i in seq_along(x_tiles)) {
for (j in seq_along(y_tiles)) {
if (requireNamespace("progressr", quietly = TRUE)) {
p(message = sprintf(
"Cropping tile (%d,%d)",
x_tiles[[i]],
y_tiles[[j]]
))
}
gdalUtils::gdal_translate(input_file, temptiffs[[counter]],
srcwin = paste0(
x_tiles[[i]],
", ",
y_tiles[[j]],
", ",
side_length,
", ",
side_length
)
)
names(temptiffs)[[counter]] <- paste0(
output_prefix,
"_",
i,
"_",
j,
ifelse(raw, ".raw", ".png")
)
counter <- counter + 1
}
}
temppngs <- NULL
if (raw) {
while (length(temppngs) != length(temptiffs)) {
temppngs <- unique(vapply(
seq_along(temptiffs),
function(x) tempfile(fileext = ".png"),
character(1)
))
}
} else {
temppngs <- names(temptiffs)
}
names(temppngs) <- names(temptiffs)
mapply(
function(x, y) {
if (requireNamespace("progressr", quietly = TRUE)) {
p(message = sprintf("Converting tile %s to PNG", x))
}
sf::gdal_utils(
"translate",
source = x,
destination = y,
options = c(
"-ot", "UInt16",
"-of", "png",
"-scale", "0", max_raster, "0", "65535"
)
)
},
temptiffs,
temppngs
)
unlink(temptiffs)
mapply(
function(x, y) {
processing_image <- magick::image_read(x)
if (requireNamespace("progressr", quietly = TRUE)) {
if (raw) {
p(message = sprintf("Converting tile %s to RAW", x))
} else {
p(message = sprintf("Flipping tile %s for Unity", x))
}
}
if (raw) {
processing_image <- magick::image_flop(processing_image)
processing_image <- magick::image_convert(processing_image,
format = "RGB",
depth = 16,
interlace = "Plane"
)
} else {
processing_image <- magick::image_flip(processing_image)
processing_image <- magick::image_flop(processing_image)
}
magick::image_write(processing_image, y)
},
temppngs,
names(temppngs)
)
if (raw) unlink(temppngs)
return(invisible(names(temppngs)))
}
output$tabinfo_sb_results <- output$tabinfo_sb_anonymize <- renderUI({
inp <- infodat()$df
if (is.null(inp)) {
return(NULL)
}
inp[,3] <- as.character(inp[,3])
fluidRow(
column(12, h4("Variable selection"), align="center"),
column(12, DT::renderDataTable({
inp
}, rownames=FALSE, colnames = c("Variable name", "Type", "Additional suppressions by local suppression algorithm"), selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)), align="center")
)
})
output$tabparam_sb_results <- output$tabparam_sb_anonymize <- renderUI({
inp <- infodat()$params
if (is.null(inp)) {
return(NULL)
}
fluidRow(
column(12, h4("Additional parameters"), align="center"),
column(12, DT::renderDataTable({
inp
}, rownames=FALSE, selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)), align="center")
)
})
output$risk_sb_anonymize <- renderUI({
curObj <- sdcObj()
if (is.null(curObj)) {
return(NULL)
}
risks <- get_risk()
obs <- nrow(risks)
n2 <- sum(risks$fk<2)
n3 <- sum(risks$fk<3)
n5 <- sum(risks$fk<5)
v1 <- paste0(n2," (",formatC(100*(n2/obs), format="f", digits=2),"%)")
v2 <- paste0(n3," (",formatC(100*(n3/obs), format="f", digits=2),"%)")
v3 <- paste0(n5," (",formatC(100*(n5/obs), format="f", digits=2),"%)")
origrisks <- curObj@originalRisk$individual
n2_o <- sum(origrisks[,2]<2)
n3_o <- sum(origrisks[,2]<3)
n5_o <- sum(origrisks[,2]<5)
v1_o <- paste0(n2_o," (",formatC(100*(n2_o/obs), format="f", digits=2),"%)")
v2_o <- paste0(n3_o," (",formatC(100*(n3_o/obs), format="f", digits=2),"%)")
v3_o <- paste0(n5_o," (",formatC(100*(n5_o/obs), format="f", digits=2),"%)")
df <- data.table(
"k-anonymity"=c("2-anonymity","3-anonymity","5-anonymity"),
"Modified data"=c(v1,v2,v3),
"Original data"=c(v1_o, v2_o, v3_o))
fluidRow(
column(12, h4("k-anonymity"), align="center"),
column(12, DT::renderDataTable({
df
}, rownames=FALSE, selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)), align="center")
)
})
output$numrisk_sb_anonymize <- renderUI({
curObj <- sdcObj()
if (is.null(curObj)) {
return(invisible(NULL))
}
x <- print(curObj, type="numrisk", docat=FALSE)
if (is.null(x)) {
return(invisible(NULL))
}
dt <- data.table(data=c("modified","original"), risk_min=paste0(c("0.00","0.00"),"%"), risk_max=paste0(c(x$risk_up, "100.00"),"%"))
fluidRow(
column(12, h4("Risk in numerical key variables"), align="center"),
column(12, DT::renderDataTable({
dt
}, rownames=FALSE, colnames =c("Data", "Minimum risk", "Maximum risk"), selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)))
)
})
output$loss_sb_anonymize <- renderUI({
curObj <- sdcObj()
if (is.null(curObj)) {
return(NULL)
}
utility <- get.sdcMicroObj(curObj, type="utility")
if (is.null(utility)) {
return(invisible(NULL))
}
il1 <- formatC(utility$il1, format="f", digits=2)
diff_eigen <- formatC(utility$eigen*100, format="f", digits=2)
df <- data.frame(
Measure=c("IL1s","Difference in eigenvalues"),
"Modified data"=c(il1, diff_eigen),
"Original data"=c("0.00", "0.00"))
fluidRow(
column(12, h4("Information loss"), align="center"),
column(12, DT::renderDataTable({
df
}, rownames=FALSE, selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)),align="center")
)
})
output$pram_sb_anonymize <- renderUI({
curObj <- sdcObj()
if (is.null(curObj)) {
return(NULL)
}
pI <- curObj@pram
if (is.null(pI)) {
return(NULL)
}
out <- fluidRow(column(12, h4("PRAM summary"), align="center"))
wn <- curObj@additionalResults$sdcMicro_warnings
if (!is.null(wn) && "pram" %in% wn$method) {
out <- list(out, fluidRow(column(12, p("Note: Pram was applied on at least one cate gorical
key variable. Risk measures for categorical key variables including k-anonymity are not useful anymore!", align="center"))))
}
out <- list(out, fluidRow(
column(12, DT::renderDataTable({
pI$summary
}, rownames=FALSE, colnames = c("Variable name", "Number of changed values", "Percentage of changed values"), selection='none', style='bootstrap', class='table-condensed',
options = list(searching=FALSE, scrollX=TRUE, paging=FALSE, ordering=FALSE, bInfo=FALSE)), align="center")
))
})
output$anonmeth_sb_risk <- renderUI({
curMethods <- obj$anon_performed
if (is.null(curMethods)) {
return(NULL)
}
res <- tags$ul(
lapply(seq_along(curMethods), function(x) {
tags$li(sub(" (see above) ", "", curMethods[x], fixed = TRUE))
}
))
out <- fluidRow(column(12, h4("Anonymization steps"), align="center"))
out <- list(out, fluidRow(column(12, res)))
return(out)
})
output$sb_info_results <- renderUI({
out <- list(
uiOutput("tabinfo_sb_results"),
uiOutput("tabparam_sb_results"),
uiOutput("anonmeth_sb_risk"))
out
})
output$sb_info_anonymize <- renderUI({
out <- list(
uiOutput("tabinfo_sb_anonymize"),
uiOutput("tabparam_sb_anonymize"),
uiOutput("risk_sb_anonymize"),
uiOutput("numrisk_sb_anonymize"),
uiOutput("loss_sb_anonymize"),
uiOutput("pram_sb_anonymize"))
out
})
hSDM.poisson.iCAR <- function (
counts,
suitability, spatial.entity, data,
n.neighbors, neighbors,
suitability.pred=NULL, spatial.entity.pred=NULL,
burnin=5000, mcmc=10000, thin=10,
beta.start,
Vrho.start,
mubeta=0, Vbeta=1.0E6,
priorVrho="1/Gamma",
shape=0.5, rate=0.0005,
Vrho.max=1000,
seed=1234, verbose=1,
save.rho=0, save.p=0)
{
check.mcmc.parameters(burnin, mcmc, thin)
check.verbose(verbose)
check.save.rho(save.rho)
check.save.p(save.p)
Y <- counts
nobs <- length(Y)
mf.suit <- model.frame(formula=suitability,data=data)
X <- model.matrix(attr(mf.suit,"terms"),data=mf.suit)
ncell <- length(n.neighbors)
cells <- spatial.entity
if (is.null(suitability.pred) | is.null(spatial.entity.pred)) {
X.pred <- X
cells.pred <- cells
npred <- nobs
}
if (!is.null(suitability.pred) & !is.null(spatial.entity.pred)) {
mf.pred <- model.frame(formula=suitability,data=suitability.pred)
X.pred <- model.matrix(attr(mf.pred,"terms"),data=mf.pred)
cells.pred <- spatial.entity.pred
npred <- length(cells.pred)
}
np <- ncol(X)
ngibbs <- mcmc+burnin
nthin <- thin
nburn <- burnin
nsamp <- mcmc/thin
check.Y.poisson(Y)
check.X(X,nobs)
check.cells(cells,nobs)
check.neighbors(n.neighbors,ncell,neighbors)
check.cells.pred(cells.pred,npred)
beta.start <- form.beta.start(beta.start,np)
rho.start <- rep(0,ncell)
Vrho.start <- check.Vrho.start(Vrho.start)
mubeta <- check.mubeta(mubeta,np)
Vbeta <- check.Vbeta(Vbeta,np)
check.ig.prior(shape,rate)
Vrho.max <- check.Vrho.max(Vrho.max)
priorVrho <- form.priorVrho(priorVrho)
beta <- rep(beta.start,nsamp)
if (save.rho==0) {rho_pred <- rho.start}
if (save.rho==1) {rho_pred <- rep(rho.start,nsamp)}
Vrho <- rep(Vrho.start,nsamp)
prob_p_latent <- rep(0,nobs)
prob_q_latent <- rep(0,nobs)
if (save.p==0) {prob_p_pred <- rep(0,npred)}
if (save.p==1) {prob_p_pred <- rep(0,npred*nsamp)}
Deviance <- rep(0,nsamp)
Sample <- .C("hSDM_poisson_iCAR",
ngibbs=as.integer(ngibbs), nthin=as.integer(nthin), nburn=as.integer(nburn),
nobs=as.integer(nobs),
ncell=as.integer(ncell),
np=as.integer(np),
Y_vect=as.integer(c(Y)),
X_vect=as.double(c(X)),
C_vect=as.integer(c(cells)-1),
nNeigh=as.integer(c(n.neighbors)),
Neigh_vect=as.integer(c(neighbors-1)),
npred=as.integer(npred),
X_pred_vect=as.double(c(X.pred)),
C_pred_vect=as.integer(c(cells.pred)-1),
beta_start=as.double(c(beta.start)),
rho_start=as.double(c(rho.start)),
beta.nonconst=as.double(beta),
rho_pred.nonconst=as.double(rho_pred),
Vrho.nonconst=as.double(Vrho),
mubeta=as.double(c(mubeta)), Vbeta=as.double(c(Vbeta)),
priorVrho=as.double(priorVrho),
shape=as.double(shape), rate=as.double(rate),
Vrho.max=as.double(Vrho.max),
Deviance.nonconst=as.double(Deviance),
prob_p_latent.nonconst=as.double(prob_p_latent),
prob_p_pred.nonconst=as.double(prob_p_pred),
seed=as.integer(seed),
verbose=as.integer(verbose),
save_rho=as.integer(save.rho),
save_p=as.integer(save.p),
PACKAGE="hSDM")
Matrix <- matrix(NA,nrow=nsamp,ncol=np+2)
names.fixed <- paste("beta.",colnames(X),sep="")
colnames(Matrix) <- c(names.fixed,"Vrho","Deviance")
Matrix[,c(1:np)] <- matrix(Sample[[17]],ncol=np)
Matrix[,ncol(Matrix)-1] <- Sample[[19]]
Matrix[,ncol(Matrix)] <- Sample[[26]]
MCMC <- mcmc(Matrix,start=nburn+1,end=ngibbs,thin=nthin)
if (save.rho==0) {rho.pred <- Sample[[18]]}
if (save.rho==1) {
Matrix.rho.pred <- matrix(Sample[[18]],ncol=ncell)
colnames(Matrix.rho.pred) <- paste("rho.",c(1:ncell),sep="")
rho.pred <- mcmc(Matrix.rho.pred,start=nburn+1,end=ngibbs,thin=nthin)
}
if (save.p==0) {lambda.pred <- Sample[[28]]}
if (save.p==1) {
Matrix.p.pred <- matrix(Sample[[28]],ncol=npred)
colnames(Matrix.p.pred) <- paste("p.",c(1:npred),sep="")
lambda.pred <- mcmc(Matrix.p.pred,start=nburn+1,end=ngibbs,thin=nthin)
}
return (list(mcmc=MCMC,
rho.pred=rho.pred, lambda.pred=lambda.pred,
lambda.latent=Sample[[27]]))
}
simulate_simpson <- function(n = 100,
r = 0.5,
groups = 3,
difference = 1,
group_prefix = "G_") {
if (n <= 3) {
stop("The number of observation `n` should be higher than 3")
}
data <- data.frame()
for (i in 1:groups) {
dat <- simulate_correlation(n = n, r = r)
dat$V1 <- dat$V1 + difference * i
dat$V2 <- dat$V2 + difference * (i * -sign(r))
dat$Group <- sprintf(paste0(group_prefix, "%0", nchar(trunc(abs(groups))), "d"), i)
data <- rbind(data, dat)
}
data
}
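# A minimal usage sketch, assuming simulate_correlation() from the same package
# is available: each group correlates positively while the pooled correlation
# flips sign (Simpson's paradox).
set.seed(1)
dat <- simulate_simpson(n = 100, r = 0.5, groups = 3)
cor(dat$V1, dat$V2)                              # pooled: negative
by(dat, dat$Group, function(g) cor(g$V1, g$V2))  # within groups: about 0.5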
shell92pf <- function(d, wth, uts, depth, l){
checkmate::assert_double(d, lower = 1, upper = 5e3, finite = TRUE, any.missing = FALSE, min.len = 1)
checkmate::assert_double(wth, lower = 0, upper = 5e2, finite = TRUE, any.missing = FALSE, min.len = 1)
checkmate::assert_double(uts, lower = 5, upper = 2e3, finite = TRUE, any.missing = FALSE, min.len = 1)
checkmate::assert_double(depth, lower = 0, upper = 1e3, finite = TRUE, any.missing = FALSE, min.len = 1)
checkmate::assert_double(l, lower = 0, upper = 5e3, finite = TRUE, any.missing = FALSE, min.len = 1)
Q <- sqrt(1 + .805*l^2/d/wth)
Pf <- 2*wth*.9*uts*(1 - depth/wth)/d/(1 - depth/wth/Q)
Pf[depth >= .85*wth] <- NA_real_
Pf
}
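# A minimal usage sketch (checkmate must be installed; units are assumed
# consistent with the bounds above, e.g. mm for geometry and MPa for strength):
# Shell-92 failure pressure for a pipe with a single corrosion defect.
shell92pf(d = 812.8, wth = 19.1, uts = 530, depth = 9, l = 200)
shell92pf(d = 812.8, wth = 19.1, uts = 530, depth = 17, l = 200)  # depth >= 85% of wth -> NA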
configure_imap <- function(url,
username,
password = NULL,
xoauth2_bearer = NULL,
use_ssl = TRUE,
verbose = FALSE,
buffersize = 16000,
timeout_ms = 0,
...) {
con <- ImapCon$new(url,
username,
password = password,
xoauth2_bearer = xoauth2_bearer,
use_ssl = use_ssl,
verbose = verbose,
buffersize = buffersize,
timeout_ms = timeout_ms,
...)
return(con)
}
logLikeCLTEstPopMean=function(muEst,muObs,varObs,nObs){
t.model<-sqrt(nObs/varObs)*(muObs-muEst);
return( dt(t.model,df=nObs-1,log=TRUE)-dt(0,df=nObs-1,log=TRUE));
}
hzar.doCLTData1DPops<-function(distance,muObs,varObs,nEff){
if((length(distance) != length(muObs)) ||
(length(distance) != length(nEff)) ||
(length(distance) != length(varObs)) ){
stop("Distance, muObs, varObs and nEff are not all of the same length.");
}
if(sum(nEff<2)>0)
stop("There must be at least two samples per population!");
if(sum(nEff<10)>0)
warning("Some populations have less than 10 samples. More samples would be nice.");
if(sum(varObs==0)>0){
warning("Some of the population samples have a variance of 0. Adding in estimated variance due to measurement error.");
m.err<-5/3*10^-quantile(1+as.numeric(lapply(muObs,
function(nV) {min((-1:12)[round(nV,digits=-1:12)==nV])}
)),probs=0.75)[[1]];
varObs<-varObs+m.err*m.err;
}
obj<-list(frame=na.omit(data.frame(dist=distance,obsMean=muObs,obsVariance=varObs,n=nEff)));
obj$model.LL <- function(model.func){
muEst=model.func(obj$frame$dist);
res<-logLikeCLTEstPopMean(muEst=as.numeric(muEst),
muObs=as.numeric(obj$frame$obsMean),
varObs=as.numeric(obj$frame$obsVariance),
nObs=as.numeric(obj$frame$n));
result<-sum(res);
if(is.na(result))
return(-1e8);
return(result);
}
class(obj)<-c("clineSampleData1DCLT","hzar.obsData");
return(obj);
}
hzar.doCLTData1DRaw<-function(distance,traitValue){
if(length(distance)!=length(traitValue))
stop("Distance and traitValue vectors not of equal length.");
dist.group<-unique(distance);
group.nSamp<-as.numeric(lapply(X=dist.group,
function(a) sum(distance==a)));
if(sum(group.nSamp<3)>0)
stop("There are not enough samples in discrete populations. If the data submitted is correct, you should either drop low sample populations or use one of the population sample interpolation methods.");
if(sum(group.nSamp<10)>0)
warning("There are very few samples in discrete populations. You should consider dropping low sample populations or using one of the population sample interpolation methods.");
group.mean<-as.numeric(lapply(X=dist.group,
function(a)
mean(traitValue[distance==a])));
group.var<-as.numeric(lapply(X=dist.group,
function(a)
var(traitValue[distance==a])));
if(sum(group.var==0)>0){
warning("Some of the population samples have a variance of 0. Adding in estimated variance due to measurement error.");
m.err<-5/3*10^-quantile(1+as.numeric(lapply(traitValue,
function(nV) {min((-1:12)[round(nV,digits=-1:12)==nV])}
)),probs=0.75)[[1]];
group.var<-group.var+m.err*m.err;
}
return(data.frame(dist=dist.group,
mu=group.mean,
sigma2=group.var,
nSamp=group.nSamp));
}
cline.meta.CLTnA =
list(
prior=function(center,width,xMin,xMax){
return(0); },
func=function(center,width,xMin,xMax){
pCline<- function(x) {
u <- (x - center) * 4/width
return(xMin+(xMax-xMin)* (1/(1+ exp(-u)))) }
return(pCline)
},
req=function(center,width,xMin,xMax){
return(xMin <xMax & width>0)},
parameterTypes=CLINEPARAMETERS[c("center","width","xMin","xMax")]
);
cline.meta.CLTnD =
list(
prior=function(center,width,xMin,xMax){
return(0); },
func=function(center,width,xMin,xMax){
pCline<- function(x) {
u <- (x - center) * -4/width
return(xMin+(xMax-xMin)* (1/(1+ exp(-u)))) }
return(pCline)
},
req=function(center,width,xMin,xMax){
return(xMin <xMax &width>0)},
parameterTypes=CLINEPARAMETERS[c("center","width","xMin","xMax")]
);
class(cline.meta.CLTnA)<-"clineMetaModel";
class(cline.meta.CLTnD)<-"clineMetaModel";
cline.meta.CLTrA =
list(req= function(center,width,xMin,xMax,deltaR,tauR)
{
return(width>0 & deltaR>=0 &
xMin <xMax &
tauR>=0 & tauR<=1 )
},
prior=function(center,width,xMin,xMax,deltaR,tauR){
return(0); },
func=function(center,width,xMin,xMax,deltaR,tauR)
{
gamma=4/width;
tail.HI=meta.tail.upper(gamma=gamma,d2=deltaR,tau2=tauR);
clineComposite=
meta.cline.func.upStep(center=center,
direction=1,
gamma=gamma,
upperTail=tail.HI);
return(meta.cline.func.pScale(xMin,xMax,clineComposite));
},
parameterTypes=CLINEPARAMETERS[c("center","width","xMin","xMax","deltaR","tauR")]
);
cline.meta.CLTrD =
list(req= function(center,width,xMin,xMax,deltaR,tauR)
{
return(width>0 & deltaR>=0 &
xMin <xMax &
tauR>=0 & tauR<=1 )
},
prior=function(center,width,xMin,xMax,deltaR,tauR){
return(0); },
func=function(center,width,xMin,xMax,deltaR,tauR)
{
gamma=4/width;
tail.LO=meta.tail.lower(gamma=gamma,d1=deltaR,tau1=tauR);
clineComposite=
meta.cline.func.lowStep(center=center,
direction=-1,
gamma=gamma,
lowerTail=tail.LO);
return(meta.cline.func.pScale(xMin,xMax,clineComposite));
},
parameterTypes=CLINEPARAMETERS[c("center","width","xMin","xMax","deltaR","tauR")]
);
class(cline.meta.CLTrA)<-"clineMetaModel";
class(cline.meta.CLTrD)<-"clineMetaModel";
setupCLTCenterClineParameters<-function(myModel,scaling,x=NULL,y=NULL) {
if(scaling=="fixed") {
attr(myModel$parameterTypes$xMin,"fixed")<-TRUE;
attr(myModel$parameterTypes$xMax,"fixed")<-TRUE;
if(!is.null(y)){
myModel$parameterTypes$xMin$val<-min(y);
myModel$parameterTypes$xMax$val<-max(y);
}
} else if(scaling=="free") {
attr(myModel$parameterTypes$xMin,"fixed")<-FALSE;
attr(myModel$parameterTypes$xMax,"fixed")<-FALSE;
if(!is.null(y)){
myModel$parameterTypes$xMin$val<-min(y);
myModel$parameterTypes$xMax$val<-max(y);
cline.suggestionFunc1D$xMin(x,y)->junk;
attr(myModel$parameterTypes$xMin,"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes$xMin,"limit.upper")<-junk[[2]];
cline.suggestionFunc1D$xMax(x,y)->junk;
attr(myModel$parameterTypes$xMax,"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes$xMax,"limit.upper")<-junk[[2]];
}
} else {
stop(paste("Scaling type",scaling,"unrecignized. Please use none, fixed, or free."));
}
pTnames<-names(myModel$parameterTypes);
if(!is.null(x)){
qX<-quantile(x,probs=c(0.25,0.5,0.75));
myModel$parameterTypes$center$val<-qX[[2]];
myModel$parameterTypes$width$val<-qX[[3]]-qX[[1]];
cline.suggestionFunc1D$center(x,y)->junk;
attr(myModel$parameterTypes$center,"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes$center,"limit.upper")<-junk[[2]];
cline.suggestionFunc1D$width(x,y)->junk;
attr(myModel$parameterTypes$width,"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes$width,"limit.upper")<-junk[[2]];
index<-"deltaR";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
index<-"deltaM";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
index<-"deltaL";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
}
index<-"tauR";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
index<-"tauM";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
index<-"tauL";
if(index %in% pTnames){
cline.suggestionFunc1D[[index]](x,y)->junk;
attr(myModel$parameterTypes[[index]],"limit.lower")<-junk[[1]];
attr(myModel$parameterTypes[[index]],"limit.upper")<-junk[[2]];
}
return(myModel);
}
hzar.makeCline1DCLT<- function(data=NULL,scaling="free",tails="none",direction=NULL){
if(identical(tolower(tails),"none")){
return(buildCline1D(data,scaling,direction,
cline.meta.CLTnA,cline.meta.CLTnD));
}else if(identical(tolower(tails),"right")) {
myRightCline<-buildCline1D(data,scaling,direction,
cline.meta.CLTrA, cline.meta.CLTrD);
attr(myRightCline,"tails")<-"right";
return(myRightCline);
}
stop(paste("Cline with",tails,"tail(s) not available."));
}
[
{
"title": "Improve your shiny dashboard with Disqus panel",
"href": "http://r-addict.com/2016/04/20/Disqus-Shinydashboards.html"
},
{
"title": "Monthly R jobs on r-bloggers",
"href": "https://www.r-bloggers.com/monthly-r-jobs-on-r-bloggers/"
},
{
"title": "Need user feedback? Send it directly from R",
"href": "http://data-steve.github.io/need-user-feedback-send-programmatically/"
},
{
"title": "The R Parallel Programming Blog",
"href": "http://www.parallelr.com/the-r-parallel-programming-blog/"
},
{
"title": "Using SVG graphics in blog posts",
"href": "http://www.magesblog.com/2016/02/using-svg-graphics-in-blog-posts.html"
},
{
"title": "R jobs (February 2014)",
"href": "https://www.r-bloggers.com/r-jobs-february-2014/"
},
{
"title": "A Tiny Model of Evolution",
"href": "https://web.archive.org/web/http://mickeymousemodels.blogspot.com/2011/04/tiny-model-of-evolution.html"
},
{
"title": "Charting the Defeat of AV using R (and some ggplot2 and merge operations on top)",
"href": "http://www.psychwire.co.uk/2011/05/charting-the-defeat-of-av-using-r-and-some-ggplot2-and-merge-operations-on-top/"
},
{
"title": "A JAGS calculation on pattern of rain January 1906-1915 against 2003-2012",
"href": "http://wiekvoet.blogspot.com/2013/07/a-jags-calculation-on-pattern-of-rain.html"
},
{
"title": "Cellular Automata: The Beauty Of Simplicity",
"href": "https://aschinchon.wordpress.com/2014/01/14/cellular-automata-the-beauty-of-simplicity/"
},
{
"title": "Article Spotlight: Persistent data storage in Shiny apps",
"href": "https://blog.rstudio.org/2015/07/15/article-spotlight-persistent-data-storage-in-shiny-apps/"
},
{
"title": "Comparing Two Distributions",
"href": "http://thebiobucket.blogspot.com/2011/08/comparing-two-distributions.html"
},
{
"title": "Summarizing Data in R",
"href": "https://web.archive.org/web/https://mathewanalytics.com/2013/04/11/summarizing-data-in-r-using-plyr-and-reshape/"
},
{
"title": "NYT uses R to investigate NFL draft picks",
"href": "http://blog.revolutionanalytics.com/2013/05/nyt-uses-r-to-investigate-nfl-draft-picks.html"
},
{
"title": "Bubble Plots (ggplot2)",
"href": "https://web.archive.org/web/http://www.knowledgediscovery.jp/bubbleplots/"
},
{
"title": "Decisionstats/OpenCPU interview: R, D3, security, the cloud, and snacks.",
"href": "https://www.opencpu.org/"
},
{
"title": "R programming books (updated)",
"href": "https://csgillespie.wordpress.com/2011/01/28/r-programming-books-updated/"
},
{
"title": "Conway’s Game of Life in R with ggplot2 and animation",
"href": "https://web.archive.org/web/http://ramhiser.com/blog/2011/06/05/conways-game-of-life-in-r-with-ggplot2-and-animation/"
},
{
"title": "Clustering the world’s diets",
"href": "https://web.archive.org/web/http://blog.revolution-computing.com/2010/03/clustering-the-worlds-diets.html"
},
{
"title": "garch and the distribution of returns",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/KUqYcz2Bqp0/"
},
{
"title": "Multivariate Techniques in Python: EcoPy Alpha Launch!",
"href": "https://climateecology.wordpress.com/2015/08/03/multivariate-techniques-in-python-ecopy-alpha-launch/"
},
{
"title": "Some problems with the Mexican mortality database",
"href": "https://blog.diegovalle.net/2010/12/some-problems-with-mexican-mortality.html"
},
{
"title": "No more ascii-art",
"href": "http://conjugateprior.org/2013/01/no-more-ascii-art/?utm_source=rss&utm_medium=rss&utm_campaign=no-more-ascii-art"
},
{
"title": "Animated great circles 2: smoother lines",
"href": "https://blog.snap.uaf.edu/2015/04/16/animated-great-circles-2-smoother-lines/"
},
{
"title": "How Big Is The Vatican City?",
"href": "https://aschinchon.wordpress.com/2015/02/24/how-big-is-the-vatican-city/"
},
{
"title": "Where to Start with PDQ?",
"href": "http://perfdynamics.blogspot.com/2010/08/where-to-start-with-pdq.html"
},
{
"title": "Reporting Good Enough to Share",
"href": "http://timelyportfolio.blogspot.com/2011/09/reporting-good-enough-to-share.html"
},
{
"title": "DataCamp R Certifications – Now Available on Your LinkedIn Profile",
"href": "https://www.datacamp.com/community/blog/datacamp-r-certifications-now-available-linkedin-profile"
},
{
"title": "stringdist 0.8: now with soundex",
"href": "http://www.markvanderloo.eu/yaRb/2014/08/22/stringdist-0-8-now-with-soundex/"
},
{
"title": "Global Volcanic Eruptions",
"href": "http://seanmulcahy.blogspot.com/2011/12/global-volcanic-eruptions.html"
},
{
"title": "2nd CFP: the 10th Australasian Data Mining Conference (AusDM 2012)",
"href": "https://rdatamining.wordpress.com/2012/07/10/2nd-cfp-the-10th-australasian-data-mining-conference-ausdm-2012/"
},
{
"title": "choroplethr v3.0.0 is now on CRAN",
"href": "https://justanrblog.wordpress.com/2015/03/16/choroplethr-v3-0-0-is-now-on-cran/"
},
{
"title": "Real-time model scoring for streaming data – a prototype based on Oracle Stream Explorer and Oracle R Enterprise",
"href": "https://blogs.oracle.com/R/entry/real_time_model_scoring_for"
},
{
"title": "Rewriting plot.qcc using ggplot2 and grid",
"href": "https://tomhopper.me/2014/03/03/rewriting-plot-qcc-using-ggplot2-and-grid/"
},
{
"title": "Computing Power Functions",
"href": "http://davegiles.blogspot.com/2014/11/computing-power-functions.html"
},
{
"title": "Merging Two Different Datasets Containing a Common Column With R and R-Studio",
"href": "https://blog.ouseful.info/2011/08/02/merging-two-different-datasets-containing-a-common-column-with-r-and-r-studio/"
},
{
"title": "The Rise of the Samurai Pitcher",
"href": "https://feedproxy.google.com/~r/graphoftheweek/fzVA/~3/ipytcERabBk/the-rise-of-samurai-pitcher.html"
},
{
"title": "Chinese versus Japanese editions",
"href": "https://xianblog.wordpress.com/2010/03/09/chinese-versus-japanese-editions/"
},
{
"title": "IV Estimates via GMM with Clustering in R",
"href": "https://diffuseprior.wordpress.com/2014/04/01/iv-estimates-via-gmm-with-clustering-in-r/"
},
{
"title": "embeding a subplot in ggplot via subview",
"href": "https://web.archive.org/web/http://ygc.name/2015/08/31/subview/"
},
{
"title": "Computing with GPUs in R",
"href": "http://blog.revolutionanalytics.com/2015/06/computing-with-gpus-in-r.html"
},
{
"title": "Call by reference in R",
"href": "https://ctszkin.com/2011/09/11/call-by-reference-in-r/"
},
{
"title": "MPK Analytics – putting the R into analytics",
"href": "https://web.archive.org/web/http://mpkanalytics.com/2012/08/24/mpk-analytics-putting-the-r-into-analytics/"
},
{
"title": "Avoid overlapping labels in ggplot2 charts",
"href": "http://blog.revolutionanalytics.com/2016/01/avoid-overlapping-labels-in-ggplot2-charts.html"
},
{
"title": "Canabalt",
"href": "http://www.johnmyleswhite.com/notebook/2009/11/12/canabalt/"
},
{
"title": "informality, the 2010 edition",
"href": "https://web.archive.org/web/http://jackman.stanford.edu/blog/?p=1753"
},
{
"title": "Fixing Colors & Proportions in Jerusalem Post Election Graphics",
"href": "http://rud.is/b/2015/03/20/fixing-colors-proportions-in-jerusalem-post-election-graphics/"
},
{
"title": "Obama 2008 received 3x more media coverage than Sanders 2016",
"href": "http://www.econometricsbysimulation.com/2016/01/obama-2008-recieved-3x-Sanders.html"
},
{
"title": "Introducing pkgKitten: Creating R Packages that purr",
"href": "http://dirk.eddelbuettel.com/blog/2014/06/13/"
},
{
"title": "Book Review: R Object-oriented Programming",
"href": "http://r-nold.blogspot.com/2014/12/book-review-r-object-oriented.html"
}
] |
execute_safely <- function(expr, title = "Error",
message = "An error occured, detail below:",
include_error = TRUE,
error_return = NULL,
session = shiny::getDefaultReactiveDomain()) {
tryCatch(
expr = expr,
error = function(e) {
if (isTRUE(include_error)) {
message <- tags$div(
style= "text-align: left;",
message,
tags$br(),
tags$br(),
tags$pre(
style = "white-space:normal;",
tags$code(
as.character(e$message)
)
)
)
}
message("Error: ", e$message)
sendSweetAlert(
session = session,
title = title,
text = message,
html = TRUE,
type = "error"
)
return(error_return)
}
)
} |
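## Example (hypothetical; assumes a Shiny server with shinyWidgets attached):
## observeEvent(input$go, {
##   dat <- execute_safely(read.csv(input$file$datapath), error_return = NULL)
## })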
library(purge)
context("Purged model predictions")
purge_test_helper <- function(unpurged.model, purged.model, test.new.data,
predict.method=predict) {
preds.purged <- predict.method(purged.model, test.new.data)
expect_equal(length(preds.purged), nrow(test.new.data))
preds.unpurged <- predict.method(unpurged.model, test.new.data)
expect_equal(preds.purged, preds.unpurged)
}
test_that("lm purge works correctly", {
sample.size <- 1000
x <- rnorm(sample.size)
y <- rnorm(sample.size)
unpurged.model <- lm(y ~ x)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'lm')
purge_test_helper(unpurged.model, purged.model, test.new.data)
})
test_that("glm purge works correctly", {
sample.size <- 1000
x <- rnorm(sample.size)
y <- as.factor(runif(sample.size) > 0.5)
unpurged.model <- glm(y ~ x, family=binomial())
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'glm')
purge_test_helper(unpurged.model, purged.model, test.new.data)
})
test_that("merMod purge works correctly", {
if (requireNamespace('lme4', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- rnorm(sample.size)
z <- as.factor(runif(sample.size) > 0.5)
unpurged.model <- lme4::lmer(y ~ x + (1|z))
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10, z=as.factor(runif(10) > 0.5))
expect_is(purged.model, 'merMod')
purge_test_helper(unpurged.model, purged.model, test.new.data)
}
})
test_that("glmerMod purge works correctly", {
if (requireNamespace('lme4', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- as.factor(runif(sample.size) > 0.5)
z <- as.factor(runif(sample.size) > 0.5)
unpurged.model <- lme4::glmer(y ~ x + (1|z), family=binomial())
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10, z=as.factor(runif(10) > 0.5))
expect_is(purged.model, 'glmerMod')
purge_test_helper(unpurged.model, purged.model, test.new.data)
}
})
test_that("rpart purge works correctly", {
if (requireNamespace('rpart', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- x + rnorm(sample.size)
unpurged.model <- rpart::rpart(y ~ x)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'rpart')
purge_test_helper(unpurged.model, purged.model, test.new.data)
}
})
test_that("randomForest purge works correctly", {
if (requireNamespace('randomForest', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- x + rnorm(sample.size)
unpurged.model <- randomForest::randomForest(y ~ x, ntree=11)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'randomForest')
purge_test_helper(unpurged.model, purged.model, test.new.data)
}
})
test_that("ranger classification purge works correctly", {
if (requireNamespace('ranger', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- as.factor(runif(sample.size) > 0.5)
unpurged.model <- ranger::ranger(y ~ x, data.frame(x, y),
num.trees=11, write.forest=TRUE)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'ranger')
purge_test_helper(unpurged.model, purged.model, test.new.data,
predict.method=function(ranger.model, test.data) {
return(predict(ranger.model, test.data)$predictions)
})
}
})
test_that("ranger regression purge works correctly", {
if (requireNamespace('ranger', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y <- rnorm(sample.size)
unpurged.model <- ranger::ranger(y ~ x, data.frame(x, y),
num.trees=11, write.forest=TRUE)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'ranger')
purge_test_helper(unpurged.model, purged.model, test.new.data,
predict.method=function(ranger.model, test.data) {
return(predict(ranger.model, test.data)$predictions)
})
}
})
test_that("ranger survival purge works correctly", {
if (requireNamespace('ranger', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
if (requireNamespace('survival', quietly=TRUE)) {
y.time <- abs(rnorm(sample.size))
y.status <- ifelse(runif(sample.size) > 0.5, 0, 1)
unpurged.model <- ranger::ranger(survival::Surv(y.time, y.status) ~ x,
data.frame(x, y.time, y.status),
num.trees=11, write.forest=TRUE)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'ranger')
purge_test_helper(unpurged.model, purged.model, test.new.data,
predict.method=function(ranger.model, test.data) {
preds <- predict(ranger.model, test.data)$chf
return(preds[, ncol(preds)])
})
}
}
})
test_that("coxph purge works correctly", {
if (requireNamespace('survival', quietly=TRUE)) {
sample.size <- 1000
x <- rnorm(sample.size)
y.time <- abs(rnorm(sample.size))
y.status <- ifelse(runif(sample.size) > 0.5, 0, 1)
unpurged.model <- survival::coxph(survival::Surv(y.time, y.status) ~ x)
purged.model <- purge(unpurged.model)
test.new.data <- data.frame(x=1:10)
expect_is(purged.model, 'coxph')
purge_test_helper(unpurged.model, purged.model, test.new.data)
}
}) |
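## rsu.sep.rb2rf(): surveillance system sensitivity with risk-based sampling on
## two risk factors, using a binomial or hypergeometric approximation.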
rsu.sep.rb2rf <- function(N, n, rr1, ppr1, rr2, ppr2, pstar, se.u, method = "binomial") {
if(method == "binomial")
{ar1 <- rsu.adjrisk(rr1, ppr1)
ar2 <- array(0, dim = dim(rr2))
rownames(ar2) <- paste("RR1",1:length(rr1), se.p = "=")
colnames(ar2) <- paste("RR2",1:ncol(rr2), se.p = "=")
epi <- ar2
p.neg <- ar2
if(length(se.u) == 1) se.u <- array(se.u, dim = dim(rr2))
for (i in 1:length(rr1)){
ar2[i,]<- rsu.adjrisk(rr2[i,], ppr2[i,])
epi[i,]<- ar1[i] * ar2[i,] * pstar
p.neg[i,] <- (1 - epi[i,] * se.u[i,])^n[i,]
}
se.p <- 1 - prod(p.neg)
rval <- list(se.p = se.p, epi = epi, adj.risk1 = ar1, adj.risk2 = ar2)
}
else
if(method == "hypergeometric")
{ppr1 <- rowSums(N) / sum(N)
ppr2 <- array(0, dim = dim(rr2))
rownames(ppr2)<- paste("RR1",1:length(rr1), se.p = "=")
colnames(ppr2)<- paste("RR2",1:ncol(rr2), se.p = "=")
ar1 <- rsu.adjrisk(rr1, ppr1)
ar2 <- array(0, dim = dim(rr2))
rownames(ar2) <- rownames(ppr2)
colnames(ar2) <- colnames(ppr2)
epi <- ar2
p.neg <- ar2
if (length(se.u) == 1) se.u <- array(se.u, dim = dim(rr2))
for (i in 1:length(rr1)){
ppr2[i,] <- N[i,] / sum(N[i,])
ar2[i,] <- rsu.adjrisk(rr2[i,], ppr2[i,])
epi[i,] <- ar1[i] * ar2[i,] * pstar
p.neg[i,] <- (1 - se.u[i,] * n[i,] / N[i,])^(epi[i,] * N[i,])
}
se.p <- 1 - prod(p.neg)
rval <- list(se.p = se.p, epi = epi, adj.risk1 = ar1, adj.risk2 = ar2)
}
rval
} |
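## plot.vb.ppc(): posterior predictive check plots for "vb.ppc" objects. The
## Style argument selects the diagnostic (e.g. "Density", "DW", "ECDF",
## "Fitted", "Jarque-Bera", "Mardia", "Residuals", "Spatial", "Time-Series"),
## several of which require the original Data list.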
plot.vb.ppc <- function(x, Style=NULL, Data=NULL, Rows=NULL,
PDF=FALSE, ...)
{
if(missing(x)) stop("The x argument is required.")
if(class(x) != "vb.ppc") stop("x is not of class vb.ppc.")
if(is.null(Style)) Style <- "Density"
if(is.null(Rows)) Rows <- 1:nrow(x[["yhat"]])
if(Style == "Covariates") {
if(PDF == TRUE) {
pdf("PPC.Plots.Covariates.pdf")
par(mfrow=c(3,3))}
else par(mfrow=c(3,3), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Covariates.")
if(is.null(Data[["X"]]) & is.null(Data[["x"]]))
stop("X or x is required in Data.")
if(is.null(Data[["X"]]))
co <- matrix(Data[["x"]], length(Data[["x"]]), 1)
else if(is.null(Data[["x"]])) co <- Data[["X"]]
temp <- summary(x, Quiet=TRUE)$Summary
mycol <- rgb(0, 100, 0, 50, maxColorValue=255)
for (i in 1:ncol(co)) {
plot(co[Rows,i], temp[Rows,5], col=mycol, pch=16, cex=0.75,
ylim=c(min(temp[Rows,c(1,4:6)]),max(temp[Rows,c(1,4:6)])),
xlab=paste("X[,",i,"]", sep=""),
ylab="yhat",
sub="Gray lines are yhat at 2.5% and 95%.")
panel.smooth(co[Rows,i], temp[Rows,5], col=mycol, pch=16,
cex=0.75)}}
if(Style == "Covariates, Categorical DV") {
if(PDF == TRUE) {
pdf("PPC.Plots.Covariates.Cat.pdf")
par(mfrow=c(3,3))}
else par(mfrow=c(3,3), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Covariates.")
if(is.null(Data[["X"]]) & is.null(Data[["x"]]))
stop("X or x is required in Data.")
if(is.null(Data[["X"]]))
co <- matrix(Data[["x"]], length(Data[["x"]]), 1)
else if(is.null(Data[["x"]])) co <- Data[["X"]]
temp <- summary(x, Categorical=TRUE, Quiet=TRUE)$Summary
ncat <- length(table(temp[,1]))
mycol <- rgb(0, 100, 0, 50, maxColorValue=255)
for (i in 1:ncol(co)) {for (j in 2:(ncat+1)) {
plot(co[Rows,i], temp[Rows,j], col=mycol, pch=16, cex=0.75,
xlab=paste("X[,",i,"]", sep=""),
ylab=colnames(temp)[j])
panel.smooth(co[Rows,i], temp[Rows,j], col=mycol, pch=16,
cex=0.75)}}}
if(Style == "Density") {
if(PDF == TRUE) {
pdf("PPC.Plots.Density.pdf")
par(mfrow=c(3,3))}
else par(mfrow=c(3,3), ask=TRUE)
for (j in 1:length(Rows)) {
plot(density(x[["yhat"]][Rows[j],]),
main=paste("Post. Pred. Plot of yhat[", Rows[j],
",]", sep=""), xlab="Value",
sub="Black=Density, Red=y")
polygon(density(x[["yhat"]][Rows[j],]), col="black",
border="black")
abline(v=x[["y"]][Rows[j]], col="red")}}
if(Style == "DW") {
if(PDF == TRUE) pdf("PPC.Plots.DW.pdf")
par(mfrow=c(1,1))
epsilon.obs <- x[["y"]] - x[["yhat"]]
N <- nrow(epsilon.obs)
S <- ncol(epsilon.obs)
epsilon.rep <- matrix(rnorm(N*S), N, S)
d.obs <- d.rep <- rep(0, S)
for (s in 1:S) {
d.obs[s] <- sum(c(0,diff(epsilon.obs[,s]))^2, na.rm=TRUE) /
sum(epsilon.obs[,s]^2, na.rm=TRUE)
d.rep[s] <- sum(c(0,diff(epsilon.rep[,s]))^2, na.rm=TRUE) /
sum(epsilon.rep[,s]^2, na.rm=TRUE)}
result <- "no"
if(mean(d.obs > d.rep, na.rm=TRUE) < 0.025) result <- "positive"
if(mean(d.obs > d.rep, na.rm=TRUE) > 0.975) result <- "negative"
d.d.obs <- density(d.obs, na.rm=TRUE)
d.d.rep <- density(d.rep, na.rm=TRUE)
plot(d.d.obs, xlim=c(0,4),
ylim=c(0, max(d.d.obs$y, d.d.rep$y)), col="white",
main="Durbin-Watson test",
xlab=paste("d.obs=", round(mean(d.obs, na.rm=TRUE),2), " (",
round(as.vector(quantile(d.obs, probs=0.025, na.rm=TRUE)),2),
", ", round(as.vector(quantile(d.obs, probs=0.975, na.rm=TRUE)),
2), "), p(d.obs > d.rep) = ", round(mean(d.obs > d.rep,
na.rm=TRUE),3), " = ", result, " autocorrelation", sep=""))
polygon(d.d.obs, col=rgb(0,0,0,50,maxColorValue=255), border=NA)
polygon(d.d.rep, col=rgb(255,0,0,50,maxColorValue=255), border=NA)
abline(v=2, col="red")}
if(Style == "DW, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.DW.M.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
          if(is.null(Data))
               stop("Data is required for Style=DW, Multivariate, C.")
if(is.null(Data[["Y"]])) stop("Y is required in Data.")
M <- nrow(Data[["Y"]])
J <- ncol(Data[["Y"]])
epsilon.obs <- x[["y"]] - x[["yhat"]]
N <- nrow(epsilon.obs)
S <- ncol(epsilon.obs)
epsilon.rep <- matrix(rnorm(N*S), N, S)
d.obs <- d.rep <- rep(0, S)
for (j in 1:J) {
for (s in 1:S) {
d.obs[s] <- sum(c(0,diff(epsilon.obs[((j-1)*M+1):(j*M),s]))^2, na.rm=TRUE) /
sum(epsilon.obs[((j-1)*M+1):(j*M),s]^2, na.rm=TRUE)
d.rep[s] <- sum(c(0,diff(epsilon.rep[((j-1)*M+1):(j*M),s]))^2, na.rm=TRUE) /
sum(epsilon.rep[((j-1)*M+1):(j*M),s]^2, na.rm=TRUE)}
result <- "no"
if(mean(d.obs > d.rep, na.rm=TRUE) < 0.025) result <- "positive"
if(mean(d.obs > d.rep, na.rm=TRUE) > 0.975) result <- "negative"
d.d.obs <- density(d.obs, na.rm=TRUE)
d.d.rep <- density(d.rep, na.rm=TRUE)
plot(d.d.obs, xlim=c(0,4),
ylim=c(0, max(d.d.obs$y, d.d.rep$y)), col="white",
main="Durbin-Watson test",
xlab=paste("d.obs=", round(mean(d.obs, na.rm=TRUE),2), " (",
round(as.vector(quantile(d.obs, probs=0.025, na.rm=TRUE)),2),
", ", round(as.vector(quantile(d.obs, probs=0.975, na.rm=TRUE)),
2), "), p(d.obs > d.rep) = ", round(mean(d.obs > d.rep,
na.rm=TRUE),3), " = ", result, " autocorrelation", sep=""),
sub=paste("Y[,",j,"]",sep=""))
polygon(d.d.obs, col=rgb(0,0,0,50,maxColorValue=255),
border=NA)
polygon(d.d.rep, col=rgb(255,0,0,50,maxColorValue=255),
border=NA)
abline(v=2, col="red")}}
if(Style == "ECDF") {
if(PDF == TRUE) pdf("PPC.Plots.ECDF.pdf")
par(mfrow=c(1,1))
plot(ecdf(x[["y"]][Rows]), verticals=TRUE, do.points=FALSE,
main="Cumulative Fit",
xlab="y (black) and yhat (red; gray)",
ylab="Cumulative Frequency")
lines(ecdf(apply(x[["yhat"]][Rows,], 1, quantile, probs=0.975)),
verticals=TRUE, do.points=FALSE, col="gray")
lines(ecdf(apply(x[["yhat"]][Rows,], 1, quantile, probs=0.025)),
verticals=TRUE, do.points=FALSE, col="gray")
lines(ecdf(apply(x[["yhat"]][Rows,], 1, quantile, probs=0.500)),
verticals=TRUE, do.points=FALSE, col="red")}
if(Style == "Fitted") {
if(PDF == TRUE) pdf("PPC.Plots.Fitted.pdf")
par(mfrow=c(1,1))
temp <- summary(x, Quiet=TRUE)$Summary
plot(temp[Rows,1], temp[Rows,5], pch=16, cex=0.75,
ylim=c(min(temp[Rows,4], na.rm=TRUE),
max(temp[Rows,6], na.rm=TRUE)),
xlab="y", ylab="yhat", main="Fitted")
          for (i in 1:length(Rows)) {
               lines(c(temp[Rows[i],1], temp[Rows[i],1]),
                    c(temp[Rows[i],4], temp[Rows[i],6]))}
panel.smooth(temp[Rows,1], temp[Rows,5], pch=16, cex=0.75)}
if(Style == "Fitted, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.Fitted.M.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Fitted, Multivariate, C.")
if(is.null(Data[["Y"]])) stop("Y is required in Data.")
temp <- summary(x, Quiet=TRUE)$Summary
for (i in 1:ncol(Data[["Y"]])) {
temp1 <- as.vector(matrix(temp[,1], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i])
temp2 <- as.vector(matrix(temp[,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i])
temp3 <- as.vector(matrix(temp[,5], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i])
temp4 <- as.vector(matrix(temp[,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i])
plot(temp1, temp3, pch=16, cex=0.75,
ylim=c(min(temp2, na.rm=TRUE),
max(temp4, na.rm=TRUE)),
xlab=paste("Y[,", i, "]", sep=""), ylab="yhat",
main="Fitted")
for (j in 1:nrow(Data[["Y"]])) {
lines(c(temp1[j], temp1[j]),
c(temp2[j], temp4[j]))}
panel.smooth(temp1, temp3, pch=16, cex=0.75)}}
if(Style == "Fitted, Multivariate, R") {
if(PDF == TRUE) {
pdf("PPC.Plots.Fitted.M.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Fitted, Multivariate, R.")
if(is.null(Data[["Y"]])) stop("Y is required in Data.")
temp <- summary(x, Quiet=TRUE)$Summary
for (i in 1:nrow(Data[["Y"]])) {
temp1 <- as.vector(matrix(temp[,1], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,])
temp2 <- as.vector(matrix(temp[,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,])
temp3 <- as.vector(matrix(temp[,5], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,])
temp4 <- as.vector(matrix(temp[,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,])
plot(temp1, temp3, pch=16, cex=0.75,
ylim=c(min(temp2, na.rm=TRUE),
max(temp4, na.rm=TRUE)),
xlab=paste("Y[,", i, "]", sep=""), ylab="yhat",
main="Fitted")
for (j in 1:ncol(Data[["Y"]])) {
lines(c(temp1[j], temp1[j]),
c(temp2[j], temp4[j]))}
panel.smooth(temp1, temp3, pch=16, cex=0.75)}}
if(Style == "Jarque-Bera") {
if(PDF == TRUE) pdf("PPC.Plots.Jarque.Bera.pdf")
par(mfrow=c(1,1))
epsilon.obs <- epsilon.rep <- x[["y"]][Rows] - x[["yhat"]][Rows,]
kurtosis <- function(x) {
m4 <- mean((x-mean(x, na.rm=TRUE))^4, na.rm=TRUE)
kurt <- m4/(sd(x, na.rm=TRUE)^4)-3
return(kurt)}
skewness <- function(x) {
m3 <- mean((x-mean(x, na.rm=TRUE))^3, na.rm=TRUE)
skew <- m3/(sd(x, na.rm=TRUE)^3)
return(skew)}
JB.obs <- JB.rep <- rep(0, ncol(epsilon.obs))
N <- nrow(epsilon.obs)
for (s in 1:ncol(epsilon.obs)) {
epsilon.rep[,s] <- rnorm(N, mean(epsilon.obs[,s],
na.rm=TRUE), sd(epsilon.obs[,s], na.rm=TRUE))
K.obs <- kurtosis(epsilon.obs[,s])
S.obs <- skewness(epsilon.obs[,s])
K.rep <- kurtosis(epsilon.rep[,s])
S.rep <- skewness(epsilon.rep[,s])
JB.obs[s] <- (N/6)*(S.obs^2 + ((K.obs-3)^2)/4)
JB.rep[s] <- (N/6)*(S.rep^2 + ((K.rep-3)^2)/4)}
p <- round(mean(JB.obs > JB.rep, na.rm=TRUE), 3)
result <- "Non-Normality"
if((p >= 0.025) & (p <= 0.975)) result <- "Normality"
d.obs <- density(JB.obs)
d.rep <- density(JB.rep)
plot(d.obs, xlim=c(min(d.obs$x,d.rep$x), max(d.obs$x,d.rep$x)),
ylim=c(0, max(d.obs$y, d.rep$y)), col="white",
main="Jarque-Bera Test",
xlab="JB", ylab="Density",
sub=paste("JB.obs=", round(mean(JB.obs, na.rm=TRUE),2),
" (", round(as.vector(quantile(JB.obs, probs=0.025,
na.rm=TRUE)),2), ",", round(as.vector(quantile(JB.obs,
probs=0.975, na.rm=TRUE)),2), "), p(JB.obs > JB.rep) = ",
p, " = ", result, sep=""))
polygon(d.obs, col=rgb(0,0,0,50,maxColorValue=255), border=NA)
polygon(d.rep, col=rgb(255,0,0,50,maxColorValue=255), border=NA)}
if(Style == "Jarque-Bera, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.Jarque.Bera.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Jarque-Bera, Multivariate, C.")
if(is.null(Data[["Y"]])) stop("Y is required in Data.")
M <- nrow(Data[["Y"]])
J <- ncol(Data[["Y"]])
epsilon.obs <- epsilon.rep <- x[["y"]] - x[["yhat"]]
kurtosis <- function(x) {
m4 <- mean((x-mean(x, na.rm=TRUE))^4, na.rm=TRUE)
kurt <- m4/(sd(x, na.rm=TRUE)^4)-3
return(kurt)}
skewness <- function(x) {
m3 <- mean((x-mean(x, na.rm=TRUE))^3, na.rm=TRUE)
skew <- m3/(sd(x, na.rm=TRUE)^3)
return(skew)}
JB.obs <- JB.rep <- rep(0, ncol(epsilon.obs))
N <- nrow(epsilon.obs)
for (j in 1:J) {
for (s in 1:ncol(epsilon.obs)) {
e.obs <- matrix(epsilon.obs[,s], M, J)
e.rep <- rnorm(M, mean(e.obs[,j], na.rm=TRUE),
sd(e.obs[,j], na.rm=TRUE))
K.obs <- kurtosis(e.obs[,j])
S.obs <- skewness(e.obs[,j])
K.rep <- kurtosis(e.rep)
S.rep <- skewness(e.rep)
JB.obs[s] <- (N/6)*(S.obs^2 + ((K.obs-3)^2)/4)
JB.rep[s] <- (N/6)*(S.rep^2 + ((K.rep-3)^2)/4)}
p <- round(mean(JB.obs > JB.rep, na.rm=TRUE), 3)
result <- "Non-Normality"
if((p >= 0.025) & (p <= 0.975)) result <- "Normality"
d.obs <- density(JB.obs)
d.rep <- density(JB.rep)
plot(d.obs, xlim=c(min(d.obs$x,d.rep$x), max(d.obs$x,d.rep$x)),
ylim=c(0, max(d.obs$y, d.rep$y)), col="white",
main="Jarque-Bera Test",
xlab=paste("JB for Y[,",j,"]", sep=""), ylab="Density",
sub=paste("JB.obs=", round(mean(JB.obs, na.rm=TRUE),2),
" (", round(as.vector(quantile(JB.obs, probs=0.025,
na.rm=TRUE)),2), ",", round(as.vector(quantile(JB.obs,
probs=0.975, na.rm=TRUE)),2), "), p(JB.obs > JB.rep) = ",
p, " = ", result, sep=""))
polygon(d.obs, col=rgb(0,0,0,50,maxColorValue=255),
border=NA)
polygon(d.rep, col=rgb(255,0,0,50,maxColorValue=255),
border=NA)}}
if(Style == "Mardia") {
if(PDF == TRUE) pdf("PPC.Plots.Mardia.pdf")
par(mfrow=c(2,1))
          if(is.null(Data))
               stop("Data is required for Style=Mardia.")
          if(is.null(Data[["Y"]]))
               stop("Variable Y is required for Style=Mardia.")
epsilon.obs <- x[["y"]] - x[["yhat"]]
M <- nrow(Data[["Y"]])
J <- ncol(Data[["Y"]])
K3.obs <- K3.rep <- K4.obs <- K4.rep <- rep(0, ncol(epsilon.obs))
for (s in 1:ncol(epsilon.obs)) {
e.obs <- matrix(epsilon.obs[,s], M, J)
e.obs.mu <- colMeans(e.obs)
e.obs.mu.mat <- matrix(e.obs.mu, M, J, byrow=TRUE)
e.obs.stand <- e.obs - e.obs.mu.mat
S.obs <- var(e.obs)
A.obs <- t(chol(S.obs))
A.inv.obs <- solve(A.obs)
Z.obs <- t(A.inv.obs %*% t(e.obs.stand))
Dij.obs <- Z.obs %*% t(Z.obs)
D2.obs <- diag(Dij.obs)
K3.obs[s] <- mean(as.vector(Dij.obs)^3)
K4.obs[s] <- mean(D2.obs^2)
e.rep <- rmvn(M, e.obs.mu.mat, S.obs)
e.rep.mu <- colMeans(e.rep)
e.rep.mu.mat <- matrix(e.rep.mu, M, J, byrow=TRUE)
e.rep.stand <- e.rep - e.rep.mu.mat
S.rep <- var(e.rep)
A.rep <- t(chol(S.rep))
A.inv.rep <- solve(A.rep)
Z.rep <- t(A.inv.rep %*% t(e.rep.stand))
Dij.rep <- Z.rep %*% t(Z.rep)
D2.rep <- diag(Dij.rep)
K3.rep[s] <- mean(as.vector(Dij.rep)^3)
K4.rep[s] <- mean(D2.rep^2)}
p.K3 <- round(mean(K3.obs > K3.rep), 3)
p.K4 <- round(mean(K4.obs > K4.rep), 3)
K3.result <- K4.result <- "Non-Normality"
if((p.K3 >= 0.025) & (p.K3 <= 0.975)) K3.result <- "Normality"
if((p.K4 >= 0.025) & (p.K4 <= 0.975)) K4.result <- "Normality"
d.K3.obs <- density(K3.obs)
d.K3.rep <- density(K3.rep)
d.K4.obs <- density(K4.obs)
d.K4.rep <- density(K4.rep)
plot(d.K3.obs, xlim=c(min(d.K3.obs$x, d.K3.rep$x),
max(d.K3.obs$x, d.K3.rep$x)),
ylim=c(0, max(d.K3.obs$y, d.K3.rep$y)), col="white",
main="Mardia's Test of MVN Skewness",
xlab="Skewness Test Statistic (K3)", ylab="Density",
sub=paste("K3.obs=", round(mean(K3.obs, na.rm=TRUE), 2),
" (", round(quantile(K3.obs, probs=0.025, na.rm=TRUE),
2), ", ", round(quantile(K3.obs, probs=0.975,
na.rm=TRUE), 2), "), p(K3.obs > K3.rep) = ",
p.K3, " = ", K3.result, sep=""))
polygon(d.K3.obs, col=rgb(0,0,0,50,maxColorValue=255), border=NA)
polygon(d.K3.rep, col=rgb(255,0,0,50,maxColorValue=255), border=NA)
plot(d.K4.obs, xlim=c(min(d.K4.obs$x, d.K4.rep$x),
max(d.K4.obs$x, d.K4.rep$x)),
ylim=c(0, max(d.K4.obs$y, d.K4.rep$y)), col="white",
main="Mardia's Test of MVN Kurtosis",
xlab="Kurtosis Test Statistic (K4)", ylab="Density",
sub=paste("K4.obs=", round(mean(K4.obs, na.rm=TRUE), 2),
" (", round(quantile(K4.obs, probs=0.025, na.rm=TRUE),
2), ", ", round(quantile(K4.obs, probs=0.975,
na.rm=TRUE), 2), "), p(K4.obs > K4.rep) = ",
p.K4, " = ", K4.result, sep=""))
polygon(d.K4.obs, col=rgb(0,0,0,50,maxColorValue=255), border=NA)
polygon(d.K4.rep, col=rgb(255,0,0,50,maxColorValue=255), border=NA)}
if(Style == "Predictive Quantiles") {
if(PDF == TRUE) pdf("PPC.Plots.PQ.pdf")
par(mfrow=c(1,1))
temp <- summary(x, Quiet=TRUE)$Summary
mycol <- rgb(0, 100, 0, 50, maxColorValue=255)
plot(temp[Rows,1], temp[Rows,7], ylim=c(0,1), col=mycol,
pch=16, cex=0.75, xlab="y", ylab="PQ",
main="Predictive Quantiles")
panel.smooth(temp[Rows,1], temp[Rows,7], col=mycol, pch=16,
cex=0.75)
abline(h=0.025, col="gray")
abline(h=0.975, col="gray")}
if(Style == "Residual Density") {
if(PDF == TRUE) pdf("PPC.Plots.Residual.Density.pdf")
par(mfrow=c(1,1))
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
dens <- density(epsilon.summary[2,Rows], na.rm=TRUE)
plot(dens, col="black", main="Residual Density",
xlab=expression(epsilon), ylab="Density")
polygon(dens, col="black", border="black")
abline(v=0, col="red")}
if(Style == "Residual Density, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.Residual.Density.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Residual Density, Multivariate, C.")
if(is.null(Data[["Y"]]))
stop("Variable Y is required for Style=Residual Density, Multivariate, C.")
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
epsilon.500 <- matrix(epsilon.summary[2,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
for (i in 1:ncol(Data[["Y"]])) {
dens <- density(epsilon.500[,i], na.rm=TRUE)
plot(dens, col="black", main="Residual Density",
xlab=paste("epsilon[,", i, "]", sep=""),
ylab="Density")
polygon(dens, col="black", border="black")
abline(v=0, col="red")}}
if(Style == "Residual Density, Multivariate, R") {
if(PDF == TRUE) {
pdf("PPC.Plots.Residual.Density.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Residual Density, Multivariate, R.")
if(is.null(Data[["Y"]]))
stop("Variable Y is required for Style=Residual Density, Multivariate, R.")
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
epsilon.500 <- matrix(epsilon.summary[2,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
for (i in 1:nrow(Data[["Y"]])) {
dens <- density(epsilon.500[i,], na.rm=TRUE)
plot(dens, col="black", main="Residual Density",
xlab=paste("epsilon[", i, ",]", sep=""),
ylab="Density")
polygon(dens, col="black", border="black")
abline(v=0, col="red")}}
if(Style == "Residuals") {
if(PDF == TRUE) pdf("PPC.Plots.Residuals.pdf")
par(mfrow=c(1,1))
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
plot(epsilon.summary[2,Rows], pch=16, cex=0.75,
ylim=c(min(epsilon.summary[,Rows], na.rm=TRUE),
max(epsilon.summary[,Rows], na.rm=TRUE)),
xlab="y", ylab=expression(epsilon))
lines(rep(0, ncol(epsilon.summary[,Rows])), col="red")
          for (i in 1:length(Rows)) {
               lines(c(i,i), c(epsilon.summary[1,Rows[i]],
                    epsilon.summary[3,Rows[i]]), col="black")}}
if(Style == "Residuals, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.Residuals.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Residuals, Multivariate, C.")
if(is.null(Data[["Y"]]))
stop("Variable Y is required for Style=Residuals, Multivariate, C.")
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
epsilon.025 <- matrix(epsilon.summary[1,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
epsilon.500 <- matrix(epsilon.summary[2,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
epsilon.975 <- matrix(epsilon.summary[3,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
for (i in 1:ncol(Data[["Y"]])) {
plot(epsilon.500[,i], pch=16, cex=0.75,
ylim=c(min(epsilon.025[,i], na.rm=TRUE),
max(epsilon.975[,i], na.rm=TRUE)),
xlab=paste("Y[,", i, "]", sep=""), ylab=expression(epsilon))
lines(rep(0, nrow(epsilon.500)), col="red")
for (j in 1:nrow(Data[["Y"]])) {
lines(c(j,j), c(epsilon.025[j,i],
epsilon.975[j,i]), col="black")}}}
if(Style == "Residuals, Multivariate, R") {
if(PDF == TRUE) {
pdf("PPC.Plots.Residuals.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
          if(is.null(Data))
               stop("Data is required for Style=Residuals, Multivariate, R.")
          if(is.null(Data[["Y"]]))
               stop("Variable Y is required for Style=Residuals, Multivariate, R.")
epsilon <- x[["y"]] - x[["yhat"]]
epsilon.summary <- apply(epsilon, 1, quantile,
probs=c(0.025,0.500,0.975), na.rm=TRUE)
epsilon.025 <- matrix(epsilon.summary[1,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
epsilon.500 <- matrix(epsilon.summary[2,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
epsilon.975 <- matrix(epsilon.summary[3,], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))
for (i in 1:nrow(Data[["Y"]])) {
plot(epsilon.500[i,], pch=16, cex=0.75,
ylim=c(min(epsilon.025[i,], na.rm=TRUE),
max(epsilon.975[i,], na.rm=TRUE)),
xlab=paste("Y[", i, ",]", sep=""), ylab=expression(epsilon))
lines(rep(0, ncol(epsilon.500)), col="red")
for (j in 1:ncol(Data[["Y"]])) {
lines(c(j,j), c(epsilon.025[i,j],
epsilon.975[i,j]), col="black")}}}
if(Style == "Space-Time by Space") {
if(PDF == TRUE) {
pdf("PPC.Plots.SpaceTime.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Space-Time by Space.")
if(is.null(Data[["longitude"]]))
stop("Variable longitude is required in Data.")
if(is.null(Data[["latitude"]]))
stop("Variable latitude is required in Data.")
if(is.null(Data[["S"]])) stop("Variable S is required in Data.")
if(is.null(Data[["T"]])) stop("Variable T is required in Data.")
temp <- summary(x, Quiet=TRUE)$Summary
for (s in 1:Data[["S"]]) {
plot(matrix(temp[,1], Data[["S"]], Data[["T"]])[s,],
ylim=c(min(c(matrix(temp[,4], Data[["S"]], Data[["T"]])[s,],
matrix(temp[,1], Data[["S"]], Data[["T"]])[s,]), na.rm=TRUE),
max(c(matrix(temp[,6], Data[["S"]], Data[["T"]])[s,],
matrix(temp[,1], Data[["S"]], Data[["T"]])[s,]), na.rm=TRUE)),
type="l", xlab="Time", ylab="y",
main=paste("Space-Time at Space s=",s," of ",
Data[["S"]], sep=""),
sub="Actual=Black, Fit=Red, Interval=Transparent Red")
polygon(c(1:Data[["T"]],rev(1:Data[["T"]])),
c(matrix(temp[,4], Data[["S"]], Data[["T"]])[s,],
rev(matrix(temp[,6], Data[["S"]], Data[["T"]])[s,])),
col=rgb(255, 0, 0, 50, maxColorValue=255), border=FALSE)
lines(matrix(temp[,5], Data[["S"]], Data[["T"]])[s,], col="red")}}
if(Style == "Space-Time by Time") {
if(PDF == TRUE) {
pdf("PPC.Plots.SpaceTime.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Space-Time by Time.")
if(is.null(Data[["longitude"]]))
stop("Variable longitude is required in Data.")
if(is.null(Data[["latitude"]]))
stop("Variable latitude is required in Data.")
if(is.null(Data[["S"]])) stop("Variable S is required in Data.")
if(is.null(Data[["T"]])) stop("Variable T is required in Data.")
Heat <- (1-(x[["y"]]-min(x[["y"]], na.rm=TRUE)) /
max(x[["y"]]-min(x[["y"]], na.rm=TRUE), na.rm=TRUE)) * 99 + 1
Heat <- matrix(Heat, Data[["S"]], Data[["T"]])
for (t in 1:Data[["T"]]) {
plot(Data[["longitude"]], Data[["latitude"]],
col=heat.colors(120)[Heat[,t]],
pch=16, cex=0.75, xlab="Longitude", ylab="Latitude",
main=paste("Space-Time at t=",t," of ", Data[["T"]],
sep=""), sub="Red=High, Yellow=Low")}}
if(Style == "Spatial") {
if(PDF == TRUE) pdf("PPC.Plots.Spatial.pdf")
par(mfrow=c(1,1))
if(is.null(Data)) stop("Data is required for Style=Spatial.")
if(is.null(Data[["longitude"]]))
stop("Variable longitude is required in Data.")
if(is.null(Data[["latitude"]]))
stop("Variable latitude is required in Data.")
heat <- (1-(x[["y"]][Rows]-min(x[["y"]][Rows], na.rm=TRUE)) /
max(x[["y"]][Rows]-min(x[["y"]][Rows], na.rm=TRUE), na.rm=TRUE)) * 99 + 1
plot(Data[["longitude"]][Rows], Data[["latitude"]][Rows],
col=heat.colors(120)[heat],
pch=16, cex=0.75, xlab="Longitude", ylab="Latitude",
main="Spatial Plot", sub="Red=High, Yellow=Low")}
if(Style == "Spatial Uncertainty") {
if(PDF == TRUE) pdf("PPC.Plots.Spatial.Unc.pdf")
par(mfrow=c(1,1))
if(is.null(Data))
stop("Data is required for Style=Spatial Uncertainty.")
if(is.null(Data[["longitude"]]))
stop("Variable longitude is required in Data.")
if(is.null(Data[["latitude"]]))
stop("Variable latitude is required in Data.")
heat <- apply(x[["yhat"]], 1, quantile, probs=c(0.025,0.975))
heat <- heat[2,] - heat[1,]
heat <- (1-(heat[Rows]-min(heat[Rows])) /
max(heat[Rows]-min(heat[Rows]))) * 99 + 1
plot(Data[["longitude"]][Rows], Data[["latitude"]][Rows],
col=heat.colors(120)[heat],
pch=16, cex=0.75, xlab="Longitude", ylab="Latitude",
main="Spatial Uncertainty Plot",
sub="Red=High, Yellow=Low")}
if(Style == "Time-Series") {
if(PDF == TRUE) pdf("PPC.Plots.TimeSeries.pdf")
par(mfrow=c(1,1))
temp <- summary(x, Quiet=TRUE)$Summary
plot(Rows, temp[Rows,1],
ylim=c(min(temp[Rows,c(1,4)], na.rm=TRUE),
max(temp[Rows,c(1,6)], na.rm=TRUE)),
type="l", xlab="Time", ylab="y",
main="Plot of Fitted Time-Series",
sub="Actual=Black, Fit=Red, Interval=Transparent Red")
polygon(c(Rows,rev(Rows)),c(temp[Rows,4],rev(temp[Rows,6])),
col=rgb(255, 0, 0, 50, maxColorValue=255),
border=FALSE)
lines(Rows, temp[Rows,1])
lines(Rows, temp[Rows,5], col="red")}
if(Style == "Time-Series, Multivariate, C") {
if(PDF == TRUE) {
pdf("PPC.Plots.TimeSeries.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Time-Series, Multivariate.")
if(is.null(Data[["Y"]]))
stop("Variable Y is required in Data.")
temp <- summary(x, Quiet=TRUE)$Summary
for (i in 1:ncol(Data[["Y"]])) {
tempy <- matrix(temp[Rows,1], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i]
qLB <- matrix(temp[Rows,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i]
qMed <- matrix(temp[Rows,5], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i]
qUB <- matrix(temp[Rows,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i]
plot(1:length(tempy), tempy,
ylim=c(min(Data[["Y"]][,i],
matrix(temp[Rows,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i], na.rm=TRUE),
max(Data[["Y"]][,i],
matrix(temp[Rows,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[,i], na.rm=TRUE)),
type="l", xlab="Time", ylab="y",
main=paste("Time-Series ", i, " of ", ncol(Data[["Y"]]), sep=""),
sub="Actual=Black, Fit=Red, Interval=Transparent Red")
polygon(c(1:length(tempy),rev(1:length(tempy))),c(qLB,rev(qUB)),
col=rgb(255, 0, 0, 50, maxColorValue=255),
border=FALSE)
lines(1:length(tempy), tempy)
lines(1:length(tempy), qMed, col="red")}}
if(Style == "Time-Series, Multivariate, R") {
if(PDF == TRUE) {
pdf("PPC.Plots.TimeSeries.pdf")
par(mfrow=c(1,1))}
else par(mfrow=c(1,1), ask=TRUE)
if(is.null(Data))
stop("Data is required for Style=Time-Series, Multivariate.")
if(is.null(Data[["Y"]]))
stop("Variable Y is required in Data.")
temp <- summary(x, Quiet=TRUE)$Summary
for (i in 1:nrow(Data[["Y"]])) {
tempy <- matrix(temp[Rows,1], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,]
qLB <- matrix(temp[Rows,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,]
qMed <- matrix(temp[Rows,5], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,]
qUB <- matrix(temp[Rows,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,]
plot(1:length(tempy), tempy,
ylim=c(min(Data[["Y"]][i,],
matrix(temp[Rows,4], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,], na.rm=TRUE),
max(Data[["Y"]][i,],
matrix(temp[Rows,6], nrow(Data[["Y"]]),
ncol(Data[["Y"]]))[i,], na.rm=TRUE)),
type="l", xlab="Time", ylab="y",
main=paste("Time-Series ", i, " of ", nrow(Data[["Y"]]), sep=""),
sub="Actual=Black, Fit=Red, Interval=Transparent Red")
polygon(c(1:length(tempy),rev(1:length(tempy))),c(qLB,rev(qUB)),
col=rgb(255, 0, 0, 50, maxColorValue=255),
border=FALSE)
lines(1:length(tempy), tempy)
lines(1:length(tempy), qMed, col="red")}}
if(PDF == TRUE) dev.off()
} |
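## Example call (hypothetical; assumes ppc is a "vb.ppc" object):
## plot(ppc, Style = "Density", Rows = 1:9)
## plot(ppc, Style = "Fitted", PDF = TRUE)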
carto_pal = function(n = NULL, name){
if(!(name %in% rcartocolor::cartocolors$Name)){
stop(paste(name,"is not a valid palette name for color_pal\n"))
}
selected_metadata = rcartocolor::metacartocolors[rcartocolor::metacartocolors$Name == name, ]
min_n = selected_metadata$Min_n
max_n = selected_metadata$Max_n
if(is.null(n)){
n = max_n
}
if(!(n %in% min_n:max_n)){
warning(paste("Number of colors (n) in the", name,
"palette should be between", min_n, "and", max_n, "\n"))
n = max_n
}
coln = paste0("n", n)
rcartocolor::cartocolors[rcartocolor::cartocolors$Name == name, coln][[1]]
} |
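## Example: carto_pal(7, "Burg") returns the 7-colour version of the
## CARTOColors "Burg" palette; carto_pal(name = "Burg") returns its longest
## available version.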
smooth_erf <- function(matched_Y,bw,matched_w){
if (length(bw)!=1){
stop("bw should be of length 1.")
}
  smoothed_val <- stats::approx(KernSmooth::locpoly(matched_w, matched_Y,
                                                    bandwidth=bw,
                                                    gridsize=1000),
                                xout=matched_w,rule=2)$y
return(smoothed_val)
} |
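## Example (hypothetical vectors from a matched data set):
## y.smooth <- smooth_erf(matched_Y = y, bw = 0.2, matched_w = w)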
summary.bivRegion = function(object,tau = 0.95,...){
  if(sum(tau %in% object$tau) != length(tau)){return("WARNING: this coverage probability must first be estimated with bivRegion")}
bb = which((object$tau) %in% tau)
Out_points = list()
Coverage = as.numeric()
if(is.null(object$fit)){
for(i in 1:length(bb)){
pts_test = SpatialPoints(object$Y)
spol = SpatialPolygons(list(Polygons(list(Polygon(cbind(object$region[[bb[i]]][,1],object$region[[bb[i]]][,2]))),ID = " ")))
out_res = as.data.frame(object$Y[which(is.na(over(pts_test,spol))),])
Coverage[i] = 1-(nrow(out_res)/nrow(object$Y))
cuadrant_1 = which(out_res[,1] > apply(object$Y,2,mean)[1] & out_res[,2] > apply(object$Y,2,mean)[2])
cuadrant_2 = which(out_res[,1] < apply(object$Y,2,mean)[1] & out_res[,2] > apply(object$Y,2,mean)[2])
cuadrant_3 = which(out_res[,1] < apply(object$Y,2,mean)[1] & out_res[,2] < apply(object$Y,2,mean)[2])
cuadrant_4 = which(out_res[,1] > apply(object$Y,2,mean)[1] & out_res[,2] < apply(object$Y,2,mean)[2])
Out_points[[i]] = list(out_res[cuadrant_1,],out_res[cuadrant_2,],out_res[cuadrant_3,],out_res[cuadrant_4,])
names(Out_points[[i]]) = c("Both high",paste0(names(out_res)[1]," low and ",names(out_res)[2]," high"),
"Both low",paste0(names(out_res)[1]," high and ",names(out_res)[2]," low"))
}
names(Out_points) = paste0("Tau = ",object$tau[bb])
}
if(!is.null(object$fit)){
for(i in 1:length(bb)){
pts_test = SpatialPoints(object$Y)
spol = SpatialPolygons(list(Polygons(list(Polygon(cbind(object$region[[bb[i]]][,1],object$region[[bb[i]]][,2]))),ID = " ")))
data_out = as.data.frame(object$data[which(is.na(over(pts_test,spol))),])
out_res = as.data.frame(object$Y[which(is.na(over(pts_test,spol))),])
names(out_res) = c(paste0(names(data_out)[1],"-res"),
paste0(names(data_out)[2],"-res"))
Coverage[i] = 1-(nrow(out_res)/nrow(object$data))
cuadrant_1 = which(out_res[,1] > apply(object$Y,2,mean)[1] & out_res[,2] > apply(object$Y,2,mean)[2])
cuadrant_2 = which(out_res[,1] < apply(object$Y,2,mean)[1] & out_res[,2] > apply(object$Y,2,mean)[2])
cuadrant_3 = which(out_res[,1] < apply(object$Y,2,mean)[1] & out_res[,2] < apply(object$Y,2,mean)[2])
cuadrant_4 = which(out_res[,1] > apply(object$Y,2,mean)[1] & out_res[,2] < apply(object$Y,2,mean)[2])
data_out = cbind(data_out,out_res)
Out_points[[i]] = list(data_out[cuadrant_1,],data_out[cuadrant_2,],
data_out[cuadrant_3,],data_out[cuadrant_4,])
names(Out_points[[i]]) = c("Both high",paste0(names(data_out)[1]," low and ",names(data_out)[2]," high"),
"Both low",paste0(names(data_out)[1]," high and ",names(data_out)[2]," low"))
}
names(Out_points) = paste0("Tau = ",object$tau[bb])
}
return(list(Out_points = Out_points,Coverage = Coverage))
} |
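## packageLoad(): attach a package, installing it from CRAN first if necessary.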
packageLoad <- function(packName){
if(!require(packName,character.only = TRUE)){
install.packages(packName,
dependencies = TRUE,
repos = "http://cran.r-project.org")
if(!require(packName, character.only = TRUE)){
stop(paste("Package ",
packName,
"not found and its installation failed.",
sep=""))
}
}
}
createCluster = function(noCores,
logfile = "/dev/null",
export = NULL,
lib = NULL){
packageLoad("doSNOW")
cl <- makeCluster(noCores,
type = "SOCK",
outfile = logfile,
verbose = FALSE)
if(!is.null(export))clusterExport(cl, export)
  if(!is.null(lib)){
    # base lapply avoids an undeclared dependency on plyr::l_ply
    lapply(lib, function(dum){
      clusterExport(cl, "dum", envir = environment())
      clusterEvalQ(cl, library(dum, character.only = TRUE))
    })
  }
registerDoSNOW(cl)
return(cl)
}
myCluster <- createCluster(noCores = 6)
clusterApply(myCluster, x = 1:10, fun = log, base = exp(1))
stopCluster(myCluster)
substrFirst <- function(x, n){
substr(x, 1, n)
}
substrLast <- function(x, n){
substr(x, nchar(x) - n + 1, nchar(x))
}
outlier.fnc <- function(x){
rem <- which(x > median(x) + 5 * sd(x) | x < median(x) - 5 * sd(x))
if(length(rem) == 0)rem <- NA
rem
}
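## monitor(): adaptive multivariate process monitoring with SPE and T2 control
## charts on PCA, KPCA, or LLE features. Requires kernlab for method = "KPCA"
## and the lle package (calc_k, find_nn_k, find_weights) for method = "LLE".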
monitor <- function(D, method = "PCA",
train.obs.num, train.update.freq = NULL,
fix.parameters = FALSE, alpha,
rm.outlier = TRUE, rm.flagged = TRUE,
thrsh.type = "np", var.amnt.pca = 0.9,
kern.sigma = 1, kern.g = 10,
var.amnt.kpca = NULL, k = NULL, d = NULL,
k.rng = NULL, d.rng = NULL,
parallel.lle = FALSE,cpus = 2,
results.path = NULL, burn = NULL){
  ave.comps <- FALSE
  Adaptive <- !is.null(train.update.freq)
  if(!Adaptive){
    rm.flagged <- FALSE
    thrsh.type <- c("p","np")
  }
  if(method == "KPCA" & is.null(var.amnt.kpca))ave.comps <- TRUE
  if(method == "LLE" & fix.parameters){k <- 8; d <- 2}
  # Search k and d only when either is unspecified; note "&" binds tighter
  # than "|", so the original "is.null(k) | is.null(d)" condition needed
  # parentheses, and KDsearch must exist even when the search is skipped.
  KDsearch <- method == "LLE" && (is.null(k) || is.null(d))
if(method == "PCA"){
store.params=c("comps","comps.prop.var")
}
if(method == "KPCA"){
store.params <- c("comps.ave",
"comps.ave.prop.var",
"comps",
"comps.prop.var")
}
if(method == "LLE"){
store.params <- c("k",
"d",
"resid.var",
"rv.elapse.sec")
}
Results <- vector("list", length = length(thrsh.type))
names(Results) <- thrsh.type
store.vars <- c(store.params, "SPE.thrsh", "T2.thrsh", "SPE", "T2")
for(l in 1:length(Results)){
Results[[l]] <- as.data.frame(matrix(nrow = dim(D)[1],
ncol = length(store.vars)))
rownames(Results[[l]]) <- rownames(D)[1:dim(D)[1]]
colnames(Results[[l]]) <- store.vars
}
for(thrsh.i in 1:ifelse(Adaptive & length(thrsh.type)>1,2,1)){
thrsh.type.i <- thrsh.type[thrsh.i]
if(!Adaptive)thrsh.type.i <- thrsh.type
flagged.all <- array()
train.first <- 1
train.last <- train.first + train.obs.num - 1
if(!Adaptive){
train.update.freq <- dim(D)[1] - train.obs.num
}
for(i in 1:ceiling((dim(D)[1] - train.obs.num) / train.update.freq)){
if(rm.flagged == TRUE && i != 1){
rm.time.stamp <- flagged.current
print(paste("removed flagged: ", length(rm.time.stamp)))
D.tr <- D[train.first:train.last,]
if(length(rm.time.stamp)!= 0){
tack.on.time.stamp <- which(rownames(D) == rownames(D.tr)[1]) - 1
tack.on.options <- rownames(D)[which(!rownames(D)[1:tack.on.time.stamp] %in%
flagged.all)]
tack.on.us <- tack.on.options[((length(tack.on.options) + 1) -
length(rm.time.stamp)):length(tack.on.options)]
        stopifnot(length(tack.on.us) == length(rm.time.stamp))
D.tr <- rbind(D[tack.on.us,], D.tr)
rm.rows <- which(rownames(D.tr) %in% rm.time.stamp)
D.tr <- D.tr[-rm.rows,]
}
      stopifnot(dim(D.tr)[1] == train.obs.num)
print(paste("Training Window: ",
rownames(D.tr)[1],
" thru ",
rownames(D.tr)[dim(D.tr)[1]],
"; Length = ",
round(difftime(rownames(D.tr)[dim(D.tr)[1]],
rownames(D.tr)[1], units = "days"), 1),
" days", sep = ""))
}
if(rm.flagged == FALSE | i == 1){
D.tr <- D[train.first:train.last,]
if(!is.null(burn) & i == 1){
D.tr <- D.tr[-burn,]
if("p" %in% thrsh.type.i){
Results[["p"]] <- Results[["p"]][-burn,]
}
if("np" %in% thrsh.type.i){
Results[["np"]] <- Results[["np"]][-burn,]
}
}
print(paste("Training Window: ",
rownames(D.tr)[1],
" thru ",
rownames(D.tr)[dim(D.tr)[1]],
"; Length = ",
round(difftime(rownames(D.tr)[dim(D.tr)[1]],
rownames(D.tr)[1], units = "days"), 1),
" days", sep = ""))
}
rm.us <- unique(as.vector(unlist(apply(as.data.frame(D.tr), 2,
outlier.fnc))))
    rm.us <- rm.us[!is.na(rm.us)]  # "-which()" would drop every index when no NAs are present
if(length(rm.us) > 0)D.tr <- D.tr[-rm.us,]
monitor.first <- train.last + 1
monitor.last <- monitor.first + train.update.freq - 1
monitor.last <- ifelse(monitor.last <= dim(D)[1],
monitor.last,
dim(D)[1])
D.m <- D[monitor.first:monitor.last,]
print(paste("Monitor Window: ",
rownames(D.m)[1],
" thru ",
rownames(D.m)[dim(D.m)[1]],
"; Length = ",
round(difftime(rownames(D.m)[dim(D.m)[1]],
rownames(D.m)[1],
units="days"), 1),
" days", sep = ""))
Mean <- apply(D.tr, 2, function(x) mean(x, na.rm = TRUE))
SD <- apply(D.tr, 2, function(x) sd(x, na.rm = TRUE))
X <- scale(D.tr)
p <- dim(X)[2]
n <- dim(X)[1]
if(method == "PCA"){
{
R <- cor(X, use = "pairwise.complete.obs")
eigenR <- eigen(R)
evalR <- eigenR$values
evecR <- eigenR$vectors
prop.var <- as.matrix(cumsum(evalR) / sum(evalR) * 100)
comps <- which(prop.var - (var.amnt.pca * 100) > 0)[1]
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m),"comps"] <- rep(comps,
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m),"comps.prop.var"] <- rep(prop.var[comps],
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m),"comps"] <- rep(comps,
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m),"comps.prop.var"] <- rep(prop.var[comps],
length(monitor.first:monitor.last))
}
{
P <- evecR[, 1:comps, drop = FALSE]
if(comps == 1){
Lambda <- evalR[1]
}else{
Lambda <- diag(evalR[1:comps])
}
PCs <- X %*% P
X.hat <- PCs %*% t(P)
E <- X - X.hat
SPEs <- diag(E %*% t(E))
T2s <- diag(PCs %*% solve(Lambda) %*% t(PCs))
}
if(i == 1){
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.tr), c("SPE","T2")] <- cbind(SPEs,T2s)
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.tr), c("SPE","T2")] <- cbind(SPEs,T2s)
}
}
{
if("p" %in% thrsh.type.i){
Thetas <- array(dim = 3)
for(ii in 1:length(Thetas)){
Thetas[ii] <- sum((evalR[(comps + 1):length(evalR)]) ^ ii)
}
h0 <- 1 - (2 / 3 * Thetas[1] * Thetas[3] / Thetas[2] ^ 2)
c.alpha <- qnorm(1 - alpha)
term1 <- h0 * c.alpha * sqrt(2 * Thetas[2]) / Thetas[1]
term2 <- Thetas[2] * h0 * (h0 - 1) / Thetas[1] ^ 2
SPE.lim.p <- Thetas[1] * (term1 + 1 + term2) ^ (1 / h0)
Results[["p"]][rownames(D.m),"SPE.thrsh"] <- rep(SPE.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
SPE.np.dens <- density(SPEs,
bw = "SJ",
kernel = "gaussian",
from = 0)
SPE.lim.np <- quantile(SPE.np.dens, 1 - alpha)
Results[["np"]][rownames(D.m),"SPE.thrsh"] <- rep(SPE.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
SPE.lim <- ifelse(thrsh.type.i == "p", SPE.lim.p, SPE.lim.np)
}
if("p" %in% thrsh.type.i){
T2.lim.p <- ((n ^ 2 - 1) * comps) /
(n * (n - comps)) * qf((1 - alpha), comps, n - comps)
Results[["p"]][rownames(D.m),"T2.thrsh"] <- rep(T2.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
T2s.np.dens <- density(T2s,
bw = "SJ",
kernel = "gaussian",
from = 0)
T2.lim.np <- quantile(T2s.np.dens, 1 - alpha)
Results[["np"]][rownames(D.m),"T2.thrsh"] <- rep(T2.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
T2.lim <- ifelse(thrsh.type.i == "p", T2.lim.p, T2.lim.np)
}
}
{
X.new <- scale(D.m, center = Mean, scale = SD)
spe.online <- array()
t2.online <- array()
for(j in 1:dim(X.new)[1]){
xnew <- as.vector(X.new[j,])
xproj <- xnew %*% P
xhat <- xproj %*% t(P)
r <- t(xnew - xhat)
SPE.new <- t(r) %*% r
spe.online[j] <- SPE.new
T2.new <- t(xnew) %*% P %*% solve(Lambda) %*% t(P) %*% xnew
t2.online[j] <- T2.new
}
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m),"SPE"] <- spe.online
Results[["p"]][rownames(D.m),"T2"] <- t2.online
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m),"SPE"] <- spe.online
Results[["np"]][rownames(D.m),"T2"] <- t2.online
}
}
if(method == "KPCA"){
{
X.df <- as.data.frame(X)
sig.rbf <- 1 / (kern.g * (kern.sigma ^ 2) * dim(X.df)[2])
rbf <- kernlab::rbfdot(sigma = sig.rbf)
K <- kernelMatrix(rbf, as.matrix(X.df));
I <- diag(dim(K)[1])
E <- rep((dim(K)[1]) ^ (-1 / 2), dim(K)[1])
K.c <- (I - E %*% t(E)) %*% K %*% (I - E %*% t(E))
Eig.K <- eigen(K.c)
e.val <- Eig.K$values
e.vec <- Eig.K$vectors
if(class(e.val) == "complex"){
if(max(abs(Im(e.val))) > 10 ^ -9){
stop("Complex Eig.K$values")
}else{
e.val <- Re(e.val)
e.vec <- Re(e.vec)
}
}
chk <- which(e.val < 0)
if(length(chk) > 0){
          if(max(abs(e.val[chk])) > 10 ^ -9){
stop("Negative values in Eig.K$values")
}else{
e.val[chk] <- abs(e.val[chk])
}
}
n.tilde <- which(cumsum(e.val) / sum(e.val) >= 0.99)[1]
ys <- t(apply(e.vec[,1:n.tilde], 1, function(x){
sqrt(e.val[1:n.tilde]) * x
}))
if(fix.parameters)comps <- 2
if(!fix.parameters){
            ave.eig <- mean(e.val)
            comps.ave <- length(which(e.val >= ave.eig))
            prop.var <- as.matrix(cumsum(e.val) / sum(e.val) * 100)
            comps.var <- which(prop.var - (var.amnt.kpca * 100) > 0)[1]
            comps <- ifelse(ave.comps == TRUE, comps.ave, comps.var)
if(comps > n.tilde){
print("Warning: more KPCA's than dim(F)")
print(paste("comps = ",
comps,
"; n.tilde = ",
n.tilde,
sep = ""))
if(comps.var < n.tilde){
comps <- comps.var
}else{
comps <- n.tilde - 1
}
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m), "comps.ave"] <- rep(comps.ave,
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m), "comps.ave.prop.var"] <- rep(prop.var[comps.ave],
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m), "comps"] <- rep(comps.var,
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m), "comps.prop.var"] <- rep(prop.var[comps.var],
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m), "comps.ave"] <- rep(comps.ave,
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m), "comps.ave.prop.var"] <- rep(prop.var[comps.ave],
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m), "comps"] <- rep(comps.var,
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m), "comps.prop.var"] <- rep(prop.var[comps.var],
length(monitor.first:monitor.last))
}
}
Lambda <- (1 / n) * diag(e.val[1:comps])
T2s <- diag(ys[,1:comps] %*% solve(Lambda) %*% t(ys[,1:comps]))
SPEs <- diag(ys[,1:n.tilde] %*% t(ys[,1:n.tilde])) -
diag(ys[,1:comps] %*% t(ys[,1:comps]))
}
if(i == 1){
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.tr), c("SPE","T2")] <- cbind(SPEs, T2s)
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.tr), c("SPE","T2")] <- cbind(SPEs, T2s)
}
}
{
{
if("p" %in% thrsh.type.i){
a <- mean(SPEs)
b <- var(SPEs)
g <- b / (2 * a)
h <- (2 * a ^ 2) / b
SPE.lim.p <- g * qchisq(p = 1 - alpha, df = h)
{
}
Results[["p"]][rownames(D.m), "SPE.thrsh"] <- rep(SPE.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
SPE.np.dens <- density(SPEs,
bw = "SJ",
kernel = "gaussian",
from = 0)
SPE.lim.np <- quantile(SPE.np.dens, 1 - alpha)
{
}
Results[["np"]][rownames(D.m), "SPE.thrsh"] <- rep(SPE.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
SPE.lim <- ifelse(thrsh.type.i == "p", SPE.lim.p, SPE.lim.np)
}
}
{
if("p" %in% thrsh.type.i){
          T2.lim.p <- ((n ^ 2 - 1) * comps) /
            (n * (n - comps)) * qf(1 - alpha, comps, n - comps)
{
}
Results[["p"]][rownames(D.m),"T2.thrsh"] <- rep(T2.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
T2s.np.dens <- density(T2s,
bw = "SJ",
kernel = "gaussian",
from = 0)
T2.lim.np <- quantile(T2s.np.dens, 1 - alpha)
{
}
Results[["np"]][rownames(D.m),"T2.thrsh"] <- rep(T2.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
T2.lim <- ifelse(thrsh.type.i == "p", T2.lim.p, T2.lim.np)
}
}
}
{
kernel.vec <- function(x, y, sig = 1){
x <- as.matrix(x); y <- as.matrix(y)
exp(-(x-y) %*% t(x-y) * sig)
}
X.new <- scale(D.m, center = Mean, scale = SD)
em.online <- matrix(nrow = dim(X.new)[1], ncol = comps)
spe.online <- array()
t2.online <- array()
for(k in 1:dim(X.new)[1]){
x.new <- t(as.vector(X.new[k,]))
k.vec <- apply(X.df, 1, function(x){
kernel.vec(x.new, t(x), sig.rbf)
})
k.vec.c <- k.vec - rep(1 / n, n) %*% K %*% (I - E %*% t(E))
y.new <- 1 / sqrt(e.val) * t(e.vec) %*% t(k.vec.c)
em.online[k,] <- y.new[1:comps]
{
}
T2.new <- t(y.new[1:comps]) %*% solve(Lambda) %*% y.new[1:comps]
t2.online[k] <- T2.new
SPE.new <- t(y.new)[1:n.tilde] %*% y.new[1:n.tilde] -
t(y.new)[1:comps] %*% y.new[1:comps]
spe.online[k] <- SPE.new
}
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m), "SPE"] <- spe.online
Results[["p"]][rownames(D.m),"T2"] <- t2.online
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m),"SPE"] <- spe.online
Results[["np"]][rownames(D.m),"T2"] <- t2.online
}
}
if(method == "LLE"){
if(KDsearch){
kd.matrix <- matrix(nrow = length(d.rng), ncol = length(k.rng))
rv.start.time <- proc.time()
for(j in 1:length(d.rng)){
if(parallel.lle){
ks <- calc_k(X, m = d.rng[j], k.rng[1], k.rng[length(k.rng)],
plotres = FALSE, parallel = TRUE, cpus = cpus)
}
if(!parallel.lle){
ks <- calc_k(X, m = d.rng[j], k.rng[1], k.rng[length(k.rng)],
plotres = FALSE)
}
if(dim(ks)[1] < dim(kd.matrix)[2]){
ks <- c(ks[,2], rep(NA, dim(kd.matrix)[2] - dim(ks)[1]))
kd.matrix[j,] <- ks
}else{
kd.matrix[j,] <- ks[,2]
}
}
rv.end.time <- proc.time()
rv <- min(kd.matrix, na.rm = TRUE)
kd.best <- which(kd.matrix == rv, arr.ind = TRUE)
d <- d.rng[kd.best[1,1]]
k <- k.rng[kd.best[1,2]]
}
if(!KDsearch){
rv.start.time <- proc.time()
hld <- calc_k(X, m = d, kmin = k, kmax = k, plotres = FALSE)
rv <- hld[[2]]
rv.end.time <- proc.time()
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m), "k"] <- rep(k,
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m), "d"] <- rep(d,
length(monitor.first:monitor.last))
Results[["p"]][rownames(D.m), "resid.var"] <- rep(rv,
length(monitor.first:monitor.last))
rv.calc.time <- rv.end.time - rv.start.time
Results[["p"]][rownames(D.m), "rv.elapse.sec"] <- rep(rv.calc.time[3],
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m), "k"] <- rep(k,
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m), "d"] <- rep(d,
length(monitor.first:monitor.last))
Results[["np"]][rownames(D.m), "resid.var"] <- rep(rv,
length(monitor.first:monitor.last))
rv.calc.time <- rv.end.time - rv.start.time
Results[["np"]][rownames(D.m), "rv.elapse.sec"] <- rep(rv.calc.time[3],
length(monitor.first:monitor.last))
}
{
{
NNs <- find_nn_k(X, k, iLLE = FALSE)
Weights <- find_weights(NNs, X, d, ss = FALSE, id = FALSE)
W <- Weights$wgts; dim(W)
I <- diag(dim(W)[1])
M <- t(I - W) %*% (I - W)
Eig.M <- eigen(M)
e.vecs <- Eig.M$vector[,-dim(M)[2]]
e.vecs <- e.vecs[,dim(e.vecs)[2]:1]
e.vals <- Eig.M$values[-dim(M)[2]]
e.vals <- e.vals[length(e.vals):1]
if(class(e.vals) == "complex"){
if(max(abs(Im(e.vals))) > 10 ^ -9){
stop("Complex eigenvalues")
}else{
e.vals <- Re(e.vals)
e.vecs <- Re(e.vecs)
}
}
chk <- which(e.vals < 0)
if(length(chk) > 0){
            if(max(abs(e.vals[chk])) > 10 ^ -9){
stop("Negative values in eigenvalues")
}else{
e.vals[chk] <- abs(e.vals[chk])
}
}
e.vals.matrix <- rbind(e.vals, e.vals)
e.vals.matrix <- e.vals.matrix[rep(1,n),]
ys <- e.vecs[,1:d]
ys.klle <- sqrt(e.vals.matrix) * (e.vecs)
}
n.tilde <- which(cumsum(e.vals) / sum(e.vals) >= 0.99)[1]
SPEs <- diag(ys.klle[,1:n.tilde] %*% t(ys.klle[,1:n.tilde])) -
diag(ys.klle[,1:d] %*% t(ys.klle[,1:d]))
{
}
{
if(d > 1){
Lambda <- diag(e.vals[1:d] / (n - 1))
T2s <- diag(ys.klle[,1:d] %*% solve(Lambda) %*% t(ys.klle[,1:d]))
}else{
Lambda <- e.vals[1:d] / (n - 1)
T2s <- diag(ys.klle[,1:d] %*% solve(Lambda) %*% t(ys.klle[,1:d]))
}
{
}
}
}
if(i == 1){
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.tr), c("SPE", "T2")] <- cbind(SPEs, T2s)
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.tr), c("SPE", "T2")] <- cbind(SPEs, T2s)
}
}
{
{
if("p" %in% thrsh.type.i){
a <- mean(SPEs)
b <- var(SPEs)
g <- b / (2 * a)
h <- 2 * a ^ 2 / b
SPE.lim.p <- g * qchisq(p = 1 - alpha, df = h)
{
}
Results[["p"]][rownames(D.m), "SPE.thrsh"] <- rep(SPE.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
SPE.np.dens <- density(SPEs, bw = "SJ",
kernel = "gaussian", from = 0)
SPE.lim.np <- quantile(SPE.np.dens, 1 - alpha)
{
}
Results[["np"]][rownames(D.m), "SPE.thrsh"] <- rep(SPE.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
SPE.lim <- ifelse(thrsh.type.i == "p", SPE.lim.p, SPE.lim.np)
}
}
{
if("p" %in% thrsh.type.i){
T2.lim.p <- (n ^ 2 - 1) * d / (n * (n - d)) *
qf(1 - alpha, d, n - d)
{
}
Results[["p"]][rownames(D.m), "T2.thrsh"] <- rep(T2.lim.p,
length(monitor.first:monitor.last))
}
if("np" %in% thrsh.type.i){
T2s.np.dens <- density(T2s, bw = "SJ",
kernel = "gaussian", from = 0)
T2.lim.np <- quantile(T2s.np.dens, 1 - alpha)
{
}
Results[["np"]][rownames(D.m), "T2.thrsh"] <- rep(T2.lim.np,
length(monitor.first:monitor.last))
}
if(length(thrsh.type.i) == 1){
T2.lim <- ifelse(thrsh.type.i == "p", T2.lim.p, T2.lim.np)
}
}
}
{
X.new <- scale(D.m, center = Mean, scale = SD)
em.online <- matrix(nrow = dim(X.new)[1], ncol = d)
spe.online <- array()
t2.online <- array()
for(j in 1:dim(X.new)[1]){
{
x.new <- as.vector(X.new[j,])
dists <- sqrt(colSums((t(X) - x.new) ^ 2))
NNs.xnew.rows <- order(dists)[1:k]
NNs.xnew <- X[NNs.xnew.rows,]
{
}
ws.i <- array()
Q <- matrix(nrow = k, ncol = k)
xi <- as.numeric(x.new)
nns <- NNs.xnew.rows
for(jj in 1:dim(Q)[1]){
for(q in 1:dim(Q)[1]){
Q[jj, q] <- t(xi - X[nns[jj],]) %*% (xi - X[nns[q],])
}
}
for(jj in 1:dim(Q)[1]){
Q.new <- Q + diag(k) * (0.1 ^ 2 / k) * sum(diag(Q))
num <- sum(solve(Q.new)[jj,])
den <- sum(solve(Q.new))
ws.i[jj] <- num / den
}
            stopifnot(isTRUE(all.equal(sum(ws.i), 1)))  # reconstruction weights must sum to 1
if(d > 1){
y.new <- t(ys[NNs.xnew.rows,]) %*% ws.i
}
if(d == 1){
y.new <- t(ys[NNs.xnew.rows]) %*% ws.i
}
{
}
}
{
K.new <- rep(0, n)
K.new[NNs.xnew.rows] <- ws.i
y.ks <- t(e.vecs) %*% K.new
y.new.klle <- sqrt(e.vals) * y.ks
T2.new <- t(y.new.klle[1:d]) %*%
solve(Lambda) %*%
y.new.klle[1:d]
t2.online[j] <- T2.new
{
}
SPE.new <- t(y.new.klle)[1:n.tilde] %*% y.new.klle[1:n.tilde] -
t(y.new.klle)[1:d] %*% y.new.klle[1:d]
spe.online[j] <- SPE.new
{
}
}
}
}
if("p" %in% thrsh.type.i){
Results[["p"]][rownames(D.m), "SPE"] <- spe.online
Results[["p"]][rownames(D.m), "T2"] <- t2.online
}
if("np" %in% thrsh.type.i){
Results[["np"]][rownames(D.m), "SPE"] <- spe.online
Results[["np"]][rownames(D.m), "T2"] <- t2.online
}
}
if(!is.null(results.path)){
save(Results, file=results.path)
}
if(rm.flagged){
SPE.flagged <- rownames(D.m)[which(spe.online > SPE.lim)]
T2.flagged <- rownames(D.m)[which(t2.online > T2.lim)]
flagged.current <- union(SPE.flagged, T2.flagged)
flagged.all <- c(flagged.current, flagged.all)
if(any(is.na(flagged.all))){
flagged.all <- flagged.all[-which(is.na(flagged.all))]
}
}
train.first <- train.first + train.update.freq
train.last <- train.last + train.update.freq
}
}
return(Results)
} |
fortify.tis <- function (x, offset = 0.5, dfNames = NULL, timeName = "date"){
if(is.null(dfNames)){
    if(length(dim(x)) < 2) dfNames <- as.character(substitute(x))
    else dfNames <- NA
}
assign(timeName, as.Date(POSIXct(ti(x), offset = offset)))
df.x <- as.data.frame(as.matrix(x))
df <- data.frame(df.x, get(timeName))
if(!is.na(dfNames[1])) names(df) <- c(dfNames, timeName)
else names(df) <- c(names(df.x), timeName)
return(df)
} |
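# Usage sketch (illustrative series; see the 'tis' package for constructor details):
#   x <- tis::tis(rnorm(12), start = 20200101, tif = "monthly")
#   fortify.tis(x)   # data.frame with a 'date' column, ready for ggplot2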
context("Testing config_default")
test_that("Testing that config_default functions work", {
skip_on_github_actions()
skip_on_cran()
docker_config <- create_docker_config()
set_default_config(docker_config, permanent = FALSE)
expect_equal(getOption("babelwhale_config"), docker_config)
singularity_config <- create_singularity_config(cache_dir = tempdir())
set_default_config(singularity_config, permanent = FALSE)
expect_equal(getOption("babelwhale_config"), singularity_config)
set_default_config(NULL, permanent = FALSE)
expect_null(getOption("babelwhale_config"))
orig_config <- get_default_config()
on.exit({
set_default_config(orig_config, permanent = TRUE)
})
set_default_config(docker_config, permanent = TRUE)
expect_equal(get_default_config(), docker_config)
set_default_config(singularity_config, permanent = TRUE)
expect_equal(get_default_config(), singularity_config)
}) |
test_that("uninstall() unloads and removes from library", {
withr::local_temp_libpaths()
install(test_path("testHelp"), quiet = TRUE)
expect_true(require(testHelp, quietly = TRUE))
expect_true("testHelp" %in% loaded_packages()$package)
uninstall(test_path("testHelp"), quiet = TRUE)
expect_false("testHelp" %in% loaded_packages()$package)
suppressWarnings(expect_false(require(testHelp, quietly = TRUE)))
}) |
set_panel_size <- function(p = NULL, g = ggplot2::ggplotGrob(p), file = NULL,
                           margin = grid::unit(1, "mm"), width = grid::unit(4, "cm"),
                           height = grid::unit(4, "cm")) {
panels <- grep("panel", g$layout$name)
panel_index_w <- unique(g$layout$l[panels])
panel_index_h <- unique(g$layout$t[panels])
nw <- length(panel_index_w)
nh <- length(panel_index_h)
g$widths[panel_index_w] <- rep(width, nw)
g$heights[panel_index_h] <- rep(height, nh)
if (!is.null(file)) {
ggplot2::ggsave(file, g, width = grid::convertWidth(sum(g$widths) + margin, unitTo = "in",
valueOnly = TRUE), height = grid::convertHeight(sum(g$heights) + margin, unitTo = "in",
valueOnly = TRUE))
}
g
} |
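# Usage sketch (assumes ggplot2 and grid; object names are illustrative):
#   p <- ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) + ggplot2::geom_point()
#   g <- set_panel_size(p, width = grid::unit(6, "cm"), height = grid::unit(4, "cm"))
#   grid::grid.newpage(); grid::grid.draw(g)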
context("SCRIPsimu")
data(acinar.data)
data(params_acinar)
test_that("SCRIPSimulate output is valid", {
SCRIPsimu(data=acinar.data, params=params_acinar)
}) |
siteymlgen_parsenav <- function(.yml, dir=NULL,
navbar_title=NULL, left=NULL, right =NULL, ...){
  if(!is.null(right)){
    left <- NULL
  }else{
    right <- NULL
  }
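  # Naming convention assumed by the parsing below (inferred from the code):
  # pages are files such as "A1_home.Rmd", "B1_intro.Rmd", "B2_more.Rmd", where
  # the leading capital letter groups pages into one navbar entry (a dropdown
  # menu when the letter occurs more than once) and the digit orders the pages
  # within that group.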
  infiles <- list.files(dir, pattern = "md$")
files_dict <- vector(mode="list", length=length(infiles))
names(files_dict) <- as.character(stringr::str_extract(infiles, "[A-Z]"))
for (infile in infiles){
letter <- stringr::str_extract(infile, "^[A-Z]")
number <- as.integer(stringr::str_extract(infile, "[1-9]+"))
files_dict[[letter]] <- number
}
files_dict <- files_dict[!sapply(files_dict, is.null)]
files_dict <- files_dict[!sapply(files_dict, is.na)]
cond <- lapply(files_dict, function(x) x > 1)
more_than_one <- paste0(names(files_dict[as.logical(cond)]), collapse="")
equal_to_one <- paste0(names(files_dict[!as.logical(cond)]), collapse = "")
navbar_list <- list(ymlthis::navbar_page("Home", href = "index.html"))
menu_list = list()
for(infile in infiles){
sensortext <- readChar(paste0(dir, "/",infile), file.info(paste0(dir, "/",infile))$size)
icon <- stringr::str_replace(stringr::str_extract(sensortext, "icon: (.+)"), "icon: ", "")
icon <- replace(icon, is.na(icon), '~')
infile.replace <- gsub(infile, pattern = "\\.\\S+$", replacement = "")
if (infile.replace == "index"){
}else if(!grepl(stringr::str_extract(infile.replace, "[A-Z]"),more_than_one)){
name <- stringr::str_extract(infile.replace, "\\S+")
name <- stringr::str_replace(name, "[A-Z][1-9]_", "")
href_infile <- paste0(name, ".html", sep="")
navbar_list <- rlist::list.append(navbar_list,
siteymlgen_navbar_page(name, href = href_infile, icon=icon))
}else{
name <- stringr::str_extract(infile.replace, "\\S+")
name <- stringr::str_replace(name, "[A-Z][1-9]_", "")
href_infile <- paste0(name, ".html", sep="")
      file_no <- as.integer(stringr::str_extract(infile.replace, "[1-9]"))
file_letter <- file_value <- stringr::str_extract(infile.replace, "[A-Z]")
max_files <- unlist(files_dict)[file_letter][[1]]
if (grepl("[A-Z]1", infile)){
first_name = name
first_href = href_infile
menu_list <- rlist::list.append(menu_list,
siteymlgen_navbar_page(name, href = href_infile, icon=icon))
} else if(file_no < max_files){
menu_list <- rlist::list.append(menu_list,
siteymlgen_navbar_page(name, href = href_infile, icon=icon))
}else if(file_no == max_files){
menu_list <- rlist::list.append(menu_list,
siteymlgen_navbar_page(name, href = href_infile, icon=icon))
navbar_list <- rlist::list.append(navbar_list, siteymlgen_navbar_page(first_name, href = first_href, menu=menu_list, icon=icon))
menu_list = list()
}
}
}
siteymlgen_navbar_init(left=left, right=right, navbar_list=navbar_list, navbar_title=navbar_title)
} |
find.tree.popset<-function(fstats,f3.zcore.threshold=-1.65,f4.zscore.absolute.threshold=1.96,excluded.pops=NULL,nthreads=1,verbose=TRUE){
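  # Strategy: keep the quadruplets passing the F4 treeness test (|Z-score|
  # below f4.zscore.absolute.threshold), drop populations with a significant
  # admixture signal (F3 Z-score below f3.zcore.threshold), then greedily grow
  # the largest population sets in which every quadruplet is tree-compatible.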
if(nthreads>1){
tmp.ncores=detectCores()
if(nthreads>tmp.ncores){nthreads=tmp.ncores}
options(cores=nthreads)
registerDoParallel() ; getDoParWorkers()
parallel=TRUE
}else{parallel=FALSE}
  if(!(is.fstats(fstats))){stop("Input data should be an object of class fstats (see the function compute.fstats)\n")}
  if(!fstats@blockjacknife){stop("fstats object must contain estimates of Z-score for F3 and F4-based criteria to be evaluated (see compute.fstats that should be run with nsnp.per.bjack.block>0 to allow block-jackknife estimates of s.e. of the estimates)\n")}
  if(nrow(fstats@f3.values)==0){stop("fstats object must contain estimates of F3 (see compute.fstats that should be run with computeF3=TRUE)\n")}
  if(nrow(fstats@f4.values)==0){stop("fstats object must contain estimates of F4 (see compute.fstats that should be run with computeF4=TRUE)\n")}
f3.pop.names=fstats@comparisons[["F3"]]
f4.pop.names=fstats@comparisons[["F4"]]
  comp.kept=abs(fstats@f4.values[,"Z-score"])<f4.zscore.absolute.threshold
if(sum(comp.kept)==0){
stop("No quadruplets could be found (none passing the F4 treeness test)\n")
}
if(sum(comp.kept)==1){
pop.quadruplets=t(matrix(f4.pop.names[comp.kept,]))
}else{
pop.quadruplets=f4.pop.names[comp.kept,]
}
if(!is.null(excluded.pops)){
comp.kept=apply(pop.quadruplets,1,f<-function(x){sum(x %in% excluded.pops)==0})
if(sum(comp.kept)==0){
stop("No quadruplets left after excluding those involving populations in the vector excluded.pops\n")
}
if(sum(comp.kept)==1){
pop.quadruplets=t(matrix(pop.quadruplets[comp.kept,]))
}else{
pop.quadruplets=pop.quadruplets[comp.kept,]
}
}
  comp.elim=fstats@f3.values[,"Z-score"]<f3.zcore.threshold
if(sum(comp.elim)>0){
pop.elim=unique(f3.pop.names[comp.elim,1])
comp.kept=apply(pop.quadruplets,1,f<-function(x){sum(x %in% pop.elim)==0})
if(sum(comp.kept)==0){
stop("No quadruplets remaining after F3 filtering\n")
}else{
if(sum(comp.kept)==1){
pop.quadruplets=t(matrix(pop.quadruplets[comp.kept,]))
}else{
pop.quadruplets=pop.quadruplets[comp.kept,]
}
}
}
n.quads=nrow(pop.quadruplets)
if(n.quads==1){
subset.pops=pop.quadruplets
}else{
all.pops=unique(as.character(pop.quadruplets))
mat.f4.ok=t(apply(pop.quadruplets,1,f<-function(x){all.pops %in% x}))
colnames(mat.f4.ok)=all.pops
subset.new=pop.quadruplets
while(nrow(subset.new)>0 & ncol(subset.new)<length(all.pops)){
if(verbose){cat("Number of sets:",nrow(subset.new),"of Npops=",ncol(subset.new),"each\n")}
all.pops=unique(as.character(subset.new))
subset.cur=subset.new
subset.length=ncol(subset.cur)
if(parallel){
subset.new=foreach(i=1:nrow(subset.cur),.combine=c) %dopar% {
tmp.subset=subset.cur[i,]
tmp.pop.test=all.pops[!(all.pops %in% tmp.subset)]
tmp.subset.triplets=combn(tmp.subset,3)
tmp.ntriplets=ncol(tmp.subset.triplets)
tmp.subset.new=c()
for(j in tmp.pop.test){
tmp.cnt=0
for(k in 1:ncol(tmp.subset.triplets)){
if(sum(rowSums(mat.f4.ok[,c(tmp.subset.triplets[,k],j)])==4)>0){tmp.cnt=tmp.cnt+1}
}
if(tmp.cnt==tmp.ntriplets){tmp.subset.new=c(tmp.subset.new,paste(sort(c(tmp.subset,j)),collapse=":"))}
}
tmp.subset.new
}
}else{
subset.new=c()
for(i in 1:nrow(subset.cur)){
tmp.subset=subset.cur[i,]
tmp.pop.test=all.pops[!(all.pops %in% tmp.subset)]
tmp.subset.triplets=combn(tmp.subset,3)
tmp.ntriplets=ncol(tmp.subset.triplets)
for(j in tmp.pop.test){
tmp.cnt=0
for(k in 1:ncol(tmp.subset.triplets)){
if(sum(rowSums(mat.f4.ok[,c(tmp.subset.triplets[,k],j)])==4)>0){tmp.cnt=tmp.cnt+1}
}
if(tmp.cnt==tmp.ntriplets){subset.new=c(subset.new,paste(sort(c(tmp.subset,j)),collapse=":"))}
}
}
}
if(length(subset.new)>0){
subset.new=unique(subset.new)
subset.new=matrix(unlist(strsplit(subset.new, split=":")),ncol=subset.length+1,byrow=T)
}else{
subset.new=matrix("",nrow=0,ncol=subset.length+1)
}
}
if(nrow(subset.new)==0){subset.pops=subset.cur}else{subset.pops=subset.new}
}
n.subsets=nrow(subset.pops)
summary.f4.zscores=matrix(0,n.subsets,2)
colnames(summary.f4.zscores)=c("Min. |Zscore|","Max. |Zscore|")
for(i in 1:n.subsets){
tmp.comp = apply(fstats@comparisons[["F4"]] ,1,f<-function(x){sum(x %in% subset.pops[i,])==4})
    tmp.f4zscores=fstats@f4.values$`Z-score`[tmp.comp]
tmp.conf.id=order(abs(tmp.f4zscores),decreasing = F)[1:(length(tmp.f4zscores)/3)]
summary.f4.zscores[i,]=range(abs(tmp.f4zscores[tmp.conf.id]))
if(i==1){
conf.ok=rownames(fstats@comparisons[["F4"]])[tmp.comp][tmp.conf.id]
}else{
conf.ok=rbind(conf.ok,rownames(fstats@comparisons[["F4"]])[tmp.comp][tmp.conf.id])
}
}
if(n.subsets==1){conf.ok=t(as.matrix(conf.ok))}
rownames(conf.ok)=rownames(subset.pops)=rownames(summary.f4.zscores)=paste0("PopSet",1:n.subsets)
  return(list(n.sets=n.subsets,set.size=ncol(subset.pops),pop.sets=subset.pops,Z_f4.range=summary.f4.zscores,passing.quadruplets=conf.ok))
} |
ll.self.ai <- function(y,xb,sigma.y,sigma.re,tau,n.cat,vlist=NULL,
do.print=0) {
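  # Marginal log-likelihood: for each subject the random effect eta is
  # integrated out numerically against its N(0, sigma.re) density, with the
  # ordinal response probabilities multiplied across the n.self self-reports.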
dd <- dim(y)
n <- dd[1]
n.self<- dd[2]
se <- sigma.y[1]
su <- sigma.re
if (is.null(vlist)) {
int.lb <- -Inf
int.ub <- Inf
int.tol<- 1e-12
int.sub<- 100
} else {
int.lb <- vlist$int.range[1]
int.ub <- vlist$int.range[2]
int.tol<- vlist$int.tol
int.sub<- vlist$int.sub
}
pp <- rep(NA,n)
for (i in 1:n) {
f <- function(eta) {
p <- ll.self.ai.prob(y[i,],xb[i,]+eta,se,su,tau[i,],n.self,n.cat)
return( dnorm(eta,0,su) * apply(p,1,prod) )
}
pp[i] <- integrate(f,int.lb,int.ub,subdivisions=int.sub,
rel.tol=int.tol,abs.tol=int.tol )$value
}
return(sum(log(pp)))
}
ll.self.ai.prob <- function(y,xb,se,su,tau,n.self,n.cat) {
tidx <- function(i.self,i.cat) {
(n.cat-1)*(i.self-1)+i.cat
}
p <- NULL
for (i in 1:n.self){
taus <- cumsum( tau[ tidx(i,1):tidx(i,(n.cat-1)) ] )
j <- y[i]
if (j==1|j>(n.cat-1)) {
if (j==1)
p <- cbind(p,pnorm(taus[1],xb,se))
else
p <- cbind(p,pnorm(taus[n.cat-1],xb,se,lower.tail=FALSE))
}
else
p <- cbind(p, pnorm(taus[j],xb,se)-pnorm(taus[j-1],xb,se))
}
return(p)
} |
position_stack <- function() {
PositionStack
}
PositionStack <- gganimintproto("PositionStack", Position,
setup_data = function(self, data, params) {
data = remove_missing(data, FALSE,
c("x", "y", "ymin", "ymax", "xmin", "xmax"), name = "position_stack")
if (is.null(data$ymax) && is.null(data$y)) {
message("Missing y and ymax in position = 'stack'. ",
"Maybe you want position = 'identity'?")
return(data)
}
if (!is.null(data$ymin) && !all(data$ymin == 0))
warning("Stacking not well defined when ymin != 0", call. = FALSE)
data
},
compute_panel = function(data, params, scales) {
collide(data, NULL, "position_stack", pos_stack)
}
) |
summary.dsm.var <- function(object, alpha=0.05, boxplot.coef=1.5,
bootstrap.subregions=NULL, ...){
sinfo <- list()
sinfo$alpha <- alpha
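  # Two uncertainty pathways follow: (i) bootstrap - trimmed variance of the
  # replicate abundance estimates, combined with the detection-function CV
  # when detection uncertainty is not already resampled; (ii) analytic -
  # variance propagation (or GAM-only variance), with CVs combined in
  # quadrature: CV(N)^2 = CV(p)^2 + CV(N_hat)^2.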
if(object$bootstrap){
sinfo$pred.est <- object$study.area.total[1]
sinfo$block.size <- object$block.size
sinfo$n.boot <- object$n.boot
sinfo$bootstrap <- TRUE
sinfo$ds.uncertainty <- object$ds.uncertainty
bootstrap.abund <- object$study.area.total
if(any(class(object$dsm.object$ddf)=="fake_ddf") | object$ds.uncertainty){
trimmed.variance <- trim.var(bootstrap.abund[is.finite(bootstrap.abund)],
boxplot.coef=boxplot.coef)
sinfo$var <- trimmed.variance
sinfo$se <- sqrt(trimmed.variance)
sinfo$cv <- sinfo$se/sinfo$pred.est
sinfo$bootstrap.cv <- sinfo$cv
}else{
ddf.summary <- summary(object$dsm.object$ddf)
sinfo$average.p.se <- ddf.summary$average.p.se
cvp.sq <- (ddf.summary$average.p.se/
ddf.summary$average.p)^2
sinfo$detfct.cv <- sqrt(cvp.sq)
trimmed.variance <- trim.var(bootstrap.abund[is.finite(bootstrap.abund)],
boxplot.coef=boxplot.coef)
sinfo$N.bs.se <- sqrt(trimmed.variance)
cvNbs.sq <- (sinfo$N.bs.se/sinfo$pred.est)^2
sinfo$bootstrap.cv <- sqrt(cvNbs.sq)
cvN <- sqrt(cvp.sq + cvNbs.sq)
sinfo$cv <- cvN
sinfo$var <- (cvN*sinfo$pred.est)^2
sinfo$se <- sqrt(sinfo$var)
}
sinfo$boxplot.coef <- boxplot.coef
sinfo$trim.prop <- attr(trimmed.variance, "trim.prop")
sinfo$trim.ind <- attr(trimmed.variance, "trim.ind")
sinfo$boot.outliers <- attr(trimmed.variance, "outliers")
sinfo$boot.infinite <- sum(is.infinite(bootstrap.abund))
sinfo$boot.finite <- sum(!is.infinite(bootstrap.abund))
sinfo$boot.NA <- sum(is.na(bootstrap.abund))
sinfo$boot.NaN <- sum(is.nan(bootstrap.abund))
sinfo$boot.usable <- sinfo$boot.finite - (sinfo$boot.outliers +
sinfo$boot.infinite + sinfo$boot.NA + sinfo$boot.NaN)
sinfo$quantiles <- quantile(bootstrap.abund[sinfo$trim.ind],
c(alpha, 0.5, 1-alpha),
na.rm=TRUE)
attr(sinfo$quantiles, "names")[2] <- "Median"
if(!is.null(bootstrap.subregions)){
subregions <- list()
i<-1
for(region.ind in bootstrap.subregions){
this.object <- object
this.object$short.var <- NULL
this.object$study.area.total <- object$study.area.total[region.ind]
this.object$pred.data <- object$pred.data[region.ind,]
subregions[[i]] <- summary(this.object)
i<-i+1
}
sinfo$subregions<-subregions
}
}else{
sinfo$varprop <- object$var.prop
sinfo$saved <- object
sinfo$bootstrap <- object$bootstrap
if(all(dim(as.matrix(object$pred.var))==1)){
sinfo$se <- sqrt(object$pred.var)
}else{
pd <- c()
off <- c()
for(i in 1:length(object$pred.data)){
pd <- rbind(pd, object$pred.data[[i]])
off <- rbind(off, object$off.set[[i]])
}
object$pred.data <- pd
object$off.set <- as.vector(off)
if(object$var.prop){
var.prop <- dsm.var.prop(object$dsm.obj, object$pred.data,
object$off.set, object$seglen.varname,
object$type.pred)
}else{
var.prop <- dsm.var.gam(object$dsm.obj, object$pred.data,object$off.set,
object$seglen.varname, object$type.pred)
}
sinfo$se <- sqrt(var.prop$pred.var)
}
if(length(object$pred)>1){
sinfo$pred.est <- sum(unlist(object$pred), na.rm=TRUE)
}else{
sinfo$pred.est <- object$pred[[1]]
}
if(sinfo$varprop | any(class(object$dsm.object$ddf)=="fake_ddf")){
sinfo$cv <- sinfo$se/sinfo$pred.est
}else{
ddf.summary <- summary(object$dsm.object$ddf)
if(!any(class(object$dsm.object$ddf)=="list")){
ddf <- list(object$dsm.object$ddf)
}else{
ddf <- object$dsm.object$ddf
}
sinfo$detfct.cv <- c()
cvp.sq <- 0
for(i in seq_along(ddf)){
this_ddf <- ddf[[i]]
if(all(class(this_ddf)!="fake_ddf")){
ddf.summary <- summary(this_ddf)
this_cvp.sq <- (ddf.summary$average.p.se/
ddf.summary$average.p)^2
cvp.sq <- cvp.sq + this_cvp.sq
}else{
this_cvp.sq <- NA
}
sinfo$detfct.cv <- c(sinfo$detfct.cv, sqrt(this_cvp.sq))
}
sinfo$gam.cv <- sinfo$se/sinfo$pred.est
sinfo$cv <- sqrt(cvp.sq+sinfo$gam.cv^2)
sinfo$se <- sinfo$cv*sinfo$pred.est
}
if(sinfo$varprop){
sinfo$model.check <- object$model.check
}
}
class(sinfo) <- "summary.dsm.var"
return(sinfo)
} |
get_soilseries_from_NASIS <- function(stringsAsFactors = default.stringsAsFactors(),
dsn = NULL, delimiter = " over ") {
q.soilseries <- "
SELECT soilseriesname, soilseriesstatus, benchmarksoilflag, soiltaxclasslastupdated, mlraoffice, taxclname, taxorder, taxsuborder, taxgrtgroup, taxsubgrp, taxpartsize, taxpartsizemod, taxceactcl, taxreaction, taxtempcl, taxfamhahatmatcl, originyear, establishedyear, descriptiondateinitial, descriptiondateupdated, statsgoflag, soilseriesiid, areasymbol, areaname, areaacres, obterm, areatypename, soilseriesedithistory
FROM soilseries ss
INNER JOIN area a ON a.areaiid = ss.typelocstareaiidref
INNER JOIN areatype at ON at.areatypeiid = ss.typelocstareatypeiidref
ORDER BY soilseriesname;"
q.min <- "SELECT soilseriesiidref, minorder, taxminalogy FROM soilseriestaxmineralogy
ORDER BY soilseriesiidref, minorder;"
channel <- dbConnectNASIS(dsn)
if (inherits(channel, 'try-error'))
return(data.frame())
d.soilseries <- dbQueryNASIS(channel, q.soilseries, close = FALSE)
d.soilseriesmin <- dbQueryNASIS(channel, q.min)
d.soilseries <- uncode(d.soilseries, stringsAsFactors = stringsAsFactors, dsn = dsn)
d.soilseriesmin <- uncode(d.soilseriesmin, stringsAsFactors = stringsAsFactors, dsn = dsn)
d.soilseries$soiltaxclassyearlastupdated <- format(as.Date(d.soilseries$soiltaxclasslastupdated), "%Y")
d.minagg <- aggregate(d.soilseriesmin$taxminalogy,
list(soilseriesiid = d.soilseriesmin$soilseriesiidref),
paste0, collapse = delimiter)
colnames(d.minagg) <- c("soilseriesiid", "taxminalogy")
res <- merge(
d.soilseries,
d.minagg,
by = "soilseriesiid",
all.x = TRUE,
incomparables = NA,
sort = FALSE
)
return(res[,c("soilseriesiid", "soilseriesname", "soilseriesstatus", "benchmarksoilflag",
"soiltaxclasslastupdated", "mlraoffice", "taxclname", "taxorder",
"taxsuborder", "taxgrtgroup", "taxsubgrp", "taxpartsize", "taxpartsizemod",
"taxceactcl", "taxreaction", "taxtempcl", "taxminalogy", "taxfamhahatmatcl",
"originyear", "establishedyear", "descriptiondateinitial", "descriptiondateupdated",
"statsgoflag", "soilseriesedithistory", "areasymbol", "areaname",
"areaacres", "obterm", "areatypename")])
}
get_soilseries_from_NASISWebReport <- function(soils, stringsAsFactors = default.stringsAsFactors()) {
url <- "https://nasis.sc.egov.usda.gov/NasisReportsWebSite/limsreport.aspx?report_name=get_soilseries_from_NASISWebReport"
d.ss <- lapply(soils, function(x) {
args = list(p_soilseriesname = x)
d = parseWebReport(url, args)
})
d.ss <- do.call("rbind", d.ss)
d.ss[!names(d.ss) %in% c("mlraoffice", "taxminalogy")] <- uncode(d.ss[!names(d.ss) %in% c("mlraoffice", "taxminalogy")],
db = "SDA", stringsAsFactors = stringsAsFactors)
d.ss[names(d.ss) %in% c("mlraoffice")] <- uncode(d.ss[names(d.ss) %in% c("mlraoffice")],
db = "LIMS", stringsAsFactors = stringsAsFactors)
return(d.ss)
} |
test_that("lower", {
expect_identical(lower(NA_integer_), NA_real_)
expect_identical(lower(integer(0)), NA_real_)
expect_equal(lower(1), 1)
expect_equal(lower(c(1, 1)), 1)
expect_equal(lower(0:100), 2.5)
expect_equal(lower(c(0:100, NA)), NA_real_)
expect_equal(lower(c(0:100, NA), na_rm = TRUE), 2.5)
}) |
reveal.model.designs <- function(fit) {
model.sets <- fit$LM$model.sets
terms.f <- model.sets$terms.f
terms.r <- model.sets$terms.r
forms.r <- lapply(terms.r, function(x) formula(x)[[3]])
forms.f <- lapply(terms.f, function(x) formula(x)[[3]])
reduced <- lapply(forms.r, function(x) Reduce(paste, deparse(x)))
full<- lapply(forms.f, function(x) Reduce(paste, deparse(x)))
term.labels <- names(forms.f)
k <- length(term.labels)
blank <- target <- rep("", k)
target[k] <- "<- Null/Full inherent in pairwise"
df <- as.data.frame(cbind(blank, reduced = reduced, blank, full = full, target))
names(df) <- c("", "Reduced", "", "Full", "")
return(df)
} |
dupes <- function(
file,
ignore.empty=TRUE,
ignore.space=TRUE,
tofile=missing(n),
n=length(d)
)
{
spaces <- if(ignore.empty) sapply(0:9, function(i)
paste(rep(" ", i), collapse="")) else FALSE
R <- readLines(file)
R2 <- if(ignore.space) removeSpace(R) else R
d <- which(duplicated(R2, incomparables=spaces, fromLast=TRUE) |
duplicated(R2, incomparables=spaces, fromLast=FALSE) )
if(!tofile) {
nd <- sapply(d, function(i) sum(R2[i]==R2[-i]))
return(head(data.frame(line=d, number=nd), n))
}
nd <- sapply(1:length(R2), function(i) sum(R2[i]==R2[-i]))
if(ignore.empty) nd[R2==""] <- ""
  write.table(data.frame(nd, R), paste0(file, "_dupes.txt"), row.names=FALSE,
              col.names=FALSE, quote=FALSE, sep="\t")
message("Created the file '", file,"_dupes.txt'\nin getwd: ", getwd())
} |
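# Usage sketch (file name is illustrative):
#   dupes("analysis.R")          # writes 'analysis.R_dupes.txt' into getwd()
#   dupes("analysis.R", n = 10)  # returns the first 10 duplicated lines instead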
context("Indicator fiche")
test_that("Basic operation", {
testest <- function(){
tmp <- go_indica_fi(
workDF = NA,
time_0 = 2004,
time_t = 2018,
timeName = 'time',
indicaT = 'emp_20_64',
indiType = c('highBest','lowBest')[1],
seleMeasure = 'all',
seleAggre = 'EU28',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/01/31',
outFile = "test_indica-fi-emp_20_64_MS",
outDir = tempdir(),
workTB = emp_20_64_MS
)
tmp <- go_indica_fi(
workDF = NA,
time_0 = 2004,
time_t = 2018,
timeName = 'time',
indicaT = 'emp_20_64',
indiType = c('highBest','lowBest')[1],
seleMeasure = 'all',
seleAggre = 'EU28',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/01/31',
outFile = "test_indica-fi-emp_20_64_MS_ONE",
outDir = tempdir(),
workTB = emp_20_64_MS,
selfContained = TRUE
)
folder_tmp <- tempdir()
go_indica_fi(
time_0 = 2004,
time_t = 2014,
timeName = 'time',
workDF = 'emp_20_64_MS' ,
indicaT = 'emp_20_64',
indiType = c('highBest','lowBest')[1],
seleMeasure = 'all',
seleAggre = 'EU28',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/01/31',
outFile = "test_indica-fi-emp_20_64_MS",
outDir = folder_tmp
)
browseURL(file.path(folder_tmp,'test_indica-fi-emp_20_64_MS.html'))
go_indica_fi(
time_0 = 2002,
time_t = 2010,
timeName = 'time',
workDF = 'emp_20_64_MS' ,
indicaT = 'emp_20_64',
indiType = 'lowBest',
seleMeasure = 'all',
seleAggre = 'EU28',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/10/16',
outFile = "newtest_IT-emp_20_64_MS",
outDir = folder_tmp
)
browseURL(file.path(folder_tmp,'newtest_IT-emp_20_64_MS.html'))
go_indica_fi(
time_0 = 2002,
time_t = 2010,
timeName = 'time',
workDF = 'emp_20_64_MS' ,
indicaT = 'emp_20_64',
indiType = 'lowBest',
seleMeasure = 'all',
seleAggre = 'EurozoneBITUR',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/05/16',
outFile = "newtest_IT-emp_20_64_MS",
outDir = tempdir()
)
browseURL(file.path(folder_tmp,'newtest_IT-emp_20_64_MS.html'))
go_indica_fi(
time_0 = 2002,
time_t = 2010,
timeName = 'time',
workDF = 'emp_20_64_MS' ,
indicaT = 'emp_20_64',
indiType = 'highBest',
seleMeasure = c('all'),
seleAggre = 'EU28',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/05/16',
outFile = "newtest_IT",
      outDir = tempdir()
)
browseURL(file.path(tempdir(),'newtest_IT.html'))
myTTB <- emp_20_64_MS
dim(myTTB)
names(myTTB)<- c("time",paste("PP",1:28,sep="~"))
go_indica_fi(
time_0 = 2005,
time_t = 2010,
timeName = 'time',
workDF = 'myTTB' ,
indicaT = 'emp_20_64',
indiType = 'highBest',
seleMeasure = c('all'),
seleAggre = 'custom',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/10/16',
outFile = "newtest_IT",
outDir = tempdir()
)
browseURL(file.path(tempdir(),'newtest_IT.html'))
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 7 , 6,
2006, 10 , 7 , 6,
2007, 10, 7 , 6
)
go_indica_fi(
time_0 = 2005,
time_t = 2007,
timeName = 'time',
workDF = 'myTTB' ,
indicaT = 'testerIndica',
indiType = 'highBest',
seleMeasure = c('all'),
seleAggre = 'custom',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/12/07',
outFile = "indica_custom",
outDir = tempdir()
)
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 5 , 2,
2006, 12 , 9 , 6,
2007, 10, 9 , 6
)
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 5 , 2,
2006, 12 , 9 , 6,
2007, 25, 9 , 10
)
go_indica_fi(
time_0 = 2005,
time_t = 2007,
timeName = 'time',
workDF = 'myTTB' ,
indicaT = 'testerIndica',
indiType = 'highBest',
seleMeasure = c('all'),
seleAggre = 'custom',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/12/07',
outFile = "indica_custom",
outDir = tempdir()
)
browseURL(file.path(tempdir(),'indica_custom.html'))
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 5 , 2,
2006, 12 , 9 , 6,
2007, 7, 6 , 4
)
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 4 , 2,
2006, 12 , 5 , 6,
2007, 25, 4 , 10
)
myTTB <- tibble::tribble(
~time, ~UK, ~DE, ~IT,
2005, 10 , 14 , 8,
2006, 12 , 10 , 7,
2007, 25, 7 , 6
)
go_indica_fi(
time_0 = 2005,
time_t = 2007,
timeName = 'time',
workDF = 'myTTB' ,
indicaT = 'testerIndica',
indiType = 'lowBest',
seleMeasure = c('all'),
seleAggre = 'custom',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/12/07',
outFile = "indica_custom",
outDir = tempdir()
)
browseURL(file.path(tempdir(),'indica_custom.html'))
negaTB <- emp_20_64_MS
go_indica_fi(
time_0 = 2002,
time_t = 2010,
timeName = 'time',
workDF = 'negaTB' ,
indicaT = 'emp_20_64',
indiType = 'lowBest',
seleMeasure = c('all'),
seleAggre = 'EA',
x_angle = 45,
data_res_download = FALSE,
auth = 'A.Student',
dataNow = '2019/05/16',
outFile = "negafake",
outDir = tempdir()
)
browseURL(file.path(tempdir(),'negafake.html'))
TB <- emp_20_64_MS
TB[15,1]<- 2020
go_indica_fi(time_0=2002,time_t=2020,
timeName = 'time',
workDF = "TB",
indicaT = 'emp_20_64_MS',
indiType = c("highBest","lowBest")[1],
seleMeasure = 'all',
seleAggre ='EU28',
dataNow = Sys.time(),
outFile = 'test_EA_indicator fiche',
outDir = tempdir(),
pdf_out = T)
browseURL(file.path(tempdir(),'test_EA_indicator fiche.html'))
lbTB <- structure(list(time = c(2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019), BE = c(15.7,
15.2, 14.7, 13.4, 12.2, 11.9, 11.5, 11, 10.2, 8.7, 8.3, 9.3,
9.8, 8.4, 8), DK = c(8.6, 9, 8.5, 8.7, 6, 5.7, 6.9, 6.4, 6.3,
7.4, 7.8, 6.9, 6.7, 7, 7.2), FR = c(11.8, 11.2, 10.4, 10.1, 9.4,
9.3, 9.5, 8.9, 8.3, 7.5, 7.2, 7.4, 7.8, 7.6, 7.1), DE = c(12.5,
12.2, 12.4, 12.3, 10.9, 10.7, 10.4, 10.5, 9.6, 9.1, 8.7, 8.2,
7.9, 8.1, 8), EL = c(29.6, 28.6, 28.4, 27.5, 25.6, 24.2, 22.1,
19.8, 19.4, 18.3, 18, 19, 19.7, 21, 20), IE = c(20.5, 19.9, 18.4,
16, 10.2, 8.8, 8.7, 8.6, 10.5, 11.8, 12.3, 12.1, 12.1, 12.2,
12.4), IT = c(26.3, 25.8, 25.8, 24.7, 24, 23.2, 22.6, 21, 19.8,
19.4, 20, 20.1, 19.8, 19.8, 19.6), LU = c(21, 19.5, 17.3, 17.1,
17.5, 17.2, 16.2, 14.4, 14.1, 12.9, 11.7, 11, 7.9, 8, 9.1), NL = c(16.5,
15.9, 15.5, 15, 13.7, 12.7, 12, 11.3, 10.5, 11.4, 11.1, 11, 10.5,
10.1, 9.3), PT = c(12.7, 12.9, 12.8, 12.3, 10.3, 9.8, 8.6, 6.8,
6.4, 7.1, 6.7, 6.8, 7.5, 6.8, 7.2), ES = c(24.7, 23.6, 22, 19,
14.2, 12.9, 11.6, 10, 9.6, 10.2, 11.2, 11.5, 11.9, 12.1, 11.9
), AT = c(12.9, 12.9, 13.3, 12.5, 10.5, 10.2, 10, 9.7, 9.1, 8.2,
8.2, 7.8, 8, 9, 8.8), FI = c(4.3, 4.8, 4.7, 5.3, 2.3, 3, 3.7,
3, 2.8, 1.9, 2.1, 3.3, 3.5, 3.7, 2.7), SE = c(5.3, 5.9, 6, 6.3,
5.2, 6.1, 5.6, 5.1, 5, 4.6, 4.2, 3.8, 4, 4.2, 4.7), CY = c(21.7,
20.3, 18.7, 17, 14.5, 12.9, 11.9, 11.3, 10.4, 7.7, 8.3, 9.7,
9.5, 10.4, 11.6), CZ = c(18.8, 18.6, 19.1, 19.5, 18.8, 18.7,
18.2, 17.7, 17.2, 17.5, 16.6, 16, 15.8, 15.2, 15), EE = c(4.9,
7, 8.8, 8.6, 2, 1.9, 5.7, 5.7, 6.6, 7.7, 7.9, 8.2, 7.3, 7.8,
7.7), HU = c(13.6, 14.5, 14.6, 13.9, 12.5, 10.9, 11.7, 11.1,
12.4, 13.3, 13.7, 14, 15.3, 15.3, 15.5), LV = c(9.8, 10, 10.2, 7.4, 0.3, -0.5, 2.2, 3.6, 4.2, 4.6, 4.1, 2.9, 4.3, 4.2, 3.8),
LT = c(8.4, 6.9, 7.5, 6.9, -0.4, -1.5, 0.6, 1.2, 2.6, 2.5,
2.4, 1.9, 1, 2.3, 1.6), MT = c(44.9, 43.9, 41.3, 39.1, 37.5,
36.6, 35.2, 31.4, 28.6, 26.8, 26.8, 25.5, 24.1, 21.9, 20),
PL = c(13.4, 14.2, 14.7, 15.7, 15, 14, 14.7, 14.5, 14.5,
14.2, 13.8, 14.2, 14.6, 14.4, 15.4), SK = c(15.8, 17.1, 17.3,
17.1, 16.4, 14.5, 15.1, 15.5, 14.4, 14.6, 14.7, 14.2, 12.8,
13.7, 13), SI = c(9.6, 9.8, 10.4, 8.9, 7.7, 7.5, 7, 7.2,
8.2, 8, 8.6, 6.6, 7.2, 7.3, 6.8), BG = c(9.7, 9.5, 9.9, 10.7,
9.8, 7.8, 6.2, 5.6, 5.7, 6.1, 6.6, 7.3, 8, 8.2, 8.6), RO = c(13.5,
12.7, 13.1, 14.3, 14.4, 16.6, 15.3, 16.1, 16.3, 16.7, 17.5,
17.6, 17.1, 18.3, 19), HR = c(14.5, 14.1, 16.2, 15.9, 12.5,
11.5, 12.5, 11.1, 8.8, 10, 9.5, 9.6, 10.6, 10.2, 10.5)), row.names = c(NA,
-15L), class = c("tbl_df", "tbl", "data.frame"))
go_indica_fi(time_0=2005,time_t=2019,
timeName = 'time',
workDF = "lbTB",
indicaT = 'I.02.01.00',
indiType = c("highBest","lowBest")[2],
seleMeasure = 'all',
seleAggre ='EU27',
dataNow = Sys.time(),
outFile = 'test_EA_indica_lb',
outDir = tempdir(),
pdf_out = T)
browseURL(file.path(tempdir(),'test_EA_indica_lb.html'))
}
expect_equal(1, 1)
}) |
isolate_wl <- function(rspecdata, keep = c("wl", "spec")) {
keep <- match.arg(keep)
is_wl <- colnames(rspecdata) == "wl"
if (keep == "wl") {
if (any(is_wl)) {
return(rspecdata[, is_wl])
} else {
warning("wl column missing from input rspec data. Using arbritrary ",
"values based on object length.",
call. = FALSE
)
return(seq_len(nrow(rspecdata)))
}
} else {
return(rspecdata[, !is_wl, drop = FALSE])
}
} |
plot_records <- function(df, top_ten = TRUE, color = TRUE) {
if (color == TRUE){
res <- plot(df$ScientificName, xlab = "Scientific Name", ylab = "Number of records", main = "Records by Scientific Name", col = rainbow(nlevels(df$ScientificName)))
} else {
res <- plot(df$ScientificName, xlab = "Scientific Name", ylab = "Number of records", main = "Records by Scientific Name")
}
return(res)
} |
lab <- function(file,
id,
age,
sex,
normal,
abnormal,
is_post = T,
name_to_find = "LBNRIND") {
obj <- list(
file = file,
id = dplyr::enquo(id),
age = dplyr::enquo(age),
sex = dplyr::enquo(sex),
normal = normal,
abnormal = abnormal,
is_post = is_post,
name_to_find = name_to_find,
bond = "_"
)
class(obj) <- "lab"
obj
}
choose_test.lab <- function(obj, test = "mis", group_id = T) {
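  # Filter modes: "mis" = results whose expected indicator disagrees with the
  # recorded one, "ok" = agreeing results, "skip" = a value is present but its
  # indicator is missing, "null" = both value and indicator are missing.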
result <- obj %>% get_result(group_id)
if (test == "mis") {
result <-
result %>%
dplyr::filter(.data$IS_RIGHT == F) %>%
dplyr::select(-.data$IS_RIGHT)
} else if (test == "ok") {
result <-
result %>%
dplyr::filter(.data$IS_RIGHT == T) %>%
dplyr::select(-.data$IS_RIGHT)
} else if (test == "skip") {
result <-
result %>%
dplyr::filter(!is.na(.data$LBORRES) &
is.na(.data$LBNRIND)) %>%
dplyr::select(-.data$IS_RIGHT)
} else if (test == "null") {
result <-
result %>%
dplyr::filter(is.na(.data$LBORRES) &
is.na(.data$LBNRIND)) %>%
dplyr::select(-.data$IS_RIGHT)
} else {
stop("uknown parameter ", test)
}
result
}
to_long.lab <- function(obj, dataset, row_file, part) {
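  # Apply one reference-range row ('row_file') to the subjects it covers (age
  # window and sex pattern), compare the measured value against
  # [LBORNRLO, LBORNRHI] to derive the expected indicator, and flag IS_RIGHT
  # when it matches the recorded LBNRIND.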
id <- obj[["id"]]
age <- obj[["age"]]
sex <- obj[["sex"]]
normal <- obj[["normal"]]
abnormal <- obj[["abnormal"]]
is_post <- obj[["is_post"]]
obj_cl <- obj[["clsig"]]
lbtest <- row_file$LBTEST
lbtestcd <- row_file$LBORRES
lbnrind <- row_file$LBNRIND
lbornrlo <- as.double(row_file$LBORNRLO)
lbornrhi <- as.double(row_file$LBORNRHI)
age_low <- as.double(row_file$AGELOW)
age_high <- as.double(row_file$AGEHIGH)
pattern_sex <- paste0("^", row_file$SEX, "$")
lborres <- ifelse(is_post, paste0(lbtestcd, part), paste0(part, lbtestcd))
lbnrind <- ifelse(is_post, paste0(lbnrind, part), paste0(part, lbnrind))
vars_rename <- c("LBORRES" = lborres, "LBNRIND" = lbnrind)
by_age_sex <- dataset %>%
dplyr::mutate(!!age := as.double(!!age)) %>%
dplyr::filter(dplyr::between(!!age, age_low, age_high), grepl(pattern_sex, !!sex))
result <- by_age_sex %>%
dplyr::mutate(LBTESTCD = lbtestcd, LBTEST = lbtest, VISIT = part, LBORNRLO = lbornrlo, LBORNRHI = lbornrhi) %>%
dplyr::select(!!id, !!age, !!sex, .data$LBTEST, .data$LBTESTCD, .data$VISIT, .data$LBORNRLO, .data$LBORNRHI, !!lborres, !!lbnrind) %>%
dplyr::mutate(RES_TYPE_NUM = to_dbl(.data[[lborres]])) %>%
dplyr::mutate(IND_EXPECTED = ifelse(dplyr::between(.data$RES_TYPE_NUM, lbornrlo, lbornrhi), normal, abnormal)) %>%
dplyr::mutate(IS_RIGHT = .data$IND_EXPECTED == .data[[lbnrind]]) %>%
dplyr::rename(!!vars_rename)
result
} |
dclone <-
function(x, n.clones=1, ...)
{
UseMethod("dclone")
} |
BackupQueue <- R6::R6Class(
"BackupQueue",
inherit = DirectoryQueue,
cloneable = FALSE,
public = list(
initialize = function(
origin,
dir = dirname(origin),
max_backups = Inf,
compression = FALSE,
backup_dir = NULL
){
if (!is.null(backup_dir)){
.Deprecated(msg = "the `backup_dir` argument is deprecated, please use `dir` instead.")
dir <- backup_dir
}
self$set_origin(origin)
self$set_dir(dir)
self$set_compression(compression)
self$set_max_backups(max_backups)
self
},
prune = function(
max_backups = self$max_backups
){
if (!should_prune(self, max_backups))
return(self)
      if (max_backups > 0){ warning(
        "Pruning a generic BackupQueue with `max_backups > 0` is not ",
        "recommended, because it is not defined which backups will be ",
        "deleted. Use BackupQueueIndex or BackupQueueDate instead."
      )}
if (self$n <= max_backups){
to_remove <- character()
} else {
to_keep <- self$files$path[seq_len(max_backups)]
to_remove <- setdiff(self$files$path, to_keep)
}
file_remove(to_remove)
self
},
prune_identical = function(){
NotImplementedError()
},
print = function(){
cat(fmt_class(class(self)[[1]]), "\n\n")
ori <- file.info(self$origin)
bus <- self$files
info <- data.frame(
file = c(row.names(ori), bus$path),
size = c(ori$size, bus$size)
)
dd <- as.matrix(info)
if (nrow(dd) == 1){
dd[, "size"] <- fmt_bytes(dd[, "size"])
dd <- rbind(
dd,
c("[no backups]", "")
)
dd[, "size"] <- pad_left(dd[, "size"], max(nchar(dd[, "size"])) + 2)
dd[, "file"] <- pad_right(dd[, "file"])
assert(nrow(dd) == 2)
dd[2, ] <- apply(dd[2, ,drop = FALSE], 1:2, style_subtle)
} else if (nrow(dd) > 1){
dd <- rbind(
dd,
c(paste(nrow(dd), "files total"), sum(as.integer(dd[, "size"])))
)
dd[, "size"] <- pad_left(fmt_bytes(dd[, "size"]))
dd[, "file"] <- pad_right(dd[, "file"])
assert(nrow(dd) >= 3)
sel <- 2:(nrow(dd) - 1)
dd[sel, ] <- apply(dd[sel, ,drop = FALSE], 1:2, style_subtle)
} else {
stop("Error while printing backup queue. Please file an issue.")
}
apply(dd, 1, cat, "\n")
invisible(self)
},
push_backup = function(...){
.Deprecated(new = "$push()", old = "$push_backup()")
self$push(...)
},
set_origin = function(
x
){
assert(
is_scalar_character(x) && file_exists(x),
"File '", x, "' does not exist"
)
assert(!is_dir(x))
private[[".origin"]] <- x
self
},
set_compression = function(
x
){
assert_valid_compression(x)
private[[".compression"]] <- x
self
},
set_max_backups = function(
x
){
assert(
is.infinite(x) || is_n0(x),
"`max_backups` must be a positive integer (or `Inf` for no max)"
)
private[[".max_backups"]] <- x
self
},
set_file = function(
x
){
.Deprecated(new = "$set_origin()", old = "$set_file()")
self$set_origin(x)
},
set_backup_dir = function(
x
){
.Deprecated(new = "$set_dir()", old = "$set_backup_dir()")
self$set_dir(x, create = FALSE)
}
),
active = list(
origin = function(){
get(".origin", envir = private)
},
compression = function(){
get(".compression", envir = private)
},
max_backups = function(){
get(".max_backups", envir = private)
},
has_backups = function(){
self$n > 0
},
files = function(){
backup_files <- get_backups(
origin = self$origin,
potential_backups =
list_files(self$dir, full.names = self$dir != "."),
sfx_patterns = c(
"\\d+",
"\\d{4}-\\d{2}-\\d{2}",
"\\d{4}-\\d{2}"
)
)
if (!length(backup_files)){
return(EMPTY_BACKUPS_INDEX)
}
fname_matrix <- filenames_as_matrix(self$origin, backups = backup_files)
fname_df <- as.data.frame(
fname_matrix[, c("name", "sfx", "ext"), drop = FALSE],
stringsAsFactors = FALSE
)
finfo <- file.info(backup_files)
res <- cbind(
path = data.frame(path = rownames(finfo), stringsAsFactors = FALSE),
fname_df,
finfo
)
row.names(res) <- NULL
res
},
backups = function(){
.Deprecated(new = "$files", old = "$backups")
self$files
},
file = function(){
.Deprecated(new = "$origin", old = "$file")
self$origin
},
backup_dir = function(){
.Deprecated(new = "$dir", old = "$backup_dir")
self$dir
}
),
private = list(
.origin = NULL,
.dir = NULL,
.compression = NULL,
.max_backups = NULL
)
)
BackupQueueIndex <- R6::R6Class(
"BackupQueueIndex",
inherit = BackupQueue,
cloneable = FALSE,
public = list(
push = function(){
name <- file.path(
self$dir,
tools::file_path_sans_ext(basename(self$origin))
)
ext <- tools::file_ext(self$origin)
sfx <- "1"
if (is_blank(ext)) {
name_new <- paste(name, sfx, sep = ".")
} else {
name_new <- paste(name, sfx, ext, sep = ".")
}
self$increment_index()
copy_or_compress(
self$origin,
outname = name_new,
compression = self$compression,
add_ext = TRUE,
overwrite = FALSE
)
      self$pad_index()
},
prune = function(
max_backups = self$max_backups
){
if (!should_prune(self, max_backups))
return(self)
if (self$n > max_backups){
to_keep <- self$files$path[seq_len(max_backups)]
to_remove <- setdiff(self$files$path, to_keep)
file_remove(to_remove)
}
self$pad_index()
},
prune_identical = function(
){
dd <- self$files
dd$md5 <- tools::md5sum(self$files$path)
dd <- dd[nrow(dd):1L, ]
sel <- duplicated(dd$md5)
remove <- dd[sel, ]
keep <- dd[!sel, ]
unlink(remove$path)
keep$path_new <- paste(
file.path(dirname(keep$path), keep$name),
pad_left(nrow(keep):1, pad = "0"),
keep$ext,
sep = "."
)
keep$path_new <- gsub("\\.$", "", keep$path_new)
keep <- keep[order(keep$sfx), ]
file.rename(keep$path, keep$path_new)
self
},
should_rotate = function(size, verbose = FALSE){
size <- parse_size(size)
if (size <= 0)
return(TRUE)
if (is.infinite(size)){
if (verbose) message("Not rotating: rotation `size` is infinite")
return(FALSE)
}
fsize <- file.size(self$origin)
if (fsize < size){
if (verbose){
message(sprintf(
"Not rotating: size of '%s'(%s) is smaller than %s.",
self$origin, fmt_bytes(file.size(self$origin)), fmt_bytes(size)
))
}
FALSE
} else {
TRUE
}
},
pad_index = function(){
if (nrow(self$files) <= 0)
return(self)
backups <- self$files
backups$sfx_new <- pad_left(backups$index, pad = "0")
backups$path_new <-
paste(file.path(dirname(backups$path), backups$name), backups$sfx_new, backups$ext, sep = ".")
backups$path_new <- gsub("\\.$", "", backups$path_new)
file_rename(
backups$path,
backups$path_new
)
self
},
increment_index = function(
n = 1
){
assert(
is_scalar_integerish(n) & n > 0,
"indices can only be incremented by positive integers, but `n` is ", preview_object(n), "."
)
if (self$n <= 0)
return(self)
backups <- self$files
backups$index <- backups$index + as.integer(n)
backups$path_new <- paste(
file.path(dirname(backups$path), backups$name),
pad_left(backups$index, pad = "0"),
backups$ext,
sep = "."
)
backups$path_new <- gsub("\\.$", "", backups$path_new)
file_rename(
rev(backups$path),
rev(backups$path_new)
)
self
}
),
active = list(
files = function(){
res <- super$files
if (nrow(res) < 1)
return(EMPTY_BACKUPS_INDEX)
res <- res[grep("^\\d+$", res$sfx), ]
res$index <- as.integer(res$sfx)
res[order(res$sfx, decreasing = FALSE), ]
}
),
private = list(
.fmt = NULL
)
)
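# Usage sketch (temporary file is illustrative):
#   tf <- tempfile(fileext = ".log"); writeLines("log line", tf)
#   bq <- BackupQueueIndex$new(tf, max_backups = 3)
#   bq$push(); bq$push()   # leaves <name>.1.log (newest) and <name>.2.log
#   bq$files$path
#   bq$prune(1)            # keep only the most recent backup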
BackupQueueDateTime <- R6::R6Class(
"BackupQueueDateTime",
inherit = BackupQueue,
cloneable = FALSE,
public = list(
initialize = function(
origin,
dir = dirname(origin),
max_backups = Inf,
compression = FALSE,
fmt = "%Y-%m-%d--%H-%M-%S",
cache_backups = FALSE,
backup_dir = NULL
){
if (!is.null(backup_dir)){
.Deprecated(msg = "the `backup_dir` argument is deprecated, please use `dir` instead.")
dir <- backup_dir
}
self$set_origin(origin)
self$set_dir(dir)
self$set_compression(compression)
self$set_max_backups(max_backups)
self$set_fmt(fmt)
self$set_cache_backups(cache_backups)
self$update_backups_cache()
},
push = function(
overwrite = FALSE,
now = Sys.time()
){
assert_valid_datetime_format(self$fmt)
now <- parse_datetime(now)
stopifnot(
is_scalar_logical(overwrite),
is_scalar_POSIXct(now)
)
name <- file.path(
self$dir,
tools::file_path_sans_ext(basename(self$origin))
)
ext <- tools::file_ext(self$origin)
sfx <- format(now, format = self$fmt)
if (is_blank(ext)) {
name_new <- paste(name, sfx, sep = ".")
} else {
name_new <- paste(name, sfx, ext, sep = ".")
}
copy_or_compress(
self$origin,
outname = name_new,
compression = self$compression,
add_ext = TRUE,
overwrite = overwrite
)
self$update_backups_cache()
},
prune = function(
max_backups = self$max_backups
){
assert(is_scalar(max_backups))
if (!should_prune(self, max_backups)){
return(self)
} else {
self$update_backups_cache()
}
if (is_integerish(max_backups) && is.finite(max_backups)){
backups <- rev(sort(self$files$path))
if (length(backups) <= max_backups){
to_remove <- character()
} else {
to_remove <- backups[(max_backups + 1):length(backups)]
}
} else {
to_remove <- select_prune_files_by_age(
self$files$path,
self$files$timestamp,
max_age = max_backups,
now = as.Date(as.character(self$last_rotation))
)
}
file_remove(to_remove)
self$update_backups_cache()
self
},
should_rotate = function(
size,
age,
now = Sys.time(),
last_rotation = self$last_rotation %||% file.info(self$origin)$ctime,
verbose = FALSE
){
now <- parse_datetime(now)
size <- parse_size(size)
if (is.infinite(size) || is.infinite(age) || is.null(last_rotation) || file.size(self$origin) < size){
if (verbose){
reasons <- character()
if (is.infinite(age))
reasons[["age"]] <- "rotation `age` is infinite"
if (is.infinite(size)){
reasons[["size"]] <- "rotation `size` is infinite"
} else if (file.size(self$origin) < size) {
reasons[["size"]] <- sprintf(
"size of '%s'(%s) is smaller than %s.",
self$origin, fmt_bytes(file.size(self$origin)), fmt_bytes(size)
)
}
message("Not rotating: ", paste(reasons, collapse = ", "))
}
return(FALSE)
}
if (is.null(last_rotation))
return(TRUE)
else if (is_parsable_datetime(age))
return(is_backup_older_than_datetime(last_rotation, age, verbose = verbose))
else if (is_parsable_rotation_interval(age))
return(is_backup_older_than_interval(last_rotation, age, now, verbose = verbose))
stop("`age` must be a parsable date or datetime")
},
update_backups_cache = function(){
res <- super$files
if (nrow(res) < 1){
res <- EMPTY_BACKUPS_DATETIME
} else {
sel <- vapply(res$sfx, is_parsable_datetime, logical(1))
res <- res[sel, ]
res$timestamp <- parse_datetime(res$sfx)
res <- res[order(res$timestamp, decreasing = TRUE), ]
}
private[["backups_cache"]] <- res
self
},
set_max_backups = function(
x
){
assert(is.infinite(x) || is_n0(x) || is.character(x) || is_Date(x))
if (is.infinite(x)){
} else if (is.character(x)){
if (is_parsable_rotation_interval(x)){
x <- parse_rotation_interval(x)
} else {
x <- parse_date(x)
}
} else if (is_n0(x)){
x <- as.integer(x)
}
private[[".max_backups"]] <- x
self
},
set_fmt = function(x){
assert_valid_datetime_format(x)
private[[".fmt"]] <- x
self
},
set_cache_backups = function(x){
assert(is_scalar_bool(x))
private$.cache_backups <- x
self$update_backups_cache()
}
),
active = list(
fmt = function(){
get(".fmt", envir = private, mode = "character")
},
cache_backups = function(){
get(".cache_backups", envir = private, mode = "logical")
},
last_rotation = function() {
bus <- get("files", envir = self)
if (nrow(bus) < 1) {
NULL
} else {
max(get("files", envir = self)$timestamp)
}
},
files = function(){
if (!get(".cache_backups", envir = private, mode = "logical")){
self$update_backups_cache()
}
get("backups_cache", envir = private)
}
),
private = list(
backups_cache = NULL,
.cache_backups = NULL,
.fmt = NULL
)
)
BackupQueueDate <- R6::R6Class(
inherit = BackupQueueDateTime,
"BackupQueueDate",
cloneable = FALSE,
public = list(
initialize = function(
origin,
dir = dirname(origin),
max_backups = Inf,
compression = FALSE,
fmt = "%Y-%m-%d",
cache_backups = FALSE,
backup_dir = NULL
){
if (!is.null(backup_dir)){
.Deprecated(msg = "the `backup_dir` argument is deprecated, please use `dir` instead.")
dir <- backup_dir
}
self$set_origin(origin)
self$set_dir(dir)
self$set_compression(compression)
self$set_max_backups(max_backups)
self$set_fmt(fmt)
self$set_cache_backups(cache_backups)
self$update_backups_cache()
},
set_fmt = function(x){
assert_valid_date_format(x)
private[[".fmt"]] <- x
self
}
),
active = list(
last_rotation = function() {
bus <- get("files", envir = self)
if (nrow(bus) < 1) {
NULL
} else {
as.Date(as.character(max(get("files", envir = self)$timestamp)))
}
}
)
)
filenames_as_matrix <- function(
file,
backups
){
if (length(backups) < 1)
return(NULL)
file_dir <- dirname(file)
file_name <- basename(tools::file_path_sans_ext(file))
file_ext <- tools::file_ext(file)
back_dir <- dirname(backups)
assert(
all_are_identical(back_dir),
"All backups of `file` must be in the same directory, not \n",
paste("*", unique(back_dir), collapse = "\n")
)
back_names <- basename(backups)
filename_end <-
attr(gregexpr(file_name, back_names[[1]])[[1]], "match.length") + 1L
a <- strsplit_at_seperator_pos(back_names, filename_end)
assert(
all_are_identical(a[, 1]),
"All backups of `file` must have the same basename, not \n",
paste("*", unique(a[, 1]), collapse = "\n")
)
if (!is_blank(file_ext)){
ext_start <- unlist(gregexpr(file_ext, a[, 2]))
b <- strsplit_at_seperator_pos(a[, 2], ext_start - 1L)
res <- cbind(back_dir, a[, 1], b)
colnames(res) <- c("dir", "name", "sfx", "ext")
} else {
res <- cbind(back_dir, a, "")
colnames(res) <- c("dir", "name", "sfx", "ext")
}
assert(is.matrix(res))
res
}
get_backups <- function(
origin,
potential_backups,
sfx_patterns
){
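  # A potential backup is accepted when its basename starts with the origin's
  # basename and its suffix normalises to a 1-14 digit stamp (an index, date,
  # or datetime).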
if (!length(potential_backups))
return(character())
sfx_patterns <- paste0("(", sfx_patterns, ")", collapse = "|")
file_dir <- dirname(origin)
file_name <- basename(tools::file_path_sans_ext(origin))
file_ext <- tools::file_ext(origin)
back_dir <- dirname(potential_backups)
assert(
all_are_identical(back_dir),
"All backups of `origin` must be in the same directory, not \n",
paste("*", unique(back_dir), collapse = "\n")
)
back_names <- basename(potential_backups)
sel <- grepl(paste0("^", file_name), back_names)
backups <- potential_backups[sel]
back_names <- basename(backups)
if (!length(backups))
return(character())
file_sufext <- substr(back_names, nchar(file_name) + 2L, 64000L)
sfx <- gsub("\\..*", "", file_sufext)
sfx <- standardize_datetime_stamp(sfx)
sel <- grepl( "^\\d{1,14}$", sfx)
sort(backups[sel])
}
select_prune_files_by_age <- function(
path,
timestamp,
max_age,
now
){
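  # `max_age` may be a fixed date (e.g. "2021-01-01"), a datetime, or a
  # rotation interval such as "2 months"; backups whose timestamp falls before
  # the resulting limit are selected for removal.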
assert(is.character(path))
assert(is_POSIXct(timestamp))
assert(is_Date(now))
assert(is_equal_length(path, timestamp))
if (is_parsable_date(max_age)){
limit <- parse_date(max_age)
to_remove <- path[as.Date(as.character(timestamp)) < limit]
} else if (is_parsable_datetime(max_age)){
limit <- parse_datetime(max_age)
to_remove <- path[timestamp < limit]
} else if (is_parsable_rotation_interval(max_age)){
max_age <- parse_rotation_interval(max_age)
now <- as.Date(now)
if (identical(max_age[["unit"]], "year")){
limit <- dint::first_of_year(dint::get_year(now) - max_age$value + 1L)
} else if (identical(max_age[["unit"]], "quarter")){
limit <- dint::first_of_quarter(dint::as_date_yq(now) - max_age$value + 1L)
} else if (identical(max_age[["unit"]], "month")) {
limit <- dint::first_of_month(dint::as_date_ym(now) - max_age$value + 1L)
} else if (identical(max_age[["unit"]], "week")){
limit <- dint::first_of_isoweek(dint::as_date_yw(now) - max_age$value + 1L)
} else if (identical(max_age[["unit"]], "day")){
limit <- as.Date(as.character(now)) - max_age$value + 1L
}
to_remove <- path[as.Date(as.character(timestamp)) < limit]
} else {
stop(ValueError(paste0(preview_object(max_age), " is not a valid timestamp or interval. See ?rotate_time for more info.")))
}
to_remove
}
EMPTY_BACKUPS <- data.frame(
path = character(0),
name = character(0),
sfx = character(0),
ext = character(0),
size = numeric(0),
isdir = logical(0),
  mode = structure(integer(0), class = "octmode"),
mtime = structure(numeric(0), class = c("POSIXct", "POSIXt")),
ctime = structure(numeric(0), class = c("POSIXct", "POSIXt")),
atime = structure(numeric(0), class = c("POSIXct", "POSIXt")),
uid = integer(0),
gid = integer(0),
uname = character(0),
grname = character(0),
stringsAsFactors = FALSE
)
EMPTY_BACKUPS_DATETIME <- EMPTY_BACKUPS
EMPTY_BACKUPS_DATETIME$timestamp <-
structure(numeric(0), class = c("POSIXct", "POSIXt"), tzone = "")
EMPTY_BACKUPS_INDEX <- EMPTY_BACKUPS
EMPTY_BACKUPS_INDEX$index <- integer(0) |
genbernoullidata <- function(sample_size, prob1, odds_ratio) {
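  # Solve the odds ratio for the treatment-arm probability:
  # OR = [p2/(1-p2)] / [p1/(1-p1)]  =>  p2 = A / (1 + A), A = OR * p1/(1-p1).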
A <- odds_ratio * (prob1 / (1 - prob1))
prob2 <- A / (1 + A)
y1 <- stats::rbinom(sample_size, size = 1, prob = prob1)
y2 <- stats::rbinom(sample_size, size = 1, prob = prob2)
subjid <- seq(from = 1, to = 2 * sample_size)
trt <- c(rep(0, sample_size), rep(1, sample_size))
y <- c(y1, y2)
gendata <- data.frame(subjid, trt, y)
colnames(gendata) <- c("id", "treatment", "y")
return(gendata)
}
bernoulliloglike <- function(params, randdata, histdata, a0) {
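  # Power-prior log-likelihood: the randomized-trial contribution plus the
  # historical-control contribution downweighted by a0; the negative is
  # returned because stats::optim() minimizes.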
beta0 <- params[1]
beta1 <- params[2]
logit_i <- beta0 + beta1 * randdata$treatment
prob_i <- exp(logit_i) / (1 + exp(logit_i))
ll_R <- stats::dbinom(randdata$y, size = 1, prob = prob_i, log = TRUE)
probH_i <- exp(beta0) / (1 + exp(beta0))
ll_H <- stats::dbinom(histdata$y, size = 1, prob = probH_i, log = TRUE)
ll <- sum(ll_R) + a0 * sum(ll_H)
return(-ll)
}
bernoullitrialsimulator <- function(sample_size_val, histdata, prob1_val, odds_ratio_val, a0_val, alpha) {
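  # One simulated trial: generate data, fit the power-prior-weighted logistic
  # model by Nelder-Mead (initialised at a standard glm fit), then form a Wald
  # confidence interval for the odds ratio and reject when it excludes 1.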
sampleranddata <- genbernoullidata(sample_size = sample_size_val, prob1 = prob1_val, odds_ratio = odds_ratio_val)
initializemodel <- stats::glm(y ~ treatment, family = stats::binomial(link = "logit"), data = sampleranddata)
initialbeta0 <- initializemodel$coefficients[1]
initialbeta1 <- initializemodel$coefficients[2]
fitmod <- stats::optim(c(initialbeta0, initialbeta1), bernoulliloglike, randdata = sampleranddata, histdata = histdata,
a0 = a0_val, method = "Nelder-Mead", hessian = TRUE)
modparm <- fitmod$par
covarmat <- solve(fitmod$hessian)
logoddsratio <- modparm[2]
odds_ratio <- exp(logoddsratio)
lower_oddsratio <- exp(logoddsratio - stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2]))
upper_oddsratio <- exp(logoddsratio + stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2]))
reject <- ifelse(((lower_oddsratio > 1) | (upper_oddsratio < 1)), 1, 0)
output <- c(odds_ratio, covarmat[2, 2], reject)
names(output) <- c("odds_ratio", "log_or_var", "reject")
return(output)
}
bernoullitrialsimulatornohist <- function(sample_size_val, prob1_val, odds_ratio_val, alpha) {
sampleranddata <- genbernoullidata(sample_size = sample_size_val, prob1 = prob1_val, odds_ratio = odds_ratio_val)
initializemodel <- stats::glm(y ~ treatment, family = stats::binomial(link = "logit"), data = sampleranddata)
modparm <- initializemodel$coefficients
covarmat <- stats::vcov(initializemodel)
logoddsratio <- modparm[2]
odds_ratio <- exp(logoddsratio)
lower_oddsratio <- exp(logoddsratio - stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2]))
upper_oddsratio <- exp(logoddsratio + stats::qnorm(1 - alpha/2) * sqrt(covarmat[2, 2]))
reject <- ifelse(((lower_oddsratio > 1) | (upper_oddsratio < 1)), 1, 0)
output <- c(odds_ratio, covarmat[2, 2], reject)
names(output) <- c("odds_ratio", "log_or_var", "reject")
return(output)
}
bernoulli_sim <- function(trial_reps=100, subj_per_arm, a0_vals, effect_vals,
rand_control_diff, hist_control_data, alpha=0.05,
get_var=FALSE, get_bias=FALSE, get_mse=FALSE,
quietly=TRUE) {
histdata = hist_control_data
hist_model <- stats::glm(y ~ 1, family = stats::binomial(link = "logit"), data = histdata)
initialprob1 <- exp(hist_model$coefficients[1])/(1 + exp(hist_model$coefficients[1]))
len_val <- length(rand_control_diff) * length(effect_vals) * length(a0_vals) * length(subj_per_arm)
power_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
est_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
if (get_mse == TRUE) {
mse_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_bias == TRUE) {
bias_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_var == TRUE) {
var_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
for (diffs in 1:length(rand_control_diff)) {
rand_cont_odds <- (initialprob1 / (1 - initialprob1)) * rand_control_diff[diffs]
adjprob1 <- rand_cont_odds / (1 + rand_cont_odds)
for (effvals in 1:length(effect_vals)) {
for (a0vals in 1:length(a0_vals)) {
for (sizes in 1:length(subj_per_arm)) {
if (!quietly){
cat("\r", c(subj_per_arm[sizes], a0_vals[a0vals], effect_vals[effvals], rand_control_diff[diffs]))
}
collect <- matrix(rep(0, 3 * trial_reps), ncol = 3)
for (k in 1:trial_reps) {
collect[k, ] <- bernoullitrialsimulator(sample_size_val = subj_per_arm[sizes], histdata, prob1_val = adjprob1,
odds_ratio_val = effect_vals[effvals], a0_val = a0_vals[a0vals], alpha = alpha)
}
colnames(collect) <- c("odds_ratio", "log_or_var", "reject")
power_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 3])
est_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1])
if (get_bias == TRUE) {
bias_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1] - effect_vals[effvals])
}
if (get_var == TRUE) {
var_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1]*sqrt(collect[, 2]))^2)
}
if (get_mse == TRUE) {
mse_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1] - effect_vals[effvals])^2)
}
if (!quietly){
cat("\r", " ")
}
}
}
}
}
cat("\n")
if (get_bias == FALSE & get_var == FALSE & get_mse == FALSE) {
if (length(subj_per_arm) == 1) {
dimnames(power_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(power_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(power_results)[[2]] <- as.character(a0_vals)
dimnames(power_results)[[3]] <- as.character(effect_vals)
dimnames(power_results)[[4]] <- as.character(rand_control_diff)
if (length(subj_per_arm) == 1) {
dimnames(est_results)[[1]] <- list(as.character(subj_per_arm))
}
if (length(subj_per_arm) > 1) {
dimnames(est_results)[[1]] <- as.character(subj_per_arm)
}
dimnames(est_results)[[2]] <- as.character(a0_vals)
dimnames(est_results)[[3]] <- as.character(effect_vals)
dimnames(est_results)[[4]] <- as.character(rand_control_diff)
output <- list(power_results, est_results)
names(output) <- c("power", "est")
}
  # The remaining get_bias/get_var/get_mse combinations repeated the same
  # dimnames block for every result array; label each array once instead.
  # Output element order matches the original branches: power, est, var, bias, mse.
  label_results <- function(res) {
    if (length(subj_per_arm) == 1) {
      dimnames(res)[[1]] <- list(as.character(subj_per_arm))
    }
    if (length(subj_per_arm) > 1) {
      dimnames(res)[[1]] <- as.character(subj_per_arm)
    }
    dimnames(res)[[2]] <- as.character(a0_vals)
    dimnames(res)[[3]] <- as.character(effect_vals)
    dimnames(res)[[4]] <- as.character(rand_control_diff)
    res
  }
  if (get_bias == TRUE | get_var == TRUE | get_mse == TRUE) {
    output <- list(power = label_results(power_results), est = label_results(est_results))
    if (get_var == TRUE) output$var <- label_results(var_results)
    if (get_bias == TRUE) output$bias <- label_results(bias_results)
    if (get_mse == TRUE) output$mse <- label_results(mse_results)
  }
class_out <- list(data = output, subj_per_arm = subj_per_arm, a0_vals = a0_vals, effect_vals = effect_vals, rand_control_diff = rand_control_diff, objtype= 'historic')
class(class_out) <- append("bayes_ctd_array", class(class_out))
return(class_out)
}
simple_bernoulli_sim <- function(trial_reps=100, subj_per_arm, effect_vals, prob1_val,
alpha=0.05, get_var=FALSE, get_bias=FALSE,
get_mse=FALSE, quietly=TRUE) {
rand_control_diff <- 1
a0_vals <- 0
len_val <- length(rand_control_diff) * length(effect_vals) * length(a0_vals) * length(subj_per_arm)
power_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
est_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
if (get_mse == TRUE) {
mse_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_bias == TRUE) {
bias_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
if (get_var == TRUE) {
var_results <- array(rep(0, len_val), c(length(subj_per_arm), length(a0_vals), length(effect_vals), length(rand_control_diff)))
}
for (diffs in 1:length(rand_control_diff)) {
for (effvals in 1:length(effect_vals)) {
for (a0vals in 1:length(a0_vals)) {
for (sizes in 1:length(subj_per_arm)) {
if (!quietly){
cat("\r", c(subj_per_arm[sizes], a0_vals[a0vals], effect_vals[effvals], rand_control_diff[diffs]))
}
collect <- matrix(rep(0, 3 * trial_reps), ncol = 3)
for (k in 1:trial_reps) {
collect[k, ] <- bernoullitrialsimulatornohist(sample_size_val = subj_per_arm[sizes], prob1_val = prob1_val,
odds_ratio_val = effect_vals[effvals], alpha = alpha)
}
colnames(collect) <- c("odds_ratio", "log_or_var", "reject")
power_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 3])
est_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1])
if (get_bias == TRUE) {
bias_results[sizes, a0vals, effvals, diffs] <- mean(collect[, 1] - effect_vals[effvals])
}
if (get_var == TRUE) {
var_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1]*sqrt(collect[, 2]))^2)
}
if (get_mse == TRUE) {
mse_results[sizes, a0vals, effvals, diffs] <- mean((collect[, 1] - effect_vals[effvals])^2)
}
if (!quietly){
cat("\r", " ")
}
}
}
}
}
cat("\n")
  # Each of the eight get_bias/get_var/get_mse combinations repeated the same
  # dimnames block for every result array; label each array once instead.
  # Output element order matches the original branches: power, est, var, bias, mse.
  label_results <- function(res) {
    if (length(subj_per_arm) == 1) {
      dimnames(res)[[1]] <- list(as.character(subj_per_arm))
    }
    if (length(subj_per_arm) > 1) {
      dimnames(res)[[1]] <- as.character(subj_per_arm)
    }
    dimnames(res)[[2]] <- as.character(a0_vals)
    dimnames(res)[[3]] <- as.character(effect_vals)
    dimnames(res)[[4]] <- as.character(rand_control_diff)
    res
  }
  output <- list(power = label_results(power_results), est = label_results(est_results))
  if (get_var == TRUE) output$var <- label_results(var_results)
  if (get_bias == TRUE) output$bias <- label_results(bias_results)
  if (get_mse == TRUE) output$mse <- label_results(mse_results)
class_out <- list(data = output, subj_per_arm = subj_per_arm, a0_vals = 0, effect_vals = effect_vals, rand_control_diff = 1, objtype= 'simple')
class(class_out) <- append("bayes_ctd_array", class(class_out))
return(class_out)
} |
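# Usage sketch (illustrative values only; simple_bernoulli_sim() simulates
# two-arm Bernoulli trials without historic borrowing, so a0 is fixed at 0
# and effect_vals are odds ratios relative to the control probability):
# sims <- simple_bernoulli_sim(trial_reps = 100, subj_per_arm = c(50, 100),
#                              effect_vals = c(0.6, 1), prob1_val = 0.6,
#                              get_var = TRUE)
# sims$data$power # 4-d array of simulated power, labeled by scenario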
library(testthat)
library(parsnip)
library(dplyr)
library(rlang)
source(test_path("helper-objects.R"))
hpc <- hpc_data[1:150, c(2:5, 8)]
context("prediciton with failed models")
hpc_bad <-
hpc %>%
mutate(big_num = Inf)
lending_club <-
lending_club %>%
dplyr::slice(1:200) %>%
mutate(big_num = Inf)
lvl <- levels(lending_club$Class)
ctrl <- control_parsnip(catch = TRUE)
test_that('numeric model', {
lm_mod <-
linear_reg() %>%
set_engine("lm") %>%
fit(compounds ~ ., data = hpc_bad, control = ctrl)
expect_warning(num_res <- predict(lm_mod, hpc_bad[1:11, -1]))
expect_equal(num_res, NULL)
expect_warning(ci_res <- predict(lm_mod, hpc_bad[1:11, -1], type = "conf_int"))
expect_equal(ci_res, NULL)
expect_warning(pi_res <- predict(lm_mod, hpc_bad[1:11, -1], type = "pred_int"))
expect_equal(pi_res, NULL)
})
test_that('classification model', {
log_reg <-
logistic_reg() %>%
set_engine("glm") %>%
fit(Class ~ log(funded_amnt) + int_rate + big_num, data = lending_club, control = ctrl)
expect_warning(
cls_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class))
)
expect_equal(cls_res, NULL)
expect_warning(
prb_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class), type = "prob")
)
expect_equal(prb_res, NULL)
expect_warning(
ci_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class), type = "conf_int")
)
expect_equal(ci_res, NULL)
}) |
ui.overlay <- function() {
tabItem(
tabName = "overlay",
conditionalPanel("output.overlay_display_flag == false", ui.notice.no.pred.original()),
conditionalPanel(
condition = "output.overlay_display_flag",
fluidRow(
column(
width = 4,
fluidRow(
box(
title = "Import Study Area Polygon", status = "warning", solidHeader = FALSE, width = 12, collapsible = TRUE,
checkboxInput("overlay_bound", "Clip the base geometry to a study area polygon in the overlay process", value = FALSE),
conditionalPanel(
condition = "input.overlay_bound == true",
column(
width = 12,
tags$h5("Uncheck the above checkbox to remove an imported study area polygon"),
selectInput("overlay_bound_file_type", tags$h5("File type"), choices = file.type.list1, selected = 1, width = "70%")
),
box(
width = 12,
conditionalPanel(
condition = "input.overlay_bound_file_type == 1",
ui.instructions.upload.csv(),
ui.instructions.poly.csv.single(),
fileInput("overlay_bound_csv_file", label.csv.upload, accept = ".csv"),
textOutput("overlay_bound_csv_text")
),
conditionalPanel(
condition = "input.overlay_bound_file_type == 2",
ui.instructions.upload.shp(),
fileInput("overlay_bound_gis_shp_files", label.shp.upload, multiple = TRUE),
textOutput("overlay_bound_gis_shp_text")
),
conditionalPanel(
condition = "input.overlay_bound_file_type == 3",
ui.instructions.upload.gdb(),
textInput("overlay_bound_gis_gdb_path", label.gdb.path, value = ".../folder.gdb"),
textInput("overlay_bound_gis_gdb_name", label.gdb.name, value = ""),
actionButton("overlay_bound_gis_gdb_load", label.gdb.upload),
textOutput("overlay_bound_gis_gdb_text")
)
),
column(12, tags$span(textOutput("overlay_bound_message"), style = "color: blue;"))
)
),
box(
title = "Import Erasing Polygon", status = "warning", solidHeader = FALSE, width = 12, collapsible = TRUE,
checkboxInput("overlay_land", "Erase area from the base geometry in the overlay process", value = FALSE),
conditionalPanel(
condition = "input.overlay_land == true",
column(
width = 12,
tags$h5("Uncheck the above checkbox to remove an imported erasing polygon"),
radioButtons("overlay_land_load_type", NULL,
choices = list("Use provided erasing polygon" = 1, "Upload personal erasing polygon" = 2),
selected = 1),
conditionalPanel(
condition = "input.overlay_land_load_type == 2 ",
selectInput("overlay_land_file_type", tags$h5("File type"), choices = file.type.list1, selected = 1, width = "70%")
)
),
box(
width = 12,
conditionalPanel(
condition = "input.overlay_land_load_type == 1",
helpText(
"The provided erasing polygon is from the Global Self-consistent, Hierarchical, ",
"High-resolution Geography (GSHHG) Database. It is a low-resolution polygon that",
"represents the land of all continents, including Antarctica,",
"but not lakes, rivers, or islands within those continents.",
tags$br(),
"See the", tags$a("GSHHG website", href = "http://www.soest.hawaii.edu/pwessel/gshhg/"),
"for more information about the provided erasing polygon,",
"or to download polygons with higher resolutions."
),
actionButton("overlay_land_provided", "Import provided erasing polygon"),
tags$span(textOutput("overlay_land_prov_message"), style = "color: blue"),
textOutput("overlay_land_prov_text")
),
conditionalPanel(
condition = "input.overlay_land_load_type == 2",
conditionalPanel(
condition = "input.overlay_land_file_type == 1",
ui.instructions.upload.csv(),
ui.instructions.poly.csv(),
fileInput("overlay_land_csv_file", label.csv.upload, accept = ".csv"),
textOutput("overlay_land_csv_text")
),
conditionalPanel(
condition = "input.overlay_land_file_type == 2",
ui.instructions.upload.shp(),
fileInput("overlay_land_gis_shp_files", label.shp.upload, multiple = TRUE),
textOutput("overlay_land_gis_shp_text")
),
conditionalPanel(
condition = "input.overlay_land_file_type == 3",
ui.instructions.upload.gdb(),
textInput("overlay_land_gis_gdb_path", label.gdb.path, value = ".../folder.gdb"),
textInput("overlay_land_gis_gdb_name", label.gdb.name, value = ""),
actionButton("overlay_land_gis_gdb_load", label.gdb.upload),
textOutput("overlay_land_gis_gdb_text")
)
)
),
column(12, tags$span(textOutput("overlay_land_message"), style = "color: blue;"))
)
)
)
),
column(
width = 8,
fluidRow(
box(
title = "Imported Original Predictions", status = "warning", solidHeader = FALSE, width = 12, collapsible = TRUE,
ui.instructions.table.select(text.pre = "original", text.in = "to use as the base geometry:", sel.num = 1),
conditionalPanel("input.overlay_loaded_table_stats != true", DTOutput("overlay_loaded_table")),
conditionalPanel("input.overlay_loaded_table_stats", DTOutput("overlay_loaded_stats_table")),
column(12, checkboxInput("overlay_loaded_table_stats", paste("Display additional information - NOTE that you can only",
"select or deselect a row when this box is unchecked")))
)
),
fluidRow(
box(
title = "Overlay Predictions", status = "warning", solidHeader = FALSE, width = 12, collapsible = TRUE,
fluidRow(
column(
width = 6,
fluidRow(
box(
width = 12,
tags$strong("1) Overlay options: study area and erasing polygons"),
tags$h5("Import these polygons in their respecitve sections: 'Import Study Area Polygon' and",
"'Import Erasing Polygon'."),
helpText("Note that the study area polygon performs the same function as the 'clip feature' in the",
tags$a(href = "http://pro.arcgis.com/en/pro-app/tool-reference/analysis/clip.htm", "clip tool"),
"in ArcGIS, while the erasing polygon performs the same function as the 'erase feature' in the",
tags$a(href = "http://pro.arcgis.com/en/pro-app/tool-reference/analysis/erase.htm", "erase tool"),
"."),
tags$br(),
tags$strong("2) Overlay options: base geometry"),
tags$h5("Specify the base geometry in the 'Imported Original Predictions' table.")
),
box(
width = 12,
tags$strong("3) Overlay options: coordinate system"),
helpText("The overlay process involves calculating the intersection and area of polygons,",
"and thus the coordinate system in which the overlay is performed will have an effect on the results.",
tags$br(),
"NOTE: Performing the overlay in a lat/long coordinate system (geographic coordinates; e.g., WGS 84)",
"may take a long time"),
checkboxInput("overlay_proj_native",
"Perform the overlay in the native coordinate system of the specified base geometry",
value = TRUE),
conditionalPanel(
condition = "input.overlay_proj_native == false",
radioButtons("overlay_proj_method", NULL,
choices = list("Perform overlay in WGS 84 geographic coordinates" = 1,
"Select predictions with desired coordinate system" = 2,
"Enter numeric EPSG code" = 3),
selected = 1),
conditionalPanel(
condition = "input.overlay_proj_method == 1",
helpText("When calculating area using WGS 84 geographic coordinates, the following assumptions are made:",
"1) 'Equatorial axis of ellipsoid' = 6378137 and",
"2) 'Inverse flattening of ellipsoid' = 1/298.257223563.", tags$br(),
"See", tags$a("this article", href = "https://link.springer.com/article/10.1007%2Fs00190-012-0578-z"),
"for more details about assumptions that must be made when calculating the area",
"using WGS 84 geographic coordinates.")
),
uiOutput("overlay_proj_sdm_uiOut_select"),
conditionalPanel(
condition = "input.overlay_proj_method == 3",
numericInput("overlay_proj_epsg", tags$h5("EPSG code"), value = 4326, step = 1),
helpText("See", tags$a("epsg.io", href = "http://epsg.io/"), "or the",
tags$a("EPSG home page", href = "http://www.epsg.org/"), "for more information about EPSG codes")
)
)
)
)
),
column(
width = 6,
fluidRow(
box(
width = 12,
tags$strong("4) Overlay options: percent overlap threshold"),
tags$h5("Specify the percent overlap threshold."),
helpText("The percent overlap threshold is the minimum percentage of a base geometry polygon",
"that must overlap with overlaid prediction polygons.",
"All base geometry polygons with an overlap percentage less than this threshold",
"will be assigned an overlaid prediction value of 'NA'.",
tags$br(),
"A threshold of '0' means that base geometry polygons will not be assigned a prediction value of 'NA'",
"if they overlap with any original predictions."),
sliderInput("overlay_grid_coverage", label = NULL, min = 0, max = 100, value = 50)
),
box(
width = 12,
tags$strong("5) Perform overlay"),
helpText(tags$strong("Reminder: imported study area and erasing polygons will be used during",
"will overwrite previously created overlaid predictions.")),
actionButton("overlay_create_overlaid_models_modal", "Overlay all predictions onto the specified base geometry"),
textOutput("overlay_overlay_all_text"),
tags$br(),
tags$span(uiOutput("overlay_overlaid_models_message"), style = "color: blue")
)
)
)
)
)
),
fluidRow(
box(
title = "Base Geometry and Overlaid Predictions Previews", status = "primary", solidHeader = TRUE, width = 12,
collapsible = TRUE,
fluidRow(
column(3, radioButtons("overlay_preview_which", NULL,
choices = list("Base geometry preview" = 1, "Overlaid predictions preview" = 2))),
column(
width = 9,
fluidRow(
box(
width = 12,
conditionalPanel(
condition = "input.overlay_preview_which == 1",
helpText("The base geometry preview will be displayed in the Leaflet default coordinate system:",
"EPSG:3857 Web Mercator.",
"The base geometry will be outlined in black while, if applicable, the erasing and study area",
"polygons will be filled in tan with no outline and outlined in red with no fill, respectively.",
"The erasing polygon will be clipped to the extent of the base geometry,",
"plus two degrees in each direction.",
tags$br(), tags$br(),
"Any base geometry polygon that spans the antimeridian (i.e. 180 decimal degrees or the",
"equivalent in the base geometry coordinate system) will appear to be split at the antimeridian.",
"However, it will still be treated as a single polygon for the overlay."),
uiOutput("overlay_preview_base_execute_uiOut_button"),
textOutput("overlay_preview_base_create_text")
),
conditionalPanel(
condition = "input.overlay_preview_which == 2",
conditionalPanel("output.overlay_preview_display_flag == false", uiOutput("overlay_preview_message")),
conditionalPanel(
condition = "output.overlay_preview_display_flag",
fluidRow(
column(5, uiOutput("overlay_preview_overlaid_models_uiOut_selectize")),
column(3, offset = 1, radioButtons("overlay_preview_overlaid_models_perc", tags$h5("Units"),
choices = preview.static.perc, selected = 1)),
column(3, radioButtons("overlay_preview_overlaid_models_var", tags$h5("Uncertainty"),
choices = preview.static.var, selected = 1))
),
conditionalPanel(
condition = "input.overlay_preview_overlaid_models_var_preview_var == 2",
helpText("Uncertainty plots will have \"- SE\" in their title.",
"Uncertainty plots of units type 'values' will have the same",
"color scale as their assocaited predictions.")
),
actionButton("overlay_preview_overlaid_execute", "Preview selected overlaid predictions")
)
)
)
)
)
),
conditionalPanel(
condition = "input.overlay_preview_which == 1",
leafletOutput("overlay_preview_base")
),
conditionalPanel(
condition = "input.overlay_preview_which == 2",
plotOutput("overlay_preview_overlaid")
)
)
)
)
)
)
)
} |
ukc_forces <- function() {
df <- ukc_get_data("forces")
df
}
ukc_force_details <- function(force = NULL) {
if (is.null(force)) {
df <- ukc_get_data("forces")
} else {
df <- ukc_get_data(paste0("forces/", force))
}
df
}
ukc_officers <- function(force) {
query <- paste0("forces/", force, "/people")
df <- ukc_get_data(query)
df
} |
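# Usage sketch (assumes an internet connection; ukc_get_data() is the
# package-internal request helper used above, and "leicestershire" is one
# valid force id on data.police.uk):
# forces <- ukc_forces()
# leics <- ukc_force_details(force = "leicestershire")
# officers <- ukc_officers(force = "leicestershire")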
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
eval = FALSE
)
data.frame(stringsAsFactors=FALSE,
app = c("iris-cluster", "iris-cluster", "iris-cluster", "iris-cluster", "iris-cluster", "iris-cluster"),
user = c("dreamRs", "dreamRs", "dreamRs", "dreamRs", "dreamRs", "dreamRs"),
server_connected = c("2019-06-19 14:07:09", "2019-06-19 14:56:32",
"2019-06-19 14:57:45", "2019-06-19 15:03:00",
"2019-06-19 15:08:53", "2019-08-04 10:58:47"),
sessionid = c("9815194cfaa6317fbea68ae9537d63d1",
"0feeacf3201f5cca088059cec2b0a710",
"8b12c138f159cc5805e136338dddac59",
"f3950a1588b78d45c53c756a4136df67", "254cafb3d5866384f13022d066546cae",
"8a431f4b90b3b1926047dd2539be0793"),
server_disconnected = c("2019-06-19 14:07:17", "2019-06-19 14:57:39",
"2019-06-19 15:01:36", "2019-06-19 15:08:13",
"2019-06-19 15:09:38", "2019-08-04 10:58:49"),
user_agent = c("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36", NA),
screen_res = c("1920x1080", "1920x1080", "1920x1080", "1920x1080",
"1920x1080", NA),
browser_res = c("1574x724", "1920x937", "1920x937", "1920x937",
"1920x937", NA),
pixel_ratio = c(1L, 1L, 1L, 1L, 1L, NA),
browser_connected = c("2019-06-19 14:07:09", "2019-06-19 14:56:32",
"2019-06-19 14:57:45", "2019-06-19 15:03:00",
"2019-06-19 15:08:53", NA)
)
data.frame(stringsAsFactors=FALSE,
sessionid = c("9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1", "9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1", "9815194cfaa6317fbea68ae9537d63d1"),
name = c("xcol", "clusters", "clusters", "clusters", "clusters",
"clusters"),
timestamp = c("2019-06-19 14:07:11", "2019-06-19 14:07:15",
"2019-06-19 14:07:15", "2019-06-19 14:07:15",
"2019-06-19 14:07:14", "2019-06-19 14:07:13"),
value = c("Sepal.Width", 6, 4, 5, 7, 4),
type = c(NA, "shiny.number", "shiny.number", "shiny.number",
"shiny.number", "shiny.number"),
binding = c("shiny.selectInput", "shiny.numberInput", "shiny.numberInput",
"shiny.numberInput", "shiny.numberInput",
"shiny.numberInput")
)
data.frame(stringsAsFactors=FALSE,
sessionid = c("9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1", "9815194cfaa6317fbea68ae9537d63d1",
"9815194cfaa6317fbea68ae9537d63d1", "9815194cfaa6317fbea68ae9537d63d1"),
name = c("plot1", "plot1", "plot1", "plot1", "plot1", "plot1"),
timestamp = c("2019-06-19 14:07:15", "2019-06-19 14:07:15",
"2019-06-19 14:07:14", "2019-06-19 14:07:15",
"2019-06-19 14:07:11", "2019-06-19 14:07:10"),
binding = c("shiny.imageOutput", "shiny.imageOutput", "shiny.imageOutput",
"shiny.imageOutput", "shiny.imageOutput",
"shiny.imageOutput")
)
data.frame(stringsAsFactors=FALSE,
sessionid = c("f8f50a3743023aae7d0d6350a2fd6841"),
name = c("plot1"),
timestamp = c("2019-06-19 14:07:18"),
error = c("NA/NaN/Inf in foreign function call (arg 1)")
) |
diff_vec <- function(x, lag = 1, difference = 1, log = FALSE, initial_values = NULL, silent = FALSE) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
if (length(lag) > 1) rlang::abort("length(lag) > 1): Multiple lags detected. Use tk_augment_diff().")
if (length(difference) > 1) rlang::abort("diff_vec(length(difference) > 1): Multiple differences detected. Use tk_augment_diff().")
if (!is.null(initial_values)) rlang::warn("`initial_values` are not required for the `diff_vec()` calculation.")
if (!silent) message("diff_vec(): Initial values: ",
stringr::str_c(x[1:(lag * difference)], collapse = ", "))
ret_vec <- xts::diff.xts(
x = x,
lag = lag,
differences = difference,
arithmetic = TRUE,
log = log,
na.pad = TRUE
)
pad_len <- length(x) - length(ret_vec)
if (pad_len > 0) {
ret_vec <- c(rep(NA, pad_len), ret_vec)
}
return(ret_vec)
}
diff_inv_vec <- function(x, lag = 1, difference = 1, log = FALSE, initial_values = NULL) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
if (length(lag) > 1) stop(call. = FALSE, "diff_inv_vec(length(lag) > 1): Multiple lags detected. Use tk_augment_diff().")
if (length(difference) > 1) stop(call. = FALSE, "diff_inv_vec(length(difference) > 1): Multiple differences detected. Use tk_augment_diff().")
if (!is.null(initial_values)) {
if (length(initial_values) != (lag * difference)) {
stop(call. = FALSE, "diff_inv_vec(initial_values): Size of lag values must match the number of missing values generated by the differencing operation.")
}
}
na_len <- is.na(x) %>% sum()
x_trim <- x[!is.na(x)]
if (!log) {
if (!is.null(initial_values)) {
ret_vec <- stats::diffinv(x = x_trim, lag = lag, differences = difference, xi = initial_values)
} else {
ret_vec <- stats::diffinv(x = x_trim, lag = lag, differences = difference)
}
} else {
if (difference > 1) {
stop(call. = FALSE, "diff_inv_vec(log = TRUE): Log-Difference inversion for multiple differences is not yet implemented.")
}
if (is.null(initial_values)) {
initial_values <- rep(1, na_len)
}
ret_vec <- stats::diffinv(
x = x_trim,
lag = lag,
differences = difference
)
ret_vec <- exp(ret_vec) * initial_values[1]
}
return(ret_vec)
} |
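# Round-trip sketch (illustrative values): differencing and then inverting
# with the dropped initial value recovers the original series.
# x <- c(10, 12, 15, 19, 24)
# d <- diff_vec(x, lag = 1, difference = 1, silent = TRUE) # NA 2 3 4 5
# diff_inv_vec(d, lag = 1, difference = 1, initial_values = x[1]) # 10 12 15 19 24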
library(RcppEigen)
library(Rcpp)
library(RcppArmadillo)
perccal_interval = function(Xy, alpha, G = 20, B = 999, B2 = 999){
p = ncol(Xy)-1
theta.hat.boot = matrix(NA,nrow=p+1,ncol=B)
theta.hat.dboot = matrix(NA,nrow=p+1,ncol=B2)
theta.qtl.lgrid.lo = array(NA, dim=c(p+1,B,G))
theta.qtl.lgrid.hi = array(NA, dim=c(p+1,B,G))
perc.cal.ints = matrix(NA,nrow=p+1,ncol=2)
l.grid.lo = seq(.0001, alpha*1.4, length.out=G)
l.grid.hi = 1-l.grid.lo
X=as.matrix(Xy[,1:p])
Xm = cbind(1,X)
y=Xy[,p+1]
regr_output = coefficients(summary(lm(y~X)))
prednames = rownames(regr_output)
theta.hat = as.numeric(regr_output[,"Estimate"])
boot_output = Cdboot_multi(as.matrix(Xy), l.grid.lo, l.grid.hi, B, B2, G)
for(k in 1:(p+1)){
theta.hat.boot = boot_output$theta_hat_boot[,k]
theta.qtl.lgrid.lo = boot_output$theta_qtl_lgrid_lo[k,1][[1]][,,1]
theta.qtl.lgrid.hi = boot_output$theta_qtl_lgrid_hi[k,1][[1]][,,1]
log.lgrid.lo = theta.qtl.lgrid.lo < theta.hat[k]
log.lgrid.hi = theta.qtl.lgrid.hi > theta.hat[k]
percs.lo = 1-colSums(log.lgrid.lo)/B
percs.hi = 1-colSums(log.lgrid.hi)/B
lo.log = (percs.lo[1:(G-1)]<alpha)*(percs.lo[2:G]>=alpha)
hi.log = (percs.hi[1:(G-1)]<alpha)*(percs.hi[2:G]>=alpha)
if(sum(lo.log)>0){l.alpha.lo = l.grid.lo[lo.log*(1:(G-1))]}
if(sum(lo.log)<=0){l.alpha.lo = .0001}
if(sum(hi.log)>0){l.alpha.hi = l.grid.hi[hi.log*(1:(G-1))]}
if(sum(hi.log)<=0){l.alpha.hi = 1-.0001}
perc.cal.ints[k,] = quantile(theta.hat.boot,c(l.alpha.lo, l.alpha.hi))
}
colnames(perc.cal.ints) = c(paste("lower ",round(alpha,3),sep=""),paste("upper ",1-round(alpha,3),sep=""))
rownames(perc.cal.ints) = prednames
perc.cal.ints
}
Cquantile <- function(xx, p) {
.Call('perccal_Cquantile', PACKAGE = 'perccal', xx, p)
}
sample_rcpp <- function(N, nsamp) {
.Call('perccal_sample_rcpp', PACKAGE = 'perccal', N, nsamp)
}
Cdboot_multi <- function(xxyy, lgridlo, lgridhi, B, B2, G) {
.Call('perccal_Cdboot_multi', PACKAGE = 'perccal', xxyy, lgridlo, lgridhi, B, B2, G)
}
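# Usage sketch (illustrative simulated data; requires the compiled perccal
# routines registered by the .Call wrappers above):
# set.seed(1)
# Xy <- cbind(x1 = rnorm(50), y = rnorm(50))
# perccal_interval(Xy, alpha = 0.05, G = 20, B = 199, B2 = 199)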
|
Bcopula <-
function(mat.xy, m, both.cont = FALSE, tolimit = 0.00001){
n <- nrow(mat.xy)
if (!(m %in% (2:n))){
error.msg <- paste("Order m must be an integer value in 2:n, n =", n)
stop(error.msg)
} else{
if (both.cont){
SC <- subcopemc(mat.xy, m)
} else{
SC <- subcopem(mat.xy)
}
um <- (0:m)/m
vm <- um
Cm <- matrix(0, nrow = (m + 1), ncol = (m + 1))
Cm[ , m + 1] <- um
Cm[m + 1, ] <- vm
L <- function(u){
u.inf <- max(SC$part1[SC$part1 <= u])
u.sup <- min(SC$part1[SC$part1 >= u])
valor <- ifelse(u.inf < u.sup, (u - u.inf)/(u.sup - u.inf), 1)
return(valor)
}
M <- function(v){
v.inf <- max(SC$part2[SC$part2 <= v])
v.sup <- min(SC$part2[SC$part2 >= v])
valor <- ifelse(v.inf < v.sup, (v - v.inf)/(v.sup - v.inf), 1)
return(valor)
}
S <- SC$matrix
n1 <- nrow(S)
n2 <- ncol(S)
iu.inf <- function(u) sum(SC$part1 <= u)
iu.sup <- function(u) n1 - sum(SC$part1 >= u) + 1
iv.inf <- function(v) sum(SC$part2 <= v)
iv.sup <- function(v) n2 - sum(SC$part2 >= v) + 1
C.bilineal <- function(u, v) (1-L(u))*(1-M(v))*S[iu.inf(u),iv.inf(v)] +
(1-L(u))*M(v)*S[iu.inf(u),iv.sup(v)] +
L(u)*(1-M(v))*S[iu.sup(u),iv.inf(v)] +
L(u)*M(v)*S[iu.sup(u),iv.sup(v)]
for (i in 2:m){
for (j in 2:m){
Cm[i, j] <- C.bilineal(um[i], vm[j])
}
}
BCopula <- function(u, v) sum(Cm*(dbinom(0:(dim(Cm)[1] - 1), dim(Cm)[1] - 1, u)%*%t(dbinom(0:(dim(Cm)[1] - 1), dim(Cm)[1] - 1, v))))
BC.du <- function(u, v) (dim(Cm)[1] - 1)*sum(Cm*((dbinom(-1:(dim(Cm)[1] - 2), dim(Cm)[1] - 2, u) - dbinom(0:(dim(Cm)[1] - 1),
dim(Cm)[1] - 2, u)*c(-1, rep(1, dim(Cm)[1] - 1)))%*%t(dbinom(0:(dim(Cm)[1] - 1), dim(Cm)[1] - 1, v))))
BC.du.aux <- function(v, ua.vec) BC.du(ua.vec[1], v) - ua.vec[2]
BC.du.inv <- function(u, a) uniroot(BC.du.aux, interval = c(0, 1), ua.vec = c(u, a), tol = tolimit)$root
tCm <- t(Cm)
BC.dv0 <- function(u, v) (dim(tCm)[1] - 1)*sum(tCm*((dbinom(-1:(dim(tCm)[1] - 2), dim(tCm)[1] - 2, u) - dbinom(0:(dim(tCm)[1] - 1),
dim(tCm)[1] - 2, u)*c(-1, rep(1, dim(tCm)[1] - 1)))%*%t(dbinom(0:(dim(tCm)[1] - 1), dim(tCm)[1] - 1, v))))
BC.dv0.aux <- function(v, ua.vec) BC.dv0(ua.vec[1], v) - ua.vec[2]
BC.dv0.inv <- function(u, a) uniroot(BC.dv0.aux, interval = c(0, 1), ua.vec = c(u, a), tol = tolimit)$root
BC.dv <- function(u, v) BC.dv0(v, u)
BC.dv.inv <- function(v, a) BC.dv0.inv(v, a)
Bdensity <- function(u, v) ((dim(Cm)[1] - 1)^2)*sum(Cm*((dbinom(-1:(dim(Cm)[1] - 2), dim(Cm)[1] - 2, u) - dbinom(0:(dim(Cm)[1] - 1),
dim(Cm)[1] - 2, u)*c(-1, rep(1, dim(Cm)[1] - 1)))%*%t(dbinom(-1:(dim(Cm)[1] - 2), dim(Cm)[1] - 2, v) -
dbinom(0:(dim(Cm)[1] - 1), dim(Cm)[1] - 2, v) * c(-1, rep(1, dim(Cm)[1] - 1)))))
lista <- list(copula = BCopula, du = BC.du, du.inv = BC.du.inv, dv = BC.dv, dv.inv = BC.dv.inv,
density = Bdensity, bilinearCopula = C.bilineal, bilinearSubcopula = Cm, sample.size = n,
order = m, both.cont = both.cont, tolerance = tolimit, subcopemObject = SC)
return(lista)
}
} |
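# Usage sketch (illustrative data; subcopem()/subcopemc() must be available
# from the same package):
# xy <- cbind(rnorm(200), rnorm(200))
# B <- Bcopula(xy, m = 10, both.cont = TRUE)
# B$copula(0.5, 0.5)  # Bernstein copula approximation at (0.5, 0.5)
# B$density(0.5, 0.5) # corresponding density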
plot.estPI <- function(x,col="black",highlight=NULL,hlCol="red",pch=20,zoom=FALSE,...){
if(x$type=="single")
{
estPlotSingle(x,col=col,highlight=highlight,hlCol=hlCol,pch=pch,zoom=zoom,...)
} else if(x$type=="pair")
{
estPlotPair(x,col=col,highlight=highlight,hlCol=hlCol,pch=pch,zoom=zoom,...)
} else if(x$type=="triple")
{
if(!is.vector(x$obs)){
if(nrow(x$probs)==1){
stop("There is only one row of probabilities. You might want to use the option 'order=FALSE' in the estPI call in order to get all combinations.")
}
}
estPlotTriple(x,col=col,highlight=highlight,hlCol=hlCol,pch=pch,zoom=zoom,...)
}
} |
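# Usage sketch (hypothetical call; `x` is assumed to be an object returned by
# estPI(), which sets x$type to "single", "pair", or "triple"):
# pi.est <- estPI(X, g, type = "single") # hypothetical arguments
# plot(pi.est, highlight = 1, hlCol = "blue", zoom = TRUE)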
data(channing, package = "boot")
chan <- subset(channing, sex == "Male" & entry < exit)
attach(chan)
cKendall(entry, exit, cens)
cKendall(entry, exit, cens, method = "IPW1")
cKendall(entry, exit, cens, method = "IPW2")
detach(chan) |
knitr::opts_chunk$set(echo = TRUE)
library(rnpn) |
library(shiny)
library(shinyalert)
function(input, output, session) {
output$return <- renderText({
input$shinyalert
})
code <- reactive({
type <- input$type
if (type == "<none>") type <- ""
code <- paste0(
'shinyalert(\n',
' title = "', input$title, '",\n',
' text = "', input$text, '",\n',
' size = "', input$size, '", \n',
' closeOnEsc = ', input$closeOnEsc, ',\n',
' closeOnClickOutside = ', input$closeOnClickOutside, ',\n',
' html = ', input$html, ',\n',
' type = "', type, '",\n'
)
if (type == "input") {
code <- paste0(
code,
' inputType = "', input$inputType, '",\n',
' inputValue = "', input$inputValue, '",\n',
' inputPlaceholder = "', input$inputPlaceholder, '",\n'
)
}
code <- paste0(
code,
' showConfirmButton = ', input$showConfirmButton, ',\n',
' showCancelButton = ', input$showCancelButton, ',\n'
)
if (input$showConfirmButton) {
code <- paste0(
code,
' confirmButtonText = "', input$confirmButtonText, '",\n',
' confirmButtonCol = "', input$confirmButtonCol, '",\n'
)
}
if (input$showCancelButton) {
code <- paste0(
code,
' cancelButtonText = "', input$cancelButtonText, '",\n'
)
}
code <- paste0(
code,
' timer = ', input$timer, ',\n',
' imageUrl = "', input$imageUrl, '",\n'
)
if (input$imageUrl != "") {
code <- paste0(
code,
' imageWidth = ', input$imageWidth, ',\n',
' imageHeight = ', input$imageHeight, ',\n'
)
}
code <- paste0(
code,
' animation = ', input$animation, '\n'
)
code <- paste0(code, ")")
code
})
observeEvent(input$show, {
if (input$html && input$type == "input") {
shinyalert::shinyalert(text = "Cannot use 'input' type and HTML together (because when using HTML, you're able to provide custom shiny inputs/outputs).", type = "error")
return()
}
eval(parse(text = code()))
})
output$code <- renderText({
paste0(
"library(shiny)\nlibrary(shinyalert)\n\n",
"ui <- fluidPage()\n\n",
"server <- function(input, output) {\n ",
gsub("\n", "\n ", code()),
"\n}\n\nshinyApp(ui, server)"
)
})
} |
pmx_plot_vpc <-
function(ctr, type, idv, obs, pi, ci, rug, bin, is.legend, sim_blq, dname, filter,
strat.facet, facets, strat.color, trans, pmxgpar, labels,
axis.title, axis.text, ranges, is.smooth, smooth, is.band,
band, is.draft, draft, is.identity_line, identity_line,
scale_x_log10, scale_y_log10, color.scales, is.footnote,...) {
params <- as.list(match.call(expand.dots = TRUE))[-1]
params$is.smooth <- FALSE
wrap_pmx_plot_generic(ctr, "pmx_vpc", params)
} |
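# Usage sketch (assumes a ggPMX controller object `ctr`, e.g. built with pmx();
# unsupplied arguments are filled in by wrap_pmx_plot_generic()):
# pmx_plot_vpc(ctr)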
plotit <- function(model.name,
sub.name,
single.sub.data.clip,
data.list,
plot.subregion) {
png(paste(plot.subregion$save.subregions.plots,
model.name, "_", sub.name, ".png", sep = "" ))
x.list <- data.list[[1]]
if ( !is.null(plot.subregion$xlim) ) {
x.lim <- plot.subregion$xlim
} else {
x.lim <- c(range(single.sub.data.clip$lon[!is.na(x.list)])[1] - 1,
range(single.sub.data.clip$lon[!is.na(x.list)])[2] + 1)
}
if ( !is.null(plot.subregion$ylim) ) {
y.lim <- plot.subregion$ylim
} else {
y.lim <- c(range(single.sub.data.clip$lat[!is.na(x.list)])[1] - 1,
range(single.sub.data.clip$lat[!is.na(x.list)])[2] + 1)
}
  if (!is.null(plot.subregion$cex)) {
    fac <- plot.subregion$cex
  } else {
    fac <- 1 # default point scaling when no cex is supplied
  }
  weight.cex <- single.sub.data.clip$weight[which(!is.na(x.list))]
is.area.fraction <- "TRUE"
if (is.null(weight.cex)){
is.area.fraction <- "FALSE"
weight.cex <- 1
}
plot(single.sub.data.clip$lon[!is.na(x.list)],
single.sub.data.clip$lat[!is.na(x.list)],
xlim = x.lim,
ylim = y.lim,
cex = weight.cex * fac,
col = "red", pch = 20, xlab = "lon", ylab = "lat",
main = paste("climate model:", model.name, "\nregion:", sub.name, "\narea.fraction: ",is.area.fraction, sep =" "))
grid()
bound.sp <- getMap(resolution="less islands")
plot(bound.sp, add=TRUE)
dev.off()
} |
furnasI <- function(tree){
if (!inherits(tree,"phylo")) stop("The input tree must be in phylo-format.")
if (!is_binary(tree)) stop("The input tree is not binary.")
n <- length(tree$tip.label)
if(n == 1) return(1)
if(n == 2) return(1)
allranks <- getfurranks(tree)
return(allranks[n+1])
} |
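# Usage sketch (assumes ape is available for random tree generation;
# furnasI() requires a binary tree in phylo format):
# library(ape)
# tr <- rtree(8)
# furnasI(tr) # rank of the tree in the Furnas ordering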
HPDplotBygene <-
function(model,gene,conditions,pval="mcmc",newplot=T,ylimits=NULL,inverse=F,jitter=0,plot=T,yscale="log2",interval="ci",grid=F,zero=F,...){
if (inverse) inv=-1 else inv=1
res1=res2=c(rep(0,length(model$Sol[,1])))
names1=names2=c()
allnames=names(posterior.mode(model$Sol))
intercept=paste('gene',gene,sep="")
if (yscale=="log2") {
model$Sol=model$Sol/log(2)
Ylab="log2(abundance)"
} else {
if (yscale=="log10") {
model$Sol=model$Sol/log(10)
Ylab="log10(abundance)"
} else {
Ylab="ln(abundance)"
}
}
cstats=c();means=c();hpds=c();pz=c()
for (c in 1:length(names(conditions))) {
co=names(conditions)[c]
factors=conditions[[co]]$factors
factors2=conditions[[co]]$factors2
res1=res2=c(rep(0,length(model$Sol[,1])))
for (f in factors){
if (f==0) {
f1=intercept
} else {
pattern=paste('gene',gene,':',f,"$",sep="")
f1=grep(pattern,allnames)
}
res1=res1+model$Sol[,f1]
}
for (f in factors2){
if (f==0) {
f2=intercept
} else {
pattern=paste('gene',gene,':',f,sep="")
f2=grep(pattern, allnames)
}
res2=res2+model$Sol[,f2]
}
stat=(inv*(res1-res2))
means=append(means,mean(stat))
if (interval=="sd") hpds=rbind(hpds,c(mean(stat)-sd(stat),mean(stat)+sd(stat)),deparse.level=0)
if (interval=="ci") hpds=rbind(hpds,HPDinterval(stat),deparse.level=0)
row.names(hpds)=paste(row.names(hpds),co)
ress=data.frame(cbind("mean"=means,"lo"=hpds[,1],"up"=hpds[,2]),deparse.level=0)
if (yscale=="proportion") {
ress[ress<0]=0
ress=sin(ress)^2
Ylab="proportion"
}
cstats=cbind(cstats,stat,deparse.level=0)
names(cstats)[c]=co
}
cstats=data.frame(cstats)
tukey=data.frame(diag(length(conditions))*0)
names(tukey)=names(conditions)
row.names(tukey)=names(conditions)
meant=tukey
for (cc in 1:(length(names(tukey))-1)) {
for (cc2 in (cc+1):length(names(tukey))){
stat=cstats[,cc2]-cstats[,cc]
zs=mean(stat)/sd(stat)
if (pval=="z") pv=2*(1-pnorm(abs(zs)))
if (pval=="mcmc") pv=mcmc.pval(stat)
tukey[cc,cc2]=pv
meant[cc,cc2]=mean(stat)
}
}
if (plot==F) {
return(list(mean.pairwise.differences=meant,pvalues=tukey,min=min(ress$lo),max=max(ress$up)))
}
marg=0.25
if (yscale=="proportion") { marg=0.05 }
if(is.null(ylimits)) ylimits=c(min(ress[,2])-marg,max(ress[,3])+marg)
if (newplot==T) plot(c(1:length(conditions))+jitter,ress[,1],xlim=c(0.5,length(conditions)+0.5),xaxt="n",ylim=ylimits,xlab="",ylab=Ylab,mgp=c(2.3,1,0),...)
if (newplot==F) points(c(1:length(conditions))+jitter,ress[,1],...)
lines(c(1:length(conditions))+jitter,ress[,1],type="l",...)
arrows(c(1:length(conditions))+jitter,ress[,1],c(1:length(conditions))+jitter,ress[,2],angle=90,code=2,length=0.03,...)
arrows(c(1:length(conditions))+jitter,ress[,1],c(1:length(conditions))+jitter,ress[,3],angle=90,code=2,length=0.03,...)
if (grid==TRUE){
verts=seq(0.5,length(conditions)+0.5,1)
abline(v=verts,lty=3,col="grey60")
}
if(newplot==T) axis(side=1,labels=names(conditions),at=c(1:length(conditions)),las=2)
return(list(mean.pairwise.differences=meant,pvalues=tukey))
} |
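# Usage sketch (hypothetical names; `mm` is an MCMCglmm fit on stacked qPCR
# data, and each condition lists the fixed-effect terms to sum, with 0
# standing for the gene intercept):
# conds <- list(
#   control = list(factors = c(0), factors2 = c(0)),
#   heat = list(factors = c(0, "conditionheat"), factors2 = c(0))
# )
# HPDplotBygene(model = mm, gene = "hsp16", conditions = conds)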
PAMA.PL=function(datfile,PLdatfile,nRe,iter=1000,init="EMM"){
adaptation=0.25*iter
dat=datfile
datPL=PLdatfile
n=dim(dat)[1]
m=dim(dat)[2]
mPL=dim(PLdatfile)[2]
gamma.hyper=rep(0.15,m+mPL)
smlgamma.upper=5
smlgamma.lower=0.01
phi.start=0.3
phi.hyper=0.1
if(init=="EMM"){
mallowsinfer=ExtMallows::EMM((dat))
mallowsinfer=as.numeric(mallowsinfer$op.pi0)
mallowsinfer[mallowsinfer>nRe]=0
I.start=mallowsinfer
} else if(init=="mean"){
mallowsinfer=PerMallows::lmm(t(dat),dist.name="kendall",estimation="approx")
mallowsinfer=mallowsinfer$mode
mallowsinfer[mallowsinfer>nRe]=0
I.start=mallowsinfer
}
smlgamma.start=c(rep(3,m),rep(0.3,mPL))
I.mat=matrix(NA,n,iter)
l.mat=matrix(NA,1,iter)
I.r.list=list()
smlgamma.mat=matrix(NA,m+mPL,iter)
phi.mat=matrix(NA,iter,1)
for(i in 1:iter){
FdatPL=pl2fullV2(I.start,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
zerolist=which(I.start==0)
for(j in zerolist){
nonzerolist=which(I.start==nRe)
zeropvec=c()
I.new=replace(I.start,c(j,nonzerolist),I.start[c(nonzerolist,j)])
FdatPLnew=pl2fullV2(I.new,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
zeropvec=c(zeropvec,fulllikepower(dat = cbind(dat,FdatPLnew),I = I.new,phi = phi.start,smlgamma = smlgamma.start))
zeropvec=c(zeropvec,fulllikepower(dat = cbind(dat,FdatPL),I = I.start,phi = phi.start,smlgamma = smlgamma.start))
zeropvec=exp(zeropvec-(max(zeropvec)))
zeropvec=zeropvec/sum(zeropvec)
gibbsrlz=mc2d::rmultinomial(1,1,zeropvec)
pos=which(gibbsrlz==1)
if(pos==1){
I.start=I.new
}
FdatPL=pl2fullV2(I.start,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
}
FdatPL=pl2fullV2(I.start,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
for(j in (nRe-1):2){
pos1=which(I.start==j)
pos2=which(I.start==(j-1))
pos3=which(I.start==(j+1))
poses=c(pos2,pos3)
nonzeropvec=rep(NA,3)
I.new=replace(I.start,c(pos1,pos2),I.start[c(pos2,pos1)])
nonzeropvec[1]=fulllikepower(cbind(dat,FdatPL),I = I.new,phi = phi.start,smlgamma = smlgamma.start)
I.new=replace(I.start,c(pos1,pos3),I.start[c(pos3,pos1)])
nonzeropvec[2]=fulllikepower(cbind(dat,FdatPL),I = I.new,phi = phi.start,smlgamma = smlgamma.start)
nonzeropvec[3]=fulllikepower(cbind(dat,FdatPL),I = I.start,phi = phi.start,smlgamma = smlgamma.start)
nonzeropvec=exp(nonzeropvec-(max(nonzeropvec)))
nonzeropvec=nonzeropvec/sum(nonzeropvec)
gibbsrlz=mc2d::rmultinomial(1, 1,nonzeropvec)
pos=which(gibbsrlz==1)
if(pos<3){
I.start=replace(I.start,c(pos1,poses[pos]),I.start[c(poses[pos],pos1)])
}
}
FdatPL=pl2fullV2(I.start,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
tem=cbind(dat,FdatPL)
Mallowsdat=tem[I.start>0,]
Mallowsdat=apply(Mallowsdat,2,rank)
phi.new=phi.start + phi.hyper* rnorm(1)
if (phi.new>0 & phi.new<1){
log.prob.start <- lapply(seq_len(ncol(Mallowsdat)), function(i) log(PerMallows::dmm(Mallowsdat[,i],I.start[I.start>0], -log(phi.start)*smlgamma.start[i])) )
log.prob.new <- lapply(seq_len(ncol(Mallowsdat)), function(i) log(PerMallows::dmm(Mallowsdat[,i],I.start[I.start>0], -log(phi.new)*smlgamma.start[i])) )
if (sum(unlist(log.prob.new))-sum(unlist(log.prob.start)) >log(runif(1))){
phi.start=phi.new
}
}
dattem=cbind(dat,FdatPL)
for(j in 1:(m+mPL)){
smlgamma.tem=smlgamma.start[j]
smlgamma.tem=smlgamma.tem+gamma.hyper[j]*rnorm(1)
if(smlgamma.tem>smlgamma.lower && smlgamma.tem< smlgamma.upper){
like.start=PAMAlike(dattem[,j],I.start,phi.start,smlgamma.start[j])
like.tem=PAMAlike(dattem[,j],I.start,phi.start,smlgamma.tem)
if((like.tem-like.start) > log(runif(1))){
smlgamma.start[j]=smlgamma.tem
}
}
}
FdatPL=pl2fullV2(I.start,datPL,nRe,phi.start,smlgamma.start[-c(1:m)])
l.mat[i]=fulllikepower(dat = cbind(dat,FdatPL),I = I.start,phi = phi.start,smlgamma = smlgamma.start)
I.mat[,i]=I.start
phi.mat[i]=phi.start
smlgamma.mat[,i]=smlgamma.start
if(i==adaptation){
gamma.hyper=sqrt(diag(cov(t(smlgamma.mat[,(adaptation-adaptation*0.6):adaptation]))))
phi.hyper=sd((phi.mat[(adaptation-adaptation*0.6):adaptation]))
}
}
return(list(I.mat=I.mat,phi.mat=phi.mat,smlgamma.mat=smlgamma.mat,l.mat=l.mat))
} |
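# Usage sketch (hypothetical inputs; `dat` holds complete ranked lists in its
# columns, `datPL` holds partial lists, and nRe is the number of relevant
# entities):
# fit <- PAMA.PL(datfile = dat, PLdatfile = datPL, nRe = 10, iter = 2000)
# fit$phi.mat # posterior draws of the Mallows dispersion parameter phi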
clusterPIC_Z_DP <-
function(
L,
R,
y,
xcov,
IC,
scale.designX,
scaled,
zcov,
area,
binary,
I,
order,
knots,
grids,
a_eta,
b_eta,
a_ga,
b_ga,
a_alpha,
b_alpha,
H,
a_tau_star,
b_tau_star,
beta_iter,
phi_iter,
beta_cand,
phi_cand,
beta_sig0,
x_user,
total,
burnin,
thin,
conf.int,
seed){
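# Ispline: evaluates an I-spline (integrated M-spline) basis of the given
# order at the points x, for the supplied knot sequence; returns a
# (number of basis functions) x length(x) matrix.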
Ispline<-function(x,order,knots){
k=order+1
m=length(knots)
n=m-2+k
t=c(rep(1,k)*knots[1], knots[2:(m-1)], rep(1,k)*knots[m])
yy1=array(rep(0,(n+k-1)*length(x)),dim=c(n+k-1, length(x)))
for (l in k:n){
yy1[l,]=(x>=t[l] & x<t[l+1])/(t[l+1]-t[l])
}
yytem1=yy1
for (ii in 1:order){
yytem2=array(rep(0,(n+k-1-ii)*length(x)),dim=c(n+k-1-ii, length(x)))
for (i in (k-ii):n){
yytem2[i,]=(ii+1)*((x-t[i])*yytem1[i,]+(t[i+ii+1]-x)*yytem1[i+1,])/(t[i+ii+1]-t[i])/ii
}
yytem1=yytem2
}
index=rep(0,length(x))
for (i in 1:length(x)){
index[i]=sum(t<=x[i])
}
yy=array(rep(0,(n-1)*length(x)),dim=c(n-1,length(x)))
if (order==1){
for (i in 2:n){
yy[i-1,]=(i<index-order+1)+(i==index)*(t[i+order+1]-t[i])*yytem2[i,]/(order+1)
}
}else{
for (j in 1:length(x)){
for (i in 2:n){
if (i<(index[j]-order+1)){
yy[i-1,j]=1
}else if ((i<=index[j]) && (i>=(index[j]-order+1))){
yy[i-1,j]=(t[(i+order+1):(index[j]+order+1)]-t[i:index[j]])%*%yytem2[i:index[j],j]/(order+1)
}else{
yy[i-1,j]=0
}
}
}
}
return(yy)
}
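# Mspline: evaluates the corresponding M-spline (density) basis at x.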
Mspline<-function(x,order,knots){
k1=order
m=length(knots)
n1=m-2+k1
t1=c(rep(1,k1)*knots[1], knots[2:(m-1)], rep(1,k1)*knots[m])
tem1=array(rep(0,(n1+k1-1)*length(x)),dim=c(n1+k1-1, length(x)))
for (l in k1:n1){
tem1[l,]=(x>=t1[l] & x<t1[l+1])/(t1[l+1]-t1[l])
}
if (order==1){
mbases=tem1
}else{
mbases=tem1
for (ii in 1:(order-1)){
tem=array(rep(0,(n1+k1-1-ii)*length(x)),dim=c(n1+k1-1-ii, length(x)))
for (i in (k1-ii):n1){
tem[i,]=(ii+1)*((x-t1[i])*mbases[i,]+(t1[i+ii+1]-x)*mbases[i+1,])/(t1[i+ii+1]-t1[i])/ii
}
mbases=tem
}
}
return(mbases)
}
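# poissrndpositive: draws one value from a zero-truncated Poisson(lambda)
# by inverting the conditional CDF, growing the tabulated support beyond
# q = 200 when necessary.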
poissrndpositive<-function(lambda){
q=200
t=seq(0,q,1)
p=dpois(t,lambda)
pp=cumsum(p[2:(q+1)])/(1-p[1])
u=runif(1)
while(u>pp[q]){
q=q+1
pp[q]=pp[q-1]+dpois(q,lambda)/(1-p[1])
}
ll=sum(u>pp)+1
return(ll)
}
set.seed(seed)
L=matrix(L,ncol=1)
R=matrix(R,ncol=1)
y=matrix(y,ncol=1)
xcov=as.matrix(xcov)
zcov=as.matrix(zcov)
area=matrix(area,ncol=1)
IC=matrix(IC,ncol=1)
p=ncol(xcov)
q=ncol(zcov)
if (scale.designX==TRUE){
mean_X<-apply(xcov,2,mean)
sd_X<-apply(xcov,2,sd)
for (r in 1:p){
if (scaled[r]==1) xcov[,r]<-(xcov[,r]-mean_X[r])/sd_X[r]
}
}
n1=sum(IC==0)
n2=sum(IC==1)
N=n1+n2
t<-rep(0,N)
for (i in 1:N) {t[i]=ifelse(IC[i]==0,L[i],0)}
K=length(knots)-2+order
kgrids=length(grids)
bmsT=Mspline(t[1:n1],order,knots)
bisT=Ispline(t[1:n1],order,knots)
bisL=Ispline(L[(n1+1):N],order,knots)
bisR=Ispline(R[(n1+1):N],order,knots)
bisg=Ispline(grids,order,knots)
eta=rgamma(1,a_eta,rate=b_eta)
gamcoef=matrix(rgamma(K, 1, rate=1),ncol=K)
phicoef<-matrix(rep(0,I*q),ncol=q)
phicoef2<-matrix(rep(0,N*q),ncol=q)
for (j in 1:N){
phicoef2[j,]<-phicoef[area[j],]
}
beta=matrix(rep(0,p),p,1)
beta_original=matrix(rep(0,p),ncol=1)
u=array(rep(0,n1*K),dim=c(n1,K))
for (i in 1:n1){
u[i,]=rmultinom(1,1,gamcoef*t(bmsT[,i]))
}
alpha=rep(1,q)
tau_star<-array(rep(1,H*q),dim=c(H,q))
ns<-v<-pi<-array(rep(NA,H*q),dim=c(H,q))
tau<-c<-array(rep(NA,I*q),dim=c(I,q))
LK<-array(rep(NA,I*H),dim=c(I,H))
for (r in 1:q){
pi[,r]<-rdirichlet(1,rep(alpha[r]/H,H))
}
for (r in 1:q){
c[,r]<-sample(seq(1:H),I,replace=T,pi[,r])
}
for (r in 1:q){
for (h in 1:H) {
c1=c[,r]
ns[h,r]<-length(c1[c1==h])
}
}
for (r in 1:q){
for (i in 1:I) {
tau[i,r]<-tau_star[c[i,r],r]
}
}
lambdatT=t(gamcoef%*%bmsT)
LambdatT=t(gamcoef%*%bisT)
LambdatL=t(gamcoef%*%bisL)
LambdatR=t(gamcoef%*%bisR)
Lambdatg=t(gamcoef%*%bisg)
parbeta=array(rep(0,total*p),dim=c(total,p))
parbeta_original=array(rep(0,total*p),dim=c(total,p))
pareta=array(rep(0,total),dim=c(total,1))
partau_star=array(rep(0,total*H*q),dim=c(total,H,q))
paralpha=array(rep(0,total*q),dim=c(total,q))
parphi=array(rep(0,I*q*total),dim=c(I,q,total))
parpi=array(rep(0,total*H*q),dim=c(total,H,q))
pargam=array(rep(0,total*K),dim=c(total,K))
parsurv0=array(rep(0,total*kgrids),dim=c(total,kgrids))
parlambdatT=array(rep(0,total*n1),dim=c(total,n1))
parLambdatT=array(rep(0,total*n1),dim=c(total,n1))
parLambdatL=array(rep(0,total*n2),dim=c(total,n2))
parLambdatR=array(rep(0,total*n2),dim=c(total,n2))
pardev=array(rep(0,total),dim=c(total,1))
parfinv_exact=array(rep(0,total*n1),dim=c(total,n1))
parfinv_IC=array(rep(0,total*n2),dim=c(total,n2))
if (is.null(x_user)){parsurv=parsurv0} else {
G<-length(x_user)/p
parsurv=array(rep(0,total*kgrids*G),dim=c(total,kgrids*G))}
iter=1
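# Gibbs sampler: augment latent Poisson counts (z, w) for interval-censored
# subjects; update beta by random-walk MH (continuous covariates) or a
# conjugate gamma step (binary covariates); update the spline coefficients,
# eta, and the spatial effects phicoef; then update the DP-mixture pieces
# (tau_star, cluster labels c, stick-breaking weights, and alpha).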
while (iter<total+1)
{
z=array(rep(0,n2),dim=c(n2,1)); w=z
zz=array(rep(0,n2*K),dim=c(n2,K)); ww=zz
for (j in 1:n2){
if (y[n1+j]==0){
templam1=LambdatR[j]*exp(xcov[(n1+j),]%*%beta+zcov[(n1+j),]%*%phicoef[area[n1+j],])
z[j]=poissrndpositive(templam1)
zz[j,]=rmultinom(1,z[j],gamcoef*t(bisR[,j]))
} else if (y[n1+j]==1){
templam1=(LambdatR[j]-LambdatL[j])*exp(xcov[(n1+j),]%*%beta+zcov[(n1+j),]%*%phicoef[area[n1+j],])
w[j]=poissrndpositive(templam1)
ww[j,]=rmultinom(1,w[j],gamcoef*t(bisR[,j]-bisL[,j]))
}
}
te1=z*(y[(n1+1):N]==0)+w*(y[(n1+1):N]==1)
te2=(LambdatR*(y[(n1+1):N]==0)+LambdatR*(y[(n1+1):N]==1)+LambdatL*(y[(n1+1):N]==2))
te3=LambdatT
for (r in 1:p){
if (binary[r]==0){
beta1<-beta2<-beta
if (iter<beta_iter) sd_cand<-beta_cand[r] else sd_cand<-sd(parbeta[1:(iter-1),r])
xt<-beta[r]
yt<-rnorm(1,xt,sd_cand)
beta1[r]<-yt
beta2[r]<-xt
log_f1<-sum(yt*xcov[1:n1,r]-te3*exp(xcov[1:n1,]%*%beta1+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(yt*xcov[(n1+1):N,r]*te1)-sum(exp(xcov[(n1+1):N,]%*%beta1+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*te2)-0.5*(yt^2)/(beta_sig0^2)
log_f2<-sum(xt*xcov[1:n1,r]-te3*exp(xcov[1:n1,]%*%beta2+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(xt*xcov[(n1+1):N,r]*te1)-sum(exp(xcov[(n1+1):N,]%*%beta2+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*te2)-0.5*(xt^2)/(beta_sig0^2)
num<-log_f1
den<-log_f2
if (log(runif(1))<(num-den)) beta[r]<-yt else beta[r]<-xt
}
if (binary[r]==1 & p>1){
te4=sum(xcov[1:n1,r])+sum(xcov[(n1+1):N,r]*te1)
te5=sum(te3*exp(as.matrix(xcov[1:n1,-r])%*%as.matrix(beta[-r])+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*xcov[1:n1,r])+sum(te2*exp(as.matrix(xcov[(n1+1):N,-r])%*%as.matrix(beta[-r])+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*xcov[(n1+1):N,r])
beta[r]<-log(rgamma(1,a_ga+te4,rate=b_ga+te5))
}
if (binary[r]==1 & p==1){
te4=sum(xcov[1:n1,r])+sum(xcov[(n1+1):N,r]*te1)
te5=sum(te3*exp(apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*xcov[1:n1,r])+sum(te2*exp(apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*xcov[(n1+1):N,r])
beta[r]<-log(rgamma(1,a_ga+te4,rate=b_ga+te5))
}
}
if (scale.designX==TRUE){
for (r in 1:p) beta_original[r]<-ifelse(scaled[r]==1,beta[r]/sd_X[r],beta[r])
}
if (scale.designX==FALSE) beta_original<-beta
for (l in 1:K){
tempa=1+sum(u[,l])+sum(zz[,l]*(y[(n1+1):N]==0)+ww[,l]*(y[(n1+1):N]==1))
tempb=eta+sum(bisT[l,]*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(((bisR[l,])*(y[(n1+1):N]==0)+(bisR[l,])*(y[(n1+1):N]==1)
+(bisL[l,])*(y[(n1+1):N]==2))*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
gamcoef[l]=rgamma(1,tempa,rate=tempb)
}
lambdatT=t(gamcoef%*%bmsT)
LambdatT=t(gamcoef%*%bisT)
LambdatL=t(gamcoef%*%bisL)
LambdatR=t(gamcoef%*%bisR)
u=array(rep(0,n1*K),dim=c(n1,K))
for (i in 1:n1){
u[i,]=rmultinom(1,1,gamcoef*t(bmsT[,i]))
}
eta=rgamma(1,a_eta+K, rate=b_eta+sum(gamcoef))
for (r in 1:q){
phi1<-array(rep(0,N*q),dim=c(N,q))
phi2<-array(rep(0,N*q),dim=c(N,q))
for (i in 1:I){
phi1<-phi2<-phicoef2
if (iter<phi_iter) sd_cand<-phi_cand else sd_cand<-sd(parphi[i,r,1:(iter-1)])
xt<-phicoef[i,r]
yt<-rnorm(1,xt,sd_cand)
phi1[area==i,r]<-yt
phi2[area==i,r]<-xt
log_f1<-sum(zcov[1:n1,r]*phi1[1:n1,r])-sum(te3*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phi1[1:n1,],1,sum)))+sum(zcov[(n1+1):N,r]*phi1[(n1+1):N,r]*te1)-sum((exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phi1[(n1+1):N,],1,sum)))*te2)-0.5*tau[i,r]*(yt^2)
log_f2<-sum(zcov[1:n1,r]*phi2[1:n1,r])-sum(te3*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phi2[1:n1,],1,sum)))+sum(zcov[(n1+1):N,r]*phi2[(n1+1):N,r]*te1)-sum((exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phi2[(n1+1):N,],1,sum)))*te2)-0.5*tau[i,r]*(xt^2)
num<-log_f1
den<-log_f2
if (log(runif(1))<(num-den)) phicoef[i,r]<-yt else phicoef[i,r]<-xt
phicoef2[area==i,r]<-phicoef[i,r]
}
}
for (r in 1:q){
for (h in 1:H){
tau_star[h,r]<-rgamma(1,a_tau_star+0.5*ns[h,r],b_tau_star+0.5*t(phicoef[c[,r]==h,r])%*%phicoef[c[,r]==h,r])
}
}
for (r in 1:q){
for (i in 1:I){
for (h in 1:H){
LK[i,h]<-dnorm(phicoef[i,r],0,1/sqrt(tau_star[h,r]))
}
p_c<-(t(pi[,r])*LK[i,])/sum(t(pi[,r])*LK[i,])
c[i,r]<-sample(seq(1:H),1,replace=T,p_c)
}
}
for (r in 1:q){
for (h in 1:H) {
c1=c[,r]
ns[h,r]<-length(c1[c1==h])
}
}
for (r in 1:q){
for (h in 1:(H-1)) v[h,r]<-rbeta(1,1+ns[h,r],alpha[r]+sum(ns[(h+1):H,r]))
v[H,r]=1
cumv<-cumprod(1-v[,r])
pi[1,r]<-v[1,r]
for (h in 2:H) pi[h,r]<-v[h,r]*cumv[h-1]
}
for (r in 1:q){
alpha[r]<-rgamma(1,a_alpha+H-1,b_alpha-sum(log(1-v[,r])[1:(H-1)]))
}
for (r in 1:q){
for (i in 1:I) tau[i,r]<-tau_star[c[i,r],r]
}
f_iter_exact<-lambdatT*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*exp(-LambdatT*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))
FL<-1-exp(-LambdatL*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
FR<-1-exp(-LambdatR*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
f_iter_IC<-(FR^(y[(n1+1):N]==0))*((FR-FL)^(y[(n1+1):N]==1))*((1-FL)^(y[(n1+1):N]==2))
finv_iter_exact<-1/f_iter_exact
finv_iter_IC<-1/f_iter_IC
loglike<-sum(log(f_iter_exact))+sum(log(FR^(y[(n1+1):N]==0))+log((FR-FL)^(y[(n1+1):N]==1))+log((1-FL)^(y[(n1+1):N]==2)))
dev<--2*loglike
parbeta[iter,]=beta
parbeta_original[iter,]=beta_original
pareta[iter]=eta
paralpha[iter,]=alpha
parphi[,,iter]=phicoef
pargam[iter,]=gamcoef
ttt=gamcoef%*%bisg
if (scale.designX==FALSE) {parsurv0[iter,]<-exp(-ttt)}
if (scale.designX==TRUE) {parsurv0[iter,]<-exp(-ttt*exp(-sum((beta*mean_X/sd_X)[scaled==1])))}
parlambdatT[iter,]=lambdatT
parLambdatT[iter,]=LambdatT
parLambdatL[iter,]=LambdatL
parLambdatR[iter,]=LambdatR
pardev[iter]=dev
parfinv_exact[iter,]=finv_iter_exact
parfinv_IC[iter,]=finv_iter_IC
if (is.null(x_user)){parsurv[iter,]=parsurv0[iter,]} else {
A<-matrix(x_user,byrow=TRUE,ncol=p)
if (scale.designX==TRUE){
for (r in 1:p){
if (scaled[r]==1) A[,r]<-(A[,r]-mean_X[r])/sd_X[r]}
}
B<-exp(A%*%beta)
for (g in 1:G){
parsurv[iter,((g-1)*kgrids+1):(g*kgrids)]=exp(-ttt*B[g,1])}
}
iter=iter+1
if (iter%%100==0) print(iter)
}
wbeta=as.matrix(parbeta_original[seq((burnin+thin),total,by=thin),],ncol=p)
wparsurv0=as.matrix(parsurv0[seq((burnin+thin),total,by=thin),],ncol=kgrids)
wparsurv=as.matrix(parsurv[seq((burnin+thin),total,by=thin),],ncol=kgrids*G)
coef<-apply(wbeta,2,mean)
coef_ssd<-apply(wbeta,2,sd)
coef_ci<-array(rep(0,p*2),dim=c(p,2))
S0_m<-apply(wparsurv0,2,mean)
S_m<- apply(wparsurv,2,mean)
colnames(coef_ci)<-c(paste(100*(1-conf.int)/2,"%CI"),paste(100*(0.5+conf.int/2),"%CI"))
for (r in 1:p) coef_ci[r,]<-quantile(wbeta[,r],c((1-conf.int)/2,0.5+conf.int/2))
CPO_exact=1/apply(parfinv_exact[seq((burnin+thin),total,by=thin),],2,mean)
CPO_IC=1/apply(parfinv_IC[seq((burnin+thin),total,by=thin),],2,mean)
NLLK_exact=-sum(log(CPO_exact))
NLLK_IC=-sum(log(CPO_IC))
NLLK=NLLK_exact+NLLK_IC
LambdatL_m<-apply(parLambdatL[seq((burnin+thin),total,by=thin),],2,mean)
LambdatR_m<-apply(parLambdatR[seq((burnin+thin),total,by=thin),],2,mean)
beta_m<-apply(parbeta[seq((burnin+thin),total,by=thin),],2,mean)
phicoef_m<-apply(parphi[,,seq((burnin+thin),total,by=thin)],c(1,2),mean)
phicoef2_m<-array(rep(0,N*q),dim=c(N,q))
for (j in 1:N){
phicoef2_m[j,]<-phicoef_m[area[j],]
}
FL_m<-1-exp(-LambdatL_m*exp(xcov[(n1+1):N,]%*%beta_m+apply(zcov[(n1+1):N,]*phicoef2_m[(n1+1):N,],1,sum)))
FR_m<-1-exp(-LambdatR_m*exp(xcov[(n1+1):N,]%*%beta_m+apply(zcov[(n1+1):N,]*phicoef2_m[(n1+1):N,],1,sum)))
loglike_m_IC<-sum(log(FR_m^(y[(n1+1):N]==0))+log((FR_m-FL_m)^(y[(n1+1):N]==1))+log((1-FL_m)^(y[(n1+1):N]==2)))
D_thetabar_IC<--2*loglike_m_IC
lambdatT_m<-apply(parlambdatT[seq((burnin+thin),total,by=thin),],2,mean)
LambdatT_m<-apply(parLambdatT[seq((burnin+thin),total,by=thin),],2,mean)
loglike_m_exact<-sum(log(lambdatT_m)+xcov[1:n1,]%*%beta_m+apply(zcov[1:n1,]*phicoef2_m[1:n1,],1,sum)-LambdatT_m*exp(xcov[1:n1,]%*%beta_m+apply(zcov[1:n1,]*phicoef2_m[1:n1,],1,sum)))
D_thetabar_exact<--2*loglike_m_exact
D_bar=mean(pardev[seq((burnin+thin),total,by=thin)])
D_thetabar=D_thetabar_IC+D_thetabar_exact
DIC=2*D_bar-D_thetabar
est<-list(
N=nrow(xcov),
nameX=colnames(xcov),
parbeta=parbeta_original,
parsurv0=parsurv0,
parsurv=parsurv,
paralpha=paralpha,
coef = coef,
coef_ssd = coef_ssd,
coef_ci = coef_ci,
S0_m = S0_m,
S_m = S_m,
grids=grids,
DIC = DIC,
NLLK = NLLK
)
est
} |
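# Snapshot tests for cli progress-bar rendering: the default style, the
# built-in named styles, and a user-supplied custom style.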
test_that_cli("make_progress_bar", {
withr::local_options(
cli.progress_bar_style = NULL,
cli.progress_bar_style_unicode = NULL,
cli.progress_bar_style_ascii = NULL
)
expect_snapshot(make_progress_bar(.5))
})
test_that_cli(configs = "fancy", "cli_progress_styles", {
withr::local_options(
cli.progress_bar_style_unicode = NULL,
cli.progress_bar_style_ascii = NULL
)
withr::local_options(cli.progress_bar_style = "classic")
expect_snapshot(make_progress_bar(.5))
withr::local_options(cli.progress_bar_style = "squares")
expect_snapshot(make_progress_bar(.5))
withr::local_options(cli.progress_bar_style = "dot")
expect_snapshot(make_progress_bar(.5))
withr::local_options(cli.progress_bar_style = "fillsquares")
expect_snapshot(make_progress_bar(.5))
withr::local_options(cli.progress_bar_style = "bar")
expect_snapshot(make_progress_bar(.5))
})
test_that_cli(configs = c("plain", "unicode"), "custom style", {
mybar <- list(complete = "X", incomplete = "O", current = ">")
withr::local_options(
cli.progress_bar_style = mybar,
cli.progress_bar_style_unicode = NULL,
cli.progress_bar_style_ascii = NULL
)
expect_snapshot(make_progress_bar(.5))
}) |
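# GTSplot: overlays the columns of 'tsdata' as plotly line traces, with
# optional per-series names, colors, and hover-text units.
# Assumed usage sketch: GTSplot(cbind(ts1, ts2), ts_name = c("a", "b"))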
GTSplot <- function(tsdata, NEWtitle = "Result", Ylab = "Value", Xlab = "Time", Unit = NULL, ts_name = NULL, title_size = 10, COLO = NULL) {
TSP <- plot_ly(type = "scatter", mode = "lines")
for (i in 1:ncol(tsdata)) {
tsd <- tsdata[, i]
tsn <- ts_name[i]
Col <- COLO[i]
TSP <- add_trace(TSP, x = time(tsd), text = paste(time(tsd), Unit), type = "scatter", mode = "lines", opacity = 0.75, y = tsd,
name = tsn, line = list(color = c(Col)))
}
TSP <- TSP %>% layout(title = list(text = NEWtitle, font = list(family = "Times New Roman", size = title_size, color = "black")),
paper_bgcolor = "rgb(255,255,255)", plot_bgcolor = "rgb(229,229,229)", xaxis = list(title = Xlab, gridcolor = "rgb(255,255,255)",
showgrid = TRUE, showline = FALSE, showticklabels = TRUE, tickcolor = "rgb(127,127,127)", ticks = "outside", zeroline = FALSE),
yaxis = list(title = Ylab, gridcolor = "rgb(255,255,255)", showgrid = TRUE, showline = FALSE, showticklabels = TRUE, tickcolor = "rgb(127,127,127)",
ticks = "outside", zeroline = FALSE))
return(TSP)
}
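# TSplot_gen: plots an ARIMA forecast (point forecast plus 80%/95% bands)
# against the tail of the fitted series, optionally overlaying extra series
# supplied in ts_list.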
TSplot_gen <- function(origin_t, ARIMAmodel, XREG = NULL, periods = NULL, NEWtitle = "Result", Ylab = "Value", Xlab = "Time", plot_labels = NULL,
ts_original = "original time series", ts_forecast = "forecasted time series", title_size = 10, ts_list = "empty", ts_labels = NULL,
ts_names = NULL, COLO = NULL) {
tsmodel <- forecast(ARIMAmodel, xreg = XREG, h = periods)
if (origin_t == "all") {
TIME = 1
} else {
TIME = (length(tsmodel$x) - origin_t + 1)
}
includetime <- c(tsmodel$x[TIME:length(tsmodel$x)], rep(NA, length(tsmodel$mean)))
includetime2 <- c(rep(NA, length((time(tsmodel$x)[TIME:length(tsmodel$x)]))), tsmodel$mean)
includetime3 <- c(rep(NA, length((time(tsmodel$x)[TIME:length(tsmodel$x)]))), tsmodel$lower[, 1])
includetime4 <- c(rep(NA, length((time(tsmodel$x)[TIME:length(tsmodel$x)]))), tsmodel$upper[, 1])
includetime5 <- c(rep(NA, length((time(tsmodel$x)[TIME:length(tsmodel$x)]))), tsmodel$lower[, 2])
includetime6 <- c(rep(NA, length((time(tsmodel$x)[TIME:length(tsmodel$x)]))), tsmodel$upper[, 2])
alltime <- c((time(tsmodel$x)[TIME:length(tsmodel$x)]), (time(tsmodel$mean)))
TSP <- plot_ly(type = "scatter", mode = "lines") %>% layout(title = list(text = NEWtitle, font = list(family = "Times New Roman",
size = title_size, color = "black")), paper_bgcolor = "rgb(255,255,255)", plot_bgcolor = "rgb(229,229,229)", xaxis = list(title = Xlab,
gridcolor = "rgb(255,255,255)", showgrid = TRUE, showline = FALSE, showticklabels = TRUE, tickcolor = "rgb(127,127,127)",
ticks = "outside", zeroline = FALSE), yaxis = list(title = Ylab, gridcolor = "rgb(255,255,255)", showgrid = TRUE, showline = FALSE,
showticklabels = TRUE, tickcolor = "rgb(127,127,127)", ticks = "outside", zeroline = FALSE)) %>% add_lines(x = alltime,
text = plot_labels, y = includetime, name = ts_original, line = list(color = "green")) %>% add_lines(x = alltime, text = plot_labels,
y = includetime5, name = "95% lower bound", line = list(color = "powderblue")) %>% add_trace(x = alltime, text = plot_labels,
y = includetime6, type = "scatter", mode = "lines", line = list(color = "powderblue"), fill = "tonexty", fillcolor = "powderblue",
name = "95% upper bound") %>% add_lines(x = alltime, text = plot_labels, y = includetime3, name = "80% lower bound", line = list(color = "lightpink")) %>%
add_trace(x = alltime, text = plot_labels, y = includetime4, type = "scatter", mode = "lines", line = list(color = "lightpink"),
fill = "tonexty", fillcolor = "lightpink", name = "80% upper bound") %>% add_lines(x = alltime, text = plot_labels,
y = includetime2, name = ts_forecast, line = list(color = "red"))
if (ts_list != "empty") {
for (i in 1:length(ts_list)) {
tsd <- ts_list[[i]]
tsl <- ts_labels[[i]]
tsn <- ts_names[i]
Color <- COLO[i]
TSP <- add_trace(TSP, x = time(tsd), text = tsl, type = "scatter", mode = "lines", y = tsd, name = tsn, line = list(color = Color))
}
}
return(TSP)
}
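# fmri_split_ab_bl: builds a diverging color mapping for a statistic vector,
# one gradient (via scales::seq_gradient_pal) for below-zero values and
# another for above-zero values, returned either as a colorscale list or as
# a plain vector of colors.
# NOTE: the hex color arguments of seq_gradient_pal() were truncated at "#"
# in this source; the endpoint colors reconstructed below are assumptions.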
fmri_split_ab_bl <- function(vect, option = "vector") {
if (option == "list") {
overalllen <- length(which(vect != 0)) + 1
ab_len <- length(which(vect > 0)) + 1
bl_len <- length(which(vect < 0)) + 1
nulllist <- as.list(1:overalllen)
s <- seq_gradient_pal("#0000FF", "#FFFFFF")(seq(0, 1, length.out = bl_len)) # assumed endpoints, see note above
for (i in 1:bl_len) {
nulllist[[i]] <- c((i - 1)/overalllen, s[i])
}
s <- seq_gradient_pal("#FFFFFF", "#FF0000")(seq(0, 1, length.out = ab_len)) # assumed endpoints, see note above
for (i in 1:ab_len) {
nulllist[[bl_len - 1 + i]] <- c((bl_len - 1 + i - 1)/overalllen, s[i])
}
} else if (option == "vector") {
overalllen <- length(which(vect != 0)) + 1
ab_len <- length(which(vect > 0)) + 1
bl_len <- length(which(vect < 0)) + 1
nulllist <- rep(NA, overalllen)
s <- seq_gradient_pal("#0000FF", "#FFFFFF")(seq(0, 1, length.out = bl_len)) # assumed endpoints, see note above
for (i in 1:bl_len) {
nulllist[i] <- s[i]
}
s <- seq_gradient_pal("#FFFFFF", "#FF0000")(seq(0, 1, length.out = ab_len)) # assumed endpoints, see note above
for (i in 1:ab_len) {
nulllist[bl_len - 1 + i] <- s[i]
}
}
return(nulllist)
} |
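# Tests that the factor-model graph samplers honor allow_self_loops = FALSE
# across output formats (edgelist, sparse matrix, igraph, tidygraph).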
library(igraph)
test_that("undirected graphs allow_self_loops = FALSE", {
set.seed(1)
n <- 1000
k <- 5
X <- matrix(rpois(n = n * k, 1), nrow = n)
S <- matrix(runif(n = k * k, 0, .1), nrow = k)
ufm <- undirected_factor_model(
X, S,
expected_density = 0.1
)
edgelist <- sample_edgelist(ufm, allow_self_loops = FALSE)
expect_false(any(edgelist$from == edgelist$to))
A <- sample_sparse(ufm, allow_self_loops = FALSE)
expect_false(any(diag(A) > 0))
igraph <- sample_igraph(ufm, allow_self_loops = FALSE)
expect_false(any(diag(get.adjacency(igraph)) > 0))
tbl_graph <- sample_tidygraph(ufm, allow_self_loops = FALSE)
expect_false(any(diag(get.adjacency(tbl_graph)) > 0))
})
test_that("directed graphs allow_self_loops = FALSE", {
set.seed(2)
n2 <- 1000
k1 <- 5
k2 <- 3
d <- 500
X <- matrix(rpois(n = n2 * k1, 1), nrow = n2)
S <- matrix(runif(n = k1 * k2, 0, .1), nrow = k1, ncol = k2)
Y <- matrix(rexp(n = k2 * d, 1), nrow = d)
fm <- directed_factor_model(X, S, Y, expected_density = 0.01)
edgelist <- sample_edgelist(fm, allow_self_loops = FALSE)
expect_false(any(edgelist$from == edgelist$to))
A <- sample_sparse(fm, allow_self_loops = FALSE)
expect_false(any(diag(A) > 0))
igraph <- sample_igraph(fm, allow_self_loops = FALSE)
expect_false(any(diag(get.adjacency(igraph)) > 0))
tbl_graph <- sample_tidygraph(fm, allow_self_loops = FALSE)
expect_false(any(diag(get.adjacency(tbl_graph)) > 0))
}) |
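# CRLMM support methods: map an aroma-style CrlmmModel onto the Bioconductor
# oligo/platform-design world (PD database handle, CRLMM priors, SNP unit
# indices, ChrX SNPs, and spline parameters).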
setMethodS3("getPlatformDesignDB", "CrlmmModel", function(this, ..., verbose=FALSE) {
requireNamespace("oligoClasses") || throw("Package not loaded: oligoClasses")
db <- oligoClasses::db
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Getting Platform Design Database")
ds <- getDataSet(this)
cdf <- getCdf(ds)
chipType <- getChipType(cdf, fullname=FALSE)
verbose && cat(verbose, "Chip type: ", chipType)
pdPkgName <- .cleanPlatformName(chipType)
verbose && cat(verbose, "Plaform Design package: ", pdPkgName)
require(pdPkgName, character.only=TRUE) || throw("Package not loaded: ", pdPkgName)
pdDB <- db(get(pdPkgName, mode="S4"))
verbose && print(verbose, pdDB)
verbose && exit(verbose)
pdDB
}, private=TRUE)
setMethodS3("getCrlmmPriors", "CrlmmModel", function(this, ..., verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Getting CRLMM priors")
ds <- getDataSet(this)
cdf <- getCdf(ds)
chipType <- getChipType(cdf, fullname=FALSE)
verbose && cat(verbose, "Chip type: ", chipType)
res <- NULL
pkgName <- "oligoParams"
if (isPackageInstalled(pkgName) && require(pkgName, character.only=TRUE)) {
getCrlmmSnpNames <- NULL; rm(list="getCrlmmSnpNames")
verbose && enter(verbose, "Querying oligoParams")
tryCatch({
res <- getCrlmmSnpNames(chipType, tags="SNPs",
verbose=less(verbose, -20))
}, error=function(ex) {})
verbose && exit(verbose)
}
if (is.null(res)) {
verbose && enter(verbose, "Querying PD package")
pdPkgName <- .cleanPlatformName(chipType)
verbose && cat(verbose, "Platform Design (PD) package: ", pdPkgName)
path <- system.file(package=pdPkgName)
if (path == "") {
throw("Cannot load HapMap reference target quantiles. Package not installed: ", pdPkgName)
}
verbose && enter(verbose, "Loading CRLMM priors etc")
path <- file.path(path, "extdata")
path <- Arguments$getReadablePath(path)
filename <- sprintf("%sCrlmmInfo.rda", pdPkgName)
pathname <- Arguments$getReadablePathname(filename, path=path)
verbose && cat(verbose, "Pathname: ", pathname)
key <- sprintf("%sCrlmm", pdPkgName)
res <- loadToEnv(pathname)[[key]]
verbose && exit(verbose)
verbose && exit(verbose)
}
verbose && cat(verbose, "Loaded data:")
verbose && capture(verbose, ll(envir=res))
verbose && exit(verbose)
res
}, protected=TRUE)
setMethodS3("getCrlmmSNPs", "CrlmmModel", function(this, flavor=c("oligoPD", "oligoCDF"), ..., verbose=FALSE) {
requireNamespace("DBI") || throw("Package not loaded: DBI")
dbGetQuery <- DBI::dbGetQuery
flavor <- match.arg(flavor)
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Identifying SNP according to oligo::CRLMM")
verbose && cat(verbose, "Flavor: ", flavor)
if (flavor == "oligoCDF") {
verbose && enter(verbose, "Querying the CDF")
ds <- getDataSet(this)
cdf <- getCdf(ds)
units <- indexOf(cdf, pattern="^SNP")
verbose && exit(verbose)
} else if (flavor == "oligoPD") {
verbose && enter(verbose, "Querying")
res <- NULL
pkgName <- "oligoParams"
if (isPackageInstalled(pkgName) && require(pkgName, character.only=TRUE)) {
getCrlmmSnpNames <- NULL; rm(list="getCrlmmSnpNames")
ds <- getDataSet(this)
cdf <- getCdf(ds)
chipType <- getChipType(cdf, fullname=FALSE)
verbose && enter(verbose, "Querying oligoParams")
tryCatch({
res <- getCrlmmSnpNames(chipType, tags="SNPs",
verbose=less(verbose, -20))
}, error=function(ex) {})
verbose && exit(verbose)
}
if (is.null(res)) {
verbose && enter(verbose, "Querying the PD package")
pdDB <- getPlatformDesignDB(this, verbose=less(verbose,1))
verbose && print(verbose, pdDB)
res <- dbGetQuery(pdDB, "SELECT man_fsetid FROM featureSet WHERE man_fsetid LIKE 'SNP%' ORDER BY man_fsetid")[[1]]
verbose && str(verbose, res)
verbose && exit(verbose)
}
verbose && enter(verbose, "Mapping to CDF unit indices")
ds <- getDataSet(this)
cdf <- getCdf(ds)
units <- indexOf(cdf, names=res)
names(units) <- res
verbose && exit(verbose)
verbose && exit(verbose)
}
verbose && str(verbose, units)
verbose && exit(verbose)
units
}, private=TRUE)
setMethodS3("getCrlmmSNPsOnChrX", "CrlmmModel", function(this, flavor=c("oligoPD", "oligoCDF"), ..., verbose=FALSE) {
requireNamespace("DBI") || throw("Package not loaded: DBI")
dbGetQuery <- DBI::dbGetQuery
flavor <- match.arg(flavor)
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Identifying all SNPs on ChrX")
if (flavor == "oligoCDF") {
verbose && enter(verbose, "Querying the CDF and UGP")
ds <- getDataSet(this)
cdf <- getCdf(ds)
gi <- getGenomeInformation(cdf)
units <- getUnitsOnChromosome(gi, 23)
unitNames <- getUnitNames(cdf, units=units)
units <- units[grep("^SNP", unitNames)]
unitNames <- NULL
verbose && exit(verbose)
} else if (flavor == "oligoPD") {
verbose && enter(verbose, "Querying")
res <- NULL
pkgName <- "oligoParams"
if (isPackageInstalled(pkgName) && require(pkgName, character.only=TRUE)) {
getCrlmmSnpNames <- NULL; rm(list="getCrlmmSnpNames")
ds <- getDataSet(this)
cdf <- getCdf(ds)
chipType <- getChipType(cdf, fullname=FALSE)
verbose && enter(verbose, "Querying oligoParams")
tryCatch({
res <- getCrlmmSnpNames(chipType, tags="SNPs,ChrX",
verbose=less(verbose, -20))
}, error=function(ex) {})
verbose && exit(verbose)
}
if (is.null(res)) {
verbose && enter(verbose, "Querying the PD package")
pdDB <- getPlatformDesignDB(this, verbose=less(verbose,1))
verbose && print(verbose, pdDB)
res <- dbGetQuery(pdDB, "SELECT man_fsetid FROM featureSet WHERE man_fsetid LIKE 'SNP%' AND chrom = 'X'")[[1]]
verbose && str(verbose, res)
}
verbose && enter(verbose, "Mapping to CDF unit indices")
ds <- getDataSet(this)
cdf <- getCdf(ds)
units <- indexOf(cdf, names=res)
names(units) <- res
verbose && exit(verbose)
verbose && exit(verbose)
}
verbose && str(verbose, units)
verbose && exit(verbose)
units
}, private=TRUE)
setMethodS3("getCrlmmSplineParameters", "CrlmmModel", function(this, flavor=c("oligoPD"), ..., verbose=FALSE) {
flavor <- match.arg(flavor)
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
verbose && enter(verbose, "Retrieving spline parameters")
verbose && cat(verbose, "Flavor: ", flavor)
ds <- getDataSet(this)
cdf <- getCdf(ds)
chipType <- getChipType(cdf, fullname=FALSE)
verbose && cat(verbose, "Chip type: ", chipType)
if (flavor == "oligoPD") {
verbose && enter(verbose, "Querying the PD package")
pdPkgName <- .cleanPlatformName(chipType)
verbose && cat(verbose, "Platform Design (PD) package: ", pdPkgName)
path <- system.file(package=pdPkgName)
if (path == "") {
throw("Cannot load spline parameters. Package not installed: ", pdPkgName)
}
path <- file.path(path, "extdata")
path <- Arguments$getReadablePath(path)
filename <- sprintf("%s.spline.params.rda", pdPkgName)
pathname <- Arguments$getReadablePathname(filename, path=path)
verbose && cat(verbose, "Pathname: ", pathname)
res <- loadToEnv(pathname)
verbose && exit(verbose)
}
verbose && cat(verbose, "Loaded data:")
verbose && print(verbose, ll(envir=res))
verbose && str(verbose, res)
verbose && exit(verbose)
res
}, private=TRUE) |
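# TMLjac21.W: Jacobian block used in truncated maximum likelihood (TML)
# estimation with log-Weibull errors; sums an uncensored contribution (D1)
# and two censoring corrections (D2, D3), then projects onto the design
# matrix X.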
TMLjac21.W <-
function(d.Beta,d.sigma,rs0,delta,X,cl,cu) {
n <- length(rs0); zero <- 1e-6
D1 <- D2 <- D3 <- D <- rep(0,n); p <- ncol(X)
rsd <- (rs0-X%*%d.Beta)/d.sigma
Fo <- plweibul(rsd)
fo <- dlweibul(rsd)
ok <- (1-Fo) > zero
ai <- (pmax(rs0,cl)-X%*%d.Beta)/d.sigma
bi <- (cu - X%*%d.Beta)/d.sigma
foai <- dlweibul(ai)
fobi <- dlweibul(bi)
Foai <- plweibul(ai)
Fobi <- plweibul(bi)
fopai <- foai*(-ps0W(ai))
fopbi <- fobi*(-ps0W(bi))
D1 <- - delta*ww(rs0,cl,cu)*psp1W(rsd)
D2[ok] <- - ( (1-delta)*fo/(1-Fo)^2*(foai*ai-fobi*bi + Fobi - Foai) )[ok]
D3[ok] <- - ( (1-delta)/(1-Fo)*( fopai*ai - fopbi*bi ) )[ok]
D <- D1 + D2 + D3
Jac <- t(X)%*%(as.vector(D))/d.sigma/(n-p)
Jac
}
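# aov_pcaSpectra: ANOVA-PCA for Spectra objects. The mean-centered data
# matrix is split into submatrices for each factor (and interactions) by
# successively averaging over factor levels and removing the explained
# part; PCA is then run on each submatrix plus the residual error.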
aov_pcaSpectra <- function(spectra, fac, type = "class", choice = NULL, showNames = TRUE) {
.chkArgs(mode = 11L)
types <- c("class", "rob")
check <- type %in% types
if (!check) {
stop("PCA option invalid")
}
if (length(fac) > 3) {
stop("Cannot process more than 3 factors!")
}
chkSpectra(spectra)
nf <- length(fac)
MC <- scale(spectra$data, scale = FALSE)
if (nf == 1) {
big <- list(DA = MC)
flist <- list(spectra[[fac[1]]])
}
if (nf == 2) {
big <- list(DA = MC, DB = MC, DAB = MC)
flist <- list(
fA = spectra[[fac[1]]], fB = spectra[[fac[2]]],
fAB = interaction(spectra[[fac[1]]], spectra[[fac[2]]])
)
}
if (nf == 3) {
big <- list(DA = MC, DB = MC, DC = MC, DAB = MC, DAC = MC, DBC = MC)
flist <- list(
fA = spectra[[fac[1]]], fB = spectra[[fac[2]]],
fC = spectra[[fac[3]]], fAB = interaction(spectra[[fac[1]]], spectra[[fac[2]]]),
fAC = interaction(spectra[[fac[1]]], spectra[[fac[3]]]),
fBC = interaction(spectra[[fac[2]]], spectra[[fac[3]]])
)
}
lvlcnt <- 0L
for (i in 1:length(flist)) lvlcnt <- lvlcnt + length(levels(flist[[i]]))
if (lvlcnt >= length(spectra$names)) {
msg <- paste("There are too many levels (", lvlcnt, ") in argument fac for the number of samples.", sep = "")
stop(msg)
}
if (nf == 1) {
warning("aov_pcaSpectra is the same as ordinary PCA for 1 factor")
big[[1]] <- .avgFacLvls(matrix = MC, flist[[1]])
DAR <- MC - big[[1]]
LM <- list(DA = big[[1]], DPE = DAR, MC = MC)
names(LM) <- c(fac[1], "Res.Error", "MC Data")
}
if (nf == 2) {
big[[1]] <- .avgFacLvls(matrix = MC, flist[[1]])
DAR <- MC - big[[1]]
big[[2]] <- .avgFacLvls(matrix = DAR, flist[[2]])
DBR <- DAR - big[[2]]
big[[3]] <- .avgFacLvls(matrix = DBR, flist[[3]])
DABR <- DBR - big[[3]]
LM <- list(DA = big[[1]], DB = big[[2]], DAB = big[[3]], DPE = DABR, MC = MC)
names(LM) <- c(fac[1], fac[2], paste(fac[1], "x", fac[2], sep = " "), "Res.Error", "MC Data")
}
if (nf == 3) {
big[[1]] <- .avgFacLvls(matrix = MC, flist[[1]])
DAR <- MC - big[[1]]
big[[2]] <- .avgFacLvls(matrix = DAR, flist[[2]])
DBR <- DAR - big[[2]]
big[[3]] <- .avgFacLvls(matrix = DBR, flist[[3]])
DCR <- DBR - big[[3]]
big[[4]] <- .avgFacLvls(matrix = DCR, flist[[4]])
DABR <- DCR - big[[4]]
big[[5]] <- .avgFacLvls(matrix = DABR, flist[[5]])
DACR <- DABR - big[[5]]
big[[6]] <- .avgFacLvls(matrix = DACR, flist[[6]])
DBCR <- DACR - big[[6]]
LM <- list(
DA = big[[1]], DB = big[[2]], DC = big[[3]], DAB = big[[4]],
DAC = big[[5]], DBC = big[[6]], DPE = DBCR, MC = MC
)
names(LM) <- c(
fac[1], fac[2], fac[3], paste(fac[1], "x", fac[2], sep = " "),
paste(fac[1], "x", fac[3], sep = " "), paste(fac[2], "x", fac[3], sep = " "),
"Res.Error", "MC Data"
)
}
if (showNames) cat("The submatrices are:", paste(names(LM), collapse = ", "), "\n")
n_pca <- length(LM) - 2
PCA <- vector("list", n_pca)
names(PCA) <- names(LM)[1:n_pca]
for (i in 1:n_pca) {
spectra$data <- LM[[i]] + LM$Res.Error
if (is.null(choice)) choice <- "noscale"
if (type == "class") PCA[[i]] <- c_pcaSpectra(spectra, choice = choice, cent = FALSE)
if (type == "rob") PCA[[i]] <- r_pcaSpectra(spectra, choice = choice)
}
return(PCA)
} |
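# tune: converts a stratigraphic depth/height series to a time series by
# piecewise-linear interpolation between control points (depth, time); the
# interpolation itself is delegated to the compiled Fortran routine 'tune_r'.
# Assumed usage sketch: tune(dat, controlPts = data.frame(depth, time))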
tune <- function (dat,controlPts,extrapolate=F,genplot=T,check=T,verbose=T)
{
if(verbose) cat("\n----- TUNING STRATIGRAPHIC SERIES -----\n")
dat=data.frame(dat)
controlPts=data.frame(controlPts)
npts <- length(dat[,1])
if(verbose) cat(" * Number of data points=", npts,"\n")
ictrl <- length(controlPts[,1])
if(verbose) cat(" * Number of time control points=", ictrl,"\n")
if(check)
{
if(length(data.frame(controlPts)) == 1)
{
cat("\n**** ERROR: controlPts must have at least two tuning control points defined.\n")
stop("**** TERMINATING NOW!")
}
if(verbose) cat(" * Sorting datasets into ensure increasing order, removing empty entries\n")
dat <- dat[order(dat[,1],na.last=NA,decreasing=F),]
controlPts <- controlPts[order(controlPts[,1],na.last=NA,decreasing=F),]
dx1=dat[2:npts,1]-dat[1:(npts-1),1]
if(min(dx1) == 0) cat("\n**** WARNING: duplicate depth/height datum found in dat\n")
dx2=controlPts[2:ictrl,1]-controlPts[1:(ictrl-1),1]
if(min(dx2) == 0)
{
cat("\n**** ERROR: duplicate depth/height datum found in controlPts\n")
stop("**** TERMINATING NOW!")
}
}
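# thin wrapper around the compiled interpolation routine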
tuneit <- function (npts,x,ictrl,ctrl,t)
{
F_dat = .Fortran( 'tune_r',
npts=as.integer(npts),x=as.double(x),ictrl=as.integer(ictrl),
ctrl=as.double(ctrl),t=as.double(t),
tuned=double(npts)
)
return(F_dat)
}
tuneout <- tuneit(npts,dat[,1],ictrl,controlPts[,1],controlPts[,2])
out <- data.frame (cbind (tuneout$tuned,dat[,2]) )
dtDir=controlPts[1,2]-controlPts[ictrl,2]
if(!extrapolate && dtDir < 0) out = subset(out, (out[,1] >= controlPts[1,2]) & (out[,1] <= controlPts[ictrl,2]) )
if(!extrapolate && dtDir > 0) out = subset(out, (out[,1] <= controlPts[1,2]) & (out[,1] >= controlPts[ictrl,2]) )
ipts=length(out[,1])
if(verbose)
{
t1<-out[1:(ipts-1),1]
t2<-out[2:ipts,1]
dt=t2-t1
dtMin=min(dt)
dtMax=max(dt)
dtMean=mean(dt)
dtMedian=median(dt)
cat("\n * Mean sampling interval=", dtMean,"\n")
cat(" * Median sampling interval=",dtMedian,"\n")
cat(" * Maximum sampling interval=",dtMax,"\n")
cat(" * Minimum sampling interval=", dtMin,"\n")
}
if(genplot)
{
par(mfrow=c(2,1))
plot(dat, xlab="Location",ylab="Value",main="Data Series",bty="n",lwd=2,cex.axis=1.3,cex.lab=1.3,cex.main=1.4,cex=0.5)
lines(dat)
plot(out, xlab="Tuned",ylab="Value",main="Tuned Data Series",bty="n",lwd=2,cex.axis=1.3,cex.lab=1.3,cex.main=1.4,cex=0.5)
lines(out)
}
return(out)
} |
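# Tests for the stacking pipeline: default stacking, stacking without
# passing the original features through, a custom fold count, and insample
# resampling.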
context("ppl - pipeline_stacking")
test_that("Stacking Pipeline", {
base_learners = list(
lrn("classif.rpart", predict_type = "prob", id = "base.rpart")
)
super_learner = lrn("classif.rpart", id = "super.rpart")
graph_stack = pipeline_stacking(base_learners, super_learner)
expect_graph(graph_stack)
expect_names(graph_stack$ids(), identical.to = c("base.rpart", "nop", "featureunion", "super.rpart"))
graph_learner = as_learner(graph_stack)
graph_learner$train(tsk("iris"))
expect_class(graph_learner$model$super.rpart$model, "rpart")
expect_class(graph_learner$model$base.rpart$model, "rpart")
graph_stack = pipeline_stacking(base_learners, super_learner, use_features = FALSE)
expect_graph(graph_stack)
expect_names(graph_stack$ids(), identical.to = c("base.rpart", "featureunion", "super.rpart"))
graph_learner = as_learner(graph_stack)
graph_learner$train(tsk("iris"))
expect_class(graph_learner$model$super.rpart$model, "rpart")
expect_class(graph_learner$model$base.rpart$model, "rpart")
graph_stack = pipeline_stacking(base_learners, super_learner, folds = 5)
expect_graph(graph_stack)
expect_names(graph_stack$ids(), identical.to = c("base.rpart", "nop", "featureunion", "super.rpart"))
graph_learner = as_learner(graph_stack)
graph_learner$train(tsk("iris"))
expect_equal(graph_learner$graph$pipeops$base.rpart$param_set$values$resampling.folds, 5)
expect_class(graph_learner$model$super.rpart$model, "rpart")
expect_class(graph_learner$model$base.rpart$model, "rpart")
graph_stack = pipeline_stacking(base_learners, super_learner, method = "insample")
expect_graph(graph_stack)
expect_names(graph_stack$ids(), identical.to = c("base.rpart", "nop", "featureunion", "super.rpart"))
graph_learner = as_learner(graph_stack)
graph_learner$train(tsk("iris"))
expect_equal(graph_learner$graph$pipeops$base.rpart$param_set$values$resampling.method, "insample")
expect_class(graph_learner$model$super.rpart$model, "rpart")
expect_class(graph_learner$model$base.rpart$model, "rpart")
}) |
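# imageplot.bma: image display of the models visited by Bayesian model
# averaging. Column widths are proportional to posterior model
# probabilities; cells are colored by the sign of the estimated coefficient,
# and variables can be ordered by input order, by P(beta != 0), or by an MDS
# layout of pairwise inclusion agreement.
# NOTE: hex color strings in this function were truncated at "#" in the
# source; the reconstructed values below are assumptions.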
imageplot.bma <-
function (bma.out, color = c("red", "blue", "#FFFFFF"), order = c("input",
"probne0", "mds"), ...)
{
clr <- color
if (length(color) == 1) {
if (color == "default")
clr <- c("#FF0000", "#0000FF", "#FFFFFF") # assumed hex values; originals truncated
if (color == "blackandwhite")
clr <- c("black", "black", "white")
}
keep.mar <- par(mar = c(5, 6, 4, 2) + 0.1)
nmodel <- nrow(bma.out$which)
which <- bma.out$which
probne0 <- bma.out$probne0
if (class(bma.out) == "bic.surv")
mle <- bma.out$mle
else mle <- bma.out$mle[, -1, drop = FALSE]
nvar <- ncol(mle)
rownms <- bma.out$namesx
if (ifelse(!is.null(bma.out$factor.type), bma.out$factor.type,
FALSE)) {
which <- matrix(NA, ncol = nvar, nrow = nmodel)
probne0 <- rep(NA, times = nvar)
rownms <- rep(NA, times = nvar)
assign <- bma.out$assign
offset <- 1
if (class(bma.out) == "bic.surv")
offset <- 0
assign[[1]] <- NULL
for (i in 1:length(assign)) {
probne0[assign[[i]] - offset] <- bma.out$probne0[i]
which[, assign[[i]] - offset] <- bma.out$which[,
i]
nm <- names(bma.out$output.names)[i]
if (!is.na(bma.out$output.names[[i]][1]))
nm <- paste(nm, bma.out$output.names[[i]][-1],
sep = ".")
rownms[assign[[i]] - offset] <- nm
}
}
ordr.type <- match.arg(order)
if (ordr.type == "probne0")
ordr <- order(-probne0)
else if (ordr.type == "mds") {
postprob.rep <- matrix(bma.out$postprob, ncol = nvar,
nrow = nmodel)
k11 <- t(which + 0) %*% ((which + 0) * postprob.rep)
k00 <- t(1 - which) %*% ((1 - which) * postprob.rep)
k01 <- t(which + 0) %*% ((1 - which) * postprob.rep)
k10 <- t(1 - which) %*% ((0 + which) * postprob.rep)
ktau <- 4 * (k00 * k11 - k01 * k10)
dissm <- 1 - abs(ktau)
diag(dissm) <- 0
ordr <- order(as.vector(cmdscale(dissm, k = 1)))
}
else ordr <- 1:nvar
ordr <- rev(ordr)
postprob <- bma.out$postprob
which <- which[, ordr, drop = FALSE]
mle <- mle[, ordr, drop = FALSE]
rownms <- rownms[ordr]
color.matrix <- (which) * (2 - (mle > 0)) + 3 * (!which)
par(las = 1)
image(c(0, cumsum(postprob)), 1:nvar, color.matrix, col = clr,
xlab = "Model
xlim = c(0, 1), main = "Models selected by BMA", ...)
xat <- (cumsum(postprob) + c(0, cumsum(postprob[-nmodel])))/2
axis(1, at = xat, labels = 1:nmodel, ...)
axis(2, at = 1:nvar, labels = rownms, ...)
par(mar = keep.mar)
} |
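# randomUniformForest: S3 generics and methods. The formula interface builds
# the model frame and forwards to the default method, which trains via
# randomUniformForestCore(); print/summary/plot report OOB statistics,
# Breiman's bounds, and variable importance.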
randomUniformForest <- function(...) UseMethod("randomUniformForest")
importance <- function(object, ...) UseMethod("importance")
unsupervised <- function(object,...) UseMethod("unsupervised")
randomUniformForest.formula <- function(formula, data = NULL, subset = NULL, ...)
{
if (is.null(data)) stop("Please provide data.\n")
if (!is.null(subset)) data = data[subset,]
data <- fillVariablesNames(data)
mf <- model.frame(formula = formula, data = as.data.frame(data))
x <- model.matrix(attr(mf, "terms"), data = mf)[,-1]
y <- model.response(mf)
names(y) = NULL
RUFObject <- randomUniformForest.default(x, Y = y, ...)
RUFObject$call <- match.call()
RUFObject$formula <- formula
class(RUFObject) <- c("randomUniformForest.formula", "randomUniformForest")
RUFObject
}
print.randomUniformForest <- function(x,...)
{
object <- x
cat("Call:\n")
print(object$call)
cat("\n")
cat("Type of random uniform forest: ")
if (!is.null(object$unsupervised))
{ cat("Unsupervised learning\n") }
else
{
if (object$forest$regression) { cat("Regression\n") }
else { cat("Classification\n") }
}
cat("\n")
print(object$forestParams)
cat("\n")
if (!is.null(object$forest$OOB))
{
cat("Out-of-bag (OOB) evaluation")
if (!object$forest$regression)
{
if (!is.numeric(object$forest$pred.error))
{ cat("\n", object$forest$pred.error, "\n", "Options used seem have to be reconsidered.","\n") }
else
{
OOBErrorRate = mean(100*object$forest$pred.error)
cat("\nOOB estimate of error rate: ", round(OOBErrorRate, 2),"%\n", sep ="")
cat("OOB error rate bound (with 1% deviation): ",round(OOBErrorRate + OOBErrorRate*(1 - estimatePredictionAccuracy(floor(length(object$forest$OOB.predicts)*0.368))), 2),"%\n", sep = "")
cat("\nOOB confusion matrix:\n")
colnames(object$forest$OOB)[1:length(object$classes)] = object$classes
rownames(object$forest$OOB)[1:length(object$classes)] = object$classes
print(round(object$forest$OOB,4))
if ((length(object$classes) == 2) & (rownames(object$forestParams)[1] != "reduceDimension"))
{
cat("\nOOB estimate of AUC: ", round(pROC::auc(as.numeric(object$y), as.numeric(object$forest$OOB.predicts))[[1]], 4), sep = "")
cat("\nOOB estimate of AUPR: ", round(myAUC(as.numeric(object$forest$OOB.predicts),
as.numeric(object$y), falseDiscoveryRate = TRUE)$auc, 4),sep = "")
cat("\nOOB estimate of F1-score: ", round(fScore(object$forest$OOB), 4),sep = "")
}
cat("\nOOB (adjusted) estimate of geometric mean:", round(gMean(object$forest$OOB),4),"\n")
if (nrow(object$forest$OOB) > 2)
{ cat("OOB (adjusted) estimate of geometric mean for precision:", round(gMean(object$forest$OOB, precision = TRUE),4),"\n") }
if (!is.null(object$forest$OOB.strengthCorr))
{
cat("\nBreiman's bounds")
cat("\nExpected prediction error (under approximatively balanced classes): ", round(mean(100*rmNA(object$forest$OOB.strengthCorr$PE)),2),"%\n", sep ="")
cat("Upper bound: ", round(mean(100*rmNA(object$forest$OOB.strengthCorr$std.strength^2/object$forest$OOB.strengthCorr$strength^2)),2),"%\n", sep ="")
cat("Average correlation between trees:", round(mean(round(rmNA(object$forest$OOB.strengthCorr$avg.corr),4)),4),"\n")
cat("Strength (margin):", round(mean(round(rmNA(object$forest$OOB.strengthCorr$strength),4)),4),"\n")
cat("Standard deviation of strength:", round(mean(round(rmNA(object$forest$OOB.strengthCorr$std.strength),4)),4),"\n")
}
}
}
else
{
cat("\nMean of squared residuals:", round(mean(object$forest$pred.error),8), "\n")
cat("Mean squared error bound (experimental):", round(object$forest$pred.error + object$forest$pred.error*(1- estimatePredictionAccuracy(floor(length(object$forest$OOB.predicts)*
(1 - as.numeric(as.vector(object$forestParams["subsamplerate",1])))))), 6),"\n")
if (length(object$forest$percent.varExplained) > 1)
{
varExplained = object$forest$percent.varExplained
for (i in 1:length(varExplained))
{ varExplained[i] = paste(object$forest$percent.varExplained[i], "%", sep="") }
cat("Variance explained:", varExplained, "\n")
}
else { cat("Variance explained: ", object$forest$percent.varExplained, "%\n", sep = "") }
cat("\nOOB residuals:\n")
Residuals = summary(rmNA(object$forest$OOB.predicts - object$y))
names(Residuals) = c("Min", "1Q", "Median", "Mean", "3Q", "Max")
print(Residuals)
cat("Mean of absolute residuals:", sum(abs(rmNA(object$forest$OOB.predicts - object$y)))/length(rmNA(object$y)),"\n")
if (!is.null(object$forest$OOB.strengthCorr))
{
cat("\nBreiman's bounds")
cat("\nTheoretical prediction error:", round(rmNA(object$forest$OOB.strengthCorr$PE.forest),6) ,"\n")
cat("Upper bound:", round(rmNA(object$forest$OOB.strengthCorr$PE.max),6),"\n")
cat("Mean prediction error of a tree:", round(rmNA(object$forest$OOB.strengthCorr$PE.tree),6),"\n")
cat("Average correlation between trees residuals:", round(rmNA(object$forest$OOB.strengthCorr$mean.corr),4),"\n")
residuals_hat = vector(length = ncol(object$forest$OOB.votes))
residuals_hat <- apply(object$forest$OOB.votes, 2, function(Z) mean(rmInf(Z) - mean(rmNA(object$forest$OOB.predicts))))
cat("Expected squared bias (experimental):", round(mean(rmNA(residuals_hat))^2,6),"\n")
}
}
}
if (!is.null(object$errorObject))
{
cat("\nTest set")
if (!object$forest$regression)
{
cat("\nError rate: ")
cat(round(100*object$errorObject$error,2), "%\n", sep="")
cat("\nConfusion matrix:\n")
print(round(object$errorObject$confusion,4))
if (!is.null(object$errorObject$AUC))
{
cat("\nArea Under ROC Curve:", round(object$errorObject$AUC,4))
cat("\nArea Under Precision-Recall Curve:", round(object$errorObject$AUPR,4))
cat("\nF1 score:", round(fScore(object$errorObject$confusion),4))
}
cat("\nGeometric mean:", round(gMean(object$errorObject$confusion),4),"\n")
if (nrow(object$errorObject$confusion) > 2)
{ cat("Geometric mean of the precision:", round(gMean(object$errorObject$confusion, precision = TRUE),4),"\n") }
}
else
{
cat("\nMean of squared residuals: ", round(object$errorObject$error, 6), "\n", sep="")
cat("Variance explained: ", object$errorObject$percent.varExplained, "%\n\n", sep = "")
Residuals <- object$errorObject$Residuals
names(Residuals) = c("Min", "1Q", "Median", "Mean", "3Q", "Max")
cat("Residuals:\n")
print(Residuals)
cat("Mean of absolute residuals: ", round(rmNA(object$errorObject$meanAbsResiduals), 6), "\n", sep="")
}
}
}
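# summary method: plots relative variable importance (when available) and
# prints tree-size, leaf-node, and depth summaries of the forest.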
summary.randomUniformForest <- function(object, maxVar = 30, border = NA,...)
{
object <- filter.object(object)
if (!is.null(object$forest$variableImportance))
{
par(las=1)
maxChar = floor(2 + max(nchar(object$variablesNames))/2)
par(mar=c(5, maxChar + 1,4,2))
varImportance1 = varImportance = object$forest$variableImportance
if (!object$forest$regression)
{
varImportance[,"class"] = object$classes[as.numeric(varImportance[,"class"])]
varImportance1[,"class"] = varImportance[,"class"]
}
nVar = nrow(varImportance)
if (nVar > maxVar) { varImportance = varImportance[1:maxVar,] }
barplot( varImportance[nrow(varImportance):1,"percent.importance"], horiz = TRUE,
col = sort(heat.colors(nrow(varImportance)), decreasing = TRUE),
names.arg = varImportance[nrow(varImportance):1,"variables"], border = border,
xlab = "Relative Influence (%)",
main = if (!object$forest$regression)
{ "Variable Importance based on Information Gain" }
else
{
if ( length(grep(as.character(object$forestParams[nrow(object$forestParams),1]), "absolute")) == 1 )
{ "Variable Importance based on L1 distance" }
else
{ "Variable Importance based on L2 distance" }
})
abline(v = 100/nVar, col ='grey')
cat("\nGlobal Variable importance:\n")
if (!object$forest$regression)
{
cat("Note: most predictive features are ordered by 'score' and plotted. Most discriminant ones\nshould also be taken into account by looking 'class' and 'class.frequency'.\n\n")
}
print(varImportance1)
cat("\n")
cat("Average tree size (number of nodes) summary: ", "\n")
nodesView = unlist(lapply(object$forest$object,nrow))
print(floor(summary(nodesView)))
cat("\n")
cat("Average Leaf nodes (number of terminal nodes) summary: ", "\n")
terminalNodesView = unlist(lapply(object$forest$object, function(Z) length(which(Z[,"status"] == -1))))
print(floor(summary(terminalNodesView)))
cat("\n")
cat("Leaf nodes size (number of observations per leaf node) summary: ", "\n")
print(summary(unlist(lapply(object$forest$object, function(Z) Z[which(Z[,"prediction"] != 0), "nodes"]) )))
cat("\n")
cat("Average tree depth :", round(log(mean(nodesView))/log(2),0), "\n")
cat("\n")
cat("Theoretical (balanced) tree depth :", round(log(length(object$y), base = 2),0), "\n")
cat("\n")
}
else
{
cat("Average tree size (number of nodes) summary: ", "\n")
nodesView = unlist(lapply(object$forest$object,nrow))
print(floor(summary(nodesView)))
cat("\n")
cat("Average Leaf nodes (number of terminal nodes) summary: ", "\n")
terminalNodesView = unlist(lapply(object$forest$object, function(Z) length(which(Z[,"status"] == -1))))
print(floor(summary(terminalNodesView)))
cat("\n")
cat("Leaf nodes size (number of observations per leaf node) summary: ", "\n")
print(summary(unlist(lapply(object$forest$object, function(Z) Z[which(Z[,"prediction"] != 0), "nodes"]) )))
cat("\n")
cat("Average tree depth :", round(log(mean(nodesView), base = 2),0), "\n")
cat("\n")
cat("Theoretical (balanced) tree depth :", round(log(length(object$y), base = 2),0), "\n")
cat("\n")
}
}
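# plot method: monitors OOB error as trees are added (squared or absolute
# error for regression; min/mean/max class error otherwise).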
plot.randomUniformForest <- function(x, threads = "auto", ...)
{
object <- x
if (!is.null(object$forest$OOB.votes))
{
Ytrain = object$y
OOBMonitoring = object$forest$OOB.votes
ff = L2Dist
ffComment = "OOB mean squared error"
if ( length(grep(as.character(object$forestParams[nrow(object$forestParams),1]), "absolute")) == 1 )
{
ff = L1Dist
ffComment = "OOB mean absolute error"
}
ZZ <- monitorOOBError(OOBMonitoring, Ytrain, regression = object$forest$regression, threads = threads, f = ff)
if (object$forest$regression)
{ plot(ZZ, type = 'l', lty=2, xlab = "Trees", ylab = ffComment, ...) }
else
{
plot(apply(ZZ[,1:3],1, min), type='l', lty=2, col = "green", xlab = "Trees", ylab ="OOB error", ...)
points(apply(ZZ[,1:3],1, mean), type='l', lty=3)
points(apply(ZZ[,1:3],1, max), type='l', lty=3, col='red')
}
grid()
}
else
{ print("no OOB data to plot") }
}
getTree.randomUniformForest <- function(object, whichTree, labelVar = TRUE)
{
if (labelVar)
{
Tree = data.frame(object$forest$object[[whichTree]])
idx = which(Tree[, "split.var"] != 0)
Tree[idx, "split.var"] = object$variablesNames[Tree[idx, "split.var"]]
return(Tree)
}
else
{ return(object$forest$object[[whichTree]]) }
}
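# predict method: dispatcher to rUniformForestPredict(), exposing response,
# probability, vote, confidence-interval, ranking, and quantile outputs.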
predict.randomUniformForest <- function(object, X,
type = c("response", "prob", "votes", "confInt", "ranking", "quantile", "truemajority", "all"),
classcutoff = c(0,0),
conf = 0.95,
whichQuantile = NULL,
rankingIDs = NULL,
threads = "auto",
parallelpackage = "doParallel", ...) rUniformForestPredict(object, X, type = type, classcutoff = classcutoff,
conf = conf,
whichQuantile = whichQuantile,
rankingIDs = rankingIDs,
threads = threads,
parallelpackage = parallelpackage, ...)
residualsRandomUniformForest <- function(object, Y = NULL)
{
object = filter.object(object)
if (is.null(Y))
{
if (is.null(object$forest$OOB.predicts))
{ stop("please enable OOB option when computing a random uniform forest") }
else
{
print("OOB residuals:")
cat("\n")
return(object$y - object$forest$OOB.predicts)
}
}
else
{
if (is.numeric(object))
{ return(object - Y) }
else
{
if (!is.null(object$predictionObject$majority.vote))
{ return(object$predictionObject$majority.vote - Y) }
else
{ stop("Please provide model responses to compute residuals") }
}
}
}
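# genericOutput: assembles the returned object, attaching test-set
# predictions and error statistics when xtest/ytest are supplied, and
# relabeling confusion-matrix dimensions with the class names.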
genericOutput <- function(xtest, ytest, paramsObject, RUF.model, ytrain = NULL, classcutoff = c(0,0))
{
classes = NULL
if (!is.null(ytest))
{
if (is.factor(ytest)) { YNames = classes = levels(ytest) }
}
else
{
if (!is.null(ytrain))
{
if (!RUF.model$regression)
{
ytrain = as.factor(ytrain)
YNames = classes = levels(ytrain)
}
}
else
{
if (!RUF.model$regression) { YNames = classes = sort(unique(RUF.model$object[[2]][,6])[-1]) }
}
}
if (!is.null(RUF.model$OOB))
{
if (!RUF.model$regression & is.factor(ytest) & !is.character(RUF.model$OOB))
{ row.names(RUF.model$OOB) = colnames(RUF.model$OOB)[-(length(YNames)+1)] = YNames }
}
if (!is.null(xtest))
{
classwtString = as.vector(paramsObject[which(row.names(paramsObject) == "classwt"),1])
classwt = FALSE
if (is.na(as.logical(classwtString))) { classwt = TRUE }
if (!RUF.model$regression & (as.numeric(classcutoff[2]) != 0))
{
classcutoff = c(which(classes == as.character(classcutoff[1])), as.numeric( classcutoff[2]))
}
if (classcutoff[2] != 0 ) { classcutoff[2] = 0.5/classcutoff[2] }
RUF.predicts <- randomUniformForestCore.predict(RUF.model, xtest, pr.classwt = classwt,
pr.imbalance = classcutoff)
if (!is.null(ytest))
{
majorityVote = RUF.predicts$majority.vote
if (!RUF.model$regression)
{
majorityVote = as.factor(majorityVote)
levels(majorityVote) = classes[as.numeric(levels(majorityVote))]
}
errorObject <- someErrorType(majorityVote, ytest, regression = RUF.model$regression)
if (!RUF.model$regression & is.factor(ytest))
{ row.names(errorObject$confusion) = colnames(errorObject$confusion)[-(length(YNames)+1)] = YNames }
RUFObject = list(forest = RUF.model, predictionObject = RUF.predicts, errorObject = errorObject, forestParams = paramsObject, classes = classes)
}
else
{
RUFObject = list(forest = RUF.model, predictionObject = RUF.predicts, forestParams = paramsObject,
classes = classes)
}
}
else
{ RUFObject = list(forest = RUF.model, forestParams = paramsObject, classes = classes) }
RUFObject
}
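# Default training entry point: validates options, handles unsupervised
# mode, NA treatment, and random linear combinations of features, then
# delegates to randomUniformForestCore() and records the full parameter set
# in the returned object.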
randomUniformForest.default <- function(X, Y = NULL, xtest = NULL, ytest = NULL, ntree = 100,
mtry = ifelse(bagging,ncol(X),floor(4/3*ncol(X))),
nodesize = 1,
maxnodes = Inf,
depth = Inf,
depthcontrol = NULL,
regression = ifelse(is.factor(Y), FALSE, TRUE),
replace = ifelse(regression,FALSE,TRUE),
OOB = TRUE,
BreimanBounds = ifelse(OOB, TRUE, FALSE),
subsamplerate = ifelse(regression,0.7,1),
importance = TRUE,
bagging = FALSE,
unsupervised = FALSE,
unsupervisedMethod = c("uniform univariate sampling", "uniform multivariate sampling", "with bootstrap"),
classwt = NULL,
oversampling = 0,
targetclass = -1,
outputperturbationsampling = FALSE,
rebalancedsampling = FALSE,
featureselectionrule = c("entropy", "gini", "random", "L2", "L1"),
randomcombination = 0,
randomfeature = FALSE,
categoricalvariablesidx = NULL,
na.action = c("fastImpute", "accurateImpute", "omit"),
logX = FALSE,
classcutoff = c(0,0),
subset = NULL,
usesubtrees = FALSE,
threads = "auto",
parallelpackage = "doParallel",
...)
{
{
if (threads != 1)
{
if (sample(9,1) == 9) { rm.tempdir() }
}
if (!is.null(subset))
{
X = X[subset,]
Y = Y[subset]
}
if (exists("categorical", inherits = FALSE)) { categoricalvariablesidx = categorical }
else { categorical = NULL }
if (is.null(Y) | (unsupervised == TRUE))
{
cat("Enter in unsupervised learning.\n")
if (unsupervisedMethod[1] == "with bootstrap")
{
cat("'with bootstrap' is only needed as the second argument of 'unsupervisedMethod' option. Default option will be computed.\n")
unsupervisedMethod[1] = "uniform univariate sampling"
}
XY <- unsupervised2supervised(X, method = unsupervisedMethod[1], bootstrap = if (length(unsupervisedMethod) > 1) { TRUE } else {FALSE })
X = XY$X
Y = as.factor(XY$Y)
unsupervised = TRUE
}
if (ntree < 2) { stop("Please use at least 2 trees to grow a forest.\n") }
if ( (subsamplerate == 1) & (replace == FALSE)) { OOB = FALSE }
if (subsamplerate > 1) { replace = TRUE }
if (depth < 3) { stop("Stumps are not allowed. Minimal depth is 3, leading, at most, to 8 leaf nodes.\n") }
if (!is.null(classwt) & (!is.factor(Y) | regression))
{
cat("Class reweighing is not allowed for regression. Resetting to default values.\n")
classwt = NULL
}
if (targetclass == 0) { stop("'targetclass' must take a strictly positive value") }
if ( (BreimanBounds & (length(unique(Y)) > 2) & (ntree > 500)) | (BreimanBounds & (ntree > 500)) )
{ cat("Note: Breiman's bounds (especially for multi-class problems) are computationally intensive.\n") }
if (maxnodes < 6) { stop("Maximal number of nodes must be above 5.\n") }
X <- fillVariablesNames(X)
getFactors <- which.is.factor(X, count = TRUE)
if ( (sum(getFactors) > 0) & is.null(categoricalvariablesidx))
{
cat("Note: categorical variables are found in data. Please use option categoricalvariablesidx = 'all' to match them more closely.\n")
}
if (is.data.frame(X))
{ cat("X is a data frame. String or factors have been converted to numeric values.\n") }
X <- NAfactor2matrix(X, toGrep = "anythingParticular")
if (!is.null(categoricalvariablesidx))
{
if (categoricalvariablesidx[1] == "all")
{
factorVariables <- which(getFactors > 0)
if (length(factorVariables) > 0) { categoricalvariablesidx = factorVariables }
else { cat("\nNo categorical variables found. Please type them manually\n") }
}
else
{
if (is.character(categoricalvariablesidx[1]))
{ categoricalvariablesidx = sort(match(categoricalvariablesidx, colnames(X))) }
}
}
if ( length(which( (X == Inf) | (X == -Inf) ) ) > 0)
{ stop("Inf or -Inf values found in data. Learning can not be done.\nRemove or replace them with NA in order to learn.\n") }
XY <- NATreatment(X, Y, na.action = na.action, regression = regression)
X <- XY$X
Y <- XY$Y
rm(XY)
if (randomcombination[1] > 0)
{
randomcombinationString = randomcombination[1]
L.combination = length(randomcombination)
if (L.combination%%3 != 0)
{ weights = round(replicate(length(randomcombination)/2, sample(c(-1,1),1)*runif(1)), 2) }
else
{
weights = round(randomcombination[(L.combination + 1 - L.combination/3):L.combination],2)
randomcombination = randomcombination[1:(L.combination - (L.combination/3))]
}
for (i in 2:length(randomcombination)) { randomcombinationString = paste(randomcombinationString, randomcombination[i], sep=",") }
for (i in 1:length(weights)) { randomcombinationString = paste(randomcombinationString, weights[i], sep=",") }
if (!is.null(xtest)) { xtest <- randomCombination(NAfactor2matrix(xtest, toGrep = "anythingParticular"), combination = randomcombination, weights = weights) }
randomcombinationObject = list(randomcombination, weights)
X <- randomCombination(X, combination = randomcombinationObject[[1]], weights = randomcombinationObject[[2]])
}
else
{ randomcombinationObject = randomcombination }
}
RUF.model <- randomUniformForestCore(X, trainLabels = Y, ntree = ntree, nodeMinSize = nodesize, maxNodes = maxnodes,
features = mtry, rf.bootstrap = replace, depth = depth, depthControl = depthcontrol,
rf.treeSubsampleRate = subsamplerate, classwt = classwt, classCutOff = classcutoff,
rf.overSampling = oversampling, rf.targetClass = targetclass, rf.rebalancedSampling = rebalancedsampling,
rf.outputPerturbationSampling = outputperturbationsampling, rf.randomCombination = randomcombinationObject,
rf.randomFeature = randomfeature, rf.treeBagging = bagging, rf.featureSelectionRule = featureselectionrule[1],
rf.regression = regression, use.OOB = OOB, BreimanBounds = BreimanBounds, variableImportance = importance,
whichCatVariables = categoricalvariablesidx, logX = logX, threads = threads, useSubTrees = usesubtrees,unsupervised = unsupervised,
parallelPackage = parallelpackage[1])
if (!is.null(classwt))
{
classwtString = classwt[1]
for (i in 2:length(classwt)) { classwtString = paste(classwtString, classwt[i], sep=",") }
}
if (length(targetclass) > 1)
{
targetclassString = targetclass[1]
for (i in 2:length(targetclass)) { targetclassString = paste(targetclassString, targetclass[i], sep=",") }
}
if (length(rebalancedsampling) > 1)
{
rebalancedsamplingString = rebalancedsampling[1]
for (i in 2:length(rebalancedsampling))
{ rebalancedsamplingString = paste(rebalancedsamplingString, rebalancedsampling[i], sep= ",") }
}
if (RUF.model$regression) { classcutoff = c(0,0) }
if (as.numeric(classcutoff[2]) == 0) { classcutoffString = FALSE }
else
{
classcutoffString = levels(Y)[which(levels(Y) == as.character(classcutoff[1]))]
classcutoffString = paste("Class ", classcutoffString, "," , as.numeric(classcutoff[2])*100, "%", sep ="")
}
paramsObject = c(ntree, mtry, nodesize, maxnodes, as.character(replace), bagging, depth,
ifelse(is.null(depthcontrol), length(depthcontrol)> 0, depthcontrol), OOB, importance, subsamplerate,
ifelse(is.null(classwt), length(classwt) > 0, classwtString), classcutoffString,
ifelse((oversampling == 0), (oversampling != 0), oversampling), outputperturbationsampling,
ifelse(length(targetclass) > 1, targetclassString, targetclass),
ifelse(length(rebalancedsampling) > 1, rebalancedsamplingString, rebalancedsampling),
ifelse((randomcombination[1] == 0),(randomcombination != 0), randomcombinationString), randomfeature,
ifelse(is.null(categoricalvariablesidx), length(categoricalvariablesidx) > 0, length(categoricalvariablesidx)), featureselectionrule[1])
if (RUF.model$regression)
{
paramsObject[21] = if (featureselectionrule[1] == "L1") { "Sum of absolute residuals" } else { "Sum of squared residuals" }
if ((paramsObject[5] == "FALSE") & (subsamplerate == 1)) { paramsObject[8] = FALSE }
}
names(paramsObject) = c("ntree", "mtry", "nodesize", "maxnodes", "replace", "bagging", "depth", "depthcontrol", "OOB", "importance", "subsamplerate", "classwt", "classcutoff", "oversampling", "outputperturbationsampling", "targetclass", "rebalancedsampling", "randomcombination", "randomfeature", "categorical variables", "featureselectionrule")
paramsObject = as.data.frame(paramsObject)
RUF.model$logX = logX
if (!regression & !is.null(ytest) )
{
if (!is.factor(ytest))
{ ytest = as.factor(ytest) }
}
RUFObject <- genericOutput(xtest, ytest, paramsObject, RUF.model, ytrain = Y, classcutoff = classcutoff)
RUFObject$logX = logX
if (!is.null(Y)) { RUFObject$y = Y }
if (is.null(colnames(X)))
{
varNames = NULL
for (i in 1:ncol(X)) { varNames = c(varNames, paste("V", i, sep="")) }
RUFObject$variablesNames = varNames
}
else
{ RUFObject$variablesNames = colnames(X) }
if (randomcombination[1] > 0)
{
tempVarNames = vector(length = length(randomcombination)/2)
idx = 1
for (i in 1:(length(randomcombination)/2))
{
tempVarNames[i] = paste("V", randomcombination[idx], "x", randomcombination[idx+1], sep="")
idx = idx + 2
}
RUFObject$variablesNames = c(RUFObject$variablesNames, tempVarNames)
}
if (!is.null(categoricalvariablesidx)) { RUFObject$categoricalvariables = categoricalvariablesidx }
if (unsupervised)
{
RUFObject$unsupervised = TRUE
RUFObject$unsupervisedMethod = if (length(unsupervisedMethod) == 3) {unsupervisedMethod[1] }
else { if (length(unsupervisedMethod) == 1) { unsupervisedMethod } else { unsupervisedMethod[1:2] } }
}
RUFObject$call <- match.call()
class(RUFObject) <- "randomUniformForest"
RUFObject
}
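# Core engine of the forest: grows 'ntree' uniform decision trees in parallel
# (PSOCK cluster via doParallel), optionally computing OOB estimates, Breiman
# bounds and variable importance. Illustrative sketch of a direct call, with
# hypothetical data (the randomUniformForest() wrapper above is the intended
# entry point):
# X <- matrix(rnorm(200*5), 200, 5); y <- rnorm(200)
# fit <- randomUniformForestCore(X, trainLabels = y, ntree = 50)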
randomUniformForestCore <- function(trainData, trainLabels = 0,
features = floor(4/3*(ncol(trainData))),
ntree = 100,
nodeMinSize = 1,
maxNodes = Inf,
depth = Inf,
depthControl = NULL,
splitAt = "random",
rf.regression = TRUE,
rf.bootstrap = ifelse(rf.regression, FALSE, TRUE),
use.OOB = TRUE,
BreimanBounds = ifelse(use.OOB, TRUE, FALSE),
rf.treeSubsampleRate = ifelse(rf.regression, 0.7, 1),
rf.treeBagging = FALSE,
classwt = NULL,
classCutOff = c(0,0),
rf.overSampling = 0,
rf.targetClass = -1,
rf.outputPerturbationSampling = FALSE,
rf.rebalancedSampling = FALSE,
rf.featureSelectionRule = c("entropy", "gini", "random", "L2", "L1"),
variableImportance = TRUE,
rf.randomCombination = 0,
rf.randomFeature = FALSE,
whichCatVariables = NULL,
logX = FALSE,
unsupervised = FALSE,
useSubTrees = FALSE,
threads = "auto",
parallelPackage = "doParallel",
export = c("uniformDecisionTree", "CheckSameValuesInAllAttributes", "CheckSameValuesInLabels", "fullNode", "genericNode", "leafNode", "randomUniformForestCore.predict", "onlineClassify", "overSampling", "predictDecisionTree", "options.filter", "majorityClass", "randomCombination", "randomWhichMax", "which.is.na", "factor2vector", "outputPerturbationSampling", "rmNA", "count.factor", "find.idx", "classifyMatrixCPP",
"L2DistCPP", "checkUniqueObsCPP", "crossEntropyCPP", "giniCPP", "L2InformationGainCPP",
"entropyInformationGainCPP", "runifMatrixCPP", "NATreatment", "rmInf", "rm.InAList"),
...)
{
set.seed(sample(ntree,1))
n = nrow(trainData)
p = ncol(trainData)
if (exists("categorical", inherits = FALSE)) { whichCatVariables = categorical }
else { categorical = NULL }
if (useSubTrees)
{
if (depth == Inf) { depth = min(10, floor(0.5*log(n)/log(2))) }
if (rf.outputPerturbationSampling)
{
rf.outputPerturbationSampling = FALSE
cat("'output perturbation sampling' is not currently compatible with 'usesubtrees'.\n Option has been reset.\n")
}
}
if ((rf.randomFeature) & (features == "random") ) { stop("random feature is a special case of mtry = 'random'") }
if (is.numeric(threads))
{
if (threads < 1) { stop("Number of threads must be positive") }
}
if (!is.matrix(trainData)) { trainData <- NAfactor2matrix(trainData, toGrep = "anythingParticular") }
if (!is.null(whichCatVariables))
{
cat("Considering use of Dummies (for example by using formula) for categorical variables might be an alternative.\n")
}
{
if (!is.null(classwt))
{
classwt = classwt/sum(classwt)
if (is.na(sum(classwt))) { stop ("NA found in class weights.\n") }
if (sum(classwt) > 1.1) { stop ("(inverse) weights do not sum to 1.\n") }
}
if (length(trainLabels) != 1)
{
if (n != length(trainLabels))
{ stop ("X and Y don't have the same size.\n") }
}
if (!is.matrix(trainData))
{ stop ("X cannot be converted to a matrix. Please provide true matrix not data frame.") }
if (rf.treeSubsampleRate == 0) { rf.treeSubsampleRate = 1 }
if (rf.treeBagging) { features = min(features, p) }
if ( (rf.treeSubsampleRate < 0.5) & (n < 300) )
{ rf.treeSubsampleRate = 0.5; cat("Too small output matrix. Subsample rate has been set to 0.5.\n") }
if (!is.character(nodeMinSize))
{
if ( (nodeMinSize != 1) & (nodeMinSize > floor(nrow(trainData)/4)) ) { stop("nodeMinSize is too high. Not suitable for a random forest.\n") }
if ( (nodeMinSize < 1) ) { nodeMinSize = 1; cat("Minimal node size has been set to 1.\n") }
}
if ( features == 1 ) { rf.randomFeature = TRUE }
if ( features < 1 )
{
features = floor(4/3*p)
rf.treeBagging = FALSE
cat("Error setting mtry. Resetting to default values.\n")
}
if (is.factor(trainLabels))
{
if ((as.numeric(classCutOff[2]) != 0))
{
classCutOff = c(which(levels(trainLabels) == as.character(classCutOff[1])),
0.5/as.numeric(classCutOff[2]))
if (length(classCutOff) == 1)
{ stop("Label not found. Please provide name of the label instead of its position.\n") }
}
rf.regression = FALSE
labelsObject = factor2vector(trainLabels)
trainLabels = as.numeric(as.vector(labelsObject$vector))
if (length(unique(trainLabels)) == 1 ) { stop ("Y is a constant value. Thus, learning is not needed.\n") }
}
else
{
if (rf.regression & (length(unique(trainLabels)) > 32))
{
if ((rf.treeSubsampleRate < 1) & (rf.bootstrap == TRUE))
{ cat("For only accuracy, use option 'subsamplerate = 1' and 'replace = FALSE'\n") }
classCutOff = c(0,0)
}
else
{
if (!rf.regression)
{
if ((as.numeric(classCutOff[2]) != 0))
{
classCutOff = c(which(levels(trainLabels) == as.character(classCutOff[1])), 0.5/as.numeric(classCutOff[2]))
if (length(classCutOff) == 1)
{ stop("Label not found. Please provide name of the label instead of its position") }
}
labelsObject = factor2vector(trainLabels)
trainLabels = as.numeric(as.vector(labelsObject$vector))
labelsObject$factors = levels(as.factor(trainLabels))
if (length(unique(trainLabels)) == 1 ) { stop ("Y is a constant value. Thus, learning is not needed\n") }
}
else
{
if (length(unique(trainLabels)) <= 32)
{
cat("Regression has been performed but there is less than 32 distinct values.\nRegression option could be set to FALSE, if classification is
classCutOff = c(0,0)
}
}
}
}
if (!rf.regression)
{
rf.classes <- as.numeric(levels(as.factor(trainLabels)) )
if (as.character(rf.classes[1]) != labelsObject$factors[1])
{
cat("Labels", labelsObject$factors, "have been converted to", rf.classes,
"for ease of computation and will be used internally as a replacement.\n")
}
if (!is.null(classwt))
{
if (length(classwt) != length(rf.classes))
{ stop("Length of class weights is not equal to length of classes.\n") }
}
}
else
{ rf.classes = trainLabels; rf.overSampling = 0; rf.rebalancedSampling = FALSE; classCutOff = c(0,0) }
if (logX)
{
if (is.null(whichCatVariables)) { trainData <- generic.log(trainData) }
else { trainData[,-whichCatVariables] <- generic.log(trainData[,-whichCatVariables]) }
}
if (!rf.regression)
{
if ( (rf.featureSelectionRule[1] == "L1") | (rf.featureSelectionRule[1] == "L2") )
{
rf.featureSelectionRule[1] = "entropy"
cat("Feature selection rule has been set to entropy.\n")
}
}
else
{
if ( (rf.featureSelectionRule[1] == "entropy") | (rf.featureSelectionRule[1] == "gini") )
{ rf.featureSelectionRule[1] = "L2" }
if (!is.null(depthControl) & (!is.character(depthControl)))
{
if (depthControl < 1)
{
depthControl = NULL
cat("'depthcontrol' option is lower than its lower bound. Resetting to default value.\n")
}
}
}
if (!rf.bootstrap & (rf.treeSubsampleRate == 1)) { use.OOB = FALSE }
}
{
max_threads = detectCores()
if (threads == "auto")
{
if (max_threads == 2) { threads = max_threads }
else { threads = max(1, max_threads - 1) }
}
else
{
if (max_threads < threads)
{ cat("Note: number of threads is higher than logical threads in this computer.\n") }
}
{
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
}
chunkSize <- ceiling(ntree/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
}
rufObject = vector('list', ntree)
if (use.OOB)
{
rufObject <- foreach(i = 1:ntree, .export = export, .options.smp = smpopts, .inorder = FALSE,
.multicombine = TRUE, .maxcombine = ntree) %dopar%
{
uniformDecisionTree(trainData, trainLabels, nodeMinSize = nodeMinSize, maxNodes = maxNodes,
treeFeatures = features, getSplitAt = splitAt, regression = rf.regression, bootstrap = rf.bootstrap, treeSubsampleRate = rf.treeSubsampleRate, treeDepth = depth, treeClasswt = classwt,
treeOverSampling = rf.overSampling, targetClass = rf.targetClass, OOB = use.OOB,
treeRebalancedSampling = rf.rebalancedSampling, treeBagging = rf.treeBagging,
randomFeature = rf.randomFeature, treeCatVariables = whichCatVariables,
outputPerturbation = rf.outputPerturbationSampling, featureSelectionRule = rf.featureSelectionRule, treeDepthControl = depthControl, unsupervised = unsupervised)
}
stopCluster(Cl)
if (useSubTrees)
{
rufObject2 = vector('list', ntree)
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
rufObject2 <- foreach(i = 1:ntree, .export = export, .options.smp = smpopts, .inorder = FALSE,
.multicombine = TRUE, .maxcombine = ntree) %dopar%
{
finalTree = list()
OOBIdx = rufObject[[i]]$OOB.idx
idxList = rufObject[[i]]$idxList
trainData2 = trainData[rufObject[[i]]$followIdx,]
trainLabels2 = trainLabels[rufObject[[i]]$followIdx]
nodeVector = rufObject[[i]]$nodeVector
lengthIdx = sapply(idxList, length)
rmlengthIdx = which(lengthIdx < 2)
nodeVector = if (length(rmlengthIdx) > 0) nodeVector[-rmlengthIdx] else nodeVector
newIdxList <- if (length(rmlengthIdx) > 0) rm.InAList(idxList, rmlengthIdx) else idxList
originalTree = rufObject[[i]]$Tree
lengthNodeVector = length(nodeVector)
newTrees = vector('list', lengthNodeVector)
if ( (lengthNodeVector > 0) )
{
newTrees <- lapply(newIdxList, function(Z)
{
newTrainData = trainData2[Z, ,drop = FALSE]; newTrainLabels = trainLabels2[Z]
uniformDecisionTree(newTrainData, newTrainLabels, nodeMinSize = nodeMinSize,
maxNodes = maxNodes, treeFeatures = features, getSplitAt = splitAt,
regression = rf.regression, bootstrap = FALSE, treeSubsampleRate = 1, treeDepth = Inf, treeClasswt = classwt, treeOverSampling = rf.overSampling, targetClass = rf.targetClass, OOB = FALSE, treeRebalancedSampling = rf.rebalancedSampling, treeBagging = rf.treeBagging,
randomFeature = rf.randomFeature, treeCatVariables = whichCatVariables,
outputPerturbation = rf.outputPerturbationSampling,
featureSelectionRule = rf.featureSelectionRule, treeDepthControl = NULL,
unsupervised = unsupervised, moreThreadsProcessing = FALSE, moreNodes = TRUE)
}
)
for (j in 1:lengthNodeVector)
{
toInsertTree = newTrees[[j]]$Tree
keepMaxIdx = nrow(originalTree)
toInsertTree[,1:2] = toInsertTree[,1:2] + keepMaxIdx
toInsertTree[toInsertTree[,"status"] == -1,1:2] = 0
originalTree = rbind(originalTree, toInsertTree)
originalTree[nodeVector[j],] = toInsertTree[1,]
}
rownames(originalTree) = 1:nrow(originalTree)
}
finalTree$Tree = originalTree
finalTree$OOB.idx = OOBIdx
finalTree
}
rufObject = rufObject2
stopCluster(Cl)
}
{
OOB.matrix = NULL
new.rufObject = vector("list", ntree)
new.rufObject$Tree = lapply(rufObject, function(Z) Z$Tree)
for (i in 1:ntree)
{ OOB.matrix <- rbind(OOB.matrix, cbind(rufObject[[i]]$OOB.idx, rep(i,length(rufObject[[i]]$OOB.idx)))) }
OOB.val = sort(unique(OOB.matrix[,1]))
n.OOB = nrow(trainData)
OOB.votes2 = matrix(Inf, n.OOB, ntree)
if (is.null(classwt))
{
OOB.votes <- randomUniformForestCore.predict(new.rufObject$Tree, trainData,
pr.regression = rf.regression, classes = rf.classes, OOB.idx = TRUE,
pr.parallelPackage = parallelPackage[1], pr.imbalance = classCutOff, pr.threads = threads)
for (j in 1:ntree)
{
idxJ = which(OOB.matrix[,2] == j)
if (length(idxJ) > 0) { OOB.votes2[OOB.matrix[idxJ,1],j] = OOB.votes[OOB.matrix[idxJ,1],j] }
}
OOB.object <- majorityClass(OOB.votes2, rf.classes, m.imbalance = classCutOff,
m.regression = rf.regression)
}
else
{
OOB.allWeightedVotes = matrix(Inf, n.OOB, ntree)
OOB.votes <- randomUniformForestCore.predict(new.rufObject$Tree, trainData,
pr.regression = rf.regression, classes = rf.classes, pr.classwt = TRUE, OOB.idx = TRUE, pr.parallelPackage = parallelPackage[1], pr.imbalance = classCutOff, pr.threads = threads)
for (j in 1:ntree)
{
idxJ = which(OOB.matrix[,2] == j)
if (length(idxJ) > 0)
{
OOB.votes2[OOB.matrix[idxJ,1],j] = OOB.votes$all.votes[OOB.matrix[idxJ,1],j]
OOB.allWeightedVotes[OOB.matrix[idxJ,1],j] = OOB.votes$allWeightedVotes[OOB.matrix[idxJ,1],j]
}
}
OOB.object <- majorityClass(OOB.votes2, rf.classes, m.classwt = OOB.allWeightedVotes,
m.imbalance = classCutOff, m.regression = rf.regression)
}
OOB.pred = OOB.object$majority.vote
if (BreimanBounds)
{
strengthCorr.object <- strength_and_correlation(s.trainLabels = trainLabels, OOB.votes2,
OOB.object, rf.classes, s.regression = rf.regression, s.parallelPackage = parallelPackage[1], s.threads = threads)
}
if (rf.regression)
{
OOB.confusion = "only prediction error for regression. See ..$pred.error"
MSE <- L2Dist(OOB.pred[OOB.val], trainLabels[OOB.val])/n.OOB
pred.error = round(MSE,4)
percent.varExplained = max(0, 100*round(1 - MSE/var(trainLabels[OOB.val]),4))
}
else
{
if ( min(trainLabels) == 0) { OOB.pred = OOB.pred - 1 }
if ( (length(unique(trainLabels[OOB.val])) == 1) | (length(unique(OOB.pred[OOB.val])) == 1) )
{
OOB.confusion = "Minority class can not be predicted or OOB predictions contain only one label."
pred.error = "Minority class can not be predicted or OOB predictions contain only one label."
}
else
{
OOB.confusion <- confusion.matrix(OOB.pred[OOB.val], trainLabels[OOB.val])
pred.error <- generalization.error(OOB.confusion)
}
}
}
if (variableImportance)
{
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
gen.rufObject = new.rufObject$Tree
}
else
{
if (rf.regression)
{
if (BreimanBounds)
{
return(list(object = new.rufObject$Tree, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.strengthCorr = strengthCorr.object, OOB.votes = OOB.votes2, pred.error = pred.error,
percent.varExplained = percent.varExplained, regression = rf.regression) )
}
else
{
return(list(object = new.rufObject$Tree, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.votes = OOB.votes2, pred.error = pred.error, percent.varExplained = percent.varExplained,
regression = rf.regression) )
}
}
else
{
if (BreimanBounds)
{
return(list(object = new.rufObject$Tree, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.strengthCorr = strengthCorr.object, OOB.votes = OOB.votes2, pred.error = pred.error,
regression = rf.regression) )
}
else
{
return(list(object = new.rufObject$Tree, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.votes = OOB.votes2, pred.error = pred.error, regression = rf.regression) )
}
}
}
}
else
{
if (!useSubTrees)
{
rufObject <- foreach(i = 1:ntree, .export = export, .options.smp = smpopts, .inorder = FALSE,
.multicombine = TRUE, .maxcombine = ntree) %dopar%
{
uniformDecisionTree(trainData, trainLabels, nodeMinSize = nodeMinSize, maxNodes = maxNodes,
treeFeatures = features, getSplitAt = splitAt, regression = rf.regression, bootstrap = rf.bootstrap, treeSubsampleRate = rf.treeSubsampleRate, treeDepth = depth, treeClasswt = classwt,
treeOverSampling = rf.overSampling, targetClass = rf.targetClass, treeBagging = rf.treeBagging,
treeRebalancedSampling = rf.rebalancedSampling, randomFeature = rf.randomFeature,
treeCatVariables = whichCatVariables, outputPerturbation = rf.outputPerturbationSampling,
featureSelectionRule = rf.featureSelectionRule, treeDepthControl = depthControl,
unsupervised = unsupervised, moreThreadsProcessing = useSubTrees)$Tree
}
}
else
{
rufObject <- foreach(i = 1:ntree, .export = export, .options.smp = smpopts, .inorder = FALSE,
.multicombine = TRUE, .maxcombine = ntree) %dopar%
{
uniformDecisionTree(trainData, trainLabels, nodeMinSize = nodeMinSize, maxNodes = maxNodes,
treeFeatures = features, getSplitAt = splitAt, regression = rf.regression, bootstrap = rf.bootstrap, treeSubsampleRate = rf.treeSubsampleRate, treeDepth = depth, treeClasswt = classwt,
treeOverSampling = rf.overSampling, targetClass = rf.targetClass, treeBagging = rf.treeBagging,
treeRebalancedSampling = rf.rebalancedSampling, randomFeature = rf.randomFeature,
treeCatVariables = whichCatVariables, outputPerturbation = rf.outputPerturbationSampling,
featureSelectionRule = rf.featureSelectionRule, treeDepthControl = depthControl,
unsupervised = unsupervised, moreThreadsProcessing = useSubTrees)
}
stopCluster(Cl)
rufObject2 = vector('list', ntree)
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
rufObject2 <- foreach(i = 1:ntree, .export = export, .options.smp = smpopts, .inorder = FALSE,
.multicombine = TRUE, .maxcombine = ntree) %dopar%
{
OOBIdx = rufObject[[i]]$OOB.idx
idxList = rufObject[[i]]$idxList
trainData2 = trainData[rufObject[[i]]$followIdx,]
trainLabels2 = trainLabels[rufObject[[i]]$followIdx]
nodeVector = rufObject[[i]]$nodeVector
lengthIdx = sapply(idxList, length)
rmlengthIdx = which(lengthIdx < 2)
nodeVector = if (length(rmlengthIdx) > 0) nodeVector[-rmlengthIdx] else nodeVector
newIdxList <- if (length(rmlengthIdx) > 0) rm.InAList(idxList, rmlengthIdx) else idxList
originalTree = rufObject[[i]]$Tree
lengthNodeVector = length(nodeVector)
newTrees = vector('list', lengthNodeVector)
if (length(nodeVector) > 0)
{
newTrees <- lapply(newIdxList, function(Z)
{
newTrainData = trainData2[Z, ,drop = FALSE]; newTrainLabels = trainLabels2[Z]
uniformDecisionTree(newTrainData, newTrainLabels, nodeMinSize = nodeMinSize,
maxNodes = maxNodes, treeFeatures = features, getSplitAt = splitAt,
regression = rf.regression, bootstrap = FALSE, treeSubsampleRate = 1, treeDepth = Inf, treeClasswt = classwt, treeOverSampling = rf.overSampling, targetClass = rf.targetClass, OOB = FALSE, treeRebalancedSampling = rf.rebalancedSampling, treeBagging = rf.treeBagging,
randomFeature = rf.randomFeature, treeCatVariables = whichCatVariables,
outputPerturbation = rf.outputPerturbationSampling,
featureSelectionRule = rf.featureSelectionRule, treeDepthControl = NULL,
unsupervised = unsupervised, moreThreadsProcessing = FALSE, moreNodes = TRUE)
}
)
for (j in 1:lengthNodeVector)
{
toInsertTree = newTrees[[j]]$Tree
keepMaxIdx = nrow(originalTree)
toInsertTree[,1:2] = toInsertTree[,1:2] + keepMaxIdx
toInsertTree[toInsertTree[,"status"] == -1,1:2] = 0
originalTree = rbind(originalTree, toInsertTree)
originalTree[nodeVector[j],] = toInsertTree[1,]
}
rownames(originalTree) = 1:nrow(originalTree)
}
originalTree
}
rufObject = rufObject2
}
if (variableImportance) { gen.rufObject = rufObject }
else
{
stopCluster(Cl)
return( list(object = rufObject, regression = rf.regression))
}
}
if (variableImportance)
{
if (ntree <= 100) { threads = 1 }
varImpMatrix1 <- unlist(lapply(gen.rufObject, function(Z) Z[,"split var"]))
if (rf.regression) { varImpMatrix2 <- unlist(lapply(gen.rufObject, function(Z) Z[,"L2Dist"]))/1000 }
else { varImpMatrix2 <- unlist(lapply(gen.rufObject, function(Z) Z[,"Gain"])) }
varImpMatrix <- cbind(varImpMatrix1, varImpMatrix2)
if (!rf.regression)
{
predMatrix <- foreach(i = 1:ntree, .options.smp = smpopts, .inorder = TRUE, .combine = rbind,
.multicombine = TRUE) %dopar%
{
predIdx = which(gen.rufObject[[i]][,"left daughter"] == 0)
predClass = gen.rufObject[[i]][predIdx, "prediction"]
predVar = vector(length = length(predIdx))
if (useSubTrees)
{
for (j in seq_along(predIdx))
{
predVar[j] = gen.rufObject[[i]][which(gen.rufObject[[i]][,"left daughter"] == predIdx[j] |gen.rufObject[[i]][,"right daughter"] == predIdx[j]), "split var"][1]
}
}
else
{
for (j in seq_along(predIdx))
{
if ((predIdx[j] %% 2) == 0)
{
predVar[j] = gen.rufObject[[i]][which(gen.rufObject[[i]][,"left daughter"] == predIdx[j]), "split var"]
}
else
{
predVar[j] = gen.rufObject[[i]][which(gen.rufObject[[i]][,"right daughter"] == predIdx[j]), "split var"]
}
}
}
cbind(predVar, predClass)
}
}
stopCluster(Cl)
zeroIdx = which(varImpMatrix[,1] == 0)
if (length(zeroIdx) > 0) { varImpMatrix = varImpMatrix[-zeroIdx,] }
na.gain = which(is.na(varImpMatrix[,2]))
if (length(na.gain) > 0) { varImpMatrix = varImpMatrix[-na.gain,] }
rf.var = unique(sortCPP(varImpMatrix[,1]))
n.var = length(rf.var)
if (!rf.regression)
{
var.imp.output <- matrix(NA, n.var, 4)
for (i in 1:n.var)
{
classTable = sort(table(predMatrix[which(rf.var[i] == predMatrix[,1]),2]),decreasing = TRUE)
classMax = as.numeric(names(classTable))[1]
classFreq = (classTable/sum(classTable))[1]
var.imp.output[i,] = c(rf.var[i], sum(varImpMatrix[which(rf.var[i] == varImpMatrix[,1]),2]),
classMax, classFreq)
}
}
else
{
var.imp.output <- matrix(NA, n.var, 2)
for (i in 1:n.var)
{ var.imp.output[i,] = c(rf.var[i], sum(varImpMatrix[which(rf.var[i] == varImpMatrix[,1]),2])) }
}
var.imp.output = var.imp.output[order(var.imp.output[,2], decreasing = TRUE),]
var.imp.output = round(cbind( var.imp.output, 100*var.imp.output[,2]/max(var.imp.output[,2])),2)
percent.importance = round(100*var.imp.output[,2]/sum(var.imp.output[,2]),0)
var.imp.output[,2] = round(var.imp.output[,2],0)
var.imp.output = cbind(var.imp.output, percent.importance)
if (rf.regression)
{ colnames(var.imp.output) = c("variables", "score", "percent", "percent importance") }
else
{
colnames(var.imp.output) = c("variables", "score", "class", "class frequency", "percent",
"percent importance")
row.names(var.imp.output) = NULL
}
var.imp.output = data.frame(var.imp.output)
if (!is.null(colnames(trainData))) { var.imp.output[,1] = colnames(trainData)[var.imp.output[,1]] }
if (use.OOB)
{
if (rf.regression)
{
if (BreimanBounds)
{
return(list(object = gen.rufObject, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.strengthCorr = strengthCorr.object, OOB.votes = OOB.votes2, pred.error = pred.error,
percent.varExplained = percent.varExplained, variableImportance = var.imp.output,
regression = rf.regression) )
}
else
{
return(list(object = gen.rufObject, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.votes = OOB.votes2, pred.error = pred.error, percent.varExplained = percent.varExplained,
variableImportance = var.imp.output, regression = rf.regression) )
}
}
else
{
if (BreimanBounds)
{
return( list(object = gen.rufObject, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.strengthCorr = strengthCorr.object, OOB.votes = OOB.votes2, pred.error = pred.error,
variableImportance = var.imp.output, regression = rf.regression) )
}
else
{
return(list(object = gen.rufObject, OOB = OOB.confusion, OOB.predicts = as.numeric(OOB.pred),
OOB.votes = OOB.votes2, pred.error = pred.error, variableImportance = var.imp.output, regression = rf.regression) )
}
}
}
else
{ return(list(object = gen.rufObject, variableImportance = var.imp.output, regression = rf.regression) ) }
}
}
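# Prediction front-end: restores the parameters stored in the model (class
# cutoff, class weights, random combinations of variables), aligns the columns
# of the test matrix with the training variables, then formats the aggregated
# tree votes according to 'type' ("response", "prob", "votes", "confInt",
# "ranking", "quantile", "truemajority" or "all").
# Illustrative sketch (hypothetical 'model' and 'xtest' objects):
# probs <- rUniformForestPredict(model, xtest, type = "prob")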
rUniformForestPredict <- function(object, X,
type = c("response", "prob", "votes", "confInt", "ranking", "quantile", "truemajority", "all"),
classcutoff = c(0,0),
conf = 0.95,
whichQuantile = NULL,
rankingIDs = NULL,
threads = "auto",
parallelpackage = "doParallel", ...)
{
object <- filter.object(object)
X <- if (is.vector(X)) { t(X) } else { X }
X <- fillVariablesNames(X)
if (!is.null(object$formula) & (length(object$variablesNames) != ncol(X)))
{
mf <- model.frame(data = as.data.frame(cbind(rep(1, nrow(X)),X)))
X <- model.matrix(attr(mf, "terms"), data = mf)[,-1]
}
else
{
if (length(object$variablesNames) != ncol(X))
{
cat("Data to predict have not the same dimension than data that have been computed by the model.\n Relevant variables have been extracted.\n")
newVars <- rmNA(match(object$variablesNames, colnames(X)))
if (length(newVars) == 0)
{
stop("No relevant variable has been found. Please give to both train and test data the same column names.\n")
}
X = X[,newVars, drop = FALSE]
}
}
classes = object$classes
flag1 = flag2 = 1
for (i in 1:ncol(object$forestParams))
{
classCutOffString = as.vector(object$forestParams[which(row.names(object$forestParams) == "classcutoff"), i])
if ((classCutOffString != "FALSE") & (as.numeric(classcutoff[2]) == 0))
{
classCutOffString = rm.string(classCutOffString, "%")
classCutOffString = rm.string(classCutOffString, "Class ")
classCutOffString = strsplit(classCutOffString, ",")
classcutoff[1] = which(classes == classCutOffString[[1]][1])
classcutoff[2] = 0.5/(as.numeric(classCutOffString[[1]][2])/100)
}
else
{
if ((as.numeric(classcutoff[2]) != 0) & (i == 1))
{
classcutoff <- c(which(classes == as.character(classcutoff[1])), 0.5/(as.numeric(classcutoff[2])))
if (length(classcutoff) == 1)
{ stop("Label not found. Please provide name of the label instead of its position") }
if (i > 1) { cat("Only last cutoff will be used in incremental random Uniform Forest.\n") }
}
}
classwtString = as.vector(object$forestParams[which(row.names(object$forestParams) == "classwt"),i])
classwt = FALSE
if (is.na(as.logical(classwtString))) { classwt = TRUE; flag1 = flag2 = -1 }
else
{ flag2 = 1 }
if (flag1 != flag2)
{ stop("Incremental random Uniform forest. Class reweighing must remain (or miss) in all forests.\n") }
randomCombinationString = as.vector(object$forestParams[which(row.names(object$forestParams) == "randomcombination"),i])
if ((randomCombinationString != "FALSE") )
{
if (i == 1)
{
random.combination = as.numeric(unlist(strsplit(randomCombinationString, ",")))
nbCombination = length(random.combination)/3
X <- randomCombination(NAfactor2matrix(X, toGrep = "anythingParticular"),
combination = random.combination[1:(length(random.combination) - nbCombination)],
weights = random.combination[(length(random.combination) - nbCombination + 1):length(random.combination)])
}
else { cat("Only first random combination will be used in incremental random uniform forest.\n") }
}
}
r.threads = threads
predObject <- randomUniformForestCore.predict(object, X, rf.aggregate = TRUE, pr.imbalance = classcutoff, pr.threads = threads,
pr.classwt = classwt, pr.parallelPackage = parallelpackage[1])
object = filter.forest(object)
if (type[1] == "response")
{
predictedObject = predObject$majority.vote
if (!is.null(classes))
{
predictedObject = as.factor(predictedObject)
levels(predictedObject) = classes[as.numeric(levels(predictedObject))]
}
}
if (type[1] == "truemajority")
{
{
nbObs = nrow(predObject$all.votes)
trueMajorityVotes = rep(0,nbObs)
alpha1 = (1 - conf)/2
alpha2 = conf + alpha1
for (i in 1:nbObs)
{
expectedClasses = unlist(lapply(predObject$votes.data, function(Z) Z[i,1]))
votingMembers = unlist(lapply(predObject$votes.data, function(Z) Z[i,2]))
if (object$regression)
{
votes = cbind(expectedClasses, votingMembers)
colnames(votes) = c("expectedClasses", "votingMembers")
trueMajorityVotes[i] = sum( votes[,"expectedClasses"]*votes[,"votingMembers"])/sum(votes[,"votingMembers"])
}
else
{
outliers = c(quantile(votingMembers, alpha1), quantile(votingMembers, alpha2))
idxWoOutliers = which(votingMembers <= outliers[1] | votingMembers >= outliers[2])
votes = cbind(expectedClasses[-idxWoOutliers], votingMembers[-idxWoOutliers])
colnames(votes) = c("expectedClasses", "votingMembers")
trueMajorityVotes[i] = which.max(by(votes, votes[,"expectedClasses"], sum))
}
}
if (object$regression) { predictedObject = trueMajorityVotes }
else { predictedObject = as.factor(trueMajorityVotes); levels(predictedObject) = classes }
}
}
if (type[1] == "confInt")
{
if (!object$regression)
{ stop( "confidence interval can only be computed for regression") }
else
{
alpha1 = (1 - conf)/2
alpha2 = conf + alpha1
Q_1 = apply(predObject$all.votes, 1, function(Z) quantile(Z, alpha1))
Q_2 = apply(predObject$all.votes, 1, function(Z) quantile(Z, alpha2))
SD = apply(predObject$all.votes, 1, function(Z) sd(Z))
predictedObject = data.frame(cbind(predObject$majority.vote, Q_1, Q_2, SD))
Q1Name = paste("LowerBound", "(q = ", round(alpha1,3), ")", sep ="")
Q2Name = paste("UpperBound", "(q = ", round(alpha2,3), ")",sep ="")
colnames(predictedObject) = c("Estimate", Q1Name, Q2Name, "standard deviation")
}
}
if (type[1] == "quantile")
{
if (!object$regression)
{ stop( "quantile(s) can only be computed for regression") }
else
{
if (!is.numeric(whichQuantile))
{
stop( "Please use option 'whichQuantile', providing as argument a numeric value greater than 0 and lower than 1.\n")
}
if ( (whichQuantile <= 0) | (whichQuantile >= 1))
{ stop( "Please provide for 'whichQuantile' option, a numeric value than 0 and lower than 1.\n") }
predictedObject = apply(predObject$all.votes, 1, function(Z) quantile(Z, whichQuantile))
}
}
if (type[1] == "all")
{
predictedObject = predObject
if (object$regression)
{
stdDev = apply(predObject$all.votes, 1, sd)
confIntObject = data.frame(cbind(predObject$majority.vote, apply(predObject$all.votes, 1, function(Z) quantile(Z,0.025)),
apply(predObject$all.votes, 1, function(Z) quantile(Z,0.975)), stdDev))
colnames(confIntObject) = c("Estimate", "LowerBound", "UpperBound", "Standard deviation")
predictedObject$confidenceInterval = confIntObject
}
}
if (type[1] == "votes") { predictedObject = predObject$all.votes }
if ( (type[1] == "prob") & (!object$regression) )
{
predictedObject = round(getVotesProbability2(predObject$all.votes, 1:length(classes)), 4)
colnames(predictedObject) = classes
}
if ( (type[1] == "prob") & (object$regression) ) { stop("Probabilities can not be computed for regression") }
if ( (type[1] == "ranking") & (!object$regression) )
{
predictedObject = predictedObject2 = predObject$majority.vote
if (!is.null(classes))
{
if (!is.numeric(as.numeric(classes)))
{ stop("Class are not numeric values or factor of numeric values. For Ranking, numeric values are needed as an equivalent of each class.") }
else
{
minClass = min(as.numeric(classes))
minPred = min(predictedObject)
maxClass = max(as.numeric(classes))
maxPred = max(predictedObject)
if (minClass < (minPred - 1))
{
predictedObject[predictedObject != minPred] = predictedObject[predictedObject != minPred] - 1
predictedObject = predictedObject - 1
}
else
{
if (minClass < minPred)
{
predictedObject = predictedObject - 1
}
}
if (maxClass > maxPred)
{ predictedObject[predictedObject == maxPred] = maxPred }
}
}
else { stop("Class not found. Please check if model is computed as classification, by looking if Y is set as factor.") }
numClasses = sort(unique(predObject$all.votes[,1]))
probabilities = round(getVotesProbability2(predObject$all.votes, numClasses), 2)
colnames(probabilities) = classes
countPredObject = table(predictedObject2)
majorityVote = as.numeric(names(which.max(countPredObject)))
n = length(predictedObject)
followIdx = 1:n
if (!is.null(rankingIDs))
{
rankingObject = cbind(followIdx, rankingIDs, predictedObject, probabilities)
if (length(classes) > 2)
{
minoritiesProbability = rowSums(probabilities[,-majorityVote])
rankingObject = cbind(rankingObject, minoritiesProbability)
}
colnames(rankingObject)[1] = "idx"
if (is.vector(rankingIDs) | is.factor(rankingIDs))
{
lengthRankingIDS = 1
colnames(rankingObject)[2] = "ID"
colnames(rankingObject)[3] = "majority vote"
cases = sort(unique(rankingIDs))
}
else
{
lengthRankingIDS = ncol(rankingIDs)
if (is.null(colnames(rankingIDs)))
{ colnames(rankingObject)[2:(2+lengthRankingIDS)] = "ID" }
colnames(rankingObject)[lengthRankingIDS+2] = "majority vote"
cases = sort(unique(rankingIDs[,1]))
}
if (length(classes) > 2)
{ minorityIdx = which(colnames(rankingObject) == "minoritiesProbability") }
else
{ minorityIdx = which(colnames(rankingObject) == classes[-majorityVote]) }
lengthCases = length(cases)
subCases = vector('list', lengthCases)
for (i in 1:lengthCases)
{ subCases[[i]] = which(rankingObject[,2] == cases[i]) }
rankingOutputObject <- matrix(NA, n, ncol(rankingObject) + 1)
for (i in 1:lengthCases)
{
if (length(subCases[[i]]) > 1)
{
rankingOutputObject[subCases[[i]],] <- as.matrix(cbind(sortMatrix(rankingObject[subCases[[i]],], minorityIdx, decrease = TRUE),
1:length(subCases[[i]])))
}
else
{ rankingOutputObject[subCases[[i]],] <- c(rankingObject[subCases[[i]],], 1) }
}
rankingOutputObject = as.data.frame(rankingOutputObject)
for (j in 1:ncol(rankingOutputObject))
{
if (is.factor(rankingOutputObject[,j]) & is.numeric(as.numeric(as.vector(rankingOutputObject[,j]))))
{ rankingOutputObject[,j] = as.numeric(as.vector(rankingOutputObject[,j])) }
}
colnames(rankingOutputObject) = c(colnames(rankingObject), "rank")
predictedObject = sortMatrix(rankingOutputObject,1)
}
else
{
rankingObject = cbind(followIdx, predictedObject, probabilities)
if (length(classes) > 2)
{
minoritiesProbability = rowSums(probabilities[,-majorityVote])
rankingObject = cbind(rankingObject, minoritiesProbability)
}
colnames(rankingObject)[1] = "idx"
colnames(rankingObject)[2] = "majority vote"
if (length(classes) > 2)
{ minorityIdx = which(colnames(rankingObject) == "minoritiesProbability") }
else
{ minorityIdx = which(colnames(rankingObject) == classes[-majorityVote]) }
rankingOutputObject = sortMatrix(rankingObject, minorityIdx, decrease = TRUE)
predictedObject = cbind(rankingOutputObject, 1:n)
colnames(predictedObject)[ncol(predictedObject)] = "rank"
predictedObject = sortMatrix(predictedObject,1)
}
}
if ( (type[1] == "ranking") & (object$regression) )
{ stop("Ranking currently available for classification tasks") }
predictedObject
}
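# Low-level prediction: drops each observation down every tree. For small
# problems (ntree*n < 1e7, or a single thread) a plain lapply() is used;
# otherwise the test matrix is split by rows and dispatched to a PSOCK
# cluster. Returns the majority vote, the raw votes (OOB mode), or the full
# aggregate (votes, node depths and node lengths) when rf.aggregate = TRUE.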
randomUniformForestCore.predict <- function(object, X,
rf.aggregate = TRUE,
OOB.idx = FALSE,
pr.imbalance = c(0,0),
pr.regression = TRUE,
pr.classwt = FALSE,
classes = -1,
pr.export = c("onlineClassify", "predictDecisionTree", "majorityClass", "randomWhichMax", "rmNA", "mergeLists", "classifyMatrixCPP", "L2DistCPP", "checkUniqueObsCPP", "crossEntropyCPP", "giniCPP", "L2InformationGainCPP", "entropyInformationGainCPP", "runifMatrixCPP", "NATreatment"),
pr.threads = "auto",
pr.parallelPackage = "doParallel")
{
if (!OOB.idx)
{
if (is.matrix(X))
{
matchNA = (length(which(is.na(X))) > 0)
if (matchNA)
{
cat("NA found in data. Fast imputation (means) is used for missing values. Please use one of many models available if accuracy is needed\n Or use na.impute() function with the option 'na.action = accurateImpute'.")
X <- na.impute(X)
}
}
else
{
X <- NAfactor2matrix(X, toGrep = "anythingParticular")
matchNA = (length(which(is.na(X))) > 0)
if (matchNA)
{
cat("NA found in data. Fast imputation (means) is used for missing values. Please use one of many models available if accuracy is needed\n Or use na.impute() function with the option 'na.action = accurateImpute'.\n")
X <- na.impute(X)
}
}
if (!is.null(object$logX))
{
if (object$logX)
{
if (is.null(object$categoricalvariables)) { X <- generic.log(X) }
else { X[,-object$categoricalvariables] <- generic.log(X[,-object$categoricalvariables]) }
}
}
object = filter.forest(object)
}
if (!is.null(object$OOB) & (!OOB.idx))
{ OOB.predicts = object$OOB.predicts; pr.regression = object$regression; object = object$object }
else
{
if (!OOB.idx) { pr.regression = object$regression; object = object$object }
if (!is.null(object$variableImportance) & (OOB.idx)) { object = object$object }
}
n = nrow(X)
if (!pr.regression)
{
if (classes[1] < 1)
{
classes = sort(as.numeric(rownames(table(object[[sample(1:length(object),1)]][,"prediction"]))))
if (classes[1] == 0) { classes = classes[-1] }
}
l.class = length(classes)
class.occur = rep(0,l.class)
}
{
ntree = length(object)
pred.X = vector("list", ntree)
all.votes = nodes.length = nodes.depth = matrix(data = Inf, nrow = n, ncol = ntree)
majority.vote = vector(length = n)
fullDim = ntree*n
if ((fullDim < 1e7) | (pr.threads == 1))
{ pred.X <- lapply(object, function(Z) predictDecisionTree(Z, X)) }
else
{
{
max_threads = detectCores()
if (pr.threads == "auto")
{
if (max_threads == 2) { pr.threads = max_threads }
else { pr.threads = max(1, max_threads - 1) }
}
else
{
if (max_threads < pr.threads)
{ cat("Note: number of threads is higher than logical threads in this computer.\n") }
}
Cl = makePSOCKcluster(pr.threads, type = "SOCK")
registerDoParallel(Cl)
chunkSize <- ceiling(n/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
}
pushObject <- function(object, X) lapply(object, function(Z) predictDecisionTree(Z, X))
pred.X <- foreach(X = iterators::iter(X, by ='row', chunksize = chunkSize), .combine = mergeLists, .export = pr.export) %dopar%
pushObject(object, X)
stopCluster(Cl)
}
for (i in 1:ntree)
{
all.votes[,i] = pred.X[[i]][,1]
nodes.length[,i] = pred.X[[i]][,2]
nodes.depth[,i] = pred.X[[i]][,3]
}
if (pr.classwt)
{
allWeightedVotes = matrix(data = Inf, nrow = n, ncol = ntree)
for (i in 1:ntree) { allWeightedVotes[,i] = object[[i]][nodes.depth[,i], "avgLeafWeight"] }
}
else
{ allWeightedVotes = NULL }
if (pr.regression) { majority.vote = rowMeans(all.votes) }
else { majority.vote = majorityClass(all.votes, classes, m.imbalance = pr.imbalance, m.classwt = allWeightedVotes)$majority.vote }
}
if (rf.aggregate & (!OOB.idx))
{
return(list(all.votes = all.votes, majority.vote = majority.vote, nodes.depth = nodes.depth, nodes.length = nodes.length,
votes.data = pred.X))
}
else
{
if (OOB.idx)
{
if (pr.classwt) { return(list(all.votes = all.votes, allWeightedVotes = allWeightedVotes)) }
else { return(all.votes) }
}
else { return(majority.vote) }
}
}
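# Vote aggregation: row means for regression; for classification, a class
# count per observation, optionally weighted (m.classwt) and rebalanced by a
# cutoff (m.imbalance), with ties broken at random by randomWhichMax().
# Minimal sketch with a hypothetical votes matrix (2 observations, 3 trees):
# votes <- matrix(c(1, 1, 2, 2, 2, 1), nrow = 2, byrow = TRUE)
# majorityClass(votes, classes = c(1, 2))$majority.vote # expected: c(1, 2)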
majorityClass <- function(all.votes, classes, m.regression = FALSE, m.imbalance = c(0,0), m.classwt = NULL,
m.threads = "auto")
{
if (m.regression)
{
all.votes[all.votes == Inf] = NA
majority.vote = rowMeans(all.votes, na.rm = TRUE)
class.counts = NULL
return(list(majority.vote = majority.vote, class.counts = class.counts))
}
else
{
n = nrow(all.votes)
majority.vote = vector(length = n)
l.class = length(classes)
class.occur = vector(length = l.class)
class.countsMajorityVote = trueClass.counts = matrix(NA, n, l.class)
for (i in 1:n)
{
if (!is.null(m.classwt))
{
for (j in 1:l.class)
{
idx = rmNA(which(all.votes[i,] == classes[j]))
l.idx = length(idx)
if (l.idx > 0) { class.occur[j] = sum(m.classwt[i,idx]) }
else { class.occur[j] = 0 }
}
}
else
{
for (j in 1:l.class) { class.occur[j] <- sum(all.votes[i,] == classes[j]) }
}
if (m.imbalance[1] != 0)
{ class.occur[m.imbalance[1]] = floor(class.occur[m.imbalance[1]]*m.imbalance[2]) }
majority.vote[i] = randomWhichMax(class.occur)
class.countsMajorityVote[i,] = c(class.occur[majority.vote[i]], class.occur[-majority.vote[i]])
trueClass.counts[i,] = class.occur
}
return(list(majority.vote = majority.vote, class.counts = class.countsMajorityVote, trueClass.counts = trueClass.counts))
}
}
getVotesProbability <- function(X, classes) (majorityClass(X, classes)$class.counts/ncol(X))
getVotesProbability2 <- function(X, classes) (majorityClass(X, classes)$trueClass.counts/ncol(X))
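# Breiman's bounds, estimated from the OOB votes: forest strength s, mean
# tree correlation rho, and the resulting error bounds, i.e.
# PE <= rho*(1 - s^2)/s^2 for classification (PE.est below), or
# PE.forest ~ rho*(mean sd of tree errors)^2 for regression.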
strength_and_correlation <- function(OOB.votes, OOB.object,
rf.classes,
s.trainLabels = NULL,
s.regression = FALSE,
output = NULL,
s.threads = "auto",
s.parallelPackage = "doParallel" )
{
j = NULL
dimOOBvotes = dim(OOB.votes)
n.OOB = dimOOBvotes[1]
p.OOB = dimOOBvotes[2]
if (s.regression)
{
OOB.votes[OOB.votes == Inf] = NA
if (is.null(s.trainLabels)) { Y = rowMeans(OOB.votes, na.rm =TRUE) }
else { Y = s.trainLabels }
expectedSquaredErrorOverTrees = colMeans( (OOB.votes - Y)^2, na.rm = TRUE)
PE.tree = sum(expectedSquaredErrorOverTrees)/length(expectedSquaredErrorOverTrees)
sd.T = sqrt(expectedSquaredErrorOverTrees)
mean.corr = mean(rowMeans(cor((Y - OOB.votes), use = "pairwise.complete.obs")))
if (is.na(mean.corr))
{
cat("Not enough data to compute average correlation for trees. Error is then prediction error of a tree.\n")
return(list(PE.forest = (mean(sd.T))^2, PE.max = PE.tree, PE.tree = PE.tree, mean.corr = mean.corr))
}
else
{ return(list(PE.forest = mean.corr*(mean(sd.T))^2, PE.max = mean.corr*PE.tree, PE.tree = PE.tree, mean.corr = mean.corr)) }
}
else
{
max_threads = detectCores()
if (s.threads == "auto")
{ s.threads = max(1, max_threads - 1) }
else
{
if (max_threads < s.threads)
{ cat("Note: number of threads is higher than logical threads in this computer.\n") }
}
{
Cl <- makePSOCKcluster(s.threads, type ="SOCK")
registerDoParallel(Cl)
}
chunkSize <- ceiling(p.OOB/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
p.new.OOB = apply(OOB.votes, 1, function(OOB.votes) sum(OOB.votes != Inf))
if (length(rf.classes) == 2)
{
Q.x.1 = OOB.object$class.counts[,1]/rowSums(OOB.object$class.counts)
rawStrength = 2*Q.x.1 - 1
Tk.1 = Tk.2 = matrix (data = NA, ncol = p.OOB, nrow = n.OOB)
OOB.votes.1 = cbind(OOB.votes, OOB.object$majority.vote)
Tk.1 <- foreach(j = 1:p.OOB, .options.smp = smpopts, .combine = cbind, .multicombine = TRUE) %dopar%
apply(OOB.votes.1[,-j], 1, function(Z) sum(Z[-p.OOB] == Z[p.OOB]))
Tk.1 = Tk.1/p.new.OOB
Tk.2 = 1 - Tk.1
stopCluster(Cl)
}
else
{
nn = rowSums(OOB.object$class.counts)
Q.x.j = apply(OOB.object$class.counts[,-1], 1, max)
Q.x.y = OOB.object$class.counts[,1]
rawStrength = (Q.x.y - Q.x.j)/nn
maj.class.j = vector(length = n.OOB)
for (i in 1:n.OOB)
{
second.class.max = randomWhichMax(OOB.object$class.counts[i,-1])
maj.class.j[i] = rf.classes[-OOB.object$majority.vote[i]][second.class.max]
}
OOB.votes.1 = cbind(OOB.votes, OOB.object$majority.vote)
OOB.votes.2 = cbind(OOB.votes, maj.class.j)
ZZ <- function(j) cbind(apply(OOB.votes.1[,-j], 1, function(Z) sum(Z[-p.OOB] == Z[p.OOB])), apply(OOB.votes.2[,-j], 1,
function(Z) sum(Z[-p.OOB] == Z[p.OOB])))
Tk <- foreach(j = 1:p.OOB, .options.smp = smpopts, .combine = cbind, .multicombine = TRUE) %dopar% ZZ(j)
mixIdx = getOddEven(1:ncol(Tk))
Tk.1 = Tk[,mixIdx$odd]
Tk.2 = Tk[,mixIdx$even]
Tk.1 = Tk.1/p.new.OOB
Tk.2 = Tk.2/p.new.OOB
stopCluster(Cl)
}
p1 = colMeans(Tk.1)
p2 = colMeans(Tk.2)
strength = mean(rawStrength)
varStrength = var(rawStrength)
sd.T = ((p1 + p2 + (p1 - p2)^2))^0.5
mean.corr = varStrength / (mean(sd.T))^2
PE.est = mean.corr*(1 - strength^2)/strength^2
return(list(PE = PE.est, avg.corr = mean.corr, strength = strength, std.strength = sqrt(varStrength)))
}
}
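# OOB error monitoring: recomputes the OOB error using only the first j
# trees, for j = 1..ntree, so that convergence of the forest can be plotted.
# For classification, two extra curves are returned, obtained by favouring
# class 1 and then class 2 through a 1.5 cutoff (m.imbalance).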
monitorOOBError <- function(OOB.votes, Y, regression = FALSE, threads = "auto", f = L2Dist)
{
j = NULL
n = length(Y)
n.RS = sqrt(n)
p = ncol(OOB.votes)
max_threads = detectCores()
if (threads == "auto")
{
if (max_threads == 2) { threads = max_threads }
else { threads = max(1, max_threads - 1) }
}
else
{
if ((max_threads) < threads)
{ cat("Note: number of threads is higher than logical threads in this computer\n.") }
}
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
chunkSize <- ceiling(p/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
if (!regression)
{
Y = as.numeric(Y)
classes = sort(unique(Y))
S1 = sample(classes,1)
S2 = classes[-S1][1]
OOBmonitor <- foreach(j = 1:(p-1),
.export = c("generalization.error", "confusion.matrix", "majorityClass", "rmNA", "randomWhichMax"),
.options.smp = smpopts, .combine = rbind) %dopar%
{
Estimate <- generalization.error(confusion.matrix(majorityClass(OOB.votes[,1:(j+1)], classes)$majority.vote, Y))
C1 <- generalization.error(confusion.matrix(majorityClass(OOB.votes[,1:(j+1)], classes, m.imbalance =c(1, 1.5))$majority.vote, Y))
C2 <- generalization.error(confusion.matrix(majorityClass(OOB.votes[,1:(j+1)], classes, m.imbalance= c(2, 1.5))$majority.vote, Y))
t(c(Estimate, C1, C2))
}
E0 <- generalization.error(confusion.matrix(majorityClass(matrix(OOB.votes[,1]), classes)$majority.vote, Y))
C1 <- generalization.error(confusion.matrix(majorityClass(matrix(OOB.votes[,1]), classes, m.imbalance =c(1, 1.5))$majority.vote, Y))
C2 <- generalization.error(confusion.matrix(majorityClass(matrix(OOB.votes[,1]), classes, m.imbalance= c(2, 1.5))$majority.vote, Y))
OOBmonitor = rbind(t(c(E0, C1, C2)),OOBmonitor)
}
else
{
OOBmonitor <- foreach(j = 1:(p-1), .export = c("generalization.error", "confusion.matrix", "majorityClass", "rmNA", "L2Dist", "L1Dist"), .options.smp = smpopts, .combine = c) %dopar%
{
Z = majorityClass(OOB.votes[,1:(j+1)], 0, m.regression = TRUE)$majority.vote
NAIdx = which(is.na(Z))
if (length(NAIdx) > 0) { f(Z[-NAIdx], Y[-NAIdx])/length(Z[-NAIdx])}
else { f(Z, Y)/length(Z) }
}
Z = majorityClass(matrix(OOB.votes[,1]), 0, m.regression = TRUE)$majority.vote
NAIdx = which(is.na(Z))
E0 = if (length(NAIdx) > 0) { f(Z[-NAIdx], Y[-NAIdx])/length(Z[-NAIdx])} else { f(Z, Y)/length(Z) }
OOBmonitor = c(E0,OOBmonitor)
}
stopCluster(Cl)
return(OOBmonitor)
}
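# Weighted vote for regression: per observation, keeps the 'idx' most
# frequent tree estimates (after rounding to 'granularity' digits) and
# combines them with weights proportional to their magnitudes.
# Minimal sketch with hypothetical votes (1 observation, 4 trees):
# weightedVote(matrix(c(1.02, 1.01, 0.99, 2.5), 1, 4), idx = 2)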
weightedVote <- function(all.votes, idx = 2, granularity = 2)
{
all.votes <- round(all.votes, granularity)
apply(all.votes, 1, function(Z)
{
A = sort(table(rmInf(Z)), decreasing = TRUE)
B = as.numeric(names(A))[1:idx]
sum( B * (B/sum(abs(B))) )
}
)
}
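# Linear blending of vote summaries: in training mode, regresses Y on the
# majority (mean) vote and one or more weightedVote() estimates using lm();
# in prediction mode (train = FALSE), applies the stored coefficients
# ('models.coeff') to the same summaries computed on new votes.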
weightedVoteModel <- function(votes, majorityVote, Y = NULL, nbModels = 1, idx = 1 , granularity = 1, train = TRUE, models.coeff = NULL)
{
if (train)
{
if (is.null(Y))
{ stop("output is neded to build model") }
if (nbModels == 1)
{
default.model = weightedVote(votes, idx = idx, granularity = granularity)
lin.model = lm(Y ~ cbind(majorityVote, default.model))
}
else
{
models = matrix(data = NA, ncol = nbModels + 2, nrow = length(Y))
models[,1] = majorityVote
models[,2] = weightedVote(votes, idx = idx, granularity = granularity)
for (j in 3:(nbModels+2))
{ models[,j] = weightedVote(votes, idx = max(j,idx), granularity = j ) }
lin.model = lm(Y ~ models)
}
return(lin.model)
}
else
{
p = length(models.coeff)
models = matrix(data = NA, ncol = p, nrow = length(majorityVote))
models[,1] = rep(1,length(majorityVote))
models[,2] = majorityVote
models[,3] = weightedVote(votes, idx = idx, granularity = granularity)
if (p > 3)
{
for (j in 4:p)
{ models[,j] = weightedVote(votes, idx =j , granularity = j) }
}
newMajorityVote = apply(models,1, function(Z) sum(models.coeff*Z))
return(newMajorityVote )
}
}
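# Post-processing of predictions. Binary classification: derives a class
# cutoff from the estimated class probabilities and predicts again with it.
# Regression: uses the OOB votes to choose, by L1/L2 error, among the mean,
# median and weighted-vote estimates, then applies a bias correction learned
# by regressing the OOB residuals.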
postProcessingVotes <- function(object, nbModels = 1, idx = 1, granularity = 1, predObject = NULL, swapPredictions = FALSE, X = NULL, imbalanced = FALSE)
{
object <- filter.object(object)
if (rownames(object$forestParams)[1] == "reduceDimension")
{ stop("Post Processing does not work with objects coming from rUniformForest.big() function") }
if (!object$forest$regression)
{
if (length(as.numeric(object$classes)) > 2)
{ stop("Optimization currently works only for binary classification") }
if (is.null(X))
{ stop("Please provide test data") }
else
{
if (is.null(predObject)) { predObject <- predict(object, X, type = "all") }
else
{
if (is.null(predObject$all.votes))
{ stop("Please provide full prediction object (option type = 'all' when calling predict() function).") }
if (is.null(predObject$majority.vote))
{ stop("Please provide full prediction object (option type = 'all' when calling predict() function).") }
}
majorityVotePosition = which.max(table(predObject$majority.vote))
numClasses = sort(unique(predObject$all.votes[,1]))
probPred = round(getVotesProbability2(predObject$all.votes, numClasses), 2)
colnames(probPred) = object$classes
if (imbalanced)
{ cutoff = 1 - ( mean(probPred[,2])/mean(probPred[,1]) ) }
else
{ cutoff = 0.5/2*( mean(probPred[,2])/mean(probPred[,1]) + mean(probPred[,1])/mean(probPred[,2]) ) }
predVotes = predict(object, X, classcutoff = c(object$classes[majorityVotePosition], cutoff))
return(predVotes)
}
}
if (swapPredictions)
{
object$forest$OOB.votes = predObject$forest$OOB.votes
object$forest$OOB.predicts = predObject$forest$OOB.predicts
}
if (is.null(object$forest$OOB.votes))
{ stop("No OOB data for post processing. Please enable OOB option and subsamplerate or bootstrap ('replace' option) when computing model.") }
if (is.null(object$predictionObject))
{
if (is.null(predObject)) { stop("Post processing can not be computed. Please provide prediction object") }
else
{
if (is.null(predObject$majority.vote))
{ stop("Post processing can not be computed. Please provide full prediction object (type = 'all') when calling predict()") }
else
{
meanEstimate = predObject$majority.vote
allVotes = predObject$all.votes
}
}
}
else
{
meanEstimate = object$predictionObject$majority.vote
allVotes = object$predictionObject$all.votes
}
Y = object$y
OOBVotes = object$forest$OOB.votes
NAIdx = which.is.na(object$forest$OOB.predicts)
if (length(NAIdx) > 0)
{
Y = Y[-NAIdx]
OOBVotes = OOBVotes[-NAIdx,]
object$forest$OOB.predicts = object$forest$OOB.predicts[-NAIdx]
}
OOBMeanEstimate = object$forest$OOB.predicts
L2DistOOBMeanEstimate <- L2Dist(OOBMeanEstimate, Y)/length(Y)
OOBMedianEstimate <- apply(OOBVotes, 1, function(Z) median(rmInf(Z)))
L2DistOOBMedianEstimate <- L2Dist(OOBMedianEstimate, Y)/length(Y)
weightedModel <- weightedVoteModel(OOBVotes, OOBMeanEstimate, Y = Y, nbModels = nbModels, idx = idx, granularity = granularity)
OOBWeightedVoteEstimate <- weightedVoteModel(OOBVotes, OOBMeanEstimate, train = FALSE, models.coeff = weightedModel$coefficients)
NAOOBWeightedVoteEstimate <- which.is.na(OOBWeightedVoteEstimate)
if (length(NAOOBWeightedVoteEstimate) > 0)
{ OOBWeightedVoteEstimate[NAOOBWeightedVoteEstimate] = OOBMeanEstimate[NAOOBWeightedVoteEstimate] }
L2DistOOBWeightedVoteEstimate <- L2Dist(OOBWeightedVoteEstimate, Y)/length(Y)
flagMedian = ( (L2DistOOBMedianEstimate <= L2DistOOBMeanEstimate) | (L1Dist(OOBMedianEstimate, Y) <= L1Dist(OOBMeanEstimate, Y)) )
flagWeighted = ( (L2DistOOBWeightedVoteEstimate <= L2DistOOBMeanEstimate) | (L1Dist(OOBWeightedVoteEstimate, Y) <= L1Dist(OOBMeanEstimate, Y)) )
if (flagMedian)
{
if (flagWeighted)
{ weightedVoteEstimate <- weightedVoteModel(allVotes, meanEstimate, train = FALSE, models.coeff = weightedModel$coefficients) }
else
{ weightedVoteEstimate = rep(0, length(meanEstimate)) }
}
else
{ weightedVoteEstimate <- weightedVoteModel(allVotes, meanEstimate, train = FALSE, models.coeff = weightedModel$coefficients) }
medianEstimate <- apply(allVotes, 1, function(Z) median(rmInf(Z)))
NAWeightedVoteEstimate <- which.is.na(weightedVoteEstimate)
if (length(NAWeightedVoteEstimate) > 0) { weightedVoteEstimate[NAWeightedVoteEstimate] = meanEstimate[NAWeightedVoteEstimate] }
negativeBias = - (mean(OOBMeanEstimate) - Y)
modelResiduals = Y - OOBMeanEstimate
biasModel = lm(modelResiduals ~ negativeBias)
biasSign = sign(meanEstimate - medianEstimate)
biasSign[which(biasSign == 0)] = 1
residuals_hat = vector(length = ncol(OOBVotes))
residuals_hat <- apply(OOBVotes, 2, function(Z) sum(rmInf(Z))/length(rmInf(Z))) - mean(OOBMeanEstimate)
MeanNegativeBias = -mean(residuals_hat^2)
biasCorrection = biasModel$coefficients[2]*biasSign*MeanNegativeBias + biasModel$coefficients[1]
if (flagMedian)
{
if (flagWeighted) { return( 0.5*(medianEstimate + weightedVoteEstimate + biasCorrection)) }
else { return(medianEstimate) }
}
else
{ return(weightedVoteEstimate + biasCorrection) }
}
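# Local (observation-level) variable importance: for each observation and
# each tree, retrieves the variable used by the split leading to the terminal
# node, then tabulates the 'nbVariables' most frequent ones and, for
# classification, their distribution within each predicted class.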
localVariableImportance <- function(object, nbVariables = 2, Xtest = NULL, predObject = NULL, l.threads = "auto", l.parallelPackage = "doParallel")
{
object = filter.object(object)
rufObject = object$forest$object
if (is.null(predObject))
{
if (is.null(Xtest))
{ stop("Local variable importance can not be computed. please provide test data.") }
else
{
predObject = predict(object, Xtest, type = "all")
majority.vote = as.numeric(predObject$majority.vote)
pred.rufObject = predObject$votes.data
}
}
else
{
if (is.null(predObject$majority.vote))
{
stop("Local variable importance can not be computed. Please provide full prediction object (type = 'all') when calling predict()")
}
else
{
majority.vote = as.numeric(predObject$majority.vote)
pred.rufObject = predObject$votes.data
}
}
ntree = length(rufObject)
if (is.null(pred.rufObject))
{ stop ("no data to evaluate importance") }
else
{
n = dim(pred.rufObject[[1]])[1]
{
threads = l.threads
max_threads = detectCores()
if (threads == "auto")
{
if (max_threads == 2) { threads = max_threads }
else { threads = max(1, max_threads - 1) }
}
else
{
if ((max_threads) < threads)
{ cat("Note: number of threads is higher than logical threads in this computer.\n") }
}
{
Cl = makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
}
chunkSize <- ceiling(ntree/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
}
b = NULL
varMatrix <- foreach(b = 1:ntree, .options.smp = smpopts, .combine = rbind, .multicombine = TRUE) %dopar%
{
predVar = vector(length = n)
for (i in 1:n)
{
predIdx = pred.rufObject[[b]][i,3]
predVar[i] = rufObject[[b]][which(rufObject[[b]][,"left daughter"] == predIdx
| rufObject[[b]][,"right daughter"] == predIdx), "split var"][1]
}
predVar
}
stopCluster(Cl)
varImp = varFreq = matrix(data = NA, ncol = nbVariables, nrow = n)
varImp <- t(apply(varMatrix, 2, function(Z)
as.numeric(names(sort(table(Z), decreasing = TRUE)[1:nbVariables])) ))
varFreq <- t(apply(varMatrix, 2, function(Z) sort(table(Z), decreasing = TRUE)[1:nbVariables]))/ntree
NAIdx = which(is.na(varImp), arr.ind = TRUE)
if (length(NAIdx[,1]) > 0)
{
varImp[NAIdx] = apply(NAIdx, 1, function(Z) {
newZ = rmNA(varImp[Z])
if (length(newZ) == 1) { rep(newZ, length(Z)) } else { sample(newZ,1) }
}
)
varFreq[NAIdx] = apply(NAIdx, 1, function(Z) {
newZ = rmNA(varFreq[Z])
if (length(newZ) == 1) { rep(newZ, length(Z)) } else { sample(newZ,1) }
}
)
}
objectNames = objectFrequencyNames = vector()
for (j in 1:nbVariables)
{
objectNames[j] = paste("localVariable", j, sep = "")
objectFrequencyNames[j] = paste("localVariableFrequency", j, sep = "")
}
variableImportance.object = cbind(majority.vote, varImp, varFreq)
if (!object$forest$regression)
{
colnames(variableImportance.object) = c("class", objectNames, objectFrequencyNames)
classObject = as.numeric(names(table(variableImportance.object[,1])))
classMatrix = list()
for (i in 1:length(classObject))
{
classMatrix[[i]] = round(sort(table(variableImportance.object[which(variableImportance.object[,1] == i),2])/
sum(table(variableImportance.object[which(variableImportance.object[,1] == i),2])), decreasing = TRUE),2)
}
orderedVar = unique(as.numeric(names(sort(unlist(classMatrix), decreasing = TRUE))))
Variables = as.numeric(names(sort(unlist(classMatrix), decreasing = TRUE)))
orderedImportance = as.numeric(sort(unlist(classMatrix), decreasing = TRUE))
classVariableImportance = matrix(data = 0, ncol = length(classObject), nrow = length(orderedVar))
for (i in 1:length(orderedVar))
{
for (j in 1:length(classObject))
{
idx = which( as.numeric(names(classMatrix[[j]])) == orderedVar[i])
if (length(idx) > 0)
{ classVariableImportance[i,j] = classMatrix[[j]][idx] }
}
}
classVariableImportance = data.frame(classVariableImportance)
rownames(classVariableImportance) = orderedVar
colnames(classVariableImportance) = paste("Class", classObject, sep=" ")
return(list( obsVariableImportance = variableImportance.object, classVariableImportance = classVariableImportance ))
}
else
{
colnames(variableImportance.object) = c("estimate", objectNames, objectFrequencyNames)
return(variableImportance.object)
}
}
}
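# Tree-level weights: for each OOB observation, tallies the trees that agree
# ('pweights') and disagree ('nweights') with the OOB prediction, normalized
# by the number of observations.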
localTreeImportance <- function(rfObject, OOBPredicts = NULL, OOBVotes = NULL)
{
rfObject = filter.object(rfObject)
if (!is.null(rfObject$OOB.votes)) { OOBVotes = rfObject$OOB.votes }
if (is.null( OOBVotes)) { stop ( "OOB outputs missing. No optimization or weighted vote can be done") }
if (!is.null(rfObject$OOB.predicts)) { OOBPredicts = rfObject$OOB.predicts }
n = length(OOBPredicts)
wPlus = wMinus = NULL
for ( i in seq_along(OOBPredicts))
{
wPlus = c(wPlus, which(OOBVotes[i,] == OOBPredicts[i]))
wMinus = c(wMinus, which(OOBVotes[i,] != OOBPredicts[i]))
}
object = list(pweights = (table(wPlus)/n), nweights = (table(wMinus)/n))
return(object)
}
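# S3 importance() method for randomUniformForest objects: plots the global
# variable importance (top 'maxVar' variables, ranked by information gain for
# classification or 'Lp' distance for regression) and derives local
# importance and variable interactions from the test data.
# Illustrative sketch (hypothetical 'model' and 'xtest' objects):
# imp <- importance.randomUniformForest(model, Xtest = xtest, maxInteractions = 3)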
importance.randomUniformForest <- function(object, maxVar = 30, maxInteractions = 3, Xtest = NULL,
predObject = NULL, ...)
{
object <- filter.object(object)
maxInteractions = max(2, maxInteractions)
if (!is.null(Xtest))
{
if (!is.null(object$formula) & (length(object$variablesNames) != ncol(Xtest)))
{
mf <- model.frame(formula = object$formula, data = as.data.frame(Xtest))
Xtest <- model.matrix(attr(mf, "terms"), data = mf)[,-1]
if (object$logX) { Xtest <- generic.log(Xtest) }
}
else
{
Xfactors <- which.is.factor(Xtest)
catVarIdx <- which(rownames(object$paramsObject) == "categorical variables")
Xtest <- NAfactor2matrix(Xtest, toGrep = "anythingParticular")
matchNA = (length(which(is.na(Xtest))) > 0)
if (matchNA)
{
cat("NA found in data. Fast imputation (means) is used for missing values\n")
Xtest <- na.impute(Xtest)
}
if (object$logX)
{
trueFactorsIdx <- which(Xfactors == 1)
if (length(trueFactorsIdx) == 0) { Xtest <- generic.log(Xtest) }
else
{
if (!is.null(object$paramsObject[1,catVarIdx]))
{
cat("Warning : all categorical variables have been ignored for the logarithm transformation.\nIf only some of them have been defined as categorical, variable importance will be altered.\n To avoid it, please define logarithm transformation outside of the model or ignore categorical variables, or set them to 'all'.\n")
Xtest[,-trueFactorsIdx] <- generic.log(Xtest[,-trueFactorsIdx])
}
else
{ Xtest <- generic.log(Xtest) }
}
}
}
}
if (!is.null(object$forest$variableImportance))
{
par(las=1)
maxChar = floor(1 + max(nchar(object$variablesNames))/2)
par(mar=c(5, maxChar + 1,4,2))
varImportance1 = varImportance = object$forest$variableImportance
if (!object$forest$regression)
{
varImportance[,"class"] = object$classes[as.numeric(varImportance[,"class"])]
varImportance1[,"class"] = varImportance[,"class"]
}
nVar = nrow(varImportance)
if (nVar > maxVar) { varImportance = varImportance[1:maxVar,] }
barplot(varImportance[nVar:1, "percent.importance"], horiz = TRUE, col = sort(heat.colors(nVar),
decreasing = TRUE),
names.arg = varImportance[nVar:1,"variables"], xlab = "Relative Influence (%)",
main = if (object$forest$regression) { "Variable Importance based on 'Lp' distance" }
else { "Variable Importance based on information gain" }, border = NA)
abline(v = 100/nVar, col = 'grey')
localVarImportance <- localVariableImportance(object, nbVariables = maxInteractions, Xtest = Xtest,
predObject = predObject)
if (!object$forest$regression)
{
cat("\n1 - Global Variable Importance (", min(maxVar, nVar), " most important based on information gain) :\n", sep = "")
cat("Note: most predictive features are ordered by 'score' and plotted. Most discriminant ones\nshould also be taken into account by looking 'class' and 'class.frequency'.\n\n")
rownames(localVarImportance$classVariableImportance) = object$variablesNames[as.numeric(rownames(localVarImportance$classVariableImportance))]
colnames(localVarImportance$classVariableImportance) = paste("Class ",
object$classes[as.numeric(rm.string(colnames(localVarImportance$classVariableImportance), "Class"))], sep = "")
obsVarImportance = data.frame(localVarImportance$obsVariableImportance)
obsVarImportance[,1] = object$classes[obsVarImportance[,1]]
}
else
{
cat("\n1 - Global Variable Importance (", min(maxVar, nVar),
" most important based on 'Lp' distance) :\n", sep = "")
obsVarImportance = data.frame(localVarImportance)
}
print(varImportance)
for (j in 2:(maxInteractions+1)) { obsVarImportance[,j] = object$variablesNames[obsVarImportance[,j]] }
obsVarImportance2 = obsVarImportance
fOrder = sort(table(obsVarImportance2[,2]), decreasing = TRUE)
sOrder = sort(table(obsVarImportance2[,3]), decreasing = TRUE)
W1 = mean(obsVarImportance2[,grep("localVariableFrequency1", colnames(obsVarImportance2))[1]])
W2 = mean(obsVarImportance2[,grep("localVariableFrequency2", colnames(obsVarImportance2))[1]])
minDim2 = min(length(fOrder), length(sOrder))
partialDependence = matrix(NA, minDim2, minDim2)
for (i in 1:minDim2) { partialDependence[,i] = fOrder[i]*W1/W2 + sOrder[1:minDim2] }
colnames(partialDependence) = names(fOrder)[1:minDim2]
rownames(partialDependence) = names(sOrder)[1:minDim2]
partialDependence = partialDependence/(2*nrow(obsVarImportance2))
avg1rstOrder = colMeans(partialDependence)
avg2ndOrder = c(rowMeans(partialDependence),0)
partialDependence = rbind(partialDependence, avg1rstOrder)
partialDependence = cbind(partialDependence, avg2ndOrder)
minDim = min(10, minDim2)
varImportanceOverInteractions = vector()
for (i in 1:minDim2)
{
idx = which(rownames(partialDependence)[i] == colnames(partialDependence))
if (length(idx) > 0)
{
varImportanceOverInteractions[i] = 0.5*(avg1rstOrder[i] + avg2ndOrder[idx] +
2*partialDependence[i,idx])
}
else
{ varImportanceOverInteractions[i] = avg1rstOrder[i] }
names(varImportanceOverInteractions)[i] = rownames(partialDependence)[i]
}
varImportanceOverInteractions = sort(varImportanceOverInteractions, decreasing = TRUE)
cat("\n\n2 - Local Variable importance")
cat("\nVariables interactions (", minDim, " most important variables at first (columns) and second (rows) order) :", sep = "")
cat("\nFor each variable (at each order), its interaction with others is computed.\n\n")
print(round(partialDependence[c(1:minDim, nrow(partialDependence)),],4))
cat("\n\nVariable Importance based on interactions (", minDim, " most important) :\n", sep = "")
print(round(varImportanceOverInteractions[1:minDim]/sum(varImportanceOverInteractions), 4))
if (!object$forest$regression)
{
cat("\nVariable importance over labels (", minDim, " most important variables conditionally to each label) :\n", sep = "")
print(localVarImportance$classVariableImportance[1:minDim,])
cat("\n\nSee ...$localVariableImportance$obsVariableImportance to get variable importance for each observation.", sep = "")
cat("\n\nCall clusterAnalysis() function to get a more compact and complementary analysis.\n Type '?clusterAnalysis' for help.", sep = "")
}
else
{ cat("\n\nSee ...$localVariableImportance to get variable importance for each observation.") }
cat("\n\nCall partialDependenceOverResponses() function to get partial dependence over responses\nfor each variable. Type '?partialDependenceOverResponses' for help.\n")
}
else
{ stop("no variable importance defined in random uniform forest") }
importanceObject = list(globalVariableImportance = varImportance1, localVariableImportance = localVarImportance,
partialDependence = partialDependence, variableImportanceOverInteractions = varImportanceOverInteractions)
class(importanceObject) <- "importance"
importanceObject
}
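# partialImportance: importance restricted to a subset of observations, either
# those predicted in a given class ('whichClass') or those whose response passes
# a threshold ('threshold'/'thresholdDirection' for regression).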
partialImportance <- function(X, object, whichClass = NULL, threshold = NULL, thresholdDirection = c("low", "high"), border = NA, nLocalFeatures = 5)
{
if (is.list(object$localVariableImportance))
{
Z = object$localVariableImportance$obsVariableImportance
whichClassNames <- rm.string(names(object$localVariableImportance$class), "Class ")
numericClassNames = as.numeric(as.factor(whichClassNames))
if (is.null(whichClass) ) { stop("Please provide a class.") }
if (is.character(whichClass)) { whichClass = numericClassNames[which(whichClassNames == whichClass)] }
whichClass2 = whichClassNames[which(numericClassNames == whichClass)]
idx = which(Z[,1] == whichClass)
Z = Z[idx, ,drop = FALSE]
if (dim(Z)[1] <= 1) { stop("Not enough observations found using this class.") }
}
else
{
Z = object$localVariableImportance
if (!is.null(threshold))
{
if (thresholdDirection == "low") { idx = which(Z[,1] <= threshold) }
else { idx = which(Z[,1] > threshold) }
Z = Z[idx,]
if (dim(Z)[1] < 1) { stop("No observations found using this threshold.\n") }
}
}
Z = Z[,-1]
# look for local variable columns in the importance matrix Z (not in the raw data X)
idxLocalVar = grep("localVariable", colnames(Z))
idxPos = length(idxLocalVar)/2
countVars = sort(table(Z[,1:idxPos]), decreasing = TRUE)
obsObject = rmNA(countVars[1:nLocalFeatures]/sum(countVars))
XNames = colnames(X)
par(las = 1)
maxChar = floor(2 + max(nchar(XNames[as.numeric(names(sort(obsObject)))]))/2)
par(mar = c(5, maxChar + 2,4,2))
barplot(sort(obsObject*100), horiz = TRUE, col = sort(heat.colors(length(obsObject)), decreasing = TRUE),
border = border, names.arg = XNames[as.numeric(names(sort(obsObject)))], xlab = "Relative influence (%)",
main = if (is.list(object$localVariableImportance)) { paste("Partial importance based on observations over class ", whichClass2, sep ="") }
else
{
if (!is.null(threshold))
{
if (thresholdDirection == "low") { paste("Partial importance based on observations (with Response < ", round(threshold, 4), ")", sep ="") }
else { paste("Partial importance based on observations (with Response > ", round(threshold, 4), ")", sep ="") }
}
else { "Partial importance based on observations" }
}
)
cat("Relative influence: ", round(rmNA(sum(obsObject))*100, 2), "%\n", sep="")
return(obsObject)
}
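# partialDependenceOverResponses: extracts, for one feature, the observations where
# it was locally influential (at the requested interaction order) and plots the
# response against the feature values; falls back to boxplots or mosaic plots
# for categorical-like features (fewer than 'maxClasses' distinct values).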
partialDependenceOverResponses <- function(Xtest, importanceObject,
whichFeature = NULL,
whichOrder = c("first", "second", "all"),
outliersFilter = FALSE,
plotting = TRUE,
followIdx = FALSE,
maxClasses = if (is.null(whichFeature)) { 10 } else { max(10, which.is.factor(Xtest[, whichFeature, drop = FALSE], count = TRUE)) },
bg = "lightgrey")
{
FeatureValue = Response = Class = Observations = NULL
if (!is.null(whichFeature))
{
if (is.character(whichFeature)) { whichFeature = which(colnames(Xtest) == whichFeature) }
if (length(whichFeature) > 1)
{
whichFeature = whichFeature[1]
cat("Only one variable can be computed at the same time\n")
}
}
if (whichOrder[1] == "first") { idxOrder = 2 }
if (whichOrder[1] == "second") { idxOrder = 3 }
if (whichOrder[1] == "all")
{
if (is.matrix(importanceObject$localVariableImportance))
{ idxOrder = 2:length(grep("localVariableFrequency", colnames(importanceObject$localVariableImportance))) }
else
{ idxOrder = 2:length(grep("localVariableFrequency", colnames(importanceObject$localVariableImportance$obsVariableImportance))) }
}
idx = list()
if (is.matrix(importanceObject$localVariableImportance)) { importanceObjectMatrix = importanceObject$localVariableImportance }
else { importanceObjectMatrix = importanceObject$localVariableImportance$obsVariableImportance }
if (is.null(whichFeature))
{ whichFeature = as.numeric(names(which.max(table(importanceObjectMatrix[,idxOrder[1]])))) }
idx[[1]] = which(importanceObjectMatrix[,idxOrder[1]] == whichFeature)
if (length(idxOrder) > 1)
{
for (i in 1:length(idxOrder))
{ idx[[i+1]] = which(importanceObjectMatrix[,1+idxOrder[i]]== whichFeature) }
}
partialDependenceMatrix = cbind(Xtest[unlist(idx), whichFeature], importanceObjectMatrix[unlist(idx),1], unlist(idx))
partialDependenceMatrix = sortMatrix(partialDependenceMatrix, 1)
NAIdx = which(is.na(partialDependenceMatrix))
if (length(NAIdx) > 0) { partialDependenceMatrix = partialDependenceMatrix[-NAIdx,] }
if (outliersFilter & (!is.factor(Xtest[,whichFeature])))
{
highOutlierIdx = which(partialDependenceMatrix[,1] > quantile(partialDependenceMatrix[,1],0.95))
lowOutlierIdx = which(partialDependenceMatrix[,1] < quantile(partialDependenceMatrix[,1],0.05))
if (length(highOutlierIdx) > 0 | length(lowOutlierIdx) > 0)
{ partialDependenceMatrix = partialDependenceMatrix[-c(lowOutlierIdx,highOutlierIdx),] }
}
if (is.vector(partialDependenceMatrix) )
{ stop ("Not enough points to plot partial dependencies. Please increase order of interaction when computing importance.") }
if (dim(partialDependenceMatrix)[1] < 10)
{ stop ("Not enough points to plot partial dependencies. Please increase order of interaction when computing importance.") }
else
{
idx = partialDependenceMatrix[,3]
partialDependenceMatrix = partialDependenceMatrix[,-3]
flagFactor = 0
smallLength = length(unique(Xtest[,whichFeature]))
n = nrow(Xtest)
if ( ((smallLength < maxClasses) | is.factor(Xtest[,whichFeature])) & (smallLength/n < 1/5))
{
featureLevels = levels(as.factor(Xtest[,whichFeature]))
testNumeric = is.numeric(as.numeric(featureLevels))
if (testNumeric & length(rmNA(as.numeric(featureLevels))) > 0)
{ flagFactor = 1 }
else
{
if (is.matrix(importanceObject$localVariableImportance))
{
classFeature = unique(partialDependenceMatrix[,1])
B = round(as.numeric(partialDependenceMatrix[,2]),4)
A = as.numeric(factor2matrix(partialDependenceMatrix[,1, drop= FALSE]))
partialDependenceMatrix = cbind(A,B)
colnames(partialDependenceMatrix) = c("Class", "Response")
flagFactor = 1
valueFeature = unique(A)
referenceTab = cbind(classFeature, valueFeature)
colnames(referenceTab) = c("category", "numeric value")
cat("categorical values have been converted to numeric values :\n")
print(referenceTab)
cat("\n")
}
else
{ partialDependenceMatrix[,1] = featureLevels[ as.numeric(as.factor(partialDependenceMatrix[,1]))] }
}
}
}
if (plotting)
{
if (dim(partialDependenceMatrix)[1] < 1)
{ stop ("Not enough points to plot partial dependencies. Please increase order of interaction when computing importance.") }
if (is.matrix(importanceObject$localVariableImportance))
{
if ( ((smallLength < maxClasses) | is.factor(Xtest[,whichFeature])) & (smallLength/n < 1/5))
{
A = if (flagFactor) { as.factor(partialDependenceMatrix[,1]) } else { partialDependenceMatrix[,1] }
B = round(as.numeric(partialDependenceMatrix[,2]),4)
partialDependenceMatrix = data.frame(A , B)
colnames(partialDependenceMatrix) = c("Class", "Response")
plot(qplot(Class, Response, data = partialDependenceMatrix, geom = c("boxplot", "jitter"),
outlier.colour = "green", outlier.size = 2.5, fill= Class, main = "Partial dependence over predictor",
xlab = colnames(Xtest)[whichFeature], ylab = "Response"))
}
else
{
colnames(partialDependenceMatrix) = c("FeatureValue", "Response")
partialDependenceMatrix = data.frame(partialDependenceMatrix)
tt <- ggplot(partialDependenceMatrix, aes(x = FeatureValue, y = Response))
plot(tt + geom_point(colour = "lightblue") + stat_smooth(fill = "green", colour = "darkgreen",
size = 1) +
labs(title = "Partial dependence over predictor", x = colnames(Xtest)[whichFeature], y = "Response"))
}
}
else
{
colnames(partialDependenceMatrix) = c("Observations", "Class")
partialDependenceMatrix = data.frame(partialDependenceMatrix)
variablesNames = unique(partialDependenceMatrix$Class)
partialDependenceMatrix$Class = factor(partialDependenceMatrix$Class)
levels(partialDependenceMatrix$Class) = colnames(importanceObject$localVariableImportance$classVariableImportance)[sort(variablesNames)]
if ( ((smallLength < maxClasses) | is.factor(Xtest[,whichFeature])) & (smallLength/n < 1/5))
{
par(las=1)
if (bg != "none") par(bg = bg)
mosaicplot(t(table(partialDependenceMatrix)), color = sort(heat.colors(length(featureLevels)),
decreasing = FALSE), border = NA, ylab = colnames(Xtest)[whichFeature], xlab = "Class",
main = "Partial dependence over predictor")
}
else
{
plot(qplot(Class, Observations, data = partialDependenceMatrix, geom = c("boxplot", "jitter"),
outlier.colour = "green", outlier.size = 2.5, fill= Class, main = "Partial dependence over predictor",
xlab = "", ylab = colnames(Xtest)[whichFeature]))
}
}
}
else
{
if (!is.matrix(importanceObject$localVariableImportance))
{
colnames(partialDependenceMatrix) = c("Observations", "Class")
partialDependenceMatrix = data.frame(partialDependenceMatrix)
variablesNames = unique(partialDependenceMatrix$Class)
partialDependenceMatrix$Class = factor(partialDependenceMatrix$Class)
levels(partialDependenceMatrix$Class) = colnames(importanceObject$localVariableImportance$classVariableImportance)[sort(variablesNames)]
}
}
if (followIdx)
{ return(list(partialDependenceMatrix = partialDependenceMatrix, idx = as.numeric(idx) )) }
else
{ return(partialDependenceMatrix) }
}
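# partialDependenceBetweenPredictors: crosses the partial dependence of two features,
# plotting their joint dependence, class agreement (classification) or response
# intensity (regression), heatmaps, and an optional interactive 3D perspective view.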
partialDependenceBetweenPredictors <- function(Xtest, importanceObject, features,
whichOrder = c("first", "second", "all"),
perspective = FALSE,
outliersFilter = FALSE,
maxClasses = max(10, which.is.factor(Xtest[,features, drop = FALSE], count = TRUE)),
bg = "grey")
{
Variable1 = Variable2 = SameClass = ..level.. = Response = NULL
if (length(features) != 2) { stop("Please provide two features.") }
graphics.off()
pD1 <- partialDependenceOverResponses(Xtest, importanceObject, whichFeature = features[1], whichOrder = whichOrder,
outliersFilter = outliersFilter, plotting = FALSE, followIdx = TRUE, maxClasses = maxClasses)
pD2 <- partialDependenceOverResponses(Xtest, importanceObject, whichFeature = features[2], whichOrder = whichOrder,
outliersFilter = outliersFilter, plotting = FALSE, followIdx = TRUE, maxClasses = maxClasses)
sameIdx2 = find.idx(pD1$idx, pD2$idx)
sameIdx1 = find.idx(pD2$idx, pD1$idx)
minDim = length(sameIdx1)
if ( (minDim < 10) | (length(sameIdx2) < 10)) { stop("Not enough points. Please use option whichOrder = 'all'") }
pD1 = pD1$partialDependenceMatrix; pD2 = pD2$partialDependenceMatrix;
pD11 = factor2matrix(pD1)[sameIdx1,]; pD22 = factor2matrix(pD2)[sameIdx2,]
if (!is.matrix(importanceObject$localVariableImportance))
{
minN = min(nrow(pD11), nrow(pD22))
pD11 = pD11[1:minN,]
pD22 = pD22[1:minN,]
idx = ifelse(pD11[,2] == pD22[,2], 1, 0)
Xi = cbind(pD11[which(idx == 1), 1], pD22[which(idx == 1), 1])
Xj = cbind(pD11[which(idx == 0), 1], pD22[which(idx == 0), 1])
if (!is.character(features[1]))
{
fName1 = which(colnames(Xtest) == colnames(Xtest)[features[1]])
fName2 = which(colnames(Xtest) == colnames(Xtest)[features[2]])
features = colnames(Xtest)[c(fName1, fName2)]
}
Xi = as.data.frame(Xi); colnames(Xi) = c("Variable1", "Variable2")
Xj = as.data.frame(Xj); colnames(Xj) = c("Variable1", "Variable2")
Xi = cbind(Xi, rep(1, nrow(Xi)))
Xj = cbind(Xj, rep(0, nrow(Xj)))
colnames(Xj)[3] = colnames(Xi)[3] = "SameClass"
X = rbind(Xi, Xj)
X[,3] = ifelse(X[,3] == 1, TRUE, FALSE)
smallLength = length(unique(Xtest[,features[1]]))
n = nrow(Xtest)
Xnumeric = X
ggplotFlag1 = ggplotFlag2 = 1
options(warn = -1)
if ( ((smallLength < maxClasses) | is.factor(Xtest[,features[1]])) & (smallLength/n < 1/5))
{
featureLevels = levels(factor(Xtest[,features[1]]))
testNumeric = is.numeric(as.numeric(featureLevels))
if (testNumeric & length(rmNA(as.numeric(featureLevels))) > 0) { ggplotFlag1 = 0 }
else { X[,1] = featureLevels[X[,1]] }
}
smallLength = length(unique(Xtest[,features[2]]))
if ( ((smallLength < maxClasses) | is.factor(Xtest[,features[2]])) & (smallLength/n < 1/5))
{
featureLevels = levels(factor(Xtest[,features[2]]))
testNumeric = is.numeric(as.numeric(featureLevels))
if (testNumeric & length(rmNA(as.numeric(featureLevels))) > 0) { ggplotFlag2 = 0 }
else { X[,2] = featureLevels[X[,2]] }
}
options(warn = 0)
dev.new()
tt <- ggplot(X, aes(x = Variable1, y = Variable2, colour = SameClass))
if (ggplotFlag1 == 1)
{
plot(tt + geom_point(size = 2)
+ labs(title = "Dependence between predictors", x = features[1], y = features[2])
+ scale_colour_manual("Same class", values = c("red", "green") )
+ theme(axis.text.x = element_text(angle = 60, hjust = 1))
)
}
else
{
plot(tt + geom_point(size = 2)
+ labs(title = "Dependence between predictors", x = features[1], y = features[2])
+ scale_colour_manual("Same class", values = c("red", "green") )
)
}
dev.new()
cde1 <- geom_histogram(position = "fill", binwidth = diff(range(Xnumeric[,1]))/4, alpha = 7/10)
cde2 <- geom_histogram(position = "fill", binwidth = diff(range(Xnumeric[,2]))/4, alpha = 7/10)
tt1 <- ggplot(X, aes(x = Variable1, fill = SameClass))
if (ggplotFlag1 == 1)
{
plot(tt1 + cde1
+ labs(title = "Class distribution", x = features[1], y = "Frequency")
+ scale_fill_manual(paste("Same class as", features[2]),values = c("red", "lightgreen"))
+ theme(axis.text.x = element_text(angle = 60, hjust = 1))
)
}
else
{
plot(tt1 + cde1
+ labs(title = "Class distribution", x = features[1], y = "Frequency")
+ scale_fill_manual(paste("Same class as", features[2]),values = c("red", "lightgreen"))
)
}
dev.new()
tt1 <- ggplot(X, aes(x = Variable2, fill = SameClass))
if (ggplotFlag2 == 1)
{
plot(tt1 + cde2
+ labs(title = "Class distribution", x = features[2], y = "Frequency")
+ scale_fill_manual(paste("Same class as", features[1]),values = c("red", "lightgreen"))
+ theme(axis.text.x = element_text(angle = 60, hjust = 1))
)
}
else
{
plot(tt1 + cde2
+ labs(title = "Class distribution", x = features[2], y = "Frequency")
+ scale_fill_manual(paste("Same class as", features[1]),values = c("red", "lightgreen"))
)
}
if ( (length(unique(X[,1])) == 1) | (length(unique(X[,2])) == 1) )
{ cat("\nOne of the variable does not have variance. Heatmap is not plotted.\n") }
else
{
dev.new()
tt2 <- ggplot(Xnumeric, aes( x = Variable1, y = Variable2, z = SameClass))
try(plot(tt2 + stat_density2d(aes(fill = ..level.., alpha =..level..), geom = "polygon")
+ scale_fill_gradient2(low = "lightyellow", mid = "yellow", high = "red")
+ labs(title = "Heatmap of dependence between predictors", x = features[1], y = features[2])
), silent = TRUE)
}
colnames(X) = c(features, "Same class")
}
else
{
intervals = cut(c(pD11[,2], pD22[,2]), minDim, labels = FALSE)
pD11 = cbind(pD11,intervals[1:minDim])
if (nrow(pD22) != length(rmNA(intervals[(minDim + 1):(2*minDim)])))
{
sameN = min(nrow(pD22),length(rmNA(intervals[(minDim + 1):(2*minDim)])))
pD22 = cbind(pD22[1:sameN,], rmNA(intervals[(minDim + 1):(2*minDim)])[1:sameN])
}
else
{ pD22 = cbind(pD22, rmNA(intervals[(minDim + 1):(2*minDim)])) }
minN = min(nrow(pD11), nrow(pD22))
Xi = sortMatrix(pD11,3)[1:minN,]
Xj = sortMatrix(pD22,3)[1:minN,]
Z = (Xi[,2] + Xj[,2])/2
if (!is.character(features[1]))
{
fName1 = which(colnames(Xtest) == colnames(Xtest)[features[1]])
fName2 = which(colnames(Xtest) == colnames(Xtest)[features[2]])
features = colnames(Xtest)[c(fName1, fName2)]
}
dev.new()
XiXj = as.data.frame(cbind(Xi[,1], Xj[,1], Z)); colnames(XiXj) = c("Variable1", "Variable2", "Response")
tt <- ggplot(XiXj, aes(x = Variable1, y = Variable2, z = Response))
ttMore <- tt + stat_density2d(aes(fill = ..level.., alpha =..level..), geom = "polygon") +
scale_fill_gradient2(low = "lightyellow", mid = "yellow", high = "red") +
labs(title = "Local Heatmap of dependence (with frequency and intensity of Response)",
x = features[1], y = features[2])
try(plot(ttMore), silent= TRUE)
dev.new()
fourQuantilesCut = cut(Z,4)
XiXj[,3] = fourQuantilesCut
dataCuts = table(fourQuantilesCut)
if (length(which(dataCuts < 5)) > 0)
{
lowDataCuts = names(dataCuts[which(dataCuts < 5)])
rmIdx = find.idx(lowDataCuts, XiXj[,3])
XiXj = XiXj[,-3]
Z = Z[-rmIdx]
fourQuantilesCut = cut(Z, 4)
XiXj = cbind(XiXj[-rmIdx,], fourQuantilesCut)
dataCuts = table(fourQuantilesCut)
colnames(XiXj)[3] = "Response"
}
X = cbind(XiXj, Z)
colnames(X) = c(features, "Response in four quantile intervals", "Response")
tt1 <- ggplot(XiXj, aes(x = Variable1, y = Variable2, z = Response))
try(plot(tt1 + stat_density2d(aes(fill = ..level.., alpha =..level..), geom = "polygon")
+ scale_fill_gradient2(low = "lightyellow", mid = "yellow", high = "red")
+ labs(title = "Global Heatmap of dependence (with intensity of Response)", x = features[1],
y = features[2])
), silent = TRUE)
dev.new()
try(plot(tt + geom_point(aes(colour = Response, size = Response))
+ stat_smooth(fill = "lightgrey", colour = "grey", size = 1)
+ labs(title = "Dependence between predictors", x = features[1], y = features[2])
+ scale_colour_gradient2(low = "blue", mid = "green", high = "red")
), silent = TRUE)
}
idxf1 = which(colnames(importanceObject$partialDependence) == features[1])
idxf2 = which(rownames(importanceObject$partialDependence) == features[2])
# compute np before the branch: it is also used for the second-order message below
np = dim(importanceObject$partialDependence)
if ( (length(idxf1) > 0) & (length(idxf2) > 0) )
{
cat("\nLevel of interactions between ", features[1], " and " , features[2], " at first order: ",
round(importanceObject$partialDependence[idxf2, idxf1],4), "\n", "(", round(round(importanceObject$partialDependence[idxf2, idxf1],4)/max(importanceObject$partialDependence[-np[1], -np[2]])*100,2), "% of the feature(s) with maximum level", ")\n", sep="")
}
else
{ cat("\nFeatures do not appear to have strong co-influence on the response.\n") }
idxf1 = which(rownames(importanceObject$partialDependence) == features[1])
if (length(idxf1) > 0)
{
idxf2 = which(colnames(importanceObject$partialDependence) == features[2])
if (length(idxf2) > 0)
{
cat("Level of interactions between ", features[1], " and ", features[2], " at second order: ",
round(importanceObject$partialDependence[idxf1, idxf2],4), "\n", "(", round(round(importanceObject$partialDependence[idxf1, idxf2],4)/max(importanceObject$partialDependence[-np[1], -np[2]])*100,2), "% of the feature(s) with maximum level", ")\n", sep="")
}
}
if(!is.matrix(importanceObject$localVariableImportance))
{
cat("\nClass distribution : for a variable of the pair, displays the estimated probability\nthat the considered variable has the same class than the other. If same class tends to be TRUE\nthen the variable has possibly an influence on the other (for the considered category or values) when predicting a label.\n\n")
cat("Dependence : for the pair of variables, displays the shape of their dependence\nand the estimated agreement in predicting the same class, for the values that define dependence.\nIn case of categorical variables, cross-tabulation is used.\n\n")
cat("Heatmap : for the pair of variables, displays the area where the dependence is the most effective.\nThe darker the colour, the stronger is the dependence.\n")
cat("\nFrom the pair of variables, the one that dominates is, possibly, the one\nthat is the most discriminant one (looking 'Global variable Importance') and/or the one\nthat has the higher level of interactions(looking 'Variable Importance based on interactions').\n")
}
else
{
cat("\nDependence : for the pair of variables, displays the shape of their dependence\nand the predicted value (on average) of the response for the values taken by the pair.\n\n")
cat("Heatmap : for the pair of variables, displays the area where the dependence is the most effective.\nThe darker the colour, the stronger is the dependence. First plot focuses on the intensity of the response\n, while the second considers both frequency (number of close predictions) and intensity.\n\n")
}
cat("\nPlease use the R menu to tile vertically windows in order to see all plots.\n\n")
if (perspective & is.matrix(importanceObject$localVariableImportance))
{
dev.new()
gridSize = 100; gridLag = 100
x = XiXj[,1]
y = XiXj[,2]
z = Z
n = length(x)
xyz = cbind(x,y,z)
if (n > 5*gridSize)
{
sampleIdx = sample(n, 500)
xyz = xyz[sampleIdx,]
n = length(sampleIdx)
}
xyz.byX = sortMatrix(xyz,1)
xyz.byY = sortMatrix(xyz,2)
newX = xyz.byX[,1]
newY = xyz.byY[,2]
dummyForRepX = dummyForRepY = rep(0,n)
for (i in 2:n)
{
if (newX[i] == newX[i-1]) { dummyForRepX[i] = 1 }
if (newY[i] == newY[i-1]) { dummyForRepY[i] = 1 }
}
newIdx = which(dummyForRepY == 0 & dummyForRepX == 0)
newX = newX[newIdx]
newY = newY[newIdx]
if ( (gridSize + gridLag) > length(newX))
{
interp.1 = seq(min(newX) + 0.1, max(newX) - 0.1,length = gridSize + gridLag - length(newX))
interp.2 = seq(min(newY) + 0.1, max(newY) - 0.1,length = gridSize + gridLag- length(newY))
newX = sort(c(newX, interp.1))
newY = sort(c(newY, interp.2))
}
duplicatesX = duplicated(newX)
duplicatesY = duplicated(newY)
if (sum(duplicatesX) > 0)
{
newX = newX[!duplicatesX]
newY = newY[!duplicatesX]
}
if (sum(duplicatesY) > 0)
{
newY = newY[!duplicatesY]
newX = newX[!duplicatesY]
}
newXYZ = cbind(newX, newY, rep(NA, length(newX)))
proxyM = fillNA2.randomUniformForest(rbind(xyz, newXYZ), nodesize = 2)
xyz.dim = dim(xyz)
nn = length(newX)
newZ = matrix(NA, nn, nn)
for (i in 1:nn)
{
for (j in 1:nn)
{ newZ[i,j] = mean(proxyM[which(newX[i] == proxyM[,1] | newY[j] == proxyM[,2]), 3]) }
}
L.smoothNewZ = t(apply(newZ, 1, function(Z) lagFunction(Z, lag = gridLag, FUN = mean, inRange = TRUE)))
C.smoothNewZ = apply(newZ, 2, function(Z) lagFunction(Z, lag = gridLag, FUN = mean, inRange = TRUE))
smoothIdx = round(seq(1, nrow(L.smoothNewZ), length = gridLag),0)
newX = newX[-smoothIdx]
newY = newY[-smoothIdx]
C.smoothNewZ = C.smoothNewZ[,-smoothIdx]
L.smoothNewZ = L.smoothNewZ[-smoothIdx,]
newZ = 0.5*(C.smoothNewZ + L.smoothNewZ)
highOutlierIdx2 <- apply(newZ, 2, function(Z) which(Z >= quantile(Z, 0.975)))
lowOutlierIdx2 <- apply(newZ, 2, function(Z) which(Z <= quantile(Z, 0.025)))
outliersIdx2 <- c(lowOutlierIdx2, highOutlierIdx2)
if (length(outliersIdx2) > 0)
{
newZ = newZ[-outliersIdx2, -outliersIdx2]
newX = newX[-outliersIdx2]
newY = newY[-outliersIdx2]
}
rm(lowOutlierIdx2);
rm(highOutlierIdx2);
highOutlierIdx2 <- apply(newZ, 1, function(Z) which(Z >= quantile(Z, 0.975)))
lowOutlierIdx2 <- apply(newZ, 1, function(Z) which(Z <= quantile(Z, 0.025)))
outliersIdx2 <- c(lowOutlierIdx2, highOutlierIdx2)
if (length(outliersIdx2) > 0)
{
newZ = newZ[-outliersIdx2, -outliersIdx2]
newX = newX[-outliersIdx2]
newY = newY[-outliersIdx2]
}
nNewZ = nrow(newZ)
if (nNewZ > gridSize)
{
sampleIdx = sort(sample(nNewZ, gridSize))
newX = newX[sampleIdx]
newY = newY[sampleIdx]
newZ = newZ[sampleIdx, sampleIdx]
}
if (bg != "none") par(bg = bg)
flag = endCondition = 0
lastANSWER = -40
newX = as.matrix(newX)
newY = as.matrix(newY)
newZ = as.matrix(newZ)
while (!endCondition)
{
if (!flag)
{
try(perspWithcol(newX, newY, newZ, heat.colors, nrow(newZ), theta = -40, phi = 20, xlab = features[1], ylab = features[2], zlab = "Response", main = "Dependence between predictors and effect over Response", ticktype = "detailed", box = TRUE, expand = 0.5, shade = 0.15), silent = FALSE)
}
ANSWER <- readline(cat("To get another view of 3D representation\nplease give a number between -180 and 180 (default one = -40).\nType 'b' to remove border.\nTo see animation please type 'a'.\nType Escape to leave :\n"))
if ( is.numeric(as.numeric(ANSWER)) & !is.na(as.numeric(ANSWER)) )
{
flag = 1
try(perspWithcol(newX, newY, newZ, heat.colors, nrow(newZ), theta = ANSWER, phi = 20, xlab = features[1], ylab = features[2], zlab = "Response", main = "Dependence between predictors and effect over Response", ticktype = "detailed", box = TRUE, expand = 0.5, shade = 0.15),
silent = FALSE)
lastANSWER = ANSWER
}
else
{
if (as.character(ANSWER) == "b")
{
flag = 1
try(perspWithcol(newX, newY, newZ, heat.colors, nrow(newZ), theta = lastANSWER, phi = 20, xlab = features[1], ylab = features[2], zlab = "Response", main = "Dependence between predictors and effect over Response", ticktype = "detailed", box = TRUE, expand = 0.5, border = NA, shade = 0.15), silent = FALSE)
}
else
{
if ( as.character(ANSWER) == "a")
{
flag = 1
nn = nrow(newZ)
circle = -180:180
for (i in circle)
{
try(perspWithcol(newX, newY, newZ, heat.colors, nn, theta = i, phi = 20,
xlab = features[1], ylab = features[2], zlab = "Response", main = "Dependence between predictors and effect over Response", ticktype = "detailed", box = TRUE, border = NA, expand = 0.5, shade = 0.15), silent = FALSE)
}
}
else
{ endCondition = 1 }
}
}
}
}
return(X)
}
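# twoColumnsImportance: stacks the (response, localVariable_i) column pairs of a
# local importance matrix into a single two-column matrix, one block per order.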
twoColumnsImportance <- function(importanceObjectMatrix)
{
idx = length(grep("localVariableFrequency", colnames(importanceObjectMatrix))) - 1
tmpImportanceObjectMatrix = importanceObjectMatrix[,1:2]
if (idx > 0)
{
for (i in 1:idx)
# note: 'idx' is a scalar, so the previous 'idx[i]' indexing returned NA for i > 1
{ tmpImportanceObjectMatrix = rbind(tmpImportanceObjectMatrix, importanceObjectMatrix[,c(1, 2 + i)]) }
}
return(tmpImportanceObjectMatrix)
}
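# plot.importance: S3 plot method for 'importance' objects. Draws global importance,
# an interaction mosaic, interaction-based importance and, when data are supplied,
# an interactive loop over partial dependence plots of the most important features.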
plot.importance <- function(x,
nGlobalFeatures = 30,
nLocalFeatures = 5,
Xtest = NULL,
whichFeature = NULL,
whichOrder = if (ncol(x$globalVariableImportance) > 5)
{ if (nrow(x$localVariableImportance$obsVariableImportance) > 1000) "first" else "all" }
else
{ if (nrow(x$localVariableImportance) > 1000) "first" else "all" },
outliersFilter = FALSE,
formulaInput = NULL,
border = NA,
...)
{
cat("\nPlease use the R menu to tile vertically windows in order to see all plots.\n")
object <- x
if (nrow(x$partialDependence) < 3)
{
stop("Not enough (or no) interactions between variables. Please use partialDependenceOverResponses( )\n and partialDependenceBetweenPredictors( ) functions to deeper assess importance.")
}
Variable = Response = NULL
if (!is.null(Xtest))
{
if (!is.null(formulaInput))
{
mf <- model.frame(formula = formulaInput, data = as.data.frame(Xtest))
Xtest <- model.matrix(attr(mf, "terms"), data = mf)[,-1]
cat("Note: please note that categorical variables have lost their original values when using formula.\nIt is strongly recommended to not use formula if one wants to assess importance\n \n.")
}
else
{
matchNA = (length(which(is.na(Xtest))) > 0)
if (matchNA)
{
cat("NA found in data. Fast imputation (means) is used for missing values\n")
Xtest <- na.impute(Xtest)
}
}
}
maxVar = nGlobalFeatures
maxVar2 = nLocalFeatures
graphics.off()
varImportance = object$globalVariableImportance
n = nrow(varImportance)
if (n > maxVar) { varImportance = varImportance[1:maxVar,] }
else { maxVar = n}
par(las = 1)
maxChar = if (nLocalFeatures >= 10) { 11 }
else { floor(2 + max(nchar(as.character(object$globalVariableImportance[,1])))/2) }
par(mar = c(5, maxChar + 1,4,2))
barplot(varImportance[maxVar:1,"percent.importance"], horiz = TRUE, col = sort(heat.colors(maxVar), decreasing = TRUE), border = border,
names.arg = varImportance[maxVar:1,"variables"], xlab = "Relative influence (%)", main = "Variable importance based on information gain")
abline(v = 100/n, col = 'grey')
dev.new()
par(las=1)
par(mar = c(5,4,4,2))
nbFeatures = ncol(object$partialDependence)
newNbFeatures = min(maxVar2, nbFeatures -1)
if (newNbFeatures < (nbFeatures - 1))
{
OthersVariablesCol = colSums(object$partialDependence[(newNbFeatures+1):(nbFeatures -1), -nbFeatures, drop = FALSE])[1:newNbFeatures]
OthersVariablesRow = rowSums(object$partialDependence[-nbFeatures, (newNbFeatures+1):(nbFeatures -1), drop = FALSE])[1:newNbFeatures]
corner = mean(c(OthersVariablesCol, OthersVariablesRow))
newPartialDependence = object$partialDependence[1:newNbFeatures,1:newNbFeatures]
newPartialDependence = rbind(cbind(newPartialDependence, OthersVariablesRow), c(OthersVariablesCol, corner))
colnames(newPartialDependence)[ncol(newPartialDependence)] = "Other features"
rownames(newPartialDependence)[nrow(newPartialDependence)] = "Other features"
mosaicplot(t(newPartialDependence), color = sort(heat.colors(newNbFeatures + 1), decreasing = FALSE),
main = "Variables interactions over observations", ylab = "Most important variables at 2nd order",
xlab = "Most important variables at 1rst order", las = ifelse(maxChar > 10, 2,1), border = border)
}
else
{
mosaicplot(t(object$partialDependence[1:newNbFeatures,1:newNbFeatures]), color = sort(heat.colors(newNbFeatures), decreasing = FALSE),
las = ifelse(maxChar > 10, 2, 1), main = "Variable interactions over observations", ylab = "Most important variables at 2nd order", xlab = "Most important variables at 1st order", border = border)
}
dev.new()
par(las=1)
par(mar=c(5,maxChar + 1,4,2))
nbFeatures2 = min(maxVar2, length(object$variableImportanceOverInteractions))
importanceOverInteractions = sort(object$variableImportanceOverInteractions, decreasing = TRUE)/sum(object$variableImportanceOverInteractions)*100
barplot(importanceOverInteractions[nbFeatures2:1], horiz = TRUE, col = sort(heat.colors(nbFeatures2), decreasing = TRUE),
names.arg = names(importanceOverInteractions)[nbFeatures2:1], xlab = "Relative influence (%)",
main = "Variable importance based on interactions", border = border)
abline(v = 100/n, col = 'grey')
if (!is.matrix(object$localVariableImportance))
{
dev.new()
par(las=1)
par(mar = c(5, maxChar + 1,4,2))
nbFeatures3 = min(nbFeatures2, nrow(object$localVariableImportance$classVariableImportance))
mosaicplot(t(object$localVariableImportance$classVariableImportance[1:nbFeatures3,,drop = FALSE]),
color = sort(heat.colors(nbFeatures3),
decreasing = FALSE), main = "Variable importance over labels", border = border)
}
else
{
dev.new()
ggData = twoColumnsImportance(object$localVariableImportance)
mostImportantFeatures = as.numeric(names(sort(table(ggData[,2]), decreasing = TRUE)))
if (length(unique(ggData[,2])) > maxVar2)
{
mostImportantFeatures = mostImportantFeatures[1:maxVar2]
ggData = ggData[find.idx(mostImportantFeatures, ggData[,2], sorting = FALSE),]
}
if (is.null(Xtest)) { textX = paste("V", mostImportantFeatures, collapse = " ", sep="") }
else { textX = paste( colnames(Xtest)[mostImportantFeatures], collapse = ", ", sep="") }
textX = paste("Index of most important variables [", textX, "]")
colnames(ggData)[1] = "Response"
colnames(ggData)[2] = "Variable"
ggData = data.frame(ggData)
if (!is.null(Xtest)) { ggData[,"Variable"] = colnames(Xtest)[ggData[,"Variable"]] }
ggData[,"Variable"] = as.factor(ggData[,"Variable"])
if (nrow(ggData) > 1000)
{
randomSample = sample(nrow(ggData), 1000)
gg <- qplot(Variable, Response, data = ggData[randomSample,], geom = c("boxplot", "jitter"),
colour = Response, outlier.colour = "green", outlier.size = 1.5, fill = Variable)
cat("1000 observations have been randomly sampled to plot 'Dependence on most important predictors'.\n")
}
else
{
gg <- qplot(Variable, Response, data = ggData, geom = c("boxplot", "jitter"), colour = Response, outlier.colour = "green", outlier.size = 1.5, fill = Variable)
}
if (maxChar > 10)
{
plot(gg + labs(x ="", y = "Response", title = "Dependence on most important predictors") +
theme(axis.text.x = element_text(angle = 60, hjust = 1)))
}
else
{
plot(gg + labs(x ="", y = "Response", title = "Dependence on most important predictors"))
}
}
if (is.null(Xtest)) { stop("partial dependence between response and predictor can not be computed without data") }
else
{
endCondition = 0
dev.new()
pD <- partialDependenceOverResponses(Xtest, object, whichFeature = whichFeature, whichOrder = whichOrder, outliersFilter = outliersFilter,...)
idxMostimportant = rmNA(match(names(object$variableImportanceOverInteractions), colnames(Xtest)))[1:nbFeatures2]
mostimportantFeatures = colnames(Xtest)[idxMostimportant]
while (!endCondition)
{
ANSWER <- readline(cat("To get partial dependence of most important features\ngive a column number\namong", idxMostimportant,
"\n(", mostimportantFeatures, ")\nPress escape to quit\n"))
if ( is.numeric(as.numeric(ANSWER)) & !is.na(as.numeric(ANSWER)) )
{
whichFeature = as.numeric(ANSWER)
if (whichFeature %in% idxMostimportant)
{
pD <- partialDependenceOverResponses(Xtest, object, whichFeature = whichFeature, whichOrder = whichOrder, outliersFilter = outliersFilter,...)
}
else
{ stop("Please provide column index among most important. Partial Dependence can not be computed.") }
}
else
{ endCondition = 1 }
}
}
}
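# print.importance: S3 print method for 'importance' objects; text summary of the
# global, interaction-based and (for classification) per-label importance tables.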
print.importance <- function(x,...)
{
object <- x
minDim = min(10,length(object$variableImportanceOverInteractions))
if (!is.matrix(object$localVariableImportance))
{
cat("\n1 - Global Variable importance (", minDim, " most important based on information gain) :\n", sep = "")
cat("Note: most predictive features are ordered by 'score' and plotted. Most discriminant ones\nshould also be taken into account by looking 'class' and 'class.frequency'.\n\n")
}
else
{
cat("\n1 - Global Variable importance (", minDim, " most important based on 'Lp' distance) :\n",
sep = "")
}
print(object$globalVariableImportance[1:minDim,])
cat("\n\n2 - Local Variable importance")
cat("\nVariables interactions (", minDim, " most important variables at first (columns) and second (rows) order) :", sep = "")
cat("\nFor each variable (at each order), its interaction with others is computed.\n\n")
print(round(object$partialDependence[c(1:minDim, nrow(object$partialDependence)),], 2))
cat("\n\nVariable Importance based on interactions (", minDim, " most important) :\n", sep = "")
print(round(object$variableImportanceOverInteractions/sum(object$variableImportanceOverInteractions),2)[1:minDim])
if (!is.matrix(object$localVariableImportance))
{
cat("\nVariable importance over labels (", minDim,
" most important variables conditionally to each label) :\n", sep = "")
print(object$localVariableImportance$classVariableImportance[1:minDim,])
cat("\n\nSee ...$localVariableImportance$obsVariableImportance to get variable importance for each observation.", sep = "")
cat("\n\nCall clusterAnalysis() function to get a more compact and complementary analysis.\n Type '?clusterAnalysis' for help.", sep = "")
}
else
{ cat("\n\nSee ...$localVariableImportance to get variable importance for each observation.") }
cat("\n\nCall partialDependenceOverResponses() function to get partial dependence over responses\nfor each variable. Type '?partialDependenceOverResponses' for help.\n")
}
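# combineRUFObjects: merges two random uniform forests into one (e.g. trained on
# different chunks of data), delegating to onlineCombineRUF().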
combineRUFObjects <- function(rUF1, rUF2)
{
rUF1 <- filter.object(rUF1)
rUF2 <- filter.object(rUF2)
return(onlineCombineRUF(rUF1, rUF2))
}
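# rankingTrainData: scores test observations by how often trees vote against the
# majority class of the training labels, iteratively discarding low-scoring rows.
# Seemingly intended for ranking/novelty-style detection of minority-class candidates.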
rankingTrainData <- function(trainData = NULL, trainLabels = NULL, testData = NULL, testLabels = NULL, ntree = 100, thresholdScore = 2/3, nTimes = 2, ...)
{
score = tmpScore = rep(0, nrow(testData))
classes = unique(trainLabels)
majorityClass = modX(trainLabels)
i = 1; rmIdx = NULL; idx = 1:nrow(testData)
while ( (i <= nTimes) & (length(testData[,1]) >= 1) )
{
rUF <- randomUniformForestCore(trainData, trainLabels = as.factor(trainLabels), ntree = ntree, use.OOB = FALSE, rf.overSampling = -0.75, rf.targetClass = majorityClass, rf.treeSubsampleRate = 2/3)
predictRUF <- randomUniformForestCore.predict(rUF, testData)
tmpScore = tmpScore + apply(predictRUF$all.votes, 1, function(Z) length(which(Z != majorityClass)))
score[idx] = score[idx] + tmpScore
rmIdx = which(tmpScore < (thresholdScore*ntree))
if (length(rmIdx) > 0) { testData = testData[-rmIdx,]; idx = idx[-rmIdx]; tmpScore = tmpScore[-rmIdx]; }
i = i + 1
}
return(score)
}
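# plotTreeCore: recursively converts a tree matrix into a 'dendrogram' object,
# labelling leaves with predictions and edges with split rules ('variable > point ?').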
plotTreeCore <- function(treeStruct, rowNum = 1, height.increment = 1)
{
if ( (treeStruct[rowNum, "status"] == -1) )
{
treeGraphStruct <- list()
attr(treeGraphStruct, "members") <- 1
attr(treeGraphStruct, "height") <- 0
attr(treeGraphStruct, "label") <- if (treeStruct[rowNum,"prediction"] == 0) { "next node" } else
{
if (is.numeric(treeStruct[rowNum,"prediction"]))
{ round(treeStruct[rowNum,"prediction"],2) } else { treeStruct[rowNum,"prediction"] }
}
attr(treeGraphStruct, "leaf") <- TRUE
}
else
{
left <- plotTreeCore(treeStruct, treeStruct[rowNum, "left.daughter"], height.increment)
right <- plotTreeCore(treeStruct, treeStruct[rowNum, "right.daughter"], height.increment)
treeGraphStruct <- list(left,right)
attr(treeGraphStruct, "members") <- attr(left, "members") + attr(right,"members")
attr(treeGraphStruct,"height") <- max(attr(left, "height"),attr(right, "height")) + height.increment
attr(treeGraphStruct, "leaf") <- FALSE
if (rowNum != 1)
{ attr(treeGraphStruct, "edgetext") <- paste(treeStruct[rowNum, "split.var"] , " > " , round(treeStruct[rowNum, "split.point"],2), " ?", sep ="") }
else
{
attr(treeGraphStruct, "edgetext") <- paste(".[no]. .",
treeStruct[rowNum, "split.var"] , " > " , round(treeStruct[rowNum, "split.point"],2), ". .[yes]", sep="")
}
}
class(treeGraphStruct) <- "dendrogram"
return(treeGraphStruct)
}
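# plotTreeCore2: variant of plotTreeCore that truncates the recursion once the row
# index exceeds 'maxDepth', displaying 'next node' for the pruned subtrees.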
plotTreeCore2 <- function(treeStruct, rowNum = 1, height.increment = 1, maxDepth = 100 )
{
if ((treeStruct[rowNum, "status"] == -1) | (rowNum > maxDepth))
{
treeGraphStruct <- list()
attr(treeGraphStruct, "members") <- 1
attr(treeGraphStruct, "height") <- 0
attr(treeGraphStruct, "label") <- if (treeStruct[rowNum,"status"] == 1) { "next node" } else
{
if (is.numeric(treeStruct[rowNum,"prediction"]))
{ round(treeStruct[rowNum,"prediction"],2) } else { treeStruct[rowNum,"prediction"] }
}
attr(treeGraphStruct, "leaf") <- TRUE
}
else
{
left <- plotTreeCore2(treeStruct, treeStruct[rowNum, "left.daughter"], height.increment, maxDepth = maxDepth)
right <- plotTreeCore2(treeStruct, treeStruct[rowNum, "right.daughter"], height.increment, maxDepth = maxDepth)
treeGraphStruct <- list(left,right)
attr(treeGraphStruct, "members") <- attr(left, "members") + attr(right,"members")
attr(treeGraphStruct,"height") <- max(attr(left, "height"),attr(right, "height")) + height.increment
attr(treeGraphStruct, "leaf") <- FALSE
if (rowNum != 1)
{ attr(treeGraphStruct, "edgetext") <- paste(treeStruct[rowNum, "split.var"] , " > " , round(treeStruct[rowNum, "split.point"],2), "?", sep ="") }
else
{
attr(treeGraphStruct, "edgetext") <- paste(treeStruct[rowNum, "split.var"] , " > " , round(treeStruct[rowNum, "split.point"],2), "?", ". .", sep="")
}
}
class(treeGraphStruct) <- "dendrogram"
return(treeGraphStruct)
}
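# plotTree: user-facing wrapper; builds the dendrogram (full or truncated) and plots
# it with depth on the y-axis and predictions on the x-axis.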
plotTree <- function(treeStruct, rowNum = 1, height.increment = 1, maxDepth = 100, fullTree = FALSE, xlim = NULL, ylim= NULL, center = TRUE)
{
if (fullTree)
{ drawTreeStruct <- plotTreeCore(treeStruct, rowNum = rowNum , height.increment = height.increment) }
else
{ drawTreeStruct <- plotTreeCore2(treeStruct, rowNum = rowNum , height.increment = height.increment, maxDepth = maxDepth) }
nP <- list(col = 3:2, cex = c(2.0, 0.75), pch = 21:22, bg = c("light blue", "pink"), lab.cex = 0.75, lab.col = "tomato")
if (is.null(xlim))
{
if (is.null(ylim)) { ylim = c(0,8) }
plot(drawTreeStruct, center = center, leaflab ='perpendicular', edgePar = list(t.cex = 2/3, p.col = NA, p.lty = 0, lty =c( 2,5),
col = c("purple", "red"), lwd = 1.5), nodePar = nP, ylab = "Tree depth", xlab = "Predictions",
xlim = c(0, min(30,floor(nrow(treeStruct)/2))), ylim = ylim)
}
else
{
if (is.null(ylim)) { ylim = c(0,8) }
plot(drawTreeStruct, center = center, leaflab ='perpendicular', edgePar = list(t.cex = 2/3, p.col = NA, p.lty = 0, lty =c( 2,5),
col = c("purple", "red"), lwd = 1.5), nodePar = nP, ylab = "Tree depth", xlab = "Predictions", xlim = xlim, ylim = ylim )
}
}
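# fillNA2.randomUniformForest: imputes missing values by training, for each feature
# with enough NAs, a small forest on the complete rows of the other features and
# predicting the missing entries (in parallel, one feature per worker). Features
# with few NAs fall back to rough imputation (na.replace).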
fillNA2.randomUniformForest <- function(X, Y = NULL, ntree = 100, mtry = 1, nodesize = 10,
categoricalvariablesidx = NULL,
NAgrep = "",
maxClasses = floor(0.01*min(3000, nrow(X))+2),
threads = "auto",
...)
{
i = NULL
n <- nrow(X)
X <- fillVariablesNames(X)
if (exists("categorical", inherits = FALSE)) { categoricalvariablesidx = categorical }
else { categorical = NULL }
if (!is.null(Y)) { trueY = Y }
trueX = X
limitSize = if (n > 2000) { 50 } else { max(nodesize, 10) }
if (!is.matrix(X))
{
flag = TRUE
X.factors <- which.is.factor(X, maxClasses = maxClasses)
X <- NAfactor2matrix(X, toGrep = NAgrep)
}
else
{
flag = FALSE
X.factors = rep(0,ncol(X))
}
NAIdx = which(is.na(X), arr.ind = TRUE)
if (dim(NAIdx)[1] == 0) { stop("No missing values in data. Please use the NAgrep option to specify missing values.\nThe function checks for NA in data and for the string given by NAgrep. Please also check that the string does not contain a space character.") }
processedFeatures = unique(NAIdx[,2])
nbNA = table(NAIdx[,2])
fullNAFeatures = which(nbNA == n)
fullNAFeaturesLength = length(fullNAFeatures)
if (fullNAFeaturesLength == length(processedFeatures))
{ stop("All features have only NA in their values.\n") }
else
{
if (fullNAFeaturesLength > 0)
{ processedFeatures = processedFeatures[-fullNAFeatures] }
}
nFeatures = length(processedFeatures)
idx <- lapply(processedFeatures, function(Z) NAIdx[which(NAIdx[,2] == Z),1])
validIdx <- sapply(idx, function(Z) (length(Z) > limitSize))
processedFeatures <- processedFeatures[which(validIdx == TRUE)]
nFeatures = length(processedFeatures)
invalidIdx <- which(validIdx == FALSE)
if (length(invalidIdx) > 0) { idx <- rm.InAList(idx, invalidIdx) }
if (nFeatures == 0)
{
cat("Not enough missing values. Rough imputation is done. Please lower 'nodesize' value to increase accuracy.\n")
return(na.replace(trueX, fast = TRUE))
}
else
{
if (!is.null(Y)){ X = cbind(X,Y) }
X <- fillVariablesNames(X)
X <- na.replace(X, fast = TRUE)
{
max_threads = min(detectCores(),4)
if (threads == "auto")
{
if (max_threads == 2) { threads = min(max_threads, nFeatures) }
else { threads = max(1, max_threads) }
}
else
{
if (max_threads < threads)
{ cat("Note: number of threads is higher than number of logical threads in this computer.\n") }
}
threads = min(nFeatures, max_threads)
Cl <- makePSOCKcluster(threads, type = "SOCK")
registerDoParallel(Cl)
chunkSize <- ceiling(nFeatures/getDoParWorkers())
smpopts <- list(chunkSize = chunkSize)
}
export = c("randomUniformForest.default", "rUniformForest.big", "randomUniformForestCore.big", "randomUniformForestCore", "predict.randomUniformForest", "rUniformForestPredict", "uniformDecisionTree", "CheckSameValuesInAllAttributes", "CheckSameValuesInLabels", "fullNode", "genericNode", "leafNode", "filter.object", "filter.forest", "randomUniformForestCore.predict", "onlineClassify", "overSampling", "predictDecisionTree", "options.filter", "majorityClass", "randomCombination", "randomWhichMax", "vector2matrix", "which.is.na", "which.is.factor", "factor2vector", "outputPerturbationSampling", "rmNA", "count.factor", "find.idx", "genericOutput", "fillVariablesNames", "is.wholenumber", "rm.tempdir", "setManyDatasets", "onlineCombineRUF", "mergeLists", "classifyMatrixCPP", "L2DistCPP", "checkUniqueObsCPP", "crossEntropyCPP", "giniCPP", "L2InformationGainCPP", "entropyInformationGainCPP", "runifMatrixCPP", "NAfactor2matrix", "factor2matrix", "as.true.matrix", "NAFeatures", "NATreatment", "rmInf", "rm.InAList")
if (nrow(X) < 2001)
{
newX <- foreach( i= 1:nFeatures, .options.smp = smpopts, .inorder = FALSE, .combine = cbind,
.multicombine = TRUE, .export = export) %dopar%
{
if (X.factors[processedFeatures[i]] == 1)
{
rufObject <- randomUniformForest.default(X[-idx[[i]],-processedFeatures[i]],
Y = as.factor(X[-idx[[i]], processedFeatures[i]]), OOB = FALSE, importance = FALSE, ntree = ntree, mtry = mtry, nodesize = nodesize, threads = 1, categoricalvariablesidx = categoricalvariablesidx)
X[idx[[i]], processedFeatures[i]] <- as.numeric(as.vector(predict.randomUniformForest(rufObject,
X[idx[[i]], -processedFeatures[i]])))
}
else
{
rufObject <- randomUniformForest.default(X[-idx[[i]],-processedFeatures[i]], Y = X[-idx[[i]], processedFeatures[i]],
OOB = FALSE, importance = FALSE, ntree = ntree, mtry = mtry, nodesize = nodesize, threads = 1,
categoricalvariablesidx = categoricalvariablesidx)
X[idx[[i]], processedFeatures[i]] <- predict.randomUniformForest(rufObject,
X[idx[[i]], -processedFeatures[i]])
}
if (mean(is.wholenumber(rmNA(X[-idx[[i]], processedFeatures[i]]))) == 1) { round(X[,processedFeatures[i]]) }
else { X[,processedFeatures[i]] }
}
X[,processedFeatures] = newX
stopCluster(Cl)
}
else
{
newX <- foreach(i = 1:nFeatures, .options.smp = smpopts, .inorder = FALSE, .combine = cbind,
.multicombine = TRUE, .export = export) %dopar%
{
if (X.factors[processedFeatures[i]] == 1)
{
rufObject <- rUniformForest.big(X[-idx[[i]],-processedFeatures[i]], Y = as.factor(X[-idx[[i]],
processedFeatures[i]]), nforest = max(1, floor(nrow(X[-idx[[i]],])/2000)), replacement = TRUE, randomCut = TRUE, OOB = FALSE, importance = FALSE, ntree = ntree, mtry = mtry,
nodesize = nodesize, threads = 1, categoricalvariablesidx = categoricalvariablesidx)
X[idx[[i]], processedFeatures[i]] <- as.numeric(as.vector(predict.randomUniformForest(rufObject,
X[idx[[i]], -processedFeatures[i]])))
}
else
{
rufObject <- rUniformForest.big(X[-idx[[i]],-processedFeatures[i]], Y = X[-idx[[i]], processedFeatures[i]],
nforest = max(1, floor(nrow(X[-idx[[i]],])/2000)), replacement = TRUE, randomCut = TRUE, OOB = FALSE,
importance = FALSE, ntree = ntree, mtry = mtry, nodesize = nodesize, threads = 1,
categoricalvariablesidx = categoricalvariablesidx)
X[idx[[i]], processedFeatures[i]] <- predict.randomUniformForest(rufObject, X[idx[[i]], -processedFeatures[i]])
}
if (mean(is.wholenumber(rmNA(X[-idx[[i]], processedFeatures[i]]))) == 1)
{ round(X[,processedFeatures[i]]) }
else { X[,processedFeatures[i]] }
}
X[,processedFeatures] = newX
stopCluster(Cl)
}
if (sum(X.factors) != 0)
{
factorsIdx = which(X.factors != 0)
X = as.true.matrix(X)
X = as.data.frame(X)
for (j in 1:length(factorsIdx))
{
k = factorsIdx[j]
X[,k] = as.factor(X[,k])
Xlevels = as.numeric(names(table(X[,k])))
asFactorTrueX_k = as.factor(trueX[,k])
oldLevels = levels(asFactorTrueX_k)
for (i in 1:length(NAgrep))
{
levelToRemove = which(oldLevels == NAgrep[i])
if (length(levelToRemove) > 0)
{
if (NAgrep[i] == "")
{ levels(X[,k]) = c("virtualClass", oldLevels[-levelToRemove]) }
else
{ levels(X[,k]) = oldLevels[-levelToRemove] }
}
else
{
checkLevel = sample(Xlevels,1)
if ((!is.numeric(trueX[,k])) & (checkLevel > 0) & (is.wholenumber(checkLevel)))
{ levels(X[,k]) = oldLevels[Xlevels] }
}
}
}
}
for (j in 1:ncol(X))
{
classTrueX = class(trueX[,j])
if ((classTrueX) == "character") X[,j] = as.character(X[,j])
}
return(X)
}
}
rufImpute = fillNA2.randomUniformForest
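# unsupervised2supervised: turns an unsupervised problem into a two-class supervised
# one by stacking the original data (label 0) with a synthetic copy (label 1) drawn
# by univariate or multivariate uniform sampling, optionally conditioned on a variable.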
unsupervised2supervised <- function(X,
method = c("uniform univariate sampling", "uniform multivariate sampling"),
seed = 2014,
conditionalTo = NULL,
samplingFromGaussian = FALSE,
bootstrap = FALSE)
{
np = dim(X); n = np[1]; p = np[2]
flag = FALSE
syntheticTrainLabels1 = rep(0,n)
if (!is.null(rownames(X))) { rowNamesX = rownames(X); flag = TRUE }
if (is.data.frame(X))
{
X = NAfactor2matrix(X, toGrep = "anythingParticular")
cat("X is a data frame and has been converted to a matrix.\n")
}
XX = matrix(data = NA, nrow = n, ncol = p)
set.seed(seed)
if (method[1] == "uniform univariate sampling")
{
if (is.null(conditionalTo))
{ XX <- apply(X, 2, function(Z) sample(Z, n, replace = bootstrap)) }
else
{
if (length(conditionalTo) == nrow(X)) { ZZ = round(as.numeric(conditionalTo),0) }
else { ZZ = round(as.numeric(X[,conditionalTo[1]]),0) }
ZZtable = as.numeric(names(table(ZZ)))
XX = X
for (i in 1:length(ZZtable))
{
idx = which(ZZ == ZZtable[i])
if (samplingFromGaussian)
{ XX[idx,] <- apply(X[idx,], 2, function(Z) rnorm(length(idx), mean(Z), sd(Z))) }
else
{ XX[idx,] <- apply(X[idx,], 2, function(Z) sample(Z, length(idx), replace = bootstrap)) }
}
}
}
else
{
if (is.null(conditionalTo))
{
for (j in 1:p)
{
XX[,j] = sample(X, n, replace = bootstrap)
outOfRange = which( (XX[,j] > max(X[,j])) | (XX[,j] < min(X[,j])) )
while (length(outOfRange) > 0)
{
XX[outOfRange, j] = sample(X, length(outOfRange), replace = bootstrap)
outOfRange = which( (XX[,j] > max(X[,j])) | (XX[,j] < min(X[,j])) )
}
}
}
else
{
if (length(conditionalTo) == nrow(X)) { ZZ = round(as.numeric(conditionalTo),0) }
else { ZZ = round(as.numeric(X[,conditionalTo[1]]),0) }
ZZtable = as.numeric(names(table(ZZ)))
XX = X
meanX = mean(X)
sdX = sd(X)
for (i in 1:length(ZZtable))
{
idx = which(ZZ == ZZtable[i])
for (j in 1:p)
{
if (samplingFromGaussian)
{ XX[idx,j] = rnorm(length(idx), meanX, sdX) }
else
{ XX[idx,j] = sample(X, length(idx), replace = bootstrap) }
outOfRange = which( (XX[idx,j] > max(X[,j])) | (XX[idx,j] < min(X[,j])) )
while (length(outOfRange) > 0)
{
XX[idx[outOfRange], j] = sample(X, length(outOfRange), replace = bootstrap)
outOfRange = which( (XX[idx[outOfRange],j] > max(X[,j])) |
(XX[idx[outOfRange],j] < min(X[,j])) )
}
}
}
}
}
newX = rbind(X, XX)
if (flag) { rownames(newX) = c(rowNamesX, rowNamesX) }
syntheticTrainLabels2 = rep(1, n)
Y = c(syntheticTrainLabels1, syntheticTrainLabels2)
return(list(X = newX, Y = Y))
}
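# proximitiesMatrix: builds proximities from co-membership of observations in the
# same terminal node across trees. fullMatrix = TRUE yields an n x n matrix
# (counts, or 0/1 if sparseProximities); otherwise an n x B matrix (B = number of
# trees) is used to save memory.
#
# Minimal usage sketch (hypothetical objects 'rUF' and 'X'; not run):
# proxMat <- proximitiesMatrix(rUF, Xtest = X)
# fit <- MDSscale(proxMat) # then gap.stats()/kMeans() or hClust() for clustering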
proximitiesMatrix <- function(object, fullMatrix = TRUE, Xtest = NULL, predObject = NULL, sparseProximities = TRUE, pthreads = "auto")
{
object = filter.object(object)
if (is.null(object$predictionObject))
{
if (is.null(predObject))
{
if (is.null(Xtest)) { stop("please provide test data.\n") }
else
{
predObject <- predict.randomUniformForest(object, Xtest, type = "all")
votesData = predObject$votes.data
}
}
else
{
if (is.null(predObject$majority.vote))
{ stop("Please provide full prediction object (type = 'all') when calling predict() function") }
else
{ votesData = predObject$votes.data }
}
}
else
{ votesData = object$predictionObject$votes.data }
n = nrow(votesData[[1]])
B = length(votesData)
if ((n < 500) & is.character(pthreads)) { pthreads = 1 }
else
{
max_threads = detectCores()
if (pthreads == "auto")
{
if (max_threads == 2) { pthreads = max_threads }
else { pthreads = max(1, max_threads - 1) }
}
else
{
if (max_threads < pthreads)
{ cat("Warning : number of threads is higher than logical threads in this computer.\n") }
}
Cl = makePSOCKcluster(pthreads, type = "SOCK")
registerDoParallel(Cl)
}
if (fullMatrix)
{
operatorLimit = 3*800*n^2/(10000^2)
if ( memory.limit() < operatorLimit)
{
cat("Proximity matrix is likely to be too big. Model will compute a 'n x B' matrix with B = number of trees\n")
proxMatrix = matrix(0L, B, n)
fullMatrix = FALSE
}
else
{ proxMatrix = matrix(0L, n, n) }
proxMat = proxMatrix
if (!sparseProximities)
{
if (pthreads == 1)
{
for (i in 1:n)
{
for (b in 1:B)
{
common.idx = which(votesData[[b]][i,3] == votesData[[b]][,3])
proxMatrix[i, common.idx] = 1L + proxMatrix[i,common.idx]
}
}
}
else
{
proxMatrix <- foreach(i = 1:n, .combine = rbind, .multicombine = FALSE) %dopar%
{
for (b in 1:B)
{
common.idx = which(votesData[[b]][i,3] == votesData[[b]][,3])
proxMat[i, common.idx] = 1L + proxMat[i, common.idx]
}
proxMat[i,]
}
stopCluster(Cl)
}
}
else
{
if (pthreads == 1)
{
for (i in 1:n)
{
for (b in 1:B)
{
common.idx = which(votesData[[b]][i,3] == votesData[[b]][,3])
proxMatrix[i, common.idx] = 1L + proxMat[i,common.idx]
}
}
}
else
{
proxMatrix <- foreach(i = 1:n, .combine = rbind, .multicombine = TRUE) %dopar%
{
for (b in 1:B)
{
common.idx = which(votesData[[b]][i,3] == votesData[[b]][,3])
proxMat[i, common.idx] = 1L + proxMatrix[i, common.idx]
}
proxMat[i,]
}
stopCluster(Cl)
}
}
}
else
{
proxMatrix = matrix(0L, n, B)
proxMat = proxMatrix
if (pthreads == 1)
{
for (i in 1:n)
{
for (b in 1:B)
{
common.idx = which((votesData[[b]][i,3] == votesData[[b]][,3]))
# write into proxMatrix (the returned object); the original filled a temporary
# copy ('proxMat') and returned an all-zero matrix
proxMatrix[common.idx, b] = 1L + proxMatrix[common.idx, b]
}
}
}
else
{
proxMatrix <- foreach(i = 1:n, .combine = rbind, .multicombine = TRUE) %dopar%
{
for (b in 1:B)
{
common.idx = which((votesData[[b]][i,3] == votesData[[b]][,3]))
proxMat[common.idx, b] = 1L + proxMatrix[common.idx,b]
}
proxMat[i,]
}
stopCluster(Cl)
}
}
varNames = NULL
if (fullMatrix) { nn = n }
else { nn = B }
varNames = paste("C", 1:nn, sep = "")
colnames(proxMatrix) = varNames
rownames(proxMatrix) = if (!is.null(rownames(Xtest))) { rownames(Xtest) } else { 1:n }
if (sparseProximities) { return(proxMatrix) } else { return(proxMatrix/B) }
}
kBiggestProximities <- function(proximitiesMatrix, k) t(apply(proximitiesMatrix, 1, function(Z) sort(Z, decreasing = TRUE)[1:(k+1)][-1] ))
MDSscale <- function(proximitiesMatrix, metric = c("metricMDS", "nonMetricMDS"), dimension = 2, distance = TRUE, plotting = TRUE, seed = 2014)
{
set.seed(seed)
if (metric[1] == "metricMDS")
{
if (distance) { d = stats::dist(1 - proximitiesMatrix) }
else
{
if (nrow(proximitiesMatrix) == ncol(proximitiesMatrix)) { d = 1 - proximitiesMatrix }
else { d = stats::dist(1 - proximitiesMatrix); cat("Matrix is not a square matrix. Distance is used.\n") }
}
fit = stats::cmdscale(d, eig = TRUE, k = dimension)
}
else
{
eps = 1e-6
if (distance) { d = eps + stats::dist(1 - proximitiesMatrix) }
else
{
if (nrow(proximitiesMatrix) == ncol(proximitiesMatrix)) { d = 1 - (proximitiesMatrix - eps) }
else { d = eps + stats::dist(1 - proximitiesMatrix); cat("Matrix is not a square matrix. Distance is used.\n") }
}
fit = MASS::isoMDS(d, k = dimension)
}
if (plotting)
{
x <- fit$points[,1]
y <- fit$points[,2]
plot(x, y, xlab = "Coordinate 1", ylab = "Coordinate 2", main = metric[1], type = "p", lwd = 1, pch = 20)
}
return(fit)
}
specClust <- function(proxMat, k = floor(sqrt(ncol(proxMat))))
{
A = proxMat
diag(A) = 0
np = dim(A)
n = np[1]
p = np[2]
D = matrix(0, n, p)
diag(D) = (rowSums(A))^(-0.5)
L = (D%*%A%*%D)
X = eigen(L)$vectors[,1:k]
Xcol = (colSums(X^2))^0.5
## normalize each eigenvector (column) to unit length; plain X/Xcol would recycle incorrectly
return(sweep(X, 2, Xcol, "/"))
}
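## Usage sketch (hypothetical, with assumed inputs): the normalized eigenvector
## matrix returned by specClust() is typically passed to k-means to obtain the
## spectral clustering labels.
if (FALSE) {
  U <- specClust(proxMat, k = 3)                 # proxMat: square proximity matrix
  labels <- stats::kmeans(U, centers = 3)$cluster
}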
gap.stats <- function(fit, B = 100, maxClusters = 5, seed = 2014)
{
set.seed(seed)
return(cluster::clusGap(fit, FUN = kmeans, K.max = max(2, maxClusters), B = B))
}
kMeans <- function(fit, gapStatFit, maxIters = 10, plotting = TRUE, algorithm = NULL, k = NULL,
reduceClusters = FALSE, seed = 2014)
{
set.seed(seed)
if (is.null(maxIters)) { maxIters = 10 }
if (is.null(algorithm)) { algorithm = "Hartigan-Wong" }
if (is.null(k))
{
k = cluster::maxSE(gapStatFit$Tab[, "gap"], gapStatFit$Tab[, "SE.sim"])
if (k == 1) { cat("Only one cluster has been found.\nNumber of clusters has been set to 2.\n") ; k = 2 }
if (k == length(gapStatFit$Tab[, "gap"]))
{ cat("\nNumber of clusters found is equal to the maximum number of clusters allowed.\n") }
}
fit.kMeans <- stats::kmeans(fit, k, iter.max = maxIters, algorithm = algorithm)
if ( (k > 2) & reduceClusters)
{
clusterSizes = table(fit.kMeans$cluster)
smallSizes = which(clusterSizes < 0.05*length(fit[,1]))
nSmallSizes = length(smallSizes)
if (nSmallSizes > 0)
{
smallSizes = sort(smallSizes, decreasing = TRUE)
for (i in 1:nSmallSizes)
{
fit.kMeans$cluster[which(fit.kMeans$cluster == smallSizes[i])] = as.numeric(names(clusterSizes))[max(1, smallSizes[i] - 1)]
}
}
}
if (plotting)
{
x = fit[,1]
y = fit[,2]
plot(x, y, xlab = "Coordinate 1", ylab = "Coordinate 2", main = "Multi-dimensional scaling", type = "p", lwd = 1, pch = 20)
for (i in 1:k)
{
idx = which(fit.kMeans$cluster == i)
points(x[idx], y[idx], type = "p", lwd = 1, pch = 20, col = i)
}
}
return(fit.kMeans)
}
hClust <- function(proximities, method = NULL, plotting = TRUE, k = NULL, reduceClusters = FALSE, seed = 2014)
{
set.seed(seed)
if (is.null(method)) { method = "complete" }
d <- stats::dist(1 - proximities)
XClust <- stats::hclust(d, method = method, members = NULL)
if (is.null(k))
{
diffHeightsIdx = which.max(diff(XClust$height))
height = (XClust$height[diffHeightsIdx] + XClust$height[diffHeightsIdx + 1])/2
values <- stats::cutree(XClust, h = height)
nbClusters = length(unique(values))
}
else
{
nbClusters = k
values <- stats::cutree(XClust, k = nbClusters)
height = NULL
}
if ( (nbClusters > 2) & reduceClusters)
{
clusterSizes = table(values)
smallSizes = which(clusterSizes < 0.05*length(values))
nSmallSizes = length(smallSizes)
if (nSmallSizes > 0)
{
smallSizes = sort(smallSizes, decreasing = TRUE)
for (i in 1:nSmallSizes)
{
values[which(values == smallSizes[i])] = as.numeric(names(clusterSizes))[max(1, smallSizes[i] - 1)]
}
}
}
if (plotting)
{
showLabels = nrow(proximities) < 300
plot(XClust, xlab = "Observations", labels = showLabels)
if (!is.null(height)) { abline(h = height , col='red') }
}
return(list(object = XClust, cluster = values))
}
observationsImportance <- function(X, importanceObject)
{
X = factor2matrix(X)
object = importanceObject$localVariableImportance$obsVariableImportance[,-1]
idx = grep("localVariableFrequency",colnames(object))
object = object[,-idx]
n = nrow(X)
XX = matrix(NA, n, ncol(object))
for (i in 1:nrow(X))
{ XX[i,] = X[i, object[i,]] }
rownames(XX) = 1:n
return(XX)
}
print.unsupervised <- function(x, ...)
{
object = x
rm(x)
Z = object$unsupervisedModel$cluster
if (!is.factor(Z))
{
if (!is.null(object$unsupervisedModel$clusterOutliers))
{
Z = c(Z, object$unsupervisedModel$clusterOutliers)
Z = sortDataframe( data.frame(Z, as.numeric(names(Z))), 2)
Z = Z[,1]
}
}
ZZ = table(Z)
names(attributes(ZZ)$dimnames) = NULL
if (object$params["endModel"] == "MDS")
{
x = object$MDSModel$points[,1]
y = object$MDSModel$points[,2]
percentExplained = (interClassesVariance(x, Z) + interClassesVariance(y, Z))/(variance(x) + variance(y))
cat("Average variance between clusters (in percent of total variance): ", round(100*percentExplained,2), "%\n", sep = "")
cat("Average silhouette: ", round(mean(cluster::silhouette(as.numeric(Z), dist(object$MDSModel$points))[,3]),4), "\n", sep = "")
cat("Clusters size:\n")
print(ZZ)
cat("Clusters centers (in the MDS coordinates):\n")
print(round(object$unsupervisedModel$centers,4))
}
if ((object$params["endModel"] == "MDSkMeans") | (object$params["endModel"] == "SpectralkMeans"))
{
cat("Average variance between clusters (in percent of total variance): ", round(100*sum(object$unsupervisedModel$betweenss)/sum(object$unsupervisedModel$totss),2), "%\n", sep = "")
cat("Average silhouette: ", round(mean(cluster::silhouette(Z, dist(object$MDSModel$points))[,3]),4), "\n",
sep = "")
cat("Clusters size:\n")
print(ZZ)
cat("Clusters centers (in the MDS coordinates):\n")
print(round(object$unsupervisedModel$centers,4))
}
if (object$params["endModel"] == "MDShClust")
{
x = object$MDSModel$points[,1]
y = object$MDSModel$points[,2]
percentExplained1 = interClassesVariance(x, Z)/variance(x)
percentExplained2 = interClassesVariance(y, Z)/variance(y)
percentExplained = round(0.5*(percentExplained1 + percentExplained2), 4)
cat("Average variance between clusters (in percent of total variance) : ", percentExplained*100, "%\n", sep = "")
cat("Average silhouette : ", round(mean(cluster::silhouette(Z, dist(object$MDSModel$points))[,3]),4), "\n", sep = "")
cat("Clusters size:\n")
print(ZZ)
cat("Clusters centers (in the MDS coordinates):\n")
print(round(object$unsupervisedModel$centers,4))
}
}
plot.unsupervised <- function(x, importanceObject = NULL, xlim = NULL, ylim = NULL, coordinates = NULL,...)
{
object = x
rm(x)
if (is.null(coordinates))
{
coordinates = c(as.numeric( substr(object$params["coordinates"], 1, 1)),
as.numeric( substr(object$params["coordinates"], 2, 2)) )
if (ncol(object$MDSModel$points) == 2)
{
if (coordinates[1] > 1)
{
cat(paste("Only two coordinates are available: coordinates 1 and 2 have been plotted instead of coordinates", coordinates[1], "and", coordinates[2], ".\n", sep=" "))
}
coordinates = 1:2
}
}
else
{
if (ncol(object$MDSModel$points) == 2) coordinates = 1:2
}
x = object$MDSModel$points[,coordinates[1]]
y = object$MDSModel$points[,coordinates[2]]
offsetY = 0
if (!is.null(importanceObject))
{
clusterFeatures = importanceObject$localVariableImportance$classVariableImportance
p = ncol(clusterFeatures)
clusterFeaturesNames = NULL
varNames = varValues = vector(length = p)
for (j in 1:p)
{
clusterFeatures = sortDataframe(clusterFeatures, j, decrease = TRUE)
idx = which(clusterFeatures[,j] > 0.05)
varNames[j] = rownames(clusterFeatures)[idx[1]]
varValues[j] = clusterFeatures[idx[[1]],j]
clusterFeaturesNames = c(clusterFeaturesNames, paste(varNames[j], ":", round(100*varValues[j],0), "%", sep ="" ))
}
offsetY = -abs(diff(range(y)))*0.1
}
clusters = object$unsupervisedModel$cluster
if (object$params["endModel"] != "MDS")
{
if (!is.null(object$unsupervisedModel$clusterOutliers))
{
clusters = c(clusters, object$unsupervisedModel$clusterOutliers)
clusters = sortDataframe( data.frame(clusters, as.numeric(names(clusters))),2)
clusters = clusters[,1]
}
}
offsetX = abs(diff(range(x)))*0.3
uniqueClusters = sort(unique(clusters))
dev.new()
if (object$params["endModel"] == "MDShClust")
{
dev.off()
nbClusters = length(uniqueClusters)
XClust = object$unsupervisedModel$object
diffHeightsIdx = which.max(diff(XClust$height))
height = (XClust$height[diffHeightsIdx] + XClust$height[diffHeightsIdx + 1])/2
showLabels = nrow(object$proximityMatrix) < 300
plot(XClust, labels = showLabels)
abline(h = height , col='red')
cat("Two graphics have been plotted. Please slide the window to see the second one.\n")
dev.new()
}
if (is.null(xlim)) { xlim = c(min(x) - offsetX, max(x) + offsetX) }
if (is.null(ylim)) { ylim = c(min(y) + offsetY, max(y)) }
if (length(grep("MDS", object$params["endModel"])) == 1)
{ plotTitle = "Multidimensional scaling and Clusters representation" }
else
{ plotTitle = "Spectral decomposition and Clusters representation" }
plot(x, y, xlab = paste("Coordinate", coordinates[1]), ylab = paste("Coordinate", coordinates[2]),
main = plotTitle, type = "p", lwd = 1, pch = 20, xlim = xlim, ylim = ylim)
nbClusters = length(uniqueClusters)
for (i in 1:nbClusters)
{
idx = which(clusters == uniqueClusters[i])
points(x[idx], y[idx], type = "p", lwd = 1, pch = 20, col = i)
points(object$unsupervisedModel$centers[i,coordinates[1]],
object$unsupervisedModel$centers[i,coordinates[2]], lwd = 1, cex = 1.5, pch = 8, col = i)
}
if (!is.null(importanceObject))
{
clusterNames = rm.string(colnames(clusterFeatures), "Class ")
legend("topright", inset = .01, clusterNames, fill = 1:i, horiz = FALSE, border = NA, bty ="n")
legend("bottomleft", cex = 0.7, as.character(clusterFeaturesNames), fill = 1:i, horiz = FALSE, border = NA, bty ="n")
print(clusterFeatures)
}
else
{ legend("topright", inset = .01, as.character(uniqueClusters), fill = 1:i, horiz = FALSE, border = NA, bty ="n")}
}
modifyClusters <- function(object, decreaseBy = NULL, increaseBy = NULL, seed = 2014)
{
params = object$modelParams
endModel = object$params["endModel"]
nbClusters = object$nbClusters
if (!is.null(decreaseBy))
{ k = nbClusters - decreaseBy }
if (!is.null(increaseBy))
{ k = nbClusters + increaseBy }
if (is.null(decreaseBy) & (is.null(increaseBy)))
{ stop("Please increase or decrease number of clusters") }
if (k <= 1)
{
cat("Only one cluster will remain. Model has not been modified.\n")
return(object)
}
else
{
endModelMetric = if (object$params["endModelMetric"] == "NULL") { NULL }
else { object$params["endModelMetric"] }
Z = object$MDSModel$points
if ( (endModel == "MDSkMeans") | (endModel == "SpectralkMeans") )
{
maxIters = if (object$params["maxIters"] == "NULL") { 10 } else { as.numeric(object$params["maxIters"]) }
X.model <- kMeans(Z, NULL, k = k, maxIters = maxIters, algorithm = endModelMetric, plotting = FALSE, reduceClusters = FALSE, seed = seed)
}
if (endModel == "MDShClust")
{
X.model <- hClust(Z, method = endModelMetric, plotting = FALSE, k = k, reduceClusters = FALSE, seed = seed)
centers = matrix(NA, k, ncol(Z))
for (i in 1:k)
{
idx = which(X.model$cluster == i)
centers[i,] = colMeans(Z[idx,, drop = FALSE])
}
X.model$centers = centers
}
object$unsupervisedModel = NULL
object$unsupervisedModel = X.model
object$nbClusters = k
return(object)
}
}
splitClusters <- function(object, whichOnes, seed = 2014, ...)
{
cat("Note that outliers are not currently taken into account.\n")
whichOnes = sort(whichOnes)
endModel = object$params["endModel"]
if (exists("endModelMetric")) { endModelMetric = endModelMetric }
else
{
if (object$params["endModelMetric"] == "NULL") { endModelMetric = NULL }
else { endModelMetric = object$params["endModelMetric"] }
}
Z = object$MDSModel$points
nClusters = length(unique(object$unsupervisedModel$cluster))
for (i in 1:length(whichOnes))
{
idx = which(object$unsupervisedModel$cluster == whichOnes[i])
if (length(idx) == 0) { stop(paste("No elements available for cluster ", whichOnes[i], ".\n", sep = "")) }
if ( (endModel == "MDSkMeans") | (endModel == "SpectralkMeans") )
{
if (exists("maxIters")) { maxIters = maxIters }
else
{
if (object$params["maxIters"] == "NULL") { maxIters = 10 }
else { maxIters = as.numeric(object$params["maxIters"]) }
}
X.model <- kMeans(Z[idx,], NULL, k = 2, maxIters = maxIters, algorithm = endModelMetric, plotting = FALSE, reduceClusters = FALSE, seed = seed)
object$unsupervisedModel$betweenss = 0.5*(object$unsupervisedModel$betweenss + X.model$betweenss)
object$unsupervisedModel$totss = 0.5*(object$unsupervisedModel$totss + X.model$totss)
}
if (endModel == "MDShClust")
{
X.model <- hClust(Z, method = endModelMetric, plotting = FALSE, k = 2, reduceClusters = FALSE, seed = seed)
centers = matrix(NA, 2, ncol(Z))
for (j in 1:2)
{
idx2 = which(X.model$cluster == j)
centers[j,] = colMeans(Z[idx2, , drop = FALSE])
}
X.model$centers = centers
}
idx3 = which(X.model$cluster == 2)
object$unsupervisedModel$cluster[idx][idx3] = nClusters + i
}
nClusters = length(unique(object$unsupervisedModel$cluster))
centers = matrix(NA, nClusters, ncol(Z))
for (cl in 1:nClusters)
{
idx = which(object$unsupervisedModel$cluster == cl)
centers[cl,] = colMeans(Z[idx, , drop = FALSE])
}
object$unsupervisedModel$centers = centers
rownames(object$unsupervisedModel$centers) = 1:nClusters
return(object)
}
mergeClusters <- function(object, whichOnes)
{
whichOnes = sort(whichOnes)
idx1 = which(object$unsupervisedModel$cluster == whichOnes[1])
object$unsupervisedModel$cluster[idx1] = whichOnes[2]
flag = FALSE
if (!is.null(object$unsupervisedModel$clusterOutliers))
{
idx1 = which(object$unsupervisedModel$clusterOutliers == whichOnes[1])
if (length(idx1) > 0) { object$unsupervisedModel$clusterOutliers[idx1] = whichOnes[2] }
flag = TRUE
}
idx2 = which(object$unsupervisedModel$cluster == whichOnes[2])
object$unsupervisedModel$centers[whichOnes[2],] = t(colMeans(object$MDSModel$points[idx2,]))
object$unsupervisedModel$centers = object$unsupervisedModel$centers[-whichOnes[1],]
idx = sort(unique(object$unsupervisedModel$cluster))
for (i in 1:length(idx))
{
newIdx = which(object$unsupervisedModel$cluster == idx[i])
object$unsupervisedModel$cluster[newIdx] = i
if (flag)
{
newOutliersIdx = which(object$unsupervisedModel$clusterOutliers == idx[i])
if (length(newOutliersIdx) > 0) { object$unsupervisedModel$clusterOutliers[newOutliersIdx] = i }
}
}
rownames(object$unsupervisedModel$centers) = 1:i
cat(paste("\ncluster ", idx, " has been changed to ", 1:i, sep=""))
cat("\n")
return(object)
}
clusteringObservations <- function(object, X, OOB = TRUE, predObject = NULL, importanceObject = NULL,
baseModel = c("proximity", "proximityThenDistance", "importanceThenDistance"),
MDSmetric = c("metricMDS", "nonMetricMDS"),
endModel = c("MDS", "MDSkMeans", "MDShClust", "SpectralkMeans"),
...)
{
clusterObject <- unsupervised.randomUniformForest(object, Xtest = X, importanceObject = importanceObject, predObject = predObject, baseModel = baseModel[1], MDSmetric = MDSmetric[1], endModel = endModel[1],
outliersFilter = FALSE, OOB = OOB, ...)
plot(clusterObject, importanceObject = importanceObject, ...)
return(clusterObject)
}
as.supervised <- function(object, X, ...)
{
if (!inherits(object, "unsupervised"))
{ stop("Please provide an unsupervised randomUniformForest object.\n") }
n = nrow(X)
clusters = object$unsupervisedModel$cluster
if (!is.null(object$unsupervisedModel$clusterOutliers))
{ clusters = c(clusters, object$unsupervisedModel$clusterOutliers) }
if (is.null(names(clusters))) { names(clusters) = 1:n }
clusters = sortDataframe( data.frame(clusters, as.numeric(names(clusters ))),2)
Y = as.factor(clusters[,1])
if (n < 10000) { rUFObject <- randomUniformForest(X, Y, ...) }
else { rUFObject <- rUniformForest.big(X, Y, nforest = min(2, floor(n/2000)), randomCut = TRUE, ...) }
return(rUFObject)
}
update.unsupervised <- function(object, X = NULL, oldData = NULL, mapAndReduce = FALSE, updateModel = FALSE, ...)
{
if(is.null(X)) { stop("'X' is null. Please provide new data.") }
endModel = object$params["endModel"]
clusters = as.numeric(object$params["clusters"])
maxIters = if (object$params["maxIters"] == "NULL") NULL else as.numeric(object$params["maxIters"])
endModelMetric = if (object$params["endModelMetric"] == "NULL") NULL else object$params["endModelMetric"]
reduceClusters = if (object$params["reduceClusters"] == "FALSE") FALSE else TRUE
seed = as.numeric(object$params["seed"])
if ( (endModel != "MDShClust") & (endModel != "MDSkMeans") & (endModel != "SpectralkMeans") )
{
stop("Update is only available for models which were using endModel = 'MDShClust', endModel = 'MDSkMeans' or endModel = 'SpectralkMeans' options")
}
p = ncol(object$MDSModel$points)
updateLearningMDS = predictedMDS = learningMDS = vector('list', p)
if (is.null(object$largeDataLearningModel))
{
if (is.null(oldData))
{ stop("Former data are needed to (learn MDS points and) update model.\n") }
else
{
if (mapAndReduce)
{
for (j in 1:p)
{ learningMDS[[j]] = rUniformForest.big(oldData, object$MDSModel$points[,j], randomCut = TRUE, ...) }
}
else
{
for (j in 1:p)
{ learningMDS[[j]] = randomUniformForest(oldData, object$MDSModel$points[,j], ...) }
}
}
}
else
{
for (j in 1:p)
{ learningMDS[[j]] = object$largeDataLearningModel[[j]] }
}
formerMDSPoints = object$MDSModel$points
for (j in 1:p)
{ predictedMDS[[j]] = predict(learningMDS[[j]], X) }
newMDSPoints = do.call(cbind, predictedMDS)
Z = rbind(formerMDSPoints, newMDSPoints)
if (updateModel)
{
if (mapAndReduce)
{
for (j in 1:p)
{ updateLearningMDS[[j]] = rUniformForest.big(X, predictedMDS[[j]], randomCut = TRUE, ...) }
}
else
{
for (j in 1:p) { updateLearningMDS[[j]] = randomUniformForest(X, predictedMDS[[j]], ...) }
}
for (j in 1:p) { learningMDS[[j]] = rUniformForest.combine(learningMDS[[j]], updateLearningMDS[[j]]) }
}
if ( (endModel[1] == "MDSkMeans") | (endModel[1] == "SpectralkMeans") )
{
X.model <- kMeans(Z, NULL, k = clusters, maxIters = maxIters, plotting = FALSE, algorithm = endModelMetric, reduceClusters = reduceClusters, seed = seed)
}
if (endModel[1] == "MDShClust")
{
X.model <- hClust(Z, method = endModelMetric, plotting = FALSE, k = clusters,
reduceClusters = reduceClusters, seed = seed)
centers = matrix(NA, clusters, ncol(Z))
for (i in 1:clusters)
{
idx = which(X.model$cluster == i)
centers[i,] = colMeans(Z[idx,, drop = FALSE])
}
X.model$centers = centers
}
object$MDSModel$points = Z
largeDataLearningModel = learningMDS
unsupervisedObject = list(proximityMatrix = object$proximityMatrix, MDSModel = object$MDSModel,
unsupervisedModel = X.model, largeDataLearningModel = largeDataLearningModel, gapStatistics = NULL,
rUFObject = object$rUFObject, nbClusters = clusters, params = object$params)
class(unsupervisedObject) <- "unsupervised"
unsupervisedObject
}
combineUnsupervised <- function(...)
{
object <- list(...)
n = length(object)
i = 1
endModel = object[[i]]$params["endModel"]
clusters = as.numeric(object[[i]]$params["clusters"])
maxIters = if (object[[i]]$params["maxIters"] == "NULL") NULL else as.numeric(object[[i]]$params["maxIters"])
endModelMetric = if (object[[i]]$params["endModelMetric"] == "NULL") NULL else object[[i]]$params["endModelMetric"]
reduceClusters = if (object[[i]]$params["reduceClusters"] == "FALSE") FALSE else TRUE
seed = as.numeric(object[[i]]$params["seed"])
if ( (endModel != "MDShClust") & (endModel != "MDSkMeans") & (endModel != "SpectralkMeans") )
{
stop("Combine is only available for models which were using endModel = 'MDShClust', endModel = 'MDSkMeans' or endModel = 'SpectralkMeans' options")
}
Z = NULL
for (i in 1:n)
{ Z = rbind(Z, object[[i]]$MDSModel$points) }
if ( (endModel[1] == "MDSkMeans") | (endModel[1] == "SpectralkMeans") )
{
X.model <- kMeans(Z, NULL, k = clusters, maxIters = maxIters, plotting = FALSE, algorithm = endModelMetric, reduceClusters = reduceClusters, seed = seed)
}
if (endModel[1] == "MDShClust")
{
X.model <- hClust(Z, method = endModelMetric, plotting = FALSE, k = clusters,
reduceClusters = reduceClusters, seed = seed)
X.gapstat = NULL
}
X.scale = list()
X.scale$points = Z
unsupervisedObject = list(proximityMatrix = NULL, MDSModel = X.scale,
unsupervisedModel = X.model, largeDataLearningModel = NULL, gapStatistics = NULL,
rUFObject = NULL, nbClusters = clusters, params = object[[1]]$params)
class(unsupervisedObject) <- "unsupervised"
unsupervisedObject
}
scalingMDS <- function(object)
{
if (is.null(object$MDSModel$standardizedPoints))
{ object$MDSModel$points = standardize(object$MDSModel$points) }
else
{ object$MDSModel$points = object$MDSModel$standardizedPoints }
return(object)
}
updateCombined.unsupervised <- function(object, X, mapAndReduce = FALSE, ...)
{
if (!is.null(object$largeDataLearningModel))
{ stop("The function can only rebuild a learning model of MDS (or spectral) points for formerly combined objects.\n") }
if (nrow(X) != nrow(object$MDSModel$points))
{ stop("MDS (or spectral) points and data do not have the same number of rows.\n") }
p = ncol(object$MDSModel$points)
learningMDS = vector('list', p)
if (mapAndReduce)
{
for (j in 1:p)
{ learningMDS[[j]] = rUniformForest.big(X, object$MDSModel$points[,j], randomCut = TRUE, ...) }
}
else
{
for (j in 1:p)
{ learningMDS[[j]] = randomUniformForest(X, object$MDSModel$points[,j], ...) }
}
unsupervisedObject = list(proximityMatrix = NULL, MDSModel = object$MDSModel,
unsupervisedModel = object$unsupervisedModel, largeDataLearningModel = learningMDS,
gapStatistics = object$gapStatistics, rUFObject = object$rUFObject, nbClusters = object$nbClusters,
params = object$params)
class(unsupervisedObject) <- "unsupervised"
unsupervisedObject
}
clusterAnalysis <- function(object, X, components = 2, maxFeatures = 2, clusteredObject = NULL, categorical = NULL,
OOB = FALSE)
{
flagList = FALSE
if (!is.null(clusteredObject))
{
if (is.list(clusteredObject)) { flagList = TRUE }
}
p = ncol(X)
p.new = floor((ncol(object$localVariableImportance$obsVariableImportance)-1)/2)
if (p.new < components)
{
cat("Requested number of components exceeds those available: components have been reduced accordingly.\nIncrease 'maxInteractions' in the importance() function in order to allow more components.\n")
components = p.new
}
n = nrow(X)
varIdx = c(1:(components+1), (p.new+2):(p.new + components + 1))
if (class(clusteredObject)[1] == "unsupervised") { clusterName = "Clusters" }
else { clusterName = "Class" }
featuresAndObs = as.data.frame(object$localVariableImportance$obsVariableImportance)
frequencyFeaturesIdx = grep("Frequency", colnames(featuresAndObs))
featuresNames = apply(featuresAndObs[,-c(1,frequencyFeaturesIdx)], 2, function(Z) colnames(X)[Z])
featuresAndObs[,-c(1,frequencyFeaturesIdx)] = featuresNames
groupedAnalysis = aggregate(featuresAndObs[,2:min((components+1), p.new+1)], list(featuresAndObs$class),
function(Z) names(sort(table(Z), decreasing = TRUE)), simplify = FALSE)
groupedAnalysisNew = groupedAnalysis
pp = ncol(groupedAnalysis)
for (j in 2:pp)
{
groupedAnalysisNew[[j]] = lapply(groupedAnalysis[[j]],
function(Z) as.character(Z[1:(min(length(Z),maxFeatures))]))
}
groupedAnalysis = groupedAnalysisNew
groupedFrequencies = aggregate(featuresAndObs[,(2 + p.new):min((p.new + 1 + components), 2*p.new+1)],
list(featuresAndObs$class), function(Z) round(mean(Z),4))
colnames(groupedAnalysis)[1] = colnames(groupedFrequencies)[1] = colnames(featuresAndObs)[1] = clusterName
newNames = vector(length = pp-1)
for (i in 2:ncol(groupedAnalysis)) { newNames[i-1] = paste("Component", i-1, sep = "") }
colnames(groupedAnalysis)[-1] = newNames
colnames(groupedFrequencies)[-1] = rm.string(colnames(groupedFrequencies)[-1], "localVariable")
if (flagList)
{
if (!is.null(clusteredObject$classes))
{
featuresAndObs[,1] = clusteredObject$classes[featuresAndObs[,1]]
groupedAnalysis[,1] = clusteredObject$classes[groupedAnalysis[,1]]
groupedFrequencies[,1] = clusteredObject$classes[groupedFrequencies[,1]]
}
}
else
{
if (is.factor(clusteredObject))
{
## a factor has no $classes element; map cluster indices to the factor levels instead
featuresAndObs[,1] = levels(clusteredObject)[featuresAndObs[,1]]
groupedAnalysis[,1] = levels(clusteredObject)[groupedAnalysis[,1]]
groupedFrequencies[,1] = levels(clusteredObject)[groupedFrequencies[,1]]
}
}
cat("Clustered observations:\n")
print(head(featuresAndObs[,varIdx]))
cat("...\n\nMost influential features by component (", maxFeatures," features per component):\n", sep="")
print(groupedAnalysis)
cat("\nComponent frequencies (", components, " out of ", p," possible ones):\n", sep="")
print(groupedFrequencies)
cat("\n\n")
if (!is.null(clusteredObject))
{
aggregateCategorical = aggregateNumerical = NULL
if ((class(clusteredObject)[1] == "unsupervised")) { Class = mergeOutliers(clusteredObject) }
else
{
if ( (class(clusteredObject)[1] == "randomUniformForest.formula") || (class(clusteredObject)[1] == "randomUniformForest") )
{
if (OOB)
{
if (is.null(clusteredObject$classes)) { stop("'clusteredObject' is not a classification model") }
if (length(clusteredObject$forest$OOB.predicts) == n)
{ Class = clusteredObject$classes[clusteredObject$forest$OOB.predicts] }
else
{ stop("Length of OOB predictions is not equal to the number of rows in the data") }
}
else
{
if (is.null(clusteredObject$classes)) { stop("'clusteredObject' is not a classification model") }
if (!is.null(clusteredObject$predictionObject))
{ Class = clusteredObject$classes[clusteredObject$predictionObject$majority.vote] }
else
{ stop("Prediction object is missing. Please provide a full prediction object (type = 'all') when calling the predict() function") }
}
}
else
{ Class = clusteredObject }
Class = as.factor(Class)
}
classSize = tabulate(Class)
if (is.null(categorical))
{
categorical = which.is.factor(X, maxClasses = n)
keepIdx = which(categorical == 0)
}
else
{
if (is.character(categorical))
{ categorical = which(colnames(X) == categorical) }
keepIdx = (1:p)[-categorical]
}
pp = length(keepIdx)
if (pp > 0)
{
localImportanceVariables = names(object$variableImportanceOverInteractions)
keepIdx = keepIdx[rmNA(match(rmNA(match(localImportanceVariables, colnames(X))), keepIdx))]
nFeatures = min(10, length(keepIdx))
aggregateNumerical = aggregate(X[,keepIdx], list(Class), sum)
aggregateNumerical = cbind(aggregateNumerical[,1], classSize, aggregateNumerical[,-1])
colnames(aggregateNumerical)[1] = clusterName
colnames(aggregateNumerical)[2] = "Size"
colnames(aggregateNumerical)[-c(1,2)] = colnames(X)[keepIdx]
cat("Numerical features aggregation (", min(10, nFeatures)," most important ones by their interactions):\n", sep="")
if (nFeatures < 10) { print(aggregateNumerical) }
else { print(aggregateNumerical[,1:10]) }
cat("\n")
averageNumerical = aggregate(X[,keepIdx], list(Class), function(Z) round(mean(Z), 4))
averageNumerical = cbind(averageNumerical[,1], classSize, averageNumerical[,-1])
colnames(averageNumerical)[1] = clusterName
colnames(averageNumerical)[2] = "Size"
colnames(averageNumerical)[-c(1,2)] = colnames(X)[keepIdx]
cat("Numerical features average (", min(10, nFeatures) ," most important ones by their interactions):\n", sep="")
if (nFeatures < 10) { print(averageNumerical) }
else { print(averageNumerical[,1:10]) }
cat("\n")
standardDeviationNumerical = aggregate(X[,keepIdx], list(Class), function(Z) round(sd(Z), 4))
standardDeviationNumerical = cbind(standardDeviationNumerical[,1], classSize, standardDeviationNumerical[,-1])
colnames(standardDeviationNumerical)[1] = clusterName
colnames(standardDeviationNumerical)[2] = "Size"
colnames(standardDeviationNumerical)[-c(1,2)] = colnames(X)[keepIdx]
cat("Numerical features (standard) deviation (", nFeatures ," most important ones by their interactions):\n", sep="")
if (nFeatures < 10) { print(standardDeviationNumerical) }
else { print(standardDeviationNumerical[,1:10]) }
if (pp < p)
{
flag = FALSE
keepIdx2 = (1:p)[-keepIdx]
keepIdx2 = keepIdx2[rmNA(match(rmNA(match(localImportanceVariables, colnames(X))), keepIdx2))]
if (length(keepIdx2) == 0) { keepIdx2 = (1:p)[-keepIdx]; flag = TRUE }
nFeatures = min(10, length(keepIdx2))
aggregateCategorical = aggregate(X[,keepIdx2], list(Class), function(Z) names(which.max(table(Z))))
aggregateCategorical = cbind(aggregateCategorical[,1], classSize, aggregateCategorical[,-1])
colnames(aggregateCategorical)[1] = clusterName
colnames(aggregateCategorical)[2] = "Size"
colnames(aggregateCategorical)[-c(1,2)] = colnames(X)[keepIdx2]
if (flag)
{ cat("\nCategorical features aggregation (", min(10, nFeatures) ," first features):\n", sep="") }
else
{ cat("\nCategorical features aggregation (", min(10, nFeatures)," most important ones by their interactions):\n", sep="") }
if (nFeatures < 10) { print(aggregateCategorical) }
else { print(aggregateCategorical[,1:10]) }
cat("\n")
}
}
if (pp == 0)
{
keepIdx2 = (1:p)
nFeatures = min(10, length(keepIdx2))
aggregateCategorical = aggregate(X[,keepIdx2], list(Class), function(Z) names(which.max(table(Z))))
aggregateCategorical = cbind(aggregateCategorical[,1], classSize, aggregateCategorical[,-1])
colnames(aggregateCategorical)[1] = clusterName
colnames(aggregateCategorical)[2] = "Size"
colnames(aggregateCategorical)[-c(1,2)] = colnames(X)[keepIdx2]
cat("Categorical features aggregation (", nFeatures, " first features):\n", sep="")
if (nFeatures < 10) { print(aggregateCategorical) }
else { print(aggregateCategorical[,1:10]) }
}
return(list(featuresAndObs = featuresAndObs, clusterAnalysis = groupedAnalysis,
componentAnalysis = groupedFrequencies, numericalFeaturesAnalysis = aggregateNumerical, categoricalFeaturesAnalysis = aggregateCategorical))
}
else
{
return(list(featuresAndObs = featuresAndObs, clusterAnalysis = groupedAnalysis,
componentAnalysis = groupedFrequencies))
}
}
rm.coordinates <- function(object, whichOnes, seed = NULL, maxIters = NULL)
{
Z = object$MDSModel$points = object$MDSModel$points[,-whichOnes]
object$unsupervisedModel$centers = object$unsupervisedModel$centers[,-whichOnes]
if (is.null(seed)) { seed = as.numeric(object$params["seed"]) }
if (is.null(maxIters)) { maxIters = 10 }
if (object$params["endModel"] == "MDShClust")
{
X.model <- hClust(Z, method = NULL, plotting = FALSE, k = as.numeric(object$params["clusters"]),
reduceClusters = FALSE, seed = seed)
X.gapstat = NULL
nbClusters = length(unique(X.model$cluster))
centers = matrix(NA, nbClusters, ncol(Z))
for (i in 1:nbClusters)
{
idx = which(X.model$cluster == i)
centers[i,] = colMeans(Z[idx, ,drop = FALSE])
}
X.model$centers = centers
}
else
{
X.model <- kMeans(Z, NULL, k = as.numeric(object$params["clusters"]),
maxIters = maxIters, plotting = FALSE, algorithm = NULL, reduceClusters = FALSE, seed = seed)
}
object$unsupervisedModel = X.model
return(object)
}
unsupervised.randomUniformForest <- function(object,
baseModel = c("proximity", "proximityThenDistance", "importanceThenDistance"),
endModel = c("MDSkMeans", "MDShClust", "MDS", "SpectralkMeans"),
endModelMetric = NULL,
samplingMethod = c("uniform univariate sampling", "uniform multivariate sampling", "with bootstrap"),
MDSmetric = c("metricMDS", "nonMetricMDS"),
proximityMatrix = NULL,
sparseProximities = FALSE,
outliersFilter = FALSE,
Xtest = NULL,
predObject = NULL,
metricDimension = 2,
coordinates = c(1,2),
bootstrapReplicates = 100,
clusters = NULL,
maxIters = NULL,
importanceObject = NULL,
maxInteractions = 2,
reduceClusters = FALSE,
maxClusters = 5,
mapAndReduce = FALSE,
OOB = FALSE,
subset = NULL,
seed = 2014,
uthreads = "auto",
...)
{
if (!is.null(predObject))
{
if (is.null(predObject$votes.data))
{ stop("predObject must be provided using option type = 'all' when calling the predict() function.\n") }
}
if (!is.null(Xtest))
{
Xtest = NAfactor2matrix(Xtest, toGrep = "anythingParticular")
if (length(which(is.na(Xtest))) > 0) { stop("NA found in data. Please treat them outside of the model.\n") }
}
flagBig = FALSE
objectClass = class(object)
if ( all(objectClass != "randomUniformForest") )
{
if (!is.null(subset)) { object = object[subset,] }
X = object
n = nrow(X)
if ( (n > 10000) | mapAndReduce)
{
set.seed(seed)
subsetIdx = sample(n, min(10000, floor(n/2)))
bigX = X[-subsetIdx, ]
X = X[subsetIdx,]
n = nrow(X)
flagBig = TRUE
}
if (samplingMethod[1] == "with bootstrap")
{
cat("'with bootstrap' is only needed as the second element of the 'samplingMethod' option. The default sampling method will be used.\n")
samplingMethod[1] = "uniform univariate sampling"
}
XY <- unsupervised2supervised(X, method = samplingMethod[1], seed = seed,
bootstrap = if (length(samplingMethod) > 1) { TRUE } else {FALSE })
cat("Created synthetic data and assigned labels.\n")
if ((n > 10000) | mapAndReduce)
{
rUF.model <- rUniformForest.big(XY$X, as.factor(XY$Y), BreimanBounds = FALSE, unsupervised = TRUE, unsupervisedMethod = samplingMethod[1], randomCut = TRUE,...)
}
else
{
rUF.model <- randomUniformForest(XY$X, as.factor(XY$Y), BreimanBounds = FALSE,
unsupervised = TRUE, unsupervisedMethod = samplingMethod[1],...)
}
if (is.null(Xtest)) { Xtest = XY$X[1:n,] }
}
else
{
if (any(objectClass == "randomUniformForest")) { rUF.model = object }
else { stop("Data are missing or object is not of class randomUniformForest.") }
n = if (is.null(object$unsupervised)) { length(object$y) } else { length(object$y)/2 }
}
if (!is.null(Xtest))
{
n = nrow(Xtest)
if ( ((n > 10000) | mapAndReduce) & !flagBig )
{
set.seed(seed)
subsetIdx = sample(n, min(10000, floor(n/2)))
bigX = Xtest[-subsetIdx, ]
Xtest = Xtest[subsetIdx,]
n = nrow(Xtest)
flagBig = TRUE
}
}
if (baseModel[1] != "importanceThenDistance")
{
if (is.null(proximityMatrix))
{
proxMat <- proximitiesMatrix(rUF.model, fullMatrix = TRUE, Xtest = Xtest, predObject = predObject,
sparseProximities = sparseProximities, pthreads = uthreads)
}
else
{ proxMat = proximityMatrix }
}
else
{
if (!is.null(importanceObject)) { imp.rUFModel = importanceObject }
else { imp.rUFModel <- importance(rUF.model, Xtest = Xtest, maxInteractions = maxInteractions) }
proxMat <- observationsImportance(Xtest, imp.rUFModel)
}
p = ncol(proxMat)
grepMDS = length(grep("MDS", endModel[1]))
grepSpectral = length(grep("Spectral", endModel[1]))
if ((grepMDS == 1) | (grepSpectral == 1))
{
if (grepSpectral == 1)
{
X.scale = list()
cat("Spectral clustering points have been stored in ...$MDSModel for compatibility with new points and other clustering objects that may need to be updated.\n\n")
Z <- specClust(proxMat, k = max(3, metricDimension))
Z = Z[, coordinates]
}
else
{
distanceProxy = TRUE
if (baseModel[1] == "proximity") { distanceProxy = FALSE }
X.scale <- MDSscale(proxMat, metric = MDSmetric[1], dimension = metricDimension, distance = distanceProxy, plotting = FALSE, seed = seed)
Z = X.scale$points[, ,drop = FALSE]
if (dim(Z)[2] == 0)
stop("MDS cannot be achieved. Data can probably not be clustered using this (randomUniformForest) dissimilarity matrix.\nThe options used might need to be reconsidered.\n")
}
if (flagBig)
{
pZ = ncol(Z)
X.scale.ruf = vector('list', pZ)
newZ = matrix(NA, nrow(Z) + nrow(bigX), pZ)
if (mapAndReduce)
{
cat("Entered in regression mode to learn MDS/spectral points.\n")
for (i in 1:pZ)
{
X.scale.ruf[[i]] = rUniformForest.big(X, Z[,i], randomCut = TRUE,...)
newZ[-subsetIdx,i] = predict(X.scale.ruf[[i]], bigX)
newZ[subsetIdx,i] = Z[,i]
}
}
else
{
for (i in 1:pZ)
{
X.scale.ruf[[i]] = randomUniformForest(X, Z[,i], ...)
newZ[-subsetIdx,i] = predict(X.scale.ruf[[i]], bigX)
newZ[subsetIdx,i] = Z[,i]
}
}
Z = newZ
}
nn = nrow(Z)
if (endModel[1] == "MDS") { outliersFilter = FALSE }
if (outliersFilter)
{
idx1 = which( (Z[,1] < quantile(Z[,1], 0.025)) & (Z[,2] < quantile(Z[,2], 0.025)) )
idx2 = which( (Z[,1] > quantile(Z[,1], 0.975)) & (Z[,2] > quantile(Z[,2], 0.975)))
idx12 = c(idx1, idx2)
if (length(idx12) > 0)
{
cat("Outliers have been removed for the two first coordinates.\n")
Z = Z[-idx12,]
}
nn = nrow(Z)
}
if (is.null(predObject))
{
if (endModel[1] == "MDS")
{
X.gapstat = NULL
if (is.null(object$unsupervised))
{
if (OOB)
{
if (!is.null(object$forest$OOB.predicts))
{ predictions = object$forest$OOB.predicts }
else
{
cat("OOB option has been called, but there is currently no OOB classifier. Hence true labels have been used.\n")
predictions = object$y
}
}
else
{
if (is.null(object$predictionObject)) { predictions = object$y }
else { predictions = object$predictionObject$majority.vote }
}
classes = sort(unique(predictions))
k = length(classes)
{
centers = matrix(0, k, ncol(Z))
for (i in 1:k)
{
idx = which(predictions == classes[i])
centers[i,] = colMeans(Z[idx,])
}
}
rownames(centers) = as.character(classes)
X.model = list(cluster = predictions, centers = centers)
}
else
{
cat("Clustering cannot be done except in supervised mode.\nPlease send ...$MDSModel$points to a clustering algorithm.\n")
X.model = list(cluster = NULL, centers = NULL)
}
}
if ( (endModel[1] == "MDSkMeans") | (endModel[1] == "SpectralkMeans") )
{
if (is.null(clusters))
{ X.gapstat <- gap.stats(Z, B = bootstrapReplicates, maxClusters = maxClusters, seed = seed) }
else
{ X.gapstat = NULL }
X.model <- kMeans(Z, X.gapstat, k = clusters, maxIters = maxIters, plotting = FALSE, algorithm = endModelMetric, reduceClusters = reduceClusters, seed = seed)
if (endModel[1] == "SpectralkMeans") { X.scale$points = Z }
}
if (endModel[1] == "MDShClust")
{
X.model <- hClust(Z, method = endModelMetric, plotting = FALSE, k = clusters,
reduceClusters = reduceClusters, seed = seed)
X.gapstat = NULL
nbClusters = length(unique(X.model$cluster))
centers = matrix(NA, nbClusters, ncol(Z))
for (i in 1:nbClusters)
{
idx = which(X.model$cluster == i)
centers[i,] = colMeans(Z[idx,, drop = FALSE])
}
X.model$centers = centers
}
if (outliersFilter)
{
if (length(idx12) > 0)
{
if ( (endModel[1] == "MDSkMeans") | (endModel[1] == "SpectralkMeans") )
{ clusterOutliers <- which.is.nearestCenter(Z[c(idx1,idx2),], X.model$centers) }
if (endModel[1] == "MDShClust")
{
nbClusters = length(unique(X.model$cluster))
centers = matrix(NA, nbClusters, ncol(Z))
for (i in 1:nbClusters)
{
idx = which(X.model$cluster == i)
centers[i,] = colMeans(Z[idx,, drop = FALSE])
}
clusterOutliers <- which.is.nearestCenter(Z[c(idx1,idx2),], centers)
}
names(clusterOutliers) = c(idx1,idx2)
X.model$clusterOutliers = clusterOutliers
}
}
}
else
{
predictions = predObject$majority.vote
classes = sort(unique(predictions))
k = length(classes)
centers = matrix(0, k, ncol(Z))
for (i in 1:k)
{
idx = which(predictions == classes[i])
centers[i,] = colMeans(Z[idx,])
}
rownames(centers) = as.character(classes)
X.model = list(cluster = predictions, centers = centers)
X.gapstat = NULL
}
}
else
{
if (endModel[1] == "kMeans")
{
X.scale = NULL
if (is.null(clusters))
{
X.gapstat <- gap.stats(proxMat[sample(n, floor(n/2)),, drop = FALSE], B = bootstrapReplicates, maxClusters = maxClusters, seed = seed)
}
else
{ X.gapstat = NULL }
X.model <- kMeans(proxMat, X.gapstat, k = clusters, maxIters = maxIters, plotting = FALSE, algorithm = endModelMetric, seed = seed)
}
if (endModel[1] == "hClust")
{
X.scale = NULL
X.model <- hClust(proxMat, method = endModelMetric, plotting = FALSE, k = clusters, seed = seed)
X.gapstat = NULL
}
}
clusters = length(unique(X.model$cluster))
modelParams = c(baseModel[1], endModel[1], if (is.null(endModelMetric)) { "NULL" } else { endModelMetric },
samplingMethod[1], MDSmetric[1], is.null(Xtest), is.null(predObject), metricDimension, concatCore(as.character(coordinates)), bootstrapReplicates, clusters, if (is.null(maxIters)) { "NULL" } else { maxIters },
maxInteractions, maxClusters, mapAndReduce, outliersFilter, reduceClusters, seed, sparseProximities)
names(modelParams) = c("baseModel", "endModel", "endModelMetric", "samplingMethod", "MDSmetric", "Xtest", "predObject", "metricDimension", "coordinates", "bootstrapReplicates", "clusters", "maxIters", "maxInteractions", "maxClusters", "mapAndReduce", "outliersFilter", "reduceClusters", "seed", "sparseProximities")
if (flagBig)
{
X.scale$points = Z
largeDataLearningModel = X.scale.ruf
unsupervisedObject = list(proximityMatrix = proxMat, MDSModel = X.scale, unsupervisedModel = X.model, largeDataLearningModel = largeDataLearningModel, gapStatistics = X.gapstat, rUFObject = rUF.model,
nbClusters = clusters, params = modelParams)
}
else
{
unsupervisedObject = list(proximityMatrix = proxMat, MDSModel = X.scale, unsupervisedModel = X.model, gapStatistics = X.gapstat, rUFObject = rUF.model, nbClusters = clusters, params = modelParams)
}
class(unsupervisedObject) <- "unsupervised"
unsupervisedObject
} |
readSC <- function(filename = NULL, data = NULL, sep = ",", dec = ".", sort.labels = FALSE, cvar = "case", pvar = "phase", dvar = "values", mvar = "mt", phase.names = NULL, type = "csv", ...) {
if (is.null(filename) && is.null(data)) {
filename <- file.choose()
cat("Import file", filename, "\n\n")
}
if (!is.null(data)) {
type <- "data"
dat <- as.data.frame(data)
}
if (type == "csv")
dat <- utils::read.table(filename, header = TRUE, sep = sep, dec = dec, stringsAsFactors = FALSE,...)
if (type == "excel") {
if (!requireNamespace("readxl", quietly = TRUE)) {
stop("Package readxl needed for this function to work. Please install it.",
call. = FALSE)
}
dat <- as.data.frame(readxl::read_excel(filename, ...))
}
VARS <- c(cvar, pvar, dvar, mvar)
columns <- ncol(dat)
if (!sort.labels) {
dat[[cvar]] <- factor(dat[[cvar]], levels = unique(dat[[cvar]]))
} else {
dat[[cvar]] <- factor(dat[[cvar]])
}
dat[[pvar]] <- factor(dat[[pvar]], levels = unique(dat[[pvar]]))
if (!is.null(phase.names)) levels(dat[[pvar]]) <- phase.names
lab <- levels(dat[[cvar]])
dat <- split(dat, dat[[cvar]])
dat <- lapply(dat, function(x) x[, 2:columns])
for(i in 1:length(dat)) row.names(dat[[i]]) <- 1:nrow(dat[[i]])
names(dat) <- lab
cat("Imported", length(dat), "cases.\n")
class(dat) <- c("scdf","list")
scdf_attr(dat, .opt$phase) <- pvar
scdf_attr(dat, .opt$dv) <- dvar
scdf_attr(dat, .opt$mt) <- mvar
return(dat)
}
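## Usage sketch (hypothetical file name): read a CSV with the default columns
## 'case', 'phase', 'values' and 'mt'; the result is an 'scdf' list with one
## data frame per case.
if (FALSE) {
  dat <- readSC("single_case_data.csv", sep = ",", dec = ".")
  str(dat)
}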
readSC.excel <- function(...) {
readSC(..., type = "excel")
} |
test_that("get_zip_data", {
on.exit(try(unlink(tmp, recursive = TRUE)), add = TRUE)
dir.create(tmp <- tempfile())
expect_equal(
get_zip_data_nopath_recursive(tmp),
df(paste0(basename(tmp), "/"), normalizePath(tmp), TRUE)
)
expect_equal(get_zip_data_nopath_recursive(tmp), get_zip_data_nopath(tmp, TRUE))
foobar <- file.path(tmp, "foobar")
cat("foobar", file = foobar)
expect_equal(
get_zip_data_nopath_recursive(foobar),
df(basename(foobar), normalizePath(foobar), FALSE)
)
expect_equal(get_zip_data_nopath_recursive(foobar), get_zip_data_nopath(foobar, TRUE))
expect_equal(
get_zip_data_nopath_recursive(tmp),
df(c(paste0(basename(tmp), "/"), file.path(basename(tmp), "foobar")),
normalizePath(c(tmp, foobar)),
c(TRUE, FALSE)
)
)
expect_equal(get_zip_data_nopath_recursive(tmp), get_zip_data_nopath(tmp, TRUE))
expect_equal(
withr::with_dir(tmp, get_zip_data_nopath_recursive(".")),
df(c(paste0(basename(tmp), "/"), file.path(basename(tmp), "foobar")),
normalizePath(c(tmp, foobar)),
c(TRUE, FALSE)
)
)
withr::with_dir(tmp,
expect_equal(get_zip_data_nopath_recursive("."), get_zip_data_nopath(".", TRUE))
)
dir.create(file.path(tmp, "empty"))
dir.create(file.path(tmp, "foo"))
bar <- file.path(tmp, "foo", "bar")
cat("bar\n", file = bar)
data <- df(
c(paste0(basename(tmp), "/"),
paste0(file.path(basename(tmp), "empty"), "/"),
paste0(file.path(basename(tmp), "foo"), "/"),
file.path(basename(tmp), "foo", "bar"),
file.path(basename(tmp), "foobar")),
normalizePath(c(
tmp, file.path(tmp, "empty"), file.path(tmp, "foo"),
bar, file.path(tmp, "foobar"))),
c(TRUE, TRUE, TRUE, FALSE, FALSE)
)
data <- data[order(data$file), ]
rownames(data) <- NULL
data2 <- get_zip_data_nopath_recursive(tmp)
data2 <- data2[order(data2$file), ]
rownames(data2) <- NULL
expect_equal(data2, data)
expect_equal(get_zip_data_nopath(tmp, TRUE), data)
expect_equal(
get_zip_data_nopath(c(foobar, bar), TRUE),
df(c("foobar", "bar"),
normalizePath(c(foobar, bar)),
c(FALSE, FALSE))
)
expect_equal(
get_zip_data_nopath(file.path(tmp, "foo"), TRUE),
df(c("foo/", "foo/bar"),
normalizePath(c(file.path(tmp, "foo"), file.path(tmp, "foo", "bar"))),
c(TRUE, FALSE)
)
)
}) |
context("ma")
set.seed(313)
wrap(psma_model_psma <- publipha::psma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10, refresh = 0
))
set.seed(313)
wrap(psma_model_ma <- publipha::ma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10,
bias = "publication selection", refresh = 0
))
set.seed(313)
wrap(phma_model_phma <- publipha::phma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10, refresh = 0
))
set.seed(313)
wrap(phma_model_ma <- publipha::ma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10,
bias = "p-hacking", refresh = 0
))
set.seed(313)
wrap(cma_model_cma <- publipha::cma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10, refresh = 0
))
set.seed(313)
wrap(cma_model_ma <- publipha::ma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10,
bias = "none", refresh = 0
))
expect_equal(extract_theta0(psma_model_psma), extract_theta0(psma_model_ma))
expect_equal(extract_theta0(phma_model_phma), extract_theta0(phma_model_ma))
expect_equal(extract_theta0(cma_model_cma), extract_theta0(cma_model_ma))
expect_error(publipha::ma(vi, yi,
data = dat.baskerville2012,
bias = "haha"
))
expect_error(publipha::phma(
yi = yi, vi = vi, data = dat.baskerville2012,
chains = 1, iter = 10, refresh = 0, prior = list(a = 5)
))
prior <- list(
eta0 = c(3, 2, 1),
theta0_mean = 10,
theta0_sd = 0.1,
tau_mean = 1,
tau_sd = 1
)
set.seed(313)
wrap(model1 <- publipha::ma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10,
bias = "none", refresh = 0
))
set.seed(313)
wrap(model2 <- publipha::ma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10,
bias = "none", refresh = 0, prior = prior
))
expect_lt(extract_theta0(model1), extract_theta0(model2))
wrap(model <- publipha::allma(
yi = yi, vi = vi,
data = dat.baskerville2012,
chains = 1,
iter = 10, refresh = 0, prior = prior
))
expect_equal(length(model), 3) |
mlnormal_log_det <- function(matr)
{
return( log( det( matr ) ) )
} |
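## Note (sketch, assumption: matr is positive definite): log(det(matr)) can
## overflow or underflow for high-dimensional matrices; base R's determinant()
## computes the log-determinant directly and is the safer route if that ever
## becomes an issue.
if (FALSE) {
  mlnormal_log_det_stable <- function(matr) {
    # 'modulus' already holds log(abs(det(matr)))
    as.numeric(determinant(matr, logarithm = TRUE)$modulus)
  }
}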
applied_crm <- function(prior, target, tox, level,
no_skip_esc = TRUE, no_skip_deesc = TRUE,
global_coherent_esc = TRUE,
stop_func = NULL, ...) {
x <- dfcrm::crm(prior = prior, target = target, tox = tox, level = level,
var.est = TRUE, ...)
if (no_skip_esc & x$mtd > (max(level) + 1)) {
x$mtd <- max(level) + 1
}
if (no_skip_deesc & x$mtd < (min(level) - 1)) {
x$mtd <- min(level) - 1
}
if (global_coherent_esc) {
last_dose <- utils::tail(level, 1)
tox_rate_last_dose <- sum(tox[level == last_dose]) / sum(level == last_dose)
if(tox_rate_last_dose > target) {
x$mtd <- min(x$mtd, last_dose)
}
}
if(!is.null(stop_func)) {
x = stop_func(x)
}
return(x)
}
summary_crm <- function(x) {
summary <- data.frame('Dose.level' = c(1:length(x$prior)), 'Prior.Prob(DLT)' = x$prior,
'Number.of.Evaluable.Patients' = rep(NA, length(x$prior)),
'Number.of.DLTs' = rep(NA, length(x$prior)),
'Posterior.Prob' = round(x$ptox, digits = 3))
for(i in 1:length(x$prior)) {
summary$Number.of.Evaluable.Patients[i] = sum(x$level == i)
summary$Number.of.DLTs[i] = sum(x$tox[x$level == i])
}
summary$`Posterior.Prob` <- as.character(summary$`Posterior.Prob`)
for(i in 1:length(x$ptox)) {
summary$`Posterior.Prob`[i] = paste0(summary$`Posterior.Prob`[i], ' (',
round(x$ptoxL[i], digits = 3), ', ',
round(x$ptoxU[i], digits = 3), ')')
}
names(summary) <- c('Dose level', 'Prior Prob(DLT)', 'Number of Evaluable Patients',
'Number of DLTs', 'Posterior Prob(DLT)')
return(summary)
}
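## Usage sketch (hypothetical prior and trial data; assumes the dfcrm package
## is installed): fit the CRM with applied_crm(), then print the per-dose
## summary and plot the prior and posterior curves.
if (FALSE) {
  prior <- c(0.05, 0.10, 0.20, 0.35, 0.50)
  fit <- applied_crm(prior, target = 0.20,
                     tox = c(0, 0, 1, 0), level = c(1, 2, 2, 3))
  summary_crm(fit)
  plot_crm(fit, dose_labels = c("10mg", "20mg", "40mg", "80mg", "160mg"))
}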
plot_crm <- function(crm, dose_labels, cohort_sizes = NULL, file = NULL,
height = 600, width = 750, dose_func = NULL, ..., ylim = c(0, 1),
lwd = 1, cex.axis = 1, cex.lab = 1, cex = 1,
cohort.last = FALSE) {
if(is.null(cohort_sizes)) {
if(!is.null(file)) {
grDevices::png( file = paste0(file, '.png'), height = height, width = width)
graphics::par(cex.axis = cex.axis, cex.lab = cex.lab)
}
doses <- c(1:length(crm$prior))
postprob <- crm$ptox
priorprob <- crm$prior
graphics::plot(x = doses, y = postprob, col = 2, type = 'b', xlab = 'Dose', xaxt = 'n',
ylab = 'Probability of Dose Limiting Toxicity', ylim = ylim, lwd = lwd)
graphics::points(x = doses, y = priorprob, type = 'b', col = 1, lwd = lwd)
graphics::abline(h = crm$target, lty = 2)
graphics::axis(1, at = doses, labels = dose_labels)
graphics::legend(x = max(postprob, priorprob) + 0.15, col = c(1, 2), lty = 1, lwd = lwd,
legend = c('Prior Curve', 'Posterior Curve'), cex = cex)
if(!is.null(file)) {
grDevices::dev.off()
}
} else {
if(is.null(dose_func)) {
stop('dose_func required for cohort history plot')
}
if(!is.null(file)) {
grDevices::png( file = paste0(file, '.png'), height = height, width = width )
graphics::par(cex.axis = cex.axis, cex.lab = cex.lab)
}
colours <- c(1, 2, 'chartreuse4', 'darkgoldenrod2', 'hotpink1',
'royalblue2', 'chocolate1', 'mediumorchid4', 'brown', 'aquamarine1',
'darkmagenta', 'darkolivegreen', 'deepskyblue4', 'dimgray', 'darksalmon',
'darkseagreen', 'darkslateblue', 'darkslategray1','cyan4', 'coral2')
doses <- c(1:length(crm$prior))
priorprob <- crm$prior
graphics::plot(x = doses, y = priorprob, col = 1, type = 'b', xlab = 'Dose', xaxt = 'n',
ylab = 'Probability of Dose Limiting Toxicity', ylim = ylim, lwd = lwd)
graphics::abline(h = crm$target, lty = 2)
graphics::axis(1, at = doses, labels = dose_labels)
j = 1
k = 1
legend_label = c('Prior Curve')
legend_pch = c(NA)
for(i in cohort_sizes) {
legend_label = c(legend_label, paste0('Cohort ', j))
legend_pch = c(legend_pch, paste0(j))
loopcohort_crm <- dose_func(prior = crm$prior, target = crm$target, tox = crm$tox[1:(k+i-1)],
level = crm$level[1:(k+i-1)], ...)
graphics::points(x = doses , y = loopcohort_crm$ptox, type = 'b', col = colours[1+j], pch = paste0(j),
lwd = lwd)
k = k + i
j = j + 1
}
graphics::legend(x = max(priorprob) + 0.15, col = colours[1:(length(cohort_sizes)+1)],
lty = rep(1, length(cohort_sizes)+1), lwd = lwd, pch = legend_pch,
legend = legend_label, cex = cex)
if(cohort.last){
graphics::points(x = doses , y = crm$ptox, type = 'b', col = colours[j], pch = paste0(j - 1),
lwd = 6)
}
if(!is.null(file)) {
grDevices::dev.off()
}
}
} |
get_imgs <- function(page, url){
links_vec <- try(get_img_links(page, url = url), silent = TRUE)
if(!'try-error' %in% class(links_vec)){
isbase64 <- grepl("data:image/([a-zA-Z]*);base64,", links_vec)
image_ext <- file_ext(links_vec)
if(sum(isbase64) > 0){
base_info <- stringr::str_extract(links_vec, "data:image/([a-zA-Z]*);base64,")
image_ext[which(isbase64)] <- gsub(';base64,|data:image/', '',
base_info[which(isbase64)])
}
img_df <- tibble(image_url = links_vec,
image_ext = image_ext,
isbase64 = isbase64)
} else {
img_df <- tibble(image_url = NA,
image_ext = NA,
isbase64 = NA)
}
return(img_df)
}
get_img_links <- function(page, url){
link_ <- page %>%
rvest::html_nodes("img") %>%
rvest::html_attr('src') %>%
xml2::url_absolute(base = url) %>%
unique(.) %>%
sort(.)
return(link_)
} |
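## Usage sketch (hypothetical URL; assumes xml2 is installed): parse the page
## once, then extract its image links and metadata.
if (FALSE) {
  url <- "https://example.com"
  page <- xml2::read_html(url)
  get_imgs(page, url = url)
}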
geocode_ide_uy <- function(x, details = F) {
stopifnot(is.data.frame(x))
stopifnot(is.character(x$dpto), "dpto" %in% colnames(x), length(x$dpto) >= 1)
stopifnot(is.character(x$loc), "loc" %in% colnames(x))
stopifnot(is.character(x$dir), "dir" %in% colnames(x))
if (!curl::has_internet()) stop("No internet access detected. Please check your connection.")
x <- x %>% dplyr::mutate(dir = stringr::str_trim(dir)) %>% dplyr::filter(nchar(dir) > 0)
for (i in 1:nrow(x)) {
p <- glue::glue("http://servicios.ide.gub.uy/servicios/BusquedaDireccion?departamento={x[i,'dpto']}&localidad={x[i,'loc']}&calle={x[i,'dir']}.json") %>%
stringr::str_replace_all(" ", "%20")
p <- RCurl::getURL(p[1])
x[i, "x"] <- suppressWarnings(as.numeric(stringr::str_sub(p, stringr::str_locate(p, "puntoX\":")[2] + 1, stringr::str_locate(p, "puntoX\":")[2] + 10)))
x[i, "y"] <- suppressWarnings(as.numeric(stringr::str_sub(p, stringr::str_locate(p, "puntoY\":")[2] + 1, stringr::str_locate(p, "puntoY\":")[2] + 10)))
if (details == TRUE) {
x[i, "idTipoClasificacion"] <- suppressWarnings(as.numeric(stringr::str_sub(p, stringr::str_locate(p, "idTipoClasificacion\":")[2] + 1,
stringr::str_locate(p, "idTipoClasificacion\":")[2] + 1)))
x[i, "error"] <- suppressWarnings(stringr::str_sub(p, stringr::str_locate(p, "error\":")[2] + 1, stringr::str_locate(p, "error\":")[2] + 50))
}
p <- NULL
Sys.sleep(10)
}
return(x)
} |
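## Usage sketch (hypothetical addresses): the input data frame needs character
## columns 'dpto', 'loc' and 'dir'; coordinates are appended as columns 'x'
## and 'y'. Note the 10-second pause between requests.
if (FALSE) {
  dirs <- data.frame(dpto = "Montevideo", loc = "Montevideo",
                     dir = "18 de Julio 1234", stringsAsFactors = FALSE)
  geocode_ide_uy(dirs)
}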
rvi.plot <- function(formula, family, data, coord, maxlevel, detail = TRUE,
wavelet = "haar", wtrafo = "dwt",
n.eff = NULL, trace = FALSE, customize_plot = NULL){
if(!is.null(customize_plot)) {
warning('"customize_plot" argument is now soft deprecated.\n',
'Use object_name$plot to print the ggplot2 object and for \n',
'subsequent modification.')
}
if(trace){
cat("\n","Model selection tables:","\n","\n")
}
wrm <- WRM(formula, family, data, coord, level = 1,
wavelet = wavelet, wtrafo = wtrafo)
mmi <- mmiWMRR(wrm, data, scale = 1, detail = detail)
nrowA <- dim(mmi$result)[1]
ncolA <- dim(mmi$result)[2]
nvar <- dim(mmi$result)[2] - 6
leg <- dimnames(mmi$result)[[2]][2:(nvar + 1)]
A <- array(NA, c(nrowA, ncolA, maxlevel))
level <- rep(NA, maxlevel)
A[ , ,1] <- mmi$result
level[1] <- mmi$level
if(maxlevel >= 2){
for (i in 2:maxlevel) {
mmi <- mmiWMRR(wrm, data, scale = i, detail = detail, trace = trace)
A[ , ,i] <- mmi$result
level[i] <- mmi$level
}
}
if(trace){
cat("\n","---","\n","Relative variable importance:","\n","\n")
}
klimitscale <- dim(A)[3]
ip <- dim(A)[1]
WeightSums <- matrix(NA, nvar, klimitscale)
for(kscale in 1:klimitscale){
for(kvar in 2:(nvar + 1)){
for (i in 1:ip){
if(!is.na(A[i, kvar, kscale])){
A[i, kvar, kscale] <- A[i, (nvar + 6), kscale]
}
}
}
B <- A[1:ip, 2:(nvar + 1), kscale]
WeightSums[ ,kscale] <- colSums(B, na.rm = TRUE)
}
vec <- 1:nvar
VarCol <- character()
Level <- rep(1:maxlevel, length(leg))
for(i in seq_len(length(leg))){
tempdata <- rep(leg[i], maxlevel)
VarCol <- c(VarCol, tempdata)
}
PltData <- data.frame(Variable = VarCol, Level = Level,
Weight = as.vector(t(WeightSums)))
plt.blank <- ggplot2::theme(panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
panel.background = ggplot2::element_blank(),
axis.line = ggplot2::element_line(colour = "black"))
Level <- rlang::quo(Level)
Variable <- rlang::quo(Variable)
Weight <- rlang::quo(Weight)
Plt <- ggplot2::ggplot(PltData,
ggplot2::aes(x = !! Level,
y = !! Weight)) +
plt.blank +
ggplot2::geom_point(ggplot2::aes(colour = !! Variable,
shape = !! Variable),
size = 3) +
ggplot2::geom_line(ggplot2::aes(colour = !! Variable),
linetype = 2,
size = 1) +
ggplot2::scale_x_continuous("Level", breaks = 1:maxlevel) +
ggplot2::scale_y_continuous("Relative Variable Importance",
breaks = seq(0,
max(WeightSums),
length.out = 6)) +
customize_plot
rownames(WeightSums) <- leg
colnames(WeightSums) <- paste("level", c(1:klimitscale), sep = "=")
if(trace){
print(WeightSums)
}
fit <- list(rvi = WeightSums,
plot = Plt)
return(fit)
} |
run_everything = FALSE
knitr::opts_chunk$set(
eval = nzchar(Sys.getenv("run_vignettes")),
collapse = TRUE,
comment = "#>"
)
library(metaforest)
library(caret)
data <- fukkink_lont
set.seed(62)
check_conv <- readRDS("C:/Git_Repositories/S4_meta-analysis/check_conv.RData")
plot(check_conv)
preselected <- readRDS("C:/Git_Repositories/S4_meta-analysis/preselected.RData")
retain_mods <- preselect_vars(preselected, cutoff = .5)
mf_cv <- readRDS("C:/Git_Repositories/S4_meta-analysis/mf_cv.RData")
mf_cv$results[which.min(mf_cv$results$RMSE), ]
r2_cv <- mf_cv$results$Rsquared[which.min(mf_cv$results$RMSE)]
final <- mf_cv$finalModel
r2_oob <- final$forest$r.squared
plot(final)
VarImpPlot(final)
ordered_vars <- names(final$forest$variable.importance)[
order(final$forest$variable.importance, decreasing = TRUE)]
PartialDependence(final, vars = ordered_vars,
rawdata = TRUE, pi = .95) |
tsvreq_classic<-function(X)
{
errcheck_data(X,"tsvreq_classic")
fsvr_classic<-vrf(X)
freq<-fsvr_classic$frequency
ts<-1/freq
tsvr<-fsvr_classic$vr
CVcom2<-cv2f(X, type="com")
com<-CVcom2$cv2
CVcomip2<-cv2f(X, type="comip")
comnull<-CVcomip2$cv2
wts.res<-wts(X)
wts.res<-wts.res$wts
errcheck_tsvreq(ts=ts,com=com,comnull=comnull,tsvr=tsvr,wts=wts.res)
result<-list(ts=rev(ts),com=rev(com),comnull=rev(comnull),tsvr=rev(tsvr),wts=rev(wts.res))
class(result)<-c("tsvreq_classic","tsvreq","list")
return(result)
} |
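## Usage sketch (hypothetical data): X is a species-by-time abundance matrix;
## the result carries the timescale-specific variance ratio components
## (ts, com, comnull, tsvr, wts).
if (FALSE) {
  set.seed(1)
  X <- matrix(runif(10 * 50, 1, 10), nrow = 10)
  res <- tsvreq_classic(X)
  str(res)
}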
aux_sonifysignal <- function(
data,
file,
aggregate = 1,
amplification = 10^6,
speed = 1,
dt
) {
if (requireNamespace("seewave", quietly = TRUE) == FALSE) {
stop("Package seewave is not installed, operation not possible!")
}
## extract signal vector, length and sampling interval, handling both
## eseis objects and plain numeric vectors
if (inherits(data, "eseis")) {
n <- data$meta$n
dt <- data$meta$dt
signal <- data$signal
} else {
if (missing(dt) == TRUE) {
stop("No eseis object and dt missing!")
}
n <- length(data)
signal <- data
}
if (aggregate == 1) {
s <- as.numeric(signal)
} else {
## linear interpolation to resample the signal by the aggregation factor
s <- stats::approx(x = seq(from = 1, to = n),
y = signal,
xout = seq(from = 1,
to = n,
by = 1 / aggregate))$y
}
s <- as.numeric(s * amplification)
seewave::savewav(wave = s,
filename = file,
f = 1/dt * speed)
} |
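# Test script for RSpectra::svds(): the same random matrix (with half its
# entries zeroed) is stored as a base matrix, dgeMatrix, dgCMatrix and
# dgRMatrix, and the truncated SVD of each is checked against base::svd(),
# including the implicit center/scale options of svds().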
library(RSpectra)
library(Matrix)
n = 100
p = 50
k = 5
set.seed(123)
x = matrix(rnorm(n * p), n)
x[sample(n * p, floor(n * p / 2))] = 0
svd_resid = function(res, svd0)
{
d_resid = svd0$d[1:length(res$d)] - res$d
u_resid = v_resid = 0
if(!is.null(res$u))
u_resid = abs(svd0$u[, 1:ncol(res$u)]) - abs(res$u)
if(!is.null(res$v))
v_resid = abs(svd0$v[, 1:ncol(res$v)]) - abs(res$v)
mabs = function(x) max(abs(x))
maxerr = max(mabs(d_resid), mabs(u_resid), mabs(v_resid))
return(paste("residual <", format(maxerr, digits = 5)))
}
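# Singular vectors are determined only up to sign, so svd_resid() compares
# absolute values of u and v; singular values are compared directly.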
x1 = x
x2 = as(x, "dgeMatrix")
x3 = as(x, "dgCMatrix")
x4 = as(x, "dgRMatrix")
svd0 = svd(x)
res1 = svds(x1, k)
res2 = svds(x2, k)
res3 = svds(x3, k)
res4 = svds(x4, k)
svd_resid(res1, svd0)
svd_resid(res2, svd0)
svd_resid(res3, svd0)
svd_resid(res4, svd0)
xc = sweep(x, 2, colMeans(x), "-")
svd0 = svd(xc)
res1 = svds(x1, k, opts = list(center = TRUE))
res2 = svds(x2, k, opts = list(center = TRUE))
res3 = svds(x3, k, opts = list(center = TRUE))
res4 = svds(x4, k, opts = list(center = TRUE))
svd_resid(res1, svd0)
svd_resid(res2, svd0)
svd_resid(res3, svd0)
svd_resid(res4, svd0)
xs = sweep(x, 2, sqrt(colSums(x^2)), "/")
svd0 = svd(xs)
res1 = svds(x1, k, opts = list(scale = TRUE))
res2 = svds(x2, k, opts = list(scale = TRUE))
res3 = svds(x3, k, opts = list(scale = TRUE))
res4 = svds(x4, k, opts = list(scale = TRUE))
svd_resid(res1, svd0)
svd_resid(res2, svd0)
svd_resid(res3, svd0)
svd_resid(res4, svd0)
xcs = sweep(xc, 2, sqrt(colSums(xc^2)), "/")
svd0 = svd(xcs)
res1 = svds(x1, k, opts = list(center = TRUE, scale = TRUE))
res2 = svds(x2, k, opts = list(center = TRUE, scale = TRUE))
res3 = svds(x3, k, opts = list(center = TRUE, scale = TRUE))
res4 = svds(x4, k, opts = list(center = TRUE, scale = TRUE))
svd_resid(res1, svd0)
svd_resid(res2, svd0)
svd_resid(res3, svd0)
svd_resid(res4, svd0)
ctr = rnorm(p)
scl = abs(rnorm(p))
y = sweep(x, 2, ctr, "-")
y = sweep(y, 2, scl, "/")
svd0 = svd(y)
res1 = svds(x1, k, opts = list(center = ctr, scale = scl))
res2 = svds(x2, k, opts = list(center = ctr, scale = scl))
res3 = svds(x3, k, opts = list(center = ctr, scale = scl))
res4 = svds(x4, k, opts = list(center = ctr, scale = scl))
svd_resid(res1, svd0)
svd_resid(res2, svd0)
svd_resid(res3, svd0)
svd_resid(res4, svd0) |
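# X11/Wayland clipboard helpers: has_util() probes whether a command-line
# clipboard utility runs successfully, and the X11_* functions read and write
# the clipboard through xclip, xsel or wl-clipboard pipes.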
has_util <- function(util_test) {
if (nzchar(Sys.which(util_test[1]))) {
try_res <- tryCatch(system2(util_test[1], util_test[-1], stdout = TRUE, stderr = TRUE),
error = function(c) {
print(c)
return(FALSE)
},
warning = function(c) {
print(c)
return(FALSE)
}
)
if (identical(try_res, FALSE)) {
notify_no_display()
} else {
TRUE
}
} else {
FALSE
}
}
has_xclip <- function() has_util(c("xclip", "-o", "-selection", "clipboard"))
has_xsel <- function() has_util(c("xsel", "--clipboard", "--output"))
has_wl_clipboard <- function() has_wl_paste() && has_wl_copy()
has_wl_paste <- function() has_util(c("wl-paste", "--primary"))
has_wl_copy <- function() has_util(c("wl-copy", "--primary"))
notify_no_cb <- function() {
stop(msg_no_clipboard(), call. = FALSE)
}
notify_no_display <- function() {
stop(msg_no_display(), call. = FALSE)
}
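# X11_read_clip() below tries the available utilities in order: xclip, then
# xsel, then wl-paste (Wayland); if none is usable it signals the
# missing-clipboard condition via notify_no_cb().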
X11_read_clip <- function() {
if (has_xclip()) {
con <- pipe("xclip -o -selection clipboard")
} else if (has_xsel()) {
con <- pipe("xsel --clipboard --output")
} else if (has_wl_paste()) {
con <- pipe("wl-paste")
} else {
notify_no_cb()
}
content <- scan(con, what = character(), sep = "\n",
blank.lines.skip = FALSE, quiet = TRUE)
close(con)
return(content)
}
X11_write_clip <- function(content, object_type, breaks, eos, return_new, ...) {
if (has_xclip()) {
con <- pipe("xclip -i -sel p -f | xclip -i -sel c", "w")
} else if (has_xsel()) {
con <- pipe("xsel --clipboard --input", "w")
} else if (has_wl_copy()) {
con <- pipe("wl-copy", "w")
} else {
notify_no_cb()
}
.dots <- list(...)
write_nix(content, object_type, breaks, eos, return_new, con, .dots)
} |
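# ergm term initializers and helpers. GWDECAY supplies the shared map/gradient
# pair for geometrically weighted (curved) terms, and each InitErgmTerm.*
# function translates a user-level model term into the internal
# change-statistic specification, with a legacy branch for ergm <= 3.9.4.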
GWDECAY <- list(
map = function(x,n,...) {
i <- 1:n
x[1] * ifelse(i==1, 1, (exp(x[2])*(1-(1-exp(-x[2]))^i)))
},
gradient = function(x,n,...) {
i <- 1:n
e2 <- exp(x[2])
a <- 1-exp(-x[2])
rbind((1-a^i)*e2, ifelse(i==1, 0, x[1] * ( (1-a^i)*e2 - i*a^(i-1) ) ) )
},
minpar = c(-Inf, 0)
)
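# A minimal sketch (added for illustration, not part of the original source):
# check GWDECAY$gradient against a finite-difference approximation of
# GWDECAY$map. 'theta' is a hypothetical (coefficient, log-decay) pair and
# 'n' a hypothetical statistic count; wrapped in local() to avoid leaving
# temporary objects behind.
local({
  theta <- c(1.5, 0.3); n <- 5L; eps <- 1e-6
  num_grad <- sapply(seq_along(theta), function(j) {
    tp <- theta; tp[j] <- tp[j] + eps
    (GWDECAY$map(tp, n) - GWDECAY$map(theta, n)) / eps
  })
  # should be on the order of eps: analytic and numeric gradients agree
  max(abs(t(num_grad) - GWDECAY$gradient(theta, n)))
})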
.spcache.aux <- function(type){
type <- toupper(type)
trim_env(as.formula(as.call(list(as.name('~'), as.call(list(as.name('.spcache.net'),type=if(type=='ITP')'OTP' else type))))))
}
nodecov_names <- function(nodecov, prefix=NULL){
cn <- if(is.matrix(nodecov)){
cn <- colnames(nodecov)
if(is.null(cn) || all(cn==seq_along(cn))) paste(attr(nodecov, "name"), seq_len(ncol(nodecov)), sep=".")
else cn
}else attr(nodecov, "name")
NVL3(prefix, paste0(prefix,".",cn), cn)
}
LEVELS_BASE1 <- NULL
InitErgmTerm.absdiff <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attrname","pow"),
vartypes = c("character","numeric"),
defaultvalues = list(NULL,1),
required = c(TRUE,FALSE))
nodecov <- get.node.attr(nw, a$attrname)
covname <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attr","pow"),
vartypes = c(ERGM_VATTR_SPEC,"numeric"),
defaultvalues = list(NULL,1),
required = c(TRUE,FALSE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric")
covname <- attr(nodecov, "name")
}
list(name="absdiff",
coef.names = paste(paste("absdiff",if(a$pow!=1) a$pow else "",sep=""), covname, sep="."),
inputs = c(a$pow,nodecov),
dependence = FALSE
)
}
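# A hedged usage sketch (assumes the ergm package with its bundled
# faux.mesa.high dataset; not part of the original source): absdiff("Grade")
# contributes sum(|Grade_i - Grade_j|) over the edges of the network.
if (requireNamespace("ergm", quietly = TRUE)) {
  library(ergm)
  data(faux.mesa.high)
  print(summary(faux.mesa.high ~ absdiff("Grade")))
}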
InitErgmTerm.absdiffcat <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attrname","base"),
vartypes = c("character","numeric"),
defaultvalues = list(NULL,NULL),
required = c(TRUE,FALSE),
dep.inform = list(FALSE, "levels"))
attrarg <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attr","base","levels"),
vartypes = c(ERGM_VATTR_SPEC,"numeric",ERGM_LEVELS_SPEC),
defaultvalues = list(NULL,NULL,NULL),
required = c(TRUE,FALSE,FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
}
nodecov <- ergm_get_vattr(attrarg, nw, accept = "numeric")
attrname <- attr(nodecov, "name")
u <- sort(unique(as.vector(abs(outer(nodecov,nodecov,"-")))),na.last=NA)
u <- u[u>0]
u <- ergm_attr_levels(a$levels, nodecov, nw, levels = u)
if((!hasName(attr(a,"missing"), "levels") || attr(a,"missing")["levels"]) && any(NVL(a$base,0)!=0)) u <- u[-a$base]
if (length(u)==0)
ergm_Init_abort ("Argument to absdiffcat() has too few distinct differences")
inputs <- c(u, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(u)
list(name="absdiffcat",
coef.names = paste("absdiff", attrname, u, sep="."),
inputs = inputs,
dependence = FALSE
)
}
InitErgmTerm.altkstar <- function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=NULL,
varnames = c("lambda","fixed"),
vartypes = c("numeric","logical"),
defaultvalues = list(NULL,FALSE),
required = c(FALSE,FALSE))
if(!a$fixed){
if(!is.null(a$lambda)) warning("In term 'altkstar': decay parameter 'lambda' passed with 'fixed=FALSE'. 'lambda' will be ignored. To specify an initial value for 'lambda', use the 'init' control parameter.", call.=FALSE)
d <- 1:(network.size(nw)-1)
map <- function(x,n,...) {
i <- 1:n
x[1]*(x[2]*((1-1/x[2])^i + i) - 1)
}
gradient <- function(x,n,...) {
i <- 1:n
rbind(x[2]*((1-1/x[2])^i + i) - 1,
x[1]*(i - 1 + (x[2]*x[2]-x[2]+i)*((1-1/x[2])^(i-1))/(x[2]*x[2])))
}
outlist <- list(name="degree",
coef.names = paste("altkstar
inputs = d, map=map, gradient=gradient,
params=list(altkstar=NULL, altkstar.lambda=a$lambda),
minpar = c(-Inf, 0)
)
} else {
if(is.null(a$lambda)) stop("Term 'altkstar' with 'fixed=TRUE' requires a decay parameter 'lambda'.", call.=FALSE)
coef.names = paste("altkstar", a$lambda, sep=".")
outlist <- list (name="altkstar",
coef.names = coef.names,
inputs=a$lambda
)
}
outlist
}
InitErgmTerm.asymmetric <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE, bipartite=NULL,
varnames = c("attrname", "diff", "keep"),
vartypes = c("character", "logical", "numeric"),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels"))
attrarg <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE, bipartite=NULL,
varnames = c("attr", "diff", "keep", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, NULL),
required = c(FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels", FALSE))
attrarg <- a$attr
}
if (!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(a$levels, nodecov, nw, levels = sort(unique(nodecov)))
if((!hasName(attr(a,"missing"), "levels") || attr(a,"missing")["levels"]) && !is.null(a$keep)) u <- u[a$keep]
nodecov <- match(nodecov,u,nomatch=length(u)+1)
dontmatch <- nodecov==(length(u)+1)
nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
ui <- seq(along=u)
}
out <- list(name="asymmetric",
coef.names = "asymmetric",
minval = 0,
maxval = network.dyadcount(nw,FALSE)/2
)
if (!is.null(attrarg)) {
if (a$diff) {
out$coef.names <- paste("asymmetric", attrname, u, sep=".")
out$inputs <- c(ui, nodecov)
} else {
out$coef.names <- paste("asymmetric", attrname, sep=".")
out$inputs <- nodecov
}
}
out
}
InitErgmTerm.attrcov <- function (nw, arglist, ..., version=packageVersion("ergm")) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "mat"),
vartypes = c(ERGM_VATTR_SPEC, "matrix"),
defaultvalues = list(NULL, NULL),
required = c(TRUE, TRUE))
if(is.bipartite(nw)) {
b1nodecov <- ergm_get_vattr(a$attr, nw, bip="b1")
b2nodecov <- ergm_get_vattr(a$attr, nw, bip="b2")
attrname <- attr(b1nodecov, "name")
b1levels <- sort(unique(b1nodecov))
b2levels <- sort(unique(b2nodecov))
nodecov <- c(match(b1nodecov, b1levels), match(b2nodecov, b2levels))
if(NROW(a$mat) != length(b1levels) || NCOL(a$mat) != length(b2levels)) {
ergm_Init_abort("mat has wrong dimensions for attr")
}
} else {
nodecov <- ergm_get_vattr(a$attr, nw)
attrname <- attr(nodecov, "name")
levels <- sort(unique(nodecov))
nodecov <- match(nodecov, levels)
if(NROW(a$mat) != length(levels) || NCOL(a$mat) != length(levels)) {
ergm_Init_abort("mat has wrong dimensions for attr")
}
}
list(name = "attrcov",
coef.names = paste("attrcov", attrname, sep = "."),
dependence = FALSE,
inputs = NULL,
nr = NROW(a$mat),
nc = NCOL(a$mat),
mat = if(is.double(a$mat)) a$mat else as.double(a$mat),
nodecov = c(0L, nodecov) - 1L
)
}
InitErgmTerm.b1concurrent<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("by", "levels"),
vartypes = c("character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("by", "levels"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- a$levels
}
nb1 <- get.network.attribute(nw, "bipartite")
byarg <- a$by
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = "b1")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
ui <- seq(along=u)
}
if(!is.null(byarg)) {
if(length(u)==0) {return(NULL)}
name <- "b1concurrent_by_attr"
coef.names<-paste("b1concurrent",".", attrname, u, sep="")
inputs <- c(ui, nodecov)
}else{
name <- "b1concurrent"
coef.names<-paste("b1concurrent",sep="")
inputs <- NULL
}
list(name=name, coef.names=coef.names, inputs=inputs, dependence=TRUE, minval=0, maxval=nb1)
}
InitErgmTerm.b1degrange<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, bipartite=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, bipartite=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- a$levels
}
from<-a$from; to<-a$to; byarg <- a$by; homophily <- a$homophily
to <- ifelse(to==Inf, network.size(nw)+1, to)
if(length(to)==1 && length(from)>1) to <- rep(to, length(from))
else if(length(from)==1 && length(to)>1) from <- rep(from, length(to))
  else if(length(from)!=length(to)) ergm_Init_abort("The arguments 'from' and 'to' of term b1degrange must either have the same length, or one of them must have length 1.")
  else if(any(from>=to)) ergm_Init_abort("Term b1degrange must have from<to.")
nb1 <- get.network.attribute(nw, "bipartite")
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = if(homophily) "n" else "b1")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(from,lu), rep(to,lu), rep(1:lu, rep(length(from), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[3,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(from==0)) {
emptynwstats <- rep(0, length(from))
emptynwstats[from==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("b1deg",from,"+",sep=""),
paste("b1deg",from,"to",to,sep=""))
name <- "b1degrange"
inputs <- c(rbind(from,to))
} else if (homophily) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("b1deg",from,"+", ".homophily.",attrname,sep=""),
paste("b1deg",from,"to",to, ".homophily.",attrname,sep=""))
name <- "b1degrange_w_homophily"
inputs <- c(rbind(from,to), nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- ifelse(du[2,]>=network.size(nw)+1,
paste("b1deg",du[1,],"+.", attrname, u[du[3,]],sep=""),
paste("b1deg",du[1,],"to",du[2,],".",attrname, u[du[3,]],sep=""))
name <- "b1degrange_by_attr"
inputs <- c(as.vector(du), nodecov)
}
if (!is.null(emptynwstats)){
list(name=name,coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=TRUE, minval = 0)
}else{
list(name=name,coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw), conflicts.constraints="b1degreedist")
}
}
InitErgmTerm.b1cov<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attrname","transform","transformname"),
vartypes = c("character","function","character"),
defaultvalues = list(NULL,function(x)x,""),
required = c(TRUE,FALSE,FALSE))
attrname<-a$attrname
f<-a$transform
f.name<-a$transformname
coef.names <- paste(paste("b1cov",f.name,sep=""),attrname,sep=".")
nb1 <- get.network.attribute(nw, "bipartite")
nodecov <- f(get.node.attr(nw, attrname, "b1cov", numeric=TRUE)[1:nb1])
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attr"),
vartypes = c(ERGM_VATTR_SPEC),
defaultvalues = list(NULL),
required = c(TRUE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric", bip = "b1", multiple="matrix")
coef.names <- nodecov_names(nodecov, "b1cov")
}
list(name="nodeocov", coef.names=coef.names, inputs=c(nodecov), dependence=FALSE)
}
InitErgmTerm.b1degree <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d", "by", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d", "by", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
levels <- a$levels
}
byarg <- a$by
nb1 <- get.network.attribute(nw, "bipartite")
if (!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = "b1")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(a$d,lu), rep(1:lu, rep(length(a$d), lu)))
emptynwstats <- rep(0, ncol(du))
if (any(du[1,]==0)) {
tmp <- du[2,du[1,]==0]
for(i in 1:length(tmp))
tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
name <- "b1degree_by_attr"
coef.names <- paste("b1deg", du[1,], ".", attrname, u[du[2,]], sep="")
inputs <- c(as.vector(du), nodecov)
} else {
name <- "b1degree"
coef.names <- paste("b1deg", a$d, sep="")
inputs <- a$d
emptynwstats <- rep(0, length(a$d))
if (any(a$d==0)) {
emptynwstats[a$d==0] <- nb1
}
}
  list(name = name, coef.names = coef.names, inputs = inputs, emptynwstats = emptynwstats, dependence=TRUE,
       minval = 0, maxval=nb1, conflicts.constraints="odegreedist")
}
InitErgmTerm.b1dsp<-function(nw, arglist, cache.sp=TRUE, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(TRUE))
d <- a$d
if(length(d) == 0)
return(NULL)
emptynwstats <- rep(0, length(d))
nb1 <- get.network.attribute(nw, "bipartite")
type <- "OSP"
typecode <- 4
emptynwstats[d==0] <- nb1*(nb1-1)/2
list(name="ddspbwrap", coef.names=paste("b1dsp",d,sep=""), inputs=c(typecode, d),
emptynwstats=emptynwstats, minval = 0, maxval = nb1*(nb1-1)/2, dependence = TRUE, auxiliaries=if(cache.sp) .spcache.aux(type) else NULL)
}
InitErgmTerm.b1factor<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attr", "base", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, LEVELS_BASE1),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw, bip = "b1")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if (attr(a,"missing")["levels"] && any(NVL(a$base,0)!=0)) {
u <- u[-a$base]
}
if (length(u)==0) {
return()
}
nodepos <- match(nodecov,u,nomatch=0)-1
inputs <- nodepos
list(name="nodeofactor", coef.names=paste("b1factor", attrname, paste(u), sep="."), inputs=inputs, dependence=FALSE, minval=0)
}
InitErgmTerm.b1sociality<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("nodes"),
vartypes = c(ERGM_LEVELS_SPEC),
defaultvalues = list(-1),
required = c(FALSE))
nb1 <- get.network.attribute(nw, "bipartite")
d <- ergm_attr_levels(a$nodes, 1:nb1, nw, 1:nb1)
ld<-length(d)
if(ld==0){return(NULL)}
coef.names <- paste("b1sociality",d,sep="")
inputs <- c(d,0)
list(name="sociality", coef.names=coef.names, inputs=inputs, minval=0, maxval=network.size(nw)-nb1, conflicts.constraints="b1degrees", dependence=FALSE)
}
InitErgmTerm.b1star <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attrname", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attr", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
if (!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
name <- "ostar"
coef.names <- paste("b1star", a$k, ".", attrname, sep="")
inputs <- c(a$k, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
else {
name <- "ostar"
coef.names <- paste("b1star",a$k,sep="")
inputs <- a$k
}
list(name = name, coef.names = coef.names,
inputs = inputs, minval = 0)
}
InitErgmTerm.b1starmix <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attrname", "base", "diff"),
vartypes = c("numeric", "character", "numeric", "logical"),
defaultvalues = list(NULL, NULL, NULL, TRUE),
required = c(TRUE, TRUE, FALSE, FALSE))
attrarg <- a$attrname
} else {
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attr", "base", "diff"),
vartypes = c("numeric", ERGM_VATTR_SPEC, "numeric", "logical"),
defaultvalues = list(NULL, NULL, NULL, TRUE),
required = c(TRUE, TRUE, FALSE, FALSE))
attrarg <- a$attr
}
nb1 <- get.network.attribute(nw, "bipartite")
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- sort(unique(nodecov))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
if (length(a$k) > 1)
{ ergm_Init_abort("Only a single scalar k may be used with each b1starmix term") }
b1namescov <- sort(unique(nodecov[1:nb1]))
b2namescov <- sort(unique(nodecov[(1+nb1):network.size(nw)]))
b1nodecov <- match(nodecov[1:nb1],b1namescov)
b2nodecov <- match(nodecov[(1+nb1):network.size(nw)],b2namescov)
namescov <- u[c(b1namescov, b2namescov)]
nr <- length(b1namescov)
nc <- length(b2namescov)
nodecov <- c(b1nodecov, b2nodecov + nr)
if (a$diff) {
u <- cbind(rep(1:nr,nc), nr + rep(1:nc, each=nr))
if (any(NVL(a$base,0)!=0)) { u <- u[-a$base,] }
name <- "b1starmix"
coef.names <- paste("b1starmix", a$k, attrname,
apply(matrix(namescov[u],ncol=2), 1,paste,collapse="."),
sep=".")
inputs <- c(a$k, nodecov, u[,1], u[,2])
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
else {
u <- 1:nr
if (any(NVL(a$base,0)!=0)) { u <- u[-a$base] }
name <- "b1starmixhomophily"
coef.names <- paste("b1starmix", a$k, attrname, namescov[u], sep=".")
inputs <- c(a$k, nodecov, u)
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
list(name = name, coef.names = coef.names,
inputs = inputs, minval = 0)
}
InitErgmTerm.b1twostar <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("b1attrname", "b2attrname", "base", "b1levels", "b2levels"),
vartypes = c("character", "character", "numeric", "character,numeric,logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE, FALSE))
b1attrarg <- a$b1attrname
b2attrarg <- a$b2attrname
b1levels <- if(!is.null(a$b1levels)) I(a$b1levels) else NULL
b2levels <- if(!is.null(a$b2levels)) I(a$b2levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("b1attr", "b2attr", "base", "b1levels", "b2levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE, FALSE, FALSE))
b1attrarg <- a$b1attr
b2attrarg <- a$b2attr
b1levels <- a$b1levels
b2levels <- a$b2levels
}
nb1 <- get.network.attribute(nw, "bipartite")
n <- network.size(nw)
b1nodecov <- ergm_get_vattr(b1attrarg, nw, bip = "b1")
b1attrname <- attr(b1nodecov, "name")
b1u <- ergm_attr_levels(b1levels, b1nodecov, nw, sort(unique(b1nodecov)))
if(is.null(b2attrarg)) { b2attrarg <- b1attrarg }
b2nodecov <- ergm_get_vattr(b2attrarg, nw, bip = "b2")
b2attrname <- attr(b2nodecov, "name")
b2u <- ergm_attr_levels(b2levels, b2nodecov, nw, sort(unique(b2nodecov)))
nr <- length(b1u)
nc <- length(b2u)
levels2.grid <- expand.grid(row = b1u, col = b2u, col2 = b2u, stringsAsFactors=FALSE)
indices2.grid <- expand.grid(row = 1:nr, col = 1:nc, col2 = 1:nc)
levels2.list <- transpose(levels2.grid[indices2.grid$col <= indices2.grid$col2,])
indices2.grid <- indices2.grid[indices2.grid$col <= indices2.grid$col2,]
levels2.sel <- if((!hasName(attr(a,"missing"), "levels2") || attr(a,"missing")["levels2"]) && any(a$base != 0)) levels2.list[-a$base]
else ergm_attr_levels(a$levels2, list(row = b1nodecov, col = b2nodecov, col2 = b2nodecov), nw, levels2.list)
rows2keep <- match(levels2.sel,levels2.list, NA)
rows2keep <- rows2keep[!is.na(rows2keep)]
u <- indices2.grid[rows2keep,]
b1nodecov <- match(b1nodecov,b1u,nomatch=length(b1u)+1)
b2nodecov <- match(b2nodecov,b2u,nomatch=length(b2u)+1)
coef.names <- paste("b1twostar", b1attrname, b1u[u[,1]], b2attrname,
apply(matrix(b2u[cbind(u[,2], u[,3])],ncol=2), 1, paste, collapse="."),
sep=".")
list(name = "b1twostar", coef.names = coef.names,
inputs = c(b1nodecov, b2nodecov, u[,1], u[,2], u[,3]), minval = 0)
}
InitErgmTerm.b2concurrent<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("by", "levels"),
vartypes = c("character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("by", "levels"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- a$levels
}
nb1 <- get.network.attribute(nw, "bipartite")
byarg <- a$by
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = "b2")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
ui <- seq(along=u)
}
if(!is.null(byarg)) {
if(length(u)==0) {return(NULL)}
coef.names <- paste("b2concurrent",".", attrname,u, sep="")
name <- "b2concurrent_by_attr"
inputs <- c(ui, nodecov)
}else{
coef.names <- "b2concurrent"
name <- "b2concurrent"
inputs <- NULL
}
list(name=name, coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw)-nb1)
}
InitErgmTerm.b2cov<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attrname","transform","transformname"),
vartypes = c("character","function","character"),
defaultvalues = list(NULL,function(x)x,""),
required = c(TRUE,FALSE,FALSE))
attrname<-a$attrname
f<-a$transform
f.name<-a$transformname
coef.names <- paste(paste("b2cov",f.name,sep=""),attrname,sep=".")
nb1 <- get.network.attribute(nw, "bipartite")
nodecov <- f(get.node.attr(nw, attrname, "b2cov", numeric=TRUE)[(nb1+1):network.size(nw)])
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attr"),
vartypes = c(ERGM_VATTR_SPEC),
defaultvalues = list(NULL),
required = c(TRUE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric", bip = "b2", multiple="matrix")
coef.names <- nodecov_names(nodecov, "b2cov")
}
list(name="b2cov", coef.names=coef.names, inputs=c(nodecov), dependence=FALSE)
}
InitErgmTerm.b2degrange<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, bipartite=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, bipartite=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- a$levels
}
from<-a$from; to<-a$to; byarg <- a$by; homophily <- a$homophily
to <- ifelse(to==Inf, network.size(nw)+1, to)
if(length(to)==1 && length(from)>1) to <- rep(to, length(from))
else if(length(from)==1 && length(to)>1) from <- rep(from, length(to))
  else if(length(from)!=length(to)) ergm_Init_abort("The arguments 'from' and 'to' of term b2degrange must either have the same length, or one of them must have length 1.")
  else if(any(from>=to)) ergm_Init_abort("Term b2degrange must have from<to.")
nb1 <- get.network.attribute(nw, "bipartite")
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = if(homophily) "n" else "b2")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(from,lu), rep(to,lu), rep(1:lu, rep(length(from), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[3,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(from==0)) {
emptynwstats <- rep(0, length(from))
emptynwstats[from==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("b2deg",from,"+",sep=""),
paste("b2deg",from,"to",to,sep=""))
name <- "b2degrange"
inputs <- c(rbind(from,to))
} else if (homophily) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("b2deg",from,"+", ".homophily.",attrname,sep=""),
paste("b2deg",from,"to",to, ".homophily.",attrname,sep=""))
name <- "b2degrange_w_homophily"
inputs <- c(rbind(from,to), nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- ifelse(du[2,]>=network.size(nw)+1,
paste("b2deg",du[1,],"+.", attrname, u[du[3,]],sep=""),
paste("b2deg",du[1,],"to",du[2,],".",attrname, u[du[3,]],sep=""))
name <- "b2degrange_by_attr"
inputs <- c(as.vector(du), nodecov)
}
if (!is.null(emptynwstats)){
list(name=name,coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=TRUE, minval = 0)
}else{
list(name=name,coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw), conflicts.constraints="b2degreedist")
}
}
InitErgmTerm.b2degree <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d", "by", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d", "by", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
levels <- a$levels
}
byarg <- a$by
nb1 <- get.network.attribute(nw, "bipartite")
n <- network.size(nw)
if (!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw, bip = "b2")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(a$d,lu), rep(1:lu, rep(length(a$d), lu)))
emptynwstats <- rep(0, ncol(du))
if (any(du[1,]==0)) {
tmp <- du[2,du[1,]==0]
for(i in 1:length(tmp))
tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
name <- "b2degree_by_attr"
coef.names <- paste("b2deg", du[1,], ".", attrname, u[du[2,]], sep="")
inputs <- c(as.vector(du), nodecov)
} else {
name <- "b2degree"
coef.names <- paste("b2deg", a$d, sep="")
inputs <- a$d
emptynwstats <- rep(0, length(a$d))
if (any(a$d==0)) {
emptynwstats[a$d==0] <- n-nb1
}
}
  list(name = name, coef.names = coef.names, inputs = inputs, emptynwstats = emptynwstats, dependence=TRUE,
       minval = 0, maxval=network.size(nw)-nb1, conflicts.constraints="b2degreedist")
}
InitErgmTerm.b2dsp<-function(nw, arglist, cache.sp=TRUE, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(TRUE))
d <- a$d
if(length(d) == 0)
return(NULL)
emptynwstats <- rep(0, length(d))
nb2 <- network.size(nw) - get.network.attribute(nw, "bipartite")
type <- "ISP"
typecode <- 5
emptynwstats[d==0] <- nb2*(nb2-1)/2
list(name="ddspbwrap", coef.names=paste("b2dsp",d,sep=""), inputs=c(typecode, d),
emptynwstats=emptynwstats, minval = 0, maxval = nb2*(nb2-1)/2, dependence = TRUE, auxiliaries=if(cache.sp) .spcache.aux(type) else NULL)
}
InitErgmTerm.b2factor<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("attr", "base", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, LEVELS_BASE1),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw, bip = "b2")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if (attr(a,"missing")["levels"] && any(NVL(a$base,0)!=0)) {
u <- u[-a$base]
}
if (length(u)==0) {
return()
}
nodepos <- match(nodecov,u,nomatch=0)-1
inputs <- nodepos
list(name="b2factor", coef.names=paste("b2factor", attrname, paste(u), sep="."), inputs=inputs, dependence=FALSE, minval=0)
}
InitErgmTerm.b2sociality<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("nodes"),
vartypes = c(ERGM_LEVELS_SPEC),
defaultvalues = list(-1),
required = c(FALSE))
nb1 <- get.network.attribute(nw, "bipartite")
d <- ergm_attr_levels(a$nodes, (1 + nb1):network.size(nw), nw, (1 + nb1):network.size(nw))
ld<-length(d)
if(ld==0){return(NULL)}
coef.names <- paste("b2sociality",d,sep="")
inputs <- c(d,0)
list(name="sociality", coef.names=coef.names, inputs=inputs, minval=0, maxval=nb1, conflicts.constraints="b2degrees", dependence=FALSE)
}
InitErgmTerm.b2star <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attrname", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attr", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
if (!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
name <- "istar"
coef.names <- paste("b2star", a$k, ".", attrname, sep="")
inputs <- c(a$k, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
else {
name <- "istar"
coef.names <- paste("b2star",a$k,sep="")
inputs <- a$k
}
list(name = name, coef.names = coef.names,
inputs = inputs, minval=0)
}
InitErgmTerm.b2starmix <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attrname", "base", "diff"),
vartypes = c("numeric", "character", "numeric", "logical"),
defaultvalues = list(NULL, NULL, NULL, TRUE),
required = c(TRUE, TRUE, FALSE, FALSE))
attrarg <- a$attrname
} else {
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("k", "attr", "base", "diff"),
vartypes = c("numeric", ERGM_VATTR_SPEC, "numeric", "logical"),
defaultvalues = list(NULL, NULL, NULL, TRUE),
required = c(TRUE, TRUE, FALSE, FALSE))
attrarg <- a$attr
}
nb1 <- get.network.attribute(nw, "bipartite")
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- sort(unique(nodecov))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
if (length(a$k) > 1)
{ ergm_Init_abort("Only a single scalar k may be used with each b2starmix term") }
b1namescov <- sort(unique(nodecov[1:nb1]))
b2namescov <- sort(unique(nodecov[(1+nb1):network.size(nw)]))
b1nodecov <- match(nodecov[1:nb1],b1namescov)
b2nodecov <- match(nodecov[(1+nb1):network.size(nw)],b2namescov)
namescov <- u[c(b1namescov, b2namescov)]
nr <- length(b1namescov)
nc <- length(b2namescov)
nodecov <- c(b1nodecov, b2nodecov + nr)
if (a$diff) {
u <- cbind(rep(1:nr,nc), nr + rep(1:nc, each=nr))
if (any(NVL(a$base,0)!=0)) { u <- u[-a$base,] }
name <- "b2starmix"
coef.names <- paste("b2starmix", a$k, attrname,
apply(matrix(namescov[u[,2:1]],ncol=2), 1,paste,collapse="."),
sep=".")
inputs <- c(a$k, nodecov, u[,1], u[,2])
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
else {
u <- nr+(1:nc)
if (any(NVL(a$base,0)!=0)) { u <- u[-a$base] }
name <- "b2starmixhomophily"
coef.names <- paste("b2starmix", a$k, attrname, namescov[u], sep=".")
inputs <- c(a$k, nodecov, u)
attr(inputs, "ParamsBeforeCov") <- length(a$k)
}
list(name = name, coef.names = coef.names,
inputs = inputs, minval=0)
}
InitErgmTerm.b2twostar <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("b1attrname", "b2attrname", "base", "b1levels", "b2levels"),
vartypes = c("character", "character", "numeric", "character,numeric,logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE, FALSE))
b1attrarg <- a$b1attrname
b2attrarg <- a$b2attrname
b1levels <- if(!is.null(a$b1levels)) I(a$b1levels) else NULL
b2levels <- if(!is.null(a$b2levels)) I(a$b2levels) else NULL
}else{
a <- check.ErgmTerm (nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("b1attr", "b2attr", "base", "b1levels", "b2levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE, FALSE, FALSE))
b1attrarg <- a$b1attr
b2attrarg <- a$b2attr
b1levels <- a$b1levels
b2levels <- a$b2levels
}
nb1 <- get.network.attribute(nw, "bipartite")
n <- network.size(nw)
b1nodecov <- ergm_get_vattr(b1attrarg, nw, bip = "b1")
b1attrname <- attr(b1nodecov, "name")
b1u <- ergm_attr_levels(b1levels, b1nodecov, nw, sort(unique(b1nodecov)))
if(is.null(b2attrarg)) { b2attrarg <- b1attrarg }
b2nodecov <- ergm_get_vattr(b2attrarg, nw, bip = "b2")
b2attrname <- attr(b2nodecov, "name")
b2u <- ergm_attr_levels(b2levels, b2nodecov, nw, sort(unique(b2nodecov)))
nr <- length(b1u)
nc <- length(b2u)
levels2.grid <- expand.grid(row = b2u, col = b1u, col2 = b1u, stringsAsFactors=FALSE)
indices2.grid <- expand.grid(row = 1:nc, col = 1:nr, col2 = 1:nr)
levels2.list <- transpose(levels2.grid[indices2.grid$col <= indices2.grid$col2,])
indices2.grid <- indices2.grid[indices2.grid$col <= indices2.grid$col2,]
levels2.sel <- if((!hasName(attr(a,"missing"), "levels2") || attr(a,"missing")["levels2"]) && any(NVL(a$base,0)!=0)) levels2.list[-a$base]
else ergm_attr_levels(a$levels2, list(row = b2nodecov, col = b1nodecov, col2 = b1nodecov), nw, levels2.list)
rows2keep <- match(levels2.sel,levels2.list, NA)
rows2keep <- rows2keep[!is.na(rows2keep)]
u <- indices2.grid[rows2keep,]
b1nodecov <- match(b1nodecov,b1u,nomatch=length(b1u)+1)
b2nodecov <- match(b2nodecov,b2u,nomatch=length(b2u)+1)
coef.names <- paste("b2twostar", b2attrname, b2u[u[,1]], b1attrname,
apply(matrix(b1u[cbind(u[,2], u[,3])],ncol=2), 1, paste, collapse="."),
sep=".")
list(name = "b2twostar", coef.names = coef.names,
inputs = c(b1nodecov, b2nodecov, u[,1], u[,2], u[,3]), minval=0)
}
InitErgmTerm.balance<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist)
list(name="balance", coef.names="balance", dependence=TRUE, minval=0)
}
InitErgmTerm.concurrent<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("by", "levels"),
vartypes = c("character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("by", "levels"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE))
levels <- a$levels
}
byarg <- a$by
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
ui <- seq(along=u)
}
if(!is.null(byarg)) {
if(length(u)==0) {return(NULL)}
coef.names <- paste("concurrent",".", attrname,u, sep="")
name <- "concurrent_by_attr"
inputs <- c(ui, nodecov)
}else{
coef.names <- "concurrent"
name <- "concurrent"
inputs <- NULL
}
list(name=name, coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw))
}
InitErgmTerm.ctriple<-InitErgmTerm.ctriad<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname","diff", "levels"),
vartypes = c("character","logical", "character,numeric,logical"),
defaultvalues = list(NULL,FALSE,NULL),
required = c(FALSE,FALSE,FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr","diff", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL,FALSE,NULL),
required = c(FALSE,FALSE,FALSE))
attrarg <- a$attr
levels <- a$levels
}
diff <- a$diff;
if(!is.null(attrarg)){
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
ui <- seq(along=u)
if (!diff) {
coef.names <- paste("ctriple",attrname,sep=".")
inputs <- c(nodecov)
} else {
coef.names <- paste("ctriple", attrname, u, sep=".")
inputs <- c(ui, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(ui)
}
}else{
coef.names <- "ctriple"
inputs <- NULL
}
list(name="ctriple", coef.names=coef.names, inputs=inputs, minval = 0)
}
InitErgmTerm.cycle <- function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("k","semi"),
vartypes = c("numeric","logical"),
defaultvalues = list(NULL,FALSE),
required = c(TRUE,FALSE))
if(any(a$k > network.size(nw))) {
ergm_Init_warn("cycles of length greater than the network size cannot exist and their statistics will be omitted")
a$k <- a$k[a$k <= network.size(nw)]
}
if(!is.directed(nw) && any(a$k < 3)) {
ergm_Init_warn("cycles of length less than 3 cannot exist in an undirected network and their statistics will be omitted")
a$k <- a$k[a$k >= 3]
}
if(any(a$k < 2)) {
ergm_Init_warn("cycles of length less than 2 cannot exist and their statistics will be omitted")
a$k <- a$k[a$k >= 2]
}
if(is.directed(nw) && a$semi && any(a$k == 2)) {
ergm_Init_warn("semicycles of length 2 are not currently supported and their statistics will be omitted")
a$k <- a$k[a$k >= 3]
}
if (length(a$k)==0) return(NULL)
semi<-is.directed(nw)&&a$semi
if(semi)
basenam<-"semicycle"
else
basenam<-"cycle"
list(name="cycle",
coef.names = paste(basenam, a$k, sep=""),
inputs = c(a$semi, max(a$k), (2:max(a$k)) %in% a$k),
minval = 0)
}
InitErgmTerm.degcor<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE)
deg=summary(nw ~ sociality(nodes=TRUE))
el=as.edgelist(nw)
deg1<-deg[el[,1]]
deg2<-deg[el[,2]]
alldeg<-c(deg1,deg2)
sigma2<-(sum(alldeg*alldeg)-length(alldeg)*(mean(alldeg)^2))
list(name="degcor",
coef.names = "degcor",
inputs=sigma2,
dependence = TRUE
)
}
InitErgmTerm.degcrossprod<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE)
list(name="degcrossprod",
coef.names = "degcrossprod",
inputs=2*summary(nw ~ edges),
dependence = TRUE
)
}
InitErgmTerm.degrange<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- a$levels
}
from<-a$from; to<-a$to; byarg <- a$by; homophily <- a$homophily
to <- ifelse(to==Inf, network.size(nw)+1, to)
if(length(to)==1 && length(from)>1) to <- rep(to, length(from))
else if(length(from)==1 && length(to)>1) from <- rep(from, length(to))
  else if(length(from)!=length(to)) ergm_Init_abort("The arguments 'from' and 'to' of term degrange must either have the same length, or one of them must have length 1.")
  else if(any(from>=to)) ergm_Init_abort("Term degrange must have from<to.")
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(from,lu), rep(to,lu), rep(1:lu, rep(length(from), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[3,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(from==0)) {
emptynwstats <- rep(0, length(from))
emptynwstats[from==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("deg",from,"+",sep=""),
paste("deg",from,"to",to,sep=""))
name <- "degrange"
inputs <- c(rbind(from,to))
} else if (homophily) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("deg",from,"+", ".homophily.",attrname,sep=""),
paste("deg",from,"to",to, ".homophily.",attrname,sep=""))
name <- "degrange_w_homophily"
inputs <- c(rbind(from,to), nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- ifelse(du[2,]>=network.size(nw)+1,
paste("deg",du[1,],"+.", attrname, u[du[3,]],sep=""),
paste("deg",du[1,],"to",du[2,],".",attrname, u[du[3,]],sep=""))
name <- "degrange_by_attr"
inputs <- c(as.vector(du), nodecov)
}
if (!is.null(emptynwstats)){
list(name=name,coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=TRUE, minval = 0)
}else{
list(name=name,coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw), conflicts.constraints="degreedist")
}
}
InitErgmTerm.degree<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- a$levels
}
d<-a$d; byarg <- a$by; homophily <- a$homophily
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[2,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(d==0)) {
emptynwstats <- rep(0, length(d))
emptynwstats[d==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(d)==0){return(NULL)}
coef.names <- paste("degree",d,sep="")
name <- "degree"
inputs <- c(d)
} else if (homophily) {
if(length(d)==0){return(NULL)}
coef.names <- paste("deg", d, ".homophily.",attrname, sep="")
name <- "degree_w_homophily"
inputs <- c(d, nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- paste("deg", du[1,], ".", attrname,u[du[2,]], sep="")
name <- "degree_by_attr"
inputs <- c(as.vector(du), nodecov)
}
  list(name = name, coef.names = coef.names, inputs = inputs, emptynwstats = emptynwstats, dependence=TRUE,
       minval = 0, maxval=network.size(nw), conflicts.constraints="degreedist")
}
InitErgmTerm.degree1.5<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="degreepopularity", coef.names="degree1.5",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="degreedist")
}
InitErgmTerm.degreepopularity<-function (nw, arglist, ...) {
.Deprecated("degree1.5")
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="degreepopularity", coef.names="degreepopularity",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="degreedist")
}
InitErgmTerm.density<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="density", coef.names="density", dependence=FALSE, minval = 0, maxval = 1, conflicts.constraints="edges")
}
InitErgmTerm.diff <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attrname","pow", "dir", "sign.action"),
vartypes = c("character","numeric", "character", "character"),
defaultvalues = list(NULL,1, "t-h", "identity"),
required = c(TRUE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attr","pow", "dir", "sign.action"),
vartypes = c(ERGM_VATTR_SPEC,"numeric", "character", "character"),
defaultvalues = list(NULL,1, "t-h", "identity"),
required = c(TRUE, FALSE, FALSE, FALSE))
attrarg <- a$attr
}
nodecov <- ergm_get_vattr(attrarg, nw, accept="numeric")
attrname <- attr(nodecov, "name")
DIRS <- c("t-h", "tail-head", "b1-b2",
"h-t", "head-tail", "b2-b1")
dir <- match.arg(tolower(a$dir), DIRS)
dir.mul <- if(match(dir, DIRS)<=3) +1 else -1
SIGN.ACTIONS <- c("identity", "abs", "posonly", "negonly")
sign.action <- match.arg(tolower(a$sign.action), SIGN.ACTIONS)
sign.code <- match(sign.action, SIGN.ACTIONS)
if(sign.action!="abs" && !is.directed(nw) && !is.bipartite(nw)) ergm_Init_inform("Note that behavior of term diff() on unipartite, undirected networks may be unexpected. See help(\"ergm-terms\") for more information.")
if(sign.code %in% c(1, 4) && a$pow!=round(a$pow)) ergm_Init_abort("In term diff(attr, pow, sign=",a$sign,"), pow must be an integer.")
list(name="diff",
coef.names = paste0("diff", if(a$pow!=1) a$pow else "", if(sign.action!="identity") paste0(".", sign.action), if(sign.action!="abs") paste0(".", dir), ".", attrname),
inputs = c(a$pow, dir.mul, sign.code, nodecov),
dependence = FALSE
)
}
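# Sketch (assumption: ergm attached with its bundled florentine data; not part
# of the original source): on an undirected network, diff() with
# sign.action="abs" and pow=1 reduces to absdiff(), so the two summaries
# below should match.
if (requireNamespace("ergm", quietly = TRUE)) {
  library(ergm)
  data(florentine)
  print(summary(flomarriage ~ diff("wealth", sign.action = "abs")))
  print(summary(flomarriage ~ absdiff("wealth")))
}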
InitErgmTerm.dsp<-function(nw, arglist, cache.sp=TRUE, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(TRUE))
d <- a$d
if (any(d==0)) {
emptynwstats <- rep(0, length(d))
if(is.bipartite(nw)){
nb1 <- get.network.attribute(nw, "bipartite")
nb2 <- network.size(nw) - nb1
emptynwstats[d==0] <- nb1*(nb1-1)/2 + nb2*(nb2-1)/2
}else{
emptynwstats[d==0] <- network.dyadcount(nw,FALSE)
}
}else{
emptynwstats <- NULL
}
ld<-length(d)
if(ld==0){return(NULL)}
if(is.directed(nw)){dname <- "tdsp"}else{dname <- "dsp"}
if (!is.null(emptynwstats)){
list(name=dname, coef.names=paste("dsp",d,sep=""),
inputs=c(d), emptynwstats=emptynwstats, minval = 0, auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}else{
list(name=dname, coef.names=paste("dsp",d,sep=""),inputs=c(d), minval = 0, auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
}
InitErgmTerm.dyadcov<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("x","attrname"),
vartypes = c("matrix,network,character","character"),
defaultvalues = list(NULL,NULL),
required = c(TRUE,FALSE))
if(is.network(a$x))
xm<-as.matrix.network(a$x,matrix.type="adjacency",a$attrname)
else if(is.character(a$x)){
xm<-get.network.attribute(nw,a$x)
if (is.null(xm)){
ergm_Init_abort("There is no network attribute named ",a$x)
}
}
else
xm<-as.matrix(a$x)
if(!is.null(a$attrname))
cn<-paste("dyadcov", as.character(sys.call(0)[[3]][2]),
as.character(a$attrname), sep = ".")
else
cn<-paste("dyadcov", as.character(sys.call(0)[[3]][2]), sep = ".")
if(is.directed(nw)){
if (any(xm[upper.tri(xm)]!=t(xm)[upper.tri(xm)])){
xm[lower.tri(xm)]<-t(xm)[lower.tri(xm)]
ergm_Init_warn("asymmetric covariate in dyadcov; using upper triangle only")
}
coef.names <- paste(cn, c("mutual","utri","ltri"),sep=".")
}else{
coef.names <- cn
}
inputs = c(NCOL(xm), as.double(xm))
attr(inputs, "ParamsBeforeCov") <- 1
list(name = "dyadcov", coef.names=coef.names, inputs=inputs, dependence=FALSE)
}
InitErgmTerm.edgecov <- function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("x", "attrname"),
vartypes = c("matrix,network,character", "character"),
defaultvalues = list(NULL, NULL),
required = c(TRUE, FALSE))
if(is.network(a$x))
xm<-as.matrix.network(a$x,matrix.type="adjacency",a$attrname)
else if(is.character(a$x)){
xm<-get.network.attribute(nw,a$x)
if (is.null(xm)){
ergm_Init_abort("There is no network attribute named ",a$x)
}
}
else
xm<-as.matrix(a$x)
if(!is.null(a$attrname)) {
cn<-paste("edgecov", as.character(a$attrname), sep = ".")
} else {
cn<-paste("edgecov", as.character(sys.call(0)[[3]][2]), sep = ".")
}
inputs <- c(NCOL(xm), as.double(xm))
attr(inputs, "ParamsBeforeCov") <- 1
list(name="edgecov", coef.names = cn, inputs = inputs, dependence=FALSE,
minval = sum(c(xm)[c(xm)<0]),
maxval = sum(c(xm)[c(xm)>0])
)
}
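# InitErgmTerm.edges: edge count; dyad-independent, bounded above by the number
# of dyads. Usage sketch (hypothetical network `nw`):
#   ergm(nw ~ edges)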
InitErgmTerm.edges<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="edges", coef.names="edges", dependence=FALSE,
minval = 0, maxval = network.dyadcount(nw,FALSE), conflicts.constraints="edges")
}
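# InitErgmTerm.esp: edgewise shared partners ("tesp" on directed networks),
# one statistic per value of d. Usage sketch:
#   summary(nw ~ esp(1:3))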
InitErgmTerm.esp<-function(nw, arglist, cache.sp=TRUE, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(TRUE))
d<-a$d
ld<-length(d)
if(ld==0){return(NULL)}
if(is.directed(nw)){dname <- "tesp"}else{dname <- "esp"}
list(name=dname, coef.names=paste("esp",d,sep=""), inputs=c(d), minval=0, auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
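# InitErgmTerm.gwb1degree: geometrically weighted mode-1 degree for bipartite
# networks. With fixed=FALSE the term is curved: per-degree statistics named
# "gwb1degree#d" plus a decay parameter supplied via GWDECAY. With fixed=TRUE a
# single statistic is computed at the supplied decay, optionally split by a
# nodal attribute. The same fixed/non-fixed pattern recurs in the gw* terms below.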
InitErgmTerm.gwb1degree<-function(nw, arglist, gw.cutoff=30, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay", "fixed", "attrname","cutoff", "levels"),
vartypes = c("numeric", "logical", "character","numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay", "fixed", "attr","cutoff", "levels"),
vartypes = c("numeric", "logical", ERGM_VATTR_SPEC,"numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
decay<-a$decay; fixed<-a$fixed
cutoff<-a$cutoff
nb1 <- get.network.attribute(nw,"bipartite")
maxesp <- min(cutoff, network.size(nw)-nb1)
d <- 1:maxesp
if (!is.null(attrarg) && !fixed) {
ergm_Init_abort("The gwb1degree term cannot yet handle a nonfixed decay ",
"term with an attribute. Use fixed=TRUE.")
}
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwb1degree': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
ld<-length(d)
if(ld==0){return(NULL)}
    c(list(minval=0, maxval=network.size(nw), dependence=TRUE, name="b1degree", coef.names=paste("gwb1degree#",d,sep=""),
conflicts.constraints="b1degreedist", params=list(gwb1degree=NULL,gwb1degree.decay=decay)), GWDECAY)
} else {
if(is.null(a$decay)) stop("Term 'gwb1degree' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw, bip="b1")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if(nrow(du)==0) {return(NULL)}
name <- "gwb1degree_by_attr"
coef.names <- paste("gwb1deg", decay, ".",attrname, u, sep="")
inputs <- c(decay, nodecov)
}else{
name <- "gwb1degree"
coef.names <- paste("gwb1deg.fixed.",decay,sep="")
inputs <- c(decay)
}
list(minval=0, maxval=network.size(nw), dependence=TRUE, name=name, coef.names=coef.names, inputs=inputs, conflicts.constraints="b1degreedist")
}
}
InitErgmTerm.gwb1dsp<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay","fixed","cutoff"),
vartypes = c("numeric","logical","numeric"),
defaultvalues = list(NULL, FALSE, gw.cutoff),
required = c(FALSE, FALSE, FALSE))
decay<-a$decay
fixed<-a$fixed
cutoff<-a$cutoff
  decay <- decay[1]
type <- "OSP"
typecode <- 4
basenam <- "gwb1dsp"
maxdsp <- min(cutoff, network.size(nw) - nw %n% "bipartite")
if(!fixed){
d <- 1:maxdsp
if(length(d) == 0)
return(NULL)
params<-list(gwb1dsp=NULL,gwb1dsp.decay=decay)
c(list(name="ddspbwrap", coef.names=paste("b1dsp
inputs=c(if(!cache.sp) -1,typecode,d), params=params, auxiliaries=if(cache.sp) .spcache.aux(type) else NULL), GWDECAY)
}else{
coef.names <- paste("gwb1dsp.fixed",decay,sep=".")
list(name="dgwdspbwrap", coef.names=coef.names, inputs=c(if(!cache.sp) -1,decay,typecode,maxdsp), auxiliaries=if(cache.sp) .spcache.aux(type) else NULL)
}
}
InitErgmTerm.gwb2degree<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay", "fixed", "attrname","cutoff", "levels"),
vartypes = c("numeric", "logical", "character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay", "fixed", "attr","cutoff", "levels"),
vartypes = c("numeric", "logical", ERGM_VATTR_SPEC,"numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
decay<-a$decay; fixed<-a$fixed
cutoff<-a$cutoff
nb1 <- get.network.attribute(nw,"bipartite")
maxesp <- min(cutoff,nb1)
d <- 1:maxesp
if (!is.null(attrarg) && !fixed) {
ergm_Init_abort("The gwb2degree term cannot yet handle a nonfixed decay ",
"term with an attribute. Use fixed=TRUE.") }
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwb2degree': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
ld<-length(d)
if(ld==0){return(NULL)}
    c(list(minval=0, maxval=network.size(nw), dependence=TRUE, name="b2degree", coef.names=paste("gwb2degree#",d,sep=""),
conflicts.constraints="b2degreedist", params=list(gwb2degree=NULL,gwb2degree.decay=decay)), GWDECAY)
} else {
if(is.null(a$decay)) stop("Term 'gwb2degree' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw, bip="b2")
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if(nrow(du)==0) {return(NULL)}
name <- "gwb2degree_by_attr"
coef.names <- paste("gwb2deg", decay, ".", attrname, u, sep="")
inputs <- c(decay, nodecov)
}else{
name <- "gwb2degree"
coef.names <- paste("gwb2deg.fixed.",decay,sep="")
inputs <- c(decay)
}
list(minval=0, maxval=network.size(nw), dependence=TRUE, name=name, coef.names=coef.names, inputs=inputs, conflicts.constraints="b2degreedist")
}
}
InitErgmTerm.gwb2dsp<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=TRUE,
varnames = c("decay","fixed","cutoff"),
vartypes = c("numeric","logical","numeric"),
defaultvalues = list(NULL, FALSE, gw.cutoff),
required = c(FALSE, FALSE, FALSE))
decay<-a$decay
fixed<-a$fixed
cutoff<-a$cutoff
  decay <- decay[1]
type <- "ISP"
typecode <- 5
basenam <- "gwb2dsp"
maxdsp <- min(cutoff, nw %n% "bipartite")
if(!fixed){
d <- 1:maxdsp
if(length(d) == 0)
return(NULL)
params<-list(gwb2dsp=NULL,gwb2dsp.decay=decay)
c(list(name="ddspbwrap", coef.names=paste("b2dsp
inputs=c(if(!cache.sp) -1,typecode,d), params=params, auxiliaries=if(cache.sp) .spcache.aux(type) else NULL), GWDECAY)
}else{
coef.names <- paste("gwb2dsp.fixed",decay,sep=".")
list(name="dgwdspbwrap", coef.names=coef.names, inputs=c(if(!cache.sp) -1,decay,typecode,maxdsp), auxiliaries=if(cache.sp) .spcache.aux(type) else NULL)
}
}
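# InitErgmTerm.gwdegree: geometrically weighted degree for undirected networks,
# optionally computed separately by a nodal attribute (fixed=TRUE only).
# Usage sketch: ergm(nw ~ edges + gwdegree(0.5, fixed=TRUE))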
InitErgmTerm.gwdegree<-function(nw, arglist, gw.cutoff=30, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("decay", "fixed", "attrname","cutoff", "levels"),
vartypes = c("numeric", "logical", "character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("decay", "fixed", "attr","cutoff", "levels"),
vartypes = c("numeric", "logical", ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
decay<-a$decay; fixed<-a$fixed
cutoff<-a$cutoff
maxesp <- min(cutoff,network.size(nw)-1)
d <- 1:maxesp
if (!is.null(attrarg) && !fixed) {
ergm_Init_abort("The gwdegree term cannot yet handle a nonfixed decay ",
"term with an attribute. Use fixed=TRUE.")
}
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwdegree': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
ld<-length(d)
if(ld==0){return(NULL)}
    c(list(minval=0, maxval=network.size(nw), dependence=TRUE, name="degree", coef.names=paste("gwdegree#",d,sep=""),
conflicts.constraints="degreedist", params=list(gwdegree=NULL,gwdegree.decay=decay)), GWDECAY)
} else {
if(is.null(a$decay)) stop("Term 'gwdegree' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if(nrow(du)==0) {return(NULL)}
name <- "gwdegree_by_attr"
coef.names <- paste("gwdeg", decay, ".", attrname, u, sep="")
inputs <- c(decay, nodecov)
}else{
name <- "gwdegree"
coef.names <- paste("gwdeg.fixed.",decay,sep="")
inputs <- c(decay)
}
list(minval=0, maxval=network.size(nw), dependence=TRUE, name=name, coef.names=coef.names, inputs=inputs, conflicts.constraints="degreedist")
}
}
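# InitErgmTerm.gwdsp: geometrically weighted dyadwise shared partners. The
# former `alpha` argument is rejected in favor of `decay`.
# Usage sketch: summary(nw ~ gwdsp(0.25, fixed=TRUE))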
InitErgmTerm.gwdsp<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("decay","fixed","cutoff","alpha"),
vartypes = c("numeric","logical","numeric","numeric"),
defaultvalues = list(NULL, FALSE, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE))
if(!is.null(a$alpha)){
ergm_Init_abort("For consistency with gw*degree terms, in all gw*sp and dgw*sp terms the argument ", sQuote("alpha"), " has been renamed to " ,sQuote("decay"), ".")
}
decay<-a$decay;fixed<-a$fixed
cutoff<-a$cutoff
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwdsp': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
maxesp <- min(cutoff,network.size(nw)-2)
d <- 1:maxesp
ld<-length(d)
if(ld==0){return(NULL)}
if(is.directed(nw)){dname <- "tdsp"}else{dname <- "dsp"}
c(list(name=dname, coef.names=paste("gwdsp
inputs=c(d), params=list(gwdsp=NULL,gwdsp.decay=decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL),
GWDECAY)
}else{
if(is.null(a$decay)) stop("Term 'gwdsp' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
    coef.names <- paste("gwdsp.fixed.",decay,sep="")
if(is.directed(nw)){dname <- "gwtdsp"}else{dname <- "gwdsp"}
list(name=dname, coef.names=coef.names, inputs=c(decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
}
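# InitErgmTerm.gwesp: geometrically weighted edgewise shared partners, the
# usual transitivity term. Usage sketch (hypothetical network `nw`):
#   ergm(nw ~ edges + gwesp(0.25, fixed=TRUE))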
InitErgmTerm.gwesp<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("decay","fixed","cutoff", "alpha"),
vartypes = c("numeric","logical","numeric", "numeric"),
defaultvalues = list(NULL, FALSE, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE))
if(!is.null(a$alpha)){
ergm_Init_abort("For consistency with gw*degree terms, in all gw*sp and dgw*sp terms the argument ", sQuote("alpha"), " has been renamed to " ,sQuote("decay"), ".")
}
decay<-a$decay;fixed<-a$fixed
cutoff<-a$cutoff
  decay <- decay[1]
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwesp': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
maxesp <- min(cutoff,network.size(nw)-2)
d <- 1:maxesp
ld<-length(d)
if(ld==0){return(NULL)}
if(is.directed(nw)){dname <- "tesp"}else{dname <- "esp"}
c(list(name=dname, coef.names=paste("esp
inputs=c(d), params=list(gwesp=NULL,gwesp.decay=decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL),
GWDECAY)
}else{
if(is.null(a$decay)) stop("Term 'gwesp' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
coef.names <- paste("gwesp.fixed.",decay,sep="")
if(is.directed(nw)){dname <- "gwtesp"}else{dname <- "gwesp"}
list(name=dname, coef.names=coef.names, inputs=c(decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
}
InitErgmTerm.gwidegree<-function(nw, arglist, gw.cutoff=30, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("decay", "fixed", "attrname","cutoff", "levels"),
vartypes = c("numeric", "logical", "character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("decay", "fixed", "attr","cutoff", "levels"),
vartypes = c("numeric", "logical", ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
decay<-a$decay; fixed<-a$fixed
cutoff<-a$cutoff
maxesp <- min(cutoff,network.size(nw)-1)
d <- 1:maxesp
if (!is.null(attrarg) && !fixed ) {
ergm_Init_abort("The gwidegree term cannot yet handle a nonfixed decay ",
"term with an attribute. Use fixed=TRUE.")
}
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwidegree': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
ld<-length(d)
if(ld==0){return(NULL)}
    c(list(minval=0, maxval=network.size(nw), dependence=TRUE, name="idegree", coef.names=paste("gwidegree#",d,sep=""),
conflicts.constraints="idegreedist", params=list(gwidegree=NULL,gwidegree.decay=decay)), GWDECAY)
} else {
if(is.null(a$decay)) stop("Term 'gwidegree' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if(nrow(du)==0) {return(NULL)}
name <- "gwidegree_by_attr"
coef.names <- paste("gwideg", decay, ".", attrname, u, sep="")
inputs <- c(decay, nodecov)
}else{
name <- "gwidegree"
coef.names <- paste("gwideg.fixed.",decay,sep="")
inputs <- c(decay)
}
list(minval=0, maxval=network.size(nw), dependence=TRUE, name=name, coef.names=coef.names, inputs=inputs, conflicts.constraints="idegreedist")
}
}
InitErgmTerm.gwnsp<-function(nw, arglist, cache.sp=TRUE, gw.cutoff=30, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("decay","fixed","cutoff", "alpha"),
vartypes = c("numeric","logical","numeric", "numeric"),
defaultvalues = list(NULL, FALSE, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE))
if(!is.null(a$alpha)){
ergm_Init_abort("For consistency with gw*degree terms, in all gw*sp and dgw*sp terms the argument ", sQuote("alpha"), " has been renamed to " ,sQuote("decay"), ".")
}
decay<-a$decay;fixed<-a$fixed
cutoff<-a$cutoff
  decay <- decay[1]
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwnsp': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
maxesp <- min(cutoff,network.size(nw)-2)
d <- 1:maxesp
ld<-length(d)
if(ld==0){return(NULL)}
if(is.directed(nw)){dname <- "tnsp"}else{dname <- "nsp"}
    c(list(name=dname, coef.names=paste("nsp#",d,sep=""),
inputs=c(d), params=list(gwnsp=NULL,gwnsp.decay=decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL),
GWDECAY)
}else{
if(is.null(decay)) stop("Term 'gwnsp' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
coef.names <- paste("gwnsp.fixed.",decay,sep="")
if(is.directed(nw)){dname <- "gwtnsp"}else{dname <- "gwnsp"}
list(name=dname, coef.names=coef.names, inputs=c(decay), auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
}
InitErgmTerm.gwodegree<-function(nw, arglist, gw.cutoff=30, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("decay", "fixed", "attrname","cutoff", "levels"),
vartypes = c("numeric", "logical", "character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("decay", "fixed", "attr","cutoff", "levels"),
vartypes = c("numeric", "logical", ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, gw.cutoff, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
decay<-a$decay; fixed<-a$fixed
cutoff<-a$cutoff
maxesp <- min(cutoff,network.size(nw)-1)
d <- 1:maxesp
if (!is.null(attrarg) && !fixed ) {
ergm_Init_abort("The gwodegree term cannot yet handle a nonfixed decay ",
"term with an attribute. Use fixed=TRUE.")
}
if(!fixed){
if(!is.null(a$decay)) warning("In term 'gwodegree': decay parameter 'decay' passed with 'fixed=FALSE'. 'decay' will be ignored. To specify an initial value for 'decay', use the 'init' control parameter.", call.=FALSE)
ld<-length(d)
if(ld==0){return(NULL)}
    c(list(minval=0, maxval=network.size(nw), dependence=TRUE, name="odegree", coef.names=paste("gwodegree#",d,sep=""),
conflicts.constraints="odegreedist", params=list(gwodegree=NULL,gwodegree.decay=decay)), GWDECAY)
} else {
if(is.null(a$decay)) stop("Term 'gwodegree' with 'fixed=TRUE' requires a decay parameter 'decay'.", call.=FALSE)
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if(nrow(du)==0) {return(NULL)}
name <- "gwodegree_by_attr"
coef.names <- paste("gwodeg", decay, ".", attrname, u, sep="")
inputs <- c(decay, nodecov)
}else{
name <- "gwodegree"
coef.names <- paste("gwodeg.fixed.",decay,sep="")
inputs <- c(decay)
}
list(minval=0, maxval=network.size(nw), dependence=TRUE, name=name, coef.names=coef.names, inputs=inputs, conflicts.constraints="odegreedist")
}
}
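# InitErgmTerm.hamming: (optionally weighted) Hamming distance between nw and a
# target network or edgelist x, with per-dyad weights taken from `cov` when
# supplied; emptynwstats is the distance from the empty network.
# Usage sketch (hypothetical target network `target`):
#   summary(nw ~ hamming(target))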
InitErgmTerm.hamming<-function (nw, arglist, ...) {
a <- check.ErgmTerm (nw, arglist,
varnames = c("x","cov","attrname","defaultweight"),
vartypes = c("matrix,network","matrix,network","character","numeric"),
defaultvalues = list(nw, NULL, NULL, NULL),
required = c(FALSE, FALSE, FALSE, FALSE))
if(is.network(a$x)){
if( is.null(a$attrname) || is.null(get.edge.attribute(a$x,a$attrname))){
xm<-as.edgelist(a$x)
} else {
xm<-as.edgelist(a$x,a$attrname)
}
}else if(is.character(a$x)){
xm<-get.network.attribute(nw,a$x)
xm<-as.edgelist(xm)
}else if(is.null(a$x)){
xm<-as.edgelist(nw)
}else if(is.matrix(a$x) && ncol(a$x)!=2){
xm<-as.edgelist(update(nw,a$x,matrix.type="adjacency"))
}else{
xm<-as.matrix(a$x)
}
if (is.vector(xm)) xm <- matrix(xm, ncol=2)
sc03 <- sys.call(0)[[3]]
coef.names <- "hamming"
if (is.null(a$cov)) {
minval <- 0
maxval <- network.dyadcount(nw,FALSE)
if (length(sc03)>1)
coef.names <- paste("hamming", as.character(sc03[[2]]), sep=".")
covm <- NULL
if (is.null(a$defaultweight))
a$defaultweight <- 1.0
emptynwstats <- NROW(xm) * a$defaultweight
} else {
if(is.network(a$cov)){
covm<-as.edgelist(a$cov,a$attrname)
if(length(covm)==2){covm <- matrix(covm,ncol=2)}
if(length(covm)==3){covm <- matrix(covm,ncol=3)}
if (NCOL(covm)==2)
covm <- cbind(covm,1)
}else if(is.character(a$cov)){
covm<-get.network.attribute(nw,a$cov)
covm<-as.edgelist(covm)
}else{
covm<-as.matrix(a$cov)
}
if (is.null(covm) || !is.matrix(covm) || NCOL(covm)!=3){
ergm_Init_abort("Improper dyadic covariate passed to hamming()")
}
emptynwstats <- sum(apply(xm, 1, function(a,b) sum(b[(a[1]==b[,1] & a[2]==b[,2]),3]), covm))
if (is.null(a$defaultweight))
a$defaultweight <- 0
if(!is.null(a$attrname) && length(sc03)>1){
coef.names<-paste("hamming", as.character(sc03[2]), "wt",
as.character(a$attrname), sep = ".")
}else if (length(sc03)>1) {
coef.names<-paste("hamming", as.character(sc03[2]), "wt",
as.character(sys.call(0)[[3]][3]), sep = ".")
}
minval <- sum(c(covm)[c(covm)<0])
    maxval <- sum(c(covm)[c(covm)>0])
}
if (!is.null(xm)) {
xm <- to_ergm_Cdouble(xm, prototype=nw)
}
if (!is.null(covm)) {
covm <- to_ergm_Cdouble(covm, prototype=nw)
}else covm <- 0
inputs <- c(xm, a$defaultweight, covm)
list(name="hamming", coef.names=coef.names,
inputs = inputs, emptynwstats = emptynwstats, dependence = FALSE,
minval = minval, maxval = maxval)
}
InitErgmTerm.hammingmix<-function (nw, arglist, ..., version=packageVersion("ergm")) {
.Deprecate_once(msg="hammingmix() has been deprecated due to disuse.")
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname","x","base","contrast"),
vartypes = c("character","matrix,network","numeric","logical"),
defaultvalues = list(NULL,nw,NULL,FALSE),
required = c(TRUE,FALSE,FALSE,FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE))
attrarg <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr", "x", "base", "levels", "levels2","contrast"),
vartypes = c(ERGM_VATTR_SPEC, "matrix,network", "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC,"logical"),
defaultvalues = list(NULL,nw,NULL,NULL,NULL,FALSE),
required = c(TRUE,FALSE,FALSE,FALSE,FALSE,FALSE),
dep.inform = list(FALSE, FALSE, "levels2", FALSE, FALSE, FALSE))
attrarg <- a$attr
}
x<-a$x
if (a$contrast) {
ergm_Init_abort("The 'contrast' argument of the hammingmix term is deprecated. Use 'levels2' instead")
}
if(is.network(x)){
xm<-as.edgelist(x)
x<-paste(quote(x))
}else if(is.character(x)){
xm<-get.network.attribute(nw,x)
xm<-as.edgelist(xm)
}else{
xm<-as.matrix(x)
x<-paste(quote(x))
}
if (is.null(xm) || ncol(xm)!=2){
ergm_Init_abort("hammingmix() requires an edgelist")
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(a$levels, nodecov, nw, sort(unique(nodecov)))
namescov <- u
nr <- length(u)
nc <- length(u)
levels2.list <- transpose(expand.grid(row = u, col = u, stringsAsFactors=FALSE))
indices2.grid <- expand.grid(row = 1:nr, col = 1:nc)
levels2.sel <- if((!hasName(attr(a,"missing"), "levels2") || attr(a,"missing")["levels2"]) && any(NVL(a$base,0)!=0)) levels2.list[-a$base]
else ergm_attr_levels(a$levels2, list(row = nodecov, col = nodecov), nw, levels2.list)
rows2keep <- match(levels2.sel,levels2.list, NA)
rows2keep <- rows2keep[!is.na(rows2keep)]
u <- indices2.grid[rows2keep,]
nodecov.indices <- match(nodecov, namescov, nomatch=length(namescov) + 1)
coef.names <- paste("hammingmix",attrname,
apply(matrix(namescov[as.matrix(u)],ncol=2),1,paste,collapse="."),
sep=".")
inputs=c(to_ergm_Cdouble(xm, prototype=nw), u[,1], u[,2], nodecov.indices)
attr(inputs, "ParamsBeforeCov") <- nrow(u)
nw %v% "_tmp_nodecov" <- as.vector(nodecov)
if(version <= as.package_version("3.9.4")){
emptynwstats <- summary(nw ~ nodemix("_tmp_nodecov", base=a$base))
}else{
nodemix.call <- c(list(as.name("nodemix"),"_tmp_nodecov"), list(base=a$base, levels=a$levels, levels2=a$levels2)[!attr(a,"missing")[c("base","levels","levels2")]])
nodemix.call <- as.call(nodemix.call)
nodemix.form <- as.formula(call("~", nw, nodemix.call))
emptynwstats <- summary(nodemix.form)
}
list(name="hammingmix", coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=FALSE)
}
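# InitErgmTerm.idegrange: counts actors with in-degree in [from, to), overall,
# by attribute, or restricted to homophilous ties (homophily=TRUE).
# Usage sketch: summary(nw ~ idegrange(from=2, to=5))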
InitErgmTerm.idegrange<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- a$levels
}
from<-a$from; to<-a$to; byarg <- a$by; homophily <- a$homophily
to <- ifelse(to==Inf, network.size(nw)+1, to)
if(length(to)==1 && length(from)>1) to <- rep(to, length(from))
else if(length(from)==1 && length(to)>1) from <- rep(from, length(to))
else if(length(from)!=length(to)) ergm_Init_abort("The arguments of term idegrange must have arguments either of the same length, or one of them must have length 1.")
else if(any(from>=to)) ergm_Init_abort("Term idegrange must have from<to.")
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(from,lu), rep(to,lu), rep(1:lu, rep(length(from), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[3,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(from==0)) {
emptynwstats <- rep(0, length(from))
emptynwstats[from==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("ideg",from,"+",sep=""),
paste("ideg",from,"to",to,sep=""))
name <- "idegrange"
inputs <- c(rbind(from,to))
} else if (homophily) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("ideg",from,"+", ".homophily.",attrname,sep=""),
paste("ideg",from,"to",to, ".homophily.",attrname,sep=""))
name <- "idegrange_w_homophily"
inputs <- c(rbind(from,to), nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- ifelse(du[2,]>=network.size(nw)+1,
paste("ideg",du[1,],"+.", attrname, u[du[3,]],sep=""),
paste("ideg",du[1,],"to",du[2,],".",attrname, u[du[3,]],sep=""))
name <- "idegrange_by_attr"
inputs <- c(as.vector(du), nodecov)
}
if (!is.null(emptynwstats)){
list(name=name,coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=TRUE, minval = 0)
}else{
list(name=name,coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw), conflicts.constraints="idegreedist")
}
}
InitErgmTerm.idegree<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- a$levels
}
d<-a$d; byarg <- a$by; homophily <- a$homophily
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[2,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(d==0)) {
emptynwstats <- rep(0, length(d))
emptynwstats[d==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(d)==0){return(NULL)}
name <- "idegree"
coef.names <- paste("idegree",d,sep="")
inputs <- c(d)
} else if (homophily) {
if(length(d)==0){return(NULL)}
name <- "idegree_w_homophily"
coef.names <- paste("ideg", d, ".homophily.",attrname, sep="")
inputs <- c(d, nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
name <- "idegree_by_attr"
coef.names <- paste("ideg", du[1,], ".", attrname,u[du[2,]], sep="")
inputs <- c(as.vector(du), nodecov)
}
  list(name = name, coef.names = coef.names, inputs = inputs, emptynwstats = emptynwstats,
       minval = 0, maxval = network.size(nw), dependence = TRUE, conflicts.constraints="idegreedist")
}
InitErgmTerm.idegree1.5<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="idegreepopularity", coef.names="idegree1.5",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="idegreedist")
}
InitErgmTerm.idegreepopularity<-function (nw, arglist, ...) {
.Deprecated("idegree1.5")
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="idegreepopularity", coef.names="idegreepopularity",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="idegreedist")
}
InitErgmTerm.intransitive<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="intransitive", coef.names="intransitive", minval = 0)
}
InitErgmTerm.isolatededges <- function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE, bipartite=NULL,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="isolatededges",
coef.names = "isolatededges",
emptynwstats = 0,
minval = 0,
maxval = if(is.bipartite(nw)) min(nw%n%"bipartite", network.size(nw) - nw%n%"bipartite") else floor(network.size(nw)/2),
dependence = TRUE
)
}
InitErgmTerm.isolates <- function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="isolates",
coef.names = "isolates",
emptynwstats = network.size(nw),
minval = 0,
maxval = network.size(nw),
conflicts.constraints="degreedist"
)
}
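# InitErgmTerm.istar: in-star counts of order(s) k on directed networks; an
# attribute restricts the count to stars whose nodes match on it.
# Usage sketch: ergm(nw ~ edges + istar(2))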
InitErgmTerm.istar<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("k", "attrname", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("k", "attr", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
k <- a$k
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
  }
lk<-length(k)
if(lk==0){return(NULL)}
if(!is.null(attrarg)){
coef.names <- paste("istar",k,".",attrname,sep="")
inputs <- c(k, nodecov)
attr(inputs, "ParamsBeforeCov") <- lk
}else{
coef.names <- paste("istar",k,sep="")
inputs <- c(k)
}
list(name="istar", coef.names=coef.names, inputs=inputs, minval = 0, conflicts.constraints="idegreedist")
}
InitErgmTerm.kstar<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("k", "attrname", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("k", "attr", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
k<-a$k
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
lk<-length(k)
if(lk==0){return(NULL)}
if(!is.null(attrarg)){
coef.names <- paste("kstar",k,".",attrname,sep="")
inputs <- c(k, nodecov)
attr(inputs, "ParamsBeforeCov") <- lk
}else{
coef.names <- paste("kstar",k,sep="")
inputs <- c(k)
}
list(name="kstar", coef.names=coef.names, inputs=inputs, minval = 0, conflicts.constraints="degreedist")
}
InitErgmTerm.localtriangle<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("x", "attrname"),
vartypes = c("matrix,network", "character"),
defaultvalues = list(NULL, NULL),
required = c(TRUE, FALSE))
x<-a$x;attrname<-a$attrname
if(is.network(x))
xm<-as.matrix(x, matrix.type="adjacency", attrname)
else if(is.character(x))
xm<-as.matrix(nw, matrix.type="adjacency", x)
else
xm<-as.matrix(x)
if(!isSymmetric(xm)){
ergm_Init_warn("localtriangle requires an undirected neighborhood. Using only mutual ties.")
xm <- pmin(xm[],(t(xm))[])
}
if(!is.null(attrname))
coef.names <- paste("localtriangle", attrname, sep = ".")
else
coef.names <- paste("localtriangle", as.character(sys.call(0)[[3]][2]),
sep = ".")
inputs <- c(NROW(xm), as.double(xm))
attr(inputs, "ParamsBeforeCov") <- 1
list(name="localtriangle", coef.names=coef.names, inputs=inputs)
}
InitErgmTerm.m2star<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="m2star", coef.names="m2star",dependence=TRUE, minval = 0)
}
InitErgmTerm.meandeg<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="meandeg", coef.names="meandeg", dependence=FALSE, minval=0, maxval=if(!is.bipartite(nw)) network.size(nw)-1, conflicts.constraints="edges")
}
InitErgmTerm.mm<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.11.0")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrs", "levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrs", "levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, -1),
required = c(TRUE, FALSE, FALSE))
}
spec <-
list(attrs = a$attrs, levels = a$levels) %>%
map_if(~!is(., "formula"), ~call("~", .)) %>%
map_if(~length(.)==2, ~call("~", .[[2]], .[[2]])) %>%
map(as.list) %>% map(~.[-1]) %>%
map(set_names, c("row", "col")) %>%
transpose() %>%
unlist(recursive=FALSE) %>%
map_if(~is.name(.)&&.==".", ~NULL) %>%
map_if(~is.call(.)||(is.name(.)&&.!="."), ~as.formula(call("~", .))) %>%
relist(skeleton=list(row=c(attrs=NA, levels=NA), col=c(attrs=NA, levels=NA))) %>%
transpose()
if(is(a$attrs, "formula"))
spec[["attrs"]] <- lapply(spec[["attrs"]], function(x){if(is(x,"formula")) environment(x) <- environment(a$attrs); x})
if(is(a$levels, "formula"))
spec[["levels"]] <- lapply(spec[["levels"]], function(x){if(is(x,"formula")) environment(x) <- environment(a$levels); x})
spec <- transpose(spec)
attrval <-
spec %>%
imap(function(spec, whose){
if(is.null(spec$attrs)){
list(valcodes =
rep(0L,
if(!is.bipartite(nw)) network.size(nw)
else if(whose=="row") nw%n%"bipartite"
else if(whose=="col") network.size(nw) - nw%n%"bipartite"
),
name = ".",
levels = NA,
levelcodes = 0
)
}else{
x <- ergm_get_vattr(spec$attrs, nw, bip = if(is.bipartite(nw)) c(row="b1",col="b2")[whose] else "n")
name <- attr(x, "name")
list(name=name, val=x, levels=spec$levels, unique=sort(unique(x)))
}
})
symm <- !is.directed(nw) && !is.bipartite(nw) && identical(spec$row$attrs, spec$col$attrs)
marg <- length(attrval$row$unique)==0 || length(attrval$col$unique)==0
attrval <- attrval %>%
map_if(~is.null(.$levelcodes), function(v){
v$levels <- ergm_attr_levels(v$levels, v$val, nw, levels=v$unique)
v$levelcodes <- seq_along(v$levels)
v$valcodes <- match(v$val, v$levels, nomatch=0)
v
})
levels2codes <- expand.grid(row=attrval$row$levelcodes, col=attrval$col$levelcodes) %>% transpose()
levels2 <- expand.grid(row=attrval$row$levels, col=attrval$col$levels, stringsAsFactors=FALSE) %>% transpose()
if(symm){
levels2keep <- levels2codes %>% map_lgl(with, row <= col)
levels2codes <- levels2codes[levels2keep]
levels2 <- levels2[levels2keep]
}
levels2sel <- ergm_attr_levels(a$levels2, list(row=attrval$row$val, col=attrval$col$val), nw, levels=levels2)
if(length(levels2sel) == 0) return(NULL)
levels2codes <- levels2codes[match(levels2sel,levels2, NA)]
levels2 <- levels2sel; rm(levels2sel)
levels2names <-
levels2 %>%
transpose() %>%
map(unlist) %>%
with(paste0(
"[",
if(attrval$row$name!=".")
paste0(attrval$row$name, "=", .$row)
else ".",
",",
if(attrval$col$name!=".")
paste0(attrval$col$name, "=", .$col)
else ".",
"]"))
coef.names <- paste0("mm",levels2names)
list(name = "mixmat",
coef.names = coef.names,
inputs = c(symm+marg*2, attrval$row$valcodes, attrval$col$valcodes, unlist(levels2codes)),
dependence = FALSE,
minval = 0)
}
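# InitErgmTerm.mutual: count of mutual dyads, optionally requiring the
# endpoints to match on an attribute (`same`, with diff= for per-level counts)
# or counted per level of `by`; bounded above by half the dyad count.
# Usage sketch: ergm(nw ~ edges + mutual)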
InitErgmTerm.mutual<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE, bipartite=NULL,
varnames = c("same", "by", "diff", "keep"),
vartypes = c("character", "character", "logical", "numeric"),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, FALSE, "levels"))
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE, bipartite=NULL,
varnames = c("same", "by", "diff", "keep", "levels"),
vartypes = c(ERGM_VATTR_SPEC, ERGM_VATTR_SPEC, "logical", "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, FALSE, NULL, NULL),
required = c(FALSE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, FALSE, "levels", FALSE))
}
if (!is.null(a$same) || !is.null(a$by)) {
if (!is.null(a$same)) {
attrarg <- a$same
if (!is.null(a$by))
ergm_Init_warn("Ignoring 'by' argument to mutual because 'same' exists")
}else{
attrarg <- a$by
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(a$levels, nodecov, nw, levels = sort(unique(nodecov)))
if((!hasName(attr(a,"missing"), "levels") || attr(a,"missing")["levels"]) && !is.null(a$keep)) u <- u[a$keep]
nodecov <- match(nodecov,u,nomatch=length(u)+1)
dontmatch <- nodecov==(length(u)+1)
nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
ui <- seq(along=u)
}
if (!is.null(a$same) || !is.null(a$by)) {
if (is.null(a$same)) {
coef.names <- paste("mutual.by", attrname, u, sep=".")
inputs <- c(ui, nodecov)
}else{
if (a$diff) {
coef.names <- paste("mutual.same", attrname, u, sep=".")
inputs <- c(ui, nodecov)
}else{
coef.names <- paste("mutual", attrname, sep=".")
inputs <- nodecov
}
}
if (is.null(a$same) && !is.null(a$by)) {
name <- "mutual_by_attr"
}else{
name <- "mutual"
}
}else{
name <- "mutual"
coef.names <- "mutual"
inputs <- NULL
}
maxval <- network.dyadcount(nw,FALSE)/2
list(name=name,
coef.names = coef.names,
inputs=inputs,
minval = 0,
maxval = maxval)
}
InitErgmTerm.nearsimmelian<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="nearsimmelian", coef.names="nearsimmelian", minval=0, maxval=network.dyadcount(nw,FALSE)*network.size(nw)*0.5)
}
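# InitErgmTerm.nodecov (alias nodemain): sum of a numeric nodal attribute over
# the endpoints of each edge. Usage sketch (hypothetical attribute "age"):
#   summary(nw ~ nodecov("age"))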
InitErgmTerm.nodecov<-InitErgmTerm.nodemain<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname","transform","transformname"),
vartypes = c("character","function","character"),
defaultvalues = list(NULL,function(x)x,""),
required = c(TRUE,FALSE,FALSE))
attrname<-a$attrname
f<-a$transform
f.name<-a$transformname
coef.names <- paste(paste("nodecov",f.name,sep=""),attrname,sep=".")
nodecov <- f(get.node.attr(nw, attrname, "nodecov", numeric=TRUE))
}else{
a <- check.ErgmTerm(nw, arglist, directed=NULL, bipartite=NULL,
varnames = c("attr"),
vartypes = c(ERGM_VATTR_SPEC),
defaultvalues = list(NULL),
required = c(TRUE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric", multiple="matrix")
coef.names <- nodecov_names(nodecov, "nodecov")
}
list(name="nodecov", coef.names=coef.names, inputs=c(nodecov), dependence=FALSE)
}
InitErgmTerm.nodefactor<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "base", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, LEVELS_BASE1),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if (attr(a,"missing")["levels"] && any(NVL(a$base,0)!=0)) {
u <- u[-a$base]
}
if (length(u)==0) {
return()
}
nodepos <- match(nodecov,u,nomatch=0)-1
inputs <- nodepos
list(name="nodefactor",
coef.names = paste("nodefactor", paste(attrname,collapse="."), u, sep="."),
       inputs = inputs,
dependence = FALSE,
minval = 0
)
}
InitErgmTerm.nodeicov<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname","transform","transformname"),
vartypes = c("character","function","character"),
defaultvalues = list(NULL,function(x)x,""),
required = c(TRUE,FALSE,FALSE))
attrname<-a$attrname
f<-a$transform
f.name<-a$transformname
coef.names <- paste(paste("nodeicov",f.name,sep=""),attrname,sep=".")
nodecov <- f(get.node.attr(nw, attrname, "nodeicov", numeric=TRUE))
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr"),
vartypes = c(ERGM_VATTR_SPEC),
defaultvalues = list(NULL),
required = c(TRUE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric", multiple="matrix")
coef.names <- nodecov_names(nodecov, "nodeicov")
}
list(name="nodeicov", coef.names=coef.names, inputs=c(nodecov), dependence=FALSE)
}
InitErgmTerm.nodeifactor<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr", "base", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, LEVELS_BASE1),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if (attr(a,"missing")["levels"] && any(NVL(a$base,0)!=0)) {
u <- u[-a$base]
}
if (length(u)==0) {
return()
}
nodepos <- match(nodecov,u,nomatch=0)-1
inputs <- nodepos
list(name="nodeifactor",
coef.names = paste("nodeifactor", paste(attrname,collapse="."), u, sep="."),
inputs = inputs,
dependence = FALSE,
minval = 0
)
}
InitErgmTerm.nodematch<-InitErgmTerm.match<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname", "diff", "keep", "levels"),
vartypes = c("character", "logical", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "diff", "keep", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if(attr(a,"missing")["levels"] && !is.null(a$keep)) u <- u[a$keep]
nodecov <- match(nodecov,u,nomatch=length(u)+1)
dontmatch <- nodecov==(length(u)+1)
nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
ui <- seq(along=u)
if (a$diff) {
coef.names <- paste("nodematch", paste(attrname,collapse="."), u, sep=".")
inputs <- c(ui, nodecov)
} else {
coef.names <- paste("nodematch", paste(attrname,collapse="."), sep=".")
inputs <- nodecov
}
list(name="nodematch",
coef.names = coef.names,
inputs = inputs,
dependence = FALSE,
minval = 0
)
}
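# InitErgmTerm.nodemix: mixing-matrix cells for a nodal attribute, with
# levels/levels2 selecting which cells enter the model; bipartite networks mix
# mode-1 against mode-2 levels. Usage sketch (hypothetical attribute "role"):
#   summary(nw ~ nodemix("role", levels2 = -1))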
InitErgmTerm.nodemix<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname", "base", "b1levels", "b2levels"),
vartypes = c("character", "numeric", "character,numeric,logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, "levels2", FALSE, FALSE))
attrarg <- a$attrname
b1levels <- if(!is.null(a$b1levels)) I(a$b1levels) else NULL
b2levels <- if(!is.null(a$b2levels)) I(a$b2levels) else NULL
}else if(version <= as.package_version("3.11.0")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "base", "b1levels", "b2levels", "levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, "levels2", FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
b1levels <- a$b1levels
b2levels <- a$b2levels
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "base", "b1levels", "b2levels", "levels", "levels2"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL, NULL, NULL, -1),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, "levels2", FALSE, FALSE, FALSE, FALSE))
attrarg <- a$attr
b1levels <- a$b1levels
b2levels <- a$b2levels
}
if (is.bipartite(nw) && is.directed(nw)) {
ergm_Init_abort("Directed bipartite networks are not currently possible")
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
if (is.bipartite(nw)) {
b1nodecov <- ergm_get_vattr(attrarg, nw, bip = "b1")
b2nodecov <- ergm_get_vattr(attrarg, nw, bip = "b2")
b1namescov <- ergm_attr_levels(b1levels, b1nodecov, nw, sort(unique(b1nodecov)))
b2namescov <- ergm_attr_levels(b2levels, b2nodecov, nw, sort(unique(b2nodecov)))
nr <- length(b1namescov)
nc <- length(b2namescov)
levels2.list <- transpose(expand.grid(row = b1namescov, col = b2namescov, stringsAsFactors=FALSE))
indices2.grid <- expand.grid(row = 1:nr, col = nr + 1:nc)
levels2.sel <- if((!hasName(attr(a,"missing"), "levels2") || attr(a,"missing")["levels2"]) && any(NVL(a$base,0)!=0)) levels2.list[-a$base]
else ergm_attr_levels(a$levels2, list(row = b1nodecov, col = b2nodecov), nw, levels2.list)
rows2keep <- match(levels2.sel,levels2.list, NA)
rows2keep <- rows2keep[!is.na(rows2keep)]
u <- indices2.grid[rows2keep,]
b1nodecov <- match(b1nodecov,b1namescov,nomatch=length(b1namescov)+1)
b2nodecov <- match(b2nodecov,b2namescov,nomatch=length(b2namescov)+1)
namescov <- c(b1namescov, b2namescov)
nodecov <- c(b1nodecov, b2nodecov)
cn <- paste("mix", paste(attrname,collapse="."), apply(matrix(namescov[as.matrix(u)],ncol=2),
1,paste,collapse="."), sep=".")
indmat <- matrix(0L, nrow = nr + 1, ncol = nc + 1)
u[,2L] <- u[,2L] - nr
indmat[as.matrix(u)] <- seq_len(NROW(u))
indmat <- indmat - 1L
} else {
u <- ergm_attr_levels(a$levels, nodecov, nw, sort(unique(nodecov)))
namescov <- u
nr <- length(u)
nc <- length(u)
levels2.list <- transpose(expand.grid(row = u, col = u, stringsAsFactors=FALSE))
indices2.grid <- expand.grid(row = 1:nr, col = 1:nc)
uun <- as.vector(outer(u,u,paste,sep="."))
if (!is.directed(nw)) {
rowleqcol <- indices2.grid$row <= indices2.grid$col
levels2.list <- levels2.list[rowleqcol]
indices2.grid <- indices2.grid[rowleqcol,]
uun <- uun[rowleqcol]
}
levels2.sel <- if((!hasName(attr(a,"missing"), "levels2") || attr(a,"missing")["levels2"]) && any(NVL(a$base,0)!=0)) levels2.list[-a$base]
else ergm_attr_levels(a$levels2, list(row = nodecov, col = nodecov), nw, levels2.list)
rows2keep <- match(levels2.sel,levels2.list, NA)
rows2keep <- rows2keep[!is.na(rows2keep)]
u <- indices2.grid[rows2keep,]
uun <- uun[rows2keep]
nodecov <- match(nodecov,namescov,nomatch=length(namescov)+1)
cn <- paste("mix", paste(attrname,collapse="."), uun, sep=".")
indmat <- matrix(0L, nrow = nr + 1, ncol = nc + 1)
indmat[as.matrix(u)] <- seq_len(NROW(u))
if(!is.directed(nw)) indmat <- indmat + t(indmat) - diag(diag(indmat))
indmat <- indmat - 1L
}
list(name = "nodemix", coef.names = cn,
dependence = FALSE,
minval = 0,
inputs = NULL,
nr = as.integer(nr + 1),
nc = as.integer(nc + 1),
indmat = as.integer(t(indmat)),
nodecov = as.integer(c(0L, nodecov) - 1L)
)
}
InitErgmTerm.nodeocov<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname","transform","transformname"),
vartypes = c("character","function","character"),
defaultvalues = list(NULL,function(x)x,""),
required = c(TRUE,FALSE,FALSE))
attrname<-a$attrname
f<-a$transform
f.name<-a$transformname
coef.names <- paste(paste("nodeocov",f.name,sep=""),attrname,sep=".")
nodecov <- f(get.node.attr(nw, attrname, "nodeocov", numeric=TRUE))
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr"),
vartypes = c(ERGM_VATTR_SPEC),
defaultvalues = list(NULL),
required = c(TRUE))
nodecov <- ergm_get_vattr(a$attr, nw, accept="numeric", multiple="matrix")
coef.names <- nodecov_names(nodecov, "nodeocov")
}
list(name="nodeocov", coef.names=coef.names, inputs=c(nodecov), dependence=FALSE)
}
InitErgmTerm.nodeofactor<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr", "base", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, LEVELS_BASE1),
required = c(TRUE, FALSE, FALSE),
dep.inform = list(FALSE, "levels", FALSE))
attrarg <- a$attr
levels <- a$levels
}
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
if (attr(a,"missing")["levels"] && any(NVL(a$base,0)!=0)) {
u <- u[-a$base]
}
if (length(u)==0) {
return()
}
nodepos <- match(nodecov,u,nomatch=0)-1
inputs <- nodepos
list(name="nodeofactor",
coef.names = paste("nodeofactor", paste(attrname,collapse="."), u, sep="."),
inputs = inputs,
dependence = FALSE,
minval = 0
)
}
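# InitErgmTerm.nsp: non-edgewise shared partners ("tnsp" on directed networks),
# with the d==0 statistic corrected on the empty network.
# Usage sketch: summary(nw ~ nsp(1:2))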
InitErgmTerm.nsp<-function(nw, arglist, cache.sp=TRUE, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(TRUE))
d<-a$d
if (any(d==0)) {
emptynwstats <- rep(0, length(d))
if(is.bipartite(nw)){
nb1 <- get.network.attribute(nw, "bipartite")
nb2 <- network.size(nw) - nb1
emptynwstats[d==0] <- nb1*(nb1-1)/2 + nb2*(nb2-1)/2
}else{
emptynwstats[d==0] <- network.dyadcount(nw,FALSE)
}
}else{
emptynwstats <- NULL
}
ld<-length(d)
if(ld==0){return(NULL)}
coef.names <- paste("nsp",d,sep="")
if(is.directed(nw)){dname <- "tnsp"}else{dname <- "nsp"}
if (!is.null(emptynwstats)) {
list(name=dname, coef.names=coef.names, inputs=c(d),
emptynwstats=emptynwstats, minval=0, auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
} else {
list(name=dname, coef.names=coef.names, inputs=c(d), minval=0, auxiliaries=if(cache.sp) .spcache.aux(if(is.directed(nw)) "OTP" else "UTP") else NULL)
}
}
InitErgmTerm.odegrange<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("from", "to", "by", "homophily", "levels"),
vartypes = c("numeric", "numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, Inf, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE, FALSE))
levels <- a$levels
}
from<-a$from; to<-a$to; byarg <- a$by; homophily <- a$homophily
to <- ifelse(to==Inf, network.size(nw)+1, to)
if(length(to)==1 && length(from)>1) to <- rep(to, length(from))
else if(length(from)==1 && length(to)>1) from <- rep(from, length(to))
else if(length(from)!=length(to)) ergm_Init_abort("The arguments of term odegrange must have arguments either of the same length, or one of them must have length 1.")
else if(any(from>=to)) ergm_Init_abort("Term odegrange must have from<to.")
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(from,lu), rep(to,lu), rep(1:lu, rep(length(from), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[3,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(from==0)) {
emptynwstats <- rep(0, length(from))
emptynwstats[from==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("odeg",from,"+",sep=""),
paste("odeg",from,"to",to,sep=""))
name <- "odegrange"
inputs <- c(rbind(from,to))
} else if (homophily) {
if(length(from)==0){return(NULL)}
coef.names <- ifelse(to>=network.size(nw)+1,
paste("odeg",from,"+", ".homophily.",attrname,sep=""),
paste("odeg",from,"to",to, ".homophily.",attrname,sep=""))
name <- "odegrange_w_homophily"
inputs <- c(rbind(from,to), nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
coef.names <- ifelse(du[2,]>=network.size(nw)+1,
paste("odeg",du[1,],"+.", attrname, u[du[3,]],sep=""),
paste("odeg",du[1,],"to",du[2,],".",attrname, u[du[3,]],sep=""))
name <- "odegrange_by_attr"
inputs <- c(as.vector(du), nodecov)
}
if (!is.null(emptynwstats)){
list(name=name,coef.names=coef.names, inputs=inputs,
emptynwstats=emptynwstats, dependence=TRUE, minval = 0)
}else{
list(name=name,coef.names=coef.names, inputs=inputs, dependence=TRUE, minval = 0, maxval=network.size(nw), conflicts.constraints="odegreedist")
}
}
InitErgmTerm.odegree<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", "character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("d", "by", "homophily", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, FALSE, NULL),
required = c(TRUE, FALSE, FALSE, FALSE))
levels <- a$levels
}
d<-a$d; byarg <- a$by; homophily <- a$homophily
emptynwstats<-NULL
if(!is.null(byarg)) {
nodecov <- ergm_get_vattr(byarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
if(!is.null(byarg) && !homophily) {
lu <- length(u)
du <- rbind(rep(d,lu), rep(1:lu, rep(length(d), lu)))
if (any(du[1,]==0)) {
emptynwstats <- rep(0, ncol(du))
tmp <- du[2,du[1,]==0]
for(i in 1:length(tmp)) tmp[i] <- sum(nodecov==tmp[i])
emptynwstats[du[1,]==0] <- tmp
}
} else {
if (any(d==0)) {
emptynwstats <- rep(0, length(d))
emptynwstats[d==0] <- network.size(nw)
}
}
if(is.null(byarg)) {
if(length(d)==0){return(NULL)}
name <- "odegree"
coef.names <- paste("odegree",d,sep="")
inputs <- c(d)
} else if (homophily) {
if(length(d)==0){return(NULL)}
name <- "odegree_w_homophily"
coef.names <- paste("odeg", d, ".homophily.",attrname, sep="")
inputs <- c(d, nodecov)
} else {
if(ncol(du)==0) {return(NULL)}
name <- "odegree_by_attr"
coef.names <- paste("odeg", du[1,], ".", attrname,u[du[2,]], sep="")
inputs <- c(as.vector(du), nodecov)
}
  list(name = name, coef.names = coef.names, inputs = inputs, emptynwstats = emptynwstats,
       dependence = TRUE, minval = 0, maxval = network.size(nw), conflicts.constraints = "odegreedist")
}
InitErgmTerm.odegree1.5<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="odegreepopularity", coef.names="odegree1.5",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="odegreedist")
}
InitErgmTerm.odegreepopularity<-function (nw, arglist, ...) {
.Deprecated("odegree1.5")
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="odegreepopularity", coef.names="odegreepopularity",
minval=0, maxval=network.dyadcount(nw,FALSE)*sqrt(network.size(nw)-1), conflicts.constraints="odegreedist")
}
InitErgmTerm.opentriad<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c(),
vartypes = c(),
defaultvalues = list(),
required = c())
list(name="opentriad", coef.names="opentriad", inputs=NULL)
}
InitErgmTerm.ostar<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("k", "attrname", "levels"),
vartypes = c("numeric", "character", "character,numeric,logical"),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("k", "attr", "levels"),
vartypes = c("numeric", ERGM_VATTR_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL, NULL),
required = c(TRUE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
k<-a$k
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
}
lk<-length(k)
if(lk==0){return(NULL)}
if(!is.null(attrarg)){
coef.names <- paste("ostar",k,".",attrname,sep="")
inputs <- c(k, nodecov)
attr(inputs, "ParamsBeforeCov") <- lk
}else{
coef.names <- paste("ostar",k,sep="")
inputs <- c(k)
}
list(name="ostar", coef.names=coef.names, inputs=inputs, minval=0, conflicts.constraints="odegreedist")
}
InitErgmTerm.receiver<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("base"),
vartypes = c("numeric"),
defaultvalues = list(1),
required = c(FALSE),
dep.inform = list("nodes"))
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("base", "nodes"),
vartypes = c("numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(1, LEVELS_BASE1),
required = c(FALSE, FALSE),
dep.inform = list("nodes", FALSE))
}
d <- ergm_attr_levels(a$nodes, 1:network.size(nw), nw, 1:network.size(nw))
if((!hasName(attr(a,"missing"), "nodes") || attr(a,"missing")["nodes"]) && any(NVL(a$base,0)!=0)) d <- d[-a$base]
ld<-length(d)
if(ld==0){return(NULL)}
list(name="receiver", coef.names=paste("receiver",d,sep=""),
inputs=c(d), emptynwstats=rep(0,length(d)), dependence=FALSE, minval=0, maxval=network.size(nw)-1, conflicts.constraints="idegrees")
}
InitErgmTerm.sender<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("base"),
vartypes = c("numeric"),
defaultvalues = list(1),
required = c(FALSE),
dep.inform = list("nodes"))
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("base", "nodes"),
vartypes = c("numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(1, LEVELS_BASE1),
required = c(FALSE, FALSE),
dep.inform = list("nodes", FALSE))
}
d <- ergm_attr_levels(a$nodes, 1:network.size(nw), nw, 1:network.size(nw))
if((!hasName(attr(a,"missing"), "nodes") || attr(a,"missing")["nodes"]) && any(NVL(a$base,0)!=0)) d <- d[-a$base]
ld<-length(d)
if(ld==0){return(NULL)}
list(name="sender", coef.names=paste("sender",d,sep=""),
inputs=c(d), emptynwstats=rep(0,length(d)), dependence=FALSE, minval=0, maxval=network.size(nw)-1, conflicts.constraints="odegrees")
}
InitErgmTerm.simmelian<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="simmelian", coef.names="simmelian", minval=0, maxval=network.edgecount(nw)*network.size(nw)*0.5)
}
InitErgmTerm.simmelianties<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="simmelianties", coef.names="simmelianties", minval=0, maxval=network.edgecount(nw))
}
InitErgmTerm.smalldiff<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname", "cutoff"),
vartypes = c("character", "numeric"),
defaultvalues = list(NULL, NULL),
required = c(TRUE, TRUE))
attrarg <- a$attrname
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "cutoff"),
vartypes = c(ERGM_VATTR_SPEC, "numeric"),
defaultvalues = list(NULL, NULL),
required = c(TRUE, TRUE))
attrarg <- a$attr
}
cutoff <- a$cutoff
if (length(cutoff)>1)
ergm_Init_abort("cutoff for smalldiff() must be a scalar.")
nodecov <- ergm_get_vattr(attrarg, nw, accept="numeric")
attrname <- attr(nodecov, "name")
coef.names <- paste("smalldiff.", attrname, cutoff, sep="")
inputs <- c(cutoff, nodecov)
attr(inputs, "ParamsBeforeCov") <- 1
list(name="smalldiff", coef.names=coef.names, inputs=inputs,
dependence=FALSE)
}
InitErgmTerm.sociality<-function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("attrname", "base", "levels"),
vartypes = c("character", "numeric", "character,numeric,logical"),
defaultvalues = list(NULL, 1, NULL),
required = c(FALSE, FALSE, FALSE),
dep.inform = list(FALSE, "nodes", FALSE),
dep.warn = list(TRUE, FALSE, TRUE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("attr", "base", "levels", "nodes"),
vartypes = c(ERGM_VATTR_SPEC, "numeric", ERGM_LEVELS_SPEC, ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, 1, NULL, LEVELS_BASE1),
required = c(FALSE, FALSE, FALSE, FALSE),
dep.inform = list(FALSE, "nodes", FALSE, FALSE),
dep.warn = list(TRUE, FALSE, TRUE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
d <- ergm_attr_levels(a$nodes, 1:network.size(nw), nw, 1:network.size(nw))
if((!hasName(attr(a,"missing"), "nodes") || attr(a,"missing")["nodes"]) && any(NVL(a$base,0)!=0)) d <- d[-a$base]
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
ui <- seq(along=u)
}
ld<-length(d)
if(ld==0){return(NULL)}
if(!is.null(attrarg)){
coef.names <- paste("sociality",d,".",attrname,sep="")
inputs <- c(d, 0, nodecov)
}else{
coef.names <- paste("sociality",d,sep="")
inputs <- c(d,0)
}
list(name="sociality", coef.names=coef.names, inputs=inputs, minval=0, maxval=network.size(nw)-1, conflicts.constraints="degrees", dependence=FALSE)
}
InitErgmTerm.threepath <- function(nw, arglist, ..., version=packageVersion("ergm")) {
ergm_Init_warn("This term is inaccurately named and actually refers to a '3-trail' in that it counts repeated vertices: i-j-k-i is a 3-trail but not a 3-path. See ergm-terms help for more information. This name has been deprecated and will be removed in a future version: if a 3-trail is what you want, use the term 'threetrail'.")
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist,
varnames = c("keep"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(FALSE),
dep.inform = list("levels"))
}else{
a <- check.ErgmTerm (nw, arglist,
varnames = c("keep", "levels"),
vartypes = c("numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE),
dep.inform = list("levels", FALSE))
}
  vals <- c("RRR","RRL","LRR","LRL")
types <- ergm_attr_levels(a$levels, vals, nw, levels = vals)
if((!hasName(attr(a,"missing"), "levels") || attr(a,"missing")["levels"]) && !is.null(a$keep)) types <- types[a$keep]
  indices <- match(types, vals)
if (is.directed(nw)) {
return(list(name = "threetrail",
coef.names = paste("threetrail", types, sep="."),
inputs=indices, minval = 0))
}
else {
return(list(name = "threetrail", coef.names = "threetrail", minval = 0))
}
}
InitErgmTerm.threetrail <- function(nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm (nw, arglist,
varnames = c("keep"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(FALSE),
dep.inform = list("levels"))
}else{
a <- check.ErgmTerm (nw, arglist,
varnames = c("keep", "levels"),
vartypes = c("numeric", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, NULL),
required = c(FALSE, FALSE),
dep.inform = list("levels", FALSE))
}
  vals <- c("RRR","RRL","LRR","LRL")
types <- ergm_attr_levels(a$levels, vals, nw, levels = vals)
if((!hasName(attr(a,"missing"), "levels") || attr(a,"missing")["levels"]) && !is.null(a$keep)) types <- types[a$keep]
  indices <- match(types, vals)
if (is.directed(nw)) {
return(list(name = "threetrail",
coef.names = paste("threetrail", types, sep="."),
inputs=indices, minval = 0))
}
else {
return(list(name = "threetrail", coef.names = "threetrail", minval = 0))
}
}
InitErgmTerm.transitive<-function (nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
list(name="transitive", coef.names="transitive", minval = 0)
}
InitErgmTerm.triadcensus<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("d"),
vartypes = c("numeric"),
defaultvalues = list(NULL),
required = c(FALSE))
d <- a$d
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("levels"),
vartypes = c(ERGM_LEVELS_SPEC),
defaultvalues = list(NULL),
required = c(FALSE))
d <- a$levels
}
emptynwstats<-NULL
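  ## Directed networks use the 16 Holland-Leinhardt triad-census codes;
  ## undirected networks classify each triad by its number of ties (0-3).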
if(is.directed(nw)){
tcn <- c("003","012", "102", "021D", "021U", "021C", "111D",
"111U", "030T", "030C", "201", "120D", "120U", "120C", "210", "300")
}else{
tcn <- c("0", "1", "2", "3")
}
if(is.null(d)){
d <- 1:(length(tcn) - 1)
}
if(is.numeric(d)){
d <- d + 1
}
d <- ergm_attr_levels(d, tcn, nw, levels = tcn)
d <- match(d, tcn) - 1
if (any(d==0)) {
emptynwstats <- rep(0,length(d))
nwsize <- network.size(nw)
emptynwstats[d==0] <- nwsize * (nwsize-1) * (nwsize-2) / 6
}
d <- d + 1
lengthd<-length(d)
if(lengthd==0){return(NULL)}
coef.names <- paste("triadcensus",tcn,sep=".")[d]
if (!is.null(emptynwstats)){
list(name="triadcensus", coef.names=coef.names, inputs=c(d),
emptynwstats=emptynwstats, dependence=TRUE)
}else{
list(name="triadcensus", coef.names=coef.names, inputs=c(d),
dependence=TRUE, minval = 0)
}
}
InitErgmTerm.triangle<-InitErgmTerm.triangles<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist,
varnames = c("attrname", "diff", "levels"),
vartypes = c("character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist,
varnames = c("attr", "diff", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
diff <- a$diff
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
ui <- seq(along=u)
if (!diff) {
coef.names <- paste("triangle",attrname,sep=".")
inputs <- c(nodecov)
} else {
coef.names <- paste("triangle",attrname, u, sep=".")
inputs <- c(ui, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(ui)
}
}else{
coef.names <- "triangle"
inputs <- NULL
}
list(name="triangle", coef.names=coef.names, inputs=inputs, minval=0)
}
InitErgmTerm.tripercent<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("attrname", "diff", "levels"),
vartypes = c("character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=FALSE,
varnames = c("attr", "diff", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
diff <- a$diff
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
ui <- seq(along=u)
if (!diff) {
coef.names <- paste("tripercent",attrname,sep=".")
inputs <- c(1, nodecov)
attr(inputs, "ParamsBeforeCov") <- 1
} else {
coef.names <- paste("tripercent",attrname, u, sep=".")
inputs <- c(ui, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(ui)
}
}else{
coef.names <- "tripercent"
inputs <- NULL
}
list(name="tripercent", coef.names=coef.names, inputs=inputs, minval = 0)
}
InitErgmTerm.ttriple<-InitErgmTerm.ttriad<-function (nw, arglist, ..., version=packageVersion("ergm")) {
if(version <= as.package_version("3.9.4")){
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attrname", "diff", "levels"),
vartypes = c("character", "logical", "character,numeric,logical"),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attrname
levels <- if(!is.null(a$levels)) I(a$levels) else NULL
}else{
a <- check.ErgmTerm(nw, arglist, directed=TRUE,
varnames = c("attr", "diff", "levels"),
vartypes = c(ERGM_VATTR_SPEC, "logical", ERGM_LEVELS_SPEC),
defaultvalues = list(NULL, FALSE, NULL),
required = c(FALSE, FALSE, FALSE))
attrarg <- a$attr
levels <- a$levels
}
diff <- a$diff
if(!is.null(attrarg)) {
nodecov <- ergm_get_vattr(attrarg, nw)
attrname <- attr(nodecov, "name")
u <- ergm_attr_levels(levels, nodecov, nw, levels = sort(unique(nodecov)))
nodecov <- match(nodecov,u,nomatch=length(u)+1)
ui <- seq(along=u)
if (!diff) {
coef.names <- paste("ttriple",attrname,sep=".")
inputs <- c(nodecov)
} else {
coef.names <- paste("ttriple",attrname, u, sep=".")
inputs <- c(ui, nodecov)
attr(inputs, "ParamsBeforeCov") <- length(ui)
}
}else{
coef.names <- "ttriple"
inputs <- NULL
}
list(name="ttriple", coef.names=coef.names, inputs=inputs, minval = 0)
}
InitErgmTerm.twopath<-function(nw, arglist, ...) {
a <- check.ErgmTerm(nw, arglist,
varnames = NULL,
vartypes = NULL,
defaultvalues = list(),
required = NULL)
if(is.directed(nw)){
list(name="m2star", coef.names="twopath", dependence=TRUE, minval=0)
  }else{
    list(name="kstar", coef.names="twopath", inputs=c(2), minval=0)
  }
} |
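## A brief usage sketch (not part of the original source; assumes the
## ergm package and its bundled 'sampson' data are available). The
## InitErgmTerm.* initializers above are not called directly: ergm
## dispatches to them when the matching term appears in a model formula.
## Wrapped in `if (FALSE)` so sourcing this file stays side-effect free.
if (FALSE) {
  library(ergm)
  data(sampson)                       # loads 'samplike', a directed network
  summary(samplike ~ odegree(0:3) + ostar(2) + ttriple)
}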
ebicScore <- function(target, dataset, test, wei = NULL, targetID = -1, ncores = 1, gam = NULL) {
dm <- dim(dataset)
cols <- dm[2]
rows <- dm[1]
bic <- numeric(cols)
  ## If the target is itself a column of the dataset, replace that column
  ## with noise so the target cannot be selected as its own predictor;
  ## its score is set to Inf at the end.
  if (targetID != -1) {
    target <- dataset[, targetID]
    dataset[, targetID] <- rbinom(rows, 1, 0.5)
  }
  ## Flag problematic (e.g. constant) columns; overwrite them with noise
  ## for now and give them an Inf score at the end.
  id <- Rfast::check_data(dataset)
  if ( sum(id > 0) ) dataset[, id] <- rnorm(rows * length(id))
  ## EBIC penalty multiplier: with gam = NULL an adaptive default of
  ## 2 - log(p)/log(n), floored at 0, is used; otherwise the
  ## user-supplied gamma is doubled.
  if ( is.null(gam) ) {
    con <- 2 - log(cols) / log(rows)
    if ( con < 0 ) con <- 0
  } else con <- 2 * gam
for (i in 1:cols) bic[i] <- test(target, dataset, xIndex = i, csIndex = 0, wei = wei)
  ## Add the extra EBIC penalty unless gamma is exactly 0 (plain BIC).
  ## gam is NULL by default, so guard with is.null() before comparing,
  ## otherwise `gam != 0` would error on a zero-length condition.
  if ( is.null(gam) || gam != 0 )  bic <- bic + con * log(cols)
if ( targetID != - 1 ) bic[targetID] <- Inf
if ( sum(id>0) > 0 ) bic[id] <- Inf
list(ebic = bic)
} |
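## A minimal usage sketch of ebicScore() (not part of the original
## source). The toy test function below is hypothetical: any function
## with this signature that returns a BIC-type score for column xIndex
## will do. Wrapped in `if (FALSE)` so sourcing this file runs nothing.
if (FALSE) {
  toy_test <- function(target, dataset, xIndex, csIndex, wei = NULL) {
    fit <- glm(target ~ dataset[, xIndex], family = binomial, weights = wei)
    BIC(fit)
  }
  set.seed(1)
  y <- rbinom(100, 1, 0.5)
  x <- matrix(rnorm(100 * 5), ncol = 5)
  ebicScore(y, x, test = toy_test)$ebic   # one EBIC value per column of x
}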
test_that("trajectory's method chaining works", {
t0 <- trajectory() %>%
seize("one", 1) %>%
release("one", 1) %>%
timeout(function() 1) %>%
branch(function() 1, TRUE, trajectory() %>% timeout(function() 1)) %>%
rollback(1) %>%
seize("one", 1)
expect_is(t0, "trajectory")
})
test_that("simmer's method chaining works", {
t0 <- trajectory() %>%
timeout(function() 1)
env <- simmer(verbose = TRUE) %>%
add_resource("server") %>%
add_generator("customer", t0, function() 1) %>%
stepn() %>%
run(10) %>%
reset()
expect_is(env, "simmer")
}) |
## Reads a pre-recorded API response from tests/testthat/fixtures/ and
## checks that response_process() signals both expected error classes
## and that its error message matches the stored snapshot.
expect_recorded_error <- function(filename, status_code) {
  rds_file <- test_path("fixtures", fs::path_ext_set(filename, "rds"))
  resp <- readRDS(rds_file)
  expect_error(response_process(resp), class = "gargle_error_request_failed")
  expect_error(response_process(resp), class = glue("http_error_{status_code}"))
  expect_snapshot_error(response_process(resp))
}
test_that("Resource exhausted (Sheets, ReadGroup)", {
expect_recorded_error(
"sheets-spreadsheets-get-quota-exceeded-readgroup_429",
429
)
})
test_that("Request for non-existent resource (Drive)", {
expect_recorded_error(
"drive-files-get-nonexistent-file-id_404",
404
)
})
test_that("Request for which we don't have scope (Fitness)", {
expect_recorded_error(
"fitness-get-wrong-scope_403",
403
)
})
test_that("Use key that's not enabled for the API (Sheets)", {
expect_recorded_error(
"sheets-spreadsheets-get-api-key-not-enabled_403",
403
)
})
test_that("Request with invalid argument (Sheets, bad range)", {
expect_recorded_error(
"sheets-spreadsheets-get-nonexistent-range_400",
400
)
})
test_that("Request with bad field mask (Sheets)", {
expect_recorded_error(
"sheets-spreadsheets-get-bad-field-mask_400",
400
)
})
test_that("Request for nonexistent resource (Sheets)", {
expect_recorded_error(
"sheets-spreadsheets-get-nonexistent-sheet-id_404",
404
)
})
test_that("Request with invalid value (tokeninfo, stale token)", {
expect_recorded_error(
"tokeninfo-stale_400",
400
)
})
test_that("Request to bad URL (tokeninfo, HTML content)", {
expect_recorded_error(
"tokeninfo-bad-path_404",
404
)
})
test_that("RPC codes can be looked up (or not)", {
expect_match(
rpc_description("ALREADY_EXISTS"),
"resource .* already exists"
)
expect_null(rpc_description("MATCHES_NOTHING"))
}) |