fitGMM <- function(exprVals,RDparameters,rejectNull=0.05){
Y <- exprVals
Y[is.na(Y)] <- 0
simulateData <- function(expVal,RDparameters,length){
epsilons <- rnorm(length,mean=0,sd=RDparameters$sd_epsilon)
epsilons <- (epsilons-mean(epsilons))/sd(epsilons)
etas <- rnorm(length,mean=0,sd=RDparameters$sd_eta)
etas <- (etas-mean(etas))/sd(etas)
y <- 2^expVal
log((y-(RDparameters$alpha+epsilons))/exp(etas),base=2)
}
constantData <- simulateData(median(Y),RDparameters,length=length(Y))
constantData <- constantData[!is.na(constantData)]
if(length(constantData)<=2) pval <- 1
if(length(constantData)>2){
if(length(constantData)<length(Y)) constantData <- sample(constantData,length(Y),replace=TRUE)
npoints <- min(length(unique(Y)),length(unique(constantData)))
kstest <- ks.test(sample(unique(Y),npoints),sample(unique(constantData),npoints))
pval <- kstest$p.value
rm(constantData)
}
if(pval>rejectNull){
print("not significant differential expression")
mm <- list(modelName="null",parameters=NULL,G=0)
}
else{
mm <- Mclust(Y,modelNames=c("V"),G=1:5)
}
rm(Y)
gc()
mm
}
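## Hypothetical usage sketch (not from the source): fitGMM() expects log2
## expression values and a read-depth noise parameter list with elements
## alpha, sd_epsilon and sd_eta; the values below are illustrative
## assumptions.  Mclust() comes from the mclust package.
library(mclust)
set.seed(1)
exprVals <- rnorm(200, mean = 8, sd = 2)
RDparameters <- list(alpha = 30, sd_epsilon = 5, sd_eta = 0.3)
mm <- fitGMM(exprVals, RDparameters, rejectNull = 0.05)
mm$G  # number of fitted mixture components, or 0 for the null model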
|
pbino=function(nplus)
{
prob=c(rep(0,max(nplus,1)-1),1/(max(nplus,1):400+1))
prob/sum(prob)
}
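## Hypothetical sanity check (not from the source): pbino() returns a
## normalised probability vector of length 400 whose mass is zero below
## nplus and decays like 1/(k + 1) from nplus onwards.
p <- pbino(5)
length(p)          # 400
sum(p)             # 1
all(p[1:4] == 0)   # TRUE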
|
clean.retrieval <- function(x, gunzip = TRUE) {
if (any(!file.exists(x)))
stop("Some of the meta.retrieval() output files seem not to exist. Please provide valid file paths to meta.retrieval() output files.", call. = FALSE)
if (gunzip)
message("Cleaning file names and unzipping files ...")
if (!gunzip)
message("Cleaning file names ...")
folder_files <- list.files(dirname(x)[1])
if (length(folder_files) == 0)
stop("Unfortunately, your specified folder '", x, "' does not include any files.", call. = FALSE)
file_ext <- "[.]*a$"
if (any(stringr::str_detect(folder_files, "[.]faa.gz$"))) {
seq_type <- "ncbi_protein"
file_ext <- "[.]faa$"
}
if (any(stringr::str_detect(folder_files, "[.]fna.gz$"))) {
seq_type <- "ncbi_nucleotide"
file_ext <- "[.]fna$"
}
if (any(stringr::str_detect(folder_files, "[.]gff.gz$"))) {
seq_type <- "ncbi_gff"
file_ext <- "[.]gff$"
}
if (any(stringr::str_detect(folder_files, "[.]out.gz$"))) {
seq_type <- "ncbi_rm"
file_ext <- "[.]out$"
}
if (any(stringr::str_detect(folder_files, "[.]gff3.gz$"))) {
seq_type <- "ensembl_gff3"
file_ext <- "[.]gff3$"
}
if (any(stringr::str_detect(folder_files, "[.]gtf.gz$"))) {
seq_type <- "ensembl_gtf"
file_ext <- "[.]gtf$"
}
if (any(stringr::str_detect(folder_files, "[.]fa.gz$"))) {
seq_type <- "ensembl_fasta"
file_ext <- "[.]fa$"
}
find_doc <- which(stringr::str_detect(folder_files, "doc_"))
find_md5 <- which(stringr::str_detect(folder_files, "md5checksum"))
find_documentation <- which(stringr::str_detect(folder_files, "documentation"))
find_unzipped_files <- which(stringr::str_detect(folder_files, file_ext))
folder_files_reduced <- NULL
if (length(c(find_doc, find_md5, find_documentation, find_unzipped_files)) > 0) {
folder_files_reduced <- folder_files[-c(find_doc, find_md5, find_documentation, find_unzipped_files)]
}
if (length(folder_files_reduced) == 0) {
message("It seems that nothing needs to be done. All files are unzipped.")
return(file.path(dirname(x)[1], folder_files[-c(find_doc, find_md5, find_documentation)]))
} else {
input_files <- folder_files_reduced
}
input_files_without_appendix <- unlist(lapply(input_files, function(x) return(unlist(stringr::str_split(x, "[.]"))[1])))
file_ext <- stringr::str_replace(file_ext, "\\$", "")
file_ext <- stringr::str_replace(file_ext, "\\[.]", "")
if (gunzip)
output_files <- paste0(tidy_name(input_files_without_appendix), ".", file_ext)
if (!gunzip)
output_files <- paste0(tidy_name(input_files_without_appendix),".",file_ext,".gz")
if (!all(file.exists(file.path(dirname(x)[1], input_files))))
stop("Something went wrong during the cleaning process. Some input files seem not to exist.", call. = FALSE)
if (gunzip) {
for (i in seq_len(length(input_files))) {
if (file.exists(file.path(dirname(x)[1], input_files[i]))) {
message("Unzipping file ", input_files[i],"' ...")
R.utils::gunzip(file.path(dirname(x)[1], input_files[i]), destname = file.path(dirname(x)[1], output_files[i]))
}
}
}
message("Finished formatting.")
return(file.path(dirname(x)[1], output_files))
}
|
context("cs_dispersion")
test_that("Forward wrong input data object", {
expect_error(cs_dispersion(1))
expect_error(cs_dispersion("abs"))
expect_error(cs_dispersion(list(a=1, b=2)))
expect_error(cs_dispersion(NULL))
expect_error(cs_dispersion(NA))
expect_error(cs_dispersion(Inf))
expect_error(cs_dispersion(-Inf))
})
test_that("Tests for correct function parameterization", {
set.seed(5)
obs <- rnorm(100)
preds <- matrix(rnorm(1000, 1), 100, 10)
train_o<-obs[1:80]
train_p<-preds[1:80,]
data<-foreccomb(train_o, train_p)
expect_error(cs_dispersion(data, measure = "SD"), NA)
expect_error(cs_dispersion(data, measure = "IQR"), NA)
expect_error(cs_dispersion(data, measure = "Range"), NA)
expect_error(cs_dispersion(data, measure = "bb"))
expect_error(cs_dispersion(data, measure = NULL))
})
test_that( "Check for correct class type and accuracy, when Forecast_Test is provided but not Actual_Test", {
set.seed(5)
obs <- rnorm(100)
preds <- matrix(rnorm(1000, 1), 100, 10)
train_o<-obs[1:80]
train_p<-preds[1:80,]
test_p<-preds[81:100,]
data<-foreccomb(train_o, train_p, newpreds = test_p)
result<-cs_dispersion(data, measure = "SD")
expect_length(result, 2)
expect_equal(as.vector(result$CS_Dispersion[1:6]),
c(1.064505, 0.793982, 1.042518, 0.724194, 0.690324, 0.590318),
tolerance = 1e-5,
check.attributes = FALSE)
})
test_that( "Check for correct class type and accuracy, when test set is used", {
set.seed(5)
obs <- rnorm(100)
preds <- matrix(rnorm(1000, 1), 100, 10)
train_o<-obs[1:80]
train_p<-preds[1:80,]
test_o<-obs[81:100]
test_p<-preds[81:100,]
data<-foreccomb(train_o, train_p, test_o, test_p)
result<-cs_dispersion(data, measure = "SD")
expect_length(result, 2)
expect_equal(as.vector(result$CS_Dispersion[1:6]),
c(1.064505, 0.793982, 1.042518, 0.724194, 0.690324, 0.590318),
tolerance = 1e-5,
check.attributes = FALSE)
})
|
X <- nnTensor::toyModel("NMF")
out1 <- myICA(X, k=10)
expect_equivalent(dim(out1), c(100, 10))
|
ml_glm2 <- function(formula1,
formula2 = ~1,
data,
family,
mean.link,
scale.link,
offset = 0,
start = NULL,
verbose = FALSE) {
mf <- model.frame(formula1, data)
y <- model.response(mf, "numeric")
class(y) <- c(family, mean.link, scale.link, "expFamily")
X1 <- model.matrix(formula1, data = data)
X2 <- model.matrix(formula2, data = data)
colnames(X2) <- paste(colnames(X2), "_s", sep="")
p <- ncol(X1)
X <- cbind(X1, X2)
if (any(is.na(cbind(y, X)))) stop("Some data are missing!")
if (is.null(start)) {
start <- c(kickStart(y, X1, offset),
1,
rep(0, ncol(X) - p - 1))
names(start) <- c(colnames(X1), colnames(X2))
}
fit <- maximize(start, Sjll2, X, y, offset, p = p)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
residuals <- devianceResiduals2(y, beta.hat, X, p, offset)
presiduals <- pearsonResiduals2(y, beta.hat, X, p, offset)
fit.null <- maximize(c(mean(y), 1),
Sjll2,
X[,c(1,p+1)], y, offset, p = 1)
null.deviance <-
sum(devianceResiduals2(y,
c(fit.null$par[1], fit$par[p+1]),
X[,c(1,p+1)],
1,
offset)^2)
null.pearson <-
sum(pearsonResiduals2(y,
c(fit.null$par[1], fit$par[p+1]),
X[,c(1,p+1)],
1,
offset)^2)
lin.pred <- as.matrix(X)[,1:p] %*% beta.hat[1:p] + offset
y.hat <- unlink(y, lin.pred)
results <- list(fit = fit,
loglike = fit$val,
X = X,
y = y,
p = p,
rank = p,
call = match.call(),
obs = length(y),
fitted.values = y.hat,
linear.pedictor = lin.pred,
df.null = length(y) - 2,
df.residual = length(y) - length(beta.hat),
pearson = sum(presiduals^2),
null.pearson = null.pearson,
dispersion = sum(presiduals^2)/(length(y) - length(beta.hat)),
deviance = sum(residuals^2),
null.deviance = null.deviance,
residuals = residuals,
presiduals = presiduals,
coefficients = beta.hat,
se.beta.hat = se.beta.hat,
aic = - 2 * fit$val + 2 * length(beta.hat),
offset = offset,
i = fit$counts[1])
class(results) <- c("msme","glm")
return(results)
}
|
eblupMFH2 <- function (formula, vardir, MAXITER = 100, PRECISION = 1e-04, data){
r = length(formula)
if(!missing(data)){
formula.matrix = lapply(formula, function(x){model.frame(x,na.action = na.omit,data)})
y.matrix = unlist(lapply(formula, function(x){model.frame(x,na.action = na.omit,data)}[[1]]))
x.matrix = Reduce(adiag,lapply(formula, function(x){model.matrix(x,data)}))
n = length(y.matrix)/r
if(!all(vardir %in% names(data)))
stop("Object vardir is not appropiate with data")
if(length(vardir) != sum(1:r))
stop("Length of vardir is not appropiate with data")
if (any(is.na(data[,vardir])))
stop("Object vardir contains NA values.")
vardir = data[,vardir]
R = df2matR(vardir, r)
} else {
formula.matrix = lapply(formula, function(x){model.frame(x,na.action = na.omit)})
y.matrix = unlist(lapply(formula, function(x){model.frame(x, na.action = na.omit)}[[1]]))
x.matrix = Reduce(adiag,lapply(formula, function(x){model.matrix(x)}))
n = length(y.matrix)/r
if(dim(vardir)[2]!= sum(1:r)){
stop("Object vardir is not appropiate with data")
}
if (any(is.na(vardir)))
stop("Object vardir contains NA values.")
R = df2matR(vardir, r)
}
for(i in 1:r){
if (attr(attributes(formula.matrix[[i]])$terms,"response")==1)
textformula = paste(formula[[i]][2],formula[[i]][1],formula[[i]][3])
else
textformula = paste(formula[[i]][1],formula[[i]][2])
if (length(na.action(formula.matrix[[i]]))>0){
stop("Argument formula= ",textformula," contains NA values.")
}
}
y.var = sapply(formula, "[[", 2)
I=diag(n)
Id = diag(r)
d.Omega = list()
sigmau = c(mean(sapply(vardir[,1:r], median)), 1 - 1e-04)
kit = 0
diff = rep(PRECISION + 1,2)
convergence = TRUE
while (any(diff > rep(PRECISION,2)) & (kit < MAXITER)) {
kit = kit + 1
sigmau1=sigmau
Omega_AR = matrix(0, r,r)
if(r == 1){
G = sigmau1[1]/(1-sigmau1[2]^2)
} else {
for(i in 1:r){
for(j in 1:r){
Omega_AR[i,j] = sigmau1[2]^(abs(i-j))/(1-sigmau1[2]^2)
}
}
G = sigmau1[1] * Omega_AR
}
GI=kronecker(G,I)
Omega = solve(GI + R)
Xto = t(Omega %*% x.matrix)
Q = solve(Xto %*% x.matrix)
P = Omega - t(Xto) %*% Q %*% Xto
d.Omega[[1]] = kronecker(Omega_AR, I)
d.Omega[[2]] = matrix(NA, r,r)
for(i in 1:r){
for(j in 1:r){
k = abs(i-j)
d.Omega[[2]][i,j] = sigmau1[1]*(k*sigmau1[2]^(k-1) + (2-k)*(sigmau1[2]^(k+1)))/(1-sigmau1[2]^2)^2
}
}
d.Omega[[2]] = kronecker(d.Omega[[2]], I)
Py = P %*% y.matrix
s = sapply(d.Omega, function(x) (-0.5) * sum(diag(P%*%x)) + 0.5 * (t(Py)%*%x %*% Py))
iF = matrix(unlist(lapply(d.Omega, function(x) lapply(d.Omega, function(y) 0.5 * sum(diag(P%*%x%*%P%*%y))))),2)
sigmau = sigmau1 + solve(iF)%*%s
if(abs(sigmau[2]) > 1){
sigmau[2] = sigmau1[2]
}
diff = abs((sigmau - sigmau1)/sigmau1)
}
sigmau[1] = max(sigmau[1], 0)
if (kit >= MAXITER && any(diff >= PRECISION)) {
convergence = FALSE }
if(r == 1){
G = sigmau[1]/(1-sigmau[2]^2)
} else {
for(i in 1:r){
for(j in 1:r){
Omega_AR[i,j] = sigmau[2]^(abs(i-j))/(1-sigmau[2]^2)
}
}
G = sigmau[1] * Omega_AR
}
GI=kronecker(G,I)
Omega = solve(GI + R)
Xto = t(Omega %*% x.matrix)
Qh = solve(Xto %*% x.matrix)
P = Omega - t(Xto) %*% Qh %*% Xto
Py = P %*% y.matrix
d.Omega[[1]] = kronecker(Omega_AR, I)
d.Omega[[2]] = matrix(NA, r,r)
for(i in 1:r){
for(j in 1:r){
k = abs(i-j)
d.Omega[[2]][i,j] = sigmau[1]*(k*sigmau[2]^(k-1) + (2-k)*(sigmau[2]^(k+1)))/((1-sigmau[2]^2)^2)
}
}
d.Omega[[2]] = kronecker(d.Omega[[2]], I)
beta=Qh%*%Xto%*%y.matrix
res=y.matrix-x.matrix%*%beta
eblup=x.matrix%*%beta+GI%*%Omega%*%res
eblup.df = data.frame(matrix(eblup, n, r))
names(eblup.df) = y.var
se.b=sqrt(diag(Qh))
t.val=beta/se.b
pv = 2 * pnorm(abs(t.val), lower.tail = FALSE)
coef = cbind(beta, se.b, t.val, pv)
colnames(coef) = c("beta", "std.error", "t.statistics", "p.value")
FI=solve(iF)
d=kronecker(Id,I)-GI%*%Omega
gg1=diag(GI%*%Omega%*%R)
gg2=diag(d%*%x.matrix%*%Qh%*%t(x.matrix)%*%t(d))
dg = lapply(d.Omega, function(x) x %*% Omega - GI %*% Omega %*% x %*% Omega)
g3 = list()
for (i in 1:2){
for (j in 1:2){
g3[[(i-1)*2+j]] = FI[i,j]*(dg[[i]]%*%(GI+R) %*% t(dg[[j]]))
}
}
gg3 = diag(Reduce('+', g3))
mse = gg1 + gg2 + 2*gg3
mse.df = data.frame(matrix(0,n,r))
names(mse.df) = y.var
for(i in 1:r)
mse.df[,i] = mse[((i-1)*n+1):(i*n)]
u.cap = GI %*% Omega %*% res
u.cap.df = as.data.frame(matrix(u.cap, n, r))
names(u.cap.df) = y.var
T.test = (sigmau[2,]) / (sqrt(FI[2,2]))
p.val = 2 * pnorm(abs(T.test), lower.tail = FALSE)
rho.df = data.frame(signif(data.frame(sigmau[2], T.test, p.val), digits = 5))
names(rho.df) = c("rho","T.test", "p-value")
result = list(eblup = NA,
MSE = NA,
randomEffect=NA,
Rmatrix=NA,
fit = list(method = NA,
convergence = NA ,
iterations = NA,
estcoef= NA,
refvar=NA,
rho=NA,
informationFisher=NA
))
result$eblup = signif(eblup.df, digits = 5)
result$MSE = signif(mse.df, digits = 5)
result$randomEffect = signif(u.cap.df, digits = 5)
result$Rmatrix = signif(R, digits = 5)
result$fit$method = "REML"
result$fit$convergence = convergence
result$fit$iterations = kit
result$fit$estcoef = coef
result$fit$refvar = signif(sigmau[1,], digits = 5)
result$fit$rho = rho.df
result$fit$informationFisher = signif(iF, digits = 5)
return(result)
}
|
import_local_nut <- function(path, station_code, collMethd = c(1, 2), trace = FALSE){
if(file.exists(paste0(path, '.zip'))){
path <- paste0(path, '.zip')
}
if(!file.exists(path)){
stop('Path does not exist')
}
if(!grepl('wq|met|nut', station_code))
stop('station_code must include wq, met, or nut')
if(!grepl('nut', station_code)){
stop('This function is for nutrient stations only')
}
zips <- grepl('\\.zip$', path)
station_code <- tolower(gsub('\\.csv$', '', station_code))
if(zips){
file_nms <- unzip(path, list = TRUE)$Name
expr <- paste0(station_code, '.*', '\\.csv$')
files_in <- grep(expr, file_nms, value = TRUE, ignore.case = TRUE)
if(length(files_in) == 0) stop('File(s) not found.')
tmp_fl <- tempfile()
unzip(path, files = files_in, exdir = tmp_fl)
files_in <- dir(tmp_fl, recursive = TRUE)
path <- tmp_fl
} else {
file_nms <- dir(path)
expr <- paste0('^', station_code, '.*', '\\.csv$')
}
files_in <- grep(expr, file_nms, value = TRUE, ignore.case = TRUE)
if(length(files_in) == 0) stop('File(s) not found.')
station_code <- tolower(station_code)
dat <- vector('list', length(files_in))
names(dat) <- gsub('.csv', '', files_in)
if(trace) cat('Loading files...\n\n')
for(file_in in files_in){
tmp <- try({
read.csv(file.path(path, file_in), stringsAsFactors = FALSE)
}, silent = TRUE)
if('try-error' %in% class(tmp)){
raw <- readLines(file.path(path, file_in))
keep_lines <- grep(paste0('^', station_code), raw)
tmp <- raw[keep_lines]
tmp <- strsplit(tmp, ',')
tmp <- do.call('rbind', tmp)
tmp <- data.frame(tmp, stringsAsFactors = FALSE)
names(tmp) <- strsplit(
gsub('["\\"]', '', raw[keep_lines[1] - 1]),
',')[[1]]
}
names(tmp) <- tolower(names(tmp))
tmp <- tmp[, !names(tmp) %in% c('stationcode', 'isswmp')]
names(tmp)[grep('datetimestamp', names(tmp), ignore.case = TRUE)] <- 'datetimestamp'
tmp$datetimestamp <- time_vec(tmp$datetimestamp, station_code)
nm <- gsub('.csv', '', file_in)
dat[[nm]] <- tmp
}
if(zips) unlink(tmp_fl, recursive = TRUE)
parm <- substring(station_code, 6)
parm <- gsub('[0-9.*]', '', parm)
nms <- param_names(parm)[[parm]]
out <- do.call('rbind', dat)
if(any(c('nut', 'wq') %in% parm) & any(duplicated(out$datetimestamp))){
out <- out[!duplicated(out$datetimestamp),]
}
out <- out[!is.na(out$datetimestamp), ]
if(parm == 'nut'){
if(length(unique(out$collmethd)) == 2){
out <- out[out$collmethd %in% collMethd, ]
}else{
warning('This station does not have diel sampling data. All data will be retained.', call. = FALSE)
out <- out
}
}
out <- data.frame(
datetimestamp = out$datetimestamp,
out[, names(out) %in% nms],
row.names = seq(1, nrow(out))
)
names(out) <- tolower(names(out))
station_code <- gsub('[0-9]*$', '', station_code)
out <- swmpr(out, station_code)
if(trace) cat('\n\nData imported...')
return(out)
}
|
IFS.available.codes <- DataStructureMethod('IFS')
names(IFS.available.codes)
IFS.available.codes[[1]]
CodeSearch(IFS.available.codes, 'CLL', 'GDP')
CodeSearch(IFS.available.codes, 'CL_INDICATOR_IFS', 'GDP')
CodeSearch(IFS.available.codes, 'CL_INDICATOR_IFS', 'GDPABCDE')
|
predIntNparSimultaneousTestPower <-
function (n, n.median = 1, k = 1, m = 2, r = 1, rule = "k.of.m",
lpl.rank = ifelse(pi.type == "upper", 0, 1), n.plus.one.minus.upl.rank = ifelse(pi.type ==
"lower", 0, 1), delta.over.sigma = 0, pi.type = "upper",
r.shifted = r, method = "approx", NMC = 100, ci = FALSE,
ci.conf.level = 0.95, integrate.args.list = NULL, evNormOrdStats.method = "royston")
{
rule <- match.arg(rule, c("k.of.m", "CA", "Modified.CA"),
several.ok = TRUE)
pi.type <- match.arg(pi.type, c("upper", "lower"))
method <- match.arg(method, c("approx", "simulate"))
if (!is.vector(n, mode = "numeric") || !all(is.finite(n)) ||
any(n < 2))
stop(paste("'n' must be a numeric vector", "with all elements greater than or equal to 2",
"and no Missing (NA), Infinite (-Inf, Inf),", "or Undefined (Nan) values."))
if (!is.vector(n.median, mode = "numeric") || !all(is.finite(n.median)) ||
any(n.median < 1) || !all(n.median == trunc(n.median)) ||
!all(is.odd(n.median)))
stop("'n.median' must be a numeric vector of positive odd integers")
if (!is.vector(k, mode = "numeric") || !all(is.finite(k)) ||
any(k < 1))
stop(paste("'k' must be a numeric vector", "with all elements greater than or equal to 1",
"and no Missing (NA), Infinite (-Inf, Inf),", "or Undefined (Nan) values."))
if (!is.vector(m, mode = "numeric") || !all(is.finite(m)) ||
any(m < 1))
stop(paste("'m' must be a numeric vector", "with all elements greater than or equal to 1",
"and no Missing (NA), Infinite (Inf, -Inf),", "or Undefined (Nan) values."))
if (!is.vector(r, mode = "numeric") || !all(is.finite(r)) ||
any(r < 1))
stop(paste("'r' must be a numeric vector", "with all elements greater than or equal to 1",
"and no Missing (NA), Infinite (-Inf, Inf),", "or Undefined (Nan) values."))
if (pi.type == "upper")
lpl.rank <- 0
else n.plus.one.minus.upl.rank <- 0
if (!is.vector(lpl.rank, mode = "numeric") || !all(is.finite(lpl.rank)) ||
any(lpl.rank != trunc(lpl.rank)) || any(lpl.rank < 0 |
lpl.rank >= n))
stop(paste("'lpl.rank' must be a vector of non-negative",
"integers less than the corresponding value of 'n'"))
if (pi.type == "lower" & any(lpl.rank < 1))
stop("When pi.type='lower', all values of 'lpl.rank' must be positive integers")
if (!is.vector(n.plus.one.minus.upl.rank, mode = "numeric") ||
!all(is.finite(n.plus.one.minus.upl.rank)) || any(n.plus.one.minus.upl.rank !=
trunc(n.plus.one.minus.upl.rank)) || any(n.plus.one.minus.upl.rank <
0 | n.plus.one.minus.upl.rank >= n))
stop(paste("'n.plus.one.minus.upl.rank' must be a vector of non-negative",
"integers less than the corresponding value of 'n'"))
if (pi.type == "upper" & any(n.plus.one.minus.upl.rank <
1))
stop("When pi.type='upper' all values of 'n.plus.one.minus.upl.rank' must be positive integers")
if (!is.vector(delta.over.sigma, mode = "numeric") || any(is.na(delta.over.sigma)))
stop(paste("'delta.over.sigma' must be a numeric vector",
"with no Missing (NA) or Undefined (Nan) values."))
if (!is.vector(r.shifted, mode = "numeric") || !all(is.finite(r.shifted)) ||
!all(r.shifted == trunc(r.shifted)) || any(r.shifted <
1) || any(r.shifted > r))
stop(paste("'r.shifted' must be a numeric vector of positive integers",
"with all values less than or equal to", "the corresponding values of 'r'"))
arg.mat <- cbind.no.warn(n = as.vector(n), n.median = as.vector(n.median),
k = as.vector(k), m = as.vector(m), r = as.vector(r),
lpl.rank = as.vector(lpl.rank), n.plus.one.minus.upl.rank = as.vector(n.plus.one.minus.upl.rank),
delta.over.sigma = as.vector(delta.over.sigma), r.shifted = as.vector(r.shifted))
nrow.arg.mat <- nrow(arg.mat)
length.rule <- length(rule)
if (length.rule > nrow.arg.mat)
arg.mat <- arg.mat[rep(1:nrow.arg.mat, length.out = length.rule),
]
else rule <- rep(rule, length.out = nrow.arg.mat)
for (i in c("n", "n.median", "k", "m", "r", "lpl.rank", "n.plus.one.minus.upl.rank",
"delta.over.sigma", "r.shifted")) assign(i, arg.mat[,
i])
index <- rule == "k.of.m"
if (any(index)) {
if (any(k[index] > m[index]))
stop(paste("For cases where rule='k.of.m',", "all elements of 'k' must be less than or equal to",
"the corresponding elements of 'm'"))
}
index <- rule == "Modified.CA"
m[index] <- 4
N <- length(n)
conf.level <- numeric(N)
power <- numeric(N)
for (i in 1:N) {
conf.level[i] <- predIntNparSimultaneousConfLevel(n = n[i],
n.median = n.median[i], k = k[i], m = m[i], r = r[i],
rule = rule[i], lpl.rank = lpl.rank[i], n.plus.one.minus.upl.rank = n.plus.one.minus.upl.rank[i],
pi.type = pi.type, integrate.args.list = integrate.args.list)
}
if (pi.type == "upper")
pl.rank <- n + 1 - n.plus.one.minus.upl.rank
else pl.rank <- lpl.rank
if (method == "approx") {
K <- numeric(N)
for (i in 1:N) K[i] <- evNormOrdStatsScalar(r = pl.rank[i],
n = n[i], method = evNormOrdStats.method)
if (pi.type == "lower")
K <- -K
for (i in 1:N) {
power[i] <- predIntNormSimultaneousTestPowerScalar(n = n[i],
n.mean = n.median[i], K = K[i], k = k[i], m = m[i],
r = r.shifted[i], rule = rule[i], delta.over.sigma = delta.over.sigma[i],
pi.type = pi.type, conf.level = conf.level[i],
integrate.args.list = integrate.args.list)
}
}
else {
for (i in 1:N) {
n.i <- n[i]
n.median.i <- n.median[i]
k.i <- k[i]
m.i <- m[i]
r.i <- r[i]
pl.rank.i <- pl.rank[i]
r.shifted.i <- r.shifted[i]
delta.over.sigma.i <- delta.over.sigma[i]
mean.i <- c(rep(delta.over.sigma.i, r.shifted.i),
rep(0, r.i - r.shifted.i))
out.vec <- logical(NMC)
if (pi.type == "upper") {
test.fcn <- switch(rule[i], k.of.m = function(z,
PL, k) sum(z <= PL) < k, CA = function(z, PL,
k) (z[1] > PL) & (sum(z > PL) >= 2), Modified.CA = function(z,
PL, k) (z[1] > PL) & (sum(z > PL) >= 3))
}
else {
test.fcn <- switch(rule[i], k.of.m = function(z,
PL, k) sum(z >= PL) < k, CA = function(z, PL,
k) (z[1] < PL) & (sum(z < PL) >= 2), Modified.CA = function(z,
PL, k) (z[1] < PL) & (sum(z < PL) >= 3))
}
for (j in 1:NMC) {
x <- rnorm(n.i)
PL <- sort(x)[pl.rank.i]
new.x <- array(rnorm(n.median.i * m.i * r.i,
mean = mean.i), dim = c(r.i, n.median.i, m.i))
new.x <- apply(new.x, c(1, 3), median)
out.vec[j] <- any(apply(new.x, 1, test.fcn, PL = PL,
k = k.i))
}
power[i] <- mean(out.vec)
}
if (ci) {
SE.power <- sqrt(power * (1 - power)/NMC)
LCL <- power - qnorm(ci.conf.level) * SE.power
UCL <- power + qnorm(ci.conf.level) * SE.power
attr(power, "conf.int") <- rbind(LCL = LCL, UCL = UCL)
}
}
power
}
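## Hypothetical usage sketch (not from the source): the body above leans on
## EnvStats internals (cbind.no.warn, is.odd, predIntNparSimultaneousConfLevel,
## ...), so the equivalent exported EnvStats function is called here instead:
## power of a 1-of-3 upper nonparametric simultaneous prediction interval at
## r = 10 monitoring wells under a 2-sigma upward shift.
EnvStats::predIntNparSimultaneousTestPower(n = 20, k = 1, m = 3, r = 10,
                                           rule = "k.of.m",
                                           delta.over.sigma = 2,
                                           pi.type = "upper")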
|
mincontri<-
function(mix,loc=NULL)
{
if(is.null(loc))
{
tmp <- [email protected]
tmp2 <- max(sapply(tmp,length))/2
return(ceiling(tmp2))
}
else{
tmp <- [email protected][loc]
tmp2 <- max(sapply(tmp,length))/2
return(ceiling(tmp2))
}
}
|
library(civis)
context("civis_ml_plot")
model_list <- readRDS("data/civis_ml_models.rds")
plotable_mods <- model_list[1:(length(model_list) - 4)]
err_mod <- tail(model_list, n = 4)
is_classif <- sapply(plotable_mods, function(m) is(m, "civis_ml_classifier"))
test_that("decile plot for classification is produced", {
ps <- lapply(plotable_mods[is_classif], function(m) plot(m))
expect_true(all(sapply(ps, is, "ggplot")))
plot_has_facets <- function(p) !is.null(p$facet)
expect_true(all(sapply(ps, plot_has_facets)))
plot_has_bars <- function(p) is((p[["layers"]][[1]][["geom"]]), "GeomBar")
expect_true(all(sapply(ps, plot_has_bars)))
plot_has_x_decile <- function(p) {
all(p$data[["decile"]] %in% 1:10)
}
expect_true(all(sapply(ps, plot_has_x_decile)))
})
ps <- lapply(plotable_mods[!is_classif], function(m) plot(m))
test_that("models with no metrics throw errors for hist and plot", {
msg <- "Plotting data not available."
for (m in err_mod) {
e <- tryCatch(plot(m), error = function(e) e)
expect_equal(e$message, msg)
}
msg <- "Histogram data not available."
for (m in err_mod) {
e <- tryCatch(hist(m), error = function(e) e)
expect_equal(e$message, msg)
}
})
test_that("y_yhat plot for reg is produced", {
expect_true(all(sapply(ps, is, "ggplot")))
plot_has_bins <- function(p) is(p[["layers"]][[1]][["geom"]], "GeomTile")
expect_true(all(sapply(ps, plot_has_bins)))
plot_vals_correct <- function(p, m) {
all.equal(c(m$metrics$metrics$y_yhat_plot$values),
p$data[["values"]])
}
ms <- plotable_mods[!is_classif]
expect_true(all(mapply(plot_vals_correct, ps, ms), fill = T))
})
test_that("hist is produced", {
hs <- lapply(plotable_mods, function(m) hist(m))
expect_true(all(sapply(hs, is, "ggplot")))
plot_has_bars <- function(p) is(p[["layers"]][[1]][["geom"]], "GeomBar")
expect_true(all(sapply(hs, plot_has_bars)))
})
|
statsExpressions::`%>%`
statsExpressions::`%<>%`
statsExpressions::`%$%`
statsExpressions::`%<-%`
statsExpressions::tibble
statsExpressions::enframe
statsExpressions::as_tibble
rlang::exec
rlang::`!!`
rlang::`!!!`
rlang::`%|%`
rlang::`%||%`
rlang::`:=`
|
context("test enrich_dimension_export")
test_that("enrich_dimension_export works", {
tb <-
enrich_dimension_export(st_mrs_age_test,
name = "when_common",
attributes = c("week", "year"))
expect_equal(
tb,
structure(
list(
week = c("01", "02", "Unknown", "03", "04", "05"),
year = c("1962", "1962", "Unknown", "1962", "1962", "1962")
),
row.names = c(NA,-6L),
name = "when_common",
type = "role_playing",
class = c("tbl_df",
"tbl", "data.frame")
)
)
})
|
walk_ast <- function(expr, walk_call) {
counter <- counter_init()
walk_expr(expr, counter, walk_call)
sort(counter_get_names(counter))
}
walk_expr <- function(expr, counter, walk_call) {
if (!length(expr)) {
return()
} else if (is.call(expr)) {
walk_call(expr, counter)
lapply(expr, walk_expr, counter = counter, walk_call = walk_call)
} else if (typeof(expr) == "closure") {
walk_expr(formals(expr), counter, walk_call)
walk_expr(body(expr), counter, walk_call)
} else if (is.pairlist(expr) || is.recursive(expr)) {
lapply(expr, walk_expr, counter = counter, walk_call = walk_call)
}
}
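## Hypothetical sketch (not from the source): counter_init() and
## counter_get_names() are internal helpers of the package; the
## environment-based stand-ins below are assumptions made only so the
## traversal can be exercised on its own.
counter_init <- function() new.env(parent = emptyenv())
counter_get_names <- function(counter) ls(counter)
## A walk_call that records the name of every function being called.
record_calls <- function(expr, counter) {
  fn <- expr[[1]]
  if (is.symbol(fn)) assign(as.character(fn), TRUE, envir = counter)
}
walk_ast(quote(sum(x) + mean(log(y))), record_calls)
## under these stand-ins: "+" "log" "mean" "sum"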
|
PEMlegend <-
function() {
par(mar=c(1, 1, 1, 1))
plot.new()
legend("center", c("Behavior","PEM "), col = c("red","gray"), lwd = 1,ncol=2,bty ="n")
}
|
simple_model <- function(data, Y_value, Fixed_Factor, ...){
Y <- substitute(Y_value)
d <- substitute(data)
if(length(Fixed_Factor) == 1){
Facs <- paste0(Fixed_Factor, collapse = "")
} else {
Facs <- paste0(Fixed_Factor, collapse = "*")
}
fo <- as.formula(paste(Y, Facs, sep = "~"))
call1 <- paste0("lm(formula = ",
deparse1(fo),
", data = ",
deparse1(d),
", ...)")
mod <- eval(parse(text = call1))
mod
}
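## Hypothetical usage sketch (not from the source): simple_model() joins the
## factors with "*" and rebuilds an lm() call that records the original data
## name, so the data must be passed as a named object.
fit <- simple_model(mtcars, mpg, c("cyl", "wt"))
## equivalent to lm(mpg ~ cyl * wt, data = mtcars)
summary(fit)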
|
tar_test("graph$produce_upstream()", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
expect_equal(graph$produce_upstream("data1"), character(0))
expect_equal(graph$produce_upstream("data2"), character(0))
expect_equal(graph$produce_upstream("min1"), "data1")
expect_equal(graph$produce_upstream("min2"), "data2")
expect_equal(graph$produce_upstream("max1"), "data1")
expect_equal(graph$produce_upstream("max2"), "data2")
expect_equal(sort(graph$produce_upstream("mins")), sort(c("min1", "min2")))
expect_equal(sort(graph$produce_upstream("maxes")), sort(c("max1", "max2")))
expect_equal(sort(graph$produce_upstream("all")), sort(c("mins", "maxes")))
})
tar_test("graph$produce_downstream()", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
expect_equal(
sort(graph$produce_downstream("data1")),
sort(c("min1", "max1"))
)
expect_equal(
sort(graph$produce_downstream("data2")),
sort(c("min2", "max2"))
)
expect_equal(graph$produce_downstream("min1"), "mins")
expect_equal(graph$produce_downstream("min2"), "mins")
expect_equal(graph$produce_downstream("max1"), "maxes")
expect_equal(graph$produce_downstream("max2"), "maxes")
expect_equal(graph$produce_downstream("mins"), "all")
expect_equal(graph$produce_downstream("maxes"), "all")
expect_equal(graph$produce_downstream("all"), character(0))
})
tar_test("graph$produce_degrees(mode = \"in\")", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
expect_equal(graph$produce_degrees("data1", mode = "upstream"), 0L)
expect_equal(graph$produce_degrees("data2", mode = "upstream"), 0L)
expect_equal(graph$produce_degrees("min1", mode = "upstream"), 1L)
expect_equal(graph$produce_degrees("min2", mode = "upstream"), 1L)
expect_equal(graph$produce_degrees("max1", mode = "upstream"), 1L)
expect_equal(graph$produce_degrees("max2", mode = "upstream"), 1L)
expect_equal(graph$produce_degrees("mins", mode = "upstream"), 2L)
expect_equal(graph$produce_degrees("maxes", mode = "upstream"), 2L)
expect_equal(graph$produce_degrees("all", mode = "upstream"), 2L)
expect_equal(
graph$produce_degrees(c("all", "data1"), mode = "upstream"),
c(2L, 0L)
)
})
tar_test("graph$produce_degrees(mode = \"out\")", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
expect_equal(graph$produce_degrees("data1", mode = "out"), 2L)
expect_equal(graph$produce_degrees("data2", mode = "out"), 2L)
expect_equal(graph$produce_degrees("min1", mode = "out"), 1L)
expect_equal(graph$produce_degrees("min2", mode = "out"), 1L)
expect_equal(graph$produce_degrees("max1", mode = "out"), 1L)
expect_equal(graph$produce_degrees("max2", mode = "out"), 1L)
expect_equal(graph$produce_degrees("mins", mode = "out"), 1L)
expect_equal(graph$produce_degrees("maxes", mode = "out"), 1L)
expect_equal(graph$produce_degrees("all", mode = "out"), 0L)
expect_equal(
graph$produce_degrees(c("all", "data1"), mode = "out"),
c(0L, 2L)
)
})
tar_test("graph$insert_edges() upstream checks", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
new_edgelist <- data_frame(
from = c("abc", "xyz", "min1", "other1"),
to = c("data1", "data2", "123", "other2")
)
graph$insert_edges(new_edgelist)
upstream <- graph$upstream
expect_null(upstream[["abc"]])
expect_null(upstream[["xyz"]])
expect_null(upstream[["other1"]])
expect_equal(upstream[["123"]], "min1")
expect_equal(upstream[["other2"]], "other1")
expect_equal(upstream[["data1"]], "abc")
expect_equal(upstream[["data2"]], "xyz")
expect_equal(upstream[["min1"]], "data1")
expect_equal(upstream[["min2"]], "data2")
expect_equal(upstream[["max1"]], "data1")
expect_equal(upstream[["max2"]], "data2")
expect_equal(sort(upstream[["mins"]]), sort(c("min1", "min2")))
expect_equal(sort(upstream[["maxes"]]), sort(c("max1", "max2")))
expect_equal(sort(upstream[["all"]]), sort(c("mins", "maxes")))
})
tar_test("graph$insert_edges() downstream checks", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
new_edgelist <- data_frame(
from = c("abc", "xyz", "min1", "other1"),
to = c("data1", "data2", "123", "other2")
)
graph$insert_edges(new_edgelist)
downstream <- graph$downstream
expect_equal(downstream[["other1"]], "other2")
expect_null(downstream[["other2"]])
expect_null(downstream[["123"]])
expect_equal(downstream[["abc"]], "data1")
expect_equal(downstream[["xyz"]], "data2")
expect_equal(sort(downstream[["min1"]]), sort(c("mins", "123")))
expect_equal(sort(downstream[["data1"]]), sort(c("min1", "max1")))
expect_equal(sort(downstream[["data2"]]), sort(c("min2", "max2")))
expect_equal(sort(downstream[["min1"]]), sort(c("123", "mins")))
expect_equal(downstream[["min2"]], "mins")
expect_equal(downstream[["max1"]], "maxes")
expect_equal(downstream[["max2"]], "maxes")
expect_equal(downstream[["mins"]], "all")
expect_equal(downstream[["maxes"]], "all")
expect_null(downstream[["all"]])
})
tar_test("graph$replace_upstream()", {
graph <- graph_init()
new_edgelist <- data_frame(
from = c("abc", "xyz", "123"),
to = c("data1", "data1", "data2")
)
graph$insert_edges(new_edgelist)
expect_equal(sort(graph$upstream[["data1"]]), sort(c("abc", "xyz")))
graph$replace_upstream("data1", "xyz", "789")
expect_equal(sort(graph$upstream[["data1"]]), sort(c("abc", "789")))
})
tar_test("graph$validate()", {
edges <- pipeline_upstream_edges(pipeline_order(), targets_only = TRUE)
graph <- graph_init(remove_loops(edges))
expect_silent(graph$validate())
})
|
get_rdf <- function(file){
if (!requireNamespace("xslt", quietly = TRUE)) {
stop("xslt package required to convert to rdf",
call. = FALSE)
}
xml_xslt <- getExportedValue("xslt", "xml_xslt")
if(is(file, "nexml")){
who <- tempfile()
nexml_write(x=file, file=who)
file <- who
}
to_rdf <- system.file("examples", "RDFa2RDFXML.xsl", package="RNeXML")
rdf <- xml_xslt(xml2::read_xml(file), xml2::read_xml(to_rdf))
rdf
}
|
phenorm_longit_simpl <- function(df,
var_surrogate,
surrogates_quali,
id_rnd,
rf = FALSE,
ntree = 100,
bool_weight = FALSE,
p.noise = .3,
bool_SAFE = TRUE,
size = 10^5){
df.unid <- df %>% select(-id_rnd)
colmin <- apply(df.unid, 2, min)
fct_min <- function(x) min(x) > 0
df_transf <- df.unid %>%
mutate_if(fct_min, .funs = function(x) log(1+x))
if(bool_SAFE){
lasso_sur <- safe_selection(df = df_transf,
var_surrogate = var_surrogate,
surrogate_quali = surrogates_quali,
bool_weight = bool_weight)
var.prediction <- lasso_sur$important_var
} else {
var.prediction <- colnames(df_transf)
}
df_select <- df_transf %>% select(var.prediction)
x_matrix <- as.data.frame(df_select)
rf_model <- NULL
if(rf){
x_rf <- as.data.frame(x_matrix)
rf_model <- randomForest::randomForest(x = x_rf[surrogates_quali != 3,],
y = as.factor(surrogates_quali[surrogates_quali != 3]),
ntree = ntree)
sur_quali_rf <- predict(rf_model, x_rf)
surrogates_quali <- as.numeric(as.character(sur_quali_rf))
}
phenorm <- phenorm_longit_fit(x_matrix = x_matrix,
y_sur = surrogates_quali,
p.noise = p.noise,
size = size,
ID = df[,id_rnd])
return(list(model = phenorm,
var.prediction = var.prediction,
rf_model = rf_model
))
}
|
get_table_readable <- function(table, endpoint) {
get_fields(table, endpoint)
}
|
"butterfly"
|
library(hamcrest)
expected <- c(-0x1.02e2ac13ef8eep+2 + 0x1.782ebc592b0f2p+1i, 0x1.ebcc370a6681bp+1 + -0x1.a1fb0a2a2fd82p+1i,
-0x1.7500f32b8cbeap+1 + 0x1.27377121757e4p+2i, 0x1.ebcc370a66804p+1 + -0x1.a1fb0a2a2fd7p+1i,
-0x1.02e2ac13ef8efp+2 + 0x1.782ebc592b104p+1i)
assertThat(stats:::fft(inverse=TRUE,z=c(-0.663984427460662+0.791932836256974i, -1.22572926077441-0.35639067155105i,
-2.94672604778838-1.48843108985784i, 1.50907360675607+2.93620816008711i,
-0.71771884260736+1.05560702652717i))
, identicalTo( expected, tol = 1e-6 ) )
|
fmla_lhs <-
function(f, grp=NULL, lvl=NULL, rhs.lpad=FALSE) {
fl = as.list(f)
l=length(fl)
rl = list()
i=0
colon.op = FALSE
Rn.op = FALSE
lvl.found = FALSE
if (l == 1) return(f)
for (tm in fl) {
i=i+1
if (i==1) {
rl[[i]] = tm
if (tm == ":") colon.op = TRUE
if (tm == "Rn") Rn.op = TRUE
if (tm == "|") rl[[i]] = as.symbol("+")
}
if (i > 1) {
if (typeof(tm) == "language")
{rl[[i]] = fmla_lhs(tm)
}
else
if (tm == lvl) lvl.found = TRUE
rl[[i]] = tm
}
}
if (colon.op == TRUE) return(rl[[3]])
if (Rn.op == TRUE) return(rl[[2]])
if (rhs.lpad == TRUE && lvl.found == TRUE) return(rl[[2]])
return(as.call(rl))
}
fmla_rhs <-
function(f,
span,
Rnl=list(Rn.o=list(), Rn.n=NULL, rn.i=0),
byvars=list(byvars1=NULL, byvars2=NULL, byvars.i=0)
)
{
fl = as.list(f)
l=length(fl)
rl = list()
i=0;
colon.op = FALSE
Rn.op = FALSE
cond.op = FALSE
period.op = FALSE
I.op = FALSE
for (tm in fl)
{
i=i+1
if (i==1)
{
rl[[i]] = tm
if (tm == ".") {period.op = TRUE;}
if (tm == ":") {colon.op = TRUE}
if (tm == "Rn")
{Rn.op = TRUE;
Rnl$rn.i<-Rnl$rn.i+1
Rnl$Rn.o[[Rnl$rn.i]] <- fl[[2]]
Rnl$Rn.n <- c(Rnl$Rn.n, fl[[3]])
}
if (tm == "|")
{
cond.op = TRUE
rl[[i]] = as.symbol("+");
byvars$byvars.i=byvars$byvars.i+1;
if (byvars$byvars.i==1) {byvars$byvars1 <- all.vars(fl[[3]])}
if (byvars$byvars.i==2)
{
byvars$byvars2 <- byvars$byvars1
byvars$byvars1 <- all.vars(fl[[3]])
if (byvars$byvars1==".") {byvars$byvars1<-NULL}
}
}
if (sum(colon.op, Rn.op, cond.op, period.op)==0) {I.op<-TRUE}
}
if (i > 1)
{
if (typeof(tm) == "language")
{
rhs.obj <- fmla_rhs(tm, span=span, Rnl=Rnl, byvars=byvars)
rl <- c(rl, rhs.obj$rl)
Rnl = rhs.obj$Rnl
byvars = rhs.obj$byvars
}
else rl[[i]] = tm
}
}
if (!span) {if (colon.op) {return(list(rl=rl[[3]], Rnl=Rnl, byvars=byvars))}}
if (Rn.op) {return(list(rl=rl[[2]], Rnl=Rnl, byvars=byvars))}
if (cond.op) {return(list(rl=rl[[2]], Rnl=Rnl, byvars=byvars))}
if (I.op) {return(list(rl=as.call(rl), Rnl=Rnl, byvars=byvars))}
if (period.op)
{
return(list(rl=as.symbol(rl[[1]]), Rnl=Rnl, byvars=byvars))
}
return(list(rl=as.call(rl), Rnl=Rnl, byvars=byvars))
}
fmla_inter <-
function(f, data=NULL, regx=NA) {
level <- NULL
group <- NULL
byvars <- NULL
rhs.lpad <- FALSE
grp <- NULL
lvl <- NULL
lvl.v <- NULL
byvars1.v <- NULL
byvars2.v <- NULL
fl = as.list(f)
rl = list()
l = length(fl)
rhs <- if(l > 2) fl[[3]] else fl[[2]]
lhs <- if(l > 2) fl[[2]] else NULL
if (length(all.vars(lhs)) == 1 )
{
rhs.lpad = FALSE
lvl = all.vars(lhs)[1]
}
if (length(all.vars(lhs)) > 1 )
{
rhs.lpad = TRUE
grp = all.vars(lhs)[1]
lvl = all.vars(lhs)[2]
}
rhs.obj <- fmla_rhs(rhs, span=FALSE)
rhs.obj.span <- fmla_rhs(rhs, span=TRUE)
rhs = rhs.obj$rl
rhs.span = rhs.obj.span$rl
Rn.o <- as.character(unlist(rhs.obj$Rnl$Rn.o))
Rn.n <- rhs.obj$Rnl$Rn.n
if (!is.null(lhs))
{
lhs = fmla_lhs(lhs, grp, lvl, rhs.lpad)
fmla = as.call(list(as.symbol("~"), lhs, rhs))
fmla.span = as.call(list(as.symbol("~"), lhs, rhs.span))
}
else
{
fmla = as.call(list(as.symbol("~"), rhs))
fmla.span = as.call(list(as.symbol("~"), rhs.span))
}
fmla.span <- as.formula(fmla.span)
fmla <- as.formula(fmla)
trms <- terms(fmla.span, data=data, keep.order=T)
t.trmlbls <- attr(trms, "term.labels")
if (!is.null(lvl))
{if (lvl=="."){lvl<- NULL}
else {lvl.v <- data[,lvl]}
}
if (!is.null(rhs.obj$byvars$byvars1)) {byvars1.v <- data[,rhs.obj$byvars$byvars1]}
if (!is.null(rhs.obj$byvars$byvars2)) {byvars2.v <- data[,rhs.obj$byvars$byvars2]}
if (sum(!is.null(rhs.obj$byvars$byvars1), !is.null(rhs.obj$byvars$byvars2), !is.null(lvl)))
{
byvartrms <- which(t.trmlbls %in% c(rhs.obj$byvars$byvars1, rhs.obj$byvars$byvars2, lvl))
if (length(byvartrms)>0) {t.trmlbls <- t.trmlbls[-byvartrms]}
}
colnames.obj <- colnames.struct(t.trmlbls, FALSE)
if (length(Rn.o) > 0)
{for (rn.i in 1:length(Rn.o)) {colnames.obj$cname[colnames.obj$cname == Rn.o[rn.i]] <- Rn.n[rn.i]}}
colnames.obj <- colnames.linebreak(colnames.obj)
colnames.obj <- colnames.row(colnames.obj)
if (!is.na(regx)) {colnames.obj$cname <- kill.multiregx(colnames.obj$cname, regx=regx)}
trms.cn <- attr(terms(fmla, data=data), "term.labels")
trms.cn <- kill.multiregx(trms.cn, "`")
data=model.frame(fmla, data)
cn.dx <- which(colnames(data) %in% c(grp, trms.cn))
data <-data[,cn.dx]
if (!is.data.frame(data))
{data <- as.data.frame(data)}
if(!is.null(lvl)) {data[,lvl] <- lvl.v}
if(!is.null(byvars1.v)) {data[,rhs.obj$byvars$byvars1] <- byvars1.v}
if(!is.null(byvars2.v)) {data[,rhs.obj$byvars$byvars2] <- byvars2.v}
return(list(tbl=data, group=grp, label=lvl, byvars1=rhs.obj$byvars$byvars1, byvars2=rhs.obj$byvars$byvars2, fmla=fmla, colnames.obj=colnames.obj))
}
|
portfolioBacktesting <-
function(formula, data, spec = portfolioSpec(), constraints = "LongOnly",
backtest = portfolioBacktest(), trace = TRUE)
{
if (inherits(data, "fPFOLIODATA")) {
Data <- data
data <- getSeries(data)
} else if (inherits(data, "timeSeries")) {
Data <- portfolioData(data, spec)
}
if (inherits(constraints, "fPFOLIOSPEC")) {
Constraints <- constraints
constraints <- Constraints@stringConstraints
} else if (is.character(constraints)) {
Constraints <- portfolioConstraints(data, spec, constraints)
}
benchmarkName = as.character(formula)[2]
assetsNames <- strsplit(gsub(" ", "", as.character(formula)[3]), "\\+")[[1]]
nAssets <- length(assetsNames)
if(trace) {
cat("\nPortfolio Backtesting:\n")
cat("\nAssets: ", assetsNames)
cat("\nBenchmark: ", benchmarkName)
cat("\nStart Series: ", as.character(start(data)))
cat("\nEnd Series: ", as.character(end(data)))
cat("\n Type: ", getType(spec))
cat("\n Cov Estimator: ", getEstimator(spec))
cat("\n Solver: ", getSolver(spec))
cat("\nPortfolio Windows: ", getWindowsFun(backtest))
cat("\n Horizon: ", getWindowsHorizon(backtest))
cat("\nPortfolio Strategy: ", getStrategyFun(backtest))
cat("\nPortfolio Smoother: ", getSmootherFun(backtest))
cat("\n doubleSmoothing: ", getSmootherDoubleSmoothing(backtest))
cat("\n Lambda: ", getSmootherLambda(backtest))
}
if(trace) {
cat("\n\nPortfolio Optimization:")
cat("\nOptimization Period\tTarget\tBenchmark\t Weights\n")
}
windowsFun <- match.fun(getWindowsFun(backtest))
rollingWindows <- windowsFun(data, backtest)
from <- rollingWindows$from
to <- rollingWindows$to
strategyFun <- match.fun(getStrategyFun(backtest))
strategyList <- list()
Sigma <- NULL
for (i in 1:length(from))
{
pfSeries <- window(data[, assetsNames], start = from[i], end = to[i])
bmSeries <- window(data[, benchmarkName], start = from[i], end = to[i])
pfSeries <- portfolioData(pfSeries, spec)
Sigma <- c(Sigma, mean(diag(getSigma(pfSeries))))
strategy <- strategyFun(
data = getSeries(pfSeries),
spec = spec,
constraints = constraints,
backtest = backtest)
strategyList[[i]] <- strategy
if (trace) {
cat(as.character(from[i]), as.character(to[i]))
spReturn <- getTargetReturn(strategy@portfolio)[[2]]
cat("\t", round(spReturn[1], digits = 3))
bmReturn <- mean(series(bmSeries))
cat("\t", round(bmReturn, digits = 3))
nAssets <- length(assetsNames)
weights <- round(getWeights(strategy), digits = 3)
cat("\t")
for (j in seq_along(assetsNames)) cat("\t", weights[j])
cat("\t * ", round(sum(weights), 2))
cat("\n")
}
}
weights <- NULL
for (i in 1:length(strategyList))
weights <- rbind(weights, getWeights(strategyList[[i]]))
rownames(weights) <- as.character(to)
colnames(weights) <- assetsNames
ans <- list(
formula = formula,
data = data,
spec = spec,
constraints = constraints,
backtest = backtest,
benchmarkName = benchmarkName,
assetsNames = assetsNames,
weights = weights,
strategyList = strategyList,
Sigma = Sigma)
class(ans) <- c("portfolioBacktesting", "list")
invisible(ans)
}
portfolioSmoothing <-
function(object, backtest=NULL, trace=TRUE)
{
if (!is.null(backtest)) {
warning("The backtest argument is obsolete and will be
removed for the next release.")
}
formula <- object$formula
data <- object$data
spec <- object$spec
constraints <- object$constraints
backtest <- object$backtest
benchmarkName <- object$benchmarkName
assetsNames <- object$assetsNames
weights <- object$weights
skip <- getSmootherSkip(backtest)
if (skip > 0) weights <- weights[-(1:skip), ]
nAssets <- ncol(weights)
if (trace) print("smooth ...")
smoother <- match.fun(getSmootherFun(backtest))
smoothWeights <- object$smoothWeights <- smoother(weights, spec, backtest)
if (trace) print("aggregate ...")
ow <- options("warn")
options(warn = -1)
monthlyAssets <- object$monthlyAssets <-
applySeries(data[, assetsNames], by = "monthly", FUN = colSums)
monthlyBenchmark <- object$monthlyBenchmark <-
applySeries(data[, benchmarkName], by = "monthly", FUN = colSums)
options(ow)
if (trace) print("offset ...")
cumX <- colCumsums(data[, benchmarkName])
lastX <- window(cumX, start = start(cumX), end = rownames(weights)[1] )
offsetReturn <- as.vector(lastX[end(lastX), ])
names(offsetReturn) <- as.character(end(lastX))
object$offsetReturn <- offsetReturn
Datum <- as.vector(rownames(smoothWeights))
nDatum <- length(Datum)
Portfolio = Benchmark = NULL
for (i in 1:(nDatum-1)) {
Portfolio <- rbind(Portfolio, as.vector((
as.matrix(monthlyAssets[Datum[i+1], ]) %*% smoothWeights[Datum[i], ])))
Benchmark <- rbind(Benchmark, as.vector(monthlyBenchmark[Datum[i+1], ]))
}
P <- timeSeries(data = Portfolio, charvec = Datum[-1], units = "Portfolio")
object$portfolioReturns <- portfolio <- colCumsums(P)
object$P <- P
B <- timeSeries(data = Benchmark, charvec = Datum[-1], units = "Benchmark")
object$benchmarkReturns <- benchmark <- colCumsums(B)
object$B <- B
daily <- colCumsums(data[, benchmarkName])
Daily <- window(daily, start = start(portfolio), end = end(portfolio))
portfolio <- portfolio - portfolio[1] + Daily[1]
benchmark <- benchmark - benchmark[1] + Daily[1]
object$portfolio <- portfolio
object$benchmark <- benchmark
P <- as.vector(P)
B <- as.vector(B)
Stats <- c(sum(P, na.rm = TRUE), sum(B))
Stats <- rbind(Stats, c(mean(P, na.rm = TRUE), mean(B)))
Stats <- rbind(Stats, c(sd(P, na.rm = TRUE), sd(B)))
Stats <- rbind(Stats, c(min(P, na.rm = TRUE), min(B)))
colnames(Stats) <- c(
"Portfolio",
"Benchmark")
rownames(Stats) <- c(
"Total Return",
"Mean Return",
"StandardDev Return",
"Maximum Loss")
object$stats <- Stats
class(object) <- c("portfolioSmoothing", "list")
object
}
|
check_known_collections <- function(R, M) {
ret <- FALSE
if(!is.null(R[["known_collection"]]) && is.null(M[["known_collection"]])) {
stop("Reference data frame has known_collection column, but not the mixture data frame.")
}
if(!is.null(M[["known_collection"]]) && is.null(R[["known_collection"]])) {
stop("Mixture data frame has known_collection column, but not the reference data frame.")
}
if(!is.null(R[["known_collection"]])) {
if(!is.character(R[["known_collection"]])) {
stop("Column known_collection in reference data frame must be a character vector.")
}
}
if(!is.null(M[["known_collection"]])) {
if(!is.character(M[["known_collection"]])) {
stop("Column known_collection in mixture data frame must be a character vector.")
}
KC <- M[["known_collection"]]
uKC <- unique(KC[!is.na(KC)])
mKC <- setdiff(uKC, unique(R[["collection"]]))
if(length(mKC) > 0) {
stop("These known_collection entries in mixture data frame not found amongst reference collections: ",
paste(mKC, collapse = ", "), ". Please fix that and try again.")
}
ret <- any(!is.na(KC))
}
ret
}
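## Hypothetical usage sketch (not from the source): toy reference and
## mixture data frames in the rubias-style layout this check assumes.
R <- data.frame(collection = c("river_A", "river_A", "river_B"),
                known_collection = c("river_A", NA, "river_B"),
                stringsAsFactors = FALSE)
M <- data.frame(known_collection = c(NA, "river_B", NA),
                stringsAsFactors = FALSE)
check_known_collections(R, M)  # TRUE: some mixture fish have a known origin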
|
socket <- function(protocol = c("pair", "bus", "push", "pull", "req", "rep",
"pub", "sub", "surveyor", "respondent"),
dial = NULL,
listen = NULL,
autostart = TRUE) {
protocol <- match.arg(protocol)
res <- .Call(rnng_protocol_open, protocol)
if (is.integer(res)) message(res, " : ", nng_error(res))
if (!missing(dial)) {
dial(res, url = dial, autostart = autostart)
}
if (!missing(listen)) {
listen(res, url = listen, autostart = autostart)
}
res
}
subscribe <- function(socket, topic = NULL) {
xc <- .Call(rnng_socket_set_string, socket, "sub:subscribe" , topic)
if (xc) message(xc, " : ", nng_error(xc)) else message("subscribed topic: ",
if (is.null(topic)) "ALL" else topic)
invisible(xc)
}
unsubscribe <- function(socket, topic = NULL) {
xc <- .Call(rnng_socket_set_string, socket, "sub:unsubscribe" , topic)
if (xc) message(xc, " : ", nng_error(xc)) else message("unsubscribed topic: ",
if (is.null(topic)) "ALL" else topic)
invisible(xc)
}
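## Hypothetical usage sketch (not from the source), assuming the compiled
## routines these wrappers .Call() (rnng_protocol_open, rnng_socket_set_string)
## and the dial()/listen() helpers are available, as in the nanonext package:
## open a subscriber listening in-process and filter on one topic.
s <- socket("sub", listen = "inproc://demo")
subscribe(s, topic = "sensor")     # messages: subscribed topic: sensor
unsubscribe(s, topic = "sensor")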
|
setClass (
Class = "DiscernibilityMatrix",
representation = representation(
discernibilityMatrix = "array"
),
validity = function(object){
if(length(dim(object@discernibilityMatrix)) != 3){
stop ("[DiscernibilityMatrix: validation] Discernibility Matrix must be an 3 dim array")
}else{}
return(TRUE)
}
)
setMethod (
f="initialize",
signature="DiscernibilityMatrix",
definition=function(.Object,discernibilityMatrix){
if(!missing(discernibilityMatrix)){
.Object@discernibilityMatrix <- discernibilityMatrix
validObject(.Object)
}else{
.Object@discernibilityMatrix <- matrix(nrow=0,ncol=0)
}
return(.Object)
}
)
discernibilityMatrix <- function(theDiscernibilityMatrix){
new (Class="DiscernibilityMatrix",discernibilityMatrix = theDiscernibilityMatrix)
}
setGeneric("getDiscernibilityMatrix",function(object){standardGeneric ("getDiscernibilityMatrix")})
setMethod("getDiscernibilityMatrix","DiscernibilityMatrix",
function(object){
return(object@discernibilityMatrix)
}
)
setMethod ("print","DiscernibilityMatrix",
function(x,...){
cat("*** Class DiscernibilityMatrix, method Print *** \n")
disMat <- x@discernibilityMatrix
if(length(disMat) != 0){
mat <- .buildPrintMatrix(disMat)
print(formatC(mat),quote=FALSE)
}else{}
cat("******* End Print (DiscernibilityMatrix) ******* \n")
}
)
setMethod("show","DiscernibilityMatrix",
function(object){
cat("*** Class DiscernibilityMatrix, method Show *** \n")
cat("* DiscernibilityMatrix (limited to a matrix 10x10) = \n")
disMat <- object@discernibilityMatrix
if(length(disMat) != 0){
mat <- .buildPrintMatrix(disMat)
nrowShow <- min(10,nrow(mat))
ncolShow <- min(10,ncol(mat))
print(formatC(mat[1:nrowShow,1:ncolShow]),quote=FALSE)
}else{}
cat("******* End Show (DiscernibilityMatrix) ******* \n")
}
)
setGeneric (name = "computeCore",def = function(object){standardGeneric("computeCore")})
setMethod(
f = "computeCore",
signature = "DiscernibilityMatrix",
definition = function(object){
dm <- object@discernibilityMatrix
core <- .computeCore(dm)
return(core)
}
)
.buildPrintMatrix <- function(theArray){
disMat <- theArray
lenI <- dim(disMat)[1]
lenJ <- dim(disMat)[2]
lenK <- dim(disMat)[3]
if(length(disMat) != 0){
mat <- matrix(NA,nrow = lenI, ncol = lenJ)
for(i in 1:lenI){
for(j in 1:lenJ){
ij <- disMat[i,j,]
partialElement <- vector(mode = "numeric",length = 0)
partialCounter <- 0
for(k in 1:lenK){
if(!is.na(ij[k])){
if(ij[k]){
partialCounter <- (partialCounter +1)
partialElement[partialCounter] <- k
}else{}
}else{}
}
if(length(partialElement) > 0){
mat[i,j] <- paste("C",partialElement,sep="",collapse=",")
}else{}
}
}
mat <- t(mat)
rownames(mat) <- paste("R",1:nrow(mat),sep="")
colnames(mat) <- paste("R",1:ncol(mat),sep="")
}else{}
return(mat)
}
.computeCore <- function(theDiscernibilityMatrix){
ruleCount <- nrow(theDiscernibilityMatrix)
discernibilityMatrix <- theDiscernibilityMatrix
idRedMat <- apply(discernibilityMatrix,c(1,2),.reductIdentification)
idRedVec <- vector(mode = "numeric", length = 0)
for(i in 1:ruleCount){
for(j in 1:ruleCount){
if(!is.na(idRedMat[i,j])){
idRedVec[length(idRedVec) + 1] <- idRedMat[i,j]
}else{}
}
}
idRedVec <- unique(idRedVec)
idRedVec <- sort(idRedVec)
return(idRedVec)
}
.reductIdentification <- function(discernibilityMatrixElement){
conditionCount <- length(discernibilityMatrixElement)
result <- NA
if(sum(discernibilityMatrixElement,na.rm = TRUE) == 1){
for(i in 1:conditionCount){
if(discernibilityMatrixElement[i] == TRUE){
result <- i
break
}else{}
}
}else if(sum(discernibilityMatrixElement,na.rm = TRUE) != 1){
result <- NA
}else{}
return(result)
}
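## Hypothetical usage sketch (not from the source): a discernibility
## structure for 3 rules and 2 condition attributes, stored as the
## 3 x 3 x 2 logical array the class expects.
dm <- array(NA, dim = c(3, 3, 2))
dm[1, 2, ] <- dm[2, 1, ] <- c(TRUE, FALSE)   # rules 1 and 2 differ only on C1
dm[1, 3, ] <- dm[3, 1, ] <- c(TRUE, TRUE)    # rules 1 and 3 differ on C1 and C2
dm[2, 3, ] <- dm[3, 2, ] <- c(FALSE, TRUE)   # rules 2 and 3 differ only on C2
obj <- discernibilityMatrix(dm)
print(obj)
computeCore(obj)   # c(1, 2): both condition attributes are in the core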
|
viewTIC <- function(x, Seq = NULL, Batch = NULL, Group = NULL, Trans = "none", resultBy = "Group"){
value = NULL
if(is.null(Group)){stop("Please include group information")}
Group <- as.factor(Group)
if(is.null(Seq)){Seq = 1:nrow(x)}
if(length(Group) != nrow(x)){stop("Missing group information detected")}
if(!(resultBy %in% c("Group", "Batch"))){stop("Please choose if you want to show the result by Batch or by Group?")}
x[x == 0] <- 1
if(toupper(Trans) == "LOG2"){x = log2(x)}
if(toupper(Trans) == "LOG10"){x = log10(x)}
if(is.null(Batch)) {
dat2 <- cbind.data.frame(Seq = Seq, Group = Group, x)
dat2 <- dat2[order(dat2$Seq, dat2$Group), ]
dat3 <- melt(dat2, id = c("Seq", "Group"))
} else {
dat2 <- cbind.data.frame(Seq = Seq, Batch = Batch, Group = Group, x)
dat2 <- dat2[order(dat2$Seq, dat2$Batch, dat2$Group), ]
dat3 <- melt(dat2, id = c("Seq", "Batch", "Group"))
}
dat3$Group <- as.factor(dat3$Group)
dat3$Seq <- as.factor(dat3$Seq)
if(resultBy == "Group"){
p <- ggplot2::ggplot(dat3, aes(x = Seq, y = value, fill = Group)) +
geom_boxplot(alpha = 0.5) +
facet_grid(. ~ Group, scales = "free_x", space = "free_x") +
theme_bw() +
xlab("Sample (injection sequence)") +
ylab("Intensity")
} else if (resultBy == "Batch"){
dat3$Batch <- as.factor(dat3$Batch)
p <- ggplot2::ggplot(dat3, aes(x = Seq, y = value, fill = Batch)) +
geom_boxplot(alpha = 0.5) +
facet_grid(. ~ Batch, scales = "free_x", space = "free_x") +
theme_bw() +
xlab("Sample (injection sequence)") +
ylab("Intensity")
}
return(p)
}
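## Hypothetical usage sketch (not from the source): viewTIC() expects a
## samples-by-features intensity matrix plus per-sample Group (and optionally
## Batch) labels; melt() comes from reshape2 and the plotting from ggplot2.
library(reshape2)
library(ggplot2)
set.seed(1)
x <- matrix(abs(rnorm(12 * 50, mean = 1e5, sd = 2e4)), nrow = 12)
grp <- rep(c("QC", "Sample"), each = 6)
viewTIC(x, Group = grp, Trans = "log2", resultBy = "Group")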
|
.dtsq <- function(x, param, df, log = FALSE){
fx <- x*(df - param + 1)/(param*df)
p <- df(fx, param, df - param + 1, log=log)
if(log) p + log((df - param + 1)/(param*df)) else p*((df - param + 1)/(param*df))
}
.ptsq <- function (q, param, df, lower.tail = TRUE, log.p = FALSE){
fq <- q*(df - param + 1)/(param*df)
pf(fq, param, df - param + 1, lower.tail=lower.tail, log.p=log.p)
}
.qtsq <- function(p, param, df, lower.tail = TRUE, log.p = FALSE){
fq <- qf(p, param, df - param + 1, lower.tail=lower.tail, log.p=log.p)
fq / ((df - param + 1)/(param*df))
}
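## Hypothetical sanity check (not from the source): the three helpers map
## Hotelling's T^2 with param variables and df degrees of freedom onto a
## scaled F distribution, so density, distribution and quantile functions
## should be mutually consistent.
q95 <- .qtsq(0.95, param = 3, df = 20)
.ptsq(q95, param = 3, df = 20)                        # 0.95
integrate(.dtsq, 0, q95, param = 3, df = 20)$value    # approximately 0.95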
approx.hotelling.diff.test<-function(x,y=NULL, mu0=0, assume.indep=FALSE, var.equal=FALSE, ...){
if(!is.mcmc.list(x))
x <- mcmc.list(mcmc(as.matrix(x)))
if(!is.null(y) && !is.mcmc.list(y))
y <- mcmc.list(mcmc(as.matrix(y)))
if(is.null(mu0)) mu0 <- rep(0,nvar(x))
mu0 <- rep(mu0, length.out = nvar(x))
tr <- function(x) sum(diag(as.matrix(x)))
vars <- list(x=list(v=x))
if(!is.null(y)) vars$y <- list(v=y)
vars <- lapply(if(is.null(y)) list(x=x) else list(x=x,y=y), function(v, ...){
vm <- as.matrix(v)
vcov.indep <- cov(vm)
if(assume.indep){
vcov <- vcov.indep
}else{
vcov <- ERRVL(try(spectrum0.mvar(v, ...), silent=TRUE),
stop("Unable to compute autocorrelation-adjusted standard errors."))
}
m <- colMeans(vm)
n <- nrow(vm)
infl <- if(assume.indep) 1 else attr(vcov, "infl")
neff <- n / infl
vcov.m <- vcov/n
list(v=v, vm=vm, m=m, n=n, vcov.indep=vcov.indep, vcov=vcov, infl=infl, neff=neff, vcov.m=vcov.m)
}, ...)
x <- vars$x
y <- vars$y
d <- x$m
vcov.d <- x$vcov.m
if(!is.null(y)){
d <- d - y$m
if(var.equal){
vcov.pooled <- (x$vcov*(x$n-1) + y$vcov*(y$n-1))/(x$n+y$n-2)
vcov.d <- vcov.pooled * (1/x$n + 1/y$n)
}else{
vcov.d <- vcov.d + y$vcov.m
}
}
p <- nvar(x$v)
names(mu0)<-varnames(x$v)
novar <- diag(vcov.d)==0
p <- p-sum(novar)
if(p==0) stop("data are essentially constant")
ivcov.d <-ginv(vcov.d[!novar,!novar,drop=FALSE])
method <- paste0("Hotelling's ",
NVL2(y, "Two", "One"),
"-Sample",if(var.equal) " Pooled"," T^2-Test", if(!assume.indep) " with correction for autocorrelation")
if(any((d-mu0)[novar]!=0)){
warning("Vector(s) ", paste.and(colnames(x)[novar]),
NVL2(y,
" do not vary and do not equal mu0",
" do not vary in x or in y and have differences unequal to mu0"),
"; P-value has been set to 0.")
T2 <- +Inf
}else{
if(any(novar)){
warning("Vector(s) ", paste.and(colnames(x)[novar]),
NVL2(y,
" do not vary but equal mu0",
" do not vary in x or in y but have differences equal to mu0"),
"; they have been ignored for the purposes of testing.")
}
T2 <- c(t((d-mu0)[!novar])%*%ivcov.d%*%(d-mu0)[!novar])
}
NANVL <- function(z, ifNAN) ifelse(is.nan(z),ifNAN,z)
names(T2) <- "T^2"
pars <- c(param = p, df = if(is.null(y)){
NANVL(x$neff,1)-1
}else if(var.equal){
NANVL(x$neff,1)+NANVL(y$neff,1)-2
}else{
(p+p^2)/(
NANVL((tr(x$vcov.m[!novar,!novar] %*% ivcov.d %*% x$vcov.m[!novar,!novar] %*% ivcov.d) +
tr(x$vcov.m[!novar,!novar] %*% ivcov.d)^2)/x$neff,0) +
NANVL((tr(y$vcov.m[!novar,!novar] %*% ivcov.d %*% y$vcov.m[!novar,!novar] %*% ivcov.d) +
tr(y$vcov.m[!novar,!novar] %*% ivcov.d)^2)/y$neff,0))
})
if(pars[1]>=pars[2]) warning("Effective degrees of freedom (",pars[2],") must exceed the number of varying parameters (",pars[1],"). P-value will not be computed.")
out <- list(statistic=T2, parameter=pars, p.value=if(pars[1]<pars[2]) .ptsq(T2,pars[1],pars[2],lower.tail=FALSE) else NA,
method = method,
null.value=mu0,
alternative="two.sided",
estimate = d,
covariance = vcov.d,
covariance.x = x$vcov.m,
covariance.y = y$vcov.m,
novar = novar)
class(out)<-"htest"
out
}
geweke.diag.mv <- function(x, frac1 = 0.1, frac2 = 0.5, split.mcmc.list = FALSE, ...){
if(is.mcmc.list(x)){
if(split.mcmc.list){
return(lapply(x, geweke.diag.mv, frac1, frac2, ...))
}
}else{
x <- as.mcmc(x)
}
x.len <- end(x) - start(x)
x1 <- window(x, start=start(x), end=start(x) + frac1*x.len)
x2 <- window(x, start=end(x) - frac2*x.len, end=end(x))
test <-
ERRVL(try(approx.hotelling.diff.test(x1,x2,var.equal=TRUE,...), silent=TRUE),
{
warning("Multivariate Geweke diagnostic failed, probably due to insufficient sample size.", call.=FALSE, immediate.=TRUE)
test <- structure(list(p.value=NA), class="htest")
})
if(is.na(test$p.value)) test$p.value <- 0
test$method <- paste("Multivariate extension to Geweke's burn-in convergence diagnostic")
test
}
spectrum0.mvar <- function(x, order.max=NULL, aic=is.null(order.max), tol=.Machine$double.eps^0.5, ...){
breaks <- if(is.mcmc.list(x)) c(0,cumsum(sapply(x, niter))) else NULL
x <- as.matrix(x)
n <- nrow(x)
p <- ncol(x)
v <- matrix(0,p,p)
novar <- abs(apply(x,2,stats::sd))<tol
x <- x[,!novar,drop=FALSE]
if(ncol(x) == 0) stop("All variables are constant.")
first_local_min <- function(x){
d <- diff(c(Inf,x,Inf))
min(which(d>=0))-1
}
e <- eigen(cov(x), symmetric=TRUE)
Q <- e$vectors[,sqrt(pmax(e$values,0)/max(e$values))>tol*2,drop=FALSE]
xr <- x%*%Q
ind.var <- cov(xr)
xr <-
if(!is.null(breaks)) do.call(mcmc.list,lapply(lapply(seq_along(breaks[-1]), function(i) xr[(breaks[i]+1):(breaks[i+1]),,drop=FALSE]), mcmc))
else as.mcmc.list(mcmc(xr))
ord <- NVL(order.max, ceiling(10*log10(niter(xr))))
xr <- do.call(rbind, c(lapply(unclass(xr)[-nchain(xr)], function(z) rbind(cbind(z), matrix(NA, ord, nvar(z)))), unclass(xr)[nchain(xr)]))
arfit <- .catchToList(ar(xr,aic=is.null(order.max), order.max=ord, na.action=na.pass, ...))
while((!is.null(arfit$error) || ERRVL(try(any(eigen(arfit$value$var.pred, only.values=TRUE)$values<0), silent=TRUE), TRUE)) && ord > 0){
ord <- ord - 1
if(ord<=0) stop("Unable to fit ar() even with order 1; this is likely to be due to insufficient sample size or a trend in the data.")
arfit <- .catchToList(ar(xr,aic=is.null(order.max), order.max=ord, na.action=na.pass, ...))
}
arfit <- arfit$value
if(aic && arfit$order>(ord <- first_local_min(arfit$aic)-1)){
arfit <- ar(xr, aic=ord==0, order.max=max(ord,1), na.action=na.pass)
}
arvar <- arfit$var.pred
arcoefs <- arfit$ar
arcoefs <- NVL2(dim(arcoefs), apply(arcoefs,2:3,base::sum), sum(arcoefs))
adj <- diag(1,nrow=ncol(xr)) - arcoefs
iadj <- solve(adj)
v.var <- iadj %*% arvar %*% t(iadj)
infl <- exp((determinant(v.var)$modulus-determinant(ind.var)$modulus)/ncol(ind.var))
v.var <- Q%*%v.var%*%t(Q)
v[!novar,!novar] <- v.var
attr(v, "infl") <- infl
v
}
|
context("Utility")
test_that("MVP.Version() works fine.", {
expect_output(MVP.Version())
})
|
quartetStarTestInd = function(rqt) {
M = dim(rqt)[1]
qcols = c("12|34", "13|24", "14|23")
rqt = cbind(rqt, p_star = 0)
message("Applying hypothesis test for star tree model to ",M," quartets.")
for (i in 1:M) {
obs = rqt[i, qcols]
rqt[i, "p_star"] = quartetStarTest(obs)
}
return(rqt)
}
quartetStarTest = function(obs) {
z = chisq.test(obs)
p = z$p.value
return(p)
}
|
NULL
ENG <- function(x, ...)
{
UseMethod("ENG")
}
ENG.formula <- function(formula,
data,
...)
{
if(!is.data.frame(data)){
stop("data argument must be a data.frame")
}
modFrame <- model.frame(formula,data)
attr(modFrame,"terms") <- NULL
ret <- ENG.default(x=modFrame,...,classColumn = 1)
ret$call <- match.call(expand.dots = TRUE)
ret$call[[1]] <- as.name("ENG")
cleanData <- data
if(!is.null(ret$repIdx)){
cleanData[ret$repIdx,which(colnames(cleanData)==colnames(modFrame)[1])] <- ret$repLab
}
ret$cleanData <- cleanData[setdiff(1:nrow(cleanData),ret$remIdx),]
return(ret)
}
ENG.default <- function(x,
graph = "RNG",
classColumn=ncol(x),
...)
{
if(!is.data.frame(x)){
stop("data argument must be a data.frame")
}
if(!classColumn%in%(1:ncol(x))){
stop("class column out of range")
}
if(!is.factor(x[,classColumn])){
stop("class column of data must be a factor")
}
if(!graph%in%c("RNG","GG")){
stop("the 'graph' argument must be either 'GG' (Gabriel Graph) or 'RNG' (Relative Neighborhood Graph)")
}
PG <- sapply(1:(nrow(x)-1),function(i){c(rep(NA,i),
sapply((i+1):nrow(x),function(j){isNeighbor(i,j,x,graph,classColumn)}))})
PG <- cbind(PG,rep(NA,nrow(x)))
for(i in 1:(nrow(x)-1)){
for(j in (i+1):nrow(x)){
PG[i,j] <- PG[j,i]
}
}
isMisclassified <- sapply(1:nrow(x),function(i){
classes <- table(x[PG[i,],classColumn])
if(names(classes)[nnet::which.is.max(classes)]==x[i,classColumn]){
out <- FALSE
}else{
out <- TRUE
}
return(out)
})
toRemove <- sapply(which(isMisclassified),function(i){
sameClassNeigh <- c(i,which(PG[i,] & x[,classColumn]==x[i,classColumn]))
classes <- character(0)
for(j in sameClassNeigh){
classes <- c(classes,as.character(x[PG[j,],classColumn]))
}
tableClasses <- table(classes)
if(names(tableClasses)[nnet::which.is.max(tableClasses)]==x[i,classColumn]){
out <- FALSE
}
else{
out <- TRUE
}
return(out)
})
remIdx <- which(isMisclassified)[toRemove]
cleanData <- x[setdiff(1:nrow(x),remIdx),]
repIdx <- NULL
repLab <- NULL
parameters <- list(graph=graph)
call <- match.call()
call[[1]] <- as.name("ENG")
ret <- list(cleanData = cleanData,
remIdx = remIdx,
repIdx=repIdx,
repLab=repLab,
parameters=parameters,
call = call,
extraInf = NULL
)
class(ret) <- "filter"
return(ret)
}
distt <- function(x,y){
class <- sapply(x,class)
if("factor"%in%class){
out <- sum((x[1,class!="factor"]-y[1,class!="factor"])^2)+sum(x[1,class=="factor"]!=y[1,class=="factor"])
}
else{
out <- sum((x-y)^2)
}
out <- sqrt(out)
return(out)
}
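## Small sketch of the mixed-type distance above: numeric columns contribute
## squared differences, factor columns contribute a 0/1 mismatch penalty
## (values below are made up for illustration).
distt(data.frame(a = 1, b = 2), data.frame(a = 4, b = 6))        # sqrt(9 + 16) = 5
distt(data.frame(a = 0, f = factor("x", levels = c("x", "y"))),
      data.frame(a = 3, f = factor("y", levels = c("x", "y"))))  # sqrt(9 + 1)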
isNeighbor <- function(i, j, data, graphType, classColumn){
out <- TRUE
if(graphType=="GG"){
for(k in 1:nrow(data)){
if(distt(data[i,- classColumn],data[j,- classColumn])^2 > (distt(data[i,- classColumn],data[k,- classColumn])^2 + distt(data[j,- classColumn],data[k,- classColumn])^2)){
out <- FALSE
break
}
}
}
else{
for(k in 1:nrow(data)){
if(distt(data[i,- classColumn],data[j,- classColumn]) > max(distt(data[i,- classColumn],data[k,- classColumn]),distt(data[j,- classColumn],data[k,- classColumn]))){
out <- FALSE
break
}
}
}
return(out)
}
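## Hedged usage sketch (assumes the nnet package is installed, which supplies
## which.is.max() used above). The neighbourhood-graph filter is roughly cubic
## in the number of rows, so only a small random subset of iris is used here;
## the class label is in the last column, which is the default classColumn.
if (requireNamespace("nnet", quietly = TRUE)) {
  set.seed(1)
  small_iris <- iris[sample(nrow(iris), 20), ]
  filt <- ENG(small_iris, graph = "RNG")
  print(filt$remIdx)           # indices of instances flagged as noisy and removed
  print(nrow(filt$cleanData))  # remaining instances
}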
|
test_that("reporter as expected", {
expect_snapshot_reporter(MinimalReporter$new())
})
|
prepare.mpt.fia <- function(data, model.filename, restrictions.filename = NULL, outfile = "clipboard", Sample = 200000, model.type = c("easy", "eqn", "eqn2")){
if(is.vector(data)) {
data <- array(data, dim = c(1, length(data)))
multiFit <- FALSE
} else
if(is.matrix(data) | is.data.frame(data)) {
if (is.data.frame(data)) data <- as.matrix(data)
multiFit <- TRUE
} else stop("data is neither vector, nor matrix, nor data.frame!")
if(!is.null(restrictions.filename)) {
restrictions <- .read.MPT.restrictions(restrictions.filename)
}
class.model <- class(model.filename)
if ("connection" %in% class.model) {
tmp.model <- readLines(model.filename)
model.filename <- textConnection(tmp.model)
model <- .get.mpt.model(model.filename, model.type)
model.filename <- textConnection(tmp.model)
} else model <- .get.mpt.model(model.filename, model.type)
n.data <- dim(data)[1]
if (sum(sapply(model, length)) != length(data[1,])) stop(paste("Size of data does not correspond to size of model (i.e., model needs ", sum(sapply(model, length)), " datapoints, data gives ", length(data[1,]), " datapoints).", sep = ""))
df.n <- apply(data, 1, .DF.N.get, tree = model)
n_items <- sapply(df.n, function (x) sum(x[[2]]))
mpt.string <- make.mpt.cf(model.filename, model.type=model.type)
is.category <- grepl("^[[:digit:]]+$", mpt.string)
s <- paste(ifelse(is.category == 0, "p", "C"), collapse = "")
params <- mpt.string[!is.category]
category <- mpt.string[is.category]
category <- paste(category, collapse = ",")
is.p.join <- grepl("^hank\\.join\\.", params)
c.join <- sum(is.p.join)
p.join <- params[is.p.join]
p.n.join <- params[!is.p.join]
hank.restrictions <- vector("list", c.join)
ns <- t(sapply(df.n, function (x) x[[2]]))
p.n.join.lev <- sort(unique(p.n.join))
f.p.n.join <- factor(p.n.join, levels = p.n.join.lev)
names(p.n.join.lev) <- 1:length(p.n.join.lev)
if (c.join > 0) {
for (indiv in 1:n.data) {
for (c.hank in 1:(c.join)) {
hank.restrictions[[c.hank]][indiv] <- sum(ns[indiv,1:c.hank]) / sum(ns[indiv,1:(c.hank+1)])
}
}
params.join.mat <- matrix(NA, nrow = n.data, ncol = (c.join))
for (indiv in 1:n.data) {
for (par.join in 1:length(p.join)) {
params.join.mat[indiv,par.join] <- -hank.restrictions[[length(p.join)-(par.join-1)]][indiv]
}
}
parameters <- vector("list", n.data)
for (indiv in 1:n.data) {
parameters[[indiv]] <- c(as.character(round(params.join.mat[indiv,],4)), as.numeric(f.p.n.join))
}
} else {
parameters <- vector("list", n.data)
for (indiv in 1:n.data) {
parameters[[indiv]] <- as.character(as.numeric(f.p.n.join))
}
}
ineq <- "["
if(!is.null(restrictions.filename)) {
for (restr in 1:length(restrictions)) {
if (restrictions[[restr]][3] == "=") {
if (grepl("^[[:digit:]]", restrictions[[restr]][2])) {
for (indiv in 1:n.data) {
parameters[[indiv]][params == restrictions[[restr]][1]] <- as.character(-as.numeric(restrictions[[restr]][2]))
}
} else {
for (indiv in 1:n.data) {
parameters[[indiv]][params == restrictions[[restr]][1]] <- (parameters[[indiv]][params == restrictions[[restr]][2]])[1]
}
}
} else {
if (restrictions[[restr]][3] == "<") {
if (nchar(ineq) > 1) ineq <- paste(ineq, ";", sep = "")
ineq <- paste(ineq, paste((parameters[[indiv]][params == restrictions[[restr]][1]])[1], (parameters[[indiv]][params == restrictions[[restr]][4]])[1], sep = ","), sep = "")
}
}
}
}
ineq <- paste(ineq, "]", sep = "")
calls <- vector("character", n.data)
for (indiv in 1:n.data) {
calls[indiv] <- paste("[CFIA,CI,lnInt,CI1,lnconst,CI2] = BMPTFIA(\'", s, "\',[", paste(parameters[[indiv]], collapse=","), "],", ineq, ",[", category, "],", n_items[indiv], ",", Sample, ")", sep ="")
}
writeLines(calls, outfile)
outlist <- list(s = s, parameters = parameters, param.codes = p.n.join.lev, category = category, ineq0 = ineq, n = n_items, internal = mpt.string)
outlist
}
|
suppressPackageStartupMessages({
require(data.table)
require(jsonlite)
require(ggplot2)
require(cowplot)
})
.args <- commandArgs(trailingOnly = T)
load(.args[1])
refpars <- readRDS(.args[3])[, seropos := 1 - seroneg9 ]
reducedrois <- readRDS(.args[2])[mechanism == "binary"][
refpars, on = .(foi, disparity)
][, .(roi = max(roi)),
by = .(foi, disparity, seropos, p_H, log10OR, nu, tau)
]
rename <- function(res, ref) { names(res) <- ref; return(res) }
foiref <- refpars[, seropos[1], by = .(ref = foi)][, rename(V1, ref)]
pHref <- refpars[, p_H[1], by = .(ref = disparity)][, rename(V1, ref)]
ORref <- refpars[, log10OR[1], by = .(ref = disparity)][, rename(V1, ref)]
disref <- list(
md = quote(Disparity ~ (p[H] * ", " * log[10] * OR)),
ll = quote(phantom("Disparity" ~ (p[H] * ", " * log[10] * OR))),
ul = quote(phantom("Disparity" ~ (p[H] * ", " * log[10] * OR)))
)
p <- ggplot(reducedrois) + aes(
log10(tau), nu,
fill = roi, z = roi, color = c("neg", "pos")[(roi > 0) + 1]
) +
facet_grid(
disparity ~ foi,
labeller = label_bquote(
cols = .(
sprintf("%s\n%d%%",
ifelse(foi == "md", "% Seropositive 9-year-olds", ""),
foiref[foi] * 100)
),
rows = atop(
.(
disref[[disparity]]),
(.(pHref[disparity]) * ", " * .(ORref[disparity]))
)
), scales = "free"
) +
geom_heat +
thm + scale_contour + scale_roi + scale_nu + scale_tau
save_plot(tail(.args, 1), p, ncol = 3, nrow = 3, base_height = 4 * 2 / 3)
|
lsm_c_division <- function(landscape, directions = 8) {
landscape <- landscape_as_list(landscape)
result <- lapply(X = landscape,
FUN = lsm_c_division_calc,
directions = directions)
layer <- rep(seq_along(result),
vapply(result, nrow, FUN.VALUE = integer(1)))
result <- do.call(rbind, result)
tibble::add_column(result, layer, .before = TRUE)
}
lsm_c_division_calc <- function(landscape, directions, resolution = NULL) {
patch_area <- lsm_p_area_calc(landscape,
directions = directions,
resolution = resolution)
total_area <- sum(patch_area$value)
if (is.na(total_area)) {
return(tibble::tibble(level = "class",
class = as.integer(NA),
id = as.integer(NA),
metric = "division",
value = as.double(NA)))
}
patch_area$value <- (patch_area$value / total_area) ^ 2
division <- stats::aggregate(x = patch_area[, 5], by = patch_area[, 2],
FUN = sum)
division$value <- 1 - division$value
return(tibble::tibble(level = "class",
class = as.integer(division$class),
id = as.integer(NA),
metric = "division",
value = as.double(division$value)))
}
|
context("narcc testing")
set.seed(8675309)
datcc <- PROscorerTools::makeFakeData(n = 30, nitems = 10, values = 0:3)
datnar <- PROscorerTools::makeFakeData(n = 30, nitems = 6, values = 0:3)
test_that("narcc: Error if no or wrong 'whichScale' given", {
expect_error(narcc(df = datcc), "use the 'whichScale' argument")
expect_error(narcc(df = datcc, whichScale = "Cog"),
"use the 'whichScale' argument")
})
test_that("narcc: Expect no error (silent) when correct input given", {
expect_silent(narcc(df = datcc, whichScale = "CC"))
expect_silent(narcc(df = datnar, whichScale = "NAR"))
expect_silent(narcc(df = datcc, whichScale = "CC", keepNvalid = TRUE))
expect_silent(narcc(df = datnar, whichScale = "NAR", keepNvalid = TRUE))
})
|
mkdb <- function(isrec) {
dbfun <- function(x, lower = 0, upper = 0, values = "", extra = 0) {
catf("x: %s\nlower: %s\nupper: %s\nvalues: %s\nextra: %s",
collapse(x), collapse(lower), collapse(upper), collapse(values), extra)
if (isrec) do.call(wrapChildren, x) else x
}
  dbfun
}
debugrec <- makeRecombinator(mkdb(TRUE), "custom", n.parents = 2, n.children = 2)
debugmut <- makeMutator(mkdb(FALSE), "custom")
|
pwecxcens<-function(t=seq(0,10,by=0.5),rate1=c(1,0.5),rate2=rate1,
rate3=c(0.7,0.4),rate4=rate2,rate5=rate2,ratec=c(0.2,0.3),
tchange=c(0,1),type=1,rp2=0.5,eps=1.0e-2){
tin<-t
r1<-rate1;r2<-rate2;r3<-rate3;r4<-rate4;r5<-rate5;rc<-ratec;
a2<-pwefv2(t=tin,rate1=r1,rate2=(r1+r3+rc),tchange=tchange,eps=eps)
a4<-pwefvplus(t=tin,rate1=r1,rate2=r2,rate3=r3,rate4=r4,rate5=r5,rate6=rc,tchange=tchange,type=type,rp2=rp2,eps=eps)
du<-a2$f0+a4$f0
b2<-pwecx(t=tin,rate1=r1,rate2=r2,rate3=r3,rate4=r4,rate5=r5,tchange=tchange,type=type,rp2=rp2,eps=eps)
b4<-pwe(t=tin,rate=rc,tchange=tchange)
duprime<-b2$density*b4$surv
list(du=du,duprime=duprime,s=b2$surv,sc=b4$surv)
}
|
nextp<-function(perm,b=1){
out<-.Fortran("nextp",perm=as.integer(perm),as.integer(length(perm)),as.integer(b),PACKAGE="MultNonParam")
return(out$perm)
}
|
levelCheck <- function(x, klass_data){
if(!all(grepl("^[A-Za-z_-]+$", x))){
tab <- data.frame(table(nchar(tm::removePunctuation(unlist(x)))))
t <- which.max(tab$Freq)
nVar <- tab[t,]$Var1
nVar <- nVar[!is.na(nVar)]
input_level <- NA
for (i in 1:length(unique(klass_data$level))){
if(nVar==nchar(tm::removePunctuation(unlist(klass_data[klass_data$level==i,]$code[1])))){
input_level <- i
}
}
} else {
m <- match(x, klass_data$code)
tab <- table(klass_data[m, ]$level)
input_level <- as.numeric(names(tab)[which.max(tab)])
}
if(is.na(input_level))
stop("Cannot find an input level, please check the input vector")
return (input_level)
}
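## Hedged sketch (assumes the tm package is installed; klass_demo only mimics
## the expected klass_data layout with code/level columns): numeric-style codes
## are matched to a classification level by their number of characters.
if (requireNamespace("tm", quietly = TRUE)) {
  klass_demo <- data.frame(code  = c("0", "1", "011", "012", "111"),
                           level = c(1, 1, 2, 2, 2),
                           stringsAsFactors = FALSE)
  print(levelCheck(c("011", "111", "012"), klass_demo))   # expected: 2
}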
|
expected <- eval(parse(text="list(structure(c(\"0.007239522\", \"0.014584634\", \"0.014207936\", \"0.018442267\", \"0.011128505\", \"0.019910082\", \"0.027072311\", \"0.034140379\", \"0.028320657\", \"0.037525507\"), class = \"AsIs\"), structure(c(\" 1\", \" 6\", \" 7\", \" 8\", \"13\", \"14\", \"15\", \"20\", \"21\", \"22\"), class = \"AsIs\"), structure(c(\" 16\", \" 16\", \"144\", \" 16\", \" 16\", \"128\", \" 16\", \" 16\", \"112\", \" 16\"), .Dim = 10L, .Dimnames = structure(list(c(\"1\", \"6\", \"7\", \"8\", \"13\", \"14\", \"15\", \"20\", \"21\", \"22\")), .Names = \"\")))"));
test(id=0, code={
argv <- eval(parse(text="list(list(structure(list(structure(c(\"0.007239522\", \"0.014584634\", \"0.014207936\", \"0.018442267\", \"0.011128505\", \"0.019910082\", \"0.027072311\", \"0.034140379\", \"0.028320657\", \"0.037525507\"), class = \"AsIs\")), row.names = c(NA, -10L), class = \"data.frame\"), structure(list(structure(c(\" 1\", \" 6\", \" 7\", \" 8\", \"13\", \"14\", \"15\", \"20\", \"21\", \"22\"), class = \"AsIs\")), row.names = c(NA, -10L), class = \"data.frame\"), structure(list(structure(c(\" 16\", \" 16\", \"144\", \" 16\", \" 16\", \"128\", \" 16\", \" 16\", \"112\", \" 16\"), .Dim = 10L, .Dimnames = structure(list(c(\"1\", \"6\", \"7\", \"8\", \"13\", \"14\", \"15\", \"20\", \"21\", \"22\")), .Names = \"\"))), row.names = c(\"1\", \"6\", \"7\", \"8\", \"13\", \"14\", \"15\", \"20\", \"21\", \"22\"), class = \"data.frame\")), FALSE, FALSE)"));
.Internal(unlist(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
|
tar_test("tar_exist_process()", {
expect_false(tar_exist_process())
dir_create(dirname(path_process(path_store_default())))
file.create(path_process(path_store_default()))
expect_true(tar_exist_process())
})
tar_test("custom script and store args", {
skip_on_cran()
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
tar_script(tar_target(x, 1), script = "example/script.R")
expect_false(tar_exist_process(store = "example/store"))
expect_false(file.exists("example/store"))
tar_make(
callr_function = NULL,
script = "example/script.R",
store = "example/store"
)
expect_true(tar_exist_process(store = "example/store"))
expect_true(file.exists("example/store"))
expect_false(file.exists("_targets.yaml"))
expect_equal(tar_config_get("script"), path_script_default())
expect_equal(tar_config_get("store"), path_store_default())
expect_false(file.exists(path_script_default()))
expect_false(file.exists(path_store_default()))
expect_true(file.exists("example/script.R"))
tar_config_set(script = "x")
expect_equal(tar_config_get("script"), "x")
expect_true(file.exists("_targets.yaml"))
})
|
load_bbs_data <- function(level = "state")
{
bbs_data <- NULL
rm(bbs_data)
bbs_dir <- app_dir(appname = "bbsBayes")
if (level == "state")
{
if(isFALSE(file.exists(paste0(bbs_dir$data(), "/bbs_raw_data.RData"))))
{
stop("No BBS data downloaded. Please use fetch_bbs_data() first.")
}
load(file = paste0(bbs_dir$data(), "/bbs_raw_data.RData"))
}else if (level == "stop")
{
if(isFALSE(file.exists(paste0(bbs_dir$data(), "/bbs_stop_data.RData"))))
{
stop("No BBS stop data downloaded. Please use fetch_bbs_data(level = \"stop\") first.")
}
load(file = paste0(bbs_dir$data(), "/bbs_stop_data.RData"))
}
return(bbs_data)
}
|
plotdelineator<-function(shtseq,coordi=1,ngrid=40,shift=0.05,
volumefunction=NULL,redu=TRUE,type="l")
{
if (is.null(volumefunction)){
lnum<-length(shtseq$level)
st<-shtseq$shtseq[[1]]
td<-treedisc(st,shtseq$pcf,ngrid=ngrid)
reduseq<-list(td)
for (i in 2:lnum){
st<-shtseq$shtseq[[i]]
td<-treedisc(st,shtseq$pcf,ngrid=ngrid)
reduseq<-c(reduseq,list(td))
}
estiseq<-list(lstseq=reduseq,hseq=shtseq$level)
mg<-modegraph(estiseq)
plotmodet(mg,coordi=coordi,shift=shift)
}
else{
vd<-volumefunction
if (redu){
x<-vd$delineator.redu[,coordi]
y<-vd$delineatorlevel.redu
or<-order(x)
x1<-x[or]
y1<-y[or]
plot(x1,y1,type=type,
ylab="level",xlab=paste("coordinate",as.character(coordi)))
}
else
plot(vd$delineator[,coordi],vd$delineatorlevel,ylab="level")
}
}
|
generate.data2d = function() {
n = c(60, 80)
x = vector("list", 2)
x[[1]] = seq_len(n[1])/n[1] - 1/2/n[1]
x[[2]] = seq_len(n[2])/n[2] - 1/2/n[2]
mu2d = matrix(0, nrow = n[1], ncol = n[2])
for (i in seq_len(n[1])) {
for (j in seq_len(n[2])) {
mu2d[i, j] = sin(2 * pi * (x[[1]][i] - .5) ^ 3) * cos(4 * pi * x[[2]][j])
}
}
data2d = mu2d + stats::rnorm(prod(n))
return(list(x = x, mu2d = mu2d, data2d = data2d))
}
generate_data2d = generate.data2d
generateData2d = generate.data2d
GenerateData2d = generate.data2d
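## Quick usage sketch: simulate the 60 x 80 noisy test surface and inspect it;
## all four aliases above point to the same function.
set.seed(1)
sim <- generate.data2d()
dim(sim$mu2d)                  # 60 80
range(sim$data2d - sim$mu2d)   # range of the added N(0, 1) noise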
|
repeatedMeasuresPlot <- function(data, within, within.names, within.levels, between.names=NULL,
response.name="score", trace, xvar, pch=15:25, lty=1:6,
col=palette()[-1], plot.means=TRUE,
print.tables=FALSE){
if (!(plot.means || print.tables)) stop("nothing to do (neither print tables nor plot means)!")
if (missing(trace)) trace <- NA
if (missing(xvar)) xvar <- NA
reshapeW2L <- function(data){
timevar <- paste(within.names, collapse=".")
long <- reshape(data, varying=within, v.names=response.name,
timevar=timevar,
direction="long")
n.levels <- sapply(within.levels, length)
n.within <- length(within.names)
if (n.within > 2 || n.within < 1) stop("there must be 1 or 2 within factors")
if (prod(n.levels) != length(within)){
stop("the number of repeated measures, ", length(within),
", is not equal to the product of the numbers of levels of the within factors, ",
prod(n.levels))
}
if (length(within.names) != length(within.levels)){
stop("the number of within factors, ", length(within.names),
", is not equal to the number of sets of within-factor levels, ",
length(within.levels))
}
if (n.within == 2){
long[[within.names[1]]] <- factor(within.levels[[within.names[1]]][1 + ((long[[timevar]] - 1) %/% n.levels[2])],
levels=within.levels[[within.names[1]]])
long[[within.names[2]]] <- factor(within.levels[[within.names[2]]][1 + ((long[[timevar]] - 1) %% n.levels[2])],
levels=within.levels[[within.names[2]]])
} else{
long[[within.names]] <- factor(within.levels[[1]][long[[timevar]]],
levels=within.levels[[1]])
}
long
}
computeMeans <- function(data){
formula <- paste(response.name, " ~", paste(c(within.names, between.names), collapse="+"))
meanTable <- Tapply(formula, mean, data=data)
sdTable <- Tapply(formula, sd, data=data)
means <- meanTable
if(length(dim(means)) > 1){
means <- as.data.frame(ftable(means))
names(means)[ncol(means)] <- response.name
} else {
means <- data.frame(factor(names(means), levels=levels(data[, within.names])), means)
names(means) <- c(within.names, response.name)
}
list(means=means, meanTable=meanTable, sdTable=sdTable)
}
rmPlot <- function(data) {
n.levels <-sapply(data[,-ncol(data), drop = FALSE], function(x)
length(levels(x)))
n.factors <- length(n.levels)
fnames <- names(data)[-ncol(data), drop = FALSE]
if (is.na(trace)) {
wnames <- if (!is.na(xvar)) within.names[!(within.names == xvar)] else within.names
trace <- if (length(wnames) > 0) wnames[which.min(n.levels[wnames])] else NULL
}
if (is.na(xvar)) {
wnames <- if (!is.na(trace)) within.names[!(within.names == trace)] else within.names
xvar <- wnames[which.max(n.levels[wnames])]
}
if (length(within.names) == 1 && length(xvar) == 0){
xvar <- within.names
trace <- NULL
}
if (!is.null(trace) && trace == xvar) trace <- NULL
form <- paste(response.name,
" ~",
xvar,
if (n.factors > 1 + !is.null(trace))
"|",
paste(setdiff(fnames, c(trace, xvar)), collapse = "+"))
tr.levels <- n.levels[trace]
if (!is.null(trace)) {
xyplot(
as.formula(form),
groups = if (!is.null(trace))
data[[trace]]
else
1,
type = "b",
lty = lty[1:tr.levels],
pch = pch[1:tr.levels],
col = col[1:tr.levels],
cex = 1.25,
strip = function(...)
strip.default(strip.names = c(TRUE, TRUE), ...),
data = data,
ylab = paste("mean", response.name),
key = list(
title = trace,
cex.title = 1,
text = list(levels(data[[trace]])),
lines = list(lty = lty[1:tr.levels], col = col[1:tr.levels]),
points = list(
pch = pch[1:tr.levels],
col = col[1:tr.levels],
cex = 1.25
)
)
)
} else {
xyplot(
as.formula(form),
type = "b",
lty = lty[1],
pch = pch[1],
col = col[1],
cex = 1.25,
strip = function(...)
strip.default(strip.names = c(TRUE, TRUE), ...),
data = data,
ylab = paste("mean", response.name)
)
}
}
Long <- reshapeW2L(data)
Means <- computeMeans(Long)
if (print.tables){
cat("\n Means of", response.name, "\n")
if (length(dim(Means$meanTable)) > 1) print(ftable(Means$meanTable))
else print(Means$meanTable)
cat("\n\n Standard deviations of", response.name, "\n")
if (length(dim(Means$sdTable)) > 1) print(ftable(Means$sdTable))
else print(Means$sdTable)
}
if (plot.means) rmPlot(Means$means) else invisible(NULL)
}
|
`rgl.renyiaccum` <-
function(x, rgl.height = 0.2, ...)
{
if (!inherits(x, "renyiaccum"))
stop("'x' must be a 'renyiaccum' result object")
y <- x[,,1] * rgl.height
rgl.min = 0
rgl.max = max(y)
xp <- seq(0, 1, len = nrow(y))
z <- seq(0, 1, len = ncol(y))
ylim <- 1000 * range(y)
ylen <- ylim[2] - ylim[1] + 1
colorlut <- rainbow(ylen)
col <- colorlut[1000*y-ylim[1]+1]
rgl.clear()
rgl.surface(xp, z, y, color=col)
y <- x[,,5] * rgl.height
rgl.surface(xp, z, y, color="black", front="lines", back="lines")
y <- x[,,6] * rgl.height
rgl.surface(xp, z, y, color="black", front="lines", back="lines")
y <- x[,,6]*0 + rgl.min
rgl.surface(xp, z, y, alpha=0)
y <- x[,,6] * 0 + rgl.max
rgl.surface(xp, z, y, alpha=0)
labs <- pretty(c(rgl.min, range(x)))
    ## NOTE: the hex colour string was truncated in the source (everything after
    ## the "#" of the colour code was lost); "gray30" is a stand-in value, not
    ## the original colour.
    rgl.bbox(color = "gray30",
             zlen = 0, xlen = 0, yat = rgl.height*labs, ylab = labs)
rgl.texts(0, rgl.min, 0.5, "Scale", col = "darkblue")
rgl.texts(0.5, rgl.min, 0, "Sites", col="darkblue")
}
|
.check_dat_base <- function(dat, check_one_concresp = TRUE)
{
cols_ori <- colnames(dat)
cols_stand <- c("endpoint", "chemical", "conc", "resp")
cols_inter <- intersect(cols_ori, c(cols_stand, 'mask'))
dat <- dat[, cols_inter]
cols_removed <- setdiff(cols_ori, cols_inter)
if (length(cols_removed) != 0) {
    rlang::warn(stringr::str_glue("{cols_removed} are removed from the input"))
}
cols <- colnames(dat)
if (!(all(rlang::has_name(dat, c(cols_stand, "mask"))) && length(cols) == 5))
{
if (!(all(rlang::has_name(dat, c(cols_stand))) && length(cols) == 4)) {
rlang::abort(
"dataset needs to have endpoint, chemical, conc, resp, mask (optional) columns"
)
}
}
if (check_one_concresp) {
dat2 <- dplyr::count(dat, .data$endpoint, .data$chemical, .data$conc)
if (sum(dat2$n) != nrow(dat2))
{
rlang::abort("one endpoint-chemical-concentration pair can have only one response. Use combi_run_rcurvep() instead")
}
}
return(dat)
}
.check_mask_input <- function(vec, d) {
if (rlang::has_name(d, "mask")) {
rlang::warn("mask column exists; use original mask column")
return(NULL)
}
if (any(vec == 0)) return(0)
if (any(is.null(vec))) return(NULL)
if (!all(vec == floor(vec))) {
rlang::abort("input value must be integers")
}
if (any(vec < 0)) {
rlang::abort("input value must be equal or larger than 0")
}
min_n_conc <- d %>%
dplyr::count(.data$endpoint, .data$chemical) %>%
dplyr::pull(.data$n) %>%
min()
if (any(vec > min_n_conc)) {
rlang::abort("input value must be smaller than total number of concs")
}
return(vec)
}
.check_config_name <- function(config = curvep_defaults(), ...) {
config <- .check_class(config, 'curvep_config', "config is absent or corrupt")
args <- list(...)
config <- modifyList(config, args)
defaults <- curvep_defaults()
para_diff <- setdiff(names(config), names(defaults))
if (length(para_diff) > 0) {
rlang::abort("nonexistent curvep parameters are added")
}
return(config)
}
.check_config_value <- function(config) {
if (rlang::is_double(config$TRSH) == FALSE | config$TRSH < 0)
{
rlang::abort("TRSH has to be a positive double value")
}
if (rlang::is_double(config$RNGE) == FALSE)
{
rlang::abort("RNGE has to be a double value")
}
if (rlang::is_double(config$MXDV) == FALSE | config$MXDV < 0 ) {
rlang::abort("MXDV has to be a positive double value")
}
  if (rlang::is_integerish(config$BSFT) == FALSE | config$BSFT < 0) {
    rlang::abort("BSFT has to be a positive integer value")
  }
  if (rlang::is_integerish(config$USHP) == FALSE | config$USHP < 0) {
    rlang::abort("USHP has to be a positive integer value")
  }
  if (rlang::is_logical(config$TrustHi) == FALSE | rlang::is_logical(config$StrictImp) == FALSE) {
    rlang::abort("TrustHi and StrictImp have to be TRUE/FALSE values")
  }
return(config)
}
.check_config_name2 <- function(config = curvep_defaults(), ...) {
if (length(list(...)) == 0) {
rlang::abort("Input parameters are needed, for example, TRSH = 5")
}
config <- .check_config_name(config = curvep_defaults(), ...)
return(config)
}
.check_dat <- function(dat)
{
cols <- colnames(dat)
dev_cols <- c("endpoint", "chemical", "conc", "n_in", "N")
beh_cols <- c("endpoint", "chemical", "conc", "resp")
if (!(all(rlang::has_name(dat, c(beh_cols, "mask"))) && length(cols) == 5) &&
!(all(rlang::has_name(dat, c(dev_cols, "mask"))) && length(cols) == 6) ) {
if (!(all(rlang::has_name(dat, c(beh_cols))) && length(cols) == 4) &&
!(all(rlang::has_name(dat, c(dev_cols))) && length(cols) == 5)) {
rlang::abort("data is not a continuous/dichotomous dataset")
}
}
return(dat)
}
.check_n_samples <- function(n_samples)
{
if (!is.null(n_samples))
{
if (rlang::is_integer(as.integer(n_samples)) == FALSE || n_samples < 0)
{
rlang::abort("n_samples is not a valid number or is not NULL")
}
}
return(n_samples)
}
.check_vdata <- function(vdata, dataset_type) {
if (!all(!is.na(as.numeric(vdata)))) {
rlang::abort("vdata is not NULL or all numeric")
} else if (!is.null(vdata) && dataset_type == "dichotomous") {
rlang::abort("currently numeric vdata is not supported for dichotomous dataset")
}
return(vdata)
}
.check_keep_sets <- function(keep_sets, allowed_sets, must_set) {
keep_sets <- unique(keep_sets)
if (length(keep_sets) > length(allowed_sets) || !all(keep_sets %in% allowed_sets)) {
sets <- stringr::str_c(allowed_sets, collapse = " ")
rlang::abort(
stringr::str_glue(
"Only a combination {sets} is allowed.")
)
}
if (!must_set %in% keep_sets) {
rlang::abort(
stringr::str_glue("{must_set} is needed.")
)
}
return(keep_sets)
}
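## Small sketch with illustrative values: a subset of the allowed result sets
## passes the check as long as the required set ("act_set" here) is included.
.check_keep_sets(c("act_set", "resp_set"),
                 allowed_sets = c("act_set", "resp_set", "fp_set"),
                 must_set = "act_set")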
.check_result_sets <- function(lsets) {
if (!rlang::is_list(lsets) || length(lsets) > 3 || !all(names(lsets) %in% c("act_set", "resp_set", "fp_set"))) {
rlang::abort("Only a list with names of act_set, resp_set, fp_set is allowed")
}
if (!"act_set" %in% names(lsets)) {
rlang::abort("At least act_set tibble is needed")
}
return(lsets)
}
.check_combirun_out <- function(combi_run_out) {
if (!rlang::is_list(combi_run_out) || !rlang::has_name(combi_run_out, "result") || !rlang::has_name(combi_run_out, "config")) {
rlang::abort("the combi_run_out is not a list or does not have result and config item.\n The input does not come from combi_run_rcurvep()")
}
return(combi_run_out)
}
.check_bmr_input <- function(d) {
d <- .check_class(d, "rcurvep", "not a rcurvep object")
if (!rlang::has_name(d$result$act_set, "sample_id")) {
rlang::abort("The input act_set needs to have multiple samples (sample_id).
Please set n_samples in combi_run_rcurvep()")
}
return(d)
}
.check_bmr_statsd <- function(d) {
result <- d
if (!rlang::has_name(d, "endpoint")) {
col_names <- colnames(d)
col_names <- c("TRSH", "pvar", col_names[-c(1,2)])
result <- d %>% rlang::set_names(col_names)
result <- result %>%
dplyr::mutate(
endpoint = "noname"
) %>%
dplyr::select(
.data$endpoint, dplyr::everything()
)
}
return(result)
}
.check_class <- function(obj, class_name, error_message) {
if ( !class_name %in% class(obj) )
{
rlang::abort(error_message, class_name)
}
return(obj)
}
.check_mask_on_concresp <- function(conc, resp, mask) {
result <- list(Conc = conc, Resp = resp)
len_inp <- purrr::map_dbl(list(conc, resp), length)
if (length(unique(len_inp)) != 1 ) {
rlang::abort("the length of Conc Resp is not the same.")
}
if (!is.null(mask)) {
if (length(mask) != length(conc)) {
rlang::abort("the length of Conc Resp Mask is not the same.")
}
result <- mask_resp_conc(conc, resp, mask)
conc <- result$Conc
resp <- result$Resp
}
if (length(conc) < 4) {
rlang::abort("at least four resps are needed.")
}
return(result)
}
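## Minimal usage sketch (assumes the purrr package is installed; values are
## made up): with mask = NULL the helper only checks that Conc and Resp have
## the same length and contain at least four points, then returns them unchanged.
.check_mask_on_concresp(conc = c(-9, -8, -7, -6),
                        resp = c(1, 5, 20, 60),
                        mask = NULL)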
.check_hill_args <- function(ori, new) {
if (!all(names(new) %in% names(ori))) {
rlang::warn("The supplied arguments are not registered. No changes will be applied.")
}
config <- modifyList(ori, new)
return(config)
}
.check_modls_args <- function(args, modls) {
syn <- paste0("(", stringr::str_c(modls, collapse = "|"), ")")
if (!all(stringr::str_detect(names(args), syn))) {
rlang::warn("Some supplied arguments do not have model type as the prefix. No changes will be applied.")
}
return(args)
}
.check_objs_type <- function(objs) {
objclass <- purrr::map_lgl(
objs, ~ any(stringr::str_detect(class(.x), "rcurvep")))
if (!all(objclass)) {
rlang::abort("all objects need to be rcurvep objects")
} else {
objtype <- purrr::map_lgl(
objs, ~ "config" %in% names(.x)
)
objsize <- purrr::map_int(
objs, ~ length(.x$result)
)
if (!all(objtype)) {
rlang::abort(
"only results by curvep calculation are supported")
}
if (length(unique(objsize)) != 1) {
rlang::abort(
"different list size in results")
}
}
return(objs)
}
.check_inactivate <- function(inactivate) {
inactivate <- na.omit(inactivate)
if (!is.character(inactivate) & !rlang::is_integerish(inactivate) & !is.null(inactivate)) {
rlang::abort("only string or integer is allowed for inactivate")
}
return(inactivate)
}
|
fgeo_elevation <- function(elev) {
UseMethod("fgeo_elevation")
}
fgeo_elevation.fgeo_elevation <- function(elev) {
elev
}
fgeo_elevation.default <- function(elev) {
abort(glue("Can't deal with data of class {class(elev)}"))
}
fgeo_elevation.list <- function(elev) {
pull_elevation(elev) %>%
nms_try_rename(want = "gx", try = "x") %>%
nms_try_rename(want = "gy", try = "y") %>%
new_fgeo_elevation()
}
fgeo_elevation.data.frame <- fgeo_elevation.list
new_fgeo_elevation <- function(elev) {
stopifnot(is.data.frame(elev))
structure(elev, class = c("fgeo_elevation", class(elev)))
}
pull_elevation <- function(elev) {
UseMethod("pull_elevation")
}
pull_elevation.data.frame <- function(elev) {
check_crucial_names(elev, "elev")
elev
}
pull_elevation.default <- function(elev) {
msg <- paste0(
"`elevation` must be data.frame or list but its class is: ", class(elev)
)
abort(msg)
}
pull_elevation.list <- function(elev) {
safe_check <- purrr::safely(check_crucial_names)
check_result <- safe_check(elev, "col")
if (!is.null(check_result$error)) {
msg <- paste0(
"Your list must contain the element `col` with elevation data.\n",
"* Names of the elements of the list provided:\n",
commas(names(elev))
)
abort(msg)
}
elevation <- elev[["col"]]
elevation
}
|
`Boot.ts` <-
function(obj, R=1, ...){
p<-SelectModel(obj, lag.max=ceiling(length(obj)/4), Best=1)
ans<-FitAR(obj, 1:p)
Boot.FitAR(ans, R=R)
}
|
is_place <- function(place, PN) {
  place %in% places(PN)$id
}
|
reachableRate<-function(nD, start, end, seeds){
if (missing(start) | missing(end)) {
times <- get.change.times(nD)
if (length(times) == 0) {
warning("network does not appear to have any dynamic information. Using start=0 end=1")
start = 0
      end = 1
}
times[times == Inf] <- NA
times[times == -Inf] <- NA
start = min(times, na.rm = T)
end = max(times, na.rm = T)
}
reachableRates<-sapply(seeds,function(s){
reachableN<-sum(tPath(nD,v=s,start=start,end=end)$tdist<Inf)
reachableN/(end-start)
})
return(mean(reachableRates))
}
meanReachTimes<-function(nD, start, end, seeds){
if (missing(start) | missing(end)) {
times <- get.change.times(nD)
if (length(times) == 0) {
warning("network does not appear to have any dynamic information. Using start=0 end=1")
start = 0
      end = 1
}
times[times == Inf] <- NA
times[times == -Inf] <- NA
start = min(times, na.rm = T)
end = max(times, na.rm = T)
}
distances<-lapply(seeds, function(s){
tPath(nD,v =s,start = start,end=end,graph.step.time = 1 )$tdist
})
times<-seq(from=start,to=end,length.out = 10)
means<-sapply(times,function(t){
dAtT<-sapply(distances,function(d){
sum(d<=t)
})
mean(dAtT)
})
names(means)<-times
return(means)
}
timeToReach<-function(nD, num.targets=round(network.size(nD)/2), start, end, seeds){
if (missing(start) | missing(end)) {
times <- get.change.times(nD)
if (length(times) == 0) {
warning("network does not appear to have any dynamic information. Using start=0 end=1")
start = 0
      end = 1
}
times[times == Inf] <- NA
times[times == -Inf] <- NA
start = min(times, na.rm = T)
end = max(times, na.rm = T)
}
distances<-lapply(seeds, function(s){
tPath(nD,v =s,start = start,end=end,graph.step.time = 1 )$tdist
})
reachTimes<-sapply(distances,function(d){
d<-sort(d)
d[num.targets]
})
return(reachTimes)
}
|
rm(list=ls()); gc();
library(CSTools)
library(ClimProjDiags)
library(zeallot)
library(ragg)
dir_output <- '/esarchive/scratch/nperez/CSTools_manuscript/v20210603/'
era5 <- list(name = 'era5',
path = '/esarchive/recon/ecmwf/era5/$STORE_FREQ$_mean/$VAR_NAME$_f1h-r1440x721cds/$VAR_NAME$_$YEAR$$MONTH$.nc')
years <- unlist(lapply(1993:2018, function(x){
paste0(x, sprintf("%02d",1:12), '01')}))
era5 <- CST_Load(var = 'prlr',
exp = list(era5),
sdates = years, nmember = 1,
storefreq = "daily", sampleperiod = 1,
latmin = 37.5, latmax = 53.25, lonmin = 2.5, lonmax = 18.25,
output = 'lonlat', nprocs = 1)
era5$data <- era5$data * 24 * 3600 * 1000
era5 <- CST_SplitDim(era5, split_dim = 'sdate', indices = rep(1:12, 26))
slope <- CST_RFSlope(era5, time_dim = c('sdate', 'ftime'), kmin = 5)
save(slope, file = paste0(dir_output, 'Slope.RDS'), version = 2)
slope_plot <- slope
StartDates <- paste0(1993:2018, '1101')
exp <- list(name = 'ecmwfS5',
path = "/esarchive/exp/ecmwf/system5c3s/$STORE_FREQ$_mean/$VAR_NAME$_s0-24h/$VAR_NAME$_$START_DATE$.nc")
obs <- list(name = 'era5',
path = '/esarchive/recon/ecmwf/era5/$STORE_FREQ$_mean/$VAR_NAME$_f1h-r1440x721cds/$VAR_NAME$_$YEAR$$MONTH$.nc')
c(exp, obs) %<-% CST_Load(var = 'prlr', exp = list(exp), obs = list(obs),
sdates = StartDates, nmember = 25,
storefreq = "daily", sampleperiod = 1,
latmin = 42, latmax = 49, lonmin = 4, lonmax = 11,
output = 'lonlat', nprocs = 1)
exp <- CST_SplitDim(exp, split_dim = c('ftime'))
obs <- CST_SplitDim(obs, split_dim = c('ftime'))
exp$data <- exp$data * 24 * 3600 * 1000
obs$data <- obs$data * 24 * 3600 * 1000
exp$data[which(exp$data < 0)] <- 0
exp.qm <- CST_QuantileMapping(exp, obs, method = "QUANT",
wet.day = FALSE,
sample_dims = c('member', 'sdate', 'ftime'),
ncores = 4)
save(exp.qm, file = paste0(dir_output, 'ExpQM.RDS'), version = 2)
load(paste0(dir_output, 'weightsRF100.RDS'))
agg_png(paste0(dir_output, "RF100_WeightsDec.png"),
width = 1000, height = 1100, units = 'px',res = 144)
PlotEquiMap(weight$data[,,12], lon = weight$lon, lat = weight$lat,
filled.continents = FALSE, title_scale = 1,
intylat = 2, intxlon = 2,
toptitle = 'December Weights RF 100')
dev.off()
weights <- Subset(weight$data, along = 'monthly', indices = c(11, 12, 1:6))
slope <- Subset(slope, along = 'monthly', indices = c(11, 12, 1:6),
drop = 'non-selected')
k = 1
for (realizations in 1:10) {
for (member in 1:25) {
result <- exp.qm
result$data <- NULL
for (month in 1:8) {
data <- exp.qm
data$data <- data$data[1, member, , , , , month]
fs <- CST_RainFARM(data, nf = 100,
weights = weights, slope = slope[month],
kmin = 1, nens = 1, verbose = TRUE,
nprocs = 8,
drop_realization = TRUE)
result$data <- abind::abind(result$data, fs$data, along = 5)
      if (month == 2 & member == 1 & realizations == 1) {
agg_png(paste0(dir_output, "RF100_Down_11dec.png"),
width = 1000, height = 1100, units = 'px',res = 144)
PlotEquiMap(fs$data[1,11,,],lon = fs$lon, lat = fs$lat,
filled.continents = FALSE, bar_limits = c(0,40),
intylat = 2, intxlon = 2, title_scale = 1,
triangle_ends = c(TRUE, FALSE),
                    toptitle = 'Downscaled RF 100', units = 'precipitation (mm)')
dev.off()
      }
    }
    # save one downscaled dataset per member and realization
    result$lon <- fs$lon
    result$lat <- fs$lat
    result <- CST_MergeDims(result, merge_dims = c("ftime", "monthly"),
                            na.rm = TRUE)
    result$Dataset <- paste0('RF100_ECMWFC3S_QM_member_', member, '_real_',
                             realizations)
    result$Dates[[1]] <- exp$Dates[[1]]
    CST_SaveExp(result, destination = dir_output,
                extra_string = paste0('member', k))
    gc()
    k = k + 1
    rm(list = c('fs', 'result', 'data'))
  }
}
|
fit <- lm(mpg ~ qsec + factor(am) + wt + factor(gear),
data = mtcars)
summary(fit)
broom::tidy(fit)
library(pixiedust)
dust(fit)
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames("Term", "Coefficient", "SE", "T-statistic", "P-value") %>%
sprinkle(bg_pattern = c("orchid", "plum")) %>%
sprinkle_print_method("html")
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"), round = 2)
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value)))
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames(term = "Term", p.value = "P-value")
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames(term = "Term", p.value = "P-value",
std.error = "SE", statistic = "T-statistic",
estimate = "Coefficient")
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames("Term", "Coefficient", "SE", "T-statistic", "P-value")
dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames("Term", "Coefficient", "SE", "T-statistic", "P-value", "Extra Column Name")
dust(fit) %>%
sprinkle(cols = "term",
replace = c("Intercept", "Quarter Mile Time", "Automatic vs. Manual",
"Weight", "Gears: 4 vs. 3", "Gears: 5 vs 3")) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames("Term", "Coefficient", "SE", "T-statistic", "P-value")
dust(fit) %>%
sprinkle(rows = 2:3, cols = 3:4,
replace = c(100, 300, 200, 400),
italic = TRUE) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames("Term", "Coefficient", "SE", "T-statistic", "P-value")
basetable <- dust(fit) %>%
sprinkle(cols = c("estimate", "std.error", "statistic"),
round = 3) %>%
sprinkle(cols = "p.value", fn = quote(pvalString(value))) %>%
sprinkle_colnames(term = "Term", estimate = "Coefficient",
std.error = "SE", statistic = "T-statistic",
p.value = "P-value") %>%
sprinkle_print_method("html")
basetable %>%
sprinkle(rows = c(2, 4), border_color = "orchid")
basetable %>%
sprinkle(rows = c(2, 4), cols = 1,
border = c("left", "top", "bottom"),
border_color = "orchid") %>%
sprinkle(rows = c(2, 4), cols = 5,
border = c("right", "top", "bottom"),
border_color = "orchid") %>%
sprinkle(rows = c(2, 4), cols = 2:4,
border = c("top", "bottom"),
border_color = "orchid")
basetable %>%
sprinkle(rows = c(2, 4), border_color = "orchid",
pad = 15)
basetable %>%
sprinkle(rows = c(2, 4), bold = TRUE)
basetable %>%
sprinkle(rows = c(2, 4), bold = TRUE, italic=TRUE)
basetable %>%
sprinkle(rows = c(2, 4), bg = "orchid")
basetable %>%
sprinkle(rows = c(2, 4), bg = "rgba(218,112,214,.5)")
basetable %>%
sprinkle(bg_pattern = c("orchid", "plum"))
basetable %>%
sprinkle(rows = c(2, 4),
font_color = "orchid",
font_size = 24,
font_size_units = "pt")
basetable %>%
sprinkle(rows = 1, cols = 2, halign = "left", valign = "top", height = 50, width = 50) %>%
sprinkle(rows = 1, cols = 3, halign = "center", valign = "top", height = 50, width = 50) %>%
sprinkle(rows = 1, cols = 4, halign = "right", valign = "top", height = 50, width = 50) %>%
sprinkle(rows = 2, cols = 2, halign = "left", valign = "middle", height = 50, width = 50) %>%
sprinkle(rows = 2, cols = 3, halign = "center", valign = "middle", height = 50, width = 50) %>%
sprinkle(rows = 2, cols = 4, halign = "right", valign = "middle", height = 50, width = 50) %>%
sprinkle(rows = 3, cols = 2, halign = "left", valign = "bottom", height = 50, width = 50) %>%
sprinkle(rows = 3, cols = 3, halign = "center", valign = "bottom", height = 50, width = 50) %>%
sprinkle(rows = 3, cols = 4, halign = "right", valign = "bottom", height = 50, width = 50)
dust(mtcars, tidy_df = TRUE) %>%
sprinkle(cols = c("mean", "sd", "median", "trimmed", "mad",
"min", "max", "range", "skew", "kurtosis", "se"),
round = 2) %>%
sprinkle(rows = 1, rotate_degree = -90,
height = 60, part = "head") %>%
sprinkle_print_method("html")
|
Gibbs_pefa_main<-function(y,mu=0,ome,ly,psx, tausq,pig,prior,ilamsq,ilamsq_t,const,count){
Q <- const$Q
J <- const$J
N <- const$N
K <- const$K
indg<-const$indg
t_num <- const$t_num
Ycen<-y-mu
lyb<-tausq
a_gamma <- prior$a_gaml_sq
b_gamma <- prior$b_gaml_sq
Pmean <- prior$m_LA
Sigla <- prior$s_LA
sub_sl <- const$sub_sl
len_sl <- const$len_sl
temp <- Ycen - ly %*% ome
Stmp <- temp %*% t(temp)
for(j in 1:J){
psx[j,j]<-1/rgamma(1, shape=a_gamma+(N-1)/2, rate=b_gamma+(Stmp[j,j])/2)
ind1 <- sub_sl[j, ]
len <- len_sl[j]
if(len>0){
yj<-as.vector(Ycen[j,])
if(len==1){
omesub<-t(ome[ind1,])
}else{omesub<-ome[ind1,]}
PSiginv<-Sigla*diag(len)
vtmp<-chol2inv(chol(tcrossprod(omesub)/psx[j,j]+PSiginv))
mtmp<-(omesub%*%yj/psx[j,j]+PSiginv%*%rep(Pmean,len))
ly[j,ind1]<-mvrnorm(1,vtmp%*%mtmp,Sigma = vtmp)
}
}
for(k in 1:K){
vgh<-diag(sqrt(tausq[,k]))
sigg<-chol2inv(chol(vgh%*%vgh*sum(ome[k,]^2)/diag(psx)+diag(J)))
mug<-sigg%*%vgh%*%as.vector(ome[k,]%*%t(Ycen))/diag(psx)
lyb[,k]<- mvrnorm(1,mug,Sigma=(sigg))
if(indg[k]){
m1<-(sigg)%*%vgh%*%as.vector(ome[k,]%*%t(Ycen-ly[,!indg]%*%ome[!indg,]))/diag(psx)
tmp<-log(det(sigg))/2+t(m1)%*%chol2inv(chol(sigg))%*%m1/2
tmp<-exp(tmp)
p0<-rbeta(1,sum(1-pig[indg])+1,sum(pig[indg])+1)
pg<-p0/(p0+(1-p0)*tmp)
pig[k]<-(1-pg>runif(1))
lyb[,k]<- pig[k]*lyb[,k]
vg2<-pmin(1/(1/ilamsq[k]+sum(ome[k,]^2)*lyb[,k]^2/diag(psx)),const$max_var)
ug<-(Ycen-ly[,-k]%*%ome[-k,])%*%ome[k,]*vg2*lyb[,k]/diag(psx)
tmp<-(log(vg2)-log(ilamsq[k])+ug^2/vg2+2*pnorm(ug/sqrt(vg2)))/2
tmp<-exp(tmp)
tmp1<-sum(tausq==0)
pi1<-rbeta(1,tmp1+1,J*K-tmp1+1)
qg<-pi1/(pi1+2*(1-pi1)*tmp)
pij<-1-(qg>runif(J))
tausq[,k]<-(rnorm(J,ug,(vg2))*pij)^2
s_pij<-sum(pij)
if (s_pij < const$mjf && s_pij>0){
count[k,3:4]<-count[k,3:4]+1
if (count[k,4] >const$no_mjf) {
tausq[,k] <- 0
count[k,5] =count[k,5] + 1
count[k,4] = 0
}
}else{
count[k,4] = 0
}
ly[,k]<-lyb[,k]*sqrt(tausq[,k])
lam.tmp<- rgamma(t_num, shape=1+sum(tausq[,k]!=0)/2, rate=ilamsq_t[k]+sum(tausq[,k])/2)
ilamsq_t[k]<-1/mean(lam.tmp)
ilamsq[k]<-1/rgamma(1, shape=1+sum(tausq[,k]!=0)/2, rate=ilamsq_t[k]+sum(tausq[,k])/2)
}else{
vg2<-pmin(1/(1/ilamsq[k]+sum(ome[k,]^2)*lyb[,k]^2/diag(psx)),const$max_var)
ug<-(Ycen-ly[,-k]%*%ome[-k,])%*%ome[k,]*vg2*lyb[,k]/diag(psx)
tmp<-(log(vg2)-log(ilamsq[k])+ug^2/vg2+2*pnorm(ug/sqrt(vg2)))/2
tmp<-exp(tmp)
tmp1<-sum(tausq==0)
pi1<-rbeta(1,tmp1+1,J*K-tmp1+1)
qg<-pi1/(pi1+2*(1-pi1)*tmp)
pij<-1-(qg>runif(J))
tmp2<-(rnorm(J,ug,(vg2))*pij)^2
ind<-(Q[,k]==-1)
tausq[ind,k]<-tmp2[ind]
ly[ind,k]<-lyb[ind,k]*sqrt(tausq[ind,k])
}
}
if(any(!indg)){
lam.tmp<- rgamma(t_num, shape=1+sum(tausq[,!indg]!=0)/2, rate=mean(ilamsq_t[!indg])+sum(tausq[,!indg])/2)
ilamsq_t[!indg] <- tmp_t <- 1/mean(lam.tmp)
ilamsq[!indg]<-1/rgamma(1, shape=1+sum(tausq[,!indg]!=0)/2, rate=tmp_t+sum(tausq[,!indg])/2)
}
return(list(ly=ly,ilamsq=ilamsq,pig=pig,psx=psx,tausq=tausq,ilamsq_t=ilamsq_t,count=count))
}
|
vim_adjusted <- function(model, scoring_rule, vim_type, interaction_order, nodesize, alpha,
X_oob, y_oob, Z_oob, leaves = "4pl", ...) {
X <- model$X
N <- nrow(X)
disj <- model$disj
n_conj <- sum(rowSums(!is.na(disj)) > 0)
n_vars <- rowSums(!is.na(disj[1:n_conj,,drop=FALSE]))
if(interaction_order < 2 || n_conj < 2) {
return(NULL)
}
new.conjs <- list()
new.conj.vecs <- NULL
for(i in 2:min(interaction_order, n_conj)) {
tmp.conjs <- combn(n_conj, i, simplify = FALSE)
rem.conjs <- integer()
tmp.disj <- NULL
for(j in 1:length(tmp.conjs)) {
conj.vec <- as.integer(disj[tmp.conjs[[j]],,drop=FALSE])
conj.vec <- conj.vec[!is.na(conj.vec)]
if(length(conj.vec) > interaction_order) {
rem.conjs <- c(rem.conjs, j)
} else {
conj.vec <- c(conj.vec, rep(NA_integer_, interaction_order - length(conj.vec)))
tmp.disj <- rbind(tmp.disj, matrix(conj.vec, ncol = interaction_order, nrow = 1))
}
}
if(length(rem.conjs) > 0) tmp.conjs <- tmp.conjs[-rem.conjs]
if(is.null(tmp.disj)) {
next
}
mode(tmp.disj) <- "integer"
dm <- getDesignMatrix(X, tmp.disj)
nodesizes <- colSums(dm)
rem.conjs <- which((nodesizes < nodesize) | (nodesizes > N - nodesize))
if(length(rem.conjs) > 0) {
tmp.conjs <- tmp.conjs[-rem.conjs]
tmp.disj <- tmp.disj[-rem.conjs,,drop=FALSE]
}
new.conjs <- c(new.conjs, tmp.conjs)
new.conj.vecs <- rbind(new.conj.vecs, tmp.disj)
}
vims <- data.frame(matrix(nrow = length(new.conjs), ncol = 2))
colnames(vims) <- c("var", "vim")
if(nrow(vims) == 0) return(vims)
tmp.real.disj <- translateLogicPET(new.conj.vecs, X)
vims$var <- getPredictorNames(tmp.real.disj, sort_conj = TRUE)
vims$vim <- 0
rem.conjs <- integer()
for(i in 1:length(new.conjs)) {
vim.vars <- list()
rem.vars <- new.conjs[[i]]
for(j in length(new.conjs[[i]]):1) {
vim.vars <- c(vim.vars, combn(new.conjs[[i]], j, simplify = FALSE))
}
tmp.vims <- vim_single(model, scoring_rule, vim_type, vim.vars = vim.vars, rem.vars = rem.vars,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves, ...)
for(j in 1:length(vim.vars)) {
coefficient <- ((length(new.conjs[[i]]) - length(vim.vars[[j]])) %% 2) * (-2) + 1
vims$vim[i] <- vims$vim[i] + coefficient * tmp.vims[j]
}
}
keep.ind <- is.finite(vims$vim)
vims <- vims[keep.ind,]
if(alpha > 0 && nrow(vims) > 0) {
new.conjs <- new.conjs[keep.ind]
new.conj.vecs <- new.conj.vecs[keep.ind,,drop=FALSE]
dm <- getDesignMatrix(X, model$disj)
y <- model$y
y.sum <- sum(y)
name.pool <- getPredictorNames(model$real_disj, sort_conj = TRUE)
for(i in 1:length(new.conjs)) {
current.conj <- new.conjs[[i]]
combs <- expand.grid(rep(list(0:1), length(current.conj)))
dm.tmp <- dm[,current.conj,drop=FALSE]
p.values <- rep(1.0, nrow(combs))
for(j in 1:nrow(combs)) {
stacked <- matrix(rep(combs[j,], N), byrow = TRUE, nrow = N)
comb.rows <- unname(rowMeans(dm.tmp == stacked)) == 1
if(model$y_bin) {
y.comb.sum <- sum(y[comb.rows])
N.comb <- sum(comb.rows)
p.values[j] <- fisher.test(matrix(c(y.comb.sum, y.sum - y.comb.sum,
N.comb - y.comb.sum, N - N.comb - (y.sum - y.comb.sum)), ncol = 2),
alternative = "two.sided", conf.int = FALSE)$p.value
} else {
p.values[j] <- t.test(y[comb.rows], y[!comb.rows], alternative = "two.sided",
paired = FALSE, var.equal = FALSE)$p.value
}
}
if(min(p.values) < alpha) {
j <- which.min(p.values)
comb <- as.integer(combs[j,])
new.vars <- character()
for(k in 1:length(comb)) {
current.name <- name.pool[current.conj[k]]
if(comb[k] == 0) {
if(n_vars[current.conj[k]] > 1) {
new.vars <- c(new.vars, paste("-(", current.name, ")", sep = ""))
} else {
new.vars <- c(new.vars, ifelse(startsWith(current.name, "-"),
substr(current.name, 2, nchar(current.name)),
paste("-", current.name, sep = "")))
}
} else {
if(n_vars[current.conj[k]] > 1) {
single.vars <- strsplit(current.name, "\\^")[[1]]
new.vars <- c(new.vars, single.vars)
} else {
new.vars <- c(new.vars, current.name)
}
}
}
vims$var[i] <- paste(sort(new.vars), collapse = "^")
}
}
}
return(vims)
}
vim_single <- function(model, scoring_rule, vim_type, vim.vars, rem.vars,
X_oob, y_oob, Z_oob, leaves = "4pl", ...) {
FUN <- getPerfFUN(vim_type)
base.perf <- FUN(model, scoring_rule, rem.vars,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves, ...)
if(is.list(vim.vars)) {
vim <- numeric(length(vim.vars))
for(i in 1:length(vim.vars)) {
adj.rem.vars <- setdiff(rem.vars, vim.vars[[i]])
if(length(adj.rem.vars) > 0) {
vim[i] <- base.perf - FUN(model, scoring_rule, adj.rem.vars,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves, ...)
} else {
vim[i] <- base.perf - perf(model, scoring_rule,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves)
}
}
} else {
vim <- base.perf - perf(model, scoring_rule,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves)
}
return(vim)
}
perf <- function(model, scoring_rule,
X_oob, y_oob, Z_oob, leaves = "4pl") {
orig_score <- 0
n_folds <- length(model$X_train)
for(j in 1:n_folds) {
dm_orig <- getDesignMatrix(X_oob[[j]], model$disj)
Z_temp <- NULL
if(!is.null(Z_oob))
Z_temp <- Z_oob[[j]]
probs_orig <- predict_pet(model$ensemble[[j]], dm_orig, Z = Z_temp, "prob", leaves)
orig_score <- orig_score + calcScore(probs_orig, y_oob[[j]], scoring_rule)
}
orig_score/n_folds
}
getPerfFUN <- function(vim_type) {
if(vim_type == "logic")
FUN <- perf.logic
else if(vim_type == "remove")
FUN <- perf.remove
else if(vim_type == "permutation")
FUN <- perf.permutation
FUN
}
perf.logic <- function(model, scoring_rule, rem.vars,
X_oob, y_oob, Z_oob, leaves = "4pl",
average = "before") {
n_folds <- length(model$X_train)
score <- 0
for(j in 1:n_folds) {
Z_temp <- NULL
if(!is.null(Z_oob))
Z_temp <- Z_oob[[j]]
dm_orig <- getDesignMatrix(X_oob[[j]], model$disj)
combs <- expand.grid(rep(list(0:1), length(rem.vars)))
probs <- 0
tmp.score <- 0
for(k in 1:nrow(combs)) {
dm_inv <- dm_orig
dm_inv[,rem.vars] <- rep(as.integer(combs[k,]), each = nrow(dm_orig))
if(average == "before")
probs <- probs + predict_pet(model$ensemble[[j]], dm_inv, Z = Z_temp, "prob", leaves)
else {
probs <- predict_pet(model$ensemble[[j]], dm_inv, Z = Z_temp, "prob", leaves)
tmp.score <- tmp.score + calcScore(probs, y_oob[[j]], scoring_rule)
}
}
if(average == "before") {
probs <- probs/nrow(combs)
score <- score + calcScore(probs, y_oob[[j]], scoring_rule)
} else
score <- score + tmp.score/nrow(combs)
}
score/n_folds
}
perf.remove <- function(model, scoring_rule, rem.vars,
X_oob, y_oob, Z_oob, leaves = "4pl",
empty.model = "mean") {
n_folds <- length(model$X_train)
disj <- model$disj
disj <- disj[-rem.vars,,drop=FALSE]
n_conj <- sum(rowSums(!is.na(disj)) > 0)
if(n_conj == 0) {
if(empty.model == "none")
return(-Inf)
else {
score <- 0
for(j in 1:n_folds) {
score <- score + calcScore(rep(mean(model$y_train[[j]]), length(y_oob[[j]])), y_oob[[j]], scoring_rule)
}
return(score/n_folds)
}
}
ensemble <- fitPETs(model$X_train, model$y_train, model$X_val, model$y_val, model$Z_train, model$Z_val, model$use_validation, model$y_bin,
model$tree_control$nodesize, model$tree_control$cp, model$tree_control$smoothing, model$tree_control$mtry, model$tree_control$covariable_final,
disj, n_conj, getScoreRule(scoring_rule), TRUE)
score <- 0
for(j in 1:n_folds) {
Z_temp <- NULL
if(!is.null(Z_oob))
Z_temp <- Z_oob[[j]]
dm <- getDesignMatrix(X_oob[[j]], disj)
probs <- predict_pet(ensemble[[j]], dm, Z = Z_temp, "prob", leaves)
score <- score + calcScore(probs, y_oob[[j]], scoring_rule)
}
score/n_folds
}
perf.permutation <- function(model, scoring_rule, rem.vars,
X_oob, y_oob, Z_oob, leaves = "4pl",
n.perm = 100) {
n_folds <- length(model$X_train)
disj <- model$disj
score <- 0
for(i in 1:n.perm) {
for(j in 1:n_folds) {
Z_temp <- NULL
if(!is.null(Z_oob))
Z_temp <- Z_oob[[j]]
dm <- getDesignMatrix(X_oob[[j]], disj)
for(k in 1:length(rem.vars)) {
perm <- sample(nrow(X_oob[[j]]))
dm[,rem.vars[k]] <- dm[perm, rem.vars[k]]
}
probs <- predict_pet(model$ensemble[[j]], dm, Z = Z_temp, "prob", leaves)
score <- score + calcScore(probs, y_oob[[j]], scoring_rule)
}
}
score/(n_folds * n.perm)
}
calcScore <- function(preds, y, scoring_rule) {
if(scoring_rule == "deviance") {
score <- calcDev(preds, y)
} else if(scoring_rule == "brier") {
score <- calcBrier(preds, y)
} else if(scoring_rule == "mis") {
score <- calcMis(preds, y)
} else if(scoring_rule == "auc") {
score <- -calcAUCFast(preds, y)
} else if(scoring_rule == "nce") {
score <- calcNCE(preds, y)
} else if(scoring_rule == "mse") {
score <- calcMSE(preds, y)
} else if(scoring_rule == "nrmse") {
score <- calcNRMSE(preds, y)
}
score
}
vim <- function(model, scoring_rule = "auc", vim_type = "logic",
adjust = TRUE, interaction_order = 3, nodesize = NULL, alpha = 0.05,
X_oob = NULL, y_oob = NULL, Z_oob = NULL, leaves = "4pl", ...) {
type <- class(model)
if(type == "logicDT") {
if(is.null(X_oob)) {
X_oob <- model$X_val
y_oob <- model$y_val
Z_oob <- model$Z_val
} else {
XyZ <- prepareXyZ(X_oob, y_oob, Z_oob, model$y_bin, make.list = TRUE)
X_oob <- XyZ$X; y_oob <- XyZ$y; Z_oob <- XyZ$Z
}
if(!model$y_bin && scoring_rule == "auc") scoring_rule <- "nrmse"
if(is.null(nodesize)) nodesize <- model$conjsize
disj <- model$disj
n_conj <- sum(rowSums(!is.na(disj)) > 0)
vims <- data.frame(matrix(nrow = n_conj, ncol = 2))
colnames(vims) <- c("var", "vim")
vims$var <- getPredictorNames(model$real_disj, sort_conj = TRUE)
vims$vim <- 0
for(i in 1:n_conj) {
vims$vim[i] <- vim_single(model, scoring_rule, vim_type, NULL, i,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves, ...)
}
if(adjust) {
vims <- rbind(vims, vim_adjusted(model, scoring_rule = scoring_rule, vim_type = vim_type,
interaction_order = interaction_order, nodesize = nodesize, alpha = alpha,
X_oob = X_oob, y_oob = y_oob, Z_oob = Z_oob, leaves = leaves, ...))
}
} else if(type == "logic.bagged") {
models <- model$models
bags <- model$bags
XyZ <- prepareXyZ(model$X, model$y, model$Z, models[[1]]$y_bin, make.list = FALSE)
X <- XyZ$X; y <- XyZ$y; Z <- XyZ$Z
N <- nrow(X)
bagging.iter <- length(models)
vims <- data.frame(matrix(nrow = 0, ncol = 2))
colnames(vims) <- c("var", "vim")
if(is.null(nodesize)) nodesize <- models[[1]]$conjsize
if(!models[[1]]$y_bin && scoring_rule == "auc") scoring_rule <- "nrmse"
Z_temp <- NULL
vim.iter <- bagging.iter
for(i in 1:bagging.iter) {
oob <- setdiff(1:N, bags[[i]])
n_folds <- length(models[[i]]$ensemble)
if(!is.null(Z))
Z_temp <- rep(list(Z[oob,]), n_folds)
ret <- vim(models[[i]], scoring_rule, vim_type, adjust = adjust,
interaction_order = interaction_order, nodesize = nodesize, alpha = alpha,
X_oob = rep(list(X[oob,]), n_folds), y_oob = rep(list(y[oob]), n_folds),
Z_oob = Z_temp, leaves = leaves, ...)$vims
predictor_names <- ret$var
current_vims <- ret$vim
if(any(!is.finite(current_vims))) {
vim.iter <- vim.iter - 1
next
}
for(j in 1:length(predictor_names)) {
if(predictor_names[j] %in% vims$var) {
vims$vim[vims$var == predictor_names[j]] <- vims$vim[vims$var == predictor_names[j]] + current_vims[j]
} else {
vims <- rbind(vims, data.frame(var = predictor_names[j], vim = current_vims[j]))
}
}
}
vims$vim <- vims$vim/vim.iter
} else {
stop("VIMs can only be calculated for models of type logicDT or logic.bagged!")
}
vims <- vims[order(vims$vim, decreasing = TRUE),]
rownames(vims) <- 1:nrow(vims)
ret <- list(vims = vims, vim_type = vim_type, scoring_rule = scoring_rule)
class(ret) <- "vim"
return(ret)
}
permutation_test <- function(x1, x2, n_perm_t = 10000) {
n <- length(x1)
t_perm <- vector()
for(i in 0:n_perm_t) {
if(i == 0)
perm <- rep(FALSE, n)
else
perm <- sample(c(FALSE, TRUE), n, replace = TRUE)
x1p <- c(x1[!perm], x2[perm])
x2p <- c(x2[!perm], x1[perm])
inner_rank_x1 <- rank(x1p)
inner_rank_x2 <- rank(x2p)
full_rank <- rank(c(x1p,x2p))
full_rank_x1 <- full_rank[1:n]
full_rank_x2 <- full_rank[(n+1):(2*n)]
p_est <- 1/(2*n) * mean(full_rank_x2 - full_rank_x1) + 0.5
Z_k <- 1/n * (full_rank_x2 - inner_rank_x2 - full_rank_x1 + inner_rank_x1)
sd_est <- sd(Z_k)
t_perm <- c(t_perm, sqrt(n) * (p_est - 0.5)/sd_est)
}
return(mean(t_perm[1] <= t_perm[-1]))
}
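## Illustrative sketch with made-up scores: the rank-based permutation test
## above returns a one-sided p-value, i.e. the fraction of permuted statistics
## that are at least as large as the observed one.
set.seed(42)
a <- rnorm(20, mean = 0.75, sd = 0.05)
b <- rnorm(20, mean = 0.80, sd = 0.05)
permutation_test(a, b, n_perm_t = 1000)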
prepareXyZ <- function(X, y, Z, y_bin, make.list = TRUE) {
if(!is.list(y)) {
X <- as.matrix(X)
mode(X) <- "integer"
if(!y_bin) {
y <- as.numeric(y)
} else {
y <- as.integer(y)
}
if(make.list) {
X <- list(X)
y <- list(y)
}
if(!is.null(Z)) {
Z <- as.matrix(Z)
mode(Z) <- "double"
if(make.list) {
Z <- list(Z)
}
}
} else {
n_folds <- length(X)
for(i in 1:n_folds) {
X[[i]] <- as.matrix(X[[i]])
mode(X[[i]]) <- "integer"
if(!y_bin) {
y[[i]] <- as.numeric(y[[i]])
} else {
y[[i]] <- as.integer(y[[i]])
}
if(!is.null(Z)) {
Z[[i]] <- as.matrix(Z[[i]])
mode(Z[[i]]) <- "double"
}
}
}
return(list(X = X, y = y, Z = Z))
}
|
bipartite_stats <- function(nullnet, signif.level = 0.95, index.type,
indices, prog.count = TRUE, ...) {
  if (!inherits(nullnet, "nullnet")) {
    stop("bipartite_stats requires a nullnet object")}
if("degree distribution" %in% indices) {
stop("Degree distribution is not currently supported by bipartite_stats")}
if("topology" %in% indices) {
stop("Degree distribution (called by indices = 'topology') is not currently supported by bipartite_stats")}
if (signif.level <= 0 || signif.level >= 1) {
stop("Invalid percentile value specified")}
p <- (1 - signif.level) / 2
obs.web <- nullnet$obs.interactions
rownames(obs.web) <- obs.web[, 1]
obs.web <- obs.web[, -1]
obs.web <- t(obs.web)
if (index.type == "networklevel") {
if("ALL" %in% indices){
obs.net.stats <- bipartite::networklevel(obs.web,
index = "ALLBUTDD", ...)
} else {
obs.net.stats <- bipartite::networklevel(obs.web, index = indices, ...)
}
net.stats <- matrix(ncol = length(obs.net.stats), nrow = 0)
colnames(net.stats) <- names(obs.net.stats)
for (i in 1:nullnet$n.iterations) {
web1 <- eval(parse(text = paste(
'nullnet$rand.data[nullnet$rand.data$Iteration == "It.',
i, '", ]', sep = "")))
rownames(web1) <- web1$Consumer
web1 <- web1[, -c(1:2)]
web1 <- t(web1)
if("ALL" %in% indices){
net.stats <- rbind(net.stats, bipartite::networklevel(web1,
index = "ALLBUTDD", ...))
} else {
net.stats <- rbind(net.stats, bipartite::networklevel(web1, indices, ...))
}
if (prog.count == TRUE){Sys.sleep(0.02); print(i); utils::flush.console()}
}
net.level.mean <- apply(net.stats, 2, mean, na.rm = TRUE)
net.level.up <- apply(net.stats, 2, stats::quantile, probs = 1 - p, na.rm = TRUE)
net.level.low <- apply(net.stats, 2, stats::quantile, probs = p, na.rm = TRUE)
net.level.sd <- apply(net.stats, 2, stats::sd, na.rm = TRUE)
net.level.ses <- (obs.net.stats - net.level.mean) / net.level.sd
net.level.results <- data.frame(obs.net.stats, net.level.mean, net.level.low,
net.level.up, rep("ns", length(net.level.low)),
net.level.ses)
colnames(net.level.results) <- c("Observed", "Null", "Lower.CL", "Upper.CL",
"Test", "SES")
net.level.results$Test <- as.character(net.level.results$Test)
for (i in 1:nrow(net.level.results)) {
if (is.na(net.level.results$Observed[i]))
{net.level.results$Test[i] <- "NA"} else {
if ((net.level.results$Observed[i] == net.level.results$Lower.CL[i]) &
(net.level.results$Observed[i] == net.level.results$Upper.CL[i]))
{net.level.results$Test[i] <- "NA"}
if (net.level.results$Observed[i] > net.level.results$Upper.CL[i])
{net.level.results$Test[i] <- "Higher"}
if (net.level.results$Observed[i] < net.level.results$Lower.CL[i])
{net.level.results$Test[i] <- "Lower"}
}
}
}
if (index.type == "grouplevel") {
if("ALL" %in% indices){
obs.net.stats <- bipartite::grouplevel(obs.web, index = "ALLBUTDD", ...)
} else {
obs.net.stats <- bipartite::grouplevel(obs.web, index = indices, ...)
}
net.stats <- matrix(ncol = length(obs.net.stats), nrow = 0)
colnames(net.stats) <- names(obs.net.stats)
for (i in 1:nullnet$n.iterations) {
web1 <- eval(parse(text = paste(
'nullnet$rand.data[nullnet$rand.data$Iteration == "It.',
i, '", ]', sep = "")))
rownames(web1) <- web1$Consumer
web1 <- web1[, -c(1:2)]
web1 <- t(web1)
if("ALL" %in% indices){
net.stats <- rbind(net.stats, bipartite::grouplevel(web1, "ALLBUTDD",
...)) } else {
net.stats <- rbind(net.stats, bipartite::grouplevel(web1, indices, ...))
}
if (prog.count == TRUE){Sys.sleep(0.02); print(i); utils::flush.console()}
}
net.level.mean <- apply(net.stats, 2, mean, na.rm = TRUE)
net.level.up <- apply(net.stats, 2, stats::quantile, probs = 1 - p,
na.rm = TRUE)
net.level.low <- apply(net.stats, 2, stats::quantile, probs = p,
na.rm = TRUE)
net.level.sd <- apply(net.stats, 2, stats::sd, na.rm = TRUE)
net.level.ses <- (obs.net.stats - net.level.mean) / net.level.sd
grp.level.results <- data.frame(obs.net.stats, net.level.mean, net.level.low,
net.level.up, rep("ns", length(net.level.low)),
net.level.ses)
colnames(grp.level.results) <- c("Observed", "Null", "Lower.CL", "Upper.CL",
"Test", "SES")
grp.level.results$Test <- as.character(grp.level.results$Test)
for (i in 1:nrow(grp.level.results)) {
if (is.na(grp.level.results$Observed[i]))
{grp.level.results$Test[i] <- "NA"} else {
if ((grp.level.results$Observed[i] == grp.level.results$Lower.CL[i]) &
(grp.level.results$Observed[i] == grp.level.results$Upper.CL[i]))
{grp.level.results$Test[i] <- "NA"}
if (grp.level.results$Observed[i] > grp.level.results$Upper.CL[i])
{grp.level.results$Test[i] <- "Higher"}
if (grp.level.results$Observed[i] < grp.level.results$Lower.CL[i])
{grp.level.results$Test[i] <- "Lower"}
}
}
}
if (index.type == "specieslevel") {
if("ALL" %in% indices){
obs.net.stats <- bipartite::specieslevel(obs.web, index = "ALLBUTD",
level = "both", ...)} else {
obs.net.stats <- bipartite::specieslevel(obs.web, index = indices,
level = "both", ...)
}
obs.high <- cbind(rownames(obs.net.stats$'higher level'),
obs.net.stats$'higher level')
rownames(obs.high) <- NULL
colnames(obs.high)[1] <- "species"
obs.low <- cbind(rownames(obs.net.stats$'lower level'),
obs.net.stats$'lower level')
rownames(obs.low) <- NULL
colnames(obs.low)[1] <- "species"
for (i in 1:nullnet$n.iterations) {
web1 <- eval(parse(text = paste(
'nullnet$rand.data[nullnet$rand.data$Iteration == "It.',
i, '", ]', sep = "")))
rownames(web1) <- web1$Consumer
web1 <- web1[, -c(1:2)]
web1 <- t(web1)
if("ALL" %in% indices){
iteration.stats <- bipartite::specieslevel(web1, index = "ALLBUTD",
level = "both", ...)} else {
iteration.stats <- bipartite::specieslevel(web1, index = indices,
level = "both", ...)
}
iteration.high <- cbind(rownames(iteration.stats$'higher level'),
iteration.stats$'higher level')
rownames(iteration.high) <- NULL
colnames(iteration.high)[1] <- "species"
if (i == 1) { net.stats.high <- iteration.high } else {
net.stats.high <- rbind(net.stats.high, iteration.high)
}
iteration.low <- cbind(rownames(iteration.stats$'lower level'),
iteration.stats$'lower level')
rownames(iteration.low) <- NULL
colnames(iteration.low)[1] <- "species"
if (i == 1) { net.stats.low <- iteration.low } else {
net.stats.low <- rbind(net.stats.low, iteration.low)
}
if (prog.count == TRUE){Sys.sleep(0.02); print(i); utils::flush.console()}
}
matrix.names <- colnames(net.stats.high)[2:ncol(net.stats.high)]
higher.mean <- stats::aggregate(net.stats.high[, 2:ncol(net.stats.high)],
by = list(net.stats.high$species), mean,
na.rm = TRUE)
higher.up <- stats::aggregate(net.stats.high[, 2:ncol(net.stats.high)],
by = list(net.stats.high$species), stats::quantile,
probs = 1 - p, na.rm = TRUE)
higher.low <- stats::aggregate(net.stats.high[, 2:ncol(net.stats.high)],
by = list(net.stats.high$species), stats::quantile,
probs = p, na.rm = TRUE)
higher.sd <- stats::aggregate(net.stats.high[, 2:ncol(net.stats.high)],
by = list(net.stats.high$species), stats::sd,
na.rm = TRUE)
higher.ses <- cbind.data.frame(obs.high[, 1], ((obs.high[, -1] -
higher.mean[, -1]) / higher.sd[, -1]))
colnames(higher.mean) <- colnames(net.stats.high)
colnames(higher.up) <- colnames(net.stats.high)
colnames(higher.low) <- colnames(net.stats.high)
colnames(higher.sd) <- colnames(net.stats.high)
colnames(higher.ses) <- colnames(net.stats.high)
lower.taxa <- data.frame(Species = unique(c(as.character(obs.low$species),
as.character(net.stats.low$species))))
obs.low <- merge(lower.taxa, obs.low, by.x = "Species", by.y = "species",
all.x = TRUE, all.y = TRUE)
obs.low <- obs.low[order(obs.low$Species), ]
lower.mean <- stats::aggregate(net.stats.low[, 2:ncol(net.stats.low)],
by = list(net.stats.low$species), mean, na.rm = TRUE)
lower.mean <- merge(lower.taxa, lower.mean, by.x = "Species", by.y =
"Group.1",all.x = TRUE, all.y = TRUE)
lower.mean <- lower.mean[order(lower.mean$Species), ]
lower.up <- stats::aggregate(net.stats.low[, 2:ncol(net.stats.low)],
by = list(net.stats.low$species), stats::quantile,
probs = 1 - p, na.rm = TRUE)
lower.up <- merge(lower.taxa, lower.up, by.x = "Species", by.y = "Group.1",
all.x = TRUE, all.y = TRUE)
lower.up <- lower.up[order(lower.up$Species), ]
lower.low <- stats::aggregate(net.stats.low[, 2:ncol(net.stats.low)],
by = list(net.stats.low$species), stats::quantile,
probs = p, na.rm = TRUE)
lower.low <- merge(lower.taxa, lower.low, by.x = "Species", by.y = "Group.1",
all.x = TRUE, all.y = TRUE)
lower.low <- lower.low[order(lower.low$Species), ]
lower.sd <- stats::aggregate(net.stats.low[, 2:ncol(net.stats.low)],
by = list(net.stats.low$species), stats::sd, na.rm = TRUE)
lower.sd <- merge(lower.taxa, lower.sd, by.x = "Species", by.y = "Group.1",
all.x = TRUE, all.y = TRUE)
lower.sd <- lower.sd[order(lower.sd$Species), ]
lower.ses <- cbind.data.frame(obs.low[, 1], ((obs.low[, -1] -
lower.mean[, -1]) / lower.sd[, -1]))
    if(!identical(obs.low$Species, lower.mean$Species) ||
       !identical(obs.low$Species, lower.low$Species) ||
       !identical(obs.low$Species, lower.up$Species) ||
       !identical(obs.low$Species, lower.sd$Species)) {stop(
         "Different taxon lists in observed and modelled data")}
colnames(lower.mean) <- colnames(net.stats.high)
colnames(lower.up) <- colnames(net.stats.high)
colnames(lower.low) <- colnames(net.stats.high)
colnames(lower.sd) <- colnames(net.stats.high)
colnames(lower.ses) <- colnames(net.stats.high)
    if (!identical(as.vector(colnames(net.stats.high)[2:ncol(net.stats.high)]),
                   as.vector(colnames(net.stats.low)[2:ncol(net.stats.low)])))
    {stop("Different indices at lower and higher levels")}
sp.level <- list()
for (j in 1:length(matrix.names)) {
mat1 <- merge(obs.high[, c("species", matrix.names[j])],
higher.mean[, c("species", matrix.names[j])],
by.x = "species", by.y = "species")
colnames(mat1) <- c("Species", "Observed", "Null")
mat1 <- merge(mat1, higher.low[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
mat1 <- merge(mat1, higher.up[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
mat1 <- cbind(mat1, rep("ns", nrow(mat1)))
mat1 <- merge(mat1, higher.ses[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
colnames(mat1)[4:7] <- c("Lower.CL", "Upper.CL", "Test", "SES")
mat1$Test <- as.character(mat1$Test)
for (k in 1:nrow(mat1)) {
if (is.na(mat1$Observed[k])) {mat1$Test[k] <- "NA"} else {
if ((mat1$Observed[k] == mat1$Lower.CL[k]) &
(mat1$Observed[k] == mat1$Upper.CL[k])) {mat1$Test[k] <- "NA"}
if (mat1$Observed[k] > mat1$Upper.CL[k]) {mat1$Test[k] <- "Higher"}
if (mat1$Observed[k] < mat1$Lower.CL[k]) {mat1$Test[k] <- "Lower"}
}
}
sp.level[[matrix.names[j]]]$higher <- mat1
mat1 <- merge(obs.low[, c("Species", matrix.names[j])],
lower.mean[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
colnames(mat1) <- c("Species", "Observed", "Null")
mat1 <- merge(mat1, lower.low[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
mat1 <- merge(mat1, lower.up[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
mat1 <- cbind(mat1, rep("ns", nrow(mat1)))
mat1 <- merge(mat1, lower.ses[, c("species", matrix.names[j])],
by.x = "Species", by.y = "species")
colnames(mat1)[4:7] <- c("Lower.CL", "Upper.CL", "Test", "SES")
mat1$Test <- as.character(mat1$Test)
for (k in 1:nrow(mat1)) {
if (is.na(mat1$Observed[k]) || is.na(mat1$Lower.CL[k])) {mat1$Test[k] <- "NA"} else {
if ((mat1$Observed[k] == mat1$Lower.CL[k]) &
(mat1$Observed[k] == mat1$Upper.CL[k])) {mat1$Test[k] <- "ns"}
if (mat1$Observed[k] > mat1$Upper.CL[k]) {mat1$Test[k] <- "Higher"}
if (mat1$Observed[k] < mat1$Lower.CL[k]) {mat1$Test[k] <- "Lower"}
}
}
sp.level[[matrix.names[j]]]$lower <- mat1
}
}
if (index.type == "specieslevel" & indices[1] == "ALL") {
warning("Dependence matrix is not currently supported by bipartite_stats: indices = 'ALLBUTD' used instead")}
if (index.type == "grouplevel" & indices[1] == "ALL") {
warning("Degree distribution is not currently supported by bipartite_stats: indices = 'ALLBUTDD' used instead")}
if (index.type == "networklevel" & indices[1] == "ALL") {
warning("Degree distribution is not currently supported by bipartite_stats: indices = 'ALLBUTDD' used instead")}
if (index.type == "networklevel") {return(net.level.results)}
if (index.type == "grouplevel") {return(grp.level.results)}
if (index.type == "specieslevel") {return(sp.level)}
}
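# Hedged usage sketch (object names are illustrative): 'null.model' is assumed
# to be a nullnet object, e.g. as produced by econullnetr::generate_null_net().
# bipartite_stats(null.model, signif.level = 0.95, index.type = "networklevel",
#                 indices = c("linkage density", "weighted connectance"))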
|
context("Assertion assertions")
test_that("is.integerish works correctly", {
expect_true(is.integerish(1L))
expect_true(is.integerish(c(1L, 2L, 3L)))
expect_true(is.integerish(c(1L, NA, 3L)))
expect_false(is.integerish(c(1L, 2.1, 3L)))
expect_false(is.integerish(1L + .Machine$double.eps))
expect_false(is.integerish(1L - .Machine$double.neg.eps))
expect_true(is.integerish(Inf))
expect_true(is.integerish(-Inf))
expect_true(is.integerish(1e10))
expect_true(is.integerish(-1e10))
expect_false(is.integerish(1e10 + 0.0002))
expect_false(is.integerish(1e10 - 0.0002))
expect_false(is.integerish(NA))
expect_false(is.integerish(NA_real_))
expect_false(is.integerish(NULL))
expect_false(is.integerish(NaN))
})
test_that("is.named works correctly", {
expect_false(is.named(1))
x <- 1:3
expect_false(is.named(x))
names(x) <- letters[1:3]
expect_true(is.named(x))
names(x)[2] <- ""
expect_false(is.named(x))
names(x)[2] <- NA
expect_false(is.named(x))
names(x) <- NULL
expect_false(is.named(x))
expect_false(is.named(NA))
expect_false(is.named(NULL))
})
test_that("has_attr works correctly", {
x <- 1:3
expect_false(has_attr(x, "names"))
names(x) <- letters[1:3]
expect_true(has_attr(x, "names"))
expect_false(has_attr(x, "something else"))
})
test_that("has_name works correctly", {
x <- 1:3
expect_false(has_name(x, "a"))
names(x) <- letters[1:3]
expect_true(has_name(x, letters[2]))
expect_false(has_name(x, "something else"))
expect_false(has_name(x, NA))
expect_true(has_name(x, c("a", "b")))
expect_true(has_name(x, c("a", "b", "c")))
expect_false(has_name(x, c("a", "d")))
})
test_that("noNA works correctly", {
expect_true(noNA("a"))
expect_false(noNA(c(TRUE, NA)))
x <- sample(c(1:10, NA), 100, rep = TRUE)
expect_false(noNA(x))
expect_true(noNA(1:1000))
})
test_that("are_equal works correctly", {
x <- 2
expect_false(are_equal(x, 1.9))
expect_true(are_equal(x, 1.999, tol = 0.01))
expect_true(are_equal(x, 2))
expect_true(are_equal('a', 'a'))
expect_false(are_equal('a', 'b'))
expect_true(are_equal(NA, NA))
expect_true(are_equal(NULL, NULL))
})
test_that("is.error works correctly", {
x <- try(stop("!!"), silent=TRUE)
expect_true(is.error(x))
expect_false(is.error(1))
expect_false(is.error(NA))
expect_false(is.error(NULL))
})
test_that("is.time works correctly", {
expect_true(is.time(Sys.time()))
expect_false(is.time(Sys.Date()))
expect_false(is.time(1))
expect_false(is.time(NA))
expect_false(is.time(NULL))
})
test_that("is.date works correctly", {
expect_false(is.date(Sys.time()))
expect_true(is.date(Sys.Date()))
expect_false(is.date(1))
expect_false(is.date(NA))
expect_false(is.date(NULL))
})
test_that("has_args works correctly", {
expect_error(1 %has_args% "x")
expect_true(mean %has_args% "x")
expect_false(mean %has_args% "y")
expect_error(NA %has_args% "x")
expect_error(NULL %has_args% "x")
expect_true(has_args(rnorm, "n"))
expect_true(has_args(rnorm, c("n", "mean")))
expect_true(has_args(rnorm, c("mean", "sd", "n")))
expect_false(has_args(rnorm, "n", exact = TRUE))
expect_false(has_args(rnorm, c("n", "mean"), exact = TRUE))
expect_false(has_args(rnorm, c("mean", "sd", "n"), exact = TRUE))
})
test_that("not_empty works correctly", {
expect_true(not_empty(1))
expect_false(not_empty(numeric()))
expect_false(not_empty(mtcars[0, ]))
expect_false(not_empty(mtcars[, 0]))
expect_true(not_empty(NA))
expect_false(not_empty(NULL))
})
|
test_that("dweibull extremes", {
expect_identical(dweibull(numeric(0)), numeric(0))
expect_identical(dweibull(NA), NA_real_)
expect_identical(dweibull(NaN), NaN)
expect_identical(dweibull(0), 1)
expect_equal(dweibull(1), 0.367879441171442)
expect_equal(dweibull(1, log = TRUE), log(dweibull(1)))
expect_equal(dweibull(1, shape = -1), NaN)
expect_equal(dweibull(1, scale = -1), NaN)
expect_identical(dweibull(0), 1)
expect_identical(dweibull(-Inf), 0)
expect_identical(dweibull(Inf), 0)
expect_identical(dweibull(c(NA, NaN, 0, Inf, -Inf)),
c(dweibull(NA), dweibull(NaN), dweibull(0), dweibull(Inf), dweibull(-Inf)))
expect_equal(dweibull(1:2, shape = 1:2, scale = 3:4),
c(dweibull(1, 1, 3), dweibull(2, 2, 4)))
expect_equal(dweibull(1:2, shape = c(1, NA), scale = 3:4),
c(dweibull(1, 1, 3), NA))
})
test_that("pweibull extremes", {
expect_identical(pweibull(numeric(0)), numeric(0))
expect_identical(pweibull(NA), NA_real_)
expect_identical(pweibull(NaN), NaN)
expect_identical(pweibull(0), 0)
expect_equal(pweibull(1), 0.632120558828558)
expect_equal(pweibull(1, log.p = TRUE), log(pweibull(1)))
expect_equal(pweibull(1, lower.tail = FALSE), 1 - pweibull(1))
expect_equal(pweibull(1, lower.tail = FALSE, log.p = TRUE), log(1 - pweibull(1)))
expect_equal(pweibull(1, shape = -1), NaN)
expect_equal(pweibull(1, scale = -1), NaN)
expect_identical(pweibull(0), 0)
expect_identical(pweibull(-Inf), 0)
expect_identical(pweibull(Inf), 1)
expect_identical(pweibull(c(NA, NaN, 0, Inf, -Inf)),
c(pweibull(NA), pweibull(NaN), pweibull(0), pweibull(Inf), pweibull(-Inf)))
expect_equal(pweibull(1:2, shape = 1:2, scale = 3:4),
c(pweibull(1, 1, 3), pweibull(2, 2, 4)))
expect_equal(pweibull(1:2, shape = c(1, NA), scale = 3:4),
c(pweibull(1, 1, 3), NA))
})
test_that("qweibull extremes", {
expect_identical(qweibull(numeric(0)), numeric(0))
expect_identical(qweibull(NA), NA_real_)
expect_identical(qweibull(NaN), NaN)
expect_identical(qweibull(0), 0)
expect_identical(qweibull(1), Inf)
expect_equal(qweibull(0.75), 1.38629436111989)
expect_equal(qweibull(0.75, log.p = TRUE), NaN)
expect_equal(qweibull(log(0.75), log.p = TRUE), qweibull(0.75))
expect_equal(qweibull(0.75, lower.tail = FALSE), qweibull(0.25))
expect_equal(qweibull(log(0.75), lower.tail = FALSE, log.p = TRUE), qweibull(0.25))
expect_equal(qweibull(0.5, shape = -1), NaN)
expect_equal(qweibull(0.5, scale = -1), NaN)
expect_identical(qweibull(0), 0)
expect_identical(qweibull(-Inf), NaN)
expect_identical(qweibull(Inf), NaN)
expect_identical(qweibull(c(NA, NaN, 0, Inf, -Inf)),
c(qweibull(NA), qweibull(NaN), qweibull(0), qweibull(Inf), qweibull(-Inf)))
expect_equal(qweibull(1:2, shape = 1:2, scale = 3:4),
c(qweibull(1, 1, 3), qweibull(2, 2, 4)))
expect_equal(qweibull(1:2, shape = c(1, NA), scale = 3:4),
c(qweibull(1, 1, 3), NA))
expect_equal(qweibull(pweibull(c(0, 0.1, 0.5, 0.9, 1))), c(0, 0.1, 0.5, 0.9, 1))
})
test_that("rweibull extremes", {
expect_identical(rweibull(numeric(0)), numeric(0))
expect_error(rweibull(NA))
expect_identical(rweibull(0), numeric(0))
set.seed(42)
expect_equal(rweibull(1), 0.0890432104972705)
set.seed(42)
expect_equal(rweibull(1.9), 0.0890432104972705)
set.seed(42)
expect_equal(rweibull(2), c(0.0890432104972705, 0.0649915162066272))
set.seed(42)
expect_equal(rweibull(3:4), c(0.0890432104972705, 0.0649915162066272))
expect_equal(rweibull(0, shape = -1), numeric(0))
expect_equal(rweibull(1, shape = -1), NaN)
expect_equal(rweibull(2, shape = -1), c(NaN, NaN))
expect_equal(rweibull(0, scale = -1), numeric(0))
expect_equal(rweibull(1, scale = -1), NaN)
expect_equal(rweibull(2, scale = -1), c(NaN, NaN))
expect_error(rweibull(1, shape = 1:2))
expect_error(rweibull(1, scale = 1:2))
expect_identical(rweibull(1, shape = NA), NA_real_)
})
test_that("fit weibull quinoline", {
quin <- ssdtools::test_data[ssdtools::test_data$Chemical == "Quinoline", ]
expect_warning(dist <- ssd_fit_dist(quin, dist = "weibull"))
expect_true(is.fitdist(dist))
expect_equal(
coef(dist),
c(shape = 0.627542681172847, scale = 15343.492101029)
)
})
test_that("fit weibull boron", {
dist <- ssd_fit_dist(ssdtools::boron_data, dist = "weibull")
expect_true(is.fitdist(dist))
expect_equal(
coef(dist),
c(shape = 0.966282452187714, scale = 23.5097477721338)
)
})
|
dunn=function(trat,
resp,
method="holm",
alpha=0.05,
decreasing=TRUE)
{requireNamespace("dunn.test")
dtres <- capture.output(res <- dunn.test::dunn.test(resp, trat, method,
kw = TRUE,
altp = TRUE))
res <- data.frame(res[-which(names(res) == "chi2")])[,c(4, 1, 2, 3)]
names(res) <- c("Comparison", "Z", "P.unadj", "P.adj")
vec2mat2=function (x, sep = "-"){splits <- strsplit(x, sep)
n.spl <- sapply(splits, length)
if (any(n.spl != 2))
stop("Names must contain exactly one '", sep, "' each; instead got ",
paste(x, collapse = ", "))
x2 <- t(as.matrix(as.data.frame(splits)))
dimnames(x2) <- list(x, NULL)
x2}
multcompLetters=function (x, compare = "<",
threshold = alpha,
Letters = c(letters, LETTERS, "."),
reversed = decreasing)
{x.is <- deparse(substitute(x))
  if (inherits(x, "dist"))
x <- as.matrix(x)
if (!is.logical(x))
x <- do.call(compare, list(x, threshold))
dimx <- dim(x)
{
if ((length(dimx) == 2) && (dimx[1] == dimx[2])) {
Lvls <- dimnames(x)[[1]]
if (length(Lvls) != dimx[1])
stop("Names requred for ", x.is)
else {
x2. <- t(outer(Lvls, Lvls, paste, sep = ""))
x2.n <- outer(Lvls, Lvls, function(x1, x2) nchar(x2))
x2.2 <- x2.[lower.tri(x2.)]
x2.2n <- x2.n[lower.tri(x2.n)]
x2a <- substring(x2.2, 1, x2.2n)
x2b <- substring(x2.2, x2.2n + 1)
x2 <- cbind(x2a, x2b)
x <- x[lower.tri(x)]
}
}
else {
namx <- names(x)
if (length(namx) != length(x))
stop("Names required for ", x.is)
x2 <- vec2mat2(namx)
Lvls <- unique(as.vector(x2))}}
n <- length(Lvls)
LetMat <- array(TRUE, dim = c(n, 1), dimnames = list(Lvls, NULL))
k2 <- sum(x)
if (k2 == 0) {
Ltrs <- rep(Letters[1], n)
names(Ltrs) <- Lvls
dimnames(LetMat)[[2]] <- Letters[1]
return(list(Letters = Ltrs, LetterMatrix = LetMat))}
distinct.pairs <- x2[x, , drop = FALSE]
absorb <- function(A.) {
k. <- dim(A.)[2]
if (k. > 1) {
for (i. in 1:(k. - 1)) for (j. in (i. + 1):k.) {
if (all(A.[A.[, j.], i.])) {
A. <- A.[, -j., drop = FALSE]
return(absorb(A.))}
else {
if (all(A.[A.[, i.], j.])) {
A. <- A.[, -i., drop = FALSE]
return(absorb(A.))
}
}
}
}
A.
}
for (i in 1:k2) {
dpi <- distinct.pairs[i, ]
ijCols <- (LetMat[dpi[1], ] & LetMat[dpi[2], ])
if (any(ijCols)) {
A1 <- LetMat[, ijCols, drop = FALSE]
A1[dpi[1], ] <- FALSE
LetMat[dpi[2], ijCols] <- FALSE
LetMat <- cbind(LetMat, A1)
LetMat <- absorb(LetMat)
}
}
sortCols <- function(B) {
firstRow <- apply(B, 2, function(x) which(x)[1])
B <- B[, order(firstRow)]
firstRow <- apply(B, 2, function(x) which(x)[1])
reps <- (diff(firstRow) == 0)
if (any(reps)) {
nrep <- table(which(reps))
irep <- as.numeric(names(nrep))
k <- dim(B)[1]
for (i in irep) {
i. <- i:(i + nrep[as.character(i)])
j. <- (firstRow[i] + 1):k
B[j., i.] <- sortCols(B[j., i., drop = FALSE])
}
}
B
}
LetMat. <- sortCols(LetMat)
if (reversed)
LetMat. <- LetMat.[, rev(1:ncol(LetMat.))]
k.ltrs <- dim(LetMat.)[2]
makeLtrs <- function(kl, ltrs = Letters) {
kL <- length(ltrs)
if (kl < kL)
return(ltrs[1:kl])
ltrecurse <- c(paste(ltrs[kL], ltrs[-kL], sep = ""),
ltrs[kL])
c(ltrs[-kL], makeLtrs(kl - kL + 1, ltrecurse))
}
Ltrs <- makeLtrs(k.ltrs, Letters)
dimnames(LetMat.)[[2]] <- Ltrs
LetVec <- rep(NA, n)
names(LetVec) <- Lvls
for (i in 1:n) LetVec[i] <- paste(Ltrs[LetMat.[i, ]], collapse = "")
nch.L <- nchar(Ltrs)
blk.L <- rep(NA, k.ltrs)
for (i in 1:k.ltrs) blk.L[i] <- paste(rep(" ", nch.L[i]),
collapse = "")
monoVec <- rep(NA, n)
names(monoVec) <- Lvls
for (j in 1:n) {
ch2 <- blk.L
if (any(LetMat.[j, ]))
ch2[LetMat.[j, ]] <- Ltrs[LetMat.[j, ]]
monoVec[j] <- paste(ch2, collapse = "")
}
InsertAbsorb <- list(Letters = LetVec, monospacedLetters = monoVec,
LetterMatrix = LetMat.)
class(InsertAbsorb) <- "multcompLetters"
InsertAbsorb}
cldList=function (formula = NULL, data = NULL, comparison = NULL, p.value = NULL,
threshold = alpha, print.comp = FALSE, remove.space = TRUE,
remove.equal = TRUE, remove.zero = TRUE, swap.colon = TRUE,
swap.vs = FALSE){if (!is.null(formula)) {
p.value = eval(parse(text = paste0("data", "$",
all.vars(formula[[2]])[1])))
comparison = eval(parse(text = paste0("data", "$",
all.vars(formula[[3]])[1])))}
Comparison = (as.numeric(p.value) <= threshold)
if (sum(Comparison) == 0) {stop("No significant differences.", call. = FALSE)}
if (remove.space == TRUE) {comparison = gsub(" ", "", comparison)}
if (remove.equal == TRUE) {comparison = gsub("=", "", comparison)}
if (remove.zero == TRUE) {comparison = gsub("0", "", comparison)}
if (swap.colon == TRUE) {comparison = gsub(":", "-", comparison)}
if (swap.vs == TRUE) {comparison = gsub("vs", "-", comparison)}
names(Comparison) = comparison
if (print.comp == TRUE) {
Y = data.frame(Comparisons = names(Comparison), p.value = p.value,
Value = Comparison, Threshold = threshold)
cat("\n", "\n")
print(Y)
cat("\n", "\n")}
MCL = multcompLetters(Comparison)
Group = names(MCL$Letters)
Letter = as.character(MCL$Letters)
MonoLetter = as.character(MCL$monospacedLetters)
Z = data.frame(Group, Letter, MonoLetter)
return(Z)}
resp1=resp
names(resp1)=trat
postos=rank(resp1)
somaposto=tapply(postos,names(postos),sum)
N=tapply(postos,names(postos), length)
postosmedios=somaposto/N
media=tapply(resp1,trat,mean)
mediana=tapply(resp1,trat,median)
dunns=cldList(P.adj~Comparison,
data=res)
tabela=data.frame("group"=dunns$Group,
"Sum Rank"=somaposto,
"Mean Rank"=postosmedios,
"Mean"=media,
"Median"=mediana,
"dunn"=dunns$Letter)
  krusk=kruskal.test(resp,trat)
chi=krusk$statistic
pvalor=krusk$p.value
list("Statistic"=chi,
"p-value"=pvalor,
"Post-hoc"=tabela)}
|
gwr.morantest <- function(x, lw, zero.policy = FALSE) {
    if (!inherits(x, "gwr")) stop(paste(deparse(substitute(x)),
        "is not a gwr object"))
if (is.null(x$lhat)) stop("hatmatrix=TRUE needed in gwr fit")
if (!inherits(lw, "listw"))
stop(paste(deparse(substitute(lw)), "is not a listw object"))
n <- ncol(x$lhat)
if (n != length(lw$neighbours)) stop("objects of different length")
    if (lw$style != "W") warning(deparse(substitute(lw)),
        " is not row standardised")
if (requireNamespace("spdep", quietly = TRUE)) {
W <- spdep::listw2mat(spdep::listw2U(lw))
} else {
stop("spdep not available")
}
N <- diag(n) - x$lhat
e.gwr <- N %*% x$lm$y
I0.gwr <- c((t(e.gwr) %*% W %*% e.gwr) / (t(e.gwr) %*% e.gwr))
A <- t(N) %*% (W - I0.gwr*diag(n)) %*% N
EQ.gwr <- sum(diag(A))
tr2 <- sum(diag(A %*% A))
tr3 <- sum(diag(A %*% A %*% A))
varQ.gwr <- 2*tr2
EQ.EQ3.gwr <- 8*tr3
h.gwr <- tr2^3/tr3^2
chi2.gwr <- 0
p.gwr <- 0
if (EQ.EQ3.gwr > 0) {
chi2.gwr <- h.gwr - ((sqrt(2*h.gwr)*EQ.gwr)/(sqrt(varQ.gwr)))
p.gwr <- 1 - pchisq(chi2.gwr, h.gwr)
}
else if (EQ.EQ3.gwr < 0) {
chi2.gwr <- (h.gwr +
((sqrt(2*h.gwr)*EQ.gwr)/(sqrt(varQ.gwr))))
p.gwr <- pchisq(chi2.gwr, h.gwr)
}
GWRtest <- list(estimate=c(I = I0.gwr),
statistic=c(statistic = chi2.gwr), parameter=c(df = h.gwr),
p.value=p.gwr, data.name="GWR residuals",
method="Leung et al. 2000 three moment approximation for Moran's I")
class(GWRtest) <- "htest"
GWRtest
}
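# Hedged usage sketch (object names are illustrative): requires a gwr fit from
# spgwr with hatmatrix = TRUE and a row-standardised listw object from spdep.
# fit <- spgwr::gwr(y ~ x1 + x2, data = spdf, bandwidth = bw, hatmatrix = TRUE)
# gwr.morantest(fit, lw)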
|
LA_LHD=function(n,k,m=10,N=10,prun=1/(k-1),OC="phi_p",p=15,q=1,maxtime=5){
maxtime=maxtime*60
timeALL=NULL
C=1
X=rep(0,n*k*m)
dim(X)=c(n,k,m)
for (i in 1:m) {
X[,,i]=LHD::rLHD(n=n,k=k)
}
if(OC=="phi_p"){
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::phi_p(X[,,i],p=p,q=q)
}
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
LW=X[,,temp[2,2]]
RW=X[,,temp[3,2]]
m=6*k+3
Xnew=rep(0,n*k*m)
dim(Xnew)=c(n,k,m)
Xnew[,,1]=centre
Xnew[,,2]=LW
Xnew[,,3]=RW
index=4
for (j in 1:k) {
Xnew[,,index]=centre
Xnew[,j,index]=LW[,j]
index=index+1
Xnew[,,index]=centre
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=LW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=LW
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=RW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=RW
Xnew[,j,index]=LW[,j]
index=index+1
}
X=Xnew
for (i in 2:m) {
for (j in 1:k) {
z=stats::runif(1,0,1)
if (z<=prun){
X[,,i]=LHD::exchange(X=X[,,i],j=j)
}
}
}
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::phi_p(X[,,i],p=p,q=q)
}
time1=Sys.time()
timediff=time1-time0
timeALL=c(timeALL,timediff)
utils::setTxtProgressBar(progressbar, C)
if(as.numeric(sum(timeALL)+timediff)<=maxtime){C=C+1}
if(as.numeric(sum(timeALL)+timediff)>maxtime){C=N+1}
}
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
}
if(OC=="AvgAbsCor"){
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::AvgAbsCor(X[,,i])
}
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
LW=X[,,temp[2,2]]
RW=X[,,temp[3,2]]
m=6*k+3
Xnew=rep(0,n*k*m)
dim(Xnew)=c(n,k,m)
Xnew[,,1]=centre
Xnew[,,2]=LW
Xnew[,,3]=RW
index=4
for (j in 1:k) {
Xnew[,,index]=centre
Xnew[,j,index]=LW[,j]
index=index+1
Xnew[,,index]=centre
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=LW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=LW
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=RW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=RW
Xnew[,j,index]=LW[,j]
index=index+1
}
X=Xnew
for (i in 2:m) {
for (j in 1:k) {
z=stats::runif(1,0,1)
if (z<=prun){
X[,,i]=LHD::exchange(X=X[,,i],j=j)
}
}
}
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::AvgAbsCor(X[,,i])
}
time1=Sys.time()
timediff=time1-time0
timeALL=c(timeALL,timediff)
utils::setTxtProgressBar(progressbar, C)
if(as.numeric(sum(timeALL)+timediff)<=maxtime){C=C+1}
if(as.numeric(sum(timeALL)+timediff)>maxtime){C=N+1}
}
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
}
if(OC=="MaxAbsCor"){
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::MaxAbsCor(X[,,i])
}
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
LW=X[,,temp[2,2]]
RW=X[,,temp[3,2]]
m=6*k+3
Xnew=rep(0,n*k*m)
dim(Xnew)=c(n,k,m)
Xnew[,,1]=centre
Xnew[,,2]=LW
Xnew[,,3]=RW
index=4
for (j in 1:k) {
Xnew[,,index]=centre
Xnew[,j,index]=LW[,j]
index=index+1
Xnew[,,index]=centre
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=LW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=LW
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=RW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=RW
Xnew[,j,index]=LW[,j]
index=index+1
}
X=Xnew
for (i in 2:m) {
for (j in 1:k) {
z=stats::runif(1,0,1)
if (z<=prun){
X[,,i]=LHD::exchange(X=X[,,i],j=j)
}
}
}
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::MaxAbsCor(X[,,i])
}
time1=Sys.time()
timediff=time1-time0
timeALL=c(timeALL,timediff)
utils::setTxtProgressBar(progressbar, C)
if(as.numeric(sum(timeALL)+timediff)<=maxtime){C=C+1}
if(as.numeric(sum(timeALL)+timediff)>maxtime){C=N+1}
}
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
}
if(OC=="MaxProCriterion"){
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::MaxProCriterion(X[,,i])
}
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
LW=X[,,temp[2,2]]
RW=X[,,temp[3,2]]
m=6*k+3
Xnew=rep(0,n*k*m)
dim(Xnew)=c(n,k,m)
Xnew[,,1]=centre
Xnew[,,2]=LW
Xnew[,,3]=RW
index=4
for (j in 1:k) {
Xnew[,,index]=centre
Xnew[,j,index]=LW[,j]
index=index+1
Xnew[,,index]=centre
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=LW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=LW
Xnew[,j,index]=RW[,j]
index=index+1
}
for (j in 1:k) {
Xnew[,,index]=RW
Xnew[,j,index]=centre[,j]
index=index+1
Xnew[,,index]=RW
Xnew[,j,index]=LW[,j]
index=index+1
}
X=Xnew
for (i in 2:m) {
for (j in 1:k) {
z=stats::runif(1,0,1)
if (z<=prun){
X[,,i]=LHD::exchange(X=X[,,i],j=j)
}
}
}
result=rep(0,m)
for (i in 1:m) {
result[i]=LHD::MaxProCriterion(X[,,i])
}
time1=Sys.time()
timediff=time1-time0
timeALL=c(timeALL,timediff)
utils::setTxtProgressBar(progressbar, C)
if(as.numeric(sum(timeALL)+timediff)<=maxtime){C=C+1}
if(as.numeric(sum(timeALL)+timediff)>maxtime){C=N+1}
}
temp=cbind(result,1:m)
temp=temp[order(temp[,1]),]
centre=X[,,temp[1,2]]
}
avgtime=round(mean(timeALL),2)
iterations=length(timeALL)
close(progressbar)
print(paste0("average CPU time per iteration is: ", avgtime, " seconds"))
print(paste0("the number of iterations completed is: ", iterations))
centre
}
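# Hedged usage sketch: search for a 12-run, 4-factor Latin hypercube design
# under the default phi_p criterion with a short time budget (maxtime is in minutes).
# X <- LA_LHD(n = 12, k = 4, N = 5, maxtime = 1)
# LHD::phi_p(X)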
|
meanDecompose <- function(formula, data) {
v <- as.character(attr(terms(formula), "variables"))[-1]
if (!is.data.table(data)) {
data <- as.data.table(data)[, v, with = FALSE]
} else {
data <- data[, v, with = FALSE]
}
out <- vector("list", length = length(v))
vres <- paste0(v[1], "_residual")
stopifnot(!any(vres %in% v))
data[, (vres) := get(v[1])]
vfinal <- vector("character", length = length(v))
for (i in 2:length(v)) {
vname <- paste0(v[1], "_", v[i])
data[, (vname) := mean(get(vres), na.rm = TRUE), by = c(v[2:i])]
data[, (vres) := get(vres) - get(vname)]
out[[i - 1]] <- data[, .(X = get(vname)[1]), by = c(v[2:i])]
vfinal[i - 1] <- paste0(v[1], " by ", paste(v[2:i], collapse = " & "))
}
out[[length(v)]] <- data[, .(X = get(vres))]
vfinal[length(v)] <- paste0(v[1], " by ", "residual")
names(out) <- vfinal
return(out)
}
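# Hedged usage sketch (mtcars used purely as convenient example data): splits
# mpg into a cylinder-level mean component and a within-cylinder residual.
# meanDecompose(mpg ~ cyl, data = mtcars)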
if(getRversion() >= "2.15.1") utils::globalVariables(c("vcov", "grp"))
iccMixed <- function(dv, id, data, family = c("gaussian", "binomial")) {
if (!is.data.table(data)) {
if (is.data.frame(data)) {
data <- as.data.table(data)
} else {
message("Attempting to coerce data to a data.table")
data <- as.data.table(data)
}
}
stopifnot(all(c(dv, id) %in% names(data)))
stopifnot(is.character(dv))
stopifnot(all(is.character(id)))
stopifnot(identical(length(dv), 1L))
stopifnot(length(id) >= 1L)
d <- copy(data[, c(dv, id), with = FALSE])
f <- sprintf("%s ~ 1 + %s", dv, paste(paste0("(1 | ", id, ")"), collapse = " + "))
family <- match.arg(family)
res.binom <- (pi^2) / 3
m <- switch(family,
gaussian = lmer(formula = as.formula(f), data = d, REML = TRUE),
binomial = glmer(formula = as.formula(f), data = d, family = binomial())
)
est <- as.data.table(as.data.frame(VarCorr(m)))[, .(grp, vcov)]
if (identical(family, "binomial")) {
est <- rbind(est, est[1])
est[nrow(est), c("grp", "vcov") := .("Residual", res.binom)]
}
est[, .(Var = grp, Sigma = vcov, ICC = vcov / sum(vcov))]
}
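# Hedged usage sketch (requires lme4 and data.table; mtcars used purely as
# example data): intraclass correlation of mpg within cylinder groups.
# iccMixed(dv = "mpg", id = "cyl", data = mtcars)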
nEffective <- function(n, k, icc, dv, id, data, family = c("gaussian", "binomial")) {
if (any(missing(n), missing(k), missing(icc))) {
if (!is.data.table(data)) {
if (is.data.frame(data)) {
data <- as.data.table(data)
} else {
message("Attempting to coerce data to a data.table")
data <- as.data.table(data)
}
}
stopifnot(all(c(dv, id) %in% names(data)))
stopifnot(is.character(dv))
stopifnot(all(is.character(id)))
stopifnot(identical(length(dv), 1L))
stopifnot(identical(length(id), 1L))
d <- copy(data[, c(dv, id), with = FALSE])
if (missing(icc)) {
icc <- iccMixed(dv = dv, id = id, data = data, family = family)$ICC[1]
}
if (missing(n)) {
n <- length(unique(data[[id]]))
}
if (missing(k)) {
k <- nrow(data) / n
}
}
neff <- (n * k) / ((1 + (k - 1) * icc))
data.table(
Type = c("Effective Sample Size", "Independent Units", "Total Observations"),
N = c(neff, n, n * k))
}
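# Hedged usage sketch: effective sample size for 30 clusters of 10 observations
# with an ICC of 0.20, i.e. n_eff = (n * k) / (1 + (k - 1) * icc).
# nEffective(n = 30, k = 10, icc = 0.20)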
meanDeviations <- function(x, na.rm = TRUE) {
m <- mean(x, na.rm = na.rm)
list(m, x - m)
}
acfByID <- function(xvar, timevar, idvar, data, lag.max = 10L,
na.function = c("na.approx", "na.spline", "na.locf"), ...) {
if (!is.data.table(data)) {
if (is.data.frame(data)) {
data <- as.data.table(data)
} else {
message("Attempting to coerce data to a data.table")
data <- as.data.table(data)
}
}
stopifnot(is.integer(lag.max))
stopifnot(is.character(xvar))
stopifnot(is.character(timevar))
stopifnot(all(c(xvar, timevar) %in% names(data)))
stopifnot(identical(length(xvar), 1L))
stopifnot(identical(length(timevar), 1L))
na.function <- match.arg(na.function)
na.function <- switch(na.function,
na.approx = na.approx,
na.spline = na.spline,
na.locf = na.locf)
if (!missing(idvar)) {
stopifnot(is.character(idvar))
stopifnot(idvar %in% names(data))
stopifnot(identical(length(idvar), 1L))
d <- copy(data[, c(xvar, timevar, idvar), with = FALSE])
} else {
d <- copy(data[, c(xvar, timevar), with = FALSE])
idvar <- "ID"
while(idvar %in% names(d)) {
idvar <- paste0("TMP_", idvar)
}
d[, (idvar) := 1L]
}
d[, .(
Variable = xvar,
Lag = 0:lag.max,
AutoCorrelation = acf(na.function(zoo(get(xvar), order.by = get(timevar))),
lag.max = lag.max, plot = FALSE, ...)$acf[, 1, 1]),
by = idvar]
}
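# Hedged usage sketch (column and object names are illustrative): per-subject
# autocorrelation of a long-format variable up to lag 5 (lag.max must be an
# integer, hence the 5L).
# acfByID(xvar = "mood", timevar = "day", idvar = "subject", data = diary, lag.max = 5L)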
|
expect_colname <- function(expected_colname, colnames) {
expect_true(expected_colname %in% colnames)
}
|
get.placeholder = function(ps=get.ps()) {
ph = ps$rps$placeholder
if (is.null(ph)) return("___")
ph
}
get.placeholder.sym = function() {
as.name(".PH_._")
}
has.call.placeholder = function(call) {
if (!is.character(call)) {
call = deparse1(call)
}
has.substr(call,".PH_._")
}
is.call.placeholder = function(call) {
if (is.null(call)) return(FALSE)
if (!is.character(call)) {
call = deparse1(call)
}
isTRUE(call == ".PH_._")
}
|
update.direction <- function(stat, ref.allele){
  RefAllele <- ref.allele$RefAllele
EffectAllele <- ref.allele$EffectAllele
snps <- names(RefAllele)
nsnp <- length(snps)
A1 <- paste(RefAllele, EffectAllele, sep = '')
names(A1) <- snps
nstudy <- length(stat)
for(i in 1:nstudy){
s <- stat[[i]][, 'SNP']
A2 <- paste(stat[[i]][, 'RefAllele'], stat[[i]][, 'EffectAllele'], sep = '')
names(A2) <- s
id <- which(A2 != A1[s])
if(length(id) == 0){
next
}
d <- strsplit(stat[[i]][id, 'Direction'], '')
d <- sapply(d, function(u){paste(ifelse(u %in% c('+','-'), ifelse(u == '+', '-', '+'), u), collapse = '')})
stat[[i]][id, 'Direction'] <- d
stat[[i]][id, 'RefAllele'] <- RefAllele[s[id]]
stat[[i]][id, 'EffectAllele'] <- EffectAllele[s[id]]
stat[[i]][id, 'BETA'] <- -stat[[i]][id, 'BETA']
}
stat
}
|
collapseClips <-
function(
rec,
start.times,
end.times,
return.times=FALSE
) {
rec <- getClip(rec, output="Wave")
start.times[start.times<0] <- 0
  end.times[end.times > length(rec@left)/rec@samp.rate] <- length(rec@left)/rec@samp.rate
times <- cbind(start.times, end.times)
duration <- end.times - start.times
cum.times <- data.frame(start.time.collapse=cumsum(duration)-duration, end.time.collapse=cumsum(duration))
waves <- apply(times, 1,function(x) cutWave(rec, from=x[1], to=x[2]))
collapsed <- do.call(tuneR::bind, waves)
if(return.times) return(list(wave=collapsed, times=cum.times)) else return(collapsed)
}
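# Hedged usage sketch ('rec' is a hypothetical tuneR Wave object, or a clip
# accepted by getClip()): extract two segments, in seconds, and splice them.
# collapseClips(rec, start.times = c(0, 10), end.times = c(5, 12), return.times = TRUE)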
|
source(here::here('dictionary/utilities.R'))
url <- 'http://databank.worldbank.org/data/download/site-content/CLASS.xls'
filename <- tempfile(fileext = '.xls')
download.file(url, filename, quiet = TRUE)
not_countries <- c("Arab World", "Caribbean small states", "Central Europe and the Baltics", "Early-demographic dividend", "East Asia & Pacific", "East Asia & Pacific (excluding high income)", "East Asia & Pacific (IDA & IBRD)", "Euro area", "Europe & Central Asia", "Europe & Central Asia (excluding high income)", "Europe & Central Asia (IDA & IBRD)", "European Union", "Fragile and conflict affected situations", "Heavily indebted poor countries (HIPC)", "High income", "IBRD only", "IDA & IBRD total", "IDA blend", "IDA only", "IDA total", "Late-demographic dividend", "Latin America & Caribbean", "Latin America & Caribbean (excluding high income)", "Latin America & Caribbean (IDA & IBRD)", "Least developed countries: UN classification", "Low & middle income", "Low income", "Lower middle income", "Middle East & North Africa", "Middle East & North Africa (excluding high income)", "Middle East & North Africa (IDA & IBRD)", "Middle income", "North America", "OECD members", "Other small states", "Pacific island small states", "Post-demographic dividend", "Pre-demographic dividend", "Small states", "South Asia", "South Asia (IDA & IBRD)", "Sub-Saharan Africa", "Sub-Saharan Africa (excluding high income)", "Sub-Saharan Africa (IDA & IBRD)", "Upper middle income", "World")
wb <- read_excel(filename, skip = 6, col_names = letters[1:9]) %>%
select(3:4) %>%
setNames(c('country', 'wb')) %>%
filter(!country %in% not_countries,
!is.na(wb))
wb %>% write_csv('dictionary/data_world_bank.csv', na = "")
|
LocalModel <- R6Class("LocalModel",
inherit = InterpretationMethod,
public = list(
initialize = function(predictor, x.interest, dist.fun = "gower",
kernel.width = NULL, k = 3) {
assert_number(k, lower = 1, upper = predictor$data$n.features)
assert_data_frame(x.interest, null.ok = TRUE)
assert_choice(dist.fun, c(
"gower", "euclidean", "maximum",
"manhattan", "canberra", "binary", "minkowski"
))
if (!require("glmnet")) {
stop("Please install glmnet.")
}
super$initialize(predictor = predictor)
self$k <- k
if (!is.null(x.interest)) {
self$x.interest <- private$match_cols(x.interest)
}
private$weight.fun <- private$get.weight.fun(dist.fun, kernel.width)
if (!is.null(x.interest)) private$run()
},
predict = function(newdata = NULL, ...) {
if (is.null(newdata)) {
newdata <- self$x.interest
} else {
newdata <- private$match_cols(newdata)
}
X.recode <- recode(newdata, self$x.interest)
if (private$multiClass) {
prediction <- predict(self$model,
newx = as.matrix(X.recode),
type = "response"
)
prediction <- data.frame(prediction[, , self$best.fit.index])
colnames(prediction) <- colnames(private$predictResults)
prediction
} else {
prediction <- predict(self$model, newx = as.matrix(X.recode))
pred <- prediction[, self$best.fit.index, drop = FALSE]
colnames(pred) <- NULL
data.frame(prediction = pred)
}
},
explain = function(x.interest) {
self$x.interest <- private$match_cols(x.interest)
private$flush()
private$run()
},
x.interest = NULL,
k = NULL,
model = NULL,
best.fit.index = NULL
),
private = list(
q = function(pred) probs.to.labels(pred),
best.index = NULL,
match_cols = function(newdata) {
self$predictor$data$match_cols(data.frame(newdata))
},
aggregate = function() {
X.recode <- recode(private$dataDesign, self$x.interest)
x.recoded <- recode(self$x.interest, self$x.interest)
fam <- ifelse(private$multiClass, "multinomial", "gaussian")
y <- unlist(private$qResults[1])
w <- private$weight.fun(X.recode, x.recoded)
self$model <- glmnet(
x = as.matrix(X.recode), y = y, family = fam, w = w, intercept = TRUE,
standardize = TRUE, type.multinomial = "grouped"
)
res <- self$model
if (any(res$df == self$k)) {
best.index <- max(which(res$df == self$k))
} else {
best.index <- max(which(res$df < self$k))
warning("Had to choose a smaller k")
}
self$best.fit.index <- best.index
if (private$multiClass) {
class.results <- lapply(res$beta, extract.glmnet.effects,
best.index = best.index, x.recoded = x.recoded,
x.original = self$x.interest
)
res <- data.table::rbindlist(class.results)
res$.class <- rep(names(class.results), each = ncol(X.recode))
} else {
res <- extract.glmnet.effects(
res$beta, best.index, x.recoded,
self$x.interest
)
}
res[res$beta != 0, ]
},
intervene = function() private$dataSample,
generatePlot = function() {
requireNamespace("ggplot2", quietly = TRUE)
p <- ggplot(self$results) +
geom_col(aes(y = effect, x = reorder(feature.value, effect))) +
coord_flip() +
ylab("effect") +
xlab("")
if (!private$multiClass) {
original_prediction <- self$predictor$predict(self$x.interest)[[1]]
p <- p + ggtitle(sprintf(
"Actual prediction: %.2f\nLocalModel prediction: %.2f",
original_prediction, self$predict()
))
}
if (private$multiClass) p <- p + facet_wrap(".class")
p
},
get.weight.fun = function(dist.fun, kernel.width) {
if (dist.fun == "gower") {
require("gower")
function(X, x.interest) {
1 - gower_dist(X, x.interest)
}
} else if (is.character(dist.fun)) {
assert_numeric(kernel.width)
function(X, x.interest) {
d <- dist(rbind(x.interest, X), method = dist.fun)[1:nrow(X)]
sqrt(exp(-(d^2) / (kernel.width^2)))
}
} else {
dist.fun
}
},
weight.fun = NULL
)
)
predict.LocalModel <- function(object, newdata = NULL, ...) {
object$predict(newdata = newdata, ...)
}
plot.LocalModel <- function(object) {
object$plot()
}
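# Hedged usage sketch (iml-style workflow; object names are illustrative):
# 'pred' wraps a fitted model and its training features in a Predictor object.
# pred <- Predictor$new(fitted_model, data = X_train)
# lm1 <- LocalModel$new(pred, x.interest = X_train[1, ], k = 3)
# plot(lm1)
# predict(lm1, newdata = X_train[2, ])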
|
validate_arguments <- function(
argument_obj_list, field_def_obj,
...,
parent_obj,
oh,
skip_variables = FALSE
) {
if (
is.null(argument_obj_list) &&
is.null(field_def_obj$arguments)
) {
return(invisible(TRUE))
}
if (is.null(field_def_obj$arguments)) {
oh$error_list$add(
"5.3.1",
"Arguments supplied, but there are no arguments for field: ", format(field_def_obj$name),
loc = parent_obj$loc
)
return(FALSE)
}
field_arg_map <- field_def_obj$arguments
field_arg_map %>%
lapply("[[", "name") %>%
lapply(format) %>%
unlist() ->
names(field_arg_map)
values_seen <- list()
for (argument_obj in argument_obj_list) {
arg_name <- argument_obj$name
arg_name_str <- format(arg_name)
if (!is.null(values_seen[[arg_name_str]])) {
oh$error_list$add(
"5.3.2",
"duplicate arguments with same name: ", arg_name_str,
loc = parent_obj$loc
)
return(FALSE)
}
arg_value <- argument_obj$value
values_seen[[arg_name_str]] <- arg_value
matching_arg_obj <- field_arg_map[[format(arg_name)]]
if (is.null(matching_arg_obj)) {
oh$error_list$add(
"5.3.1",
"could not find matching arg value with label: ", format(arg_name),
" for field: ", format(field_def_obj$name),
loc = parent_obj$loc
)
return(FALSE)
}
if (inherits(arg_value, "Variable")) {
if (!isTRUE(skip_variables)) {
oh$variable_validator$check_variable(arg_value, matching_arg_obj$type)
}
next
}
validate_value_can_be_coerced(arg_value, matching_arg_obj$type, oh = oh, rule_code = "5.3.3.1")
if (inherits(arg_value, "ObjectValue")) {
validate_input_object_fields(arg_value, oh = oh)
}
}
for (field_arg in field_arg_map) {
if (inherits(field_arg$type, "NonNullType")) {
arg_value <- values_seen[[format(field_arg$name)]]
if (
is.null(arg_value) ||
inherits(arg_value, "NullValue")
) {
oh$error_list$add(
"5.3.3.2",
"null or missing argument not allowed for argument: ", format(field_arg$name),
" for field: ", format(field_def_obj$name),
loc = parent_obj$loc
)
next
}
}
}
invisible(TRUE)
}
|
baseline_sampling<-function(Y, C, A, X, base_line_old, C_prior, sigma_noise, sigma_A, sigma_baseline, sigma_X){
Num_genes=nrow(Y)
Num_samples=ncol(Y)
Num_TFs=ncol(C_prior)
baseline_new=matrix(0, nrow=1, ncol=Num_genes)
for (n in 1:Num_genes){
temp=matrix(0, nrow=1, ncol=Num_samples)
for (m in 1:Num_samples){
for (t in 1:Num_TFs){
temp[m]=temp[m]+A[n,t]*X[t,m]
}
}
mean_baseline = sum(Y[n,]-temp)/Num_samples*sigma_baseline/(sigma_noise+sigma_baseline)
variance_baseline = sigma_baseline*sigma_baseline/(sigma_noise+sigma_baseline)
baseline_new[n]=rnorm(1, mean=mean_baseline, sd=sqrt(variance_baseline))
}
return(baseline_new)
}
|
NULL
NULL
if (!isS4(nrow)) methods::setGeneric("nrow",
function(x) standardGeneric("nrow"))
methods::setMethod("nrow", "OptimizationProblem", function(x) x$nrow())
if (!isS4(ncol)) methods::setGeneric("ncol",
function(x) standardGeneric("ncol"))
methods::setMethod("ncol", "OptimizationProblem", function(x) x$ncol())
if (!exists("ncell") || !isS4(ncell))
methods::setGeneric("ncell", function(x) standardGeneric("ncell"))
methods::setMethod("ncell", "OptimizationProblem", function(x) x$ncell())
methods::setGeneric("modelsense", function(x) standardGeneric("modelsense"))
methods::setMethod("modelsense", "OptimizationProblem",
function(x) x$modelsense())
methods::setGeneric("vtype", function(x) standardGeneric("vtype"))
methods::setMethod("vtype", "OptimizationProblem", function(x) x$vtype())
methods::setGeneric("obj", function(x) standardGeneric("obj"))
methods::setMethod("obj", "OptimizationProblem", function(x) x$obj())
methods::setGeneric("pwlobj", function(x) standardGeneric("pwlobj"))
methods::setMethod("pwlobj", "OptimizationProblem", function(x) x$pwlobj())
methods::setGeneric("A", function(x) standardGeneric("A"))
methods::setMethod("A", "OptimizationProblem", function(x) x$A())
methods::setGeneric("rhs", function(x) standardGeneric("rhs"))
methods::setMethod("rhs", "OptimizationProblem", function(x) x$rhs())
methods::setGeneric("sense", function(x) standardGeneric("sense"))
methods::setMethod("sense", "OptimizationProblem", function(x) x$sense())
methods::setGeneric("lb", function(x) standardGeneric("lb"))
methods::setMethod("lb", "OptimizationProblem", function(x) x$lb())
methods::setGeneric("ub", function(x) standardGeneric("ub"))
methods::setMethod("ub", "OptimizationProblem", function(x) x$ub())
methods::setGeneric("col_ids", function(x) standardGeneric("col_ids"))
methods::setMethod("col_ids", "OptimizationProblem",
function(x) x$col_ids())
methods::setGeneric("row_ids", function(x) standardGeneric("row_ids"))
methods::setMethod("row_ids", "OptimizationProblem", function(x) x$row_ids())
methods::setGeneric("number_of_branches", function(x)
standardGeneric("number_of_branches"))
methods::setMethod("number_of_branches", "OptimizationProblem", function(x)
x$number_of_branches())
methods::setGeneric("get_data", function(x) standardGeneric("get_data"))
methods::setMethod("get_data", "OptimizationProblem", function(x) x$get_data())
as.list.OptimizationProblem <- function(x, ...) {
rcpp_optimization_problem_as_list(x$ptr)
}
|
validate_elements <- function(DT, .progress_cat = FALSE){
out <- rep_len(NA, ncol(DT))
avbl_noms <- names(heims_data_dict)[names(heims_data_dict) %in% names(DT)]
noms <-
if_else(names(DT) %in% avbl_noms,
names(DT),
gsub("A$", "", gsub("_[12]", "", names(DT))))
noms <- gsub("^e([0-9]+)$", "E\\1", noms)
for (n in seq_along(DT)){
nom <- noms[n]
if (.progress_cat){
cat(nom, ".", sep = "")
}
if (!is.null(heims_data_dict[[nom]]) && is.function(heims_data_dict[[nom]]$validate)){
DTn <- DT[[n]]
out[n] <- heims_data_dict[[nom]]$validate(DTn[!is.na(DTn)])
}
}
if (.progress_cat){
cat("\n")
}
names(out) <- names(DT)
out
}
prop_elements_valid <- function(DT, char = FALSE){
out <- rep_len(NA_real_, ncol(DT))
noms <- gsub("A$", "", gsub("_[12]", "", names(DT)))
noms <- gsub("^e([0-9]+)$", "E\\1", noms)
for (n in seq_along(DT)){
nom <- noms[n]
if (!is.null(heims_data_dict[[nom]]) && is.function(heims_data_dict[[nom]]$validate)){
DTn <- DT[[n]]
if (is.function(heims_data_dict[[nom]]$ad_hoc_prepare)){
DTn <- heims_data_dict[[nom]]$ad_hoc_prepare(DTn)
}
DTn <- DTn[!is.na(DTn)]
if (heims_data_dict[[nom]]$validate(DTn)){
out[n] <- if (char) "--" else 1
} else {
if (!is.null(heims_data_dict[[nom]]) && is.function(heims_data_dict[[nom]]$valid)){
prop <- mean(heims_data_dict[[nom]]$valid(DTn), na.rm = TRUE)
out[n] <- if (char) paste0(round(prop * 100), "%") else prop
}
}
}
}
names(out) <- names(DT)
out
}
count_elements_invalid <- function(DT, char = FALSE){
out <- rep_len(NA_real_, ncol(DT))
noms <- gsub("A$", "", gsub("_[12]", "", names(DT)))
noms <- gsub("^e([0-9]+)$", "E\\1", noms)
for (n in seq_along(DT)) {
nom <- noms[n]
if (!is.null(heims_data_dict[[nom]]) && is.function(heims_data_dict[[nom]]$validate)) {
DTn <- DT[[n]]
if (is.function(heims_data_dict[[nom]]$ad_hoc_prepare)) {
DTn <- heims_data_dict[[nom]]$ad_hoc_prepare(DTn)
}
DTn <- DTn[!is.na(DTn)]
if (heims_data_dict[[nom]]$validate(DTn)) {
out[n] <- if (char) "--" else 0L
} else {
if (AND(!is.null(heims_data_dict[[nom]]),
AND("valid" %in% names(heims_data_dict[[nom]]),
is.function(heims_data_dict[[nom]]$valid)))) {
prop <- sum(!heims_data_dict[[nom]]$valid(DTn), na.rm = TRUE)
out[n] <- if (char) paste0(round(prop * 100), "%") else prop
}
}
}
}
names(out) <- names(DT)
out
}
|
test_that("normal distribution fitting and feedback works",{
skip_on_cran()
m <- 10
s <- 20
vals <- c(m - s, m , m + 2 * s)
myfit <- fitdist(vals, pnorm(vals, m, s ))
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(m -0.5*s, m+s))
norm.parameters <- unlist(myfit$Normal)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(norm.parameters) <- NULL
expect_equal(norm.parameters, c(m, s))
expect_equal(best.name, "normal")
expect_equal(fb$fitted.quantiles[, 1],
signif(qnorm(c(0.05, 0.95), m, s),3))
expect_equal(fb$fitted.probabilities[, 1],
signif(pnorm(c(m -0.5*s, m+s), m, s),3))
})
test_that("student-t distribution fitting and feedback works",{
skip_on_cran()
m <- 10
s <- 20
tdftest <- 4
vals <- c(m - s, m , m + 2 * s)
myfit <- fitdist(vals, pt((vals-m)/s, tdftest ), tdf = tdftest)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(m -0.5*s, m+s))
t.parameters <- unlist(myfit$Student.t)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(t.parameters) <- NULL
expect_equal(t.parameters, c(m, s, tdftest), tolerance = 0.001)
expect_equal(best.name, "t")
expect_equal(fb$fitted.quantiles[, "t"],
signif(m + s * qt(c(0.05, 0.95), tdftest),3))
expect_equal(fb$fitted.probabilities[, "t"],
signif(pt(c( -0.5, 1), tdftest),3))
})
test_that("log-t distribution fitting and feedback works",{
skip_on_cran()
m <- log(30)
s <- 0.5
tdftest <- 5
vals <- c(22, 30, 42)
myfit <- fitdist(vals, pt((log(vals) - m) / s, tdftest ), lower = 0, tdf = tdftest)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(25, 55))
lt.parameters <- unlist(myfit$Log.Student.t)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(lt.parameters) <- NULL
expect_equal(lt.parameters, c(m, s, tdftest), tolerance = 0.001)
expect_equal(best.name, "logt")
expect_equal(fb$fitted.quantiles[, "logt"],
signif(exp(m + s * qt(c(0.05, 0.95), tdftest)), 3))
expect_equal(fb$fitted.probabilities[, "logt"],
signif(pt((log(c(25, 55)) - m )/s, tdftest), 3))
})
test_that("mirror log-t distribution fitting and feedback works",{
skip_on_cran()
m <- log(30)
s <- 0.5
tdftest <- 5
vals <- c(22, 30, 42)
u <- 60
myfit <- fitdist(vals, 1 - pt((log(u - vals) - m) / s, tdftest ), lower = 0, tdf = tdftest,
upper = u)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(25, 55))
mirrorlogtparameters <- unlist(myfit$mirrorlogt)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(mirrorlogtparameters) <- NULL
expect_equal(mirrorlogtparameters, c(m, s, tdftest), tolerance = 0.001)
expect_equal(best.name, "mirrorlogt")
expect_equal(fb$fitted.quantiles[, "mirrorlogt"],
signif(u - exp(m + s * qt(1 - c(0.05, 0.95), tdftest)), 3))
expect_equal(fb$fitted.probabilities[, "mirrorlogt"],
signif(1 - pt((log(u - c(25, 55)) - m )/s, tdftest), 3))
})
test_that("scaled beta distribution fitting and feedback works",{
skip_on_cran()
a <- 5
b <- 20
l <- 10
u <- 60
vals <- c(18, 20, 24)
myfit <- fitdist(vals, pbeta((vals-l)/(u-l), a, b ), lower = l, upper = u)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(19, 29))
beta.parameters <- unlist(myfit$Beta)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(beta.parameters) <- NULL
expect_equal(beta.parameters, c(a, b), tolerance = 0.001)
expect_equal(best.name, "beta")
expect_equal(fb$fitted.quantiles[, "beta"],
signif(l + (u-l) * qbeta(c(0.05, 0.95), a, b),3))
expect_equal(fb$fitted.probabilities[, "beta"],
signif(pbeta((c(19, 29)-l)/(u-l), a, b),3))
})
test_that("shifted lognormal distribution fitting and feedback works",{
skip_on_cran()
l <- -100
m <- log(30)
s <- 0.5
vals <- c(22, 30, 42) + l
myfit <- fitdist(vals, plnorm(vals - l, m, s ), lower = l)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(25, 55) + l)
lnorm.parameters <- unlist(myfit$Log.normal)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(lnorm.parameters) <- NULL
expect_equal(lnorm.parameters, c(m, s), tolerance = 0.001)
expect_equal(best.name, "lognormal")
expect_equal(fb$fitted.quantiles[, "lognormal"],
signif(l + qlnorm(c(0.05, 0.95), m, s),3))
expect_equal(fb$fitted.probabilities[, "lognormal"],
signif(plnorm(c(25, 55), m, s),3))
})
test_that("shifted lognormal distribution fitting and feedback works",{
skip_on_cran()
m <- 2
s <- 0.5
l <- -10
vals <- c(-6, -2, 2, 6)
myfit <- fitdist(vals, plnorm(vals - l, m, s ), lower = l)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(-4, 4))
lnorm.parameters <- unlist(myfit$Log.normal)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(lnorm.parameters) <- NULL
expect_equal(lnorm.parameters, c(m, s), tolerance = 0.001)
expect_equal(best.name, "lognormal")
expect_equal(fb$fitted.quantiles[, "lognormal"],
signif(qlnorm(c(0.05, 0.95), m, s)+l, 3) )
expect_equal(fb$fitted.probabilities[, "lognormal"],
signif(plnorm(c(-4, 4) - l, m, s),3))
})
test_that("mirror lognormal distribution fitting and feedback works",{
skip_on_cran()
m <- 2
s <- 0.5
u <- 10
vals <- c(-6, -2, 2, 6)
myfit <- fitdist(vals, 1 - plnorm(u - vals, m, s ), lower = -20,
upper = u)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(-4, 4))
mirrorlognormalparameters <- unlist(myfit$mirrorlognormal)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(mirrorlognormalparameters) <- NULL
expect_equal(mirrorlognormalparameters, c(m, s),
tolerance = 0.001)
expect_equal(best.name, "mirrorlognormal")
expect_equal(fb$fitted.quantiles[, "mirrorlognormal"],
signif(u - qlnorm(1 - c(0.05, 0.95), m, s), 3) )
expect_equal(fb$fitted.probabilities[, "mirrorlognormal"],
signif(1 - plnorm(u - c(-4, 4), m, s),3))
})
test_that("shifted gamma distribution fitting and feedback works",{
skip_on_cran()
a <- 50
b <- 2
l <- 10
vals <- c(32, 35, 37)
myfit <- fitdist(vals, pgamma(vals-l, a, b ), lower = l)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(33, 40))
gamma.parameters <- unlist(myfit$Gamma)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(gamma.parameters) <- NULL
expect_equal(gamma.parameters, c(a, b), tolerance = 0.001)
expect_equal(best.name, "gamma")
expect_equal(fb$fitted.quantiles[, "gamma"],
signif(l + qgamma(c(0.05, 0.95), a, b),3))
expect_equal(fb$fitted.probabilities[, "gamma"],
signif(pgamma(c(33, 40)-l, a, b),3))
})
test_that("mirror gamma distribution fitting and feedback works",{
skip_on_cran()
a <- 50
b <- 2
u <- 25
p <- c(0.25, 0.5 , 0.75)
v <- u - qgamma(1 - p, a, b)
myfit <- fitdist(vals = v, probs = p, lower = -10,
upper = u)
fb <- feedback(myfit, quantiles=c(0.05, 0.95), values = c(-3, 3))
mirrorgammaparameters <- unlist(myfit$mirrorgamma)
best.name <- as.character(unlist(myfit$best.fitting))
attributes(mirrorgammaparameters) <- NULL
expect_equal(mirrorgammaparameters, c(a, b), tolerance = 0.001)
expect_equal(best.name, "mirrorgamma")
expect_equal(fb$fitted.quantiles[, "mirrorgamma"],
signif(u - qgamma(1 - c(0.05, 0.95), a, b),3))
expect_equal(fb$fitted.probabilities[, "mirrorgamma"],
signif(1 - pgamma(u - c(-3, 3), a, b),3))
})
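# Precision elicitation checks: the precision 1/sigma^2 is modelled as
# Gamma(a, b). Supplying P(X in interval) evaluated at the 0.05 and 0.95
# quantiles of that precision distribution should let fitprecision() recover
# (a, b), both on the identity scale (normal) and on the log scale (lognormal).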
test_that("precision fitting works - normal",{
skip_on_cran()
med <- 10
k <- 1
a <- 3
b <- 4
sigmasq <- 1 / qgamma(c(0.05, 0.95), a, b)
probs1 <- pnorm(rep(med + k, 2), med, sigmasq^0.5) - 0.5
pfit1 <- fitprecision(c(med, med + k), probs1, pplot = F)
gamma.parameters1 <- unlist(pfit1$Gamma)
attributes(gamma.parameters1) <- NULL
expect_equal(gamma.parameters1, c(a, b), tolerance = 1e-4)
probs2 <- pnorm(rep(med - k, 2), med, sigmasq^0.5)
pfit2 <- fitprecision(c(-Inf, med - k), probs2, med = med,
pplot = F)
gamma.parameters2 <- unlist(pfit2$Gamma)
attributes(gamma.parameters2) <- NULL
expect_equal(gamma.parameters2, c(a, b), tolerance = 1e-4)
probs3 <- 1 - pnorm(rep(med + k, 2), med, sigmasq^0.5)
pfit3 <- fitprecision(c(med + k, Inf), probs3, med = med,
pplot = F)
gamma.parameters3 <- unlist(pfit3$Gamma)
attributes(gamma.parameters3) <- NULL
expect_equal(gamma.parameters3, c(a, b), tolerance = 1e-4)
})
test_that("precision fitting works - lognormal",{
skip_on_cran()
med <- 10
k <- 5
a <- 3
b <- 4
sigmasq <- 1 / qgamma(c(0.05, 0.95), a, b)
probs1 <- plnorm(rep(med + k, 2), log(med), sigmasq^0.5) - 0.5
pfit1 <- fitprecision(interval = c(med, med + k), propvals = probs1,
trans = "log", pplot = F)
gamma.parameters1 <- unlist(pfit1$Gamma)
attributes(gamma.parameters1) <- NULL
expect_equal(gamma.parameters1, c(a, b), tolerance = 1e-4)
probs2 <- plnorm(rep(med - k, 2), log(med), sigmasq^0.5)
pfit2 <- fitprecision(interval = c(-Inf, med - k), propvals = probs2,
med = med,
trans = "log", pplot = F)
gamma.parameters2 <- unlist(pfit2$Gamma)
attributes(gamma.parameters2) <- NULL
expect_equal(gamma.parameters2, c(a, b), tolerance = 1e-4)
probs3 <- 1 - plnorm(rep(med + k, 2), log(med), sigmasq^0.5)
pfit3 <- fitprecision(interval = c(med + k, Inf), propvals = probs3,
med = med,
trans = "log", pplot = F)
gamma.parameters3 <- unlist(pfit3$Gamma)
attributes(gamma.parameters3) <- NULL
expect_equal(gamma.parameters3, c(a, b), tolerance = 1e-4)
})
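# Linear pool checks: plinearpool() evaluates the weighted average of the
# experts' fitted CDFs, F(x) = w1 F1(x) + w2 F2(x) + w3 F3(x), and
# qlinearpool() inverts it, so the returned quantile q must satisfy F(q) = qu.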
test_that("linear pooling works",{
skip_on_cran()
p1 <- c(0.25, 0.5, 0.75)
a <- 10; b <- 4
v1 <- qgamma(p1, a, b)
mu <- 3 ; sigma <- 2
v2 <- qnorm(p1, mu, sigma)
v3 <- qlnorm(p1, log(mu), sigma)
V <- matrix(c(v1, v2, v3), 3, 3)
myfit <- fitdist(vals = V, probs = p1, lower = 0)
w1 <- 1/6; w2 <- 2/6; w3 <- 3/6
xtest <- 1.5
qu <- 0.95
qlp <- qlinearpool(myfit, qu, w = c(w1, w2, w3))
qcheck <- w1 * pgamma(qlp, a, b) +
w2 * pnorm(qlp, mu, sigma) +
w3 * plnorm(qlp, log(mu), sigma)
expect_equal(qcheck, qu , tolerance = 1e-4)
expect_equal(plinearpool(myfit, qlp, w = c(w1, w2, w3)),
qu , tolerance = 1e-4)
plp <- plinearpool(myfit, x = xtest, w = c(w1, w2, w3))
pcheck <- w1 * pgamma(xtest, a, b) +
w2 * pnorm(xtest, mu, sigma) +
w3 * plnorm(xtest, log(mu), sigma)
expect_equal(plp, pcheck , tolerance = 1e-4)
})
test_that("linear pooling works - different lower limits",{
skip_on_cran()
llimits <- c(-2, 1, -4)
p1 <- c(0.25, 0.5, 0.6, 0.75)
a <- 10; b <- 4
v1 <- llimits[1] + qgamma(p1, a, b)
mu <- 3 ; sigma <- 2
v2 <- llimits[2] + qlnorm(p1, log(mu), sigma)
v3 <- llimits[3] + exp(1 + 2 * qt(p1, 3))
V <- matrix(c(v1, v2, v3), length(p1), 3)
myfit <- fitdist(vals = V, probs = p1, lower = llimits)
w1 <- 1/6; w2 <- 2/6; w3 <- 3/6
xtest <- 3
qu <- 0.03
qlp <- qlinearpool(myfit, qu, w = c(w1, w2, w3))
qcheck <- w1 * pgamma(qlp - llimits[1], a, b) +
w2 * plnorm(qlp - llimits[2], log(mu), sigma) +
w3 * pt((log(qlp - llimits[3]) - 1) / 2 , 3)
expect_equal(qcheck, qu , tolerance = 1e-4)
expect_equal(plinearpool(myfit, qlp, w = c(w1, w2, w3)),
qu , tolerance = 1e-4)
plp <- plinearpool(myfit, x = xtest, w = c(w1, w2, w3))
pcheck <- w1 * pgamma(xtest - llimits[1], a, b) +
w2 * plnorm(xtest - llimits[2], log(mu), sigma) +
w3 * pt((log(xtest - llimits[3]) - 1) / 2, 3)
expect_equal(plp, pcheck , tolerance = 1e-4)
})
|
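# S4 class holding the components of a fitted model (coefficients, standard
# errors, log-likelihood, AIC, fitted values, ...) together with an anova()
# method that compares two fits of the same data via AIC, BIC and a
# likelihood-ratio (deviance) test on the difference in number of coefficients.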
setClass("cold", representation(coefficients = "matrix", se = "matrix", covariance = "matrix", correlation="matrix",
log.likelihood="numeric", message ="integer",n.cases="numeric", ni.cases="numeric", aic="numeric",
Fitted="numeric", bi.estimate="matrix",Fitted.av="numeric", Time="numeric", model.matrix= "matrix", y.matrix="matrix",
random.matrix="matrix", subset.data="data.frame",final.data="data.frame", y.av="numeric", data.id="numeric",
call="language"))
setMethod(f="anova", signature(object = "cold"),
function(object,...)
{
dots <- list(...)
object <- list(object, ...)
if(length(object)<2)
stop("single argument anova not implemented")
x<-object[[1]]
y<-object[[2]]
data1<[email protected]
data2<[email protected]
if(!isTRUE(all.equal(data1,data2)))
stop("all models must be fit to the same data object")
m1<-x@call$formula
m2<-y@call$formula
x3<-x@call$dependence
y3<-y@call$dependence
if(m1==m2&&x3==y3 )
stop("models are identical")
if(length(x@coefficients)>length(y@coefficients)){
x1<[email protected]
x2<-x@aic
y1<[email protected]
y2<-y@aic
n1<-length(x@coefficients)
n2<-length(y@coefficients)
n11<-length(x@Time)
n22<-length(y@Time)
bic1<--2*[email protected]+length(x@coefficients)*log(n11)
bic2<--2*[email protected]+length(y@coefficients)*log(n22)
X3<-c(bic1,bic2)
df<-length(x@coefficients)-length(y@coefficients)
}
else {
x1<[email protected]
x2<-y@aic
y1<[email protected]
y2<-x@aic
data1<[email protected]
data2<[email protected]
n11<-length(y@Time)
n22<-length(x@Time)
df<-length(y@coefficients)-length(x@coefficients)
bic1<--2*[email protected]+length(y@coefficients)*log(n11)
bic2<--2*[email protected]+length(x@coefficients)*log(n22)
X3<-c(bic2,bic1)
}
X1<-c("Model1 ","Model2 ")
X2<-c(x@aic,y@aic)
X31<-c([email protected],[email protected])
teste<-2*(x1-y1)
p<-1-pchisq(teste,df)
X4<-c(" ",round(teste,3))
X5<-c(" ",round(df,0))
X6<-c(" ",formatC(p))
tabela1<-data.frame(X1,X2,X3,X31,X4,X5,X6)
names(tabela1)<-c(" ","AIC","BIC","logLik"," Deviance","df", "p-value")
cat("\nData: ")
print(x@call$data)
cat("\nModel1: ")
print(x@call$formula)
a<-x@call$dependence
cat("dependence =",a,"\n")
cat("Model2: ")
print(y@call$formula)
b<-y@call$dependence
cat("dependence =",b,"\n")
cat(" \n")
print(tabela1,row.names=FALSE)
}
)
|
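# BFGS maximisation of the restricted (REML) log-likelihood: the variance
# components in Psi are mapped to an unconstrained parameter vector, optimised
# with optim(), and mapped back. reml.loglik.fn/reml.loglik.gr and
# Psi2par/par2Psi are package internals defined elsewhere in this source.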
reml.newton <-
function(Psi, Xlist, Zlist, ylist, Slist, nalist, rep, k, q, nall, const, bscov,
fix, control) {
par <- unlist(Psi2par(Psi,bscov,k,q,fix))
fn <- reml.loglik.fn
gr <- if(is.null(Zlist) && bscov=="unstr") reml.loglik.gr else NULL
if(control$showiter) cat("Newton iterations:\n")
opt <- optim(par=par,fn=fn,gr=gr,Xlist=Xlist,Zlist=Zlist,ylist=ylist,
Slist=Slist,nalist=nalist,rep=rep,k=k,q=q,nall=nall,const=const,bscov=bscov,
fix=fix,method="BFGS",control=control$optim,hessian=control$hessian)
Psi <- par2Psi(opt$par,bscov,k,q,fix)
list(Psi=Psi,par=opt$par,logLik=opt$value,converged=opt$convergence==0,
niter=opt$counts[[2]],hessian=opt$hessian)
}
|
"piracy"
|
NULL
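# Machine-generated request/response shape constructors for the AWS Certificate
# Manager Private CA (acmpca) API; populate() fills each shape template with
# the arguments supplied by the caller. These templates are normally generated
# rather than edited by hand.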
.acmpca$create_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityConfiguration = structure(list(KeyAlgorithm = structure(logical(0), tags = list(type = "string")), SigningAlgorithm = structure(logical(0), tags = list(type = "string")), Subject = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsrExtensions = structure(list(KeyUsage = structure(list(DigitalSignature = structure(logical(0), tags = list(type = "boolean")), NonRepudiation = structure(logical(0), tags = list(type = "boolean")), KeyEncipherment = structure(logical(0), tags = list(type = "boolean")), DataEncipherment = structure(logical(0), tags = list(type = "boolean")), KeyAgreement = structure(logical(0), tags = list(type = "boolean")), KeyCertSign = structure(logical(0), tags = list(type = "boolean")), CRLSign = structure(logical(0), tags = list(type = "boolean")), EncipherOnly = structure(logical(0), tags = list(type = "boolean")), DecipherOnly = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), SubjectInformationAccess = structure(list(structure(list(AccessMethod = structure(list(CustomObjectIdentifier = structure(logical(0), tags = list(type = "string")), AccessMethodType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), AccessLocation = structure(list(OtherName = structure(list(TypeId = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Rfc822Name = structure(logical(0), tags = list(type = "string")), DnsName = structure(logical(0), tags = list(type = "string")), DirectoryName = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), EdiPartyName = structure(list(PartyName = structure(logical(0), tags = list(type = "string")), NameAssigner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), UniformResourceIdentifier = structure(logical(0), tags = list(type = "string")), IpAddress = structure(logical(0), tags = list(type = "string")), RegisteredId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure", box = TRUE))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), RevocationConfiguration = structure(list(CrlConfiguration = structure(list(Enabled = structure(logical(0), tags = list(box = TRUE, type = "boolean")), ExpirationInDays = structure(logical(0), tags = list(box = TRUE, type = "integer")), CustomCname = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), CertificateAuthorityType = structure(logical(0), tags = list(type = "string")), IdempotencyToken = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$create_certificate_authority_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$create_certificate_authority_audit_report_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string")), AuditReportResponseFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$create_certificate_authority_audit_report_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AuditReportId = structure(logical(0), tags = list(type = "string")), S3Key = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$create_permission_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Principal = structure(logical(0), tags = list(type = "string")), SourceAccount = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$create_permission_output <- function(...) {
list()
}
.acmpca$delete_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), PermanentDeletionTimeInDays = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$delete_certificate_authority_output <- function(...) {
list()
}
.acmpca$delete_permission_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Principal = structure(logical(0), tags = list(type = "string")), SourceAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$delete_permission_output <- function(...) {
list()
}
.acmpca$delete_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$delete_policy_output <- function(...) {
list()
}
.acmpca$describe_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$describe_certificate_authority_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthority = structure(list(Arn = structure(logical(0), tags = list(type = "string")), OwnerAccount = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), LastStateChangeAt = structure(logical(0), tags = list(type = "timestamp")), Type = structure(logical(0), tags = list(type = "string")), Serial = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), NotBefore = structure(logical(0), tags = list(type = "timestamp")), NotAfter = structure(logical(0), tags = list(type = "timestamp")), FailureReason = structure(logical(0), tags = list(type = "string")), CertificateAuthorityConfiguration = structure(list(KeyAlgorithm = structure(logical(0), tags = list(type = "string")), SigningAlgorithm = structure(logical(0), tags = list(type = "string")), Subject = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsrExtensions = structure(list(KeyUsage = structure(list(DigitalSignature = structure(logical(0), tags = list(type = "boolean")), NonRepudiation = structure(logical(0), tags = list(type = "boolean")), KeyEncipherment = structure(logical(0), tags = list(type = "boolean")), DataEncipherment = structure(logical(0), tags = list(type = "boolean")), KeyAgreement = structure(logical(0), tags = list(type = "boolean")), KeyCertSign = structure(logical(0), tags = list(type = "boolean")), CRLSign = structure(logical(0), tags = list(type = "boolean")), EncipherOnly = structure(logical(0), tags = list(type = "boolean")), DecipherOnly = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), SubjectInformationAccess = structure(list(structure(list(AccessMethod = structure(list(CustomObjectIdentifier = structure(logical(0), tags = list(type = "string")), AccessMethodType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), AccessLocation = structure(list(OtherName = structure(list(TypeId = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Rfc822Name = structure(logical(0), tags = list(type = "string")), DnsName = structure(logical(0), tags = list(type = "string")), DirectoryName = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), tags = list(type = 
"string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), EdiPartyName = structure(list(PartyName = structure(logical(0), tags = list(type = "string")), NameAssigner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), UniformResourceIdentifier = structure(logical(0), tags = list(type = "string")), IpAddress = structure(logical(0), tags = list(type = "string")), RegisteredId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure", box = TRUE))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), RevocationConfiguration = structure(list(CrlConfiguration = structure(list(Enabled = structure(logical(0), tags = list(box = TRUE, type = "boolean")), ExpirationInDays = structure(logical(0), tags = list(box = TRUE, type = "integer")), CustomCname = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), RestorableUntil = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$describe_certificate_authority_audit_report_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), AuditReportId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$describe_certificate_authority_audit_report_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AuditReportStatus = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string")), S3Key = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), CertificateArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Certificate = structure(logical(0), tags = list(type = "string")), CertificateChain = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_authority_certificate_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_authority_certificate_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Certificate = structure(logical(0), tags = list(type = "string")), CertificateChain = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_authority_csr_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_certificate_authority_csr_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Csr = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$get_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$import_certificate_authority_certificate_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Certificate = structure(logical(0), tags = list(type = "blob")), CertificateChain = structure(logical(0), tags = list(type = "blob"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$import_certificate_authority_certificate_output <- function(...) {
list()
}
.acmpca$issue_certificate_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Csr = structure(logical(0), tags = list(type = "blob")), SigningAlgorithm = structure(logical(0), tags = list(type = "string")), TemplateArn = structure(logical(0), tags = list(type = "string")), Validity = structure(list(Value = structure(logical(0), tags = list(box = TRUE, type = "long")), Type = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), IdempotencyToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$issue_certificate_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_certificate_authorities_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer")), ResourceOwner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_certificate_authorities_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorities = structure(list(structure(list(Arn = structure(logical(0), tags = list(type = "string")), OwnerAccount = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), LastStateChangeAt = structure(logical(0), tags = list(type = "timestamp")), Type = structure(logical(0), tags = list(type = "string")), Serial = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), NotBefore = structure(logical(0), tags = list(type = "timestamp")), NotAfter = structure(logical(0), tags = list(type = "timestamp")), FailureReason = structure(logical(0), tags = list(type = "string")), CertificateAuthorityConfiguration = structure(list(KeyAlgorithm = structure(logical(0), tags = list(type = "string")), SigningAlgorithm = structure(logical(0), tags = list(type = "string")), Subject = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsrExtensions = structure(list(KeyUsage = structure(list(DigitalSignature = structure(logical(0), tags = list(type = "boolean")), NonRepudiation = structure(logical(0), tags = list(type = "boolean")), KeyEncipherment = structure(logical(0), tags = list(type = "boolean")), DataEncipherment = structure(logical(0), tags = list(type = "boolean")), KeyAgreement = structure(logical(0), tags = list(type = "boolean")), KeyCertSign = structure(logical(0), tags = list(type = "boolean")), CRLSign = structure(logical(0), tags = list(type = "boolean")), EncipherOnly = structure(logical(0), tags = list(type = "boolean")), DecipherOnly = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), SubjectInformationAccess = structure(list(structure(list(AccessMethod = structure(list(CustomObjectIdentifier = structure(logical(0), tags = list(type = "string")), AccessMethodType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), AccessLocation = structure(list(OtherName = structure(list(TypeId = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Rfc822Name = structure(logical(0), tags = list(type = "string")), DnsName = structure(logical(0), tags = list(type = "string")), DirectoryName = structure(list(Country = structure(logical(0), tags = list(type = "string")), Organization = structure(logical(0), tags = list(type = "string")), OrganizationalUnit = structure(logical(0), tags = list(type = "string")), DistinguishedNameQualifier = structure(logical(0), 
tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CommonName = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Locality = structure(logical(0), tags = list(type = "string")), Title = structure(logical(0), tags = list(type = "string")), Surname = structure(logical(0), tags = list(type = "string")), GivenName = structure(logical(0), tags = list(type = "string")), Initials = structure(logical(0), tags = list(type = "string")), Pseudonym = structure(logical(0), tags = list(type = "string")), GenerationQualifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), EdiPartyName = structure(list(PartyName = structure(logical(0), tags = list(type = "string")), NameAssigner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), UniformResourceIdentifier = structure(logical(0), tags = list(type = "string")), IpAddress = structure(logical(0), tags = list(type = "string")), RegisteredId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure", box = TRUE))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), RevocationConfiguration = structure(list(CrlConfiguration = structure(list(Enabled = structure(logical(0), tags = list(box = TRUE, type = "boolean")), ExpirationInDays = structure(logical(0), tags = list(box = TRUE, type = "integer")), CustomCname = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), RestorableUntil = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_permissions_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_permissions_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Permissions = structure(list(structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), Principal = structure(logical(0), tags = list(type = "string")), SourceAccount = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_tags_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$list_tags_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$put_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), Policy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$put_policy_output <- function(...) {
list()
}
.acmpca$restore_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$restore_certificate_authority_output <- function(...) {
list()
}
.acmpca$revoke_certificate_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), CertificateSerial = structure(logical(0), tags = list(type = "string")), RevocationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$revoke_certificate_output <- function(...) {
list()
}
.acmpca$tag_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$tag_certificate_authority_output <- function(...) {
list()
}
.acmpca$untag_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$untag_certificate_authority_output <- function(...) {
list()
}
.acmpca$update_certificate_authority_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CertificateAuthorityArn = structure(logical(0), tags = list(type = "string")), RevocationConfiguration = structure(list(CrlConfiguration = structure(list(Enabled = structure(logical(0), tags = list(box = TRUE, type = "boolean")), ExpirationInDays = structure(logical(0), tags = list(box = TRUE, type = "integer")), CustomCname = structure(logical(0), tags = list(type = "string")), S3BucketName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.acmpca$update_certificate_authority_output <- function(...) {
list()
}
|
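# Split extracted dose rows by drug-name pattern (the ... arguments are regular
# expressions matched against the 'drugname' column), run makeDose() on each
# subset, and recombine the note- and date-level results in their original
# order. Minimal usage sketch (hypothetical patterns and input):
#   collapseDose(x, noteMetaData, naFreq = 'most', 'lamotrigine|ltg', 'xr|er')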
collapseDose <- function(x, noteMetaData, naFreq = 'most', ...) {
ld <- list(...)
if(length(ld)) {
dn <- x[['drugname']]
path <- numeric(length(dn))
for(i in seq_along(ld)) {
path[grepl(ld[[i]], dn, ignore.case = TRUE) & path == 0] <- i
}
sx <- split(x, path)
res <- lapply(sx, makeDose, noteMetaData, naFreq)
rn <- do.call(qrbind, lapply(res, function(i) i[['note']]))
rn <- rn[order(rn[[1]]),]
rownames(rn) <- NULL
rd <- do.call(qrbind, lapply(res, function(i) i[['date']]))
rd <- rd[order(rd[[1]]),]
rownames(rd) <- NULL
nx <- list(note = rn, date = rd)
} else {
nx <- makeDose(x, noteMetaData, naFreq)
}
nx
}
|
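# Dispatch a summary for objects fitted by BayesProbitHSD(), BayesProbitARMA()
# or BayesCumulativeProbitHSD(), identified from the recorded call; anything
# else is rejected with an error.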
BayesRobustProbitSummary = function(object, digits = max(1L, getOption("digits") - 4L))
{
if(object$call[1]!="BayesProbitHSD()" & object$call[1]!="BayesProbitARMA()" & object$call[1]!="BayesCumulativeProbitHSD()")
stop("Please input the correct object!!")
if(object$call[1] == "BayesProbitHSD()")
output = BayesProbitHSD.Summary(object, digits = digits)
if(object$call[1] == "BayesProbitARMA()")
output = BayesProbitARMA.Summary(object, digits = digits)
if(object$call[1] == "BayesCumulativeProbitHSD()")
output = BayesProbitHSD.Summary(object, digits = digits)
output
}
|
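# Monte Carlo test of independence between two or three point processes: the
# observed distances (DistObs) are ranked against nsim patterns simulated in
# parallel on 'cores' workers, a Kolmogorov-Smirnov-type statistic is computed
# on those ranks, and the null of independence is rejected when the resulting
# p-value falls below alpha.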
TestIndNH <-
function(posx, posy, posz=NULL, alpha=0.05, nsim=100,PA=FALSE,cores=1,
type='Poisson',lambdaMarg=NULL, lambdaParent=NULL, lambdaNumP=NULL,
dist='normal',sigmaC=1, minC=-1, maxC=1,fixed.seed=NULL)
{
NumProcess<-2+(is.null(posz)==F)
if (is.null(lambdaMarg)) Tf<-length(lambdaParent)
else Tf<-dim(lambdaMarg)[1]
n<-length(posx)
distObs<-DistObs(posx=posx,posy=posy,posz=posz, Tf=Tf,
drawpoints='F', PA=PA)
cl<-makeCluster(cores)
clusterExport(cl, c('simNHPc'))
clusterExport(cl, objects(, envir = .GlobalEnv))
if (is.null(fixed.seed)) matdist<- parSapply(cl, c(1:nsim), FUN=fn2, posx=posx, NumProcess=NumProcess,
PA=PA, type=type, dist=dist,lambdaMarg=lambdaMarg, lambdaParent=lambdaParent,
lambdaNumP=lambdaNumP, sigmaC=sigmaC,minC=minC, maxC=maxC)
else matdist<- parSapply(cl, c(1:nsim), FUN=fn2fix, posx=posx, NumProcess=NumProcess,
PA=PA, type=type, dist=dist,lambdaMarg=lambdaMarg, lambdaParent=lambdaParent,
lambdaNumP=lambdaNumP, sigmaC=sigmaC,minC=minC, maxC=maxC, fixed.seed=fixed.seed)
matdistT<-cbind(distObs,matdist)
matperT<-parSapply(cl, c(1:length(distObs)), FUN=mirank,mat=matdistT)/(nsim+1)
KSest<-parSapply(cl, c(1:(nsim+1)), FUN=miKS,mat=matperT)
stopCluster(cl)
KSpv<-1-rank(KSest)[1]/(nsim+1)
names(KSpv)<-"p-value"
reject<-as.numeric(KSpv<alpha)
return(list(pv=KSpv, reject=reject, est=KSest))
}
|
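# Tests for as.orcid(): coercion from an ORCID iD string and from an existing
# or_cid object, plus failure on unsupported input. HTTP interactions are
# replayed from vcr cassettes and the tests are skipped on CRAN.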
test_that("as.orcid", {
skip_on_cran()
vcr::use_cassette("as_orcid", {
aa <- as.orcid("0000-0002-1642-628X")
})
expect_is(aa, "or_cid")
expect_is(aa[[1]], "list")
expect_named(aa, "0000-0002-1642-628X")
})
test_that("as.orcid accepts itself, or_cid class", {
skip_on_cran()
vcr::use_cassette("as_orcid_accepts_self_or_or_cid_class", {
tmp <- as.orcid("0000-0002-1642-628X")
bb <- as.orcid(tmp)
})
expect_is(bb, "or_cid")
expect_is(bb[[1]], "list")
expect_identical(bb, tmp)
expect_identical(bb[[1]], tmp[[1]])
})
test_that("as.orcid fails well", {
skip_on_cran()
expect_error(as.orcid(5), "no 'as.orcid' method")
expect_error(as.orcid(list(a = 6)), "no 'as.orcid' method for numeric")
vcr::use_cassette("as_orcid_invalid_orcid_id", {
expect_error(as.orcid("adfafadf"), class = "error")
})
})
|
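# Read the user configuration (a list or a file to be sourced), loop over the
# requested climate models and parameters, aggregate each model's output files
# over the chosen subregions, seasons and periods, and return (and write to
# CSV) a WUX data frame of climate-change signals or of time series.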
models2wux <- function(userinput,
modelinput = NULL
) {
cat("\n")
if (!is.list(userinput)) {
user.input.sourced <- source(userinput, local = TRUE)
user.input <- user.input.sourced$value
} else {
user.input <- userinput
}
save.as.data <- user.input$save.as.data
if ( !file.exists(dirname(save.as.data)) )
dir.create(dirname(save.as.data), recursive = TRUE)
if ( !is.null(user.input$plot.subregions$save.subregions.plots) ) {
save.subregions.plots <- user.input$plot.subregions$save.subregions.plots
if ( !file.exists(save.subregions.plots) )
dir.create(save.subregions.plots, recursive = TRUE)
} else {
save.subregions.plots <- NULL
}
if ( !is.null(user.input$area.fraction) ) {
does.area.fraction <- user.input$area.fraction
} else {
does.area.fraction <- FALSE
}
if ( !is.null(user.input$use.land.mask) ) {
use.land.mask <- user.input$use.land.mask
print("USING LAND-SEA MASK")
} else {
use.land.mask <- FALSE
}
if ( !is.null(user.input$spatial.weighting) ) {
does.spatial.weighting <- user.input$spatial.weighting
} else {
does.spatial.weighting <- FALSE
}
reference.period <- user.input$reference.period
scenario.period <- user.input$scenario.period
periods <- c("reference.period" = reference.period,
"scenario.period" = scenario.period)
if ( !is.null(user.input$na.rm) ) {
na.rm <- user.input$na.rm
} else {
na.rm <- FALSE
}
subregions <- user.input$subregions
temporal.aggr <- user.input$temporal.aggregation
parameter.names <- user.input$parameter.names
wux.df.list <- vector("list", length(parameter.names))
names(wux.df.list) <- parameter.names
for ( parameter.longname in parameter.names ) {
cat("\n******************************************************************")
cat("****\n", "PROCESSING PARAMETER \"", parameter.longname,
"\"\n", sep = "")
cat("******************************************************************")
cat("****\n")
wux.df <- data.frame()
user.model.names <- user.input$climate.models
model.input <- ReadFromModelDictionary(user.model.names, modelinput)
model.names <- names(model.input)
for (modelname.counter in model.names) {
model.pars <- model.input[[modelname.counter]]$parameters
parameter.shortname <- GetParameterShortName(parameter.longname,
model.pars)
institute.name <- model.input[[modelname.counter]]$institute
global.model.name <- model.input[[modelname.counter]]$gcm
gcm.run <- model.input[[modelname.counter]]$gcm.run
rcm.name <- model.input[[modelname.counter]]$rcm
emission.scenario <- model.input[[modelname.counter]]$emission.scenario
is.corrected <- model.input[[modelname.counter]]$corrected.data
resolution <- model.input[[modelname.counter]]$resolution
what.timesteps <- model.input[[modelname.counter]]$what.timesteps
model.ref.per <- model.input[[modelname.counter]]$reference.period
model.scn.per <- model.input[[modelname.counter]]$scenario.period
years.in.ref.per.files <-
model.input[[modelname.counter]]$years.in.ref.per.files
years.in.scn.per.files <-
model.input[[modelname.counter]]$years.in.scn.per.files
time.units <- model.input[[modelname.counter]]$time.units
calendar <- model.input[[modelname.counter]]$calendar
count.first.time.value <-
model.input[[modelname.counter]]$count.first.time.value
lonlat.var.name <-
model.input[[modelname.counter]]$lonlat.var.name
grid.filename <-
file.path(model.input[[modelname.counter]]$gridfile.path,
model.input[[modelname.counter]]$gridfile.filename)
subregion.filename <-
file.path(model.input[[modelname.counter]]$subregion.path,
model.input[[modelname.counter]]$subregion.filename)
if (length(subregion.filename) == 0)
subregion.filename <- FALSE
land.mask <- model.input[[modelname.counter]]$land.mask
land.mask.name <- model.input[[modelname.counter]]$land.mask.name
for (periods.counter in periods) {
cat("\n-------------------------------------------------------------")
cat("---------\n\n PROCESSING MODEL \"", modelname.counter,
"\" FOR PERIOD \"", periods.counter, "\"\n", sep = "")
period.begin <- ISOdate(strsplit(periods.counter, "-")[[1]][1],
1, 1, 0)
period.end <- ISOdate(strsplit(periods.counter, "-")[[1]][2],
12, 31, 23)
current.period.char = names(which(periods == periods.counter))
if (current.period.char == "reference.period") {
is.refperiod <- "yes"
if (!is.null(model.ref.per))
periods.counter <- model.ref.per
} else {
is.refperiod <- "no"
if (!is.null(model.scn.per))
periods.counter <- model.scn.per
}
cat(" GETTING REQUIRED FILENAMES FOR PERIOD", "\n", sep = "")
current.model.input <- model.input[[modelname.counter]]
filenames <- GetFileNames(current.model.input,
period.begin, period.end,
parameter.shortname,
period = current.period.char,
calendar = calendar,
count.first.time.value =
count.first.time.value,
time.units = time.units,
what.timesteps = what.timesteps)
cat(paste(" ", filenames, "\n", sep=""))
if ( !all(is.na(filenames)) ) {
data.aggr.list <-
GetAggregatedSpatioTemporalData(filenames,
model.name = modelname.counter,
plot.subregion = user.input$plot.subregion,
grid.filenames = grid.filename,
subregions = subregions,
parameter.name = parameter.shortname,
interpolate.to.grid = FALSE,
temporal.aggr = temporal.aggr,
startdate = period.begin,
enddate = period.end,
time.units = time.units,
calendar = calendar,
count.first.time.value =
count.first.time.value,
lonlat.var.name = lonlat.var.name,
what.timesteps = what.timesteps,
na.rm = na.rm,
area.fraction = does.area.fraction,
spatial.weighting = does.spatial.weighting,
use.land.mask = use.land.mask,
land.mask = land.mask,
land.mask.name = land.mask.name)
n.of.pixels <-
lapply(data.aggr.list, function(z) {
                    lapply(z, function(x) {lapply(x, function(y) sum(!is.na(y)))})})  # count non-NA pixels
data.aggr.list <- lapply(data.aggr.list,
function(x)
lapply(x$data,
AggregateWeightedSubregions,
weight=x$weight)
)
            ## TRUE unless any temporal aggregation explicitly sets time.series = FALSE
            is.time.series <- !any(unlist(sapply(temporal.aggr,
                                                 "[", "time.series")) == FALSE,
                                   na.rm = TRUE)
model.data <- MakeModelDataFrame(modelname.counter,
institute.name,
rcm.name,
global.model.name,
gcm.run = gcm.run,
is.reference.period = is.refperiod,
time.step = periods.counter,
subregion = names(data.aggr.list),
is.corrected = is.corrected,
resolution = resolution,
season = sort(names(data.aggr.list[[1]])),
is.time.series = is.time.series,
data = data.aggr.list,
parameter.name = parameter.longname,
emission.scenario = emission.scenario)
wux.df <- rbind(wux.df, model.data)
}
}
}
wux.df.list[[parameter.longname]] <- wux.df
}
wux.df <- MergeParameterDataframes(wux.df.list, parameter.names)
if (!is.time.series) {
wux.diff.df <- WuxDataDiff(wux.df, parameter.names)
write.table(wux.diff.df,
file = paste(save.as.data, "_diff.csv", sep = ""), sep=";")
}
cat("\n-------------------------------------------------------------------")
cat("---\n")
cat("\n", "SAVING DATA", "\n", sep = "")
write.table(wux.df, file = paste(save.as.data, ".csv", sep = ""), sep=";")
cat("\n*******************************************************************")
cat("***\n")
cat(" YOUR WUX DATA FRAME HAS BEEN CREATED SUCCESSFULLY :-) ")
cat(" \n")
cat("********************************************************************")
cat("**\n")
if (!is.time.series) {
class(wux.df) <- append("wux.df", class(wux.df))
return(wux.diff.df)
} else {
class(wux.df) <- append("wux.ts.df", class(wux.df))
return(wux.df)
}
}
ReadFromModelDictionary <- function(modelnames, modelinput = NULL){
if (is.null(modelinput)){
model.input <- InitModelDictionary()
} else {
if (is.list(modelinput)) {
model.input <- modelinput
} else {
model.input.sourced <- source(modelinput, local = TRUE)
model.input <- model.input.sourced$value
}
if (!is.list(model.input))
stop("YOUR model.input FILE IS WRONG. BE SURE YOU HAVE SPECIFIED A LIST CALLED \"model.input\". SEE ?models2wux FOR DETAILS. ")
}
cmip3.sresa1b.labels <- c("cmip3-sresa1b", "CMIP3-SRESA1B")
cmip3.sresa1b.modelnames <- c("bccr_bcm2_0-r1",
"cccma_cgcm3_1-r1",
"cccma_cgcm3_1-r2",
"cccma_cgcm3_1-r3",
"cccma_cgcm3_1-r4",
"cccma_cgcm3_1-r5",
"cccma_cgcm3_1_t63-r1",
"cnrm_cm3-r1",
"csiro_mk3_0-r1",
"csiro_mk3_5-r1",
"gfdl_cm2_0-r1",
"gfdl_cm2_1-r1",
"giss_aom-r1",
"giss_aom-r2",
"giss_model_e_h-r1",
"giss_model_e_h-r2",
"giss_model_e_h-r3",
"giss_model_e_r-r1",
"giss_model_e_r-r2",
"giss_model_e_r-r3",
"giss_model_e_r-r4",
"giss_model_e_r-r5",
"iap_fgoals1_0_g-r1",
"iap_fgoals1_0_g-r2",
"iap_fgoals1_0_g-r3",
"ingv_echam4-r1",
"inmcm3_0-r1",
"ipsl_cm4-r1",
"miroc3_2_hires-r1",
"miroc3_2_medres-r1",
"miroc3_2_medres-r2",
"miroc3_2_medres-r3",
"miub_echo_g-r1",
"miub_echo_g-r2",
"miub_echo_g-r3",
"mpi_echam5-r1",
"mpi_echam5-r2",
"mpi_echam5-r3",
"mpi_echam5-r4",
"mri_cgcm2_3_2a-r1",
"mri_cgcm2_3_2a-r2",
"mri_cgcm2_3_2a-r3",
"mri_cgcm2_3_2a-r4",
"mri_cgcm2_3_2a-r5",
"ncar_ccsm3_0-r1",
"ncar_ccsm3_0-r2",
"ncar_ccsm3_0-r3",
"ncar_ccsm3_0-r5",
"ncar_ccsm3_0-r6",
"ncar_ccsm3_0-r7",
"ncar_ccsm3_0-r9",
"ukmo_hadcm3-r1",
"ukmo_hadgem1-r1")
cmip3.sresa2.labels <- c("cmip3-sresa2", "CMIP3-SRESA2")
cmip3.sresa2.modelnames <- c("bccr_bcm2_0-sresa2-r1",
"cccma_cgcm3_1-sresa2-r1",
"cccma_cgcm3_1-sresa2-r2",
"cccma_cgcm3_1-sresa2-r3",
"cccma_cgcm3_1-sresa2-r4",
"cccma_cgcm3_1-sresa2-r5",
"cnrm_cm3-sresa2-r1",
"csiro_mk3_0-sresa2-r1",
"csiro_mk3_5-sresa2-r1",
"gfdl_cm2_0-sresa2-r1",
"gfdl_cm2_1-sresa2-r1",
"giss_model_e_r-sresa2-r1",
"ingv_echam4-sresa2-r1",
"inmcm3_0-sresa2-r1",
"ipsl_cm4-sresa2-r1",
"miroc3_2_medres-sresa2-r1",
"miroc3_2_medres-sresa2-r2",
"miroc3_2_medres-sresa2-r3",
"miub_echo_g-sresa2-r1",
"miub_echo_g-sresa2-r2",
"miub_echo_g-sresa2-r3",
"mpi_echam5-sresa2-r1",
"mpi_echam5-sresa2-r2",
"mpi_echam5-sresa2-r3",
"mri_cgcm2_3_2a-sresa2-r1",
"mri_cgcm2_3_2a-sresa2-r2",
"mri_cgcm2_3_2a-sresa2-r3",
"mri_cgcm2_3_2a-sresa2-r4",
"mri_cgcm2_3_2a-sresa2-r5",
"ncar_ccsm3_0-sresa2-r1",
"ncar_ccsm3_0-sresa2-r2",
"ncar_ccsm3_0-sresa2-r3",
"ncar_ccsm3_0-sresa2-r4",
"ncar_ccsm3_0-sresa2-r5",
"ukmo_hadcm3-sresa2-r1",
"ukmo_hadgem1-sresa2-r1")
cmip3.sresb1.labels <- c("cmip3-sresb1", "CMIP3-SRESB1")
cmip3.sresb1.modelnames <- c("bccr_bcm2_0-sresb1-r1",
"cccma_cgcm3_1-sresb1-r1",
"cccma_cgcm3_1-sresb1-r2",
"cccma_cgcm3_1-sresb1-r3",
"cccma_cgcm3_1-sresb1-r4",
"cccma_cgcm3_1-sresb1-r5",
"cccma_cgcm3_1_t63-sresb1-r1",
"cnrm_cm3-sresb1-r1",
"csiro_mk3_0-sresb1-r1",
"csiro_mk3_5-sresb1-r1",
"gfdl_cm2_0-sresb1-r1",
"gfdl_cm2_1-sresb1-r1",
"giss_aom-sresb1-r1",
"giss_aom-sresb1-r2",
"giss_model_e_r-sresb1-r1" ,
"iap_fgoals1_0_g-sresb1-r1",
"iap_fgoals1_0_g-sresb1-r2",
"iap_fgoals1_0_g-sresb1-r3",
"inmcm3_0-sresb1-r1",
"ipsl_cm4-sresb1-r1",
"miroc3_2_hires-sresb1-r1",
"miroc3_2_medres-sresb1-r1",
"miroc3_2_medres-sresb1-r2",
"miroc3_2_medres-sresb1-r3",
"miub_echo_g-sresb1-r1",
"miub_echo_g-sresb1-r2",
"miub_echo_g-sresb1-r3",
"mpi_echam5-sresb1-r1",
"mpi_echam5-sresb1-r2",
"mpi_echam5-sresb1-r3",
"mri_cgcm2_3_2a-sresb1-r1",
"mri_cgcm2_3_2a-sresb1-r2",
"mri_cgcm2_3_2a-sresb1-r3",
"mri_cgcm2_3_2a-sresb1-r4",
"mri_cgcm2_3_2a-sresb1-r5",
"ncar_ccsm3_0-sresb1-r1",
"ncar_ccsm3_0-sresb1-r2",
"ncar_ccsm3_0-sresb1-r3",
"ncar_ccsm3_0-sresb1-r4",
"ncar_ccsm3_0-sresb1-r5",
"ncar_ccsm3_0-sresb1-r6",
"ncar_ccsm3_0-sresb1-r7",
"ncar_ccsm3_0-sresb1-r9",
"ukmo_hadcm3-sresb1-r1")
cmip5.rcp26.labels <- c("cmip5-rcp26", "CMIP5-RCP26")
cmip5.rcp26.modelnames <- c("BCC-CSM1-1-r1i1p1_rcp26",
"BCC-CSM1-1-m-r1i1p1_rcp26",
"BNU-ESM-r1i1p1_rcp26",
"CanESM2-r1i1p1_rcp26",
"CanESM2-r2i1p1_rcp26",
"CanESM2-r3i1p1_rcp26",
"CanESM2-r4i1p1_rcp26",
"CanESM2-r5i1p1_rcp26",
"CCSM4-r1i1p1_rcp26",
"CCSM4-r2i1p1_rcp26",
"CCSM4-r3i1p1_rcp26",
"CCSM4-r4i1p1_rcp26",
"CCSM4-r5i1p1_rcp26",
"CCSM4-r6i1p1_rcp26",
"CESM1-CAM5-r1i1p1_rcp26",
"CESM1-CAM5-r2i1p1_rcp26",
"CESM1-CAM5-r3i1p1_rcp26",
"CNRM-CM5-r1i1p1_rcp26",
"CSIRO-Mk3-6-0-r10i1p1_rcp26",
"CSIRO-Mk3-6-0-r1i1p1_rcp26",
"CSIRO-Mk3-6-0-r2i1p1_rcp26",
"CSIRO-Mk3-6-0-r3i1p1_rcp26",
"CSIRO-Mk3-6-0-r4i1p1_rcp26",
"CSIRO-Mk3-6-0-r5i1p1_rcp26",
"CSIRO-Mk3-6-0-r6i1p1_rcp26",
"CSIRO-Mk3-6-0-r7i1p1_rcp26",
"CSIRO-Mk3-6-0-r8i1p1_rcp26",
"CSIRO-Mk3-6-0-r9i1p1_rcp26",
"EC-EARTH-r12i1p1_rcp26",
"EC-EARTH-r8i1p1_rcp26",
"FGOALS-g2-r1i1p1_rcp26",
"FGOALS-s2-r1i1p1_rcp26",
"FIO-ESM-r1i1p1_rcp26",
"FIO-ESM-r2i1p1_rcp26",
"FIO-ESM-r3i1p1_rcp26",
"GFDL-CM3-r1i1p1_rcp26",
"GFDL-ESM2G-r1i1p1_rcp26",
"GFDL-ESM2M-r1i1p1_rcp26",
"GISS-E2-H-r1i1p1_rcp26",
"GISS-E2-H-r1i1p2_rcp26",
"GISS-E2-H-r1i1p3_rcp26",
"GISS-E2-R-r1i1p1_rcp26",
"GISS-E2-R-r1i1p2_rcp26",
"GISS-E2-R-r1i1p3_rcp26",
"HadGEM2-AO-r1i1p1_rcp26",
"HadGEM2-ES-r1i1p1_rcp26",
"HadGEM2-ES-r4i1p1_rcp26",
"IPSL-CM5A-LR-r1i1p1_rcp26",
"IPSL-CM5A-LR-r2i1p1_rcp26",
"IPSL-CM5A-LR-r3i1p1_rcp26",
"IPSL-CM5A-LR-r4i1p1_rcp26",
"IPSL-CM5A-MR-r1i1p1_rcp26",
"MIROC5-r1i1p1_rcp26",
"MIROC5-r2i1p1_rcp26",
"MIROC5-r3i1p1_rcp26",
"MIROC-ESM-r1i1p1_rcp26",
"MIROC-ESM-CHEM-r1i1p1_rcp26",
"MPI-ESM-LR-r1i1p1_rcp26",
"MPI-ESM-LR-r2i1p1_rcp26",
"MPI-ESM-LR-r3i1p1_rcp26",
"MPI-ESM-MR-r1i1p1_rcp26",
"MRI-CGCM3-r1i1p1_rcp26",
"NorESM1-M-r1i1p1_rcp26",
"NorESM1-ME-r1i1p1_rcp26")
cmip5.rcp26.shortperiods.labels <- c("cmip5-rcp26-shortperiods", "CMIP5-RCP26-SHORTPERIODS")
cmip5.rcp26.shortperiods.modelnames <- c("BCC-CSM1-1-r1i1p1_rcp26",
"BCC-CSM1-1-m-r1i1p1_rcp26",
"BNU-ESM-r1i1p1_rcp26",
"CanESM2-r1i1p1_rcp26",
"CanESM2-r2i1p1_rcp26",
"CanESM2-r3i1p1_rcp26",
"CanESM2-r4i1p1_rcp26",
"CanESM2-r5i1p1_rcp26",
"CCSM4-r1i1p1_rcp26",
"CCSM4-r2i1p1_rcp26",
"CCSM4-r3i1p1_rcp26",
"CCSM4-r4i1p1_rcp26",
"CCSM4-r5i1p1_rcp26",
"CCSM4-r6i1p1_rcp26",
"CESM1-CAM5-r1i1p1_rcp26",
"CESM1-CAM5-r2i1p1_rcp26",
"CESM1-CAM5-r3i1p1_rcp26",
"CNRM-CM5-r1i1p1_rcp26",
"CSIRO-Mk3-6-0-r10i1p1_rcp26",
"CSIRO-Mk3-6-0-r1i1p1_rcp26",
"CSIRO-Mk3-6-0-r2i1p1_rcp26",
"CSIRO-Mk3-6-0-r3i1p1_rcp26",
"CSIRO-Mk3-6-0-r4i1p1_rcp26",
"CSIRO-Mk3-6-0-r5i1p1_rcp26",
"CSIRO-Mk3-6-0-r6i1p1_rcp26",
"CSIRO-Mk3-6-0-r7i1p1_rcp26",
"CSIRO-Mk3-6-0-r8i1p1_rcp26",
"CSIRO-Mk3-6-0-r9i1p1_rcp26",
"EC-EARTH-r12i1p1_rcp26",
"EC-EARTH-r8i1p1_rcp26",
"FGOALS-g2-r1i1p1_rcp26",
"FGOALS-s2-r1i1p1_rcp26",
"FIO-ESM-r1i1p1_rcp26",
"FIO-ESM-r2i1p1_rcp26",
"FIO-ESM-r3i1p1_rcp26",
"GFDL-CM3-r1i1p1_rcp26",
"GFDL-ESM2G-r1i1p1_rcp26",
"GFDL-ESM2M-r1i1p1_rcp26",
"GISS-E2-H-r1i1p1_rcp26",
"GISS-E2-H-r1i1p2_rcp26",
"GISS-E2-H-r1i1p3_rcp26",
"GISS-E2-R-r1i1p1_rcp26",
"GISS-E2-R-r1i1p2_rcp26",
"GISS-E2-R-r1i1p3_rcp26",
"HadGEM2-AO-r1i1p1_rcp26",
"HadGEM2-ES-r1i1p1_rcp26",
"HadGEM2-ES-r2i1p1_rcp26",
"HadGEM2-ES-r3i1p1_rcp26",
"HadGEM2-ES-r4i1p1_rcp26",
"IPSL-CM5A-LR-r1i1p1_rcp26",
"IPSL-CM5A-LR-r2i1p1_rcp26",
"IPSL-CM5A-LR-r3i1p1_rcp26",
"IPSL-CM5A-LR-r4i1p1_rcp26",
"IPSL-CM5A-MR-r1i1p1_rcp26",
"MIROC5-r1i1p1_rcp26",
"MIROC5-r2i1p1_rcp26",
"MIROC5-r3i1p1_rcp26",
"MIROC5-r4i1p1_rcp26",
"MIROC5-r5i1p1_rcp26",
"MIROC-ESM-r1i1p1_rcp26",
"MIROC-ESM-CHEM-r1i1p1_rcp26",
"MPI-ESM-LR-r1i1p1_rcp26",
"MPI-ESM-LR-r2i1p1_rcp26",
"MPI-ESM-LR-r3i1p1_rcp26",
"MPI-ESM-MR-r1i1p1_rcp26",
"MRI-CGCM3-r1i1p1_rcp26",
"NorESM1-M-r1i1p1_rcp26",
"NorESM1-ME-r1i1p1_rcp26")
cmip5.rcp45.labels <- c("cmip5-rcp45", "CMIP5-RCP45")
cmip5.rcp45.modelnames <- c("ACCESS1-0-r1i1p1_rcp45",
"ACCESS1-3-r1i1p1_rcp45",
"BCC-CSM1-1-r1i1p1_rcp45",
"BCC-CSM1-1-m-r1i1p1_rcp45",
"BNU-ESM-r1i1p1_rcp45",
"CanESM2-r1i1p1_rcp45",
"CanESM2-r2i1p1_rcp45",
"CanESM2-r3i1p1_rcp45",
"CanESM2-r4i1p1_rcp45",
"CanESM2-r5i1p1_rcp45",
"CCSM4-r1i1p1_rcp45",
"CCSM4-r2i1p1_rcp45",
"CCSM4-r3i1p1_rcp45",
"CCSM4-r4i1p1_rcp45",
"CCSM4-r5i1p1_rcp45",
"CCSM4-r6i1p1_rcp45",
"CESM1-BGC-r1i1p1_rcp45",
"CESM1-CAM5-r1i1p1_rcp45",
"CESM1-CAM5-r2i1p1_rcp45",
"CESM1-CAM5-r3i1p1_rcp45",
"CESM1-WACCM-r2i1p1_rcp45",
"CMCC-CM-r1i1p1_rcp45",
"CMCC-CMS-r1i1p1_rcp45",
"CNRM-CM5-r1i1p1_rcp45",
"CSIRO-Mk3-6-0-r10i1p1_rcp45",
"CSIRO-Mk3-6-0-r1i1p1_rcp45",
"CSIRO-Mk3-6-0-r2i1p1_rcp45",
"CSIRO-Mk3-6-0-r3i1p1_rcp45",
"CSIRO-Mk3-6-0-r4i1p1_rcp45",
"CSIRO-Mk3-6-0-r5i1p1_rcp45",
"CSIRO-Mk3-6-0-r6i1p1_rcp45",
"CSIRO-Mk3-6-0-r7i1p1_rcp45",
"CSIRO-Mk3-6-0-r8i1p1_rcp45",
"CSIRO-Mk3-6-0-r9i1p1_rcp45",
"EC-EARTH-r12i1p1_rcp45",
"EC-EARTH-r13i1p1_rcp45",
"EC-EARTH-r14i1p1_rcp45",
"EC-EARTH-r2i1p1_rcp45",
"EC-EARTH-r6i1p1_rcp45",
"EC-EARTH-r8i1p1_rcp45",
"EC-EARTH-r9i1p1_rcp45",
"FGOALS-g2-r1i1p1_rcp45",
"FIO-ESM-r1i1p1_rcp45",
"FIO-ESM-r2i1p1_rcp45",
"FIO-ESM-r3i1p1_rcp45",
"GFDL-CM3-r1i1p1_rcp45",
"GFDL-ESM2G-r1i1p1_rcp45",
"GFDL-ESM2M-r1i1p1_rcp45",
"GISS-E2-H-r1i1p1_rcp45",
"GISS-E2-H-r1i1p2_rcp45",
"GISS-E2-H-r1i1p3_rcp45",
"GISS-E2-H-r2i1p1_rcp45",
"GISS-E2-H-r2i1p2_rcp45",
"GISS-E2-H-r2i1p3_rcp45",
"GISS-E2-H-r3i1p1_rcp45",
"GISS-E2-H-r3i1p2_rcp45",
"GISS-E2-H-r3i1p3_rcp45",
"GISS-E2-H-r4i1p1_rcp45",
"GISS-E2-H-r4i1p2_rcp45",
"GISS-E2-H-r4i1p3_rcp45",
"GISS-E2-H-r5i1p1_rcp45",
"GISS-E2-H-r5i1p2_rcp45",
"GISS-E2-H-r5i1p3_rcp45",
"GISS-E2-H-r6i1p3_rcp45",
"GISS-E2-H-CC-r1i1p1_rcp45",
"GISS-E2-R-r1i1p1_rcp45",
"GISS-E2-R-r1i1p2_rcp45",
"GISS-E2-R-r1i1p3_rcp45",
"GISS-E2-R-r2i1p1_rcp45",
"GISS-E2-R-r2i1p2_rcp45",
"GISS-E2-R-r2i1p3_rcp45",
"GISS-E2-R-r3i1p1_rcp45",
"GISS-E2-R-r3i1p2_rcp45",
"GISS-E2-R-r3i1p3_rcp45",
"GISS-E2-R-r4i1p1_rcp45",
"GISS-E2-R-r4i1p2_rcp45",
"GISS-E2-R-r4i1p3_rcp45",
"GISS-E2-R-r5i1p1_rcp45",
"GISS-E2-R-r5i1p2_rcp45",
"GISS-E2-R-r5i1p3_rcp45",
"GISS-E2-R-r6i1p1_rcp45",
"GISS-E2-R-r6i1p3_rcp45",
"GISS-E2-R-CC-r1i1p1_rcp45",
"HadGEM2-AO-r1i1p1_rcp45",
"HadGEM2-CC-r1i1p1_rcp45",
"HadGEM2-ES-r1i1p1_rcp45",
"HadGEM2-ES-r4i1p1_rcp45",
"INM-CM4-r1i1p1_rcp45",
"IPSL-CM5A-LR-r1i1p1_rcp45",
"IPSL-CM5A-LR-r2i1p1_rcp45",
"IPSL-CM5A-LR-r3i1p1_rcp45",
"IPSL-CM5A-LR-r4i1p1_rcp45",
"IPSL-CM5A-MR-r1i1p1_rcp45",
"IPSL-CM5B-LR-r1i1p1_rcp45",
"MIROC5-r1i1p1_rcp45",
"MIROC5-r2i1p1_rcp45",
"MIROC5-r3i1p1_rcp45",
"MIROC-ESM-r1i1p1_rcp45",
"MIROC-ESM-CHEM-r1i1p1_rcp45",
"MPI-ESM-LR-r1i1p1_rcp45",
"MPI-ESM-LR-r2i1p1_rcp45",
"MPI-ESM-LR-r3i1p1_rcp45",
"MPI-ESM-MR-r1i1p1_rcp45",
"MPI-ESM-MR-r2i1p1_rcp45",
"MPI-ESM-MR-r3i1p1_rcp45",
"MRI-CGCM3-r1i1p1_rcp45",
"NorESM1-M-r1i1p1_rcp45",
"NorESM1-ME-r1i1p1_rcp45"
)
cmip5.rcp45.shortperiods.labels <- c("cmip5-rcp45-shortperiods", "CMIP5-RCP45-SHORTPERIODS")
cmip5.rcp45.shortperiods.modelnames <- c("ACCESS1-0-r1i1p1_rcp45",
"ACCESS1-3-r1i1p1_rcp45",
"BCC-CSM1-1-r1i1p1_rcp45",
"BCC-CSM1-1-m-r1i1p1_rcp45",
"BNU-ESM-r1i1p1_rcp45",
"CanCM4-r10i1p1_rcp45",
"CanCM4-r1i1p1_rcp45",
"CanCM4-r2i1p1_rcp45",
"CanCM4-r3i1p1_rcp45",
"CanCM4-r4i1p1_rcp45",
"CanCM4-r5i1p1_rcp45",
"CanCM4-r6i1p1_rcp45",
"CanCM4-r7i1p1_rcp45",
"CanCM4-r8i1p1_rcp45",
"CanCM4-r9i1p1_rcp45",
"CanESM2-r1i1p1_rcp45",
"CanESM2-r2i1p1_rcp45",
"CanESM2-r3i1p1_rcp45",
"CanESM2-r4i1p1_rcp45",
"CanESM2-r5i1p1_rcp45",
"CCSM4-r1i1p1_rcp45",
"CCSM4-r2i1p1_rcp45",
"CCSM4-r3i1p1_rcp45",
"CCSM4-r4i1p1_rcp45",
"CCSM4-r5i1p1_rcp45",
"CCSM4-r6i1p1_rcp45",
"CESM1-BGC-r1i1p1_rcp45",
"CESM1-CAM5-r1i1p1_rcp45",
"CESM1-CAM5-r2i1p1_rcp45",
"CESM1-CAM5-r3i1p1_rcp45",
"CESM1-WACCM-r2i1p1_rcp45",
"CESM1-WACCM-r3i1p1_rcp45",
"CESM1-WACCM-r4i1p1_rcp45",
"CMCC-CM-r1i1p1_rcp45",
"CMCC-CMS-r1i1p1_rcp45",
"CNRM-CM5-r1i1p1_rcp45",
"CSIRO-Mk3-6-0-r10i1p1_rcp45",
"CSIRO-Mk3-6-0-r1i1p1_rcp45",
"CSIRO-Mk3-6-0-r2i1p1_rcp45",
"CSIRO-Mk3-6-0-r3i1p1_rcp45",
"CSIRO-Mk3-6-0-r4i1p1_rcp45",
"CSIRO-Mk3-6-0-r5i1p1_rcp45",
"CSIRO-Mk3-6-0-r6i1p1_rcp45",
"CSIRO-Mk3-6-0-r7i1p1_rcp45",
"CSIRO-Mk3-6-0-r8i1p1_rcp45",
"CSIRO-Mk3-6-0-r9i1p1_rcp45",
"EC-EARTH-r12i1p1_rcp45",
"EC-EARTH-r13i1p1_rcp45",
"EC-EARTH-r14i1p1_rcp45",
"EC-EARTH-r1i1p1_rcp45",
"EC-EARTH-r2i1p1_rcp45",
"EC-EARTH-r6i1p1_rcp45",
"EC-EARTH-r8i1p1_rcp45",
"EC-EARTH-r9i1p1_rcp45",
"FGOALS-g2-r1i1p1_rcp45",
"FIO-ESM-r1i1p1_rcp45",
"FIO-ESM-r2i1p1_rcp45",
"FIO-ESM-r3i1p1_rcp45",
"GFDL-CM2-1-r10i1p1_rcp45",
"GFDL-CM2-1-r1i1p1_rcp45",
"GFDL-CM2-1-r2i1p1_rcp45",
"GFDL-CM2-1-r3i1p1_rcp45",
"GFDL-CM2-1-r4i1p1_rcp45",
"GFDL-CM2-1-r5i1p1_rcp45",
"GFDL-CM2-1-r6i1p1_rcp45",
"GFDL-CM2-1-r7i1p1_rcp45",
"GFDL-CM2-1-r8i1p1_rcp45",
"GFDL-CM2-1-r9i1p1_rcp45",
"GFDL-CM3-r1i1p1_rcp45",
"GFDL-ESM2G-r1i1p1_rcp45",
"GFDL-ESM2M-r1i1p1_rcp45",
"GISS-E2-H-r1i1p1_rcp45",
"GISS-E2-H-r1i1p2_rcp45",
"GISS-E2-H-r1i1p3_rcp45",
"GISS-E2-H-r2i1p1_rcp45",
"GISS-E2-H-r2i1p2_rcp45",
"GISS-E2-H-r2i1p3_rcp45",
"GISS-E2-H-r3i1p1_rcp45",
"GISS-E2-H-r3i1p2_rcp45",
"GISS-E2-H-r3i1p3_rcp45",
"GISS-E2-H-r4i1p1_rcp45",
"GISS-E2-H-r4i1p2_rcp45",
"GISS-E2-H-r4i1p3_rcp45",
"GISS-E2-H-r5i1p1_rcp45",
"GISS-E2-H-r5i1p2_rcp45",
"GISS-E2-H-r5i1p3_rcp45",
"GISS-E2-H-r6i1p3_rcp45",
"GISS-E2-H-CC-r1i1p1_rcp45",
"GISS-E2-R-r1i1p1_rcp45",
"GISS-E2-R-r1i1p2_rcp45",
"GISS-E2-R-r1i1p3_rcp45",
"GISS-E2-R-r2i1p1_rcp45",
"GISS-E2-R-r2i1p2_rcp45",
"GISS-E2-R-r2i1p3_rcp45",
"GISS-E2-R-r3i1p1_rcp45",
"GISS-E2-R-r3i1p2_rcp45",
"GISS-E2-R-r3i1p3_rcp45",
"GISS-E2-R-r4i1p1_rcp45",
"GISS-E2-R-r4i1p2_rcp45",
"GISS-E2-R-r4i1p3_rcp45",
"GISS-E2-R-r5i1p1_rcp45",
"GISS-E2-R-r5i1p2_rcp45",
"GISS-E2-R-r5i1p3_rcp45",
"GISS-E2-R-r6i1p1_rcp45",
"GISS-E2-R-r6i1p3_rcp45",
"GISS-E2-R-CC-r1i1p1_rcp45",
"HadCM3-r10i1p1_rcp45",
"HadCM3-r1i1p1_rcp45",
"HadCM3-r2i1p1_rcp45",
"HadCM3-r3i1p1_rcp45",
"HadCM3-r4i1p1_rcp45",
"HadCM3-r5i1p1_rcp45",
"HadCM3-r6i1p1_rcp45",
"HadCM3-r7i1p1_rcp45",
"HadCM3-r8i1p1_rcp45",
"HadCM3-r9i1p1_rcp45",
"HadGEM2-AO-r1i1p1_rcp45",
"HadGEM2-CC-r1i1p1_rcp45",
"HadGEM2-ES-r1i1p1_rcp45",
"HadGEM2-ES-r2i1p1_rcp45",
"HadGEM2-ES-r3i1p1_rcp45",
"HadGEM2-ES-r4i1p1_rcp45",
"INM-CM4-r1i1p1_rcp45",
"IPSL-CM5A-LR-r1i1p1_rcp45",
"IPSL-CM5A-LR-r2i1p1_rcp45",
"IPSL-CM5A-LR-r3i1p1_rcp45",
"IPSL-CM5A-LR-r4i1p1_rcp45",
"IPSL-CM5A-MR-r1i1p1_rcp45",
"IPSL-CM5B-LR-r1i1p1_rcp45",
"MIROC4h-r1i1p1_rcp45",
"MIROC4h-r2i1p1_rcp45",
"MIROC4h-r3i1p1_rcp45",
"MIROC5-r1i1p1_rcp45",
"MIROC5-r2i1p1_rcp45",
"MIROC5-r3i1p1_rcp45",
"MIROC5-r4i1p1_rcp45",
"MIROC5-r5i1p1_rcp45",
"MIROC-ESM-r1i1p1_rcp45",
"MIROC-ESM-CHEM-r1i1p1_rcp45",
"MPI-ESM-LR-r1i1p1_rcp45",
"MPI-ESM-LR-r2i1p1_rcp45",
"MPI-ESM-LR-r3i1p1_rcp45",
"MPI-ESM-MR-r1i1p1_rcp45",
"MPI-ESM-MR-r2i1p1_rcp45",
"MPI-ESM-MR-r3i1p1_rcp45",
"MRI-CGCM3-r1i1p1_rcp45",
"NorESM1-M-r1i1p1_rcp45",
"NorESM1-ME-r1i1p1_rcp45"
)
cmip5.rcp60.labels <- c("cmip5-rcp60", "CMIP5-RCP60")
cmip5.rcp60.modelnames <- c("BCC-CSM1-1-r1i1p1_rcp60",
"BCC-CSM1-1-m-r1i1p1_rcp60",
"CCSM4-r1i1p1_rcp60",
"CCSM4-r2i1p1_rcp60",
"CCSM4-r3i1p1_rcp60",
"CCSM4-r4i1p1_rcp60",
"CCSM4-r5i1p1_rcp60",
"CCSM4-r6i1p1_rcp60",
"CESM1-CAM5-r1i1p1_rcp60",
"CESM1-CAM5-r2i1p1_rcp60",
"CESM1-CAM5-r3i1p1_rcp60",
"CSIRO-Mk3-6-0-r10i1p1_rcp60",
"CSIRO-Mk3-6-0-r1i1p1_rcp60",
"CSIRO-Mk3-6-0-r2i1p1_rcp60",
"CSIRO-Mk3-6-0-r3i1p1_rcp60",
"CSIRO-Mk3-6-0-r4i1p1_rcp60",
"CSIRO-Mk3-6-0-r5i1p1_rcp60",
"CSIRO-Mk3-6-0-r6i1p1_rcp60",
"CSIRO-Mk3-6-0-r7i1p1_rcp60",
"CSIRO-Mk3-6-0-r8i1p1_rcp60",
"CSIRO-Mk3-6-0-r9i1p1_rcp60",
"FGOALS-s2-r1i1p1_rcp60",
"FIO-ESM-r1i1p1_rcp60",
"FIO-ESM-r2i1p1_rcp60",
"FIO-ESM-r3i1p1_rcp60",
"GFDL-CM3-r1i1p1_rcp60",
"GFDL-ESM2G-r1i1p1_rcp60",
"GFDL-ESM2M-r1i1p1_rcp60",
"GISS-E2-H-r1i1p1_rcp60",
"GISS-E2-H-r1i1p2_rcp60",
"GISS-E2-H-r1i1p3_rcp60",
"GISS-E2-R-r1i1p1_rcp60",
"GISS-E2-R-r1i1p2_rcp60",
"GISS-E2-R-r1i1p3_rcp60",
"HadGEM2-AO-r1i1p1_rcp60",
"HadGEM2-ES-r1i1p1_rcp60",
"HadGEM2-ES-r4i1p1_rcp60",
"IPSL-CM5A-LR-r1i1p1_rcp60",
"IPSL-CM5A-MR-r1i1p1_rcp60",
"MIROC5-r1i1p1_rcp60",
"MIROC5-r2i1p1_rcp60",
"MIROC5-r3i1p1_rcp60",
"MIROC-ESM-r1i1p1_rcp60",
"MIROC-ESM-CHEM-r1i1p1_rcp60",
"MRI-CGCM3-r1i1p1_rcp60",
"NorESM1-M-r1i1p1_rcp60",
"NorESM1-ME-r1i1p1_rcp60")
cmip5.rcp60.shortperiods.labels <- c("cmip5-rcp60-shortperiods", "CMIP5-RCP60-SHORTPERIODS")
cmip5.rcp60.shortperiods.modelnames <- c("BCC-CSM1-1-r1i1p1_rcp60",
"BCC-CSM1-1-m-r1i1p1_rcp60",
"CCSM4-r1i1p1_rcp60",
"CCSM4-r2i1p1_rcp60",
"CCSM4-r3i1p1_rcp60",
"CCSM4-r4i1p1_rcp60",
"CCSM4-r5i1p1_rcp60",
"CCSM4-r6i1p1_rcp60",
"CESM1-CAM5-r1i1p1_rcp60",
"CESM1-CAM5-r2i1p1_rcp60",
"CESM1-CAM5-r3i1p1_rcp60",
"CSIRO-Mk3-6-0-r10i1p1_rcp60",
"CSIRO-Mk3-6-0-r1i1p1_rcp60",
"CSIRO-Mk3-6-0-r2i1p1_rcp60",
"CSIRO-Mk3-6-0-r3i1p1_rcp60",
"CSIRO-Mk3-6-0-r4i1p1_rcp60",
"CSIRO-Mk3-6-0-r5i1p1_rcp60",
"CSIRO-Mk3-6-0-r6i1p1_rcp60",
"CSIRO-Mk3-6-0-r7i1p1_rcp60",
"CSIRO-Mk3-6-0-r8i1p1_rcp60",
"CSIRO-Mk3-6-0-r9i1p1_rcp60",
"FGOALS-s2-r1i1p1_rcp60",
"FIO-ESM-r1i1p1_rcp60",
"FIO-ESM-r2i1p1_rcp60",
"FIO-ESM-r3i1p1_rcp60",
"GFDL-CM3-r1i1p1_rcp60",
"GFDL-ESM2G-r1i1p1_rcp60",
"GFDL-ESM2M-r1i1p1_rcp60",
"GISS-E2-H-r1i1p1_rcp60",
"GISS-E2-H-r1i1p2_rcp60",
"GISS-E2-H-r1i1p3_rcp60",
"GISS-E2-R-r1i1p1_rcp60",
"GISS-E2-R-r1i1p2_rcp60",
"GISS-E2-R-r1i1p3_rcp60",
"HadGEM2-AO-r1i1p1_rcp60",
"HadGEM2-ES-r1i1p1_rcp60",
"HadGEM2-ES-r2i1p1_rcp60",
"HadGEM2-ES-r3i1p1_rcp60",
"HadGEM2-ES-r4i1p1_rcp60",
"IPSL-CM5A-LR-r1i1p1_rcp60",
"IPSL-CM5A-MR-r1i1p1_rcp60",
"MIROC5-r1i1p1_rcp60",
"MIROC5-r2i1p1_rcp60",
"MIROC5-r3i1p1_rcp60",
"MIROC5-r4i1p1_rcp60",
"MIROC5-r5i1p1_rcp60",
"MIROC-ESM-r1i1p1_rcp60",
"MIROC-ESM-CHEM-r1i1p1_rcp60",
"MRI-CGCM3-r1i1p1_rcp60",
"NorESM1-M-r1i1p1_rcp60",
"NorESM1-ME-r1i1p1_rcp60")
cmip5.rcp85.labels <- c("cmip5-rcp85", "CMIP5-RCP85")
cmip5.rcp85.modelnames <- c("ACCESS1-0-r1i1p1_rcp85",
"ACCESS1-3-r1i1p1_rcp85",
"BCC-CSM1-1-r1i1p1_rcp85",
"BCC-CSM1-1-m-r1i1p1_rcp85",
"BNU-ESM-r1i1p1_rcp85",
"CanESM2-r1i1p1_rcp85",
"CanESM2-r2i1p1_rcp85",
"CanESM2-r3i1p1_rcp85",
"CanESM2-r4i1p1_rcp85",
"CanESM2-r5i1p1_rcp85",
"CCSM4-r1i1p1_rcp85",
"CCSM4-r2i1p1_rcp85",
"CCSM4-r3i1p1_rcp85",
"CCSM4-r4i1p1_rcp85",
"CCSM4-r5i1p1_rcp85",
"CCSM4-r6i1p1_rcp85",
"CESM1-BGC-r1i1p1_rcp85",
"CESM1-CAM5-r1i1p1_rcp85",
"CESM1-CAM5-r2i1p1_rcp85",
"CESM1-CAM5-r3i1p1_rcp85",
"CESM1-WACCM-r2i1p1_rcp85",
"CMCC-CESM-r1i1p1_rcp85",
"CMCC-CM-r1i1p1_rcp85",
"CMCC-CMS-r1i1p1_rcp85",
"CNRM-CM5-r10i1p1_rcp85",
"CNRM-CM5-r1i1p1_rcp85",
"CNRM-CM5-r2i1p1_rcp85",
"CNRM-CM5-r4i1p1_rcp85",
"CNRM-CM5-r6i1p1_rcp85",
"CSIRO-Mk3-6-0-r10i1p1_rcp85",
"CSIRO-Mk3-6-0-r1i1p1_rcp85",
"CSIRO-Mk3-6-0-r2i1p1_rcp85",
"CSIRO-Mk3-6-0-r3i1p1_rcp85",
"CSIRO-Mk3-6-0-r4i1p1_rcp85",
"CSIRO-Mk3-6-0-r5i1p1_rcp85",
"CSIRO-Mk3-6-0-r6i1p1_rcp85",
"CSIRO-Mk3-6-0-r7i1p1_rcp85",
"CSIRO-Mk3-6-0-r8i1p1_rcp85",
"CSIRO-Mk3-6-0-r9i1p1_rcp85",
"EC-EARTH-r12i1p1_rcp85",
"EC-EARTH-r13i1p1_rcp85",
"EC-EARTH-r14i1p1_rcp85",
"EC-EARTH-r2i1p1_rcp85",
"EC-EARTH-r6i1p1_rcp85",
"EC-EARTH-r8i1p1_rcp85",
"EC-EARTH-r9i1p1_rcp85",
"FGOALS-g2-r1i1p1_rcp85",
"FIO-ESM-r1i1p1_rcp85",
"FIO-ESM-r2i1p1_rcp85",
"FIO-ESM-r3i1p1_rcp85",
"GFDL-CM3-r1i1p1_rcp85",
"GFDL-ESM2G-r1i1p1_rcp85",
"GFDL-ESM2M-r1i1p1_rcp85",
"GISS-E2-H-r1i1p1_rcp85",
"GISS-E2-H-r1i1p2_rcp85",
"GISS-E2-H-r1i1p3_rcp85",
"GISS-E2-H-r2i1p1_rcp85",
"GISS-E2-H-r2i1p3_rcp85",
"GISS-E2-H-CC-r1i1p1_rcp85",
"GISS-E2-R-r1i1p1_rcp85",
"GISS-E2-R-r1i1p2_rcp85",
"GISS-E2-R-r1i1p3_rcp85",
"GISS-E2-R-r2i1p1_rcp85",
"GISS-E2-R-r2i1p3_rcp85",
"GISS-E2-R-CC-r1i1p1_rcp85",
"HadGEM2-AO-r1i1p1_rcp85",
"HadGEM2-CC-r1i1p1_rcp85",
"HadGEM2-ES-r1i1p1_rcp85",
"HadGEM2-ES-r4i1p1_rcp85",
"INM-CM4-r1i1p1_rcp85",
"IPSL-CM5A-LR-r1i1p1_rcp85",
"IPSL-CM5A-LR-r2i1p1_rcp85",
"IPSL-CM5A-LR-r3i1p1_rcp85",
"IPSL-CM5A-LR-r4i1p1_rcp85",
"IPSL-CM5A-MR-r1i1p1_rcp85",
"IPSL-CM5B-LR-r1i1p1_rcp85",
"MIROC5-r1i1p1_rcp85",
"MIROC5-r2i1p1_rcp85",
"MIROC5-r3i1p1_rcp85",
"MIROC-ESM-r1i1p1_rcp85",
"MIROC-ESM-CHEM-r1i1p1_rcp85",
"MPI-ESM-LR-r1i1p1_rcp85",
"MPI-ESM-LR-r2i1p1_rcp85",
"MPI-ESM-LR-r3i1p1_rcp85",
"MPI-ESM-MR-r1i1p1_rcp85",
"MRI-CGCM3-r1i1p1_rcp85",
"NorESM1-M-r1i1p1_rcp85",
"NorESM1-ME-r1i1p1_rcp85",
"MRI-ESM1-r1i1p1_rcp85")
cmip5.rcp85.shortperiods.labels <- c("cmip5-rcp85-shortperiods", "CMIP5-RCP85-SHORTPERIODS")
cmip5.rcp85.shortperiods.modelnames <- c("ACCESS1-0-r1i1p1_rcp85",
"ACCESS1-3-r1i1p1_rcp85",
"BCC-CSM1-1-r1i1p1_rcp85",
"BCC-CSM1-1-m-r1i1p1_rcp85",
"BNU-ESM-r1i1p1_rcp85",
"CanESM2-r1i1p1_rcp85",
"CanESM2-r2i1p1_rcp85",
"CanESM2-r3i1p1_rcp85",
"CanESM2-r4i1p1_rcp85",
"CanESM2-r5i1p1_rcp85",
"CCSM4-r1i1p1_rcp85",
"CCSM4-r2i1p1_rcp85",
"CCSM4-r3i1p1_rcp85",
"CCSM4-r4i1p1_rcp85",
"CCSM4-r5i1p1_rcp85",
"CCSM4-r6i1p1_rcp85",
"CESM1-BGC-r1i1p1_rcp85",
"CESM1-CAM5-r1i1p1_rcp85",
"CESM1-CAM5-r2i1p1_rcp85",
"CESM1-CAM5-r3i1p1_rcp85",
"CESM1-WACCM-r2i1p1_rcp85",
"CESM1-WACCM-r3i1p1_rcp85",
"CESM1-WACCM-r4i1p1_rcp85",
"CMCC-CESM-r1i1p1_rcp85",
"CMCC-CM-r1i1p1_rcp85",
"CMCC-CMS-r1i1p1_rcp85",
"CNRM-CM5-r10i1p1_rcp85",
"CNRM-CM5-r1i1p1_rcp85",
"CNRM-CM5-r2i1p1_rcp85",
"CNRM-CM5-r4i1p1_rcp85",
"CNRM-CM5-r6i1p1_rcp85",
"CSIRO-Mk3-6-0-r10i1p1_rcp85",
"CSIRO-Mk3-6-0-r1i1p1_rcp85",
"CSIRO-Mk3-6-0-r2i1p1_rcp85",
"CSIRO-Mk3-6-0-r3i1p1_rcp85",
"CSIRO-Mk3-6-0-r4i1p1_rcp85",
"CSIRO-Mk3-6-0-r5i1p1_rcp85",
"CSIRO-Mk3-6-0-r6i1p1_rcp85",
"CSIRO-Mk3-6-0-r7i1p1_rcp85",
"CSIRO-Mk3-6-0-r8i1p1_rcp85",
"CSIRO-Mk3-6-0-r9i1p1_rcp85",
"EC-EARTH-r12i1p1_rcp85",
"EC-EARTH-r13i1p1_rcp85",
"EC-EARTH-r14i1p1_rcp85",
"EC-EARTH-r2i1p1_rcp85",
"EC-EARTH-r6i1p1_rcp85",
"EC-EARTH-r8i1p1_rcp85",
"EC-EARTH-r9i1p1_rcp85",
"FGOALS-g2-r1i1p1_rcp85",
"FIO-ESM-r1i1p1_rcp85",
"FIO-ESM-r2i1p1_rcp85",
"FIO-ESM-r3i1p1_rcp85",
"GFDL-CM3-r1i1p1_rcp85",
"GFDL-ESM2G-r1i1p1_rcp85",
"GFDL-ESM2M-r1i1p1_rcp85",
"GISS-E2-H-r1i1p1_rcp85",
"GISS-E2-H-r1i1p2_rcp85",
"GISS-E2-H-r1i1p3_rcp85",
"GISS-E2-H-r2i1p1_rcp85",
"GISS-E2-H-r2i1p3_rcp85",
"GISS-E2-H-CC-r1i1p1_rcp85",
"GISS-E2-R-r1i1p1_rcp85",
"GISS-E2-R-r1i1p2_rcp85",
"GISS-E2-R-r1i1p3_rcp85",
"GISS-E2-R-r2i1p1_rcp85",
"GISS-E2-R-r2i1p3_rcp85",
"GISS-E2-R-CC-r1i1p1_rcp85",
"HadGEM2-AO-r1i1p1_rcp85",
"HadGEM2-CC-r1i1p1_rcp85",
"HadGEM2-CC-r2i1p1_rcp85",
"HadGEM2-CC-r3i1p1_rcp85",
"HadGEM2-ES-r1i1p1_rcp85",
"HadGEM2-ES-r2i1p1_rcp85",
"HadGEM2-ES-r3i1p1_rcp85",
"HadGEM2-ES-r4i1p1_rcp85",
"INM-CM4-r1i1p1_rcp85",
"IPSL-CM5A-LR-r1i1p1_rcp85",
"IPSL-CM5A-LR-r2i1p1_rcp85",
"IPSL-CM5A-LR-r3i1p1_rcp85",
"IPSL-CM5A-LR-r4i1p1_rcp85",
"IPSL-CM5A-MR-r1i1p1_rcp85",
"IPSL-CM5B-LR-r1i1p1_rcp85",
"MIROC5-r1i1p1_rcp85",
"MIROC5-r2i1p1_rcp85",
"MIROC5-r3i1p1_rcp85",
"MIROC5-r4i1p1_rcp85",
"MIROC5-r5i1p1_rcp85",
"MIROC-ESM-r1i1p1_rcp85",
"MIROC-ESM-CHEM-r1i1p1_rcp85",
"MPI-ESM-LR-r1i1p1_rcp85",
"MPI-ESM-LR-r2i1p1_rcp85",
"MPI-ESM-LR-r3i1p1_rcp85",
"MPI-ESM-MR-r1i1p1_rcp85",
"MRI-CGCM3-r1i1p1_rcp85",
"NorESM1-M-r1i1p1_rcp85",
"NorESM1-ME-r1i1p1_rcp85",
"MRI-ESM1-r1i1p1_rcp85")
ensembles.labels <- c("ENSEMBLES", "ensembles")
ensembles.modelnames <- c("METO-HC_HadRM3Q0",
"METO-HC_HadRM3Q16",
"METO-HC_HadRM3Q3",
"ETHZ-CLM",
"METNOHIRHAM_HadCM3Q0",
"METNOHIRHAM_BCM",
"ICTP-REGCM3",
"MPI-M-REMO",
"C4IRCA3",
"CNRM-RM5.1",
"CNRM-RM4.5",
"DMI-HIRHAM5_ARPEGE",
"DMI-HIRHAM5_ECHAM5",
"DMI-HIRHAM5_BCM",
"GKSS-CCLM4.8",
"KNMI-RACMO2",
"OURANOSMRCC4.2.1",
"SMHIRCA_BCM",
"SMHIRCA_ECHAM5-r3",
"SMHIRCA_HadCM3Q3",
"UCLM-PROMES",
"VMGO-RRCM")
ensembles.qmschoener.labels <- c("ENSEMBLES_QM_SCHOENER",
"ensembles_qm_schoener")
ensembles.qmschoener.modelnames <- c('METO-HC_HadRM3Q0_QM_SCHOENER',
'METO-HC_HadRM3Q16_QM_SCHOENER',
'METO-HC_HadRM3Q3_QM_SCHOENER',
'ETHZ-CLM_QM_SCHOENER',
'METNOHIRHAM_HadCM3Q0_QM_SCHOENER',
'METNOHIRHAM_BCM_QM_SCHOENER',
'ICTP-REGCM3_QM_SCHOENER',
'MPI-M-REMO_QM_SCHOENER',
'C4IRCA3_QM_SCHOENER',
'CNRM-RM5.1_QM_SCHOENER',
'CNRM-RM4.5_QM_SCHOENER',
'DMI-HIRHAM5_ARPEGE_QM_SCHOENER',
'DMI-HIRHAM5_ECHAM5_QM_SCHOENER',
'DMI-HIRHAM5_BCM_QM_SCHOENER',
'GKSS-CCLM4.8_QM_SCHOENER',
'KNMI-RACMO2_QM_SCHOENER',
'OURANOSMRCC4.2.1_QM_SCHOENER',
'SMHIRCA_BCM_QM_SCHOENER',
'SMHIRCA_ECHAM5-r3_QM_SCHOENER',
'SMHIRCA_HadCM3Q3_QM_SCHOENER',
'UCLM-PROMES_QM_SCHOENER',
'VMGO-RRCM_QM_SCHOENER',
'AIT-CCLM_QM_SCHOENER',
'WEGC-CCLM_QM_SCHOENER')
dictionary.names <- names(model.input)
if (length(unique(dictionary.names)) != length(model.input)){
msg1 <- "MODELNAMES IN DICTIONARY MUST BE UNIQUE! "
msg2 <- "THINK ABOUT A CREATIVE MODELNAME ;)"
stop(msg1, msg2)
}
  unknown.modelnames <- modelnames[! modelnames %in% dictionary.names]
  known.modelnames <- modelnames[ modelnames %in% dictionary.names]
  match.modelnames <- match(known.modelnames, modelnames)
  model.list <- model.input[modelnames[match.modelnames]]
if (length(unknown.modelnames) > 0) {
if (any(unknown.modelnames %in% cmip3.sresa2.labels)) {
model.list <- c(model.list, model.input[cmip3.sresa2.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip3.sresa2.labels]
}
if (any(unknown.modelnames %in% cmip3.sresb1.labels)) {
model.list <- c(model.list, model.input[cmip3.sresb1.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip3.sresb1.labels]
}
if (any(unknown.modelnames %in% cmip3.sresa1b.labels)) {
model.list <- c(model.list, model.input[cmip3.sresa1b.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip3.sresa1b.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp26.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp26.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp26.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp26.shortperiods.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp26.shortperiods.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp26.shortperiods.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp45.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp45.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp45.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp45.shortperiods.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp45.shortperiods.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp45.shortperiods.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp60.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp60.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp60.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp60.shortperiods.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp60.shortperiods.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp60.shortperiods.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp85.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp85.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp85.labels]
}
if (any(unknown.modelnames %in% cmip5.rcp85.shortperiods.labels)) {
model.list <- c(model.list, model.input[cmip5.rcp85.shortperiods.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% cmip5.rcp85.shortperiods.labels]
}
if (any(unknown.modelnames %in% ensembles.labels)) {
model.list <- c(model.list, model.input[ensembles.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% ensembles.labels]
}
if (any(unknown.modelnames %in% ensembles.qmschoener.labels)) {
model.list <- c(model.list, model.input[ensembles.qmschoener.modelnames])
unknown.modelnames <-
unknown.modelnames[!unknown.modelnames %in% ensembles.qmschoener.labels]
}
}
if (length(unknown.modelnames) > 0) {
msg1 <- paste("MODEL(S) \"", paste(unknown.modelnames, collapse="\" \""),
"\" UNKNOWN\n", sep="")
msg2 <- "ADD THE MODEL TO DICTIONARY FILE, "
msg3 <- "OR ADD AS A NEW ENSEMBLE INTO ReadFromModelDictionary."
stop(msg1, msg2, msg3)
}
if (any(is.na(names(model.list))))
stop("THERE IS AT LEAST ONE MODEL CALLED \"NA\" IN THE MODEL.INPUT.")
if (length(model.list) == 0)
stop("NO MODELS HAVE BEEN SELECTED")
return(model.list)
}
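## Illustrative note (an assumption about the enclosing reader function, not part of the
## original source): the label lookup above lets one ensemble keyword stand in for all of
## its members, e.g. passing
##   modelnames = c("CMIP5-RCP26", "NorESM1-ME-r1i1p1_rcp45")
## would return the dictionary entries of every RCP2.6 member listed above plus that one
## explicitly named RCP4.5 run, while any name matching neither the dictionary nor a known
## ensemble label triggers the "MODEL(S) ... UNKNOWN" stop() above.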
MakeModelDataFrame <- function(model.name, institute.name,
rcm.name, global.model.name,
gcm.run,
is.reference.period,
time.step,
season = "none",
is.time.series,
subregion = "no", is.corrected = NULL,
emission.scenario = "A1B", resolution,
data, parameter.name) {
if (is.null(data))
stop("NO AGGREGATED DATA SPECIFIED")
if (is.null(parameter.name))
stop("PARAMETER NAME NOT SPECIFIED")
if (is.null(is.corrected))
is.corrected <- "no"
if (is.null(resolution) || !is.null(resolution) && resolution == "")
resolution <- NA
if (is.null(gcm.run) || !is.null(gcm.run) && gcm.run == "")
gcm.run <- NA
if (is.null(rcm.name) || !is.null(rcm.name) && rcm.name == "")
rcm.name <- NA
emission.scenario <- emission.scenario[order(emission.scenario)]
subregion <- subregion[order(subregion)]
season.order <- season[order(season)]
WUX.df <- list(acronym = model.name,
institute = institute.name,
gcm = global.model.name,
gcm.run = gcm.run,
rcm = rcm.name,
em.scn = emission.scenario,
subreg = subregion,
period = time.step,
ref.per = is.reference.period,
season = season.order,
resolution = resolution,
corrected = is.corrected)
WUX.df <- expand.grid(WUX.df)
data.df <- reshape::melt(data)
if (length(data.df) != 3) {
msg1 <- "OBVIOUSLY YOU HAVE CHANGED THE AGGREGATION DIMENSION, "
msg2 <- "WATCH OUT WITH MERGING IN MakeModelDataFrame"
stop(msg1, msg2)
}
names(data.df) <- c(parameter.name, "season", "subreg")
WUX.df <- merge(WUX.df, data.df)
if (is.time.series) {
WUX.df[["year"]] <-
factor(sapply(strsplit(as.character(WUX.df[["season"]]), " "), "[", 1))
WUX.df[["season"]] <-
factor(sapply(strsplit(as.character(WUX.df[["season"]]), " "), "[", 2))
season.pos <- which(names(WUX.df) == "season")
year.pos <- which(names(WUX.df) == "year")
WUX.df <- data.frame(WUX.df[1:(season.pos - 1)], year = WUX.df[year.pos],
WUX.df[season.pos:(length(names(WUX.df)) - 1)])
}
return(WUX.df)
}
MergeParameterDataframes <- function(wux.df.list, parameter.names) {
if (any(sapply(wux.df.list, is.null))){
msg1 <- "AT LEAST ONE DATA.FRAME FOR ONE PARAMETER IS MISSING. "
msg2 <- "THIS REALLY SHOULD NOT HAPPEN."
stop(msg1, msg2)
}
n.of.pars <- length(parameter.names)
par.length <- c(1:n.of.pars)
for (ii in c(1:n.of.pars)) {
if (length(wux.df.list[[ii]])== 0)
par.length[ii] <- 0
else
par.length[ii] <- length(wux.df.list[[ii]][[1]])
}
sort.indices <- sort(par.length, decreasing = TRUE, index.return = TRUE)
parameter.names <- parameter.names[sort.indices$ix]
par.length <- par.length[sort.indices$ix]
data <- wux.df.list[[parameter.names[1]]]
ii <- 2
while (ii <= n.of.pars) {
if (length(wux.df.list[[parameter.names[ii]]]) == 0){
data <- cbind(data, NA)
names(data)[length(data)] <- parameter.names[ii]
} else {
data <- merge(data, wux.df.list[[parameter.names[ii]]], all.x = TRUE)
}
ii <- ii + 1
}
return(data)
}
WuxDataDiff <- function(wux.df, parameters) {
sortby <- c("acronym", "period", "season")
wux.df <-
wux.df[order(wux.df$acronym, wux.df$period, wux.df$season),]
col.noparam <- which(! names(wux.df) %in% parameters)
wux.diff.df <- wux.df[wux.df[["ref.per"]] == "no", col.noparam]
for (param in parameters) {
diff.column <- NULL
perc.column <- NULL
timesteps.ref <- unique(wux.df[wux.df[["ref.per"]] == "yes", ]$period)
timesteps.scn <- unique(wux.df[wux.df[["ref.per"]] == "no" , ]$period)
number.of.timesteps.ref <- 1
number.of.timesteps.scn <- length(timesteps.scn)
stopifnot(number.of.timesteps.ref == 1)
x <- number.of.timesteps.ref / number.of.timesteps.scn
if(x == round(x)){
for (scn.period in timesteps.scn) {
scn <- wux.df[wux.df[["ref.per"]] == "no" &
wux.df[["period"]] == scn.period
, c("acronym", "season", param)]
ref <- wux.df[wux.df[["ref.per"]] == "yes", c("acronym", "season", param)]
diff <- scn[[param]] - ref[[param]]
perc <- (scn[[param]] / ref[[param]] - 1) * 100
diff.column <- c(diff.column, diff)
perc.column <- c(perc.column, perc)
}
} else {
stop("WuxDataDiff, SOMETHING WENT SERIOUSLY WRONG")
}
wux.diff.df <- cbind(wux.diff.df, diff.column)
diff.cols <- which(names(wux.diff.df) == "diff.column")
names(wux.diff.df)[diff.cols] <- paste("delta.", param, sep="")
if (param == "precipitation_amount") {
wux.diff.df <- cbind(wux.diff.df, perc.column)
perc.cols <- which(names(wux.diff.df) == "perc.column")
names(wux.diff.df)[perc.cols] <- paste("perc.delta.", param, sep="")
}
}
return(wux.diff.df)
}
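## Minimal usage sketch of WuxDataDiff() (assumed column layout; not from the original
## source): one model, one reference and one scenario period, one parameter column.
##   toy <- data.frame(acronym = "MOD1",
##                     period = c("1971-2000", "2071-2100"),
##                     season = "year",
##                     ref.per = c("yes", "no"),
##                     air_temperature = c(8.1, 11.3))
##   WuxDataDiff(toy, parameters = "air_temperature")
## returns the scenario row with delta.air_temperature = 11.3 - 8.1 = 3.2; a relative
## perc.delta.* column is only added for the parameter "precipitation_amount".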
|
nhl_conferences<- function(){
base_url <- "https://statsapi.web.nhl.com/api/v1/conferences"
full_url <- paste0(base_url)
res <- httr::RETRY("GET", full_url)
check_status(res)
resp <- res %>%
httr::content(as = "text", encoding = "UTF-8")
tryCatch(
expr = {
conferences_df <- jsonlite::fromJSON(resp)[["conferences"]]
conferences_df <- conferences_df %>%
janitor::clean_names() %>%
dplyr::rename(conference_id = .data$id) %>%
as.data.frame()
},
error = function(e) {
message(glue::glue("{Sys.time()}: Invalid arguments or no conferences data available!"))
},
warning = function(w) {
},
finally = {
}
)
return(conferences_df)
}
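## Usage sketch (assumes a working internet connection and the packages referenced above):
##   conf <- nhl_conferences()
##   head(conf)   # one row per NHL conference; the API's 'id' field is renamed to 'conference_id'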
|
`mantel` <-
function (xdis, ydis, method = "pearson", permutations = 999,
strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
{
EPS <- sqrt(.Machine$double.eps)
xdis <- as.dist(xdis)
ydis <- as.vector(as.dist(ydis))
if (na.rm)
use <- "complete.obs"
else
use <- "all.obs"
statistic <- cor(as.vector(xdis), ydis, method = method, use = use)
variant <- match.arg(method, eval(formals(cor)$method))
variant <- switch(variant,
pearson = "Pearson's product-moment correlation",
kendall = "Kendall's rank correlation tau",
spearman = "Spearman's rank correlation rho",
variant)
N <- attr(xdis, "Size")
permat <- getPermuteMatrix(permutations, N, strata = strata)
if (ncol(permat) != N)
stop(gettextf("'permutations' have %d columns, but data have %d observations",
ncol(permat), N))
permutations <- nrow(permat)
if (permutations) {
perm <- numeric(permutations)
xmat <- as.matrix(xdis)
asdist <- row(xmat) > col(xmat)
ptest <- function(take, ...) {
permvec <- (xmat[take, take])[asdist]
drop(cor(permvec, ydis, method = method, use = use))
}
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if (hasClus || parallel > 1) {
if(.Platform$OS.type == "unix" && !hasClus) {
perm <- do.call(rbind,
mclapply(1:permutations,
function(i, ...) ptest(permat[i,],...),
mc.cores = parallel))
} else {
if (!hasClus) {
parallel <- makeCluster(parallel)
}
perm <- parRapply(parallel, permat, ptest)
if (!hasClus)
stopCluster(parallel)
}
} else {
perm <- sapply(1:permutations, function(i, ...) ptest(permat[i,], ...))
}
signif <- (sum(perm >= statistic - EPS) + 1)/(permutations + 1)
}
else {
signif <- NA
perm <- NULL
}
res <- list(call = match.call(), method = variant, statistic = statistic,
signif = signif, perm = perm, permutations = permutations,
control = attr(permat, "control"))
class(res) <- "mantel"
res
}
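## Illustrative call (assumption: 'comm' and 'env' are matrices with matching rows, so the
## two dissimilarity objects have the same size):
##   d.comm <- dist(comm)
##   d.env  <- dist(env)
##   m <- mantel(d.comm, d.env, method = "spearman", permutations = 999)
##   m$statistic   # observed rank correlation; m$signif is the permutation p-value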
|
test_that("LS.kalman works", {
lsk <- LS.kalman(malleco, start(malleco))
expect_equal(unique(lsk$delta), 1542564)
})
|
xformer.ckt <- function(x,dig=2){
n<- x$N[2]/x$N[1]
Z2.r<-x$Zo.r+x$Zl.r
Z1.r<-Z2.r/n^2
Zi.r<-x$Zs.r+Z1.r
I1.p <- div.polar(x$Vs.p,polar(Zi.r))
I2.p <- mult.polar(I1.p,c(1/n,0))
V1.p <- mult.polar(I1.p,polar(Z1.r))
V2.p <- mult.polar(V1.p,c(n,0))
Vl.p<- mult.polar(I2.p,x$Zl)
Il.p<- I2.p
I1.conj <- c(I1.p[1],-I1.p[2])
Il.conj <- c(Il.p[1],-Il.p[2])
Ss.p <- mult.polar(x$Vs.p,I1.conj); Ss.r <- recta(Ss.p)
Sx.p <- mult.polar(V1.p,I1.conj); Sx.r <- recta(Sx.p)
Sl.p <- mult.polar(Vl.p,Il.conj); Sl.r <- recta(Sl.p)
S1.p <- mult.polar(V1.p,I1.conj); S1.r <- recta(S1.p)
ys <- list(Vs.p=x$Vs.p, Is.p=I1.p, Ss.p=Ss.p, Ss.r=Ss.r)
yx <- list(n=n,Z2.r=Z2.r,Z1.r=Z1.r,I1.p=I1.p,V1.p=V1.p,I2.p=I2.p,
V2.p=V2.p,S1.p=S1.p,S1.r=S1.r)
yl <- list(Vl.p=Vl.p, Il.p=Il.p, Sl.p=Sl.p,Sl.r=Sl.r)
  ## round the reported values (assign the result back; a bare lapply() would discard it)
  ys <- lapply(ys, function(v) round(v, dig))
  yx <- lapply(yx, function(v) round(v, dig))
  yl <- lapply(yl, function(v) round(v, dig))
prntys <- paste(names(ys),"=",ys,sep="",collapse=", ")
prntyx <- paste(names(yx),"=",yx,sep="",collapse=", ")
prntyl <- paste(names(yl),"=",yl,sep="",collapse=", ")
prnt <- c(paste("Source: ",prntys),
paste("Transformer: ",prntyx),
paste("Load: ",prntyl)
)
cat(prnt, sep="\n")
yout <- list(ys=ys,yx=yx,yl=yl,prnt=prnt)
return(yout)
}
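## Usage sketch (assumptions: polar(), recta(), mult.polar() and div.polar() are helpers
## from the same package; polar quantities are c(magnitude, angle) pairs as used above,
## and the rectangular representation is whatever those helpers expect):
##   ckt <- list(N = c(100, 200),          # primary/secondary turns, so n = N2/N1 = 2
##               Vs.p = c(120, 0),         # source voltage in polar form
##               Zs.r = ..., Zo.r = ..., Zl.r = ...,  # source/output/load impedance (rectangular)
##               Zl = ...)                 # load impedance in the polar form used by mult.polar()
##   xformer.ckt(ckt, dig = 3)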
|
.newgroup <- function(name,values,index) {
g <- new('.group',name=name)
if (!is.null(values)) {
if ((length(values) != length(index)) || !is.list(index)) stop('group values and index list do not match')
g@values <- data.frame(indexID=1:length(values),values=values)
g@indices <- index
} else if (!is.null(index) && is.list(index) && !is.null(names(index))) {
g@values <- data.frame(indexID=1:length(index),values=names(index))
g@indices <- index
}
g
}
.updateGroup <- function(x,tbl,sp=NULL) {
if (is.vector(tbl)) {
id <- as.numeric(tbl[1])
g1 <- tbl[2]
g2 <- tbl[3]
} else {
id <- as.numeric(tbl[,1])
g1 <- tbl[,2]
g2 <- tbl[,3]
}
gr <- .getGroupNames(x)
for (g in gr) {
for (i in seq_along(id)) {
if (g1[i] %in% names(x@groups[[g]]@indices)) {
if (id[i] %in% .getGroupIndex(x,g1[i])) {
x@groups[[g]]@indices[[g1[i]]] <- x@groups[[g]]@indices[[g1[i]]][-which(x@groups[[g]]@indices[[g1[i]]] == id[i])]
x@groups[[g]]@indices[[g2[i]]] <- c(x@groups[[g]]@indices[[g2[i]]] , id[i])
}
}
}
}
x
}
.getSpeciesNames <- function(d,n=NULL) {
if (is.null(n)) d@species.names
else d@species.names[d@species.names %in% n]
}
.getGroupLevels <- function(d,g=NULL) {
if (!is.null(g)) {
g <- g[1]
if (!g %in% .getGroupNames(d)) stop(paste('group',g,'does not exist!'))
as.character(d@groups[[g]]@values[,2])
}
}
.getSpeciesDF <- function(d,sp,id) {
if (missing(sp)) sp <- d@species.names[1]
o <- list()
for (s in sp) {
o[[s]] <- data.frame(rID=id,value=NA)
if (d@species[[s]]@type == 'Presence-Absence') {
o[[s]][id %in% d@species[[s]]@presence,2] <- 1
o[[s]][id %in% d@species[[s]]@absence,2] <- 0
} else if (d@species[[s]]@type == 'Presence-Only') {
o[[s]][id %in% d@species[[s]]@presence,2] <- 1
} else if (d@species[[s]]@type == 'Abundance') {
o[[s]][id %in% d@species[[s]]@abundance$rID,2] <- sapply(id[id %in% d@species[[s]]@abundance$rID],function(x) {d@species[[s]]@abundance[d@species[[s]]@abundance$rID == x,2]})
} else if (d@species[[s]]@type == 'Absence-Only!') {
o[[s]][id %in% d@species[[s]]@absence,2] <- 0
} else if (d@species[[s]]@type == 'Abundance_constant!') { # matches the type string returned by .speciesType()
o[[s]][id %in% d@species[[s]]@abundance$rID,2] <- d@species[[s]]@abundance[1,2]
}
}
o
}
.getSpeciesIndex <- function(d,sp=NULL) {
id <- c()
sp <- .getSpeciesNames(d,sp)
for (i in seq_along(sp)) {
id <- c(id,d@species[[sp[i]]]@presence,d@species[[sp[i]]]@absence,d@species[[sp[i]]]@abundance[,1],d@species[[sp[i]]]@background,d@species[[sp[i]]]@Multinomial[,1])
}
sort(unique(id))
}
.getGroupIndex <- function(d,g=NULL) {
if (!is.null(g)) {
id <- c()
for (gg in g) {
gl <- unlist(lapply(unlist(strsplit(gg,':')),.trim))
if (length(gl) > 1) {
if (!gl[1] %in% .getGroupNames(d)) stop(paste('group',gl[1],'does not exist!'))
if (!gl[2] %in% .getGroupNames(d,TRUE)) stop(paste('group level',gl[2],'does not exist!'))
id <- c(id,d@groups[[gl[1]]]@indices[[gl[2]]])
} else {
if (!gl %in% c(.getGroupNames(d),.getGroupNames(d,TRUE))) stop(paste(gl,' is neither a group nor a group level!'))
if (gl %in% .getGroupNames(d)) {
for (gv in d@groups[[gl]]@values[,2]) id <- c(id,d@groups[[gl]]@indices[[gv]])
} else {
for (gn in .getGroupNames(d)) {
if (gl %in% d@groups[[gn]]@values[,2]) id <- c(id,d@groups[[gn]]@indices[[gl]])
}
}
}
}
unique(id)
}
}
.getTimeIndex <- function(d,t=NULL) {
if (!is.null(t)) {
id <- c()
for (gg in t) {
gl <- unlist(lapply(unlist(strsplit(gg,':')),.trim))
if (length(gl) > 1) {
if (!gl[1] %in% .getGroupNames(d)) stop(paste('group',gl[1],'does not exist!'))
if (!gl[2] %in% .getGroupNames(d,TRUE)) stop(paste('group level',gl[2],'does not exist!'))
id <- c(id,d@groups[[gl[1]]]@indices[[gl[2]]])
} else {
if (!gl %in% c(.getGroupNames(d),.getGroupNames(d,TRUE))) stop(paste(gl,' is neither a group nor a group level!'))
if (gl %in% .getGroupNames(d)) {
for (gv in d@groups[[gl]]@values[,2]) id <- c(id,d@groups[[gl]]@indices[[gv]])
} else {
for (gn in .getGroupNames(d)) {
if (gl %in% d@groups[[gn]]@values[,2]) id <- c(id,d@groups[[gn]]@indices[[gl]])
}
}
}
}
unique(id)
}
}
.getIndex <- function(d,sp=NULL,groups=NULL,time=NULL) {
id1 <- id2 <- NULL
if (is.null(sp)) id1 <- d@features$rID
else {
id1 <- .getSpeciesIndex(d,sp)
}
if (!is.null(groups)) {
id2 <- .getGroupIndex(d,groups)
}
if (is.null(id2)) id1
else id1[id1 %in% id2]
}
.getGroupNames <- function(d,levels=FALSE) {
if (levels) {
if (!is.null(names(d@groups))) {
nn <- c()
for (n in names(d@groups)) nn <- c(nn,as.character(d@groups[[n]]@values[,2]))
nn
} else NULL
} else names(d@groups)
}
.newGroup <- function(d,name,values=NULL,index=NULL) {
d@groups[[name]] <- .newgroup(name,values,index)
d
}
if (!isGeneric('.addLog<-')) {
setGeneric('.addLog<-', function(d,value)
standardGeneric('.addLog<-'))
}
setReplaceMethod('.addLog','sdmdata',
function(d,value) {
d@errorLog <- c(d@errorLog,value)
d
}
)
.isBinomial <- function(x) {
if (is.numeric(x)) {
u <- unique(x)
if (length(u) > 2) return(FALSE)
else if (length(u) == 2) return(all(sort(u) == c(0,1)) | all(sort(u) == c(-1,1)))
else return(u == 1)
} else if (is.logical(x)) return(TRUE)
else {
x <- as.character(x)
u <- unique(x)
if (length(u) > 2) return(FALSE)
else if (length(u) == 2) return(all(sort(u) == c('0','1')) | all(sort(u) == c('-1','1')))
else return(u == '1')
}
}
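## Behaviour sketch of .isBinomial() (follows directly from the branches above):
##   .isBinomial(c(0, 1, 1, 0))   # TRUE  - two numeric levels coded 0/1
##   .isBinomial(c(-1, 1))        # TRUE  - the -1/1 coding is accepted as well
##   .isBinomial(c(0, 1, 2))      # FALSE - more than two distinct values
##   .isBinomial(c("1", "1"))     # TRUE  - a single level equal to "1" counts as binomial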
.speciesType <- function(x) {
u <- unique(x)
if (is.numeric(x)) {
if (length(u) > 2) return('Abundance')
else if (length(u) == 2) {
if ((all(sort(u) == c(0,1)) || all(sort(u) == c(-1,1)))) return('Presence-Absence')
else return('Abundance')
} else {
if (u == 1) return('Presence-Only')
else if (u == 0 || u == -1) return('Absence-Only!')
else return('Abundance_constant!')
}
} else if (is.logical(x)) {
if (length(u) == 2) return('Presence-Absence')
else {
if (u) return('Presence-Only')
else return('Absence-Only!')
}
} else {
x <- as.character(x)
u <- unique(x)
if (length(u) > 2) return('Presence-Only')
else if (length(u) == 2) {
if (all(sort(u) == c('0','1')) || all(sort(u) == c('-1','1'))) return('Presence-Absence')
else return('Presence-Only')
} else return('Presence-Only')
}
}
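## Behaviour sketch of .speciesType() (follows directly from the branches above):
##   .speciesType(c(0, 1, 1, 0))   # "Presence-Absence"
##   .speciesType(c(1, 1, 1))      # "Presence-Only"
##   .speciesType(c(0.4, 3, 12))   # "Abundance"
##   .speciesType(c(0, 0, 0))      # "Absence-Only!"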
.varExist <-function(data,vars) {
all(vars %in% names(data))
}
.getSpecies <- function(data,nsp,bg=FALSE,id.train=NULL,id.test=NULL) {
species <- list()
for (n in nsp) {
if (!is.null(id.test)) {
typ1 <- .speciesType(data[id.train,n])
typ2 <- .speciesType(data[id.test,n])
if (typ1 == typ2) typ <- typ1
else stop('train and test data have different types (for example, one may be presence-only while the other is presence-absence)!')
} else typ <- .speciesType(data[,n])
if (typ == 'Presence-Absence') {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
w <- as.numeric(data[,n])
species[[n]]@presence <- data$rID[which(w == 1)]
if (bg) {
species[[n]]@background <- data$rID[which(w %in% c(0,-1))]
species[[n]]@type <- 'Presence-Background'
} else {
species[[n]]@absence <- data$rID[which(w %in% c(0,-1))]
species[[n]]@type <- typ
}
} else if (typ == 'Presence-Only') {
if (is.numeric(data[,n]) || is.logical(data[,n])) {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
species[[n]]@presence <- data$rID
species[[n]]@type <- typ
} else {
w <- as.character(data[,n])
u <- unique(w)
if ('0' %in% u) {
u <- u[u != '0']
bg <- data$rID[which(w == '0')]
} else bg <- NULL
for (uu in u) {
species[[uu]] <- new('.species.data')
species[[uu]]@name <- uu
species[[uu]]@presence <- data$rID[which(w == uu)]
species[[uu]]@type <- typ
}
if (!is.null(bg)) {
for (uu in u) {
species[[uu]]@background <- bg
species[[uu]]@type <- 'Presence-Background'
}
}
}
} else if (typ == 'Abundance') {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
species[[n]]@abundance <- data.frame(rID=data$rID,abundance=data[,n])
species[[n]]@type <- typ
} else if (typ == 'Abundance_constant!') {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
species[[n]]@abundance <- data.frame(rID=data$rID,abundance=data[,n])
species[[n]]@type <- typ
warning(paste('for species',n,', the variance in abundance data is ZERO!'))
} else if (typ == 'Multinomial') {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
species[[n]]@multinomial <- data.frame(rID=data$rID,name=data[,n])
species[[n]]@type <- typ
} else if (typ == 'Absence-Only!') {
species[[n]] <- new('.species.data')
species[[n]]@name <- n
if (bg) {
species[[n]]@background <- data$rID
species[[n]]@type <- 'Background'
} else {
species[[n]]@absence <- data$rID
species[[n]]@type <- typ
}
warning(paste('for species',n,', there is no presence record (all values are 0 or absence)!'))
}
}
species
}
.dataClean <- function(x,nsp,ANY=TRUE) {
rm.na <-0; rm.duplicate <- 0
w <- nrow(x)
x <- unique(x)
if (nrow(x) < w) rm.duplicate <- w - nrow(x)
w <- nrow(x)
if (!missing(nsp)) {
if (length(nsp) > 1) ww <- which(apply(x[,which(colnames(x) %in% nsp)],1,function(x){any(is.na(x))}))
else ww <- which(is.na(x[,which(colnames(x) %in% nsp)]))
if (length(ww) > 0) {
x <- x[-ww,]
rm.na <- w - nrow(x)
}
} else {
if (ANY) ww <- which(apply(x,1,function(x){any(is.na(x))}))
else ww <- which(apply(x,1,function(x){all(is.na(x))}))
if (length(ww) > 0) {
x <- x[-ww,]
rm.na <- w - nrow(x)
}
}
list(x,c(na=rm.na,duplicate=rm.duplicate))
}
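## Behaviour sketch of .dataClean() (illustrative):
##   toy <- data.frame(sp = c(1, 1, NA), v = c(2, 2, 3))
##   .dataClean(toy)
## drops the duplicated row and (with ANY = TRUE) the row containing NA; the second list
## element reports the counts, here c(na = 1, duplicate = 1).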
.colNumber <- function(d,n) {
unlist(lapply(n,function(x) which(colnames(d) == x)))
}
.char2time <- function(d,...) {
if (length(list(...)) > 0) {
tst <- try(as.POSIXct(d[1],...),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.POSIXct(d,...))
else {
tst <- try(as.POSIXct(d[1]),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.POSIXct(d))
else {
tst <- try(as.Date(d[1],...),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.Date(d,...))
else {
tst <- try(as.Date(d[1]),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.Date(d))
else return(NA)
}
}
}
} else {
tst <- try(as.POSIXct(d[1]),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.POSIXct(d))
else {
tst <- try(as.Date(d[1]),silent=TRUE)
if (!inherits(tst, "try-error") && !is.na(tst)) return(as.Date(d))
else return(NA)
}
}
}
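## Behaviour sketch of .char2time() (illustrative; uses only base R parsing):
##   .char2time(c("2001-05-17", "2001-06-02"))                  # parsed by as.POSIXct()
##   .char2time("17/05/2001", format = "%d/%m/%Y", tz = "UTC")  # extra arguments are passed through
##   .char2time("not a date")                                   # NA when neither as.POSIXct() nor as.Date() succeeds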
.where <- function(f, x) {
vapply(x, f, logical(1))
}
.int.to.numeric <- function(data) {
w <- which(unlist(lapply(data,is.integer)))
if (length(w) > 0) {
for (i in w) data[,i] <- as.numeric(data[,i])
}
data
}
.which.is.coords <- function(n) {
nxy <- NULL
w <- tolower(n) %in% c('x','y','coords.x1','coords.x2','coords.x','coords.y','lon','long','longitude','lat','latitude')
if (any(w)) {
nxy <- n[w]
if (length(nxy) == 2) {
nxy <- c(nxy[tolower(nxy) %in% c('x','coords.x1','coords.x','lon','long','longitude')],nxy[tolower(nxy) %in% c('y','coords.x2','coords.y','lat','latitude')])
} else nxy <- NULL
}
nxy
}
.normalize <- function(x,except=NULL) {
  w <- !.where(is.factor,x)
  if (!is.null(except)) {
    w[except] <- FALSE
  }
  # keep a copy so factor/excluded columns (or data with no numeric columns) are returned unchanged
  xx <- x
  if (any(w)) {
    for (i in seq_along(w)) {
      if (w[i]) {
        xx[,i] <- xx[,i] - mean(xx[,i],na.rm=TRUE)
        if (sd(x[,i],na.rm=TRUE) != 0) xx[,i] <- xx[,i] / sd(x[,i],na.rm=TRUE)
      }
    }
  }
  xx
}
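## Behaviour sketch of .normalize() (illustrative):
##   toy <- data.frame(a = 1:5, b = factor(letters[1:5]))
##   .normalize(toy)              # 'a' is centred and scaled to unit sd; the factor 'b' is untouched
##   .normalize(toy, except = 1)  # a numeric column can also be excluded by its index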
.speciesDetect <- function(data) {
nsp <- nxy <- nFact <- nf <- nt <- NULL
w <- which(unlist(lapply(data,.isBinomial)))
if (length(w) == 0) {
stop('No species variable was detected; specify the species variable in the formula or use an appropriate data structure...')
} else {
varNames <- colnames(data)
nsp <- varNames[w]
if (length(varNames) > length(nsp)) {
nf <- varNames[-w]
w <- which(unlist(lapply(data[,nf],function(x) class(x) %in% c("POSIXct","POSIXt","Date","yearmon","yearqtr"))))
if (length(w) > 0) {
nt <- nf[w]
nf <- .excludeVector(nf,nt)
}
w <- which(unlist(lapply(data[,nf],function(x) class(x) %in% c('character','factor'))))
if (length(w) > 0) {
nFact <- nf[w]
nf <- .excludeVector(nf,nFact)
}
}
w <- tolower(varNames) %in% c('lon','long','longitude')
if (any(w) && length(which(w)) == 1) {
nx <- varNames[w]
w <- tolower(varNames) %in% c('lat','latitude')
if (any(w) && length(which(w)) == 1) nxy <- c(nx,varNames[w])
}
}
list(nsp=nsp,nf=nf,nFact=nFact,nxy=nxy,nt=nt)
}
.createSdmdata <- function(train,formula=NULL,test,bg=NULL,crs=NULL,author=NULL,website=NULL,citation=NULL,help=NULL,description=NULL,date=NULL,license=NULL) {
if (missing(test)) test <- NULL
nFact <- nf <- nxy <- nsp <- ng <- nt <- ni <- NULL
if (is.null(formula)) {
nnFact <- nnf <- nnxy <- nnt <- NULL
w <- .speciesDetect(train)
if (!is.null(w$nFact)) {
nFact <- w$nFact
nnFact <- paste(paste('f(',w$nFact,')',sep=''),collapse='+')
}
if (!is.null(w$nf)) {
nf <- w$nf
nnf <- paste(w$nf,collapse='+')
}
if (!is.null(w$nxy)) {
nxy <- w$nxy
nnxy <- paste(paste('coords(',paste(w$nxy,collapse='+'),')',sep=''),collapse='+')
}
if (!is.null(w$nt)) {
nt <- w$nt
nnt <- paste(paste('time(',w$nt,')',sep=''),collapse='+')
}
formula <- as.formula(paste(paste(w$nsp,collapse="+"),'~',paste(c(nnf,nnFact,nnxy,nnt),collapse='+')),env = parent.frame())
}
exf <- .exFormula(formula,train)
nall <- c(exf@vars@names,exf@species)
if (!.varExist(train,nall)) stop('one or more specified variables in the formula do not exist in the train data!')
nsp <- exf@species
d <- new('sdmdata')
d@sdmFormula <- exf
w <- .dataClean(train)
if (any(w[[2]] > 0)) {
train <- w[[1]]
ww <- c()
if (w[[2]][1] > 0) .addLog(d) <- paste(w[[2]][1],'records with NA from the train data are removed')
if (w[[2]][2] > 0) .addLog(d) <- paste(w[[2]][2],'duplicated records from the train data are removed')
}
train$rID <- 1:nrow(train)
train <- .int.to.numeric(train)
if (!is.null(bg)) {
w <- which(nsp %in% colnames(bg))
if (length(w) > 0) {
for (nnsp in nsp[w]) bg[,nnsp] <- 0
}
w <- which(!nsp %in% colnames(bg))
if (length(w) > 0) {
nnsp <- nsp[w]
nnsp <- matrix(0,nrow=nrow(bg),ncol=length(nnsp))
colnames(nnsp) <- nsp[w]
bg <- cbind(bg,nnsp)
}
bg <- as.data.frame(bg)
if (!.varExist(data.frame(bg),nall)) stop('one or more predictor variables do not exist in the background data!')
w <- .dataClean(bg)
if (any(w[[2]] > 0)) {
bg <- w[[1]]
ww <- c()
if (w[[2]][1] > 0) .addLog(d) <- paste(w[[2]][1],'records with NA from the background data are removed')
if (w[[2]][2] > 0) .addLog(d) <- paste(w[[2]][2],'duplicated records from the background data are removed')
}
for (n in nsp) {
if (is.factor(train[,n]) || is.character(train[,n])) {
train[,n] <- as.character(train[,n])
bg[,n] <- as.character(bg[,n])
}
}
bg$rID <- (nrow(train)+1):(nrow(bg)+nrow(train))
train <- rbind(train[,c('rID',nall)],bg[,c('rID',nall)])
bg <- bg$rID
}
if (!is.null(test)) {
if (!.varExist(test,nall)) stop('one or more specified variables in the formula does not exist in the test data!')
w <- .dataClean(test)
if (any(w[[2]] > 0)) {
test <- w[[1]]
ww <- c()
if (w[[2]][1] > 0) .addLog(d) <- paste(w[[2]][1],'records with NA from the test data are removed')
if (w[[2]][2] > 0) .addLog(d) <- paste(w[[2]][2],'duplicated records from the test data are removed')
}
test$rID <- (nrow(train)+1):(nrow(test)+nrow(train))
if (!is.null(bg)) {
w <- unlist(lapply(nsp,function(x) .speciesType(test[,x])))
w <- unique(w)
if (length(w) > 1) stop(paste('Independent test data has different types of records including',paste(w,collapse=', ')))
else if (w == "Presence-Only") {
d <- .newGroup(d,'training',index=list(train=train$rID,test=c(test$rID,bg)))
cat('WARNING:\n Independent test dataset contains only presence records, so background data (pseudo-absences) are used as absences in the dataset!\n')
} else d <- .newGroup(d,'training',index=list(train=train$rID,test=test$rID))
} else d <- .newGroup(d,'training',index=list(train=train$rID,test=test$rID))
test <- .int.to.numeric(test)
} else {
d <- .newGroup(d,'training',index=list(train=train$rID))
}
if (is.null(nf) & is.null(nFact)) {
nf <- exf@vars@names
w <- .where(is.factor,train[,nf]) | .where(is.character,train[,nf])
if (any(w)) nFact <- nf[w]
    if (!is.null(exf@model.terms)) {
      w <- unlist(lapply(exf@model.terms,class))
      if ('.factor' %in% w) {
        ww <- exf@model.terms[w == '.factor']
if (length(ww) > 0) {
for (i in seq_along(ww)) {
w <- as.character(ww[[i]]@x)
w <- .excludeVector(w,'+')
nFact <- unique(c(nFact,w))
}
}
}
}
nf <- .excludeVector(nf,nFact)
    if (!is.null(exf@data.terms)) {
      w <- unlist(lapply(exf@data.terms,class))
      if (".coord.vars" %in% w) nxy <- exf@data.terms[[which(w == ".coord.vars")]]@xy
if (".grouping" %in% w) {
ng <- c()
ww <- exf@data.terms[which(w == ".grouping")]
for (i in seq_along(ww)) ng <- c(ng,ww[[i]]@group.var)
nf <- .excludeVector(nf,ng)
nFact <- .excludeVector(nFact,ng)
}
if ('.time' %in% w) {
nt <- c()
ww <- exf@data.terms[which(w == ".time")]
for (i in seq_along(ww)) nt <- c(nt,as.character(ww[[i]]@terms[[1]]))
nf <- .excludeVector(nf,nt)
nFact <- .excludeVector(nFact,nt)
}
if ('.Info' %in% w) {
ni <- c()
ww <- exf@data.terms[which(w == ".Info")]
for (i in seq_along(ww)) ni <- c(ni,ww[[i]]@names)
nf <- .excludeVector(nf,ni)
nFact <- .excludeVector(nFact,ni)
}
} else {
w <- !colnames(train) %in% c(nall,'rID')
if (any(w)) {
nxy <- .which.is.coords(colnames(train)[w])
if (!is.null(test) && !.varExist(test,nxy)) nxy <- NULL
ww <- unlist(lapply(which(w),function(x) class(train[,x]))) %in% c("POSIXct","POSIXt","Date","yearmon","yearqtr")
if (any(ww)) {
nt <- colnames(train)[w[which(ww)]]
nf <- .excludeVector(nf,nt)
nFact <- .excludeVector(nFact,nt)
}
}
}
nf <- .excludeVector(nf,nxy)
nFact <- .excludeVector(nFact,nxy)
}
nall <- c(nsp,nf,nFact,nxy,ng,ni,nt,'rID')
if (!is.null(test)) {
train <- rbind(train[,nall],test[,nall])
rm(test)
species <- .getSpecies(train,nsp,bg=!is.null(bg),id.train = d@groups$training@indices$train,id.test = d@groups$training@indices$test)
} else {
train <- train[,nall]
species <- .getSpecies(train,nsp,bg=!is.null(bg))
}
if (!is.null(ng)) {
for (n in ng) {
ww <- as.character(train[,n])
u <- unique(ww)
if (length(u) == 1) warning(paste('the grouping variable',n,'is ignored; it is constant!'))
else {
w <- list()
for (uu in u) {
w[[uu]] <- train$rID[which(ww == uu)]
}
d <- .newGroup(d,n,index=w)
}
}
}
if (!is.null(c(nxy,ni,nt,website,help,description,date,license)) || !is.null(c(citation,author))) {
d@info <- new('.info')
if (!is.null(nxy)) {
d@info@coords <- as.matrix(train[,c('rID',nxy)])
if (!is.null(crs) && inherits(crs,'CRS')) d@info@crs <- crs
}
if (!is.null(ni)) d@info@info <- train[,c('rID',ni)]
if (!is.null(nt)) {
dt <- data.frame(rID=train$rID)
for (n in nt) {
if ((class(train[,n]) %in% c("POSIXct","POSIXt","Date","yearmon","yearqtr"))[1]) {
dt <- cbind(dt,train[,n])
} else {
            w <- unlist(lapply(exf@data.terms,class))
            w <- exf@data.terms[which(w == ".time")]
w <- w[[which(unlist(lapply(w,function(x) x@terms[[1]] == n)))]]
if (length(w@terms) > 1 && !is.null(names(w@terms)) && length(names(w@terms)) == length(w@terms)) {
w <- do.call(.char2time,c(list(d=train[,n]),w@terms[2:length(w@terms)]))
if ((class(w) %in% c("POSIXct","POSIXt","Date","yearmon","yearqtr"))[1]) dt[,n] <- w
else warning(paste('a time-based format is not detected for variable',n,", so it is IGNORED; it must have a detectable character format, or being one of time-based classes including: 'POSIXct', 'POSIXt', 'Date', 'yearmon','yearqtr'"))
} else {
w <- .char2time(train[,n])
if ((class(w) %in% c("POSIXct","POSIXt","Date","yearmon","yearqtr"))[1]) dt[,n] <- w
else warning(paste('a time-based format is not detected for variable',n,", so it is IGNORED; it must have a detectable character format, or being one of time-based classes including: 'POSIXct', 'POSIXt', 'Date', 'yearmon','yearqtr'"))
}
}
}
if (ncol(dt) > 1) d@info@time <- dt
}
if (!is.null(c(website,help,description,date,license)) || !is.null(c(citation,author))) {
d@info@metadata <- .newMetadata(authors=author,web=website,cit=citation,desc=description,date=date,license=license,help=help)
}
}
d@features.name <- c(nf,nFact)
if (!is.null(nFact)) d@factors <- nFact
d@species <- species
d@species.names <- names(species)
for (n in nFact) train[,n] <- factor(train[,n])
if (!is.null(d@features.name)) d@features <- train[,c('rID',nf,nFact)]
d
}
.Extract <- function(x,cells,factors) {
n <- names(x)
if (length(factors) == 1) {
x2 <- values(x[[factors]])[cells]
} else x2 <- values(x[[factors]])[cells,]
if (length(n) > length(factors)) {
x1 <- x[[-factors]][cells]
d <- data.frame(x1,x2)
colnames(d) <- c(n[-factors],n[factors])
} else {
d <- data.frame(x2)
colnames(d) <- n[factors]
}
for (i in 1:length(factors)) d[,i] <- factor(d[,i])
return(d )
}
.pseudo_gRandom <- function(preds,n=1000,p=NULL) {
s <- sampleRandom(preds,n,cells=TRUE,xy=TRUE)
if (!is.null(p) && ncol(p) == 2) {
p.cells <- cellFromXY(preds,p)
if (length(p.cells) > 0) {
s <- s[which(!s[,'cell'] %in% p.cells),]
}
}
s[,-1]
}
## the following pseudo-absence selection strategies are placeholder stubs (they currently return NULL)
.pseudo_eRandom <- function(preds,n=1000,p=NULL) {
}
.pseudo_gDist <- function(preds,n=1000,p=NULL) {
}
.pseudo_eDist <- function(preds,n=1000,p=NULL) {
}
.pseudo <- function(preds,n=1000,method='gRandom',p=NULL) {
if (method == 'gRandom') .pseudo_gRandom(preds,n=n,p=p)
else if (method == 'eRandom') .pseudo_eRandom(preds,n=n,p=p)
else if (method == 'gDist') .pseudo_gDist(preds,n=n,p=p)
else if (method == 'eDist') .pseudo_eDist(preds,n=n,p=p)
}
if (!isGeneric("sdmData")) {
setGeneric("sdmData", function(formula, train, test, predictors,bg, filename, crs,...)
standardGeneric("sdmData"))
}
setMethod('sdmData', signature(train='data.frame',predictors='missing'),
function(formula,train,test=NULL,predictors,bg=NULL,filename=NULL,crs=NULL,...) {
if(missing(test)) test <- NULL
if(missing(filename)) filename <- NULL
if(missing(crs)) crs <- NULL
if(missing(formula)) formula <- NULL
if(missing(bg)) bg <- NULL
if (!.sdmOptions$getOption('sdmLoaded')) .addMethods()
dot <- list(...)
n <- tolower(names(dot))
for (i in seq_along(n)) {
if (any(!is.na(pmatch(c("aut"),n[i])))) n[i] <- 'author'
else if (any(!is.na(pmatch(c("web"),n[i])))) n[i] <- 'website'
else if (any(!is.na(pmatch(c("cit"),n[i])))) n[i] <- 'citation'
else if (any(!is.na(pmatch(c("hel"),n[i])))) n[i] <- 'help'
else if (any(!is.na(pmatch(c("des"),n[i])))) n[i] <- 'description'
else if (any(!is.na(pmatch(c("dat"),n[i])))) n[i] <- 'date'
else if (any(!is.na(pmatch(c("lic"),n[i])))) n[i] <- 'license'
}
names(dot) <- n
author <- dot[['author']]
website <- dot[['website']]
citation <- dot[['citation']]
help <- dot[['help']]
description <- dot[['description']]
date <- dot[['date']]
license <- dot[['license']]
.createSdmdata(train = train, formula = formula, test = test,bg=bg,crs = crs,author=author,website=website,citation=citation,help=help,description=description,date=date,license=license)
}
)
setMethod('sdmData', signature(formula='data.frame',train='formula',predictors='missing'),
function(formula,train,test=NULL,predictors,bg=NULL,filename=NULL,crs=NULL,...) {
if(missing(test)) test <- NULL
if(missing(filename)) filename <- NULL
if(missing(crs)) crs <- NULL
if(missing(bg)) bg <- NULL
sdmData(formula=train, train=formula,test=test,bg=bg,filename=filename,crs=crs,...)
}
)
setMethod('sdmData', signature(formula='data.frame',train='missing',predictors='missing'),
function(formula,train,test=NULL,predictors,bg=NULL,filename=NULL,crs=NULL,...) {
if(missing(test)) test <- NULL
if(missing(filename)) filename <- NULL
if(missing(crs)) crs <- NULL
if(missing(bg)) bg <- NULL
sdmData(train=formula,test=test,bg=bg,filename=filename,crs=crs,...)
}
)
setMethod('sdmData', signature(train='SpatialPoints',predictors='missing'),
function(formula,train,test=NULL,predictors,bg=NULL,filename=NULL,crs=NULL,...) {
if(missing(test)) test <- NULL
if(missing(filename)) filename <- NULL
if(missing(crs)) crs <- NULL
if(missing(bg)) bg <- NULL
if (!.sdmOptions$getOption('sdmLoaded')) .addMethods()
nxy <- coordnames(train)
if (!is.null(test)) {
if (class(train) == class(test)) {
nxyt <- coordnames(test)
if (as.character(class(test)) == 'SpatialPoints') test <- data.frame(SPECIES=rep(1,length(test)),as(test,'data.frame'))
else test <- as(test,'data.frame')
if (all(nxy != nxyt)) {
colnames(test)[unlist(lapply(nxyt,function(x) which(colnames(test) ==x)))] <- nxy
}
}
}
if (!is.na(proj4string(train))) crs <- CRS(proj4string(train))
if (as.character(class(train)) == 'SpatialPoints') train <- data.frame(SPECIES=rep(1,length(train)),as(train,'data.frame'))
else train <- as(train,'data.frame')
if (!missing(formula)) {
if (!all(nxy %in% all.vars(formula))) {
if ('.' %in% all.vars(formula)) {
ww <- .exFormula(formula,train)
nw <- .excludeVector(colnames(train),c(ww@species,nxy))
if (length(nw) > 0) {
w <- as.character(.exFormula(formula,train,FALSE)@species)
if (length(w) == 0) formula[[2]] <- terms.formula(formula,data=train[,nw])[[2]]
else {
formula[[3]] <- terms.formula(formula,data=train[,nw])[[3]]
if (colnames(train)[1] == 'SPECIES') {
colnames(train)[1] <- w[1]
if (!is.null(test) && colnames(test)[1] == 'SPECIES') colnames(test)[1] <- w[1]
}
}
}
}
formula <- update(formula,as.formula(paste('~ . + coords(',paste(nxy,collapse='+'),')',sep='')))
}
} else formula <- as.formula(paste('~ . + coords(',paste(nxy,collapse='+'),')',sep=''))
dot <- list(...)
n <- tolower(names(dot))
for (i in seq_along(n)) {
if (any(!is.na(pmatch(c("aut"),n[i])))) n[i] <- 'author'
else if (any(!is.na(pmatch(c("web"),n[i])))) n[i] <- 'website'
else if (any(!is.na(pmatch(c("cit"),n[i])))) n[i] <- 'citation'
else if (any(!is.na(pmatch(c("hel"),n[i])))) n[i] <- 'help'
else if (any(!is.na(pmatch(c("des"),n[i])))) n[i] <- 'description'
else if (any(!is.na(pmatch(c("dat"),n[i])))) n[i] <- 'date'
else if (any(!is.na(pmatch(c("lic"),n[i])))) n[i] <- 'license'
}
names(dot) <- n
author <- dot[['author']]
website <- dot[['website']]
citation <- dot[['citation']]
help <- dot[['help']]
description <- dot[['description']]
date <- dot[['date']]
license <- dot[['license']]
.createSdmdata(train = train, formula = formula, test = test,bg=bg,crs = crs,author=author,website=website,citation=citation,help=help,description=description,date=date,license=license)
}
)
setMethod('sdmData', signature(train='SpatialPoints',predictors='Raster'),
function(formula,train,test=NULL,predictors,bg=NULL,filename=NULL,crs=NULL,...) {
if(missing(test)) test <- NULL
if(missing(filename)) filename <- NULL
if(missing(crs)) crs <- NULL
if(missing(bg)) bg <- NULL
if (!.sdmOptions$getOption('sdmLoaded')) .addMethods()
testc <- as.character(class(test))
trainc <- as.character(class(train))
trainSP <-NULL
errLog <- list()
nxy <- coordnames(train)[1:2]
wF <- is.factor(predictors)
if (!is.null(test)) {
if (inherits(test,'SpatialPoints')) {
nxyt <- coordnames(test)
if (testc == 'SpatialPointsDataFrame') test <- as(test,'data.frame')
else test <- data.frame(coordinates(test))
if (all(nxy != nxyt)) {
colnames(test)[unlist(lapply(nxyt,function(x) which(colnames(test) ==x)))] <- nxy
}
} else if (!is.data.frame(test)) stop('test data should be a data.frame or in the same class as the train data!')
if (nxy[1] %in% colnames(test) & nxy[2] %in% colnames(test)) {
cells <- cellFromXY(predictors,test[,nxy])
cNA <- is.na(cells)
if (any(cNA)) {
if (all(cNA)) stop('Test dataset has no overlap with the predictors...!')
wNA <- which(cNA)
test <- test[-wNA,]
cells <- cells[-wNA]
errLog <- c(errLog,paste(length(wNA),'records were removed from the test dataset because of no overlap with the predictors.'))
rm(wNA)
}
rm(cNA)
if (!any(wF)) test.p <- data.frame(predictors[cells])
else test.p <- .Extract(predictors,cells,which(wF))
rm(cells)
colnames(test.p) <- names(predictors)
w <- colnames(test) %in% colnames(test.p)
if (any(w)) {
test <- test[,colnames(test)[!w]]
errLog <- c(errLog,paste('WARNING: The variables',colnames(test)[w],'were removed from the test dataset as they exist in the predictors as well.'))
}
} else stop('the coordinate names in the train and test datasets do not match!')
}
if (!is.na(proj4string(train))) crs <- CRS(proj4string(train))
if (trainc == 'SpatialPointsDataFrame') train <- as(train,'data.frame')
else train <- coordinates(train)
cells <- cellFromXY(predictors,train[,nxy])
cNA <- is.na(cells)
if (any(cNA)) {
if (all(cNA)) stop('Train data has no overlap with the predictors...!')
wNA <- which(cNA)
train <- train[-wNA,]
cells <- cells[-wNA]
errLog <- c(errLog,paste(length(wNA),'records were removed from the train dataset as they had no overlap with the predictors.'))
}
rm(cNA)
if (!any(wF)) train.p <- data.frame(predictors[cells])
else train.p <- .Extract(predictors,cells,which(wF))
rm(cells)
colnames(train.p) <- names(predictors)
w <- colnames(train) %in% colnames(train.p)
if (any(w)) {
train <- train[,colnames(train)[!w]]
errLog <- c(errLog,paste('WARNING: The variables',colnames(train)[w],'were removed from the train dataset as they exist in the predictors as well.'))
}
if (trainc == 'SpatialPointsDataFrame') {
train <- data.frame(train,train.p)
rm(train.p)
if (!is.null(test)) {
test <- data.frame(test,test.p)
rm(test.p)
}
if (!missing(formula)) {
if (!all(nxy %in% all.vars(formula))) {
if ('.' %in% all.vars(formula)) {
ww <- .exFormula(formula,train)
nw <- .excludeVector(colnames(train),c(ww@species,nxy))
w <- as.character(.exFormula(formula,train,FALSE)@species)
if (length(w) == 0) formula[[2]] <- terms.formula(formula,data=train[,nw])[[2]]
else formula[[3]] <- terms.formula(formula,data=train[,nw])[[3]]
}
formula <- update(formula,as.formula(paste('~ . + coords(',paste(nxy,collapse='+'),')',sep='')))
}
} else formula <- as.formula(paste('~ . + coords(',paste(nxy,collapse='+'),')',sep=''))
} else {
if (!missing(formula)) {
ww <- as.character(.exFormula(formula,train.p,FALSE)@species)
if (length(ww) > 1) {
            warning('SpatialPoints can hold only 1 species, but more than one name is defined in the formula; only the first name is used!')
            errLog <- c(errLog,'WARNING: SpatialPoints can hold only 1 species, but more than one name is defined in the formula; only the first name is used!')
w <- ww[1]
} else if (length(ww) == 0) w <- 'SPECIES'
if (!all(nxy %in% all.vars(formula))) {
if ('.' %in% all.vars(formula)) {
if (length(ww) == 0) formula[[2]] <- terms.formula(formula,data=train.p)[[2]]
else formula[[3]] <- terms.formula(formula,data=train.p)[[3]]
}
formula <- update(formula,as.formula(paste('~ . + coords(',paste(nxy,collapse='+'),')',sep='')))
}
} else {
formula <- as.formula(paste('SPECIES ~ . + coords(',paste(nxy,collapse='+'),')',sep=''))
w <- 'SPECIES'
}
train <- data.frame(SPECIES=rep(1,nrow(train)),train)
colnames(train)[1] <- w
train <- data.frame(train,train.p)
rm(train.p)
if (!is.null(test)) {
test <- data.frame(SPECIES=rep(1,nrow(test)),test,test.p)
colnames(test)[1] <- w
rm(test.p)
}
}
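# Background (pseudo-absence) handling: 'bg' may be a list with elements n, method
# ('gRandom', 'eRandom', 'gDistance', 'eDistance') and remove, a single number of
# random background points, or a data.frame of coordinates; anything else is dropped.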
if (!is.null(bg)) {
if (inherits(bg,'list')) {
nbg <- names(bg)
nbg <- .pmatch(nbg,c('n','method','remove'))
if ('n' %in% nbg) n <- bg[['n']]
else n <- 1000
if ('method' %in% nbg) {
if (.pmatch(bg[['method']],c('gRandom','random','rnd')) %in% c('gRandom','random','rnd')) {
m <- 'gRandom'
} else if (.pmatch(bg[['method']],c('eRandom','envrandom','ernd')) %in% c('eRandom','envrandom','ernd')) {
m <- 'eRandom'
} else if (.pmatch(bg[['method']],c('gDistance','geo')) %in% c('gDistance','geo')) {
m <- 'gDist'
} else if (.pmatch(bg[['method']],c('eDistance','environ','envDist')) %in% c('eDistance','environ','envDist')) {
m <- 'eDist'
}
} else m <- 'gRandom'
if ('remove' %in% nbg && is.logical(bg[['remove']])) r <- bg[['remove']]
else r <- FALSE
bg <- .pseudo(predictors,n=n,method = m,p = if (r) train[,nxy] else NULL)
colnames(bg)[1:2] <- nxy
} else if (is.numeric(bg)) {
bg <- .pseudo(predictors,n=bg,method = 'gRandom',p = NULL)
colnames(bg)[1:2] <- nxy
} else if (!is.data.frame(bg)) bg <- NULL
}
dot <- list(...)
n <- tolower(names(dot))
for (i in seq_along(n)) {
if (any(!is.na(pmatch(c("aut"),n[i])))) n[i] <- 'author'
else if (any(!is.na(pmatch(c("web"),n[i])))) n[i] <- 'website'
else if (any(!is.na(pmatch(c("cit"),n[i])))) n[i] <- 'citation'
else if (any(!is.na(pmatch(c("hel"),n[i])))) n[i] <- 'help'
else if (any(!is.na(pmatch(c("des"),n[i])))) n[i] <- 'description'
else if (any(!is.na(pmatch(c("dat"),n[i])))) n[i] <- 'date'
else if (any(!is.na(pmatch(c("lic"),n[i])))) n[i] <- 'license'
}
names(dot) <- n
author <- dot[['author']]
website <- dot[['website']]
citation <- dot[['citation']]
help <- dot[['help']]
description <- dot[['description']]
date <- dot[['date']]
license <- dot[['license']]
d <- .createSdmdata(train = train, formula = formula, test = test,bg=bg,crs = crs,author=author,website=website,citation=citation,help=help,description=description,date=date,license=license)
if (length(errLog) > 0) {
for (i in seq_along(errLog)) .addLog(d) <- errLog[[i]]
}
d
}
)
|
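# Helper for the geom_rect tests below: builds the plotly figure, drops traces that
# carry no x/y data, and checks that the expected number of non-empty traces remains.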
expect_traces <- function(gg, n.traces, name) {
stopifnot(is.numeric(n.traces))
L <- expect_doppelganger_built(gg, paste0("rect-", name))
all.traces <- L$data
no.data <- sapply(all.traces, function(tr) {
is.null(tr[["x"]]) && is.null(tr[["y"]])
})
has.data <- all.traces[!no.data]
expect_equivalent(length(has.data), n.traces)
list(data=has.data, layout=L$layout)
}
df <- data.frame(
x = sample(10, 20, replace = TRUE),
y = sample(10, 20, replace = TRUE)
)
gg <- ggplot(df, aes(xmin = x, xmax = x + 1, ymin = y, ymax = y + 2)) +
geom_rect()
test_that('geom_rect becomes 1 trace with mode="lines" fill="toself"', {
info <- expect_traces(gg, 1, "black")
tr <- info$data[[1]]
expect_identical(tr$fill, "toself")
expect_identical(tr$type, "scatter")
expect_identical(tr$mode, "lines")
for(xy in c("x", "y")) {
expect_true(anyNA(tr[[xy]]))
}
})
df4 <- data.frame(
x = 1:4,
status = c("cool", "not", "not", "cool")
)
gg4 <- ggplot(df4, aes(xmin = x, xmax = x + 0.5, ymin = 0, ymax = 1)) +
geom_rect()
test_that('trace contains NA back to 1st rect', {
info <- expect_traces(gg4, 1, "black4")
tr <- info$data[[1]]
expect_identical(tr$fill, "toself")
expect_identical(tr$type, "scatter")
expect_identical(tr$mode, "lines")
expected.x <- c(1, 1, 1.5, 1.5, 1, NA,
2, 2, 2.5, 2.5, 2, NA,
3, 3, 3.5, 3.5, 3, NA,
4, 4, 4.5, 4.5, 4)
expect_equivalent(tr$x, expected.x)
expected.y <- c(0, 1, 1, 0, 0, NA,
0, 1, 1, 0, 0, NA,
0, 1, 1, 0, 0, NA,
0, 1, 1, 0, 0)
expect_equivalent(tr$y, expected.y)
})
rect.color <- ggplot(df4, aes(xmin = x, xmax = x + 0.5, ymin = 0, ymax = 1)) +
geom_rect(aes(color = status), fill="grey")
test_that('rect color', {
info <- expect_traces(rect.color, 2, "color")
traces.by.name <- list()
for(tr in info$data){
expect_true(tr$fillcolor == toRGB("grey"))
expect_true(tr$fill == "toself")
expect_equivalent(tr$y, c(0, 1, 1, 0, 0, NA, 0, 1, 1, 0, 0))
traces.by.name[[tr$name]] <- tr
}
expect_equivalent(
traces.by.name[[1]]$x, c(1, 1, 1.5, 1.5, 1, NA, 4, 4, 4.5, 4.5, 4)
)
expect_equivalent(
traces.by.name[[2]]$x,c(2, 2, 2.5, 2.5, 2, NA, 3, 3, 3.5, 3.5, 3)
)
expect_false(
traces.by.name[[1]]$line$color == traces.by.name[[2]]$line$color
)
})
rect.fill <- ggplot(df4, aes(xmin = x, xmax = x + 0.5, ymin = 0, ymax = 1)) +
geom_rect(aes(fill = status))
test_that('rect fill', {
info <- expect_traces(rect.fill, 2, "fill")
traces.by.name <- list()
for(tr in info$data){
expect_true(tr$line$color == "transparent")
expect_true(tr$fill == "toself")
expect_equivalent(tr$y, c(0, 1, 1, 0, 0, NA, 0, 1, 1, 0, 0))
traces.by.name[[tr$name]] <- tr
}
expect_equivalent(
traces.by.name[[1]]$x, c(1, 1, 1.5, 1.5, 1, NA, 4, 4, 4.5, 4.5, 4)
)
expect_equivalent(
traces.by.name[[2]]$x, c(2, 2, 2.5, 2.5, 2, NA, 3, 3, 3.5, 3.5, 3)
)
expect_false(
traces.by.name[[1]]$fillcolor == traces.by.name[[2]]$fillcolor
)
})
rect.fill.color <-
ggplot(df4, aes(xmin = x, xmax = x + 0.5, ymin = 0, ymax = 1)) +
geom_rect(aes(fill = status), color="black")
test_that('rect aes(fill) with constant color', {
info <- expect_traces(rect.fill.color, 2, "fill-color")
traces.by.name <- list()
for(tr in info$data){
expect_true(tr$line$color == toRGB("black"))
expect_true(tr$fill == "toself")
expect_equivalent(
tr$y, c(0, 1, 1, 0, 0, NA, 0, 1, 1, 0, 0)
)
traces.by.name[[tr$name]] <- tr
}
expect_equivalent(
traces.by.name[[1]]$x, c(1, 1, 1.5, 1.5, 1, NA, 4, 4, 4.5, 4.5, 4)
)
expect_equivalent(
traces.by.name[[2]]$x, c(2, 2, 2.5, 2.5, 2, NA, 3, 3, 3.5, 3.5, 3)
)
expect_false(
traces.by.name[[1]]$fillcolor == traces.by.name[[2]]$fillcolor
)
})
p <- ggplot(data = data.frame(x1 = 1, x2 = 2, y1 = 1, y2 = 2)) +
geom_rect(aes(xmin = x1, xmax = x2, ymin = y1, ymax = y2),
fill = "#00000011") # low-alpha black fill; exact hex value assumed, chosen so toRGB() yields the rgba(0,0,0,0.06...) string checked below
test_that('Specifying alpha in hex color code works', {
info <- expect_traces(p, 1, "fill-hex-alpha")
expect_match(info$data[[1]]$fillcolor, "rgba\\(0,0,0,0\\.0[6]+")
})
|
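# Tests of the advanced intercross line (AIL) HMM: genotype codes 1-3 are the
# autosomal/female-X genotypes and 4-5 the male-X genotypes; the final argument of
# each test_* helper is the cross info, apparently c(n_gen, cross_direction).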
context("basic HMM functions in AIL")
test_that("AIL nalleles works", {
expect_equal(nalleles("ail"), 2)
})
test_that("AIL check_geno works", {
for(i in 0:5)
expect_true(test_check_geno("ail", i, TRUE, FALSE, FALSE, c(20, 0)))
for(i in 1:3)
expect_true(test_check_geno("ail", i, FALSE, FALSE, FALSE, c(20, 0)))
for(i in c(-1, 6))
expect_false(test_check_geno("ail", i, TRUE, FALSE, FALSE, c(20, 0)))
for(i in c(0, 4))
expect_false(test_check_geno("ail", i, FALSE, FALSE, FALSE, c(20, 0)))
for(dir in 0:2) {
for(i in 0:5)
expect_true(test_check_geno("ail", i, TRUE, TRUE, TRUE, c(20, dir)))
for(i in 1:3)
expect_true(test_check_geno("ail", i, FALSE, TRUE, TRUE, c(20, dir)))
for(i in c(-1, 6))
expect_false(test_check_geno("ail", i, TRUE, TRUE, TRUE, c(20, dir)))
for(i in c(0, 4:6))
expect_false(test_check_geno("ail", i, FALSE, TRUE, TRUE, c(20, dir)))
for(i in 0:5)
expect_true(test_check_geno("ail", i, TRUE, TRUE, FALSE, c(20, dir)))
for(i in c(4,5))
expect_true(test_check_geno("ail", i, FALSE, TRUE, FALSE, c(20, dir)))
for(i in c(-1, 6))
expect_false(test_check_geno("ail", i, TRUE, TRUE, FALSE, c(20, dir)))
for(i in c(0:3, 6))
expect_false(test_check_geno("ail", i, FALSE, TRUE, FALSE, c(20, dir)))
}
})
test_that("AIL n_gen works", {
expect_equal(test_ngen("ail", FALSE), 3)
expect_equal(test_ngen("ail", TRUE), 5)
})
test_that("AIL possible_gen works", {
expect_equal(test_possible_gen("ail", FALSE, FALSE, c(20,0)), 1:3)
for(dir in 0:2) {
expect_equal(test_possible_gen("ail", TRUE, TRUE, c(20,dir)), 1:3)
expect_equal(test_possible_gen("ail", TRUE, FALSE, c(20,dir)), 4:5)
}
})
test_that("AIL init works", {
for(n_gen in c(3,9,12,15)) {
expect_equal(test_init("ail", 1, FALSE, FALSE, c(n_gen, 0)), log(0.25))
expect_equal(test_init("ail", 2, FALSE, FALSE, c(n_gen, 0)), log(0.5))
expect_equal(test_init("ail", 3, FALSE, FALSE, c(n_gen, 0)), log(0.25))
expect_equal(test_init("ail", 1, TRUE, TRUE, c(n_gen, 2)), log(0.25))
expect_equal(test_init("ail", 2, TRUE, TRUE, c(n_gen, 2)), log(0.5))
expect_equal(test_init("ail", 3, TRUE, TRUE, c(n_gen, 2)), log(0.25))
expect_equal(test_init("ail", 4, TRUE, FALSE, c(n_gen, 2)), log(0.5))
expect_equal(test_init("ail", 5, TRUE, FALSE, c(n_gen, 2)), log(0.5))
}
for(n_gen in c(3,9,12,15)) {
fprob <- (2/3) + (1/3)*(-1/2)^n_gen
mprob <- (2/3) + (1/3)*(-1/2)^(n_gen-1)
expect_equal(test_init("ail", 1, TRUE, TRUE, c(n_gen, 0)), log(fprob^2))
expect_equal(test_init("ail", 2, TRUE, TRUE, c(n_gen, 0)), log(2*fprob*(1-fprob)))
expect_equal(test_init("ail", 3, TRUE, TRUE, c(n_gen, 0)), log((1-fprob)^2))
expect_equal(test_init("ail", 4, TRUE, FALSE, c(n_gen, 0)), log(mprob))
expect_equal(test_init("ail", 5, TRUE, FALSE, c(n_gen, 0)), log(1-mprob))
expect_equal(test_init("ail", 3, TRUE, TRUE, c(n_gen, 1)), log(fprob^2))
expect_equal(test_init("ail", 2, TRUE, TRUE, c(n_gen, 1)), log(2*fprob*(1-fprob)))
expect_equal(test_init("ail", 1, TRUE, TRUE, c(n_gen, 1)), log((1-fprob)^2))
expect_equal(test_init("ail", 5, TRUE, FALSE, c(n_gen, 1)), log(mprob))
expect_equal(test_init("ail", 4, TRUE, FALSE, c(n_gen, 1)), log(1-mprob))
}
})
test_that("AIL emit works", {
eps <- 0.01
for(i in 1:3)
expect_equal(test_emit("ail", 0, i, eps, integer(0), FALSE, FALSE, c(20,0)), 0)
expect_equal(test_emit("ail", 1, 1, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 2, 1, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 3, 1, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 4, 1, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 5, 1, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 1, 2, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 2, 2, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 3, 2, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 4, 2, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 5, 2, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 1, 3, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 2, 3, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 3, 3, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 4, 3, eps, integer(0), FALSE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 5, 3, eps, integer(0), FALSE, FALSE, c(20,0)), log(1-eps/2))
for(i in 1:3)
expect_equal(test_emit("ail", 0, i, eps, integer(0), TRUE, TRUE, c(20,0)), 0)
expect_equal(test_emit("ail", 1, 1, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 2, 1, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 3, 1, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 4, 1, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 5, 1, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 1, 2, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 2, 2, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 3, 2, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 4, 2, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 5, 2, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps/2))
expect_equal(test_emit("ail", 1, 3, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 2, 3, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps/2))
expect_equal(test_emit("ail", 3, 3, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 4, 3, eps, integer(0), TRUE, TRUE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 5, 3, eps, integer(0), TRUE, TRUE, c(20,0)), log(1-eps/2))
for(i in 4:5)
expect_equal(test_emit("ail", 0, i, eps, integer(0), TRUE, FALSE, c(20,0)), 0)
expect_equal(test_emit("ail", 1, 4, eps, integer(0), TRUE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 2, 4, eps, integer(0), TRUE, FALSE, c(20,0)), 0)
expect_equal(test_emit("ail", 3, 4, eps, integer(0), TRUE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 4, 4, eps, integer(0), TRUE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 5, 4, eps, integer(0), TRUE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 1, 5, eps, integer(0), TRUE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 2, 5, eps, integer(0), TRUE, FALSE, c(20,0)), 0)
expect_equal(test_emit("ail", 3, 5, eps, integer(0), TRUE, FALSE, c(20,0)), log(1-eps))
expect_equal(test_emit("ail", 4, 5, eps, integer(0), TRUE, FALSE, c(20,0)), log(eps))
expect_equal(test_emit("ail", 5, 5, eps, integer(0), TRUE, FALSE, c(20,0)), log(1-eps))
})
test_that("AIL step works", {
for(rf in c(0.01, 0.0001)) {
for(ngen in c(3, 9, 12, 15)) {
R <- 1 - 2*( 0.25*(1 + (1-2*rf)*(1-rf)^(ngen-2)))
expect_equal(test_step("ail", 1, 1, rf, FALSE, FALSE, c(ngen, 0)), log((1-R)^2))
expect_equal(test_step("ail", 1, 2, rf, FALSE, FALSE, c(ngen, 0)), log(2*R*(1-R)))
expect_equal(test_step("ail", 1, 3, rf, FALSE, FALSE, c(ngen, 0)), log(R^2))
expect_equal(test_step("ail", 2, 1, rf, FALSE, FALSE, c(ngen, 0)), log(R*(1-R)))
expect_equal(test_step("ail", 2, 2, rf, FALSE, FALSE, c(ngen, 0)), log((1-R)^2+R^2))
expect_equal(test_step("ail", 2, 3, rf, FALSE, FALSE, c(ngen, 0)), log(R*(1-R)))
expect_equal(test_step("ail", 3, 1, rf, FALSE, FALSE, c(ngen, 0)), log(R^2))
expect_equal(test_step("ail", 3, 2, rf, FALSE, FALSE, c(ngen, 0)), log(2*R*(1-R)))
expect_equal(test_step("ail", 3, 3, rf, FALSE, FALSE, c(ngen, 0)), log((1-R)^2))
}
}
for(rf in c(0.01, 0.0001)) {
for(ngen in c(3, 9, 12, 15)) {
z <- sqrt((1-rf)*(9-rf))
w <- (1 - rf + z)/4
y <- (1 - rf - z)/4
mR <- 1 - 0.25*(2 + (1-2*rf)*(w^(ngen-2) + y^(ngen-2)) +
(3 - 5*rf + 2*rf^2)/z*(w^(ngen-2) - y^(ngen-2)))
fR <- 1 - 0.25*(2 + (1-2*rf)*(w^(ngen-2) + y^(ngen-2)) +
(3 - 6*rf + rf^2)/z*(w^(ngen-2) - y^(ngen-2)))
expect_equal(test_step("ail", 1, 1, rf, TRUE, TRUE, c(ngen, 2)), log((1-fR)^2))
expect_equal(test_step("ail", 1, 2, rf, TRUE, TRUE, c(ngen, 2)), log(2*fR*(1-fR)))
expect_equal(test_step("ail", 1, 3, rf, TRUE, TRUE, c(ngen, 2)), log(fR^2))
expect_equal(test_step("ail", 2, 1, rf, TRUE, TRUE, c(ngen, 2)), log(fR*(1-fR)))
expect_equal(test_step("ail", 2, 2, rf, TRUE, TRUE, c(ngen, 2)), log((1-fR)^2+fR^2))
expect_equal(test_step("ail", 2, 3, rf, TRUE, TRUE, c(ngen, 2)), log(fR*(1-fR)))
expect_equal(test_step("ail", 3, 1, rf, TRUE, TRUE, c(ngen, 2)), log(fR^2))
expect_equal(test_step("ail", 3, 2, rf, TRUE, TRUE, c(ngen, 2)), log(2*fR*(1-fR)))
expect_equal(test_step("ail", 3, 3, rf, TRUE, TRUE, c(ngen, 2)), log((1-fR)^2))
expect_equal(test_step("ail", 4, 4, rf, TRUE, FALSE, c(ngen, 2)), log(1-mR))
expect_equal(test_step("ail", 4, 5, rf, TRUE, FALSE, c(ngen, 2)), log(mR))
expect_equal(test_step("ail", 5, 4, rf, TRUE, FALSE, c(ngen, 2)), log(mR))
expect_equal(test_step("ail", 5, 5, rf, TRUE, FALSE, c(ngen, 2)), log(1-mR))
}
}
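# calc_q() appears to give the expected frequency of the first allele on the female X
# chromosome after n_gen generations (matching fprob in the init test above), and
# calc_p11() the recursive male/female two-locus 1-1 haplotype probabilities from
# which the X-chromosome transition rates below are derived.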
calc_q <- function(n_gen)
2/3 + (1/3)*(-1/2)^n_gen
calc_p11 <- function(n_gen, rf) {
if(n_gen == 1) return(c(1, 0.5))
last <- calc_p11(n_gen-1, rf)
z <- c((1-rf)*last[2] + rf*calc_q(n_gen-2)*calc_q(n_gen-3),
0.5*last[1] + (1-rf)/2*last[2] + (rf/2)*calc_q(n_gen-2)*calc_q(n_gen-3))
z
}
for(rf in c(0.01, 0.0001)) {
for(ngen in c(3, 9, 12, 15)) {
qf <- calc_q(ngen)
qm <- calc_q(ngen-1)
p11 <- calc_p11(ngen, rf)
m11 <- p11[1]
f11 <- p11[2]
m1to1 <- m11/qm
m1to2 <- 1 - m1to1
m2to1 <- (qm - m11)/(1-qm)
m2to2 <- 1 - m2to1
f1to1 <- f11/qf
f1to2 <- 1 - f1to1
f2to1 <- (qf - f11)/(1-qf)
f2to2 <- 1 - f2to1
expect_equal(test_step("ail", 1, 1, rf, TRUE, TRUE, c(ngen, 0)), log(f1to1^2))
expect_equal(test_step("ail", 1, 2, rf, TRUE, TRUE, c(ngen, 0)), log(2*f1to1*f1to2))
expect_equal(test_step("ail", 1, 3, rf, TRUE, TRUE, c(ngen, 0)), log(f1to2^2))
expect_equal(test_step("ail", 2, 1, rf, TRUE, TRUE, c(ngen, 0)), log(f1to1*f2to1))
expect_equal(test_step("ail", 2, 2, rf, TRUE, TRUE, c(ngen, 0)), log(f2to1*f1to2 + f1to1*f2to2))
expect_equal(test_step("ail", 2, 3, rf, TRUE, TRUE, c(ngen, 0)), log(f1to2*f2to2))
expect_equal(test_step("ail", 3, 1, rf, TRUE, TRUE, c(ngen, 0)), log(f2to1^2))
expect_equal(test_step("ail", 3, 2, rf, TRUE, TRUE, c(ngen, 0)), log(2*f2to2*f2to1))
expect_equal(test_step("ail", 3, 3, rf, TRUE, TRUE, c(ngen, 0)), log(f2to2^2))
expect_equal(test_step("ail", 4, 4, rf, TRUE, FALSE, c(ngen, 0)), log(m1to1))
expect_equal(test_step("ail", 4, 5, rf, TRUE, FALSE, c(ngen, 0)), log(m1to2))
expect_equal(test_step("ail", 5, 4, rf, TRUE, FALSE, c(ngen, 0)), log(m2to1))
expect_equal(test_step("ail", 5, 5, rf, TRUE, FALSE, c(ngen, 0)), log(m2to2))
expect_equal(test_step("ail", 1, 1, rf, TRUE, TRUE, c(ngen, 1)), log(f2to2^2))
expect_equal(test_step("ail", 1, 2, rf, TRUE, TRUE, c(ngen, 1)), log(2*f2to2*f2to1))
expect_equal(test_step("ail", 1, 3, rf, TRUE, TRUE, c(ngen, 1)), log(f2to1^2))
expect_equal(test_step("ail", 2, 1, rf, TRUE, TRUE, c(ngen, 1)), log(f2to2*f1to2))
expect_equal(test_step("ail", 2, 2, rf, TRUE, TRUE, c(ngen, 1)), log(f1to2*f2to1 + f1to1*f2to2))
expect_equal(test_step("ail", 2, 3, rf, TRUE, TRUE, c(ngen, 1)), log(f2to1*f1to1))
expect_equal(test_step("ail", 3, 1, rf, TRUE, TRUE, c(ngen, 1)), log(f1to2^2))
expect_equal(test_step("ail", 3, 2, rf, TRUE, TRUE, c(ngen, 1)), log(2*f1to1*f1to2))
expect_equal(test_step("ail", 3, 3, rf, TRUE, TRUE, c(ngen, 1)), log(f1to1^2))
expect_equal(test_step("ail", 4, 4, rf, TRUE, FALSE, c(ngen, 1)), log(m2to2))
expect_equal(test_step("ail", 4, 5, rf, TRUE, FALSE, c(ngen, 1)), log(m2to1))
expect_equal(test_step("ail", 5, 4, rf, TRUE, FALSE, c(ngen, 1)), log(m1to2))
expect_equal(test_step("ail", 5, 5, rf, TRUE, FALSE, c(ngen, 1)), log(m1to1))
}
}
})
test_that("geno_names works", {
expect_equal(geno_names("ail", c("B", "R"), FALSE), c("BB", "BR", "RR"))
expect_equal(geno_names("ail", c("B", "R"), TRUE), c("BB", "BR", "RR", "BY", "RY"))
})
test_that("nrec works", {
expected <- rbind(c(0,1,2), c(1,0,1), c(2,1,0))
for(i in 1:3)
for(j in 1:3) {
expect_equal(test_nrec("ail", i, j, FALSE, FALSE, 0), expected[i,j])
expect_equal(test_nrec("ail", i, j, TRUE, TRUE, 0), expected[i,j])
expect_equal(test_nrec("ail", i, j, TRUE, TRUE, 0), expected[i,j])
}
expected <- rbind(c(0,1), c(1,0))
for(i in 1:2)
for(j in 1:2)
expect_equal(test_nrec("ail", i+3, j+3, TRUE, FALSE, 0), expected[i,j])
})
|
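# coef method for 'rem' (removal) objects: stacks the mark-recapture coefficients and
# the detection-function (ds) exponent, scale and adjustment coefficients into a
# single table with prefixed row names.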
coef.rem <- function(object,...){
par <- coef(object$mr)
row.names(par) <- paste("mr :",row.names(coef(object$mr)))
if(!is.null(coef(object$ds)$exponent)){
rn <- row.names(par)
par <- rbind(par,coef(object$ds)$exponent)
row.names(par) <- c(rn, paste("ds expon:",
row.names(coef(object$ds)$exponent)))
}
rn <- row.names(par)
ds.scale <- coef(object$ds)$scale
if(!is.null(ds.scale)){
par <- rbind(par,coef(object$ds)$scale)
row.names(par) <- c(rn,paste("ds scale:",
row.names(coef(object$ds)$scale)))
}
if(!is.null(coef(object$ds)$adjustment)){
rn <- row.names(par)
par <- rbind(par,coef(object$ds)$adjustment)
row.names(par) <- c(rn, paste("ds adjust:",
row.names(coef(object$ds)$adjustment)))
}
return(par)
}
|
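# Fetches summoning-bell records from the LegCo Hansard OData API. Filters are built
# from the supplied hansard/rundown/section IDs, the language or floor version, and
# the meeting-date range, then passed on to legco_api().
# A minimal usage sketch (assuming the package is attached and the API is reachable):
#   summoning_bells(from = "2019-01-01", to = "2019-12-31", lang = "en")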
summoning_bells <- function(hansard_id = NULL, rundown_id = NULL, section_code = NULL,
lang = "en", from = '1900-01-01', to = Sys.Date(), floor = FALSE,
n = 1000, extra_param = NULL, count = FALSE, verbose = TRUE) {
query <- "SummoningBells?$select=MeetingDate,SectionCode,RundownID,HansardID,HansardFileURL"
filter_args <- {}
if (!is.null(hansard_id)) {
filter_args <- c(filter_args, .generate_filter("HansardID", hansard_id))
}
if (!is.null(rundown_id)) {
filter_args <- c(filter_args, .generate_filter("RundownID", rundown_id))
}
if (!is.null(section_code)) {
filter_args <- c(filter_args, .generate_filter("SectionCode", section_code))
}
if (is.null(hansard_id) & is.null(rundown_id)) {
lang <- tolower(lang)
if (floor) {
filter_args <- c(filter_args, "HansardType eq 'Floor'")
} else if (lang == "en") {
filter_args <- c(filter_args, "HansardType eq 'English'")
} else if (lang == "zh") {
filter_args <- c(filter_args, "HansardType eq 'Chinese'")
}
}
from <- as.Date(from)
to <- as.Date(to)
filter_args <- c(filter_args, paste0("MeetingDate ge datetime\'", from,
"\' and MeetingDate le datetime\'", to, "\'"))
query <- paste0(query, "&$filter=", paste(filter_args, collapse = " and "))
if (!is.null(extra_param)) {
query <- paste0(query, extra_param)
}
legco_api("hansard", query, n, count, verbose)
}
legco_summoning_bells <- summoning_bells
|
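# Case-selection helpers for sufficient terms of a QCA minimization result. Each
# function builds the prime-implicant membership data with pimdata() and ranks cases
# against the outcome: deviant consistency (dcn), deviant coverage (dcv),
# individually irrelevant (iir) and typical (typ) cases.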
cases.suf.dcn <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
if(length(grep("~",outcome)) > 0){
outcome<-outcome[grep("~",outcome)]
outcome<-gsub('\\~', '', outcome)
outcome<-unlist(outcome)}
outcome <- toupper(outcome)
X <- pimdata(results=results, outcome=outcome, sol=sol)
y <- X[,"out", drop=FALSE]
names(y) <- outcome
aux <-
function(i)
{
fil <- (X[,i] > 0.5) & (y < 0.5)
Z <- data.frame(x=X[fil, i],
y=y[fil],
s=rep(FALSE, sum(fil)),
Term=rep(colnames(X)[i], sum(fil)),
Cases=rownames(X)[fil])
s <- (1 - (Z$x-Z$y) + (1-Z$x))
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$Sd <- s
colnames(Z)[1:3] <- c('TermMembership', outcome, 'MostDevCons')
Z<-Z[, c(5, 4, 1, 2, 6, 3)]
Z[,c(3:5)] <- round(Z[,c(3:5)], digits = 3)
return(Z[order(Z$Sd),])
}
R <- do.call(rbind, lapply(1:(ncol(X)-1), aux))
R <- R[order(R$Term,R$Sd,R$TermMembership),]
names(R)[names(R)==outcome]<- "Outcome"
names(R)[names(R)=="Sd"]<- "Best"
M<-list()
M[[1]] <- list(title="Deviant Consistency Cases", results=R[R$Term!='solution_formula', ])
class(M) <- 'matchessuf'
return(M)
}
cases.suf.dcv <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
if(length(grep("~",outcome)) > 0){
outcome<-outcome[grep("~",outcome)]
outcome<-gsub('\\~', '', outcome)
outcome<-unlist(outcome)}
outcome <- toupper(outcome)
X <- pimdata(results=results, outcome=outcome, sol=sol)
y <- X[,"out", drop=FALSE]
names(y) <- outcome
CS <- results$tt$recoded.data
CS <- CS[, -which(colnames(CS)==outcome)]
TS <- CS
TS[TS<0.50]<-1-TS[TS<0.50]
CS[CS<0.50]<-0
CS[CS>0.50]<-1
CS["TT_row_membership"]<-do.call(pmin,TS)
CS["TT_row_membership"] <- round(CS["TT_row_membership"], digits = 3)
aux <-
function(i)
{
fil <- (X[,i] < 0.5) & (y > 0.5)
Z <- data.frame(x=X[fil, i],
y=y[fil],
s=rep(FALSE, sum(fil)),
Term=rep(colnames(X)[i], sum(fil)),
Case=rownames(X)[fil], ttr=CS[rownames(X)[fil],"TT_row_membership"])
s <- (1-Z$ttr)
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$Sd <- s
colnames(Z)[1:3] <- c('TermMembership', outcome, 'Most_deviant')
return(Z[, c(5, 4, 1, 2, 7, 3)])
}
R <- do.call(rbind, lapply(1:(ncol(X)-1), aux))
R <- R[R$Term=='solution_formula', c('Case', 'TermMembership', outcome,"Sd")]
names(R)[2] <- 'SolMembership'
R[,2:4] <- round(R[,2:4], digits = 3)
Z <- merge(x=R, y=CS, by.x='Case', by.y='row.names')
names(Z)[5:(ncol(Z)-1)] <- paste('TT_', names(Z)[5:(ncol(Z)-1)], sep='')
O <-subset(Z,select=3)
Z <-Z[,-c(3)]
Z$Outcome <- O
sortnames<-names(Z)[4:(ncol(Z)-2)]
Z <- Z[do.call("order", c(Z[sortnames], Z["Sd"])), ]
names(Z$Outcome)<- "Outcome"
names(Z)[names(Z)=="Sd"]<- "Best"
ttsplit <- aggregate(Z$Best,by=Z[sortnames],min, drop=FALSE)
Z$MostDevCov <- FALSE
for (n in 1:nrow(Z)){
for (s in 1:nrow(ttsplit)){
if(all(ttsplit[s,sortnames] == Z[n,sortnames]) & ttsplit[s,"x"] == Z[n, "Best"]){Z[n,"MostDevCov"] <- TRUE}
}}
Z$ConsTT <- FALSE
for (n in 1:nrow(Z)){
if(Z[n,"TT_row_membership"] <= Z[n,"Outcome"]){Z[n,"ConsTT"] <- TRUE}
}
Z <- Z[do.call("order", c(Z[sortnames], 1-Z["ConsTT"], Z["Best"])), ]
Z <- cbind(Z[,c(1,2)], Z[sortnames],Z["TT_row_membership"], Z["Outcome"],Z["Best"], Z["MostDevCov"],Z["ConsTT"])
M <- list()
M[[1]] <- list(title="Deviant Coverage Cases", results=Z)
class(M) <- 'matchessuf'
return(M)
}
cases.suf.iir <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
if(length(grep("~",outcome)) > 0){
outcome<-outcome[grep("~",outcome)]
outcome<-gsub('\\~', '', outcome)
outcome<-unlist(outcome)}
outcome <- toupper(outcome)
X <- pimdata(results=results, outcome=outcome, sol=sol)
y <- X[,"out", drop=FALSE]
names(y) <- outcome
CS <- results$tt$recoded.data
CS <- CS[, -which(colnames(CS)==outcome)]
TS <- CS
TS[TS<0.50]<-1-TS[TS<0.50]
CS[CS<0.50]<-0
CS[CS>0.50]<-1
CS["TT_row_membership"]<-do.call(pmin,TS)
CS["TT_row_membership"] <- round(CS["TT_row_membership"], digits = 3)
aux <-
function(i)
{
fil <- (X[,i] < 0.5) & (y < 0.5)
Z <- data.frame(x=X[fil, i],
y=y[fil],
s=rep(FALSE, sum(fil)),
term=rep(colnames(X)[i], sum(fil)),
Case=rownames(X)[fil])
s <- 1 - (Z$x-Z$y)/Z$x
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$Sd <- s
colnames(Z)[1:3] <- c('term_membership', outcome, 'most_deviant')
return(Z[, c(5, 4, 1, 2, 6, 3)])
}
R <- do.call(rbind, lapply(1:(ncol(X)-1), aux))
R <- R[R$term=='solution_formula', c('Case', 'term_membership', outcome)]
names(R)[2] <- 'Solution_membership'
R[,c(2:3)] <- round(R[,c(2:3)], digits = 3)
Z <- merge(x=R, y=CS, by.x='Case', by.y='row.names')
names(Z)[4:(ncol(Z)-1)] <- paste('TT_', names(Z)[4:(ncol(Z)-1)], sep='')
O <-subset(Z,select=3)
Z <-Z[,-c(3)]
Z$Outcome <- O
sortnames<-names(Z)[3:(ncol(Z)-2)]
Z <- Z[do.call("order", Z[sortnames]), ]
names(Z$Outcome)<- "Outcome"
M <- list()
M[[1]] <- list(title="Individually Irrelevant Cases", results=Z)
class(M) <- 'matchessuf'
return(M)
}
cases.suf.typ.fct <-
function(results,
outcome,
term=1,
sol=1,
max_pairs=5,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
if(length(grep("~",outcome)) > 0){
outcome<-outcome[grep("~",outcome)]
outcome<-gsub('\\~', '', outcome)
outcome<-unlist(outcome)}
outcome <- toupper(outcome)
PD <- pimdata(results=results, outcome=outcome, sol=sol)
if (term>(ncol(PD)-2)){stop("The term selected does not exist for the chosen model of the solution. Check the solution again and pick another term or change the model using the argument sol.")}
nterm <- colnames(PD[term])
DT <- results$tt$initial.data
DT1 <- data.frame(matrix(NA,ncol=0,nrow=nrow(DT)))
row.names(DT1)<-row.names(DT)
tl <- gsub('\\s', '', nterm)
tl <- strsplit(tl, '\\*')
tn <- unique(unlist(tl))
t_neg<-character(0)
t_pre<-character(0)
if(length(grep("~",tn)) > 0){
t_neg<-tn[grep("~",tn)]
t_neg<-gsub('\\~', '', t_neg)
t_neg<-unlist(t_neg)
t_pre<-tn[!tn %in% tn[grep("~",tn)]]
}
else {t_pre<- toupper(tn)}
if (length(t_pre) > 0) {
DT1[t_pre] <- DT[t_pre]
colnames(DT1[t_pre])<-toupper(colnames(DT1[t_pre]))
}
if (length(t_neg) > 0) {
DT1[t_neg] <- 1 - DT[t_neg]
colnames(DT1[t_neg])<-tolower(colnames(DT1[t_neg]))
}
Y <- PD[,"out", drop=FALSE]
names(Y) <- outcome
if (length(tn)==1) {
fct <- paste("Typical Cases - Focal Conjunct", tn[1], sep = " ")
X <-DT1[toupper(tn[1])]
typical <-(X>0.5) & (Y>0.5) & (X<=Y)
ty <- rownames(DT1)[typical]
consfc <-(X<=Y)
cfc <- rownames(DT1)[consfc]
if (identical(ty, character(0))) {M <- list(); M[[1]] <- list(title=fct, results="no typical cases")}
else {
Z <- data.frame(
x=X[ty,toupper(tn[1])],
y=Y[ty,outcome],
s=rep(FALSE))
row.names(Z) <- ty
s <- (2*abs(Z$y-Z$x) + (1-Z$x))
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$St <- s
colnames(Z)[1:3] <- c('Suff.Term/Focal Conjunct', outcome, 'MostTyp')
Z<-Z[, c( 1, 2, 4, 3)]
Z <- Z[order(Z$St),]
PDU <- as.data.frame(PD[ty,-c(ncol(PD), ncol(PD)-1, term)], row.names = ty)
Z$UniqCov <- TRUE
if (ncol(PDU)>1) {
PDU <- apply(PDU, 1, function(x) sum(x>0.5))
for (j in ty) {
if (PDU[j]==0) {Z[j,"UniqCov"] <- TRUE}
else {Z[j,"UniqCov"] <- FALSE}}}
else {
if (ncol(PDU)==1) {
for (j in ty) {
if (PDU[j,]<=0.5) {Z[j,"UniqCov"] <- TRUE}
else {Z[j,"UniqCov"] <- FALSE}}
}
}
names(Z)[names(Z)==outcome]<- "Outcome"
names(Z)[names(Z)=="St"]<- "Best"
Z$Rank <- "-"
Z$ConsFC <- FALSE
for (h in 1:nrow(Z)){
if (rownames(Z)[h] %in% cfc){Z$ConsFC[h] <- TRUE}
}
Z <- Z[order(1-Z$ConsFC,1-Z$UniqCov, Z$Best),]
Z <- Z[1:(min(c(nrow(Z), max_pairs))), ]
Z <- Z[, c(1, 2, 5, 3, 4, 6, 7)]
M <- list()
M[[1]] <- list(title=fct, results=Z)
}
}
else {
M <- list()
for (i in (1:length(tn)))
{
fct <- paste("Typical Cases - Focal Conjunct", tn[i], sep = " ")
if(length(grep("~",tn)) > 0){tnn<-unlist(gsub('\\~', '', tn))}
else{tnn<-tn}
X <- DT1[toupper(tnn[i])]
cct<- tnn[-grep(tnn[i], tnn)]
cct<- toupper(cct)
CCDT<-DT1[cct]
if(ncol(CCDT)>1){
a<-do.call(pmin, CCDT[,])
CCDT1<-data.frame(a)
row.names(CCDT1)<-row.names(CCDT)}
else{
CCDT1<-CCDT
names(CCDT1)[1]<-"a"}
CCDT$termm<-pmin(CCDT1$a,X[,])
typical <-(CCDT$termm>0.5) & (Y>0.5) & (CCDT$termm<=Y)
typ1 <- (X < CCDT1$a)
typ2 <- (X >= CCDT1$a)
ty <- rownames(DT1)[typical]
ty1 <- rownames(DT1)[typical & typ1]
ty2 <- rownames(DT1)[typical & typ2]
consfc <-(X<=Y)
cfc <- rownames(DT1)[consfc]
if (identical(ty, character(0))) {M[[i]] <-list(title=fct, results="no typical cases")}
else {
Z <- data.frame(
"x" = X[ty,toupper(tnn[i])],
"y" = Y[ty,outcome],
"cctm" = CCDT1[ty,"a"],
"termm" = CCDT[ty,"termm"],
"s" = rep(FALSE))
row.names(Z) <- ty
s <- (2*abs(Z$y-Z$x) + (1-Z$termm))
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$St <- s
colnames(Z) <- c('FocalConj', outcome, 'CompConj','Term', 'MostTypFC','Best')
Z<-Z[, c( 1, 3, 4, 2, 6, 5)]
Z$Rank <- NA
Z[ty1,7] <- 1
Z[ty2,7] <- 2
Z <- Z[order(Z$Rank, Z$Best),]
PDU <- PD[ty,-c(ncol(PD), ncol(PD)-1, term), drop = FALSE]
PDU <- apply(PDU, 1, function(x) sum(x>0.5))
Z$UniqCov <- TRUE
for (j in ty) {
if (PDU[j]==0) {Z[j,"UniqCov"] <- TRUE}
else {Z[j,"UniqCov"] <- FALSE}}
Z<-Z[, c( 1, 4, 2, 3, 5, 6, 8, 7)]
Z$ConsFC <- FALSE
for (h in 1:nrow(Z)){
if (rownames(Z)[h] %in% cfc){Z$ConsFC[h] <- TRUE}
}
Z <- Z[order(Z$Rank, 1-Z$ConsFC, 1-Z$UniqCov, Z$Best),]
Z[,c(1:5,8)] <- round(Z[,c(1:5,8)], digits=3)
names(Z)[names(Z)==outcome]<- "Outcome"
Z$MostTypTerm <- FALSE
mtt <- cases.suf.typ.most(results = results, outcome = outcome, sol = sol)
mtt <- mtt[[1]]$results
mttc <- mtt[mtt$term==colnames(PD)[term],"case"]
for (h in 1:nrow(Z)){
if (rownames(Z)[h] %in% mttc){Z$MostTypTerm[h] <- TRUE}
}
Z <- Z[1:(min(c(nrow(Z), max_pairs))), ]
Z <- Z[, c(1, 2, 3, 4, 7, 5, 6, 8, 9, 10)]
M[[i]] <- list(title=fct, results=Z)
}
}
}
class(M) <- 'matchessuf'
return(M)}
cases.suf.typ.most <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
R <- cases.suf.typ(results, outcome, sol)
R <- R[[1]]$results
M <- list()
M[[1]] <- list(title="Most Typical Cases", results=R[R$MostTyp, ])
class(M) <- 'matchessuf'
return(M)
}
cases.suf.typ <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
if(length(grep("~",outcome)) > 0){
outcome<-outcome[grep("~",outcome)]
outcome<-gsub('\\~', '', outcome)
outcome<-unlist(outcome)}
outcome <- toupper(outcome)
X <- pimdata(results=results, outcome=outcome, sol=sol)
y <- X[,"out", drop=FALSE]
names(y) <- outcome
aux <-
function(i)
{
fil <- (X[,i] > 0.5) & (y > 0.5) & (X[,i] <= y)
Z <- data.frame(x=X[fil, i],
y=y[fil],
s=rep(FALSE, sum(fil)),
term=rep(colnames(X)[i], sum(fil)),
case=rownames(X)[fil])
s <- (2*(Z$y-Z$x) + (1-Z$x))
suppressWarnings(Z$s[s==min(s)] <- TRUE)
Z$St <- s
colnames(Z)[1:3] <- c('term_membership', outcome, 'MostTyp')
Z<-Z[, c(5, 4, 1, 2, 6, 3)]
return(Z[order(Z$St),])
}
R <- do.call(rbind, lapply(1:(ncol(X)-1), aux))
R <- R[R$term!='solution_formula', ]
cases <- unique(R$case)
su <- vapply(cases, function(i) sum(R[R$case==i,3]>0.5), FUN.VALUE=numeric(1))
R$UniqCov <- R$case %in% cases[su==1]
R <- R[order(R$term,-R$UniqCov, R$St, R$term_membership),]
names(R)[names(R)==outcome]<- "Outcome"
names(R)[names(R)=="St"]<-"Best"
R <- R[, c(1, 2, 3, 4, 7, 5, 6)]
M <- list()
M[[1]] <- list(title="Typical Cases", results=R)
class(M) <- 'matchessuf'
return(M)
}
cases.suf.typ.unique <-
function(results,
outcome,
sol=1,
...)
{
dots <- list(...)
if(length(dots) != 0){
if ("neg.out" %in% names(dots)){print("Argument neg.out is deprecated. The negated outcome is identified automatically from the minimize solution.")}
if ("use.tilde" %in% names(dots)){print("Argument use.tilde is deprecated. The usage of the tilde is identified automatically from the minimize solution.")}
}
R <- cases.suf.typ(results, outcome, sol)
R <- R[[1]]$results
M<-list()
M[[1]] <- list(title="Uniquely Covered Typical Cases", results=R[R$UniqCov, ])
class(M) <- 'matchessuf'
return(M)
}
|
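# Vignette-style example for donut::nnt(): nearest-neighbour search with selected
# variables wrapped on a torus, first wrapping dimension 1 only, then both
# dimensions, and finally using nabor::knn as the underlying search function.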
knitr::opts_chunk$set(comment = "#>") # chunk-output prefix assumed to be the usual "#>" vignette convention
library(donut)
set.seed(20092019)
x1 <- runif(100, 0, 2 * pi)
x2 <- runif(100, 0, 3)
DATA <- data.frame(x1, x2)
got_RANN <- requireNamespace("RANN", quietly = TRUE)
library(RANN)
ranges1 <- c(0, 2 * pi)
query1 <- rbind(c(6, 1.3), c(2 * pi, 3), c(3, 1.5), c(4, 0))
res1 <- nnt(DATA, query1, k = 8, torus = 1, ranges = ranges1)
plot(res1, ylim = c(0, 3))
res1$nn.dists
res1$nn.idx
ranges <- rbind(c(0, 2 * pi), c(0, 3))
query <- rbind(c(6, 1.3), c(2 * pi, 3), c(3, 1.5), c(4, 0))
res2 <- nnt(DATA, query, k = 8, torus = 1:2, ranges = ranges)
plot(res2)
got_nabor <- requireNamespace("nabor", quietly = TRUE)
library(nabor)
ranges <- rbind(c(0, 2 * pi), c(0, 3))
query <- rbind(c(6, 1.3), c(2 * pi, 3), c(3, 1.5), c(4, 0))
res2 <- nnt(DATA, query, k = 8, fn = nabor::knn, torus = 1:2, ranges = ranges)
plot(res2)
|
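# Thin wrapper around the compiled routine sirt_rcpp_rm_sdt_calc_probs_gpcm():
# computes (optionally log) GPCM category probabilities for VV items with K+1
# categories at TP theta grid points, returned as a vector or reshaped into a
# VV x (K+1) x TP array.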
rm_sdt_calc_probs_gpcm_rcpp <- function(a.item, tau.item, Qmatrix, theta.k, VV, K, TP,
eps=0, use_log=FALSE, as_vector=FALSE)
{
K1 <- K+1
prob_dim <- c(VV, K1, TP)
res <- sirt_rcpp_rm_sdt_calc_probs_gpcm( a=a.item, tau=tau.item,
theta_k=theta.k, VV=VV, K1=K1, TP=TP, eps=eps, use_log=use_log )
if ( ! as_vector ){
res <- array(res, dim=prob_dim)
}
return(res)
}
|
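# Picks a legend position by overlaying a 3 x 3 grid on the current plot extent (or
# on the bbox of x when no device is open) and preferring cells that do not intersect
# the convex hull of the layer; returns one (n = 1) or two (n = 2) keyword positions.
# Usage sketch, assuming 'x' is an sf layer:
#   mf_get_leg_pos(x)   # e.g. may return "topright" when that corner is empty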
mf_get_leg_pos <- function(x, n = 1) {
if (is.null(grDevices::dev.list())) {
bb <- sf::st_bbox(x)
} else {
p <- par("usr")
bb <- sf::st_bbox(c(
xmin = p[1], ymin = p[3],
xmax = p[2], ymax = p[4]
),
crs = sf::st_crs(x)
)
}
g <- sf::st_make_grid(x = sf::st_as_sfc(bb), n = c(3, 3), crs = sf::st_crs(x))
y <- sf::st_union(sf::st_convex_hull(sf::st_geometry(x)))
z <- sf::st_intersects(g, y)
ind <- which(unlist(lapply(z, length)) == 0)
if (n == 1) {
if (length(ind) > 0) {
pos <- max(ind)
} else {
pos <- which.min(sf::st_area(sf::st_intersection(g, y)))
}
}
if (n == 2) {
pos <- c(NA, NA)
if (length(ind) > 1) {
pos <- sort(ind, decreasing = TRUE)[1:2]
} else {
if (length(ind) == 1) {
ii <- rep(NA, 9)
names(ii) <- 1:9
ii[ind] <- 0
couv <- sf::st_area(sf::st_intersection(g, y))
ii[is.na(ii)] <- couv
pos <- as.numeric(names(sort(ii)[1:2]))
} else {
couv <- sf::st_area(sf::st_intersection(g, y))
names(couv) <- 1:9
pos <- as.numeric(names(sort(couv)[1:2]))
}
}
}
tpos <- c(
"bottomleft1", "bottom", "bottomright1",
"left", "center", "right",
"topleft", "top", "topright"
)
return(tpos[pos])
}
|
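# Regresses the complete vegetation time series on every candidate accumulated
# precipitation (and optionally temperature) series, keeps the candidate with the
# highest R^2 (dropping negative-slope candidates unless allow.negative = TRUE), and
# returns the fitted summary together with the selected offset/accumulation periods.
# If the temperature term is not significant, the search is repeated without it.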
ACP.calculator <- function(CTSR.VI, ACP.table, ACT.table = NULL, allow.negative = FALSE, allowneg.retest = FALSE) {
if (class(CTSR.VI) != "ts") {
stop("CTSR.VI Not a time series object")}
if (length(CTSR.VI) != dim(ACP.table)[2]) {
stop("ACP.table size does not match CTSR.VI")}
if ((!is.null(ACT.table)) && (dim(ACT.table)[2] != dim(ACP.table)[2])) {
stop("ACT.table size does not match CTSR.VI")
}
yst <- start(CTSR.VI)[1]
mst <- start(CTSR.VI)[2]
linreg <- function(CTSR.VI, ACP.N, ACT.N = NULL, simple = TRUE) {
if (sd(ACP.N) == 0) {
if (is.null(ACT.N)) {
return(c(0, -1))}else{return(0)
}
}else{
if (is.null(ACT.N)) {
fit <- lm(CTSR.VI ~ ACP.N)
R.Rval <- summary(fit)$r.square
R.slpe <- as.numeric(coef(fit)[2])
if (simple) {
return(c(R.Rval, R.slpe))
}
R.pval <- glance(fit)$p.value
R.intr <- as.numeric(coef(fit)[1])
R.Tcoef <- NaN
t.sig <- TRUE
}else{
fit <- lm(CTSR.VI ~ ACP.N + ACT.N)
R.Rval <- summary(fit)$r.square
R.slpe <- as.numeric(coef(fit)[2])
if (simple) {
return(c(R.Rval, R.slpe))
}
R.pval <- glance(fit)$p.value
R.intr <- as.numeric(coef(fit)[1])
R.slpe <- as.numeric(coef(fit)[2])
R.Tcoef <- as.numeric(coef(fit)[3])
t.sig <- (coef(summary(fit))["ACT.N","Pr(>|t|)"] < 0.05)
}
R.BH <- NaN
R.SC <- NaN
R.SCT <- NaN
return(structure(list(lm.sum = c(R.slpe, R.Tcoef, R.intr, R.pval, R.Rval, R.BH, R.SC, R.SCT), temp.sig = t.sig)))
}}
while (TRUE) {
len <- dim(ACP.table)[2]
if (is.null(ACT.table)) {
lines <- dim(ACP.table)[1]
}else{
lines <- dim(ACP.table)[1]*dim(ACT.table)[1]
}
m <- matrix(nrow = (lines), ncol = 2)
colnames(m) <- c("R^2.Value", "slope")
if (is.null(ACT.table)) {
rownames(m) <- rownames(ACP.table)
}else{
rn.names <- NULL
for (rnm in rownames(ACP.table)) {
rnms <- cbind(rnm, rownames(ACT.table))
rmx <- paste(rnms[,1] , rnms[,2], sep = ":")
rn.names <- c(rn.names, rmx)}
rownames(m) <- rn.names
}
for (n in 1:dim(ACP.table)[1]) {
if (is.null(ACT.table)) {
m[n, ] <- linreg(CTSR.VI, ACP.table[n, ])
}else{
for (nx in 1:dim(ACT.table)[1]) {
rn.loop <- paste(rownames(ACP.table)[n], rownames(ACT.table)[nx], sep = ":")
m[rn.loop, ] <- linreg(CTSR.VI, ACP.table[n, ], ACT.table[nx,])
}}
}
if (allow.negative) {
mx <- m
}else{
mx <- matrix(m[m[, "slope"] > 0,], ncol = 2)
colnames(mx) <- c("R^2.Value", "slope")
rownames(mx) <- rownames(m[m[, "slope"] > 0,])
}
if (dim(mx)[1] <= 2 || allow.negative) {
max.line <- which.max(m[, "R^2.Value"])
fulname <- rownames(m)[max.line]
if (is.null(ACT.table)) {
sum.res <- linreg(CTSR.VI, ACP.table[fulname, ], simple = FALSE)
suma <- sum.res$lm.sum
precip.nm <- fulname
CTSR.ATM <- NULL
t.osp <- NaN
t.acp <- NaN
} else {
part.nm <- strsplit(fulname, "\\:")[[1]]
precip.nm <- part.nm[1]
sum.res <- linreg(CTSR.VI, ACP.table[precip.nm, ], ACT.table[part.nm[2],], simple = FALSE)
suma <- sum.res$lm.sum
CTSR.ATM <- ts(ACT.table[part.nm[2], ], start = c(yst, mst), frequency = 12)
Tnmsplit <- strsplit(part.nm[2], "\\-")[[1]]
t.osp <- as.numeric(Tnmsplit[1])
t.acp <- as.numeric(Tnmsplit[2])
}
if (!sum.res$temp.sig) {
ACT.table = NULL
allow.negative = allowneg.retest
}else{
CTSR.ARF <- ts(ACP.table[precip.nm, ], start = c(yst, mst), frequency = 12)
nmsplit <- strsplit(precip.nm, "\\-")[[1]]
osp <- as.numeric(nmsplit[1])
acp <- as.numeric(nmsplit[2])
return(structure(list(
summary = suma, CTSR.precip = CTSR.ARF, CTSR.tmp = CTSR.ATM,CTSR.osp = osp,
CTSR.acp = acp, CTSR.tosp = t.osp, CTSR.tacp = t.acp))
)
}
}else{
max.line <- which.max(mx[, "R^2.Value"])
fulname <- rownames(mx)[max.line]
if (is.null(ACT.table)) {
rfx <- ACP.table[m[, "slope"] > 0,]
sum.res <- linreg(CTSR.VI, ACP.table[fulname, ], simple = FALSE)
suma <- sum.res$lm.sum
CTSR.ARF <- ts(rfx[max.line, ], start = c(yst, mst), frequency = 12)
namestr <- rownames(rfx)[max.line]
nmsplit <- strsplit(namestr, "\\-")[[1]]
osp <- as.numeric(nmsplit[1])
acp <- as.numeric(nmsplit[2])
precip.nm <- fulname
CTSR.ATM <- NULL
t.osp <- NaN
t.acp <- NaN
} else {
part.nm <- strsplit(fulname, "\\:")[[1]]
precip.nm <- part.nm[1]
sum.res <- linreg(CTSR.VI, ACP.table[precip.nm, ], ACT.table[part.nm[2],], simple = FALSE)
suma <- sum.res$lm.sum
nmsplit <- strsplit(precip.nm, "\\-")[[1]]
osp <- as.numeric(nmsplit[1])
acp <- as.numeric(nmsplit[2])
CTSR.ATM <- ts(ACT.table[part.nm[2], ], start = c(yst, mst), frequency = 12)
CTSR.ARF <- ts(ACP.table[precip.nm, ], start = c(yst, mst), frequency = 12)
Tnmsplit <- strsplit(part.nm[2], "\\-")[[1]]
t.osp <- as.numeric(Tnmsplit[1])
t.acp <- as.numeric(Tnmsplit[2])
}
if (!sum.res$temp.sig) {
ACT.table = NULL
allow.negative = allowneg.retest
}else {
return(structure(list(
summary = suma, CTSR.precip = CTSR.ARF, CTSR.tmp = CTSR.ATM,CTSR.osp = osp,
CTSR.acp = acp, CTSR.tosp = t.osp, CTSR.tacp = t.acp))
)
}
}
}
}
|
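# Ejects a vcr cassette: the current one when 'cassette' is NULL, otherwise the named
# cassette; webmockr's net connect is disabled on exit.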
eject_cassette <- function(cassette = NULL, options = list(),
skip_no_unused_interactions_assertion = NULL) {
on.exit(webmockr::webmockr_disable_net_connect(), add=TRUE)
if (is.null(cassette)) {
cas <- current_cassette()
if (length(cas) == 0) stp("no cassette in use currently")
} else {
if (!cassette_exists(cassette)) stp("cassette '", cassette, "' not found")
cas <- cassettes(FALSE)[[cassette]]
if (is.null(cas)) stp("cassette '", cassette, "' not found")
}
cas$eject()
}
|