buildClusters <- function(out, current, merge_list, labels, full_labels,
                          thoughts, topic_proportions) {
  for (i in seq(length(current))) {
    out[[i]] <- list()
    out[[i]]$name <- current[i]
    if (current[i] > 0) {
      out[[i]]$children <- buildClusters(list(), merge_list[paste(current[i])][[1]],
                                         merge_list, labels = labels, full_labels,
                                         thoughts, topic_proportions)
      out[[i]]$name <- current[i]
    } else {
      out[[i]]$size <- 1800
      out[[i]]$name <- labels[-current[i]]
      out[[i]]$topic_no <- -current[i]
      out[[i]]$thought_1 <- iconv(thoughts$docs[-current[i]]$Topic[1], to = "utf-8", sub = "")
      out[[i]]$thought_2 <- iconv(thoughts$docs[-current[i]]$Topic[2], to = "utf-8", sub = "")
      out[[i]]$prob <- paste(full_labels$prob[-current[i], ], collapse = ", ")
      out[[i]]$frex <- paste(full_labels$frex[-current[i], ], collapse = ", ")
      out[[i]]$lift <- paste(full_labels$lift[-current[i], ], collapse = ", ")
      out[[i]]$score <- paste(full_labels$score[-current[i], ], collapse = ", ")
      out[[i]]$proportion <- format(round(topic_proportions[-current[i]], 2))
    }
  }
  return(out)
}
permute_importance <- function(ppf) {
  # Requires dplyr (for %>%, mutate(), arrange()).
  sd <- imp <- imp2 <- sd.imp <- sd.imp2 <- NULL  # appease R CMD check (NSE below)
  train <- as.matrix(ppf$train[, -which(colnames(ppf$train) == ppf$class.var)])
  classes <- as.integer(unlist(ppf$train[, which(colnames(ppf$train) == ppf$class.var)]))
  oobid <- apply(ppf$oob.obs, 1, function(x) which(x == 1) - 1)
  permute <- oobid %>% lapply(function(x) sample(x, length(x)))
  trees <- ppf[[8]]
  noob <- as.integer(lapply(oobid, length))
  TRstrL <- trees %>% lapply(function(x) as.matrix(x[[1]]))
  TRsplL <- trees %>% lapply(function(x) as.matrix(x[[3]]))
  TRprnodeL <- trees %>% lapply(function(x) as.matrix(x[[2]]))
  corr.oob.per <- imposoon(train, classes, oobid, permute, trees, noob,
                           TRstrL, TRsplL, TRprnodeL)
  rank.var <- t(apply(corr.oob.per, 1, rank, ties.method = "random"))
  corr.oob <- (1 - ppf$oob.error.tree) * unlist(lapply(oobid, length))
  n.oob <- unlist(lapply(permute, length))
  imp.pl <- data.frame(
    nm = colnames(ppf$train)[colnames(ppf$train) != ppf$class.var],
    imp = apply(corr.oob.per, 2, function(x) mean(corr.oob - x)),
    sd.imp = apply(corr.oob.per, 2, function(x) sd(corr.oob - x)),
    imp2 = apply(corr.oob.per, 2, function(x) mean((1 - x / n.oob) - ppf$oob.error.tree)),
    sd.imp2 = apply(corr.oob.per, 2, function(x) sd((1 - x / n.oob) - ppf$oob.error.tree))
  ) %>%
    dplyr::mutate(imp2.std = imp2 / sd.imp2, imp.std = imp / sd.imp) %>%
    dplyr::arrange(imp)
  imp.pl$nm <- factor(imp.pl$nm, levels = imp.pl[order(imp.pl$imp2), "nm"])
  imp.pl
}
if (FALSE) {
  # Disabled development code (guarded by if (FALSE)): several right-hand sides
  # below were left unspecified in the source and are marked as placeholders.
  NBDevBartlett <- function(counts, design, log.offset, nb.disp,
                            print.progress = TRUE, bias.fold.tolerance = 1.10) {
    n <- ncol(counts)
    if (is.null(log.offset)) log.offset <- rep(0, ncol(counts))
    est.offset <- exp(log.offset)
    deviance.vector <- rep(NA, nrow(counts))
    means <- matrix(NA, nrow(counts), ncol(counts))
    parms <- matrix(NA, nrow(counts), ncol(design))
    design.df <- as.data.frame(design)
    glm.ctrl <- glm.control(epsilon = 1e-08, maxit = 1500L, trace = FALSE)
    fbrNBglm.ctrl <- fbrNBglm.control(coefOnly = TRUE,
                                      infoParms = list(j = 1, k = 1, m = 1),
                                      maxit = 1500L, tol = 1e-8, standardizeX = TRUE)
    nbLogFamily <- negbin("log", 1)
    logFcCorrection <- abs(log(bias.fold.tolerance))
    log10 <- log(10)
    for (gn in 1:nrow(counts)) {
      if (gn %in% c(2, 10, 100, 500, 1000, 2500, 5000 * (1:200)) & print.progress)
        print(paste("Analyzing Gene #", gn))  # message reconstructed; it was truncated in the source
      glm.fitted <- glmsolve(formula = counts[gn, ] ~ . - 1 + offset(log.offset),
                             family = update.fbrNBfamily(nbLogFamily, overdisp = nb.disp[gn]),
                             data = design.df, control = glm.ctrl, x = TRUE)
      parms[gn, ] <- glm.fitted$coefficients
      deviance.vector[gn] <- glm.fitted$deviance
      tmp.bias <- coef(glm.fitted, type = "bias")
      tmp.nonNA <- which(!is.na(tmp.bias) & !is.na(glm.fitted$coefficients))
      this.x <- model.matrix(glm.fitted)
      if (any(abs(this.x[, tmp.nonNA, drop = FALSE] %*% tmp.bias[tmp.nonNA]) > logFcCorrection)) {
        fbrNBglm.ctrl$start <- glm.fitted$coefficients -
          pmax.int(-log10, pmin.int(log10, tmp.bias))
        wt.idx <- which(glm.fitted$weights > 0)
        fbrglm.fit <- fbrNBglm.fit(x = this.x[wt.idx, , drop = FALSE],
                                   y = counts[gn, wt.idx], offset = log.offset[wt.idx],
                                   family = nbLogFamily, control = fbrNBglm.ctrl)
        parms[gn, ] <- fbrglm.fit
        tmp.nonNA <- which(!is.na(fbrglm.fit))
      }
      means[gn, ] <- as.vector(exp(design[, tmp.nonNA, drop = FALSE] %*%
                                     parms[gn, tmp.nonNA]) * est.offset)
    }
    glm0.fitted <- NULL  # right-hand side missing in the source
    glm1.fitted <- NULL  # right-hand side missing in the source
    bFact0 <- bartlettFactor(glm0.fitted)
    bFact1 <- bartlettFactor(glm1.fitted)
    df0 <- NULL  # right-hand side missing in the source
    df1 <- NULL  # right-hand side missing in the source
    bartcorr.nelder <- (bFact1$nelder - bFact0$nelder) / (df1 - df0)
    bartcorr.cord <- (bFact1$cordiero - bFact0$cordiero) / (df1 - df0)
    LRT <- glm0.fitted$deviance - glm1.fitted$deviance
    LRT.nelder <- LRT / bartcorr.nelder
    LRT.cord <- LRT / bartcorr.cord
    return(list(dev = deviance.vector, means = means, parms = parms))
  }
}
"pdfcau" <- function(x,para) { if(! are.parcau.valid(para)) return() names(para$para) <- NULL return(dcauchy(x, location=para$para[1], scale=para$para[2])) }
buildGraph <- function(x, k = 10, d = 50, transposed = FALSE, get.distance = FALSE,
                       reduced.dim = "PCA", BNPARAM = KmknnParam(),
                       BSPARAM = bsparam(), BPPARAM = SerialParam()) {
  if (is(x, "Milo")) {
    if (length(reducedDimNames(x)) == 0) {
      x_pca <- prcomp_irlba(t(logcounts(x)), n = min(d + 1, ncol(x) - 1),
                            scale. = TRUE, center = TRUE)
      reducedDim(x, "PCA") <- x_pca$x
      attr(reducedDim(x, "PCA"), "rotation") <- x_pca$rotation
      reduced.dim <- "PCA"
    } else if (!any(reducedDimNames(x) %in% c(reduced.dim))) {
      message("Computing PCA - name not in slot")
      x_pca <- prcomp_irlba(t(logcounts(x)), n = min(d + 1, ncol(x) - 1),
                            scale. = TRUE, center = TRUE)
      reducedDim(x, "PCA") <- x_pca$x
      attr(reducedDim(x, "PCA"), "rotation") <- x_pca$rotation
      reduced.dim <- "PCA"
    }
  } else if (is.matrix(x) & isTRUE(transposed)) {
    SCE <- SingleCellExperiment(assays = list(counts = Matrix(0L, nrow = 1, ncol = nrow(x))),
                                reducedDims = SimpleList("PCA" = x))
    x <- Milo(SCE)
  } else if (is.matrix(x) & isFALSE(transposed)) {
    SCE <- SingleCellExperiment(assays = list(logcounts = x))
    x_pca <- prcomp_irlba(t(logcounts(SCE)), n = min(d + 1, ncol(x) - 1),
                          scale. = TRUE, center = TRUE)
    reducedDim(SCE, "PCA") <- x_pca$x
    x <- Milo(SCE)
    attr(reducedDim(x, "PCA"), "rotation") <- x_pca$rotation
  } else if (is(x, "SingleCellExperiment")) {
    if (is.null(reducedDim(x))) {
      x_pca <- prcomp_irlba(t(logcounts(x)), n = min(d + 1, ncol(x) - 1),
                            scale. = TRUE, center = TRUE)
      reducedDim(x, "PCA") <- x_pca$x
      attr(reducedDim(x, "PCA"), "rotation") <- x_pca$rotation
      reduced.dim <- "PCA"
    }
    x <- Milo(x)
  }
  .buildGraph(x, k = k, d = d, get.distance = get.distance, reduced.dim = reduced.dim,
              BNPARAM = BNPARAM, BSPARAM = BSPARAM, BPPARAM = BPPARAM)
}

.buildGraph <- function(x, k = 10, d = 50, get.distance = FALSE, reduced.dim = "PCA",
                        BNPARAM = KmknnParam(), BSPARAM = bsparam(),
                        BPPARAM = SerialParam()) {
  nn.out <- .setup_knn_data(x = reducedDim(x, reduced.dim), d = d, k = k,
                            BNPARAM = BNPARAM, BSPARAM = BSPARAM, BPPARAM = BPPARAM)
  message("Constructing kNN graph with k:", k)
  zee.graph <- .neighborsToKNNGraph(nn.out$index, directed = FALSE)
  graph(x) <- zee.graph
  if (isTRUE(get.distance)) {
    message("Retrieving distances from ", k, " nearest neighbours")
    old.dist <- matrix(0L, ncol = ncol(x), nrow = ncol(x))
    n.idx <- ncol(x)
    for (i in seq_len(n.idx)) {
      i.knn <- nn.out$index[i, ]
      i.dists <- nn.out$distance[i, ]
      old.dist[i, i.knn] <- i.dists
      old.dist[i.knn, i] <- i.dists
    }
    old.dist <- as(old.dist, "dgCMatrix")
    nhoodDistances(x) <- old.dist
  }
  # Slot access reconstructed: the source was mangled to "[email protected]" by
  # e-mail obfuscation; the `.k` slot of the Milo object is assumed.
  x@.k <- k
  x
}

.setup_knn_data <- function(x, k, d = 50, get.distance = FALSE,
                            BNPARAM, BSPARAM, BPPARAM) {
  findKNN(x[, seq_len(d)], k = k, BNPARAM = BNPARAM, BPPARAM = BPPARAM,
          get.distance = get.distance)
}
ch2matrix <- function(ch) {
  if (is.data.frame(ch)) ch <- as.matrix(ch)
  if (is.matrix(ch) && is.numeric(ch) && ncol(ch) > 1) return(ch)
  if (is.factor(ch)) ch <- as.character(ch)
  if (is.matrix(ch) && ncol(ch) > 1)
    stop("The format of 'ch' was not recognised.", call. = FALSE)
  ch0 <- as.numeric(ch)
  if (is.character(ch)) {
    len <- nchar(ch)
    if (sum(len - len[1]) != 0)
      stop("Capture histories must all have the same number of occasions.")
    ncol <- len[1]
  } else {
    ncol <- round(log10(max(ch0))) + 1
  }
  out <- matrix(NA, length(ch0), ncol)
  colnames(out) <- paste0("Y", 1:ncol)
  for (i in 1:ncol) {
    out[, i] <- ch0 %/% 10^(ncol - i)
    ch0 <- ch0 %% 10^(ncol - i)
  }
  return(out)
}
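# A minimal usage sketch with made-up capture histories: each string records
# one animal's detections over four occasions, and ch2matrix() unpacks them
# into a numeric matrix with one column per occasion.
ch <- c("0101", "1100", "0011")
ch2matrix(ch)
#      Y1 Y2 Y3 Y4
# [1,]  0  1  0  1
# [2,]  1  1  0  0
# [3,]  0  0  1  1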
hcc_parallel <- function(Data, W, K, maxLevel, ncores = 2, DIR_output = tempfile(),
                         hybrid = FALSE, verbose = FALSE, ...) {
  # Requires foreach's %dopar% operator to be available.
  bf_cl_assign <- file.path(DIR_output, "Cluster_assign_out")
  if (file.exists(paste0(bf_cl_assign, ".bk"))) {
    file.remove(paste0(bf_cl_assign, ".bk"))
  }
  Output <- bigstatsr::FBM(nrow = ncol(Data), ncol = maxLevel + 1,
                           backingfile = bf_cl_assign)$save()
  Output[, 1] <- 1
  K_global <- K
  bf_C_out <- file.path(DIR_output, "Centroids_out")
  if (file.exists(paste0(bf_C_out, ".bk"))) {
    file.remove(paste0(bf_C_out, ".bk"))
  }
  if (hybrid) {
    size_C_out <- prod(sapply(1:(maxLevel + 1), function(i) max(floor(K_global / i), 2)))
  } else {
    size_C_out <- K^maxLevel
  }
  C_out <- bigstatsr::FBM(nrow = nrow(Data), ncol = size_C_out, backingfile = bf_C_out)$save()
  if (verbose) message("level 1")
  SK <- Sketch(Data = Data, ind.col = seq(ncol(Data)), W = W, ncores = ncores,
               parallel = TRUE)
  rowmaxs <- bigstatsr::big_apply(Data, a.FUN = function(X, ind) apply(X[, ind], 1, max),
                                  a.combine = "cbind", ind = seq(ncol(Data)))
  rowmins <- bigstatsr::big_apply(Data, a.FUN = function(X, ind) apply(X[, ind], 1, min),
                                  a.combine = "cbind", ind = seq(ncol(Data)))
  rowmaxs <- apply(rowmaxs, 1, max)
  rowmins <- apply(rowmins, 1, min)
  CKM_out <- COMPR(Data = Data, ind.col = 1:ncol(Data), K = K, Frequencies = W,
                   lower_b = rowmins, upper_b = rowmaxs, SK_Data = SK,
                   HardThreshold = TRUE, ...)
  non_null_weights <- which(CKM_out$weight > 0)
  Centroids <- as.matrix(CKM_out$C[, non_null_weights], ncol = length(non_null_weights))
  if (length(non_null_weights) > 1) {
    Centroids_norm <- apply(Centroids, 2, function(x) x / sqrt(sum(x * x)))
  } else {
    Centroids_norm <- as.matrix(Centroids / sqrt(sum(Centroids * Centroids)), ncol = 1)
  }
  K_c_norm <- bigstatsr::big_apply(Data, a.FUN = function(X, ind, Y) abs(Y %*% X[, ind]),
                                   a.combine = "cbind", ind = 1:ncol(Data),
                                   Y = t(Centroids_norm))
  Neighbours <- apply(K_c_norm, 2, function(x) which.max(x))
  Output[, 2] <- Neighbours
  if (maxLevel != 1) {
    index <- 1:ncol(Data)
    cluster_size <- table(Neighbours)
    small_cluster <- as.numeric(names(cluster_size)[which(cluster_size <= K)])
    for (s_cl in small_cluster) {
      small_cl_index <- index[which(Neighbours == s_cl)]
      if (!hybrid) {
        Output[small_cl_index, maxLevel + 1] <- K^(maxLevel - 1) * (Output[small_cl_index, 2] - 1) + 1
      } else {
        coeff <- prod(sapply(3:(maxLevel + 1), function(i) max(floor(K_global / i), 2)))
        Output[small_cl_index, maxLevel + 1] <- coeff * (Output[small_cl_index, 2] - 1) + 1
      }
      c_nbr_in_C_out <- unique(Output[small_cl_index, maxLevel + 1])
      C_out[, c_nbr_in_C_out] <- Centroids[, s_cl]
    }
  } else {
    # NOTE: the source indexed Output[, level + 1] here, but `level` is not yet
    # defined at this point; maxLevel + 1 (== 2 when maxLevel == 1) is assumed.
    c_nbr_in_C_out <- sort(unique(Output[, maxLevel + 1]))
    c_nbr <- sort(unique(Neighbours))
    C_out[, c_nbr_in_C_out] <- Centroids[, c_nbr]
  }
  cluster_Type <- switch(Sys.info()["sysname"],
    "Linux"   = "FORK",
    "Windows" = "PSOCK",
    "Darwin"  = "FORK",
    "SunOS"   = "PSOCK",
    stop(paste("Package is not compatible with", Sys.info()["sysname"]))
  )
  core <- parallel::makeCluster(ncores, type = cluster_Type)
  doParallel::registerDoParallel(core)
  doRNG::registerDoRNG()
  on.exit(parallel::stopCluster(core))
  for (level in 2:maxLevel) {
    if (hybrid) {
      K <- max(floor(K_global / level), 2)
    }
    clusters_level <- sort(unique(Output[, level]))
    if (verbose) message("level ", level)
    cl <- NULL
    tmp <- foreach::foreach(cl = clusters_level) %dopar% {
      index <- which(Output[, level] == cl)
      if (length(index) > K) {
        SK <- Sketch(Data = Data, ind.col = index, W = W)
        rowmaxs <- bigstatsr::big_apply(Data, a.FUN = function(X, ind) apply(X[, ind], 1, max),
                                        a.combine = "cbind", ind = index)
        rowmins <- bigstatsr::big_apply(Data, a.FUN = function(X, ind) apply(X[, ind], 1, min),
                                        a.combine = "cbind", ind = index)
        rowmaxs <- apply(rowmaxs, 1, max)
        rowmins <- apply(rowmins, 1, min)
        CKM_out <- COMPR(Data = Data, ind.col = index, K = K, Frequencies = W,
                         lower_b = rowmins, upper_b = rowmaxs, SK_Data = SK,
                         HardThreshold = TRUE, ...)
        non_null_weights <- which(CKM_out$weight > 0)
        Centroids <- as.matrix(CKM_out$C[, non_null_weights], ncol = length(non_null_weights))
        if (length(non_null_weights) > 1) {
          Centroids_norm <- apply(Centroids, 2, function(x) x / sqrt(sum(x * x)))
        } else {
          Centroids_norm <- as.matrix(Centroids / sqrt(sum(Centroids * Centroids)), ncol = 1)
        }
        K_c_norm <- bigstatsr::big_apply(Data, a.FUN = function(X, ind, Y) abs(Y %*% X[, ind]),
                                         a.combine = "cbind", ind = index,
                                         Y = t(Centroids_norm))
        Neighbours <- apply(K_c_norm, 2, function(x) which.max(x))
        Output[index, level + 1] <- Neighbours + K * (Output[index, level] - 1)
        if (level < maxLevel) {
          cluster_size <- table(Neighbours)
          small_cluster <- as.numeric(names(cluster_size)[which(cluster_size <= K)])
          for (s_cl in small_cluster) {
            small_cl_index <- index[which(Neighbours == s_cl)]
            if (hybrid) {
              coeff <- prod(sapply(min(level + 1, maxLevel + 1):(maxLevel + 1),
                                   function(i) max(floor(K_global / i), 2)))
              Output[small_cl_index, maxLevel + 1] <- coeff * (Output[small_cl_index, level + 1] - 1) + 1
            } else {
              Output[small_cl_index, maxLevel + 1] <- K^(maxLevel - level) * (Output[small_cl_index, level + 1] - 1) + 1
            }
            c_nbr_in_C_out <- unique(Output[small_cl_index, maxLevel + 1])
            C_out[, c_nbr_in_C_out] <- Centroids[, s_cl]
          }
        } else {
          c_nbr_in_C_out <- sort(unique(Output[index, level + 1]))
          c_nbr <- sort(unique(Neighbours))
          C_out[, c_nbr_in_C_out] <- Centroids[, c_nbr]
        }
      } else {
        Output[index, level + 1] <- 1 + K * (Output[index, level] - 1)
      }
      NULL
    }
  }
  clusters <- data.frame("row" = 1:ncol(Data), "col" = Output[, maxLevel + 1])
  Cluster_partition <- unstack(clusters)
  names(Cluster_partition) <- 1:length(Cluster_partition)
  return(Cluster_partition)
}
col2hex <- function(col, alpha = FALSE) {
  # `rgbmat` avoids shadowing grDevices::rgb(), which is called below.
  rgbmat <- col2rgb(col, alpha)
  if (alpha) {
    apply(rgbmat, 2, function(z) rgb(z[1], z[2], z[3], z[4], maxColorValue = 255))
  } else {
    apply(rgbmat, 2, function(z) rgb(z[1], z[2], z[3], maxColorValue = 255))
  }
}
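# A minimal usage sketch: convert named colours to hex codes, with and without
# the alpha channel.
col2hex(c("red", "steelblue"))  # "#FF0000" "#4682B4"
col2hex("red", alpha = TRUE)    # "#FF0000FF"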
WhitenSVD <- function(x, tol = 1e-06) {
  Nm <- nrow(x) - 1
  MEANS <- colMeans(x)
  x.c <- as.matrix(sweep(x, 2, MEANS, "-"))
  SVD <- svd(x.c, nu = 0)
  SV <- SVD$d
  if (!is.null(tol)) {
    rank <- sum(SVD$d > (SVD$d[1L] * tol))
    if (rank < ncol(x)) {
      SVD$v <- SVD$v[, 1L:rank, drop = FALSE]
      SVD$d <- SVD$d[1L:rank]
    }
  }
  SIGMAs <- SVD$d / sqrt(Nm)
  TRANS <- SVD$v %*% diag(1 / SIGMAs)
  RES <- x.c %*% TRANS
  attr(RES, "center") <- MEANS
  attr(RES, "transform") <- TRANS
  attr(RES, "backtransform") <- diag(SIGMAs) %*% t(SVD$v)
  attr(RES, "SingularValues") <- SV
  RES
}
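# A minimal usage sketch with simulated data: after whitening, the sample
# covariance of the result is (numerically) the identity matrix.
set.seed(1)
x <- matrix(rnorm(200), ncol = 4)
w <- WhitenSVD(x)
round(cov(w), 8)  # ~ 4 x 4 identity matrix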
context("implementation [BTabilities]") citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) citeModel2 <- update(citeModel, refcat = "JASA") test_that("BTabilities works with changing refcat", { abilities1 <- BTabilities(citeModel) abilities2 <- BTabilities(citeModel2) expect_equal(abilities2[, "ability"], abilities1[, "ability"] - abilities1["JASA", "ability"]) M <- diag(4) M[3, ] <- -1 M[, 3] <- 0 V <- cbind(0, rbind(0, vcov(citeModel))) expect_equal(unname(abilities2[, "s.e."]), sqrt(diag(t(M) %*% V %*% M))) }) test_that("BTabilities works with sum to zero contrasts", { mod3 <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", x = FALSE, contrasts = list(journal = "contr.sum"), data = citations.sf) citations.sf$journal1 <- C(citations.sf$journal1, "contr.sum") citations.sf$journal2 <- C(citations.sf$journal2, "contr.sum") mod3b <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", x = FALSE, data = citations.sf) expect_equivalent(BTabilities(mod3), BTabilities(mod3b)) M <- matrix(- 1/4, nrow = 4, ncol = 4) diag(M) <- 1 - 1/4 expect_equivalent(BTabilities(mod3)[, "ability"], BTabilities(citeModel)[, "ability"] %*% M) V <- cbind(0, rbind(0, vcov(citeModel))) expect_equivalent(BTabilities(mod3)[, "s.e."], sqrt(diag(t(M) %*% V %*% M))) })
test_that("can control output with file arg/option", { path <- tempfile() withr::defer(unlink(path)) with_reporter( MinimalReporter$new(file = path), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) withr::local_options(testthat.output_file = path) with_reporter( MinimalReporter$new(), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) }) test_that("should not automatically skip in non-utf-8 locales", { withr::local_locale(LC_CTYPE = "C") expect_true(TRUE) })
spcrglm <- function(x, y, k, family = c("binomial", "poisson", "multinomial"),
                    lambda.B, lambda.gamma, w = 0.1, xi = 0.01, adaptive = FALSE,
                    q = 1, center = TRUE, scale = FALSE) {
  # The source compared the full default vector with ==; match.arg() resolves
  # the selected family first.
  family <- match.arg(family)
  if (!is.matrix(x)) stop("x must be a matrix.")
  if (mode(x) != "numeric") stop("x must be numeric.")
  if (!is.vector(y)) stop("y must be a vector.")
  if (mode(y) != "numeric") stop("y must be numeric.")
  if (center == TRUE) x <- sweep(x, 2, apply(x, 2, mean))
  if (scale == TRUE) x <- scale(x)
  if (family == "binomial") {
    if (adaptive == FALSE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- mean(y)
      gamma <- rep(0, k)
      Beta <- matrix(0, nrow(A), k)
      spcr.object <- SPCRLoG(x, y, k, xi, w, A, gamma0, gamma, Beta,
                             lambda.gamma, lambda.B)
      ans <- list(loadings.B = spcr.object$Beta, gamma = spcr.object$gamma,
                  gamma0 = spcr.object$gamma0, loadings.A = spcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
    if (adaptive == TRUE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- mean(y)
      gamma <- rep(0, k)
      Beta <- matrix(0, nrow(A), k)
      spcr.object <- SPCRLoG(x, y, k, xi, w, A, gamma0, gamma, Beta,
                             lambda.gamma, lambda.B)
      Beta <- spcr.object$Beta
      gamma <- spcr.object$gamma
      gamma0 <- spcr.object$gamma0
      A <- spcr.object$A
      BetaWeight <- Beta / sum(abs(Beta))
      adaspcr.object <- adaSPCRLoG(x, y, k, q, xi, w, A, gamma0, gamma, Beta,
                                   lambda.gamma, lambda.B, BetaWeight)
      ans <- list(loadings.B = adaspcr.object$Beta, gamma = adaspcr.object$gamma,
                  gamma0 = adaspcr.object$gamma0, loadings.A = adaspcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
  }
  if (family == "poisson") {
    if (adaptive == FALSE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- mean(y)
      gamma <- rep(0, k)
      Beta <- matrix(0, nrow(A), k)
      spcr.object <- SPCRPoi(x, y, k, xi, w, A, gamma0, gamma, Beta,
                             lambda.gamma, lambda.B)
      ans <- list(loadings.B = spcr.object$Beta, gamma = spcr.object$gamma,
                  gamma0 = spcr.object$gamma0, loadings.A = spcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
    if (adaptive == TRUE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- mean(y)
      gamma <- rep(0, k)
      Beta <- matrix(0, nrow(A), k)
      spcr.object <- SPCRPoi(x, y, k, xi, w, A, gamma0, gamma, Beta,
                             lambda.gamma, lambda.B)
      Beta <- spcr.object$Beta
      gamma <- spcr.object$gamma
      gamma0 <- spcr.object$gamma0
      A <- spcr.object$A
      BetaWeight <- Beta / sum(abs(Beta))
      adaspcr.object <- adaSPCRPoi(x, y, k, q, xi, w, A, gamma0, gamma, Beta,
                                   lambda.gamma, lambda.B, BetaWeight)
      ans <- list(loadings.B = adaspcr.object$Beta, gamma = adaspcr.object$gamma,
                  gamma0 = adaspcr.object$gamma0, loadings.A = adaspcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
  }
  if (family == "multinomial") {
    Y <- y
    unique.Y <- unique(Y)
    y <- matrix(0, nrow(x), length(unique.Y))
    for (i in 1:nrow(x))
      for (j in 1:length(unique.Y))
        if (Y[i] == unique.Y[j]) y[i, j] <- 1
    if (adaptive == FALSE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- apply(y, 2, mean)
      gamma <- matrix(0, k, ncol(y))
      Beta <- matrix(0, ncol(x), k)
      spcr.object <- SPCRMultiLoG(x, y, k, xi, w, A, gamma0, gamma, Beta,
                                  lambda.gamma, lambda.B)
      ans <- list(loadings.B = spcr.object$Beta, gamma = spcr.object$gamma,
                  gamma0 = spcr.object$gamma0, loadings.A = spcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
    if (adaptive == TRUE) {
      A <- as.matrix(eigen(var(x))$vectors[, 1:k])
      gamma0 <- apply(y, 2, mean)
      gamma <- matrix(0, k, ncol(y))
      Beta <- matrix(0, ncol(x), k)
      spcr.object <- SPCRMultiLoG(x, y, k, xi, w, A, gamma0, gamma, Beta,
                                  lambda.gamma, lambda.B)
      Beta <- spcr.object$Beta
      gamma <- spcr.object$gamma
      gamma0 <- spcr.object$gamma0
      A <- spcr.object$A
      BetaWeight <- Beta / sum(abs(Beta))
      adaspcr.object <- adaSPCRMultiLoG(x, y, k, q, xi, w, A, gamma0, gamma, Beta,
                                        lambda.gamma, lambda.B, BetaWeight)
      ans <- list(loadings.B = adaspcr.object$Beta, gamma = adaspcr.object$gamma,
                  gamma0 = adaspcr.object$gamma0, loadings.A = adaspcr.object$A,
                  call = match.call())
      class(ans) <- "spcrglm"
    }
  }
  return(ans)
}
PredictivePosteriorPlot.TSPNDE <- function(discrep) {
  temp <- is.infinite(discrep) & !is.na(discrep)
  if (sum(temp, na.rm = TRUE) > 0) {
    cat(sum(temp, na.rm = TRUE), " infinite discrepancy measures set to NA\n")
  }
  discrep[temp] <- NA
  discrep.long <- data.table::melt(
    data.table::as.data.table(discrep),
    measure.vars = list(seq(1, ncol(discrep), 2), seq(2, ncol(discrep), 2)),
    value.name = c("Observed", "Simulated"),
    variable.name = "Statistic",
    variable.factor = FALSE
  )
  titles <- data.frame(
    Statistic = as.character(1:6),
    Title = c("Freeman-Tukey for m2", "Deviance for m2",
              "Freeman-Tukey for u2", "Deviance for u2",
              "Total Freeman-Tukey", "Total Deviance"),
    stringsAsFactors = FALSE
  )
  discrep.long <- merge(discrep.long, titles)
  p_values <- plyr::ddply(discrep.long, c("Statistic", "Title"), function(x) {
    p.value <- mean(x$Observed < x$Simulated, na.rm = TRUE)
    data.frame(p.value = p.value)
  })
  p_values$label <- paste("Bayesian GOF P:",
                          formatC(p_values$p.value, digits = 2, format = "f"))
  gof.plot <- ggplot(data = discrep.long, aes_(x = ~Simulated, y = ~Observed)) +
    geom_point() +
    geom_abline(intercept = 0, slope = 1) +
    geom_text(data = p_values, x = Inf, y = -Inf, hjust = 1, vjust = 0,
              label = p_values$label) +
    facet_wrap(~Title, ncol = 2, nrow = 3, scales = "free")
  gof <- list(bp.plot = gof.plot,
              bp.values = data.frame(test.names = titles, p.value = p_values,
                                     stringsAsFactors = FALSE))
  gof
}
knitr::opts_chunk$set(echo = TRUE)

p1 <- c(0.28, 0.13, 0.25, 0.34)
p2 <- c(0.28, 0.72)
p3 <- c(0.43, 0.37, 0.2)
stratumFraction <- p1 %x% p2 %x% p3

theta1 <- c(1, 2.127, 0.528, 0.413)
theta2 <- c(1, 0.438)
theta3 <- c(1, 0.614, 0.159)
lambda2 <- 0.009211 * exp(log(theta1) %x% log(theta2) %x% log(theta3))

library(lrstat)

caltime(nevents = 66, accrualDuration = 24, accrualIntensity = 12,
        stratumFraction = stratumFraction, lambda1 = 0.4466 * lambda2,
        lambda2 = lambda2, followupTime = 100)

lrpower(kMax = 3, informationRates = c(0.333, 0.667, 1), alpha = 0.025,
        typeAlphaSpending = "sfOF", accrualIntensity = 12,
        stratumFraction = stratumFraction, lambda1 = 0.4466 * lambda2,
        lambda2 = lambda2, accrualDuration = 24, followupTime = 30.92)

lrsim(kMax = 3, informationTime = c(0.333, 0.667, 1),
      criticalValues = c(3.710, 2.511, 1.993), accrualIntensity = 12,
      stratumFraction = stratumFraction, lambda1 = 0.4466 * lambda2,
      lambda2 = lambda2, accrualDuration = 24, followupTime = 30.92,
      plannedEvents = c(22, 44, 66), maxNumberOfIterations = 1000, seed = 314159)
clearNN <- function(donotremove, donotdetach = NULL) {
  if (is.null(donotdetach))
    donotdetach <- c(".GlobalEnv", "package:kableExtra", "package:dplyr",
                     "package:stringr", "package:knitr", "package:rmarkdown",
                     "package:captioner", "package:NNbenchmark", "package:R6",
                     "package:pkgload", "tools:rstudio", "package:RWsearch",
                     "package:pacman", "package:stats", "package:graphics",
                     "package:grDevices", "package:utils", "package:datasets",
                     "package:methods", "Autoloads", "package:base")
  if (!is.element("ZZ", donotdetach)) {
    while ("ZZ" %in% search()) detach("ZZ", character.only = TRUE)
  }
  if (length(setdiff(search(), donotdetach)) > 0) {
    vecns <- setdiff(search(), donotdetach)
    strpkgs <- strsplit(vecns, ":", fixed = TRUE)
    lpkgs <- lengths(strpkgs)
    fun <- function(l, pkg) if (l == 2) pkg[2] else NULL
    pkgs <- unlist(mapply(fun, lpkgs, strpkgs))
    lapply(pkgs, pkgload::unload, quiet = TRUE)
  }
  if (length(setdiff(search(), donotdetach)) > 0)
    tryCatch(
      lapply(setdiff(search(), donotdetach), detach, unload = TRUE,
             character.only = TRUE, force = TRUE),
      warning = function(w) {}
    )
}

detachNN <- function() {
  while ("ZZ" %in% search()) detach("ZZ", character.only = TRUE)
}
dist.circular <- function(x, method = "correlation", diag = FALSE, upper = FALSE) {
  x <- conversion.circular(x, units = "radians", zero = 0,
                           rotation = "counter", modulo = "2pi")
  attr(x, "class") <- attr(x, "circularp") <- NULL
  if (!is.na(pmatch(method, "correlation"))) method <- "correlation"
  METHODS <- c("correlation", "angularseparation", "chord", "geodesic")
  method <- pmatch(method, METHODS)
  if (is.na(method)) stop("invalid distance method")
  if (method == -1) stop("ambiguous distance method")
  N <- nrow(x <- as.matrix(x))
  d <- .C("R_distance", x = as.double(x), nr = N, nc = ncol(x),
          d = double(N * (N - 1) / 2), diag = as.integer(FALSE),
          method = as.integer(method), NAOK = TRUE, PACKAGE = "circular")$d
  attr(d, "Size") <- N
  attr(d, "Labels") <- dimnames(x)[[1]]
  attr(d, "Diag") <- diag
  attr(d, "Upper") <- upper
  attr(d, "method") <- METHODS[method]
  attr(d, "call") <- match.call()
  class(d) <- "dist"
  return(d)
}
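# A minimal usage sketch (needs the compiled 'circular' package, so shown
# commented out): pairwise chord distances between three rows of angles.
#   library(circular)
#   theta <- circular(matrix(runif(12, 0, 2 * pi), nrow = 3))
#   dist.circular(theta, method = "chord")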
fun.RPRS.ml.m <- function(data, rs.init = c(-1.5, 1.5), leap = 3,
                          FUN = "runif.sobol", no = 10000) {
  RPRS <- fun.fit.gl.v3m(a = rs.init[1], b = rs.init[2], data = data,
                         fun = fun.auto.perc.rs, no = no, leap = leap,
                         FUN = FUN)$unique.optim.result
  RPRS <- fun.fit.gl.v3a(RPRS[1], RPRS[2], RPRS[3], RPRS[4], data, "rs")
  return(RPRS)
}
tar_reprex(
  pipeline = {
    list(
      tar_target(data, data.frame(x = sample.int(1e3))),
      tar_target(summary, mean(data$x, na.rm = TRUE))
    )
  },
  run = {
    tar_outdated()
    tar_make()
  }
)
intercorr <- function(k_cat = 0, k_cont = 0, k_pois = 0, k_nb = 0,
                      method = c("Fleishman", "Polynomial"), constants = NULL,
                      marginal = list(), support = list(), lam = NULL, p_zip = 0,
                      size = NULL, prob = NULL, mu = NULL, p_zinb = 0, rho = NULL,
                      seed = 1234, epsilon = 0.001, maxit = 1000, nrand = 100000,
                      quiet = FALSE) {
  k <- k_cat + k_cont + k_pois + k_nb
  if (k_pois > 0) {
    if (length(p_zip) < k_pois) p_zip <- c(rep(0, k_pois - length(p_zip)), p_zip)
  }
  if (k_nb > 0) {
    if (length(prob) > 0) mu <- size * (1 - prob) / prob
    if (length(p_zinb) < k_nb) p_zinb <- c(rep(0, k_nb - length(p_zinb)), p_zinb)
  }
  if (k_cat > 0) {
    if (length(support) == 0) {
      for (i in 1:k_cat) {
        support[[i]] <- 1:(length(marginal[[i]]) + 1)
      }
    }
  }
  rho_list <- separate_rho(k_cat = k_cat, k_cont = k_cont, k_pois = k_pois,
                           k_nb = k_nb, rho = rho)
  rho_cat <- rho_list$rho_cat
  rho_cat_pois <- rho_list$rho_cat_pois
  rho_cat_nb <- rho_list$rho_cat_nb
  rho_cont_cat <- rho_list$rho_cont_cat
  rho_cont <- rho_list$rho_cont
  rho_cont_pois <- rho_list$rho_cont_pois
  rho_cont_nb <- rho_list$rho_cont_nb
  rho_pois <- rho_list$rho_pois
  rho_pois_nb <- rho_list$rho_pois_nb
  rho_nb <- rho_list$rho_nb
  Sigma_cat <- Sigma_cat_cont <- Sigma_cat_pois <- Sigma_cat_nb <- NULL
  Sigma_cont_cat <- Sigma_cont <- Sigma_cont_pois <- Sigma_cont_nb <- NULL
  Sigma_pois_cat <- Sigma_pois_cont <- Sigma_pois <- Sigma_pois_nb <- NULL
  Sigma_nb_cat <- Sigma_nb_cont <- Sigma_nb_pois <- Sigma_nb <- NULL
  if (k_cat == 1) {
    Sigma_cat <- matrix(1, nrow = k_cat, ncol = k_cat)
  }
  if (k_cat > 1) {
    Sigma_cat <- diag(1, k_cat, k_cat)
    for (i in 1:(k_cat - 1)) {
      for (j in (i + 1):k_cat) {
        if (length(marginal[[i]]) == 1 & length(marginal[[j]]) == 1) {
          corr_bin <- function(rho) {
            phix1x2 <- integrate(function(z2) {
              sapply(z2, function(z2) {
                integrate(function(z1)
                  ((2 * pi * sqrt(1 - rho^2))^-1) *
                    exp(-(z1^2 - 2 * rho * z1 * z2 + z2^2) / (2 * (1 - rho^2))),
                  -Inf, qnorm(1 - marginal[[i]][1]))$value
              })
            }, -Inf, qnorm(1 - marginal[[j]][1]))$value -
              rho_cat[i, j] * sqrt(marginal[[i]][1] * (1 - marginal[[j]][1]) *
                                     marginal[[j]][1] * (1 - marginal[[i]][1])) -
              ((1 - marginal[[i]][1]) * (1 - marginal[[j]][1]))
            phix1x2
          }
          Sigma_cat[i, j] <- suppressWarnings(
            dfsane(par = 0, fn = corr_bin, control = list(trace = FALSE)))$par
        } else {
          Sigma_cat[i, j] <- suppressWarnings(
            ord_norm(list(marginal[[i]], marginal[[j]]),
                     matrix(c(1, rho_cat[i, j], rho_cat[i, j], 1), 2, 2),
                     list(support[[i]], support[[j]]),
                     epsilon = epsilon, maxit = maxit)$SigmaC[1, 2])
        }
        Sigma_cat[j, i] <- Sigma_cat[i, j]
      }
    }
    if (min(eigen(Sigma_cat, symmetric = TRUE)$values) < 0 & quiet == FALSE) {
      message("It is not possible to find a correlation matrix for MVN ensuring rho for the ordinal variables. Try the error loop.")
    }
  }
  if (k_cont > 0) {
    Sigma_cont <- intercorr_cont(method, constants, rho_cont)
  }
  if (k_cat > 0 & k_cont > 0) {
    Sigma_cont_cat <- findintercorr_cont_cat(method, constants, rho_cont_cat,
                                             marginal, support)
    Sigma_cat_cont <- t(Sigma_cont_cat)
  }
  if (k_cat > 0 & k_pois > 0) {
    Sigma_cat_pois <- intercorr_cat_pois(rho_cat_pois, marginal, lam, p_zip, nrand, seed)
    Sigma_pois_cat <- t(Sigma_cat_pois)
  }
  if (k_cat > 0 & k_nb > 0) {
    Sigma_cat_nb <- intercorr_cat_nb(rho_cat_nb, marginal, size, mu, p_zinb, nrand, seed)
    Sigma_nb_cat <- t(Sigma_cat_nb)
  }
  if (k_cont > 0 & k_pois > 0) {
    Sigma_cont_pois <- intercorr_cont_pois(method, constants, rho_cont_pois,
                                           lam, p_zip, nrand, seed)
    Sigma_pois_cont <- t(Sigma_cont_pois)
  }
  if (k_cont > 0 & k_nb > 0) {
    Sigma_cont_nb <- intercorr_cont_nb(method, constants, rho_cont_nb,
                                       size, mu, p_zinb, nrand, seed)
    Sigma_nb_cont <- t(Sigma_cont_nb)
  }
  if (k_pois == 1) {
    Sigma_pois <- matrix(1, nrow = k_pois, ncol = k_pois)
  }
  if (k_pois > 1) {
    Sigma_pois <- intercorr_pois(rho_pois, lam, p_zip, nrand, seed)
  }
  if (k_nb == 1) {
    Sigma_nb <- matrix(1, nrow = k_nb, ncol = k_nb)
  }
  if (k_nb > 1) {
    Sigma_nb <- intercorr_nb(rho_nb, size, mu, p_zinb, nrand, seed)
  }
  if (k_pois > 0 & k_nb > 0) {
    Sigma_pois_nb <- intercorr_pois_nb(rho_pois_nb, lam, p_zip, size, mu,
                                       p_zinb, nrand, seed)
    Sigma_nb_pois <- t(Sigma_pois_nb)
  }
  Sigma <- rbind(cbind(Sigma_cat, Sigma_cat_cont, Sigma_cat_pois, Sigma_cat_nb),
                 cbind(Sigma_cont_cat, Sigma_cont, Sigma_cont_pois, Sigma_cont_nb),
                 cbind(Sigma_pois_cat, Sigma_pois_cont, Sigma_pois, Sigma_pois_nb),
                 cbind(Sigma_nb_cat, Sigma_nb_cont, Sigma_nb_pois, Sigma_nb))
  return(Sigma)
}
expected <- eval(parse(text = "structure(c(0.881160556059998, 0.0247724805272624,
-0.0019974955427356, -0.0019974955427356, 0.0247724805272624, -0.0019974955427356,
0.0247724805272624, -0.0019974955427356, 0.0247724805272624, 0, 0,
0.0125576823307855, 0.0125576823307855, -0.190699391433663, -0.0406488311152763,
0.089427428453608, -0.0406488311152763, 0.089427428453608, 0, 0,
0.0191393069551563, 0.0191393069551563, -0.204141514983665, -0.0521301369763476,
0.11294863011542, -0.0521301369763476, 0.11294863011542, 0, 0,
0.0308785628281183, 0.0308785628281183, -0.227171902806281, -0.0725941938525439,
0.153254409244259, -0.0725941938525439, 0.153254409244259, 0, 0, 0, 0, 0, 0, 0,
0.0298677558484084, 0.285910294575491), .Dim = c(9L, 5L))"))

test(id = 0, code = {
  argv <- eval(parse(text = "list(structure(c(0.853345363892062,
-0.0256071578412401, -0.0612813871821256, -0.0612813871821256,
-0.0256071578412401, -0.0612813871821256, -0.0256071578412401,
-0.0612813871821256, -0.0256071578412401, 0, 0, 0.0786725879812683,
0.0786725879812683, 0.22692030644121, -0.254660745352065, -0.106413026892124,
-0.254660745352065, -0.106413026892124, 0, 0, 0.11837902431235, 0.11837902431235,
0.243512113989909, -0.322431463531475, -0.134731829015137, -0.322431463531475,
-0.134731829015137, 0, 0, 0.186882435497702, 0.186882435497702, 0.272137080615963,
-0.439352693506895, -0.18358875815211, -0.439352693506895, -0.18358875815211,
0, 0, 0, 0, 0, 0, 0, 0.172822903136154, -0.345645806272308), .Dim = c(9L, 5L)),
structure(c(1.03259546878074, -0.967404531219257, 0.032595468780743,
0.032595468780743, -0.967404531219257, 0.032595468780743, -0.967404531219257,
0.032595468780743, -0.967404531219257, 1.15961954033819, -0.840380459661814,
0.159619540338186, 0.159619540338186, -0.840380459661814, 0.159619540338186,
-0.840380459661814, 0.159619540338186, -0.840380459661814, 1.16167819481816,
-0.838321805181836, 0.161678194818164, 0.161678194818164, -0.838321805181836,
0.161678194818164, -0.838321805181836, 0.161678194818164, -0.838321805181836,
1.1652298823369, -0.834770117663101, 0.165229882336899, 0.165229882336899,
-0.834770117663101, 0.165229882336899, -0.834770117663101, 0.165229882336899,
-0.834770117663101, 1.17282290313615, -0.827177096863846, 0.172822903136154,
0.172822903136154, -0.827177096863846, 0.172822903136154, -0.827177096863846,
0.172822903136154, -0.827177096863846), .Dim = c(9L, 5L)))"))
  do.call(`*`, argv)
}, o = expected)
from_coord_polygon <- function(coord, carto) {
  if (!inherits(carto, "cartogramR"))
    stop("carto does not inherit from class cartogramR")
  if (!is.numeric(coord)) stop("coordinates must be numeric")
  if (is.vector(coord)) {
    if (length(coord) != 2)
      stop("coordinates must be a vector of length 2 or a two-column matrix containing xy coordinates")
    coord <- matrix(coord, ncol = 2, nrow = 1)
  }
  if (is.matrix(coord)) {
    if (ncol(coord) != 2)
      stop("coordinates must be a vector of length 2 or a two-column matrix containing xy coordinates")
  }
  LL <- carto$options$paramsint[1]
  padding <- carto$options$paramsdouble[3]
  bbox <- sf::st_bbox(carto$initial_data)
  Delta <- c(diff(bbox[c(1, 3)]), diff(bbox[c(2, 4)]))
  gg <- c(sum(bbox[c(1, 3)]), sum(bbox[c(2, 4)])) / 2
  mm <- gg - Delta / 2 * padding
  MM <- gg + Delta / 2 * padding
  biggest <- which.max(MM - mm)
  smallest <- which.min(MM - mm)
  scale <- (MM[biggest] - mm[biggest]) / LL
  newmmB <- gg[biggest] - 0.5 * LL * scale
  lxy <- 2^(ceiling(log2((MM[smallest] - mm[smallest]) / scale)))
  newmmS <- gg[smallest] - 0.5 * lxy * scale
  if (biggest == 1) {
    coord <- sweep(coord, 2, c(newmmB, newmmS), FUN = "-")
  } else {
    coord <- sweep(coord, 2, c(newmmS, newmmB), FUN = "-")
  }
  coord <- sweep(coord, 2, rep(scale, 2), FUN = "/")
  return(coord)
}
expect_equal(1 + 1, 2)
Sim.Data.STSBinBin <- function(Monotonicity = c("No"), N.Total = 2000,
                               Seed = sample(1:1000, size = 1)) {
  if (Monotonicity == "No") {
    set.seed(seed = Seed)
    Pi_s <- RandVec(a = 0, b = 1, s = 1, n = 16, m = 1)
    Pi_0000 <- Pi_s$RandVecOutput[1]
    Pi_0100 <- Pi_s$RandVecOutput[2]
    Pi_0010 <- Pi_s$RandVecOutput[3]
    Pi_0001 <- Pi_s$RandVecOutput[4]
    Pi_0101 <- Pi_s$RandVecOutput[5]
    Pi_1000 <- Pi_s$RandVecOutput[6]
    Pi_1010 <- Pi_s$RandVecOutput[7]
    Pi_1001 <- Pi_s$RandVecOutput[8]
    Pi_1110 <- Pi_s$RandVecOutput[9]
    Pi_1101 <- Pi_s$RandVecOutput[10]
    Pi_1011 <- Pi_s$RandVecOutput[11]
    Pi_1111 <- Pi_s$RandVecOutput[12]
    Pi_0110 <- Pi_s$RandVecOutput[13]
    Pi_0011 <- Pi_s$RandVecOutput[14]
    Pi_0111 <- Pi_s$RandVecOutput[15]
    Pi_1100 <- Pi_s$RandVecOutput[16]
    Pi_s_all <- cbind(Pi_0000, Pi_0100, Pi_0010, Pi_0001, Pi_0101, Pi_1000,
                      Pi_1010, Pi_1001, Pi_1110, Pi_1101, Pi_1011, Pi_1111,
                      Pi_0110, Pi_0011, Pi_0111, Pi_1100)
  }
  if (Monotonicity == "True.Endp") {
    set.seed(seed = Seed)
    Pi_s <- RandVec(a = 0, b = 1, s = 1, n = 12, m = 1)
    Pi_0000 <- Pi_s$RandVecOutput[1]
    Pi_0100 <- Pi_s$RandVecOutput[2]
    Pi_0010 <- Pi_s$RandVecOutput[3]
    Pi_0001 <- Pi_s$RandVecOutput[4]
    Pi_0101 <- Pi_s$RandVecOutput[5]
    Pi_1000 <- c(0)
    Pi_1010 <- c(0)
    Pi_1001 <- c(0)
    Pi_1110 <- Pi_s$RandVecOutput[6]
    Pi_1101 <- Pi_s$RandVecOutput[7]
    Pi_1011 <- c(0)
    Pi_1111 <- Pi_s$RandVecOutput[8]
    Pi_0110 <- Pi_s$RandVecOutput[9]
    Pi_0011 <- Pi_s$RandVecOutput[10]
    Pi_0111 <- Pi_s$RandVecOutput[11]
    Pi_1100 <- Pi_s$RandVecOutput[12]
    Pi_s_all <- cbind(Pi_0000, Pi_0100, Pi_0010, Pi_0001, Pi_0101, Pi_1000,
                      Pi_1010, Pi_1001, Pi_1110, Pi_1101, Pi_1011, Pi_1111,
                      Pi_0110, Pi_0011, Pi_0111, Pi_1100)
  }
  if (Monotonicity == "Surr.Endp") {
    set.seed(seed = Seed)
    Pi_s <- RandVec(a = 0, b = 1, s = 1, n = 12, m = 1)
    Pi_0000 <- Pi_s$RandVecOutput[1]
    Pi_0100 <- Pi_s$RandVecOutput[2]
    Pi_0010 <- c(0)
    Pi_0001 <- Pi_s$RandVecOutput[3]
    Pi_0101 <- Pi_s$RandVecOutput[4]
    Pi_1000 <- Pi_s$RandVecOutput[5]
    Pi_1010 <- c(0)
    Pi_1001 <- Pi_s$RandVecOutput[6]
    Pi_1110 <- c(0)
    Pi_1101 <- Pi_s$RandVecOutput[7]
    Pi_1011 <- Pi_s$RandVecOutput[8]
    Pi_1111 <- Pi_s$RandVecOutput[9]
    Pi_0110 <- c(0)
    Pi_0011 <- Pi_s$RandVecOutput[10]
    Pi_0111 <- Pi_s$RandVecOutput[11]
    Pi_1100 <- Pi_s$RandVecOutput[12]
    Pi_s_all <- cbind(Pi_0000, Pi_0100, Pi_0010, Pi_0001, Pi_0101, Pi_1000,
                      Pi_1010, Pi_1001, Pi_1110, Pi_1101, Pi_1011, Pi_1111,
                      Pi_0110, Pi_0011, Pi_0111, Pi_1100)
  }
  if (Monotonicity == "Surr.True.Endp") {
    set.seed(seed = Seed)
    Pi_s <- RandVec(a = 0, b = 1, s = 1, n = 9, m = 1)
    Pi_0000 <- Pi_s$RandVecOutput[1]
    Pi_0100 <- Pi_s$RandVecOutput[2]
    Pi_0010 <- c(0)
    Pi_0001 <- Pi_s$RandVecOutput[3]
    Pi_0101 <- Pi_s$RandVecOutput[4]
    Pi_1000 <- c(0)
    Pi_1010 <- c(0)
    Pi_1001 <- c(0)
    Pi_1110 <- c(0)
    Pi_1101 <- Pi_s$RandVecOutput[5]
    Pi_1011 <- c(0)
    Pi_1111 <- Pi_s$RandVecOutput[6]
    Pi_0110 <- c(0)
    Pi_0011 <- Pi_s$RandVecOutput[7]
    Pi_0111 <- Pi_s$RandVecOutput[8]
    Pi_1100 <- Pi_s$RandVecOutput[9]
    Pi_s_all <- cbind(Pi_0000, Pi_0100, Pi_0010, Pi_0001, Pi_0101, Pi_1000,
                      Pi_1010, Pi_1001, Pi_1110, Pi_1101, Pi_1011, Pi_1111,
                      Pi_0110, Pi_0011, Pi_0111, Pi_1100)
  }
  Pi_0000_ma <- matrix(rep(c(0, 0, 0, 0), round(Pi_0000 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0100_ma <- matrix(rep(c(0, 1, 0, 0), round(Pi_0100 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0010_ma <- matrix(rep(c(0, 0, 1, 0), round(Pi_0010 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0001_ma <- matrix(rep(c(0, 0, 0, 1), round(Pi_0001 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0101_ma <- matrix(rep(c(0, 1, 0, 1), round(Pi_0101 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1000_ma <- matrix(rep(c(1, 0, 0, 0), round(Pi_1000 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1010_ma <- matrix(rep(c(1, 0, 1, 0), round(Pi_1010 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1001_ma <- matrix(rep(c(1, 0, 0, 1), round(Pi_1001 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1110_ma <- matrix(rep(c(1, 1, 1, 0), round(Pi_1110 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1101_ma <- matrix(rep(c(1, 1, 0, 1), round(Pi_1101 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1011_ma <- matrix(rep(c(1, 0, 1, 1), round(Pi_1011 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1111_ma <- matrix(rep(c(1, 1, 1, 1), round(Pi_1111 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0110_ma <- matrix(rep(c(0, 1, 1, 0), round(Pi_0110 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0011_ma <- matrix(rep(c(0, 0, 1, 1), round(Pi_0011 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_0111_ma <- matrix(rep(c(0, 1, 1, 1), round(Pi_0111 * N.Total)), ncol = 4, byrow = TRUE)
  Pi_1100_ma <- matrix(rep(c(1, 1, 0, 0), round(Pi_1100 * N.Total)), ncol = 4, byrow = TRUE)
  mat <- data.frame(rbind(Pi_0000_ma, Pi_0100_ma, Pi_0010_ma, Pi_0001_ma, Pi_0101_ma,
                          Pi_1000_ma, Pi_1010_ma, Pi_1001_ma, Pi_1110_ma, Pi_1101_ma,
                          Pi_1011_ma, Pi_1111_ma, Pi_0110_ma, Pi_0011_ma, Pi_0111_ma,
                          Pi_1100_ma),
                    stringsAsFactors = TRUE)
  colnames(mat) <- c("T0", "T1", "S0", "S1")
  set.seed(Seed)
  Z <- rbinom(dim(mat)[1], 1, 0.5)
  Z[Z == 0] <- c(-1)
  mat <- cbind(mat, Z)
  mat_obs <- data.frame(matrix(NA, nrow = dim(mat)[1], ncol = 3), stringsAsFactors = TRUE)
  colnames(mat_obs) <- c("T", "S", "Z")
  for (i in 1:dim(mat)[1]) {
    if (mat$Z[i] == -1) {
      mat_obs$T[i] <- mat$T0[i]
      mat_obs$S[i] <- mat$S0[i]
      mat_obs$Z[i] <- mat$Z[i]
    }
    if (mat$Z[i] == 1) {
      mat_obs$T[i] <- mat$T1[i]
      mat_obs$S[i] <- mat$S1[i]
      mat_obs$Z[i] <- mat$Z[i]
    }
  }
  colnames(mat_obs) <- c("T", "S", "Z")
  if ((dim(mat_obs)[1]) != N.Total) {
    cat("\nNOTE: The number of patients requested in the function call equals ",
        N.Total, ", but the actual number of generated ", sep = "")
    cat("\nobservations was ", (dim(mat)[1]), " (due to rounding).", sep = "")
  }
  Data.STSBinBin_Obs <- Data.STSBinBin_Counterfactuals <- NULL
  Data.STSBinBin_Obs <<- mat_obs
  Data.STSBinBin_Counterfactuals <<- mat
  pi1_1_ <- Pi_s_all[7] + Pi_s_all[9] + Pi_s_all[11] + Pi_s_all[12]
  pi1_0_ <- Pi_s_all[6] + Pi_s_all[8] + Pi_s_all[10] + Pi_s_all[16]
  pi_1_1 <- Pi_s_all[5] + Pi_s_all[10] + Pi_s_all[12] + Pi_s_all[15]
  pi_1_0 <- Pi_s_all[2] + Pi_s_all[9] + Pi_s_all[13] + Pi_s_all[16]
  pi0_1_ <- Pi_s_all[3] + Pi_s_all[13] + Pi_s_all[14] + Pi_s_all[15]
  pi_0_1 <- Pi_s_all[4] + Pi_s_all[8] + Pi_s_all[11] + Pi_s_all[14]
  Pi_Marginals <- cbind(pi1_1_, pi1_0_, pi_1_1, pi_1_0, pi0_1_, pi_0_1)
  Pi_s <- Pi_s_all
  mat1 <- Pi_s[7]
  mat2 <- Pi_s[3] + Pi_s[9]
  mat3 <- Pi_s[13]
  mat4 <- Pi_s[6] + Pi_s[11]
  mat5 <- Pi_s[1] + Pi_s[14] + Pi_s[16] + Pi_s[12]
  mat6 <- Pi_s[2] + Pi_s[15]
  mat7 <- Pi_s[8]
  mat8 <- Pi_s[4] + Pi_s[10]
  mat9 <- Pi_s[5]
  Delta_c_mat <- matrix(data = c(mat1, mat2, mat3, mat4, mat5, mat6, mat7, mat8, mat9),
                        nrow = 3)
  sum_S_min1 <- mat1 + mat2 + mat3
  sum_S_0 <- mat4 + mat5 + mat6
  sum_S_1 <- mat7 + mat8 + mat9
  sum_T_min1 <- mat1 + mat4 + mat7
  sum_T_0 <- mat2 + mat5 + mat8
  sum_T_1 <- mat3 + mat6 + mat9
  if (Monotonicity == "No") {
    I_Delta_T_Delta_S <-
      (mat1 * log2(mat1 / (sum_S_min1 * sum_T_min1))) +
      (mat2 * log2(mat2 / (sum_S_min1 * sum_T_0))) +
      (mat3 * log2(mat3 / (sum_S_min1 * sum_T_1))) +
      (mat4 * log2(mat4 / (sum_S_0 * sum_T_min1))) +
      (mat5 * log2(mat5 / (sum_S_0 * sum_T_0))) +
      (mat6 * log2(mat6 / (sum_S_0 * sum_T_1))) +
      (mat7 * log2(mat7 / (sum_S_1 * sum_T_min1))) +
      (mat8 * log2(mat8 / (sum_S_1 * sum_T_0))) +
      (mat9 * log2(mat9 / (sum_S_1 * sum_T_1)))
    H_Delta_T <- -(((mat1 + mat4 + mat7) * log2(mat1 + mat4 + mat7)) +
                     ((mat2 + mat5 + mat8) * log2(mat2 + mat5 + mat8)) +
                     ((mat3 + mat6 + mat9) * log2(mat3 + mat6 + mat9)))
    H_Delta_S <- -(((mat1 + mat2 + mat3) * log2(mat1 + mat2 + mat3)) +
                     ((mat4 + mat5 + mat6) * log2(mat4 + mat5 + mat6)) +
                     ((mat7 + mat8 + mat9) * log2(mat7 + mat8 + mat9)))
    R2_H <- I_Delta_T_Delta_S / min(H_Delta_T, H_Delta_S)
  }
  if (Monotonicity == "True.Endp") {
    I_Delta_T_Delta_S <-
      0 + (mat2 * log2(mat2 / (sum_S_min1 * sum_T_0))) +
      (mat3 * log2(mat3 / (sum_S_min1 * sum_T_1))) +
      0 + (mat5 * log2(mat5 / (sum_S_0 * sum_T_0))) +
      (mat6 * log2(mat6 / (sum_S_0 * sum_T_1))) +
      0 + (mat8 * log2(mat8 / (sum_S_1 * sum_T_0))) +
      (mat9 * log2(mat9 / (sum_S_1 * sum_T_1)))
    H_Delta_T <- -(0 + ((mat2 + mat5 + mat8) * log2(mat2 + mat5 + mat8)) +
                     ((mat3 + mat6 + mat9) * log2(mat3 + mat6 + mat9)))
    H_Delta_S <- -(((mat1 + mat2 + mat3) * log2(mat1 + mat2 + mat3)) +
                     ((mat4 + mat5 + mat6) * log2(mat4 + mat5 + mat6)) +
                     ((mat7 + mat8 + mat9) * log2(mat7 + mat8 + mat9)))
    R2_H <- I_Delta_T_Delta_S / min(H_Delta_T, H_Delta_S)
  }
  if (Monotonicity == "Surr.Endp") {
    I_Delta_T_Delta_S <-
      0 + (mat4 * log2(mat4 / (sum_S_0 * sum_T_min1))) +
      (mat5 * log2(mat5 / (sum_S_0 * sum_T_0))) +
      (mat6 * log2(mat6 / (sum_S_0 * sum_T_1))) +
      (mat7 * log2(mat7 / (sum_S_1 * sum_T_min1))) +
      (mat8 * log2(mat8 / (sum_S_1 * sum_T_0))) +
      (mat9 * log2(mat9 / (sum_S_1 * sum_T_1)))
    H_Delta_T <- -(((mat1 + mat4 + mat7) * log2(mat1 + mat4 + mat7)) +
                     ((mat2 + mat5 + mat8) * log2(mat2 + mat5 + mat8)) +
                     ((mat3 + mat6 + mat9) * log2(mat3 + mat6 + mat9)))
    H_Delta_S <- -(0 + ((mat4 + mat5 + mat6) * log2(mat4 + mat5 + mat6)) +
                     ((mat7 + mat8 + mat9) * log2(mat7 + mat8 + mat9)))
    R2_H <- I_Delta_T_Delta_S / min(H_Delta_T, H_Delta_S)
  }
  if (Monotonicity == "Surr.True.Endp") {
    I_Delta_T_Delta_S <-
      (mat5 * log2(mat5 / (sum_S_0 * sum_T_0))) +
      (mat6 * log2(mat6 / (sum_S_0 * sum_T_1))) +
      (mat8 * log2(mat8 / (sum_S_1 * sum_T_0))) +
      (mat9 * log2(mat9 / (sum_S_1 * sum_T_1)))
    H_Delta_T <- -(((mat2 + mat5 + mat8) * log2(mat2 + mat5 + mat8)) +
                     ((mat3 + mat6 + mat9) * log2(mat3 + mat6 + mat9)))
    H_Delta_S <- -(((mat4 + mat5 + mat6) * log2(mat4 + mat5 + mat6)) +
                     ((mat7 + mat8 + mat9) * log2(mat7 + mat8 + mat9)))
    R2_H <- I_Delta_T_Delta_S / min(H_Delta_T, H_Delta_S)
  }
  pi_T_00 <- Pi_s[1] + Pi_s[3] + Pi_s[4] + Pi_s[14]
  pi_T_01 <- Pi_s[2] + Pi_s[5] + Pi_s[13] + Pi_s[15]
  pi_T_10 <- Pi_s[6] + Pi_s[7] + Pi_s[8] + Pi_s[11]
  pi_T_11 <- Pi_s[9] + Pi_s[10] + Pi_s[12] + Pi_s[16]
  pi_S_00 <- Pi_s[1] + Pi_s[2] + Pi_s[6] + Pi_s[16]
  pi_S_01 <- Pi_s[4] + Pi_s[5] + Pi_s[8] + Pi_s[10]
  pi_S_10 <- Pi_s[3] + Pi_s[7] + Pi_s[9] + Pi_s[13]
  pi_S_11 <- Pi_s[11] + Pi_s[12] + Pi_s[14] + Pi_s[15]
  theta_T <- (pi_T_00 * pi_T_11) / (pi_T_10 * pi_T_01)
  theta_S <- (pi_S_00 * pi_S_11) / (pi_S_10 * pi_S_01)
  fit <- list(Data.STSBinBin.Obs = mat_obs, Data.STSBinBin.Counter = mat,
              Vector_Pi = Pi_s_all, Pi_Marginals = Pi_Marginals, True.R2_H = R2_H,
              True.Theta_T = theta_T, True.Theta_S = theta_S)
  class(fit) <- "Sim.Data.STSBinBin"
  fit
}
test_that("Test plot_fairmodels", { expect_s3_class(plot_fairmodels(explainer_gbm, protected = compas$Ethnicity, privileged = "Caucasian"), "ggplot") fc <- fobject_big suppressWarnings(expect_s3_class(plot_fairmodels(fc, type = "fairness_check"), "ggplot")) suppressWarnings(expect_s3_class(plot_fairmodels(fc, type = "stack_metrics"), "ggplot")) suppressWarnings(expect_s3_class(plot_fairmodels(fc, type = "fairness_heatmap"), "ggplot")) suppressWarnings(expect_s3_class(plot_fairmodels(fc, type = "fairness_pca"), "ggplot")) suppressWarnings(expect_s3_class(plot_fairmodels(fc, type = "fairness_radar", fairness_metrics = c("TPR", "TNR", "FPR", "ACC", "STP", "FOR", "PPV")), "ggplot")) expect_s3_class(plot_fairmodels(fc, type = "group_metric"), "ggplot") expect_s3_class(plot_fairmodels(fc, type = "choose_metric"), "ggplot") expect_s3_class(plot_fairmodels(fc, type = "metric_scores"), "ggplot") expect_s3_class(plot_fairmodels(fc, type = "performance_and_fairness"), "ggplot") expect_s3_class(plot_fairmodels(fc, type = "all_cutoffs"), "ggplot") expect_s3_class(plot_fairmodels(fc, type = "ceteris_paribus_cutoff", cumulated = TRUE, subgroup = "Caucasian"), "ggplot") expect_error(expect_error(plot_fairmodels(fc, type = "not_existing"), "ggplot")) })
duembgen.shape.wt <- function(X, wt = rep(1, nrow(X)), init = NULL, eps = 1e-6,
                              maxiter = 100, na.action = na.fail) {
  if (length(wt) != nrow(X))
    stop("length of 'wt' must equal the number of rows in 'x'")
  x <- data.frame(wt = wt)
  x$X <- as.matrix(X)
  x <- na.action(x)
  if (!all(sapply(x, is.numeric))) stop("'X' and 'wt' must be numeric")
  X <- x$X
  wt <- x$wt
  p <- ncol(X)
  if (any(wt < 0) || (sum(wt)) == 0)
    stop("weights must be non-negative and not all zero")
  if (p < 2) stop("'X' must be at least bivariate")
  data2 <- pair.diff(X)
  center.ind <- apply(data2, 1, setequal, y = rep(0, p))
  n.del <- sum(center.ind)
  if (n.del != 0) {
    data2 <- data2[center.ind == FALSE, ]
    pwt <- as.vector(pair.prod(matrix(wt, ncol = 1)))[center.ind == FALSE]
    if (n.del > 1) {
      warning(paste(n.del, "pairwise differences equal to the origin were removed"))
    } else {
      warning("One pairwise difference equal to the origin was removed")
    }
  } else {
    pwt <- as.vector(pair.prod(matrix(wt, ncol = 1)))[center.ind == FALSE]
  }
  sum.pwt <- sum(pwt)
  w.data2 <- pwt * data2
  iter <- 0
  if (is.numeric(init)) V.0 <- solve(init) else V.0 <- solve(t(w.data2) %*% w.data2)
  differ <- Inf
  while (TRUE) {
    if (differ < eps) break
    if (iter >= maxiter) {
      stop("maxiter reached without convergence")
    }
    V.new <- .wt.duembgen.step(V.0, data2, pwt, p, sum.pwt)
    differ <- frobenius.norm(V.new - V.0)
    V.0 <- V.new
    iter <- iter + 1
  }
  V.shape <- solve(V.new)
  V <- V.shape / det(V.shape)^(1 / p)
  colnames(V) <- colnames(X)
  rownames(V) <- colnames(X)
  return(V)
}

.wt.duembgen.step <- function(V.old, datas, pwt, p, sum.pwt) {
  sqrt.V.old <- mat.sqrt(V.old)
  r <- sqrt(rowSums((datas %*% sqrt.V.old)^2))
  datas2 <- sqrt(pwt) * (1 / r) * datas
  datas3 <- datas2 %*% sqrt.V.old
  M.V.old <- p / sum.pwt * crossprod(datas3)
  M.V.old.inv <- solve(M.V.old)
  V.new <- sum(diag(V.old %*% M.V.old.inv))^(-1) *
    (sqrt.V.old %*% M.V.old.inv %*% sqrt.V.old)
  return(V.new)
}
sample_edgelist <- function(factor_model, ..., poisson_edges = TRUE,
                            allow_self_loops = TRUE) {
  ellipsis::check_dots_unnamed()
  UseMethod("sample_edgelist")
}

sample_edgelist.undirected_factor_model <- function(factor_model, ...,
                                                    poisson_edges = TRUE,
                                                    allow_self_loops = TRUE) {
  X <- factor_model$X
  S <- factor_model$S
  sample_edgelist(X, S, X, FALSE, poisson_edges = poisson_edges,
                  allow_self_loops = allow_self_loops)
}

sample_edgelist.directed_factor_model <- function(factor_model, ...,
                                                  poisson_edges = TRUE,
                                                  allow_self_loops = TRUE) {
  X <- factor_model$X
  S <- factor_model$S
  Y <- factor_model$Y
  sample_edgelist(X, S, Y, TRUE, poisson_edges = poisson_edges,
                  allow_self_loops = allow_self_loops)
}

sample_edgelist.matrix <- function(factor_model, S, Y, directed, ...,
                                   poisson_edges = TRUE, allow_self_loops = TRUE) {
  # Requires Matrix (Diagonal()), tibble, and dplyr.
  X <- factor_model
  stopifnot(is.logical(directed))
  stopifnot(is.logical(poisson_edges))
  stopifnot(is.logical(allow_self_loops))
  n <- nrow(X)
  d <- nrow(Y)
  k1 <- ncol(X)
  k2 <- ncol(Y)
  Cx <- Diagonal(n = k1, x = colSums(X))
  Cy <- Diagonal(n = k2, x = colSums(Y))
  S_tilde <- as.matrix(Cx %*% S %*% Cy)
  expected_edges <- sum(S_tilde)
  m <- rpois(n = 1, lambda = expected_edges)
  if (m == 0) {
    edge_list <- matrix(0, nrow = 0, ncol = 2)
    colnames(edge_list) <- c("from", "to")
    return(edge_list)
  }
  block_sizes <- matrix(rmultinom(n = 1, size = m, prob = S_tilde),
                        nrow = k1, ncol = k2)
  from <- integer(m)
  to_tmp <- integer(m)
  to <- integer(m)
  u_block_start <- 1
  u_block_sizes <- rowSums(block_sizes)
  for (u in 1:k1) {
    if (u_block_sizes[u] > 0) {
      indices <- u_block_start:(u_block_start + u_block_sizes[u] - 1)
      from[indices] <- sample(n, size = u_block_sizes[u], replace = TRUE,
                              prob = X[, u])
      u_block_start <- u_block_start + u_block_sizes[u]
    }
  }
  v_block_start <- 1
  v_block_sizes <- colSums(block_sizes)
  for (v in 1:k2) {
    if (v_block_sizes[v] > 0) {
      indices <- v_block_start:(v_block_start + v_block_sizes[v] - 1)
      to_tmp[indices] <- sample(d, size = v_block_sizes[v], replace = TRUE,
                                prob = Y[, v])
      v_block_start <- v_block_start + v_block_sizes[v]
    }
  }
  u_block_start <- 1
  v_block_start <- c(1, cumsum(v_block_sizes))
  for (u in 1:k1) {
    for (v in 1:k2) {
      if (block_sizes[u, v] > 0) {
        to_index <- u_block_start:(u_block_start + block_sizes[u, v] - 1)
        tmp_index <- v_block_start[v]:(v_block_start[v] + block_sizes[u, v] - 1)
        to[to_index] <- to_tmp[tmp_index]
        v_block_start[v] <- v_block_start[v] + block_sizes[u, v]
        u_block_start <- u_block_start + block_sizes[u, v]
      }
    }
  }
  if (directed) {
    edgelist <- tibble(from = from, to = to)
  } else {
    tibble_from <- pmin(from, to)
    tibble_to <- pmax(from, to)
    edgelist <- tibble(from = tibble_from, to = tibble_to)
  }
  if (!poisson_edges) {
    edgelist <- dplyr::distinct(edgelist)
  }
  if (!allow_self_loops) {
    edgelist <- dplyr::filter(edgelist, to != from)
  }
  edgelist
}

sample_edgelist.Matrix <- sample_edgelist.matrix
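# A minimal usage sketch for the matrix method, with a hand-built (made-up)
# two-block factor model: X and Y hold nonnegative factor loadings and S the
# block mixing rates. Requires Matrix and tibble to be attached, as noted above.
library(Matrix)
library(tibble)
set.seed(42)
X <- matrix(rexp(20), nrow = 10, ncol = 2)
S <- matrix(c(0.8, 0.1, 0.1, 0.8), 2, 2)
Y <- matrix(rexp(20), nrow = 10, ncol = 2)
sample_edgelist(X, S, Y, directed = TRUE)  # a tibble of sampled (from, to) edges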
splitSpectraGroups <- function(spectra, inst = NULL, rep.cols = NULL, ...) {
  .chkArgs(mode = 11L)
  chkSpectra(spectra)
  if (is.null(inst)) stop("No splitting instructions provided")
  lg <- length(spectra$groups)
  li <- length(inst)
  tmp <- data.frame(rep(NA, lg))
  colnames(tmp) <- "dummy"
  for (i in 1:li) {
    tmp <- data.frame(tmp, rep(NA, lg))
  }
  colnames(tmp) <- c("dummy", names(inst))
  for (i in 1:length(inst)) {
    l <- length(inst[[i]])
    for (j in 1:l) {
      which <- grep(inst[[i]][j], spectra$groups)
      tmp[which, i + 1] <- inst[[i]][j]
    }
    tmp[, i + 1] <- as.factor(tmp[, i + 1])
  }
  spectra <- c(spectra, tmp)
  d <- grep("dummy", names(spectra))
  spectra <- spectra[-d]
  class(spectra) <- "Spectra"
  if (!is.null(rep.cols)) {
    if (li > 1)
      stop("rep.cols can only be used with an instruction composed of one list element\n")
    if (!length(inst[[1]]) == length(rep.cols))
      stop("No. repl. colors doesn't equal levels in inst\n")
    l <- length(inst[[1]])
    tmp <- rep(NA, length(spectra$colors))
    for (i in 1:l) {
      which <- grep(inst[[1]][i], spectra[[names(inst)[1]]])
      tmp[which] <- rep.cols[i]
    }
    spectra$colors <- tmp
  }
  chkSpectra(spectra)
  spectra
}
.SPNET.COLLAPSE.SEP <- ";"

.extract.multiple.strings <- function(x, sep = .SPNET.COLLAPSE.SEP) {
  out <- c()
  for (k in x) {
    spl <- strsplit(x = k, split = sep)[[1]]
    spl <- spl[nzchar(spl)]
    spl <- sub("^ +", "", spl)
    spl <- sub(" +$", "", spl)
    out <- c(out, spl)
  }
  return(out)
}

.expand.multiple.names <- function(x, sep = .SPNET.COLLAPSE.SEP) {
  nam <- names(x)
  out <- c()
  out.nam <- character(0)
  for (k in 1:length(x)) {
    nam.temp <- .extract.multiple.strings(nam[k])
    for (i in 1:length(nam.temp)) {
      out <- c(out, x[k])
      out.nam <- c(out.nam, nam.temp[i])
    }
  }
  names(out) <- out.nam
  return(out)
}

setReplaceMethod(
  f = "[",
  signature = "SpatialNetwork",
  definition = function(x, i, j, value) {
    if (length(value) > 1) {
      value <- paste(value, collapse = .SPNET.COLLAPSE.SEP)
    }
    if (missing(i)) {
      if (missing(j)) {
        stop("error: you have to specify i or j")
      } else {
        if (inherits(j, "character")) {
          j <- match(j, names(x))
        }
        stopifnot(all(!is.na(j)))
        stopifnot(min(j) >= 1 && max(j) <= ncol(x))
        for (k in j) {
          # Slot access reconstructed: the source was mangled to
          # "[email protected]" by e-mail obfuscation; the `.Data` slot is assumed.
          x@.Data[[k]][i] <- value
        }
        return(x)
      }
    } else {
      if (missing(j)) {
        j <- 1:ncol(x)
      }
      if (inherits(j, "character")) {
        j <- match(j, names(x))
      }
      stopifnot(all(!is.na(j)))
      stopifnot(min(j) >= 1 && max(j) <= ncol(x))
      for (k in j) {
        x@.Data[[k]][i] <- value
      }
      return(x)
    }
  }
)

.position.multiple.symbols <- function(x, n = 1, cex = 1, space = 0.5) {
  if (n == 1) return(matrix(x, ncol = 2))
  x.x <- x[1]
  x.y <- x[2]
  x.all <- x.x + seq(from = 0, to = space * cex * (n - 1), by = space * cex)
  if (n %% 2 == 0) {
    x.all <- x.all - space * cex * ((n / 2 - 1) + 0.5)
  }
  if (n %% 2 == 1) {
    x.all <- x.all - space * cex * ((n - 1) / 2)
  }
  y.all <- rep(x.y, n)
  out <- matrix(c(x.all, y.all), nrow = n, byrow = FALSE)
  return(out)
}
require("semtree") require("future") plan(multisession) data(lgcm) lgcm$agegroup <- as.ordered(lgcm$agegroup) lgcm$training <- as.factor(lgcm$training) lgcm$noise <- as.numeric(lgcm$noise) manifests <- names(lgcm)[1:5] lgcModel <- mxModel("Linear Growth Curve Model Path Specification", type="RAM", manifestVars=manifests, latentVars=c("intercept","slope"), mxPath( from=manifests, arrows=2, free=TRUE, values = c(1, 1, 1, 1, 1), labels=c("residual1","residual2","residual3","residual4","residual5") ), mxPath( from=c("intercept","slope"), connect="unique.pairs", arrows=2, free=TRUE, values=c(1, 1, 1), labels=c("vari", "cov", "vars") ), mxPath( from="intercept", to=manifests, arrows=1, free=FALSE, values=c(1, 1, 1, 1, 1) ), mxPath( from="slope", to=manifests, arrows=1, free=FALSE, values=c(0, 1, 2, 3, 4) ), mxPath( from="one", to=manifests, arrows=1, free=FALSE, values=c(0, 0, 0, 0, 0) ), mxPath( from="one", to=c("intercept", "slope"), arrows=1, free=TRUE, values=c(1, 1), labels=c("meani", "means") ), mxData(lgcm,type="raw") ) controlOptions <- semtree.control() controlOptions controlOptions$alpha <- 0.01 tree <- semtree(model=lgcModel, data=lgcm, control = controlOptions) constraints <- semtree.constraints(local.invariance = names(omxGetParameters(lgcModel))[1:5]) treeConstrained <- semtree(model=lgcModel, data=lgcm, control = controlOptions, constraints=constraints) plot(tree)
set_couleur_typo <- function(map, paletteTypo = NULL, colBorder = "white",
                             map_leaflet = NULL) {
  msg_error1 <- msg_error2 <- msg_error3 <- msg_error4 <- NULL
  if (any(!any(class(map) %in% "leaflet"), !any(class(map) %in% "htmlwidget")))
    if (!any(class(map) %in% "leaflet_proxy"))
      msg_error1 <- "The map must be a leaflet or leaflet_proxy object / "
  if (!is.null(paletteTypo))
    if (any(class(paletteTypo) != "character"))
      msg_error2 <- "The typology palette must be a character vector / "
  if (any(class(colBorder) != "character"))
    msg_error3 <- "The border colour must be of character type (named or hexadecimal) / "
  if (!is.null(map_leaflet))
    if (any(!any(class(map_leaflet) %in% "leaflet"), !any(class(map_leaflet) %in% "htmlwidget")))
      msg_error4 <- "The map must be a leaflet object / "
  if (any(!is.null(msg_error1), !is.null(msg_error2), !is.null(msg_error3),
          !is.null(msg_error4))) {
    stop(simpleError(paste0(msg_error1, msg_error2, msg_error3, msg_error4)))
  }
  if (!is.null(map_leaflet)) {
    map_proxy <- map
    map <- map_leaflet
  }
  if (is.null(paletteTypo)) {
    idx_carte <- NULL
    for (i in 1:length(map$x$calls)) {
      if (map$x$calls[[i]]$method %in% "addPolygons") {
        if (map$x$calls[[i]]$args[[3]] == "carte_typo") idx_carte <- c(idx_carte, i)
      }
    }
    nb_col <- length(unique(map$x$calls[[idx_carte[length(idx_carte)]]]$args[[4]]$fillColor))
    paletteTypo <- substr(rainbow(256)[nb_opposes(256)[1:nb_col]], 1, 7)
  }
  idx_carte <- NULL
  idx_legende <- NULL
  legende <- FALSE
  for (i in 1:length(map$x$calls)) {
    if (map$x$calls[[i]]$method %in% "addPolygons") {
      if (map$x$calls[[i]]$args[[3]] == "carte_typo") {
        if (map$x$calls[[i]]$args[[2]]$nom_fond == "fond_maille_typo_carte") idx_carte <- i
      }
    }
    if (map$x$calls[[i]]$method %in% "addRectangles") {
      if (map$x$calls[[i]]$args[[6]] == "legende_typo") legende <- TRUE
    }
    if (legende) {
      if (map$x$calls[[i]]$method %in% "addPolygons") {
        if (map$x$calls[[i]]$args[[3]] == "legende_typo") idx_legende <- c(idx_legende, i)
      }
    }
  }
  if (is.null(map_leaflet)) {
    if (!is.null(idx_carte)) {
      couleur_analyse <- data.frame(col = map$x$calls[[idx_carte]]$args[[4]]$fillColor)
      couleur_analyse$id1 <- c(1:nrow(couleur_analyse))
      pal_anc <- data.frame(col = unique(couleur_analyse$col))
      pal_anc$id2 <- c(1:nrow(pal_anc))
      couleur_analyse <- merge(couleur_analyse, pal_anc, by = "col")
      aa <- sapply(1:(length(paletteTypo)), function(x)
        couleur_analyse[couleur_analyse$id2 == x, "col"] <<- paletteTypo[x])
      rm(aa)
      couleur_analyse <- couleur_analyse[order(couleur_analyse$id1), ]
      couleur_analyse <- couleur_analyse$col
      map$x$calls[[idx_carte]]$args[[4]]$fillColor <- couleur_analyse
      map$x$calls[[idx_carte]]$args[[4]]$color <- colBorder
    }
    if (legende) {
      for (i in 1:length(idx_legende)) {
        map$x$calls[[idx_legende[i]]]$args[[4]]$fillColor <- paletteTypo[i]
      }
    }
  } else {
    map_leaflet <- map
    map <- map_proxy
    clearGroup(map, group = "carte_typo")
    analyse_WGS84 <- map_leaflet$x$calls[[idx_carte]]$args[[2]]$analyse_WGS84
    analyse <- map_leaflet$x$calls[[idx_carte]]$args[[2]]$analyse
    code_epsg <- map_leaflet$x$calls[[idx_carte]]$args[[2]]$code_epsg
    emprise <- map_leaflet$x$calls[[idx_carte]]$args[[2]]$emprise
    varTypo <- map_leaflet$x$calls[[idx_carte]]$args[[2]]$var_typo
    map <- addPolygons(map = map, data = analyse_WGS84, opacity = 1, stroke = TRUE,
                       color = colBorder, weight = 1,
                       options = pathOptions(pane = "fond_typo", clickable = TRUE),
                       # NOTE: the popup HTML string was truncated in the source;
                       # a simple placeholder showing the typology value is used.
                       popup = paste0("<b>", analyse[[varTypo]], "</b>"),
                       fill = TRUE, fillColor = analyse$col, fillOpacity = 1,
                       group = "carte_typo",
                       layerId = list(analyse_WGS84 = analyse_WGS84,
                                      analyse = analyse,
                                      code_epsg = code_epsg,
                                      emprise = emprise,
                                      nom_fond = "fond_maille_typo_carte",
                                      var_typo = varTypo))
  }
  return(map)
}
hMats <- function(Time) {
  # Relies on objects from the enclosing environment (object, data.id, timeVar,
  # lag, param, TermsX, TermsZ, formYx, formYz, TermsX.extra, TermsZ.extra,
  # extraForm, estimateWeightFun).
  W2 <- splines::splineDesign(unlist(object$control$knots, use.names = FALSE),
                              Time, ord = object$control$ordSpline, outer.ok = TRUE)
  data.id2 <- data.id
  data.id2[[timeVar]] <- pmax(Time - lag, 0)
  out <- list(W2 = W2, data = data.id2)
  if (param %in% c("td-value", "td-both")) {
    mfX <- model.frame.default(delete.response(TermsX), data = data.id)
    mfZ <- model.frame.default(TermsZ, data = data.id)
    out$Xtime <- model.matrix.default(formYx, mfX)
    out$Ztime <- model.matrix.default(formYz, mfZ)
  }
  if (param %in% c("td-extra", "td-both")) {
    mfX.extra <- model.frame.default(TermsX.extra, data = data.id)
    mfZ.extra <- model.frame.default(TermsZ.extra, data = data.id)
    out$Xtime.extra <- model.matrix.default(extraForm$fixed, mfX.extra)
    out$Ztime.extra <- model.matrix.default(extraForm$random, mfZ.extra)
  }
  if (estimateWeightFun) {
    GQsurv <- if (object$control$GQsurv == "GaussKronrod") gaussKronrod()
              else gaussLegendre(object$control$GQsurv.k)
    wk <- GQsurv$wk
    sk <- GQsurv$sk
    P <- Time / 2
    st <- outer(P, sk + 1)
    id.GK <- rep(seq_len(nrow(data.id2)), each = length(sk))
    data.id3 <- data.id2[id.GK, ]
    data.id3[[timeVar]] <- pmax(c(t(st)) - lag, 0)
    mfX <- model.frame.default(delete.response(TermsX), data = data.id3)
    mfZ <- model.frame.default(TermsZ, data = data.id3)
    out$Xu <- model.matrix.default(formYx, mfX)
    out$Zu <- model.matrix.default(formYz, mfZ)
    out$P <- P
    out$st <- Time[id.GK] - c(t(st))
    out$wk <- rep(wk, length(P))
    out$id.GK <- id.GK
    out$data <- data.id3
  }
  out
}
prep_attributes <- function(data_path = "data",
                            attributes_path = file.path("data", "metadata", "attributes.csv"),
                            ...) {
  file_paths <- validate_file_paths(data_path, ...)
  if (!file.exists(attributes_path)) {
    stop("attribute file does not exist. Check path or run create_spice?")
  }
  attributes <- readr::read_csv(attributes_path, col_types = readr::cols())
  attributes <- dplyr::bind_rows(
    attributes,
    purrr::map_df(file_paths, ~ extract_attributes(.x, attributes))
  )
  readr::write_csv(attributes, attributes_path)
}
validate_file_paths <- function(data_path = "data", ...) {
  if (length(data_path) == 1) {
    if (is_dir(data_path)) {
      file_paths <- list.files(data_path, include.dirs = FALSE, full.names = TRUE, ...)
    } else {
      file_paths <- data_path
    }
  }
  file_paths <- grep("*metadata/*", file_paths, invert = TRUE, value = TRUE)
  file_paths <- file_paths[!is_dir(file_paths)] %>%
    check_files_exist() %>%
    check_extensions()
  if (length(file_paths) == 0) {
    stop("no valid paths to data files detected.")
  }
  file_paths
}
extract_attributes <- function(file_path, attributes) {
  fileName <- file_path %>%
    check_extensions() %>%
    basename() %>%
    check_fileNames(table = attributes)
  if (length(fileName) == 0) {
    return()
  }
  ext <- tools::file_ext(fileName)
  x <- switch(ext,
    csv = readr::read_csv(file_path, n_max = 1, col_types = readr::cols()),
    tsv = readr::read_tsv(file_path, n_max = 1, col_types = readr::cols()),
    rds = readRDS(file_path)
  )
  if (is.null(names(x))) {
    warning("no attributes to extract from fileName: ", fileName, ", \n prep skipped")
    return()
  }
  message("The following variableNames have been added to the attributes ",
          "file for ", fileName, ": ", paste(names(x), collapse = ", "))
  tibble::add_row(attributes[0, ], variableName = names(x), fileName = fileName)
}
is_dir <- function(path) {
  tools::file_ext(path) == ""
}
check_extensions <- function(file_paths) {
  check_ext <- tools::file_ext(file_paths) %in% c("csv", "tsv", "rds")
  if (any(!check_ext)) {
    warning("cannot handle extension for fileName(s): ",
            file_paths[!check_ext], ", \n file(s) ignored")
  }
  file_paths[check_ext]
}
check_files_exist <- function(file_paths) {
  check_files <- file.exists(file_paths)
  if (any(!check_files)) {
    warning("Invalid data_path(s) \n", paste0(file_paths[!check_files], "\n"),
            "file(s) ignored")
  }
  file_paths[check_files]
}
check_fileNames <- function(fileNames, table) {
  table_name <- substitute(table)
  check_fileNames <- fileNames %in% unique(table$fileName)
  if (any(check_fileNames)) {
    warning("Entries already exist in ", table_name, ".csv for fileNames: ",
            paste(fileNames[check_fileNames], collapse = ", "),
            "\n files ignored")
  }
  fileNames[!check_fileNames]
}
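# Minimal workflow sketch for prep_attributes(), assuming a project that
# already has a data/ folder and a data/metadata/attributes.csv (e.g. created
# by create_spice(); neither is set up here).
if (FALSE) {
  write.csv(iris, file.path("data", "iris.csv"), row.names = FALSE)
  prep_attributes(data_path = "data")
}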
csem <- function( .data = NULL, .model = NULL, .approach_2ndorder = c("2stage", "mixed"), .approach_cor_robust = c("none", "mcd", "spearman"), .approach_nl = c("sequential", "replace"), .approach_paths = c("OLS", "2SLS"), .approach_weights = c("PLS-PM", "SUMCORR", "MAXVAR", "SSQCORR", "MINVAR", "GENVAR","GSCA", "PCA", "unit", "bartlett", "regression"), .conv_criterion = c("diff_absolute", "diff_squared", "diff_relative"), .disattenuate = TRUE, .dominant_indicators = NULL, .estimate_structural = TRUE, .id = NULL, .instruments = NULL, .iter_max = 100, .normality = FALSE, .PLS_approach_cf = c("dist_squared_euclid", "dist_euclid_weighted", "fisher_transformed", "mean_arithmetic", "mean_geometric", "mean_harmonic", "geo_of_harmonic"), .PLS_ignore_structural_model = FALSE, .PLS_modes = NULL, .PLS_weight_scheme_inner = c("path", "centroid", "factorial"), .reliabilities = NULL, .starting_values = NULL, .resample_method = c("none", "bootstrap", "jackknife"), .resample_method2 = c("none", "bootstrap", "jackknife"), .R = 499, .R2 = 199, .handle_inadmissibles = c("drop", "ignore", "replace"), .user_funs = NULL, .eval_plan = c("sequential", "multiprocess"), .seed = NULL, .sign_change_option = c("none", "individual", "individual_reestimate", "construct_reestimate"), .tolerance = 1e-05 ) { .approach_2ndorder <- match.arg(.approach_2ndorder) .approach_cor_robust <- match.arg(.approach_cor_robust) .approach_nl <- match.arg(.approach_nl) .approach_paths <- match.arg(.approach_paths) .approach_weights <- match.arg(.approach_weights) .conv_criterion <- match.arg(.conv_criterion) .eval_plan <- match.arg(.eval_plan) .handle_inadmissibles <- match.arg(.handle_inadmissibles) .PLS_approach_cf <- match.arg(.PLS_approach_cf) .PLS_weight_scheme_inner <- match.arg(.PLS_weight_scheme_inner) .resample_method <- match.arg(.resample_method) .resample_method2 <- match.arg(.resample_method2) .sign_change_option <- match.arg(.sign_change_option) args_used <- c(as.list(environment(), all.names = TRUE)) args <- handleArgs(args_used) args_needed <- args[intersect(names(args), names(as.list(formals(foreman))))] if(!any(class(.data) %in% c("data.frame", "matrix", "list"))) { stop2( "The following error occured in the `csem()` function:\n", "Data must be provided as a `matrix`, a `data.frame` or a `list`. ", ".data has class: ", paste0(class(.data), collapse = ", ") ) } if(inherits(.data, "list")) { c_names <- unique(unlist(lapply(.data, colnames))) } else { c_names <- colnames(.data) } if(length(grep("\\.", c_names)) > 0) { stop2( "At least one variable name in your data set contain a `.` (dot).", " Dots are a reserved special character in cSEM. Please rename these variables in your data and the model description.") } model_original <- parseModel(.model, .instruments = .instruments) if(any(model_original$construct_order == "Second order")) { model_1stage <- convertModel( .csem_model = model_original, .approach_2ndorder = args$.approach_2ndorder, .stage = "first") model_1stage$construct_order <- model_original$construct_order args_needed[[".model"]] <- model_1stage } else { args_needed[[".model"]] <- model_original } if(!is.null(.id) && !inherits(.data, "list")) { if(length(.id) != 1) { stop2( "The following error occured in the `csem()` function:\n", "`.id` must be a character string or an integer identifying one single column." 
) } if(is.matrix(.data)) { .data <- as.data.frame(.data) } data_split <- split(.data, f = .data[, .id]) out <- lapply(data_split, function(x) { if(is.numeric(.id)) { args_needed[[".data"]] <- x[, -.id] } else { args_needed[[".data"]] <- x[, -which(names(x) == .id)] } do.call(foreman, args_needed) }) } else if(any(class(.data) == "list")) { out <- lapply(.data, function(x) { args_needed[[".data"]] <- x do.call(foreman, args_needed) }) if(is.null(names(.data))) { names(out) <- paste0("Data_", 1:length(out)) } else { names(out) <- names(.data) } } else { out <- do.call(foreman, args_needed) } if(inherits(.data, "list") | !is.null(.id)) { out[[1]]$Information$Data_pooled <- if(inherits(.data, "list")) { data_cleaned <- lapply(.data, function(x) { x <- x[, setdiff(colnames(model_original$measurement), model_original$vars_attached_to_2nd)] x }) data_pooled <- do.call(rbind, data_cleaned) data_pooled <- as.data.frame(data_pooled) data_pooled } else { .data } if(any(model_original$construct_order == "Second order")) { out <- lapply(out, function(x){ x$Information$Approach_2ndorder <- .approach_2ndorder x$Information$Model_original <- model_original x }) } else { out <- lapply(out, function(x){ x$Information$Approach_2ndorder <- NA x }) } class(out) <- c("cSEMResults", "cSEMResults_multi") if(any(model_original$construct_order == "Second order") && args$.approach_2ndorder %in% c("2stage", "mixed")) { out <- lapply(out, function(x) { calculate2ndStage(model_original, x, args_needed, .approach_2ndorder) }) class(out) <- c("cSEMResults", "cSEMResults_multi", "cSEMResults_2ndorder") } } else if(any(model_original$construct_order == "Second order") && args$.approach_2ndorder %in% c("2stage", "mixed")) { out <- calculate2ndStage(model_original, out, args_needed, .approach_2ndorder) } else { if(any(model_original$construct_order == "Second order")) { out$Information$Approach_2ndorder <- .approach_2ndorder out$Information$Model_original <- model_original } else { out$Information$Approach_2ndorder <- NA } class(out) <- c("cSEMResults", "cSEMResults_default") } if(.resample_method != "none") { out <- resamplecSEMResults( .object = out, .resample_method = .resample_method, .resample_method2 = .resample_method2, .R = .R, .R2 = .R2, .handle_inadmissibles = .handle_inadmissibles, .user_funs = .user_funs, .eval_plan = .eval_plan, .force = FALSE, .seed = .seed, .sign_change_option = .sign_change_option ) } return(out) }
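# Hedged example of a csem() call with lavaan-style model syntax; the dataset
# name `threecommonfactors` is assumed to be available (it ships with the
# cSEM package), so the call is not executed here.
if (FALSE) {
  model <- "
    eta1 =~ y11 + y12 + y13
    eta2 =~ y21 + y22 + y23
    eta2 ~ eta1
  "
  res <- csem(.data = threecommonfactors, .model = model,
              .resample_method = "bootstrap", .R = 499)
}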
predict_bn <- function(fit, evidence) {
  n <- names(fit)
  obj_nodes <- n[which(!(n %in% names(evidence)))]
  pred <- mvn_inference(attr(fit, "mu"), attr(fit, "sigma"), as_named_vector(evidence))
  pred <- as.data.table(t(pred$mu_p[, 1]))
  if (length(obj_nodes) == 1) setnames(pred, names(pred), obj_nodes)
  return(pred)
}
predict_dt <- function(fit, dt, obj_nodes, verbose = TRUE) {
  initial_fit_check(fit)
  initial_df_check(dt)
  fit <- initial_attr_check(fit)
  dt <- as.data.table(dt)
  obj_dt <- dt[, .SD, .SDcols = obj_nodes]
  ev_dt <- copy(dt)
  ev_dt[, (obj_nodes) := NULL]
  res <- ev_dt[, predict_bn(fit, .SD), by = 1:nrow(ev_dt)]
  mae <- sapply(obj_nodes, function(x) { mae(obj_dt[, get(x)], res[, get(x)]) })
  sd_e <- sapply(obj_nodes, function(x) { sd_error(obj_dt[, get(x)], res[, get(x)]) })
  if (verbose) {
    # Draw each observed series with its prediction overlaid; the original
    # chained plot() and lines() with `+`, which only worked by accident.
    sapply(obj_nodes, function(x) {
      plot(ts(obj_dt[, get(x)]), ylab = x)
      lines(ts(res[, get(x)]), col = "red")
    })
    print("MAE:", quote = FALSE)
    print(mae)
    print("SD:", quote = FALSE)
    print(sd_e)
  }
  return(res)
}
approx_prediction_step <- function(fit, variables, particles, n = 50) {
  if (length(particles) == 0) particles <- TRUE
  particles <- bnlearn::cpdist(fit, nodes = variables, evidence = particles,
                               method = "lw", n = n)
  particles <- as.list(apply(particles, 2, mean))
  return(particles)
}
exact_prediction_step <- function(fit, variables, evidence) {
  if (length(evidence) == 0) evidence <- attr(fit, "mu")[bnlearn::root.nodes(fit)]
  res <- mvn_inference(attr(fit, "mu"), attr(fit, "sigma"), evidence)
  res$mu_p <- as.list(res$mu_p[, 1])
  return(res)
}
approximate_inference <- function(dt, fit, size, obj_vars, ini, rep, len, num_p) {
  var_names <- names(dt)
  vars_pred_idx <- grep("t_0", var_names)
  vars_subs_idx <- grep("t_1", var_names)
  vars_last_idx <- grep(paste0("t_", size - 1), var_names)
  vars_pred <- var_names[vars_pred_idx]
  vars_subs <- var_names[vars_subs_idx]
  vars_prev <- var_names[-c(vars_pred_idx, vars_subs_idx)]
  vars_post <- var_names[-c(vars_pred_idx, vars_last_idx)]
  vars_ev <- var_names[-vars_pred_idx]
  test <- NULL
  for (i in 1:rep) {
    evidence <- dt[ini, .SD, .SDcols = vars_ev]
    for (j in 1:len) {
      particles <- approx_prediction_step(fit, vars_pred, as.list(evidence), num_p)
      if (length(vars_post) > 0) evidence[, (vars_prev) := .SD, .SDcols = vars_post]
      evidence[, (vars_subs) := particles[vars_pred]]
      temp <- particles[obj_vars]
      temp["exec"] <- i
      test <- rbindlist(list(test, temp))
    }
  }
  return(test)
}
exact_inference <- function(dt, fit, size, obj_vars, ini, len, prov_ev) {
  fit <- initial_attr_check(fit)
  var_names <- names(dt)
  vars_pred_idx <- grep("t_0", var_names)
  vars_subs_idx <- grep("t_1", var_names)
  vars_last_idx <- grep(paste0("t_", size - 1), var_names)
  vars_pred <- var_names[vars_pred_idx]
  vars_prev <- var_names[-c(vars_pred_idx, vars_subs_idx)]
  vars_post <- var_names[-c(vars_pred_idx, vars_last_idx)]
  vars_ev <- var_names[-vars_pred_idx]
  vars_pred_crop <- vars_pred[!(vars_pred %in% prov_ev)]
  vars_subs_crop <- sub("t_0", "t_1", vars_pred_crop)
  prov_ev_subs <- sub("t_0", "t_1", prov_ev)
  test <- NULL
  evidence <- dt[ini, .SD, .SDcols = c(vars_ev, prov_ev)]
  for (j in 1:len) {
    particles <- exact_prediction_step(fit, vars_pred, as_named_vector(evidence))
    if (is.null(names(particles$mu_p))) names(particles$mu_p) <- obj_vars
    if (length(vars_post) > 0) evidence[, (vars_prev) := .SD, .SDcols = vars_post]
    evidence[, (vars_subs_crop) := particles$mu_p[vars_pred_crop]]
    if (!is.null(prov_ev)) {
      evidence[, (prov_ev_subs) := .SD, .SDcols = prov_ev]
      evidence[, (prov_ev) := dt[ini + j, .SD, .SDcols = prov_ev]]
    }
    temp <- particles$mu_p[obj_vars]
    temp["exec"] <- 1
    test <- rbindlist(list(test, temp))
  }
  return(test)
}
forecast_ts <- function(dt, fit, size, obj_vars, ini = 1, len = dim(dt)[1] - ini,
                        rep = 1, num_p = 50, print_res = TRUE, plot_res = TRUE,
                        mode = "exact", prov_ev = NULL) {
  initial_folded_dt_check(dt)
  initial_dbnfit_check(fit)
  numeric_arg_check(size, ini, len, rep, num_p)
  character_arg_check(obj_vars)
  null_or_character_arg_check(prov_ev)
  obj_prov_check(obj_vars, prov_ev)
  logical_arg_check(print_res, plot_res)
  initial_mode_check(mode)
  dt <- as.data.table(dt)
  exec_time <- Sys.time()
  if (mode == "exact")
    test <- exact_inference(dt, fit, size, obj_vars, ini, len, prov_ev)
  else if (mode == "approx")
    test <- approximate_inference(dt, fit, size, obj_vars, ini, rep, len, num_p)
  exec_time <- Sys.time() - exec_time  # was reversed, which printed a negative duration
  metrics <- lapply(obj_vars, function(x) {
    test[, mae_by_col(dt[ini:(ini + len - 1)], .SD), .SDcols = x, by = "exec"]
  })
  metrics <- sapply(metrics, function(x) { mean(x$V1) })
  names(metrics) <- obj_vars
  if (print_res) {
    print(exec_time)
    print_metrics(metrics, obj_vars)
  }
  if (plot_res) plot_results(dt[ini:(ini + len - 1)], test, obj_vars)
  return(list(orig = dt[ini:(ini + len - 1)], pred = test))
}
exact_inference_backwards <- function(dt, fit, size, obj_vars, ini, len, prov_ev) {
  fit <- initial_attr_check(fit)
  var_names <- names(dt)
  vars_pred_idx <- grep(paste0("t_", size - 1), var_names)
  vars_subs_idx <- grep(paste0("t_", size - 2), var_names)
  vars_last_idx <- grep("t_0", var_names)
  vars_pred <- var_names[vars_pred_idx]
  vars_prev <- var_names[-c(vars_pred_idx, vars_subs_idx)]
  vars_post <- var_names[-c(vars_pred_idx, vars_last_idx)]
  vars_ev <- var_names[-vars_pred_idx]
  vars_pred_crop <- vars_pred[!(vars_pred %in% prov_ev)]
  vars_subs_crop <- sub(paste0("t_", size - 1), paste0("t_", size - 2), vars_pred_crop)
  # The original substituted t_{size-1} for itself (a no-op); mirroring
  # vars_subs_crop, provided evidence is shifted one slice back instead.
  prov_ev_subs <- sub(paste0("t_", size - 1), paste0("t_", size - 2), prov_ev)
  test <- NULL
  evidence <- dt[ini, .SD, .SDcols = c(vars_ev, prov_ev)]
  for (j in 1:len) {
    particles <- exact_prediction_step(fit, vars_pred, as_named_vector(evidence))
    if (is.null(names(particles$mu_p))) names(particles$mu_p) <- obj_vars
    if (length(vars_post) > 0) evidence[, (vars_prev) := .SD, .SDcols = vars_post]
    evidence[, (vars_subs_crop) := particles$mu_p[vars_pred_crop]]
    if (!is.null(prov_ev)) {
      evidence[, (prov_ev_subs) := .SD, .SDcols = prov_ev]
      evidence[, (prov_ev) := dt[ini + j, .SD, .SDcols = prov_ev]]
    }
    temp <- particles$mu_p[obj_vars]
    temp["exec"] <- 1
    test <- rbindlist(list(temp, test))
  }
  return(test)
}
smooth_ts <- function(dt, fit, size, obj_vars, ini = dim(dt)[1], len = ini - 1,
                      print_res = TRUE, plot_res = TRUE, prov_ev = NULL) {
  initial_folded_dt_check(dt)
  initial_dbnfit_check(fit)
  numeric_arg_check(size, ini, len)
  character_arg_check(obj_vars)
  null_or_character_arg_check(prov_ev)
  obj_prov_check(obj_vars, prov_ev)
  logical_arg_check(print_res, plot_res)
  dt <- as.data.table(dt)
  exec_time <- Sys.time()
  test <- exact_inference_backwards(dt, fit, size, obj_vars, ini, len, prov_ev)
  exec_time <- Sys.time() - exec_time  # was reversed, which printed a negative duration
  metrics <- lapply(obj_vars, function(x) {
    test[, mae_by_col(dt[(ini - len + 1):ini], .SD), .SDcols = x, by = "exec"]
  })
  metrics <- sapply(metrics, function(x) { mean(x$V1) })
  names(metrics) <- obj_vars
  if (print_res) {
    print(exec_time)
    print_metrics(metrics, obj_vars)
  }
  if (plot_res) plot_results(dt[(ini - len + 1):ini], test, obj_vars)
  return(list(orig = dt[(ini - len + 1):ini], pred = test))
}
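# Usage sketch for forecast_ts() and smooth_ts(): `f_dt` (a folded data.table
# with columns suffixed t_0/t_1) and `fit` (a fitted dynamic Bayesian network)
# are assumed to come from the package's folding and fitting steps; the
# variable names below are illustrative only.
if (FALSE) {
  fc <- forecast_ts(f_dt, fit, size = 2, obj_vars = "cmp_a_t_0",
                    ini = 100, len = 50, mode = "exact")
  sm <- smooth_ts(f_dt, fit, size = 2, obj_vars = "cmp_a_t_1")
}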
test_that("dots replace their arguments", { skip_on_cran() expect_equal( hit_national_map_api( list( c(lat = 44.04905, lng = -74.01188), c(lat = 44.04911, lng = -74.01179) ), 100, 100, "3DEPElevation" ), hit_national_map_api(list( c(lat = 44.04905, lng = -74.01188), c(lat = 44.04911, lng = -74.01179) ), 8000, 8000, "3DEPElevation", verbose = TRUE, size = "100,100" ) ) })
timestamp <- Sys.time() library(caret) library(plyr) library(recipes) library(dplyr) library(evtree) model <- "evtree" set.seed(2) training <- twoClassSim(50, linearVars = 2) testing <- twoClassSim(500, linearVars = 2) trainX <- training[, -ncol(training)] trainY <- training$Class rec_cls <- recipe(Class ~ ., data = training) %>% step_center(all_predictors()) %>% step_scale(all_predictors()) cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all") cctrl2 <- trainControl(method = "LOOCV") cctrl3 <- trainControl(method = "none") cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random") evc <- evtree.control(maxdepth = 5, niterations = 50) set.seed(849) test_class_cv_model <- train(trainX, trainY, method = "evtree", trControl = cctrl1, control = evc, preProc = c("center", "scale")) set.seed(849) test_class_cv_form <- train(Class ~ ., data = training, method = "evtree", trControl = cctrl1, control = evc, preProc = c("center", "scale")) test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)]) test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob") test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)]) test_class_prob_form <- predict(test_class_cv_form, testing[, -ncol(testing)], type = "prob") set.seed(849) test_class_rand <- train(trainX, trainY, method = "evtree", trControl = cctrlR, tuneLength = 4) set.seed(849) test_class_loo_model <- train(trainX, trainY, method = "evtree", trControl = cctrl2, control = evc, preProc = c("center", "scale")) set.seed(849) test_class_none_model <- train(trainX, trainY, method = "evtree", trControl = cctrl3, control = evc, tuneLength = 1, preProc = c("center", "scale")) test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)]) set.seed(849) test_class_rec <- train(x = rec_cls, data = training, method = "evtree", trControl = cctrl1, control = evc) if( !isTRUE( all.equal(test_class_cv_model$results, test_class_rec$results)) ) stop("CV weights not giving the same results") test_class_imp_rec <- varImp(test_class_rec) test_class_pred_rec <- predict(test_class_rec, testing[, -ncol(testing)]) test_levels <- levels(test_class_cv_model) if(!all(levels(trainY) %in% test_levels)) cat("wrong levels") library(caret) library(plyr) library(recipes) library(dplyr) airq <- subset(airquality, !is.na(Ozone) & complete.cases(airquality)) trainX <- airq[, -1] trainY <- airq$Ozone testX <- airq[, -1] testY <- airq$Ozone rec_reg <- recipe(Ozone ~ ., data = airq) %>% step_center(all_predictors()) %>% step_scale(all_predictors()) rctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all") rctrl2 <- trainControl(method = "LOOCV") rctrl3 <- trainControl(method = "none") rctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random") set.seed(849) test_reg_cv_model <- train(trainX, trainY, method = "evtree", trControl = rctrl1, control = evc, preProc = c("center", "scale")) test_reg_pred <- predict(test_reg_cv_model, testX) set.seed(849) test_reg_cv_form <- train(Ozone ~ ., data = airq, method = "evtree", trControl = rctrl1, control = evc, preProc = c("center", "scale")) test_reg_pred_form <- predict(test_reg_cv_form, testX) set.seed(849) test_reg_rand <- train(trainX, trainY, method = "evtree", trControl = rctrlR, tuneLength = 4) set.seed(849) test_reg_loo_model <- train(trainX, trainY, method = "evtree", trControl = rctrl2, control = evc, preProc = c("center", "scale")) set.seed(849) 
test_reg_none_model <- train(trainX, trainY, method = "evtree", trControl = rctrl3, control = evc, tuneLength = 1, preProc = c("center", "scale")) test_reg_none_pred <- predict(test_reg_none_model, testX) set.seed(849) test_reg_rec <- train(x = rec_reg, data = airq, method = "evtree", control = evc, trControl = rctrl1) test_reg_pred_rec <- predict(test_reg_rec, airq[, names(airq) != "Ozone"]) tests <- grep("test_", ls(), fixed = TRUE, value = TRUE) sInfo <- sessionInfo() timestamp_end <- Sys.time() save(list = c(tests, "sInfo", "timestamp", "timestamp_end"), file = file.path(getwd(), paste(model, ".RData", sep = ""))) if(!interactive()) q("no")
expected <- eval(parse(text="c(\"34\", \"-45\")")); test(id=0, code={ argv <- eval(parse(text="list(c(34L, -45L))")); do.call(`as.character`, argv); }, o=expected);
test_that("vec_restore() returns a dribble when it should", { x <- readRDS(test_file("just_a_dribble.rds")) expect_identical(vec_restore(x, x), x) expect_dribble(vec_restore(x, x)) }) test_that("vec_restore() returns dribble when row slicing", { x <- readRDS(test_file("just_a_dribble.rds")) row1 <- x[1, ] row0 <- x[0, ] expect_dribble(vec_restore(row1, x)) expect_dribble(vec_restore(row0, x)) }) test_that("vec_restore() returns bare tibble if `x` loses dribble cols", { x <- readRDS(test_file("just_a_dribble.rds")) col <- x[1] expect_bare_tibble(vec_restore(col, x)) }) test_that("vec_ptype2() is working", { x <- readRDS(test_file("just_a_dribble.rds")) x2 <- x x2$y <- 1 x3 <- x x3$z <- 2 tbl <- tibble(x = 1) df <- data.frame(x = 1) expect_identical(vec_ptype2(x, x), vec_slice(x, NULL)) expect_identical( vec_ptype2(x2, x3), new_dribble(df_ptype2(x2, x3)) ) expect_identical( vec_ptype2(x, tbl), vec_ptype2(new_tibble0(x), tbl) ) expect_identical( vec_ptype2(tbl, x), vec_ptype2(tbl, new_tibble0(x)) ) expect_identical( vec_ptype2(x, df), vec_ptype2(new_tibble0(x), df) ) expect_identical( vec_ptype2(df, x), vec_ptype2(df, new_tibble0(x)) ) }) test_that("vec_cast() is working", { x <- readRDS(test_file("just_a_dribble.rds")) x2 <- x x2$y <- 1 x3 <- x x3$z <- 2 tbl <- new_tibble0(x) df <- as.data.frame(tbl) expect_identical(vec_cast(x, x), x) x2_expect <- x x2_expect$y <- NA_real_ expect_identical(vec_cast(x, x2), x2_expect) expect_error( vec_cast(x2, x3), class = "vctrs_error_cast_lossy_dropped" ) expect_identical(vec_cast(x, tbl), tbl) expect_error(vec_cast(tbl, x), class = "vctrs_error_incompatible_type") expect_identical(vec_cast(x, df), df) expect_error(vec_cast(df, x), class = "vctrs_error_incompatible_type") }) test_that("vec_ptype() returns a dribble", { x <- readRDS(test_file("just_a_dribble.rds")) expect_dribble(vec_ptype(x)) }) test_that("vec_slice() generally returns a dribble", { x <- readRDS(test_file("just_a_dribble.rds")) expect_dribble(vec_slice(x, 0)) expect_dribble(vec_slice(x, 1:2)) }) test_that("vec_c() works", { x <- readRDS(test_file("just_a_dribble.rds")) tbl <- new_tibble0(x) expect_identical(vec_c(x), x) expect_identical(vec_c(x, x), new_dribble(vec_c(tbl, tbl))) expect_identical(vec_c(x[1:5, ], x[6:10, ]), x) }) test_that("vec_rbind() works", { x <- readRDS(test_file("just_a_dribble.rds")) tbl <- new_tibble0(x) expect_identical(vec_rbind(x), x) expect_identical( vec_rbind(x, x), new_dribble(vec_rbind(tbl, tbl)) ) expect_identical(vec_rbind(x[1:5, ], x[6:10, ]), x) }) test_that("vec_cbind() returns a bare tibble", { x <- readRDS(test_file("just_a_dribble.rds")) tbl <- new_tibble0(x) expect_identical(vec_cbind(x), vec_cbind(tbl)) expect_identical( vec_cbind(x, x, .name_repair = "minimal"), vec_cbind(tbl, tbl, .name_repair = "minimal") ) expect_identical( vec_cbind(x, tbl, .name_repair = "minimal"), vec_cbind(tbl, tbl, .name_repair = "minimal") ) })
protchecker <- function(X, Comp, eq_cons, lo_bounds, up_bounds, print_flag=0){ report <- list(eq_err = c(), lo_err = c(), up_err = c(), chiral = c(), phi = c(), psi = c(), hbond = c(), dihed = c()) report$eq_err <- bondcheck(X, eq_cons) report$lo_err <- lobound(X, lo_bounds) report$up_err <- upbound(X, up_bounds) report$chiral <- chirality_check(X, Comp, 0) out <- ang_checker(X, Comp, 0) report$phi <- out$phi report$psi <- out$psi report$hbond <- report$up_err[which(up_bounds[,4] == -1)] report$dihed <- report$up_err[which(up_bounds[,4] == -2)] return(report) }
infermsfmetrop <- function(fbayes,data) { logpfx <- function(f,x) { if (f <= -1) return(-Inf) if (f >= 1) return(-Inf) nx <- as.integer(length(x)) r <- f^(0:(nx - 1)) EPS <- .Machine$double.eps out <- .C("logpHx",as.double(r),nx,as.double(x),nx,EPS, tr = array(0,dim=c(1,5)),fault = as.integer(1),PACKAGE = "HKprocess") return(c((out$tr)[1],(out$tr)[2],(out$tr)[3],(out$tr)[4],(out$tr)[5])) } g <- function(f,x = data) (logpfx(f,data)) w <- as.vector(sapply(fbayes,g)) size = length(data) sizef = length(fbayes) shape = 0.5*(size - 1) minfer = vector() sinfer2 = vector() for (i in 1:sizef) { sinfer2[i] = rinvgamma(1,shape,scale = w[5*i]) minfer[i] = rnorm(1,mean=w[5*i - 2],sd = sqrt(sinfer2[i]*w[5*i - 1]))} return(matrix(c(minfer,sinfer2),nrow = sizef,ncol = 2,dimnames = list(NULL, c("mu_sample","sigma_sq_sample")))) }
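# Hedged sketch of infermsfmetrop(): `fbayes` should hold posterior draws of
# the lag-one autocorrelation from the package's Metropolis step, and `data`
# the observed series. The stand-in draws below are illustrative only, and the
# function needs the package's compiled "logpHx" routine plus rinvgamma().
if (FALSE) {
  x <- as.numeric(arima.sim(list(ar = 0.6), n = 200))
  f_draws <- runif(100, 0.4, 0.8)  # stand-in for real posterior draws
  musigma <- infermsfmetrop(f_draws, x)
  colMeans(musigma)
}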
compileCode <- TRUE nWarm <- 1000 nKept <- 1000 nThin <- 1 library(HRW) ; library(rstan) data(protein) x1Orig <- protein$BMI x2Orig <- protein$proteinRecall yOrig <- protein$proteinBioM m <- 294 ; n <- 2 mean.x1 <- mean(x1Orig) ; sd.x1 <- sd(x1Orig) mean.x2 <- mean(x2Orig) ; sd.x2 <- sd(x2Orig) mean.y <- mean(yOrig) ; sd.y <- sd(yOrig) x1 <- (x1Orig - mean.x1)/sd.x1 x2 <- (x2Orig - mean.x2)/sd.x2 x3 <- protein$female y <- (yOrig - mean.y)/sd.y a1 <- 1.05*min(x1) - 0.05*max(x1) ; b1 <- 1.05*max(x1) - 0.05*min(x1) numIntKnots1 <- 23 intKnots1 <- seq(a1,b1,length=(numIntKnots1+2))[-c(1,numIntKnots1+2)] a2 <- 1.05*min(x2) - 0.05*max(x2) ; b2 <- 1.05*max(x2) - 0.05*min(x2) numIntKnots2 <- 23 intKnots2 <- seq(a2,b2,length=(numIntKnots2+2))[-c(1,numIntKnots2+2)] range.x1 <- c(a1,b1) ; range.x2 <- c(a2,b2) Z1 <- ZOSull(x1,range.x1,intKnots1) Z2 <- ZOSull(x2,range.x2,intKnots2) ncZ1 <- ncol(Z1) ncZ2 <- ncol(Z2) Z10 <- (1-x3)*Z1 ; Z11 <- x3*Z1 x1Mat <- matrix(NA,m,n) ; x2Mat <- matrix(NA,m,n) x3Mat <- matrix(NA,m,n) ; yMat <- matrix(NA,m,n) for (i in 1:m) { x1Mat[i,] <- x1[(i-1)*n+(1:n)] x2Mat[i,] <- x2[(i-1)*n+(1:n)] x3Mat[i,] <- x3[(i-1)*n+(1:n)] yMat[i,] <- y[(i-1)*n+(1:n)] } sigmaBeta <- 1e5 ; Asigma <- 1e5 ; ASigma <- 1e5 margAddIntModModel <- 'data { int<lower=1> m; int<lower=1> n; int<lower=1> ncZ1; int<lower=1> ncZ2; real<lower=0> sigmaBeta; real<lower=0> Asigma; real<lower=0> ASigma; matrix[m,n] x1Mat; matrix[m,n] x2Mat; matrix[m,n] x3Mat; matrix[m,n] yMat; matrix[m*n,ncZ1] Z1; matrix[m*n,ncZ2] Z2; matrix[m*n,ncZ1] Z10; matrix[m*n,ncZ1] Z11; } parameters { real beta0; real beta1; real beta2; real beta0FvsM; real beta1FvsM; vector[ncZ1] u10; vector[ncZ1] u11; vector[ncZ2] u2; real<lower=0> sigma10; real<lower=0> sigma11; real<lower=0> sigma2; cov_matrix[n] Sigma; vector[n] a; } transformed parameters { matrix[m,n] meanFunc; for (i in 1:m) for (j in 1:n) meanFunc[i,j] = beta0 + beta1*x1Mat[i,j] + beta2*x2Mat[i,j] + beta0FvsM*x3Mat[i,j] + beta1FvsM*x1Mat[i,j]*x3Mat[i,j] + dot_product(u10,Z10[(i-1)*n+j]) + dot_product(u11,Z11[(i-1)*n+j]) + dot_product(u2,Z2[(i-1)*n+j]); } model { vector[n] scaleSigma; for (i in 1:m) yMat[i] ~ multi_normal(meanFunc[i],Sigma); u10 ~ normal(0,sigma10); u11 ~ normal(0,sigma11); u2 ~ normal(0,sigma2); a ~ inv_gamma(0.5,pow(ASigma,-2)); for (j in 1:n) scaleSigma[j] = 4/a[j]; Sigma ~ inv_wishart(n+1,diag_matrix(scaleSigma)); beta0 ~ normal(0,sigmaBeta) ; beta1 ~ normal(0,sigmaBeta); beta2 ~ normal(0,sigmaBeta) ; beta0FvsM ~ normal(0,sigmaBeta); beta1FvsM ~ normal(0,sigmaBeta) ; sigma10 ~ cauchy(0,Asigma); sigma11 ~ cauchy(0,Asigma); sigma2 ~ cauchy(0,Asigma); }' allData <- list(m = m,n = n,ncZ1 = ncZ1,ncZ2 = ncZ2,x1Mat = x1Mat,x2Mat = x2Mat, x3Mat = x3Mat,yMat = yMat,Z1 = Z1,Z10 = Z10,Z11 = Z11,Z2 = Z2, sigmaBeta = sigmaBeta,Asigma = Asigma,ASigma = ASigma) if (compileCode) stanCompilObj <- stan(model_code = margAddIntModModel,data = allData, iter = 1,chains = 1) stanObj <- stan(model_code = margAddIntModModel,data = allData,warmup = nWarm, iter = (nWarm + nKept),chains = 1,thin = nThin,refresh = 100, fit = stanCompilObj) beta0MCMC <- as.vector(extract(stanObj,"beta0",permuted = FALSE)) beta1MCMC <- as.vector(extract(stanObj,"beta1",permuted = FALSE)) beta2MCMC <- as.vector(extract(stanObj,"beta2",permuted = FALSE)) beta0FvsMMCMC <- as.vector(extract(stanObj,"beta0FvsM",permuted = FALSE)) beta1FvsMMCMC <- as.vector(extract(stanObj,"beta1FvsM",permuted = FALSE)) betaMCMC <- rbind(beta0MCMC,beta1MCMC,beta0FvsMMCMC,beta1FvsMMCMC,beta2MCMC) u10MCMC <- NULL ; u11MCMC <- 
NULL ; u2MCMC <- NULL for (k in 1:ncZ1) { charVar <- paste("u10[",as.character(k),"]",sep = "") u10MCMC <- rbind(u10MCMC,extract(stanObj,charVar,permuted = FALSE)) charVar <- paste("u11[",as.character(k),"]",sep="") u11MCMC <- rbind(u11MCMC,extract(stanObj,charVar,permuted = FALSE)) } for (k in 1:ncZ2) { charVar <- paste("u2[",as.character(k),"]",sep="") u2MCMC <- rbind(u2MCMC,extract(stanObj,charVar,permuted = FALSE)) } sigma10MCMC <- as.vector(extract(stanObj,"sigma10",permuted = FALSE)) sigma11MCMC <- as.vector(extract(stanObj,"sigma11",permuted = FALSE)) sigma2MCMC <- as.vector(extract(stanObj,"sigma2",permuted = FALSE)) Sigma11MCMC <- extract(stanObj,"Sigma[1,1]",permuted = FALSE) Sigma12MCMC <- extract(stanObj,"Sigma[1,2]",permuted = FALSE) Sigma22MCMC <- extract(stanObj,"Sigma[2,2]",permuted = FALSE) sigma10MCMCOrig <- sigma10MCMC*sd.y sigma11MCMCOrig <- sigma11MCMC*sd.y sigma2MCMCOrig <- sigma2MCMC*sd.y Sigma11MCMCOrig <- Sigma11MCMC*(sd.y^2) Sigma22MCMCOrig <- Sigma22MCMC*(sd.y^2) Sigma12MCMCOrig <- Sigma12MCMC*(sd.y^2) par(mfrow=c(1,3),mai=c(1.1,1.2,0.1,0.2),mgp=c(6,2,0)) cex.labVal <- 3.2 ; cex.axisVal <- 2.8 ng <- 101 ; ylimVal <- c(5.2,6.6) shade <- FALSE ; colourVersion <- FALSE meanCol <- "darkgreen" ; seCol <- "palegreen" rugCol <- "dodgerblue" x1g <- seq(range.x1[1],range.x1[2],length=ng) x2g <- rep(mean(x2),length=ng) Z1g <- ZOSull(x1g,range.x1,intKnots1) Z2g <- ZOSull(x2g,range.x2,intKnots2) X0g <- cbind(rep(1,ng),x1g,rep(0,ng),rep(0,ng),x2g) f10MCMC <- X0g%*%betaMCMC + Z1g%*%u10MCMC + Z2g%*%u2MCMC f10MCMCorig <- mean.y + sd.y*f10MCMC f10gOrig <- apply(f10MCMCorig,1,mean) credLower10Orig <- apply(f10MCMCorig,1,quantile,0.025) credUpper10Orig <- apply(f10MCMCorig,1,quantile,0.975) x1gOrig <- mean.x1 + sd.x1*x1g plot(x1gOrig,f10gOrig,type="n",ylim=ylimVal, xlab="body mass index (males)",cex.axis=cex.axisVal, ylab="mean log(protein biomarker)",cex.lab=cex.labVal, bty="l") polygon(c(x1gOrig,rev(x1gOrig)),c(credLower10Orig,rev(credUpper10Orig)), col=seCol,border=FALSE) lines(x1gOrig,f10gOrig,lwd=2,col=meanCol) rug(x1Orig,quiet=TRUE,col=rugCol) X1g <- cbind(rep(1,ng),x1g,rep(1,ng),rep(1,ng),x2g) f11MCMC <- X1g%*%betaMCMC + Z1g%*%u11MCMC + Z2g%*%u2MCMC f11MCMCorig <- mean.y + sd.y*f11MCMC f11gOrig <- apply(f11MCMCorig,1,mean) credLower11Orig <- apply(f11MCMCorig,1,quantile,0.025) credUpper11Orig <- apply(f11MCMCorig,1,quantile,0.975) x1gOrig <- mean.x1 + sd.x1*x1g plot(x1gOrig,f11gOrig,type="n",ylim=ylimVal,cex.axis=cex.axisVal, cex.lab=cex.labVal, xlab="body mass index (females)",ylab="mean log(protein biomarker)", bty="l") polygon(c(x1gOrig,rev(x1gOrig)),c(credLower11Orig,rev(credUpper11Orig)), col=seCol,border=FALSE) lines(x1gOrig,f11gOrig,lwd=2,col=meanCol) rug(x1Orig,quiet=TRUE,col=rugCol) x1g <- rep(mean(x1),length=ng) x2g <- seq(range.x2[1],range.x2[2],length=ng) Xg <- cbind(rep(1,ng),x1g,rep(1,ng),x1g,x2g) Z1g <- ZOSull(x1g,range.x1,intKnots1) Z2g <- ZOSull(x2g,range.x2,intKnots2) f2MCMC <- Xg%*%betaMCMC + Z1g%*%u11MCMC + Z2g%*%u2MCMC f2MCMCorig <- mean.y + sd.y*f2MCMC f2gOrig <- apply(f2MCMCorig,1,mean) credLower2Orig <- apply(f2MCMCorig,1,quantile,0.025) credUpper2Orig <- apply(f2MCMCorig,1,quantile,0.975) x2gOrig <- mean.x2 + sd.x2*x2g plot(x2gOrig,f2gOrig,type="n",ylim=ylimVal,cex.axis=cex.axisVal, cex.lab=cex.labVal,xlab="protein recall",ylab="mean log(protein biomarker)", bty="l") polygon(c(x2gOrig,rev(x2gOrig)),c(credLower2Orig,rev(credUpper2Orig)), col=seCol,border=FALSE) lines(x2gOrig,credLower2Orig,lwd=2,lty=2,col=seCol) 
lines(x2gOrig,credUpper2Orig,lwd=2,lty=2,col=seCol) lines(x2gOrig,f2gOrig,lwd=2,col=meanCol) rug(x2Orig,quiet=TRUE,col=rugCol) indQ2 <- length(x1gOrig[x1gOrig<quantile(x1Orig,0.50)]) fhat10OrigQ2 <- f10MCMCorig[indQ2,] fhat11OrigQ2 <- f11MCMCorig[indQ2,] indQ2 <- length(x2gOrig[x2gOrig<quantile(x2Orig,0.50)]) fhat2OrigQ2 <- f2MCMCorig[indQ2,] parmsMCMC <- list(cbind(fhat10OrigQ2,fhat11OrigQ2,fhat2OrigQ2, Sigma11MCMCOrig,Sigma12MCMCOrig,Sigma22MCMCOrig)) parNamesVec <- list(c("mean function","at median","of BMI (males)"), c("mean function","at median","of BMI (females)"), c("mean function","at median","of protein recall"), expression(Sigma[11]),expression(Sigma[12]), expression(Sigma[22])) summMCMC(parmsMCMC,parNames=parNamesVec,KDEvertLine=TRUE)
turb.iec.plot <- function(mast, set, subset, ...) {
  if (class(mast) != "mast") stop(substitute(mast), " is no mast object")
  num.sets <- length(mast$sets)
  if (!is.numeric(set)) set <- match(set, names(mast$sets))
  if (is.na(set)) stop("'set' not found")
  if (set < 0 || set > num.sets) stop("'set' not found")
  if (is.null(mast$sets[[set]]$data$turb.int)) stop("'set' does not contain turbulence intensity data")
  unit <- attr(mast$sets[[set]]$data$v.avg, "unit")
  if (missing(subset)) subset <- c(NA, NA)
  start.end <- subset.int(mast$timestamp, subset)
  start <- start.end[1]
  end <- start.end[2]
  vmax <- ceiling(max(mast$sets[[set]]$data$v.avg[start:end], na.rm = TRUE))
  site.turb <- c()
  for (i in 0:(vmax - 1)) {
    site.turb <- append(site.turb,
                        mean(mast$sets[[set]]$data$turb.int[mast$sets[[set]]$data$v.avg[start:end] >= i
                             & mast$sets[[set]]$data$v.avg[start:end] < i + 1], na.rm = TRUE))
  }
  plot.param <- list(...)
  # The original default bar colour (a "#..." hex string) was lost in text
  # extraction; "gray" is a stand-in, not the package's original value.
  if (any(names(plot.param) == "col")) col <- plot.param$col else col <- "gray"
  if (any(names(plot.param) == "line")) line <- plot.param$line else line <- "black"
  if (any(names(plot.param) == "col.lab")) col.lab <- plot.param$col.lab else col.lab <- "black"
  if (any(names(plot.param) == "col.axis")) col.axis <- plot.param$col.axis else col.axis <- "black"
  if (any(names(plot.param) == "col.leg")) col.leg <- plot.param$col.leg else col.leg <- "black"
  if (any(names(plot.param) == "col.ticks")) col.ticks <- plot.param$col.ticks else col.ticks <- "black"
  if (any(names(plot.param) == "col.box")) col.box <- plot.param$col.box else col.box <- "black"
  if (any(names(plot.param) == "border")) border <- plot.param$border else border <- col
  if (any(names(plot.param) == "space")) {
    if (plot.param$space < 1 && plot.param$space > 0) space <- plot.param$space else space <- 0.2
  } else space <- 0.2
  if (any(names(plot.param) == "lty")) lty <- plot.param$lty else lty <- c(3, 2, 1)
  if (any(names(plot.param) == "lwd")) lwd <- plot.param$lwd else lwd <- 1.2
  if (any(names(plot.param) == "cex")) cex <- plot.param$cex else cex <- 1
  if (any(names(plot.param) == "cex.lab")) cex.lab <- plot.param$cex.lab else cex.lab <- cex
  if (any(names(plot.param) == "cex.axis")) cex.axis <- plot.param$cex.axis else cex.axis <- cex
  if (any(names(plot.param) == "cex.leg")) cex.leg <- plot.param$cex.leg else cex.leg <- cex - 0.2
  if (any(names(plot.param) == "xlim")) xlim <- plot.param$xlim else xlim <- c(0, vmax)
  if (any(names(plot.param) == "ylim")) ylim <- plot.param$ylim else ylim <- c(0, 0.6)
  if (any(names(plot.param) == "x.intersp")) x.intersp <- plot.param$x.intersp else x.intersp <- 0.4
  if (any(names(plot.param) == "y.intersp")) y.intersp <- plot.param$y.intersp else y.intersp <- 0.8
  if (any(names(plot.param) == "bty.leg")) bty.leg <- plot.param$bty.leg else bty.leg <- "n"
  if (any(names(plot.param) == "pos.leg")) pos.leg <- plot.param$pos.leg else pos.leg <- "topright"
  if (any(names(plot.param) == "xlab")) xlab <- plot.param$xlab else xlab <- paste("Wind speed [", unit, "]", sep = "")
  if (any(names(plot.param) == "ylab")) ylab <- plot.param$ylab else ylab <- "Turbulence intensity [-]"
  if (any(names(plot.param) == "mar")) mar <- plot.param$mar else mar <- c(4.5, 4.5, 1, 1)
  if (any(names(plot.param) == "mgp")) mgp <- plot.param$mgp else mgp <- c(2.2, 0.7, 0)
  if (any(names(plot.param) == "las")) las <- plot.param$las else las <- 1
  if (any(names(plot.param) == "bty")) bty <- plot.param$bty else bty <- "o"
  if (any(names(plot.param) == "legend")) legend <- plot.param$legend else legend <- TRUE
  if (any(names(plot.param) == "leg.text")) leg.text <- plot.param$leg.text else leg.text <- c("Class A (0.16)", "Class B (0.14)", "Class C (0.12)", "Site")
  if (length(line) == 1) line <-
rep(line, 3) if(length(lty)==1) lty <- rep(lty, 3) if(length(lwd)==1) lwd <- rep(lwd, 3) v <- seq(0, xlim[2], 1) sigma1 <- 0.16*(0.75*v+5.6)/v sigma2 <- 0.14*(0.75*v+5.6)/v sigma3 <- 0.12*(0.75*v+5.6)/v old.par <- par(no.readonly=TRUE) on.exit(par(old.par)) par(mar=mar, mgp=mgp, las=las, bty="n") plot(v, sigma1, type="l", xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, axes=FALSE, lty=lty[3], lwd=lwd[3], col=line[3], cex.lab=cex.lab, col.lab=col.lab) box(bty=bty, col=col.box) axis(1, col=col.ticks, col.axis=col.axis, cex.axis=cex.axis) axis(2, col=col.ticks, col.axis=col.axis, cex.axis=cex.axis) lines(v, sigma2, lty=lty[2], lwd=lwd[2], col=line[2]) lines(v, sigma3, lty=lty[1], lwd=lwd[1], col=line[1]) for(i in 5:vmax) { polygon(c(i-space/2, i-space/2, i-1+space/2, i-1+space/2), c(0, site.turb[i], site.turb[i], 0), col=col, border=border) } if(legend) legend(pos.leg, legend=leg.text, col=c(line, border), lty=c(lty, NA), lwd=c(lwd, NA), pch=c(NA, NA, NA, 22), pt.bg=c(NA, NA, NA, col), bty=bty.leg, cex=cex.leg, x.intersp=x.intersp, y.intersp=y.intersp, text.col=col.leg) }
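# Usage sketch for turb.iec.plot(), assuming `mast_obj` is a mast object built
# by this package's mast() constructor (hypothetical name, not created here).
if (FALSE) {
  turb.iec.plot(mast_obj, set = 1)
  turb.iec.plot(mast_obj, set = 1, col = "lightblue", ylim = c(0, 0.5))
}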
testthat::context("test-estimate_pcf_fast") testthat::test_that("estimate_pcf returns spatstat.fv object", { pcf_est <- estimate_pcf_fast(pattern = species_b) testthat::expect_is(pcf_est, "fv") })
as_data_backend.OMLData = function(data, primary_key = NULL, ...) { as_data_backend(data$data) } as_data_backend.OMLTask = function(data, primary_key = NULL, ...) { as_data_backend(data$data$data) }
generate_BB <- function(N = 1000 , num_seed = 2 , multiple_node = 1 , m = 1 , mode_f = "gamma", s = 10 ){ if (num_seed >= N) stop("num_seed too large") if (num_seed < 2) stop("num_seed too small") if (multiple_node > N - num_seed) stop("Multiple node and/or num_seed are too large") if ((m <= 0) || (num_seed <= 0) || (multiple_node <= 0) || (s < 0)) stop("The parameters must be positive") return(generate_net(N = N , num_seed = num_seed , multiple_node = multiple_node , m = m , alpha = 1, mode_f = mode_f, s = s)) }
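# Minimal call sketch; generate_BB() only validates its arguments and then
# delegates to generate_net(), which is defined elsewhere in the package.
if (FALSE) {
  g <- generate_BB(N = 500, num_seed = 5, m = 2, mode_f = "gamma", s = 10)
}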
context("Checking text processing functions") api_key <- Sys.getenv("ALCHEMY_API_KEY") text_path <- system.file( "extdata/text", "text_examples.txt", package = "cognizer" ) text <- readLines(text_path) test_that( "sentiment analysis returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_sentiment(text, api_key) expect_is(test, "list") } ) test_that( "keyword analysis returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_keywords(text, api_key) expect_is(test, "list") } ) test_that( "emotion analysis returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_emotion(text, api_key) expect_is(test, "list") } ) test_that( "language detection returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_language(text, api_key) expect_is(test, "list") } ) test_that( "entity extraction returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_entity(text, api_key) expect_is(test, "list") } ) test_that( "concept extraction returns successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_concept(text, api_key) expect_is(test, "list") } ) test_that( "alchemy analysis errors are handled successfully", { if (identical(api_key, "")) skip("no authentication provided") test <- text_sentiment(text, substr(api_key, 1, 8)) expect_is(test, "list") } ) userpwd <- Sys.getenv("LANG_TRANSLATE_USERNAME_PASSWORD") text <- "hola amigo" test_that( "language translation returns successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_translate(text, userpwd) expect_is(test, "list") } ) test_that( "language translation errors are handled successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_translate(text, substr(userpwd, 1, 8)) expect_is(test, "list") } ) userpwd <- Sys.getenv("PERSONALITY_USERNAME_PASSWORD") set.seed(539843) text <- paste(replicate(1000, rmsfact::rmsfact()), collapse = ' ') test_that( "personality insight returns successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_personality(text, userpwd) expect_is(test, "list") } ) test_that( "personality insight errors are handled successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_personality(text, substr(userpwd, 1, 8)) expect_is(test, "list") } ) userpwd <- Sys.getenv("TONE_USERNAME_PASSWORD") test_that( "tone analyzer returns successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_tone(text, userpwd) expect_is(test, "list") } ) test_that( "tone analyzer errors are handled successfully", { if (identical(userpwd, "")) skip("no authentication provided") test <- text_tone(text, substr(userpwd, 1, 8)) expect_is(test, "list") } ) userpwd <- Sys.getenv("TEXT_TO_SPEECH_USERNAME_PASSWORD") set.seed(539843) text <- rmsfact::rmsfact() test_that( "text to speech synthesizer returns successfully", { if (identical(userpwd, "")) skip("no authentication provided") tmp <- tempdir() on.exit(unlink(file.path(tmp, "1.ogg"))) test <- text_audio(text, userpwd, directory = tmp) expect_true(test) expect_identical(list.files(tmp, ".ogg"), "1.ogg") expect_gt(file.size(list.files(tmp, ".ogg", full.names = TRUE)), 0) } )
writePeaklist <- function(MSlist, directory, filename, overwrite = FALSE) {
  if (!length(MSlist) == 8) { stop("This is not an MSlist object") }
  if (!MSlist[[1]][[5]]) { stop("MSlist does not contain picked peaks - abort.") }
  if (!file.exists(directory)) { stop("invalid directory") }
  # file.path() replaces the hard-coded "\\" separator, which only worked on Windows
  fileout <- file.path(directory, filename)
  if (file.exists(fileout) & !overwrite) { stop("file already exists; cannot overwrite!") }
  write.table(MSlist[[8]], file = fileout)
}
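# Usage sketch, assuming `MSlist` was produced by the package's peak-picking
# step (element [[8]] holds the peak table; flag [[1]][[5]] marks picked peaks).
if (FALSE) {
  writePeaklist(MSlist, directory = tempdir(), filename = "peaks.txt",
                overwrite = TRUE)
}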
NULL sql_schema_create <- function(dest, schema, ...) { deprecate_soft("0.2.5", "dm::sql_schema_create()", "dm::db_schema_create()") check_dots_empty() db_schema_create(dest, schema) } sql_schema_drop <- function(dest, schema, force = FALSE, ...) { deprecate_soft("0.2.5", "dm::sql_schema_drop()", "dm::db_schema_drop()") check_dots_empty() db_schema_drop(dest, schema, force) } sql_schema_exists <- function(dest, schema, ...) { deprecate_soft("0.2.5", "dm::sql_schema_exists()", "dm::db_schema_exists()") check_dots_empty() db_schema_exists(dest, schema) } sql_schema_list <- function(dest, include_default = TRUE, ...) { deprecate_soft("0.2.5", "dm::sql_schema_list()", "dm::db_schema_list()") check_dots_empty() db_schema_list(dest, include_default) }
NULL sw_tidy.bats <- function(x, ...) { n <- which(stringr::str_detect(names(x), "likelihood")) - 1 x_subset <- x[1:n] ret_1 <- tibble::tibble(term = names(x_subset)) ret_2 <- data.frame(estimate = unlist(x_subset)) %>% tibble::rownames_to_column(var = "term") ret <- dplyr::left_join(ret_1, ret_2, by = "term") return(ret) } sw_glance.bats <- function(x, ...) { if (inherits(x, "tbats")) { ret_1 <- tibble::tibble(model.desc = tbats_string(x)) } else { ret_1 <- tibble::tibble(model.desc = bats_string(x)) } ret_2 <- tibble::tibble(sigma = sqrt(x$variance), logLik = x$likelihood, AIC = x$AIC, BIC = x$AIC - (2*2) + log(length(x$y))*2) ret_3 <- tibble::as_tibble(forecast::accuracy(x)) ret <- dplyr::bind_cols(ret_1, ret_2, ret_3) return(ret) } sw_augment.bats <- function(x, data = NULL, rename_index = "index", timetk_idx = FALSE, ...) { if (timetk_idx) { if (!has_timetk_idx(x)) { warning("Object has no timetk index. Using default index.") timetk_idx = FALSE } } ret <- tk_tbl(cbind(.actual = x$y, .fitted = x$fitted.values, .resid = x$y - x$fitted.values), rename_index = rename_index, silent = TRUE) if (timetk_idx) { idx <- tk_index(x, timetk_idx = TRUE) ret[, rename_index] <- idx } ret <- sw_augment_columns(ret, data, rename_index, timetk_idx) return(ret) } sw_tidy_decomp.bats <- function(x, timetk_idx = FALSE, rename_index = "index", ...) { if (timetk_idx) { if (!has_timetk_idx(x)) { warning("Object has no timetk index. Using default index.") timetk_idx = FALSE } } ret <- forecast::tbats.components(x) ret <- tk_tbl(ret, preserve_index = TRUE, rename_index, silent = TRUE) if ("season" %in% colnames(ret)) { ret <- ret %>% dplyr::mutate(seasadj = observed - season) } if (timetk_idx) { idx <- tk_index(x, timetk_idx = TRUE) if (nrow(ret) != length(idx)) ret <- ret[(nrow(ret) - length(idx) + 1):nrow(ret),] ret[, rename_index] <- idx } ret <- sw_augment_columns(ret, data = NULL, rename_index = rename_index, timetk_idx = timetk_idx) return(ret) }
pk.calc.c0 <- function(conc, time, time.dose = 0,
                       method = c("c0", "logslope", "c1", "cmin", "set0"),
                       check = TRUE) {
  if (check) check.conc.time(conc, time)
  if (length(time.dose) != 1) stop("time.dose must be a scalar")
  if (!is.numeric(time.dose) || is.factor(time.dose)) stop("time.dose must be a number")
  if (time.dose > max(time)) {
    warning("time.dose is after all available data")
    return(NA)
  }
  method <- match.arg(method, several.ok = TRUE)
  ret <- NA
  # Try each requested method in order until one yields a non-missing value
  while (is.na(ret) & length(method) > 0) {
    current.method <- method[1]
    method <- method[-1]
    ret <- do.call(paste("pk.calc.c0.method", current.method, sep = "."),
                   args = list(conc = conc, time = time, time.dose = time.dose,
                               check = FALSE))
  }
  ret
}
pk.calc.c0.method.logslope <- function(conc, time, time.dose = 0, check = TRUE) {
  if (check) check.conc.time(conc, time)
  mask.positive.time <- (time > time.dose & !(is.na(conc)))
  positive.time <- time[mask.positive.time]
  if (length(positive.time) < 2) return(NA)
  mask.1 <- time %in% positive.time[1]
  mask.2 <- time %in% positive.time[2]
  c1 <- conc[mask.1]
  c2 <- conc[mask.2]
  t1 <- time[mask.1]
  t2 <- time[mask.2]
  if (c2 < c1 & c2 != 0) {
    # Back-extrapolate on the log scale from the first two post-dose points
    exp(log(c1) - (log(c2) - log(c1)) / (t2 - t1) * (t1 - time.dose))
  } else {
    NA
  }
}
pk.calc.c0.method.c0 <- function(conc, time, time.dose = 0, check = TRUE) {
  if (check) check.conc.time(conc, time)
  mask.dose <- (time %in% time.dose & !(conc %in% c(NA, 0)))
  if (any(mask.dose)) {
    conc[mask.dose]
  } else {
    NA
  }
}
pk.calc.c0.method.c1 <- function(conc, time, time.dose = 0, check = TRUE) {
  if (check) check.conc.time(conc, time)
  mask.post.dose <- (time > time.dose & !is.na(conc))
  if (any(mask.post.dose)) {
    conc[mask.post.dose][1]
  } else {
    NA
  }
}
pk.calc.c0.method.set0 <- function(conc, time, time.dose = 0, check = TRUE) 0
pk.calc.c0.method.cmin <- function(conc, time, time.dose = 0, check = TRUE)
  pk.calc.cmin(conc, check = check)
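# Self-contained illustration of the method fallback: at the dose time the
# concentration is 0, so "c0" returns NA and the log-slope back-extrapolation
# is used instead (check = FALSE skips the conc/time validator, which is
# defined elsewhere in the package). Values are synthetic.
if (FALSE) {
  conc <- c(0, 10, 8, 6, 4)
  time <- c(0, 1, 2, 3, 4)
  pk.calc.c0(conc, time, time.dose = 0, method = c("c0", "logslope"),
             check = FALSE)
  # logslope gives exp(log(10) - (log(8) - log(10))/(2 - 1) * (1 - 0)) = 12.5
}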
knitr::opts_chunk$set(
  collapse = TRUE,
  # The original value was truncated in extraction (text after the "#" was
  # stripped); "#>" is knitr's conventional output prefix and is assumed here.
  comment = "#>",
  fig.align = 'center',
  dpi = 92,
  fig.retina = 2
)
options(tibble.print_min = 4L, tibble.print_max = 4L)
write.pspp <- function (data, datafile, pspp.path, decmax=6, as.factors=TRUE, use.bat=FALSE) { data <- as.data.frame(data) df <- data codefile <- paste0( datafile, ".sps" ) adQuote <- function(x){ paste0("\"", x, "\"")} varnames <- colnames(df) if ( as.factors ){ dfn <- lapply(df, function(x) if (is.factor(x)) as.numeric(x) else x) } else { dfn <- lapply(df, function(x) if (is.factor(x)) paste(x) else x ) } if(is.null(attributes(df)$variable.labels)) varlabels <- names(df) else varlabels <- attributes(df)$variable.labels if (is.null(varnames)) { varnames <- abbreviate(names(df), 8) if (any(sapply(varnames, nchar) > 8)) stop("I cannot abbreviate the variable names to eight or fewer letters") if (any(varnames !=names(df))) warning("some variable names were abbreviated") } eps2 <- .001 ldfn <- lapply( dfn, FUN=function(vv){ if ( is.numeric(vv) ){ floor( max( log(abs(vv)+1,10), na.rm=TRUE ) )+2 } else { max( nchar(vv) ) } } ) V <- length(dfn) lafter_dfn <- rep(0,V) stringentry <- rep(0,V) for (vv in 1:V){ if ( is.numeric( as.vector(dfn[[vv]] ) ) ){ dvv <- abs( as.numeric( as.vector(dfn[[vv]] )) ) dd <- 0 hh <- 1 while( ( hh==1 ) & ( dd < decmax ) ){ yvv <- 10^dd * dvv - floor( 10^dd* dvv ) if ( max(yvv,na.rm=TRUE)==0 ){ hh <- 0 ; break } else { dd <- dd+1 } } } else { dd <- max( nchar( paste(dfn[[vv]] ))) stringentry[vv] <- 1 } lafter_dfn[vv] <- dd } xf <- unlist(ldfn) + 1 + lafter_dfn xf <- ifelse( xf=="0", "1", xf ) pformat <- paste0( "F", xf, ".", lafter_dfn, "" ) pformat <- ifelse( stringentry==1, paste0( "A", lafter_dfn ), pformat ) vars2 <- paste( paste( varnames, pformat ), collapse="\n " ) dfn1 <- as.data.frame( dfn ) utils::write.csv2( dfn1, paste0( datafile, ".csv" ), row.names=FALSE, quote=FALSE, na="") cat(paste0( "GET DATA \n /TYPE=TXT \n /FILE='", gsub( "\\", "//", getwd(), fixed=TRUE ), "/", datafile, ".csv' \n", "/IMPORTCASES=ALL\n", "/ARRANGEMENT=DELIMITED\n", "/DELCASE=LINE\n", "/FIRSTCASE=2\n", "/DELIMITERS=';'\n", "/QUALIFIER=''\n", "/ESCAPE \n /VARIABLES=\n" ), file=codefile) cat( paste0( vars2, " .\n\n" ), file=codefile, append=TRUE) cat("VARIABLE LABELS\n", file=codefile, append=TRUE) cat(paste(varnames, adQuote(varlabels), "\n"), ".\n", file=codefile, append=TRUE) if ( as.factors){ factors <- sapply(dfn1, is.factor) if (any(factors)){ for (v in which(factors)){ cat("\nVALUE LABELS", file=codefile, append=TRUE) cat("\n", file=codefile, append=TRUE) cat(varnames[v], " \n", file=codefile, append=TRUE) levs <- levels(df[[v]]) cat(paste(1:length(levs), adQuote(levs), "\n", sep=" "), file=codefile, append=TRUE) } cat(".\n", file=codefile, append=TRUE) } } cat("\n",file=codefile,append=TRUE) varnames <- colnames(df) for (vv in varnames){ avv <- attr( df[,vv], "value.labels" ) if ( length(avv) > 0 ){ cat("VALUE LABELS\n", file=codefile, append=TRUE ) pvv <- paste0( avv, " '", names(avv), "'" ) pvv <- paste( paste( vv, paste( pvv, collapse=" ") ) ) cat( pvv, ".\n", file=codefile, append=TRUE) } } cat("\nEXECUTE.\n", file=codefile, append=TRUE) cat( paste0( "\n\n save outfile='", getwd(), "/", datafile, ".sav'.\n execute."), file=codefile, append=TRUE ) p1 <- paste0( "\"", pspp.path, "pspp.exe\" ", codefile ) if ( use.bat ){ writeLines( p1, "_batch_pspp.bat" ) system( "_batch_pspp.bat" ) } else { system(p1) } }
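# Usage sketch, assuming a Windows machine with PSPP installed; pspp.path must
# point at the directory containing pspp.exe (path below is hypothetical).
if (FALSE) {
  df <- data.frame(id = 1:3, group = factor(c("a", "b", "a")))
  write.pspp(df, datafile = "mydata", pspp.path = "C:/PSPP/bin/")
}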
checkTriple2 <- function (a, b, c, nbrsA, nbrsC, sepsetA, sepsetC, suffStat, alpha, version.unf = c(NA, NA), maj.rule = FALSE,verbose = FALSE, covariates=NULL, QTLs = integer(), max.iter = 50, stop.if.significant = TRUE, use.res = FALSE, res.cor = NULL) { non.collider.nodes <- sort(c(1,QTLs)) if (any(non.collider.nodes %in% c(a,b,c))) {return(lapply(list(decision = 2, version = 1, SepsetA = sepsetA, SepsetC = sepsetC), as.integer))} nr.indep <- 0 stopifnot(length(version.unf) == 2, version.unf %in% 1:2) tmp <- if (version.unf[2] == 2) (b %in% sepsetA || b %in% sepsetC) version <- 0 if ((nn <- length(nbrsA)) > 0) { allComb <- expand.grid(lapply(integer(nn), function(.) 0:1)) for (i in 1:nrow(allComb)) { S <- nbrsA[which(allComb[i, ] != 0)] pval <- pcgenTest(x=a, y=c, S=S, suffStat, covariates=covariates, QTLs = QTLs, alpha = alpha, max.iter = max.iter, stop.if.significant = stop.if.significant, use.res = use.res, res.cor = res.cor) if (verbose) cat("a: S =", S, " - pval =", pval, "\n") if (pval >= alpha) { nr.indep <- nr.indep + 1 tmp <- c(tmp, b %in% S) version <- 1 } } } if ((nn <- length(nbrsC)) > 0) { allComb <- expand.grid(lapply(integer(nn), function(.) 0:1)) for (i in 1:nrow(allComb)) { S <- nbrsC[which(allComb[i, ] != 0)] pval <- pcgenTest(x=a, y=c, S=S, suffStat, covariates=covariates, QTLs = QTLs, alpha = alpha, max.iter = max.iter, stop.if.significant = stop.if.significant, use.res = use.res, res.cor = res.cor) if (verbose) cat("c: S =", S, " - pval =", pval, "\n") if (pval >= alpha) { nr.indep <- nr.indep + 1 tmp <- c(tmp, b %in% S) version <- 1 } } } if (version.unf[1] == 2 && nr.indep == 0) { version <- 2 } if (is.null(tmp)) tmp <- FALSE if (all(tmp)) { res <- 2 if (b %nin% sepsetA) sepsetA <- c(sepsetA, b) if (b %nin% sepsetC) sepsetC <- c(sepsetC, b) } else { if (all(!tmp)) { res <- 1 sepsetA <- setdiff(sepsetA, b) sepsetC <- setdiff(sepsetC, b) } else { if (!maj.rule) { res <- 3 } else { if (sum(tmp)/length(tmp) < 0.5) { res <- 1 sepsetA <- setdiff(sepsetA, b) sepsetC <- setdiff(sepsetC, b) } else if (sum(tmp)/length(tmp) > 0.5) { res <- 2 if (b %nin% sepsetA) sepsetA <- c(sepsetA, b) if (b %nin% sepsetC) sepsetC <- c(sepsetC, b) } else if (sum(tmp)/length(tmp) == 0.5) { res <- 3 } } } } if (verbose && res == 3) cat("Triple ambiguous\n") lapply(list(decision = res, version = version, SepsetA = sepsetA, SepsetC = sepsetC), as.integer) }
kappaCohen <- function(data, weight="unweighted") { MatRes <- matrix(nrow=ncol(data)/2 , ncol=3) rownames(MatRes) <- substr(colnames(data)[seq(from=1, to=ncol(data), by=2)], 1, nchar(colnames(data)[seq(from=1, to=ncol(data), by=2)])-2) colnames(MatRes) <- c("Kappa", "Subjects", "p-value") for (j in 1:nrow(MatRes)) { z <- data.frame(data[,2*j], data[,2*j-1]) zz <- na.omit(z) if (nrow(zz)>1) { kappacohen <- kappa2(zz, weight=weight) MatRes[j,1] <- kappacohen$value MatRes[j,2] <- nrow(zz) MatRes[j,3] <- kappacohen$p.value } else { MatRes[j,1] <- NA MatRes[j,2] <- 0 MatRes[j,3] <- NA } } MatRes <- as.data.frame(MatRes) MatRes[,2] <- as.integer(MatRes[,2]) return(MatRes) }
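# Self-contained check of kappaCohen(): rater pairs sit in adjacent columns
# (here item_1/item_2 for a single item). kappa2() comes from the irr package.
if (FALSE) {
  library(irr)
  set.seed(42)
  ratings <- data.frame(item_1 = sample(1:3, 30, replace = TRUE),
                        item_2 = sample(1:3, 30, replace = TRUE))
  kappaCohen(ratings)
}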
setClass(Class = "Wishart", slots = c("scale" = "matrix"), contains = c("QPriorObj")) .validityWishart <- function(object) { if (length(x = object@lambda) != ncol(x = object@Q)) { return("dim error lambda") } if (nrow(x = object@scale) != ncol(x = object@Q)) { return("dim error scale") } if (ncol(x = object@scale) != ncol(x = object@Q)) { return("dim error scale") } if (any(object@scale < 0.0)) return("scale cannot be negative") return( TRUE ) } setValidity(Class = "Wishart", method = .validityWishart) setMethod(f = "initialize", signature = "Wishart", definition = function(.Object, ...){ .Object <- callNextMethod() .Object@scale <- matrix(data = 0.0, nrow = 0L, ncol = 0L) validObject(.Object) return( .Object ) }) setMethod(f = ".qInv", signature = c(qObj = "Wishart"), definition = function(qObj, ..., basisdB, lambda, scale) { message("initializing Wishart prior") if (!is.numeric(x = scale)) stop("wishartScale must be numeric", call. = FALSE) r <- ncol(x = basisdB) qObj@Q <- diag(x = 1.0, nrow = r, ncol = r) if (length(x = scale) != 1L && length(x = scale) != r) { stop("wishartScale must be of length 1 or r", call. = FALSE) } qObj@scale <- diag(x = scale, nrow = r, ncol = r) if (length(x = lambda) != 1L && length(x = lambda) != r) { stop("lambda must be of length 1 or r", call. = FALSE) } if (length(x = lambda) == 1L) lambda <- rep(x = lambda, times = r) qObj@lambda <- lambda return( qObj ) }) setMethod(f = ".gibbsQ", signature = c(qObj = "Wishart"), definition = function(qObj, ..., SpinvVS, SpinvV) { r <- ncol(x = qObj@Q) denoEta <- as.matrix(x = SpinvVS) + qObj@Q %*% {{1.0 / qObj@lambda} * t(x = qObj@Q)} msg <- "unable to invert matrix for eta distribution\n" denoEta <- tryCatch(expr = pracma::pinv(A = as.matrix(x = denoEta)), error = function(e){ stop(msg, e$message, call. = FALSE) }) qObj <- .gibbsStep(qObj = qObj, denoEta = denoEta, SpinvV = SpinvV) return( qObj ) }) setMethod(f = ".metroQ", signature = c(qObj = "Wishart"), definition = function(qObj, ..., eta) { r <- length(x = eta) scale <- qObj@scale + tcrossprod(x = as.matrix(x = eta)) matsim <- LaplacesDemon::rinvwishart(nu = {r + 1L}, S = scale) if (any(is.na(x = matsim)) || any(is.nan(x = matsim)) || any(is.infinite(x = matsim))) { stop('Wishart contains invalid values (Inf, NA, or NaN)') } temp <- tryCatch(expr = eigen(x = matsim), error = function(e){ stop("eigen decomposition of prior failed", e$message, call. = FALSE) }) qObj@Q <- temp$vectors return( qObj ) })
devtools::check() rhub::check_for_cran() devtools::build() golem::add_rstudioconnect_file() golem::add_shinyappsio_file() golem::add_shinyserver_file() golem::add_dockerfile() golem::add_dockerfile_shinyproxy() golem::add_dockerfile_heroku()
msg <- function(..., startup = FALSE) { if (startup) { if (!isTRUE(getOption("tidymodels.quiet"))) { packageStartupMessage(text_col(...)) } } else { message(text_col(...)) } } text_col <- function(x) { if (!rstudioapi::isAvailable()) { return(x) } if (!rstudioapi::hasFun("getThemeInfo")) { return(x) } theme <- rstudioapi::getThemeInfo() if (isTRUE(theme$dark)) cli::col_white(x) else cli::col_black(x) } tidymodels_packages <- function(include_self = TRUE) { raw <- utils::packageDescription("tidymodels")$Imports imports <- strsplit(raw, ",")[[1]] parsed <- gsub("^\\s+|\\s+$", "", imports) names <- vapply(strsplit(parsed, "\\s+"), "[[", 1, FUN.VALUE = character(1)) if (include_self) { names <- c(names, "tidymodels") } names } invert <- function(x) { if (length(x) == 0) return() stacked <- utils::stack(x) tapply(as.character(stacked$ind), stacked$values, list) } release_bullets <- function() { c( 'Check what `usethis::use_latest_dependencies(TRUE, "CRAN")` might update', 'Use `tidymodels_dependency_dissuade()` to send emails' ) } choose_startup_tip <- function(vec) { ind <- as.numeric(format(Sys.time(), "%S")) %% length(vec) + 1 vec[ind] }
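# invert() turns a named list of vectors into a map from each value to the
# names it appears under; a small self-contained illustration:
if (FALSE) {
  deps <- list(recipes = c("dplyr", "rlang"), parsnip = "rlang")
  invert(deps)
  # $dplyr: "recipes"; $rlang: "recipes" "parsnip"
}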
plotStem3d<-function(hi,di,col="chocolate4", alpha=0.75) { cilinder<-function(X0,Y0,r,z) { disfun<-sqrt(1) angs<-seq(0,2*pi,length=60) x<-X0 + r*disfun*cos(angs) y<-Y0 + r*disfun*sin(angs) return(cbind(x,y,rep(z,60))) } hidi<-cbind(hi,di) maxr<-max(hidi)/2 x=0 ; y=0 rgl::open3d() rgl::bg3d(col="white") rgl::view3d( theta = -90, phi = -90) for ( i in 2:(nrow(hidi))) { sec_i<-hidi[(i-1):i,] c0<-cilinder(x,y,(sec_i[1,2]/2),sec_i[1,1]) c1<-cilinder(x,y,(sec_i[2,2]/2),sec_i[2,1]) xyz<-cbind(rbind(c0,c1)) rgl::plot3d(xyz,type="l",lwd=2,xlab="",add=T,col="black",ylab="",axes=FALSE, xlim=c(x-maxr-2,x+maxr+2),ylim=c(y-maxr-2,y+maxr+2)) ch <- t(geometry::convhulln(xyz, "QJ")) rgl::rgl.triangles(xyz[ch,1],xyz[ch,2],xyz[ch,3], col=col,alpha=alpha) } rgl::aspect3d(0.3,0.3,1) }
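# Usage sketch for plotStem3d() with synthetic stem measurements (heights hi,
# diameters di); requires the rgl and geometry packages.
if (FALSE) {
  hi <- c(0.1, 1, 2, 4, 6, 8)
  di <- c(30, 26, 22, 16, 10, 4)
  plotStem3d(hi, di, col = "chocolate4", alpha = 0.75)
}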
pmin.int <- function(..., na.rm = FALSE) .Internal(pmin(na.rm, ...)) pmax.int <- function(..., na.rm = FALSE) .Internal(pmax(na.rm, ...)) pmax <- function (..., na.rm = FALSE) { elts <- list(...) if(length(elts) == 0L) stop("no arguments") i4 <- isS4(elts[[1L]]) if(!i4 && all(vapply(elts, function(x) is.atomic(x) && !is.object(x), NA))) { mmm <- .Internal(pmax(na.rm, ...)) } else { mmm <- as.vector(elts[[1L]]) has.na <- FALSE for (each in elts[-1L]) { each <- as.vector(each) l1 <- length(each); l2 <- length(mmm) if(l2 < l1) { if (l2 && l1 %% l2) warning("an argument will be fractionally recycled") mmm <- rep(mmm, length.out = l1) } else if(l1 && l1 < l2) { if (l2 %% l1) warning("an argument will be fractionally recycled") each <- rep(each, length.out = l2) } nas <- cbind(is.na(mmm), is.na(each)) if(has.na || (has.na <- any(nas))) { mmm [nas[, 1L]] <- each[nas[, 1L]] each[nas[, 2L]] <- mmm [nas[, 2L]] } change <- mmm < each change <- change & !is.na(change) mmm[change] <- each[change] if (has.na && !na.rm) mmm[nas[, 1L] | nas[, 2L]] <- NA } } if(i4) { r <- elts[[1L]] tryCatch({ r[] <- mmm; r }, error = mmm) } else { mostattributes(mmm) <- attributes(elts[[1L]]) mmm } } pmin <- function (..., na.rm = FALSE) { elts <- list(...) if(length(elts) == 0L) stop("no arguments") i4 <- isS4(elts[[1L]]) if(!i4 && all(vapply(elts, function(x) is.atomic(x) && !is.object(x), NA))) { mmm <- .Internal(pmin(na.rm, ...)) } else { mmm <- as.vector(elts[[1L]]) has.na <- FALSE for (each in elts[-1L]) { each <- as.vector(each) l1 <- length(each); l2 <- length(mmm) if(l2 < l1) { if (l2 && l1 %% l2) warning("an argument will be fractionally recycled") mmm <- rep(mmm, length.out = l1) } else if(l1 && l1 < l2) { if (l2 %% l1) warning("an argument will be fractionally recycled") each <- rep(each, length.out = l2) } nas <- cbind(is.na(mmm), is.na(each)) if(has.na || (has.na <- any(nas))) { mmm [nas[, 1L]] <- each[nas[, 1L]] each[nas[, 2L]] <- mmm [nas[, 2L]] } change <- mmm > each change <- change & !is.na(change) mmm[change] <- each[change] if (has.na && !na.rm) mmm[nas[, 1L] | nas[, 2L]] <- NA } } if(i4) { r <- elts[[1L]] tryCatch({ r[] <- mmm; r }, error = mmm) } else { mostattributes(mmm) <- attributes(elts[[1L]]) mmm } }
context("test-drawre") test_that("Whether the number of events in the Rainfall_Characteristics file (dataframe) mataches with the ones included in the sublist Rainfall_Events", { Time_series=five_minute_time_series RC=drawre(Time_series,IETD=3,Thres=0.5)$Rainfall_Characteristics RE=drawre(Time_series,IETD=3,Thres=0.5)$Rainfall_Events dl=length(RC[,2])-length(RE) expect_equal(dl,0)}) test_that("Whether the dry periods between the extracted storms are longer than IETD", { Time_series=five_minute_time_series RC=drawre(Time_series,IETD=3,Thres=0.5)$Rainfall_Characteristics dryP<-min(RC$Starting[-1]-RC$End[-length(RC$End)]) expect_gt(dryP,3) })
hcp_list_files = function(
  prefix = "",
  delimiter = NULL,
  query = NULL,
  ...
) {
  L = make_aws_call(path_to_file = prefix, ...)
  bucket = list(...)$bucket
  if (is.null(bucket)) bucket = formals(hcp_aws_url)$bucket
  query$delimiter = delimiter
  query$prefix = prefix
  ret = aws.s3::s3HTTP(
    bucket = bucket,
    path = "",
    verb = "GET",
    query = query,
    key = L$headers$access_key,
    secret = L$headers$secret_key,
    parse_response = FALSE,
    region = L$headers$default_region)
  httr::stop_for_status(ret)
  cr = httr::content(ret, as = "text", encoding = "UTF-8")
  if (cr != "") {
    res = xml2::read_xml(cr)
    res = xml2::as_list(res)
  } else {
    res = NULL
  }
  L = list(get_result = ret, content = cr, parsed_result = res)
  return(L)
}

# FCP/INDI files live in the "fcp-indi" bucket (consistent with
# fcp_list_dirs below)
fcp_list_files = function(
  prefix = "",
  delimiter = NULL,
  query = NULL,
  ...
) {
  hcp_list_files(
    prefix = prefix,
    delimiter = delimiter,
    query = query,
    bucket = "fcp-indi",
    sign = FALSE,
    ...)
}

# OpenNeuro files live in the "openneuro" bucket (consistent with
# openneuro_list_dirs below)
openneuro_list_files = function(
  prefix = "",
  delimiter = NULL,
  query = NULL,
  ...
) {
  hcp_list_files(
    prefix = prefix,
    delimiter = delimiter,
    query = query,
    bucket = "openneuro",
    sign = FALSE,
    ...)
}

hcp_list_dirs = function(
  prefix = "HCP/",
  ...
) {
  if (length(prefix) > 0) {
    if (!grepl("/$", prefix)) {
      prefix = paste0(prefix, "/")
    }
  }
  return(hcp_list_files(..., prefix = prefix, delimiter = "/"))
}

fcp_list_dirs = function(
  prefix = "data/Projects/",
  ...
) {
  hcp_list_dirs(
    prefix = prefix,
    bucket = "fcp-indi",
    sign = FALSE,
    ...)
}

openneuro_list_dirs = function(
  prefix = NULL,
  ...
) {
  hcp_list_dirs(
    prefix = prefix,
    bucket = "openneuro",
    sign = FALSE,
    ...)
}
vkGetAdCategories <- function(
  version      = c("v1", "v2"),
  username     = getOption("rvkstat.username"),
  api_version  = getOption("rvkstat.api_version"),
  token_path   = vkTokenPath(),
  access_token = getOption("rvkstat.access_token")
) {

  # resolve version to a single value ("v1" by default); without this,
  # the length-2 default vector would be used to index the response below
  version <- match.arg(version)

  if ( is.null(access_token) ) {
    if ( Sys.getenv("RVK_API_TOKEN") != "" ) {
      access_token <- Sys.getenv("RVK_API_TOKEN")
    } else {
      access_token <- vkAuth(username   = username,
                             token_path = token_path)$access_token
    }
  }

  if ( inherits(access_token, "vk_auth") ) {
    access_token <- access_token$access_token
  }

  answer <- GET("https://api.vk.com/method/ads.getCategories",
                query = list(
                  access_token = access_token,
                  v = api_version
                ))

  stop_for_status(answer)
  dataRaw <- content(answer, "parsed", "application/json")

  if ( !is.null(dataRaw$error) ) {
    stop(paste0("Error ", dataRaw$error$error_code, " - ",
                dataRaw$error$error_msg))
  }

  result <- tibble(response = dataRaw$response[[version]]) %>%
    unnest_wider("response") %>%
    unnest_longer("subcategories") %>%
    unnest_wider("subcategories", names_sep = "_")

  return(result)
}
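## Illustrative call (not run): requires a valid VK Ads access token.
## `version` picks which block of the ads.getCategories response to flatten
## ("v1" or "v2"); the token below is a placeholder, not a real credential.
## ad_cats <- vkGetAdCategories(version = "v2", access_token = "<your token>")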
simpgen1hm2 <- function (n1, n2, rho, beta = c(0, 0, 0, 0)) { n <- n1 + n2 nstop <- 500 + n err <- rnorm(1) for (i in 2:nstop) { err[i] <- rho * err[i - 1] + rnorm(1) } errs <- err[501:nstop] xmat <- hmdesign2(n1, n2) y <- xmat %*% beta + errs mat <- cbind(xmat, y) return(mat) }
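## Minimal sketch of simpgen1hm2(): it burns in 500 AR(1) error draws, keeps
## the next n1 + n2, and returns cbind(design, y). hmdesign2() is assumed to
## be the two-phase design helper from the same package; the beta values are
## illustrative only.
if (exists("hmdesign2")) {
  set.seed(42)
  sim <- simpgen1hm2(n1 = 10, n2 = 10, rho = 0.5, beta = c(1, 0.5, 0, 0))
  dim(sim)  # 20 rows; the last column is the simulated response y
}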
APcontours <- function(inRaster, interval, max.contour.segments = NULL){
  if (!is.null(max.contour.segments)) {
    oo <- options(max.contour.segments = max.contour.segments)
    on.exit(options(oo))
  }
  rasInfo <- suppressWarnings(tryCatch(rgdal::GDALinfo(inRaster@file@name),
                                       error = function(e) FALSE))
  # inherits() is safer than class() == here, since class() may return a vector
  rasterRange <- if (inherits(rasInfo, "GDALobj")) {
    unlist(attributes(rasInfo)$df[, c("Bmin", "Bmax")])
  } else {
    raster::cellStats(inRaster, stat = "range")
  }
  contLevels <- seq(AProunder(rasterRange[1], interval, "up"),
                    AProunder(rasterRange[2], interval, "down"),
                    by = interval)
  cont <- raster::rasterToContour(inRaster, levels = contLevels)
  cont[["level"]] <- as.numeric(as.character(cont[["level"]]))
  return(cont)
}
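## Hedged sketch of APcontours() on a synthetic in-memory raster; with no
## file on disk the function falls back to raster::cellStats() for the value
## range. Assumes the package-internal AProunder() helper is loaded.
if (requireNamespace("raster", quietly = TRUE) && exists("AProunder")) {
  r <- raster::raster(matrix(runif(100, 0, 97), nrow = 10))
  cont <- APcontours(r, interval = 10)
  cont[["level"]]  # contour levels at multiples of the interval
}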
source("https://raw.githubusercontent.com/leesharpe/nfldata/master/code/plays.R") series_data <- plays %>% filter(season == 2018) %>% filter(!is.na(series) & !is.na(series_success)) %>% filter(!is.na(epa) & wp >= 0.2 & wp <= 0.8) %>% distinct(posteam,game_id,series,series_success) %>% group_by(posteam) %>% summarize(series_success_rate=mean(series_success)) epa_data <- plays %>% filter(season == 2018) %>% filter(!is.na(series) & !is.na(series_success)) %>% filter(!is.na(epa) & wp >= 0.2 & wp <= 0.8) %>% group_by(posteam) %>% summarize(mean_epa=mean(epa)) logos <- read_csv("https://raw.githubusercontent.com/leesharpe/nfldata/master/data/logos.csv") series_epa <- series_data %>% inner_join(epa_data,by=c("posteam"="posteam")) %>% inner_join(logos,by=c("posteam"="team")) ggplot(series_epa,aes(x=series_success_rate,y=mean_epa)) + theme_minimal() + geom_image(aes(image = url), size = 0.05) + xlab("Series Success Rate") + ylab("EPA/Play") + labs(title="2018: Series Success Rate vs. EPA/Play", subtitle="Analysis by @LeeSharpeNFL", caption="Data from nflscrapR")
gprimeprimegammak = function(bsrkr, I, phi, smlgamma) {
  gprimeprime <- 0
  nRe <- sum(I > 0)
  n <- length(I)
  for (i in 2:nRe) {
    gprimeprime <- gprimeprime + (i^2) * (phi^2) * exp(-i * phi * smlgamma) /
      ((1 - exp(-i * phi * smlgamma))^2)
  }
  for (i in 2:nRe) {
    gprimeprime <- gprimeprime - (phi^2) * exp(-phi * smlgamma) /
      ((1 - exp(-phi * smlgamma))^2)
  }
  Cgamma <- sum(c(1:(nRe + 1))^(-smlgamma))
  # second derivative of log(Cgamma) w.r.t. gamma:
  # (Cgamma * normi - normi2) / Cgamma^2, where
  #   normi  = sum i^(-gamma) * (log i)^2   (C'')
  #   normi2 = (sum i^(-gamma) * log i)^2   ((C')^2)
  # the original code multiplied by an always-zero normi1, dropping the
  # C'' term; normi was computed but never used
  normi <- sum(c(1:(nRe + 1))^(-smlgamma) * (log(c(1:(nRe + 1)))^2))
  normi2 <- (sum(c(1:(nRe + 1))^(-smlgamma) * log(c(1:(nRe + 1)))))^2
  gprimeprime <- gprimeprime - (n - nRe) / (Cgamma^2) * (Cgamma * normi - normi2)
  return(gprimeprime)
}
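## Illustrative call: `I` is a vector whose positive entries mark observed
## events (bsrkr is unused by the function body); phi and smlgamma are
## made-up values, so the returned curvature is purely illustrative.
gprimeprimegammak(bsrkr = NULL, I = c(1, 1, 1, 1, 0, 0),
                  phi = 0.8, smlgamma = 1.2)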
modify_graph_vd <- function(g, f, cores = 1L) { g$from <- as.character(g$from) g$to <- as.character(g$to) if(any(colnames(g)[1:2] != c("from", "to"))) { stop("The first two columns of g must be named 'from' and 'to'") } if(!.nodesExists(g, f)) { stop("All nodes used in f must exists in g.") } if(.hasSubpaths(f)) { stop("There are subpaths of some forbidden paths that belong to another forbidden paths. Use modify_graph_hsu() instead.") } ncol <- ncol(g) cluster <- makeCluster(cores) registerDoParallel(cluster) on.exit(parallel::stopCluster(cluster)) firstOutput <- foreach(i = 1:nrow(f), .combine = .comb, .multicombine = TRUE, .export = ".get_arc_attributes") %dopar% { tempNewArcs <- g[0,] preNode <- f[i,1] tempBannedArcs <- g[0,1:2] for(level in 2:(length(f[i,]) - 1) ) { nodeName <- paste0(as.character(f[i,])[1:level], collapse = "|") if(ncol > 2) { tempNewArcs[nrow(tempNewArcs) + 1,] <- list(preNode, nodeName, .get_arc_attributes(g, f[i, level-1], f[i, level])) } else tempNewArcs[nrow(tempNewArcs) + 1,] <- list(preNode, nodeName) preNode <- nodeName } tempBannedArcs[nrow(tempBannedArcs) + 1,] <- c(nodeName, f[i, level + 1]) tempDelete <- g[0,1:2] tempDelete[1,] <- c(f[i,1], f[i,2]) list(tempNewArcs, tempDelete, tempBannedArcs) } g <- dplyr::anti_join(g, firstOutput[[2]], by = c("from", "to")) newNodes <- firstOutput[[1]]$to secondOutput <- foreach(nn = newNodes, .combine = rbind, .export = ".get_arc_attributes") %dopar% { nsName <- gsub(".*\\|(.*)", "\\1", nn) toNodes <- setdiff(subset(g[,1:2], from == nsName)$to, subset(firstOutput[[3]], from == nsName | from == nn)$to) tempNewArcs <- g[0,] for(toNode in toNodes) { newTo <- newNodes[grepl(paste0(nsName, "\\|", toNode, "$"), newNodes)] if(!identical(newTo, character(0))) { if(ncol > 2) { tempNewArcs[nrow(tempNewArcs) + 1,] <- list(nn, newTo, .get_arc_attributes(g, nsName, toNode)) } else tempNewArcs[nrow(tempNewArcs) + 1,] <- list(nn, newTo) } else if(! any(apply( firstOutput[[3]], 1, function(x) paste(x, collapse="") == paste(c(nn,toNode), collapse="")) ) ) { if(ncol > 2) { tempNewArcs[nrow(tempNewArcs) + 1,] <- list(nn, toNode, .get_arc_attributes(g, nsName, toNode)) } else tempNewArcs[nrow(tempNewArcs) + 1,] <- list(nn, toNode) } } tempNewArcs } return( rbind(g, firstOutput[[1]]) %>% rbind(secondOutput) ) }
.slda.collapsed.gibbs.sampler <- function (documents, K, vocab, num.iterations, alpha, eta, annotations, beta, variance, logistic = FALSE, method = "sLDA", lambda, initial = NULL, burnin = NULL, trace = 0L) { retval <- structure(.Call("collapsedGibbsSampler", documents, as.integer(K), as.integer(length(vocab)), as.integer(num.iterations), as.double(alpha), as.double(eta),if (!logistic) as.double(annotations) else if (method=="sLDA" & logistic) as.integer(annotations) else as.logical(annotations), as.double(beta), as.double(variance), pmatch(method, c("sLDA", "corrLDA", "prodLDA")), as.double(lambda), NULL, NULL, initial, burnin, FALSE, trace, FALSE), names = c("assignments", "topics", "topic_sums", "document_sums")) colnames(retval$topics) <- vocab retval } .model.filenames <- function (data.dir, by.time = TRUE, files = c("elbo", "beta", "phi")) { stopifnot(by.time) all.files <- list.files(data.dir, "elbo-*", full.names = TRUE) golden.file <- all.files[which.max(file.info(all.files)$mtime)] iteration <- strsplit(golden.file, "-") stopifnot(length(iteration) == 1) iteration <- as.numeric(iteration[[1]][length(iteration[[1]])]) c(structure(paste(data.dir, "/", files, "-", iteration, sep = ""), names = files), iteration = iteration) } .read.beta <- function (filename, vocab = NULL, num.topics = NULL, ignore.last.row = TRUE) { stopifnot(is.null(num.topics)) stopifnot(!is.null(vocab)) result <- matrix(scan(filename, what = 0), byrow = TRUE, nrow = length(vocab) + ifelse(ignore.last.row, 1, 0)) if (ignore.last.row) { result <- result[-dim(result)[1], ] } if (!is.null(vocab)) { rownames(result) <- vocab } result } .pairwise.link.lda.collapsed.gibbs.sampler <- function (documents, K, vocab, num.iterations, alpha, eta, nbeta, net.annotations, initial = NULL, burnin = NULL, trace = 0L) { retval <- structure(.Call("collapsedGibbsSampler", documents, as.integer(K), as.integer(length(vocab)), as.integer(num.iterations), as.double(alpha), as.double(eta), NULL, NULL, NULL, NULL, NULL, nbeta, as.logical(net.annotations), initial, burnin, FALSE, trace, FALSE), names = c("assignments", "topics", "topic_sums", "document_sums", if (is.null(burnin)) NA else "document_expects", "net.assignments.left", "net.assignments.right", "blocks.neg", "blocks.pos")) colnames(retval$topics) <- vocab retval } .documents.as.Matrix <- function (docs, vocab) { ii <- rep(1:length(docs), times = sapply(docs, function(x) dim(x)[2])) both <- do.call(cbind, docs) condensed <- xtabs(both[2, ] ~ ii + both[1, ], sparse = TRUE) stopifnot(ncol(condensed) == length(vocab)) colnames(condensed) <- vocab condensed }
pmx_dens <- function(
  x,
  labels,
  dname = NULL,
  xlim = 3,
  var_line = NULL,
  snd_line = NULL,
  vline = NULL,
  is.legend = TRUE,
  ...) {
  assert_that(is_string_or_null(dname))
  if (is.null(dname)) dname <- "predictions"

  if (missing(labels)) {
    labels <- list(
      title = sprintf("Density plot of %s", x),
      y = "",
      x = "",
      subtitle = ""
    )
  }
  assert_that(is_list(labels))
  default_var_line <- list(linetype = 1, colour = "black", size = 1)
  var_line <- l_left_join(default_var_line, var_line)
  default_snd_line <- list(linetype = 2, colour = "black", size = 1)
  snd_line <- l_left_join(default_snd_line, snd_line)
  default_vline <- list(linetype = 3, colour = "black", size = 1)
  # merge user-supplied vline settings (not snd_line) into the vline defaults
  vline <- l_left_join(default_vline, vline)
  labels$subtitle <- ""
  structure(list(
    ptype = "PMX_DENS",
    strat = TRUE,
    x = x,
    dname = dname,
    xlim = xlim,
    var_line = var_line,
    snd_line = snd_line,
    vline = vline,
    is.legend = is.legend,
    gp = pmx_gpar(
      labels = labels,
      discrete = TRUE,
      is.smooth = FALSE
    )
  ), class = c("pmx_dens", "pmx_gpar"))
}

plot_pmx.pmx_dens <- function(x, dx, ...) {
  dx <- dx[!is.infinite(get(x$x))]
  with(x, {
    xrange <- c(-xlim, xlim)
    vline_layer <- if (!is.null(vline)) {
      params <- append(list(xintercept = 0), vline)
      do.call(geom_vline, params)
    }
    p <- ggplot(dx, aes(x = get(x), colour = "variable density")) +
      geom_density(aes(IWRES, linetype = "variable density"), alpha = 0.5) +
      # the data frame must be passed as `data`; the first positional
      # argument of stat_function() is the mapping
      stat_function(data = data.frame(x = xrange), fun = dnorm,
                    mapping = aes(x, colour = "normal density",
                                  linetype = "normal density")) +
      scale_linetype_manual(values = c(snd_line$linetype, var_line$linetype),
                            guide = FALSE) +
      scale_colour_manual(values = c(snd_line$colour, var_line$colour),
                          guide = guide_legend(override.aes = list(
                            linetype = c(snd_line$linetype, var_line$linetype),
                            size = c(1, 1)), title = NULL)) +
      vline_layer
    if (is.legend) {
      gp$is.legend <- is.legend
      gp$legend.position <- "top"
    }
    if (!is.null(p)) p <- plot_pmx(gp, p)
    p <- p + coord_cartesian(xlim = xrange) + theme(aspect.ratio = 1)
    p
  })
}
"sens"
cartography_pals <- cartography:::cartography.colors ggthemes_ptol_pals <- list(qualitative = ggthemes::ggthemes_data$ptol$qualitative) ggthemes_solarized_pals <- ggthemes::ggthemes_data$solarized$palettes palettes_dynamic <- list( cartography = cartography_pals, ggthemes_ptol = ggthemes_ptol_pals, ggthemes_solarized = ggthemes_solarized_pals ) usethis::use_data(palettes_dynamic, overwrite = TRUE)
test_that("regr_xyf", { requirePackagesOrSkip("kohonen", default.method = "load") parset.list1 = list( list(), list(grid = class::somgrid(xdim = 2L, ydim = 4L)), list(rlen = 50L) ) parset.list2 = list( list(), list(xdim = 2L, ydim = 4L), list(rlen = 50L) ) old.predicts.list = list() for (i in seq_along(parset.list1)) { pars = parset.list1[[i]] pars$data = as.matrix(regr.num.train[, -regr.num.class.col]) pars$Y = regr.num.train[, regr.num.class.col] set.seed(getOption("mlr.debug.seed")) m = do.call(kohonen::xyf, pars) p = predict(m, as.matrix(regr.num.test[, -regr.num.class.col])) old.predicts.list[[i]] = as.vector(p$prediction) } testSimpleParsets("regr.xyf", regr.num.df, regr.num.target, regr.num.train.inds, old.predicts.list, parset.list2) })
library(polmineR) library(data.table) testthat::context("partition_bundle") test_that( "partition_bundle", { pb <- partition_bundle("GERMAPARLMINI", s_attribute = "speaker") s <- summary(pb) expect_equal(s[grep("Schwall-", s[["name"]]), ][["size"]], 1230L) expect_equal(sum(s[["size"]]), size("GERMAPARLMINI")) scb <- split(x = corpus("GERMAPARLMINI"), s_attribute = "speaker") expect_equal(length(pb), length(scb)) expect_equal(lapply(scb@objects, size), lapply(pb@objects, size)) foo <- lapply(seq_along(scb@objects), function(i){ expect_equal(pb[[i]]@cpos, scb[[i]]@cpos); invisible(NULL) }) expect_equal(names(pb), names(scb)) expect_equal(as(pb, "subcorpus_bundle"), scb) p <- partition("GERMAPARLMINI", interjection = "speech") pb <- partition_bundle(p, s_attribute = "speaker") s <- summary(pb) expect_equal(s[grep("Schwall-", s[["name"]]), ][["size"]], 1119L) sc <- subset(corpus("GERMAPARLMINI"), interjection == "speech") scb <- split(sc, s_attribute = "speaker") expect_equal(length(scb), length(pb)) expect_equal(all(names(scb) %in% names(pb)), TRUE) pb_2 <- pb[[names(scb)]] pb@objects <- pb_2@objects expect_equal(names(scb), names(pb)) expect_equal(lapply(scb@objects, size), lapply(pb@objects, size)) foo <- lapply(seq_along(scb@objects), function(i){ expect_equal(pb[[i]]@cpos, scb[[i]]@cpos); invisible(NULL) }) expect_equal(names(pb), names(scb)) y <- as(pb, "subcorpus_bundle") expect_equal(y, scb) } )
met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, lat = NA, lon = NA, overwrite = FALSE, verbose = FALSE, leap_year = TRUE, ...) { overwrite <- as.logical(overwrite) start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") met_folder <- outfolder met_header_file <- file.path(met_folder, "ED_MET_DRIVER_HEADER") results <- data.frame( file = met_header_file, host = PEcAn.remote::fqdn(), mimetype = "text/plain", formatname = "ed.met_driver_header files format", startdate = start_date, enddate = end_date, dbfile.name = "ED_MET_DRIVER_HEADER", stringsAsFactors = FALSE ) dir.create(met_folder, recursive = TRUE, showWarnings = FALSE) month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") day2mo <- function(year, day, leap_year) { dm <- c(0, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) dl <- c(0, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336, 367) mo <- rep(NA, length(day)) if (!leap_year) { mo <- findInterval(day, dm) return(mo) } else { leap <- lubridate::leap_year(year) mo[leap] <- findInterval(day[leap], dl) mo[!leap] <- findInterval(day[!leap], dm) return(mo) } } start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) year_seq <- seq(start_year, end_year) day_secs <- udunits2::ud.convert(1, "day", "seconds") need_input_files <- file.path(in.path, paste(in.prefix, year_seq, "nc", sep = ".")) have_input_files <- file.exists(need_input_files) if (!all(have_input_files)) { PEcAn.logger::logger.severe( "Missing the following required input files: ", paste(sprintf("'%s'", need_input_files[!have_input_files]), collapse = ", ") ) } month_seq <- seq( lubridate::floor_date(start_date, "month"), lubridate::floor_date(end_date, "month"), by = "1 month" ) target_fnames <- paste0(toupper(strftime(month_seq, "%Y%b", tz = "UTC")), ".h5") target_out_files <- file.path(met_folder, target_fnames) have_target_out_files <- file.exists(target_out_files) if (any(have_target_out_files)) { if (overwrite) { PEcAn.logger::logger.warn( "The following existing target output files will be overwritten:", paste(sprintf("'%s'", target_out_files[have_target_out_files]), collapse = ", ") ) } else { have_output_byyear <- split(have_target_out_files, lubridate::year(month_seq)) complete_years <- vapply(have_output_byyear, all, logical(1)) skip_years <- tryCatch( as.numeric(names(complete_years[complete_years])), warning = function(e) PEcAn.logger::logger.severe(e) ) PEcAn.logger::logger.warn( "The following output files already exist:", paste(target_out_files[have_target_out_files]), ". 
This means the following complete years will be skipped: ", skip_years ) year_seq <- setdiff(year_seq, skip_years) } } for (year in year_seq) { ncfile <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) nc <- ncdf4::nc_open(ncfile) flat <- try(ncdf4::ncvar_get(nc, "latitude"), silent = TRUE) if (!is.numeric(flat)) { flat <- nc$dim[[1]]$vals[1] } if (is.na(lat)) { lat <- drop(flat) } else if (lat != flat) { PEcAn.logger::logger.warn("Latitude does not match that of file", lat, "!=", flat) } flon <- try(ncdf4::ncvar_get(nc, "longitude"), silent = TRUE) if (!is.numeric(flon)) { flon <- nc$dim[[2]]$vals[1] } if (is.na(lon)) { lon <- drop(flon) } else if (lon != flon) { PEcAn.logger::logger.warn("Longitude does not match that of file", lon, "!=", flon) } tdays <- nc$dim$time$vals Tair <- ncdf4::ncvar_get(nc, "air_temperature") Qair <- ncdf4::ncvar_get(nc, "specific_humidity") U <- try(ncdf4::ncvar_get(nc, "eastward_wind"), silent = TRUE) V <- try(ncdf4::ncvar_get(nc, "northward_wind"), silent = TRUE) Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") pres <- ncdf4::ncvar_get(nc, "air_pressure") SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air"), silent = TRUE) use_UV <- is.numeric(U) & is.numeric(V) if(!use_UV){ U <- try(ncdf4::ncvar_get(nc, "wind_speed"), silent = TRUE) if(is.numeric(U)){ PEcAn.logger::logger.info("eastward_wind and northward_wind are absent, using wind_speed to approximate eastward_wind") V <- rep(0, length(U)) }else{ PEcAn.logger::logger.severe("No eastward_wind and northward_wind or wind_speed in the met data") } } useCO2 <- is.numeric(CO2) sec <- udunits2::ud.convert(tdays, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") ncdf4::nc_close(nc) dt <- drop(unique(diff(sec))) if (length(dt) > 1) { dt_old <- dt dt <- drop(round(mean(diff(sec)))) PEcAn.logger::logger.warn(paste0( "Time step (`dt`) is not uniform! Identified ", length(dt_old), " unique time steps. ", "`head(dt)` (in seconds): ", paste(utils::head(dt_old), collapse = ", "), " Using the rounded mean difference as the time step: ", dt )) } toff <- -as.numeric(lst) * 3600 / dt slen <- seq_along(sec) Tair <- c(rep(Tair[1], toff), Tair)[slen] Qair <- c(rep(Qair[1], toff), Qair)[slen] U <- c(rep(U[1], toff), U)[slen] V <- c(rep(V[1], toff), V)[slen] Rain <- c(rep(Rain[1], toff), Rain)[slen] pres <- c(rep(pres[1], toff), pres)[slen] SW <- c(rep(SW[1], toff), SW)[slen] LW <- c(rep(LW[1], toff), LW)[slen] if (useCO2) { CO2 <- c(rep(CO2[1], toff), CO2)[slen] } doy <- floor(tdays) + 1 invalid_doy <- doy < 1 | doy > PEcAn.utils::days_in_year(year, leap_year) if (any(invalid_doy)) { PEcAn.logger::logger.severe(paste0( "Identified at least one invalid day-of-year (`doy`). ", "PEcAn met standard uses days since start of year as its time unit, ", "so this suggests a problem with the input met file. ", "Invalid values are: ", paste(doy[invalid_doy], collapse = ", "), ". 
", "Source file is: ", normalizePath(ncfile) )) } hr <- (tdays %% 1) * 24 cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(doy, lat, lon, dt, hr) rpot <- 1366 * cosz rpot <- rpot[seq_along(tdays)] SW[rpot < SW] <- rpot[rpot < SW] frac <- SW/rpot frac[frac > 0.9] <- 0.9 frac[frac < 0] <- 0 frac[is.na(frac)] <- 0 frac[is.nan(frac)] <- 0 SWd <- SW * (1 - frac) n <- length(Tair) nbdsfA <- (SW - SWd) * 0.57 nddsfA <- SWd * 0.48 vbdsfA <- (SW - SWd) * 0.43 vddsfA <- SWd * 0.52 prateA <- Rain dlwrfA <- LW presA <- pres hgtA <- rep(50, n) ugrdA <- U vgrdA <- V shA <- Qair tmpA <- Tair if (useCO2) { co2A <- CO2 * 1e+06 } mo <- day2mo(year, doy, leap_year) for (m in unique(mo)) { selm <- which(mo == m) mout <- file.path(met_folder, paste0(year, month[m], ".h5")) if (file.exists(mout)) { if (overwrite) { file.remove(mout) ed_met_h5 <- hdf5r::H5File$new(mout) } else { PEcAn.logger::logger.warn("The file already exists! Moving to next month!") next } } else { ed_met_h5 <- hdf5r::H5File$new(mout) } dims <- c(length(selm), 1, 1) nbdsf <- array(nbdsfA[selm], dim = dims) nddsf <- array(nddsfA[selm], dim = dims) vbdsf <- array(vbdsfA[selm], dim = dims) vddsf <- array(vddsfA[selm], dim = dims) prate <- array(prateA[selm], dim = dims) dlwrf <- array(dlwrfA[selm], dim = dims) pres <- array(presA[selm], dim = dims) hgt <- array(hgtA[selm], dim = dims) ugrd <- array(ugrdA[selm], dim = dims) vgrd <- array(vgrdA[selm], dim = dims) sh <- array(shA[selm], dim = dims) tmp <- array(tmpA[selm], dim = dims) if (useCO2) { co2 <- array(co2A[selm], dim = dims) } ed_met_h5[["nbdsf"]] <- nbdsf ed_met_h5[["nddsf"]] <- nddsf ed_met_h5[["vbdsf"]] <- vbdsf ed_met_h5[["vddsf"]] <- vddsf ed_met_h5[["prate"]] <- prate ed_met_h5[["dlwrf"]] <- dlwrf ed_met_h5[["pres"]] <- pres ed_met_h5[["hgt"]] <- hgt ed_met_h5[["ugrd"]] <- ugrd ed_met_h5[["vgrd"]] <- vgrd ed_met_h5[["sh"]] <- sh ed_met_h5[["tmp"]] <- tmp if (useCO2) { ed_met_h5[["co2"]] <- co2 } ed_met_h5$close_all() } metvar <- c("nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2") metvar_table <- data.frame( variable = metvar, update_frequency = dt, flag = 1 ) if (!useCO2) { metvar_table_vars <- metvar_table[metvar_table$variable != "co2",] }else{ metvar_table_vars <- metvar_table } ed_metheader <- list(list( path_prefix = met_folder, nlon = 1, nlat = 1, dx = 1, dy = 1, xmin = lon, ymin = lat, variables = metvar_table_vars )) check_ed_metheader(ed_metheader) write_ed_metheader(ed_metheader, met_header_file, header_line = shQuote("Made_by_PEcAn_met2model.ED2")) } PEcAn.logger::logger.info("Done with met2model.ED2") return(invisible(results)) }
context("Homolog probability") test_that("computes homolog probabilities correctly", { x1 <- get_submap(solcap.dose.map[[1]], 1:20, reestimate.rf = F) probs.t1<-calc_genoprob(input.map = x1, verbose = TRUE) expect_equal(var(probs.t1$probs), 0.0153, tolerance = 1e-3) hom.t1 <- calc_homologprob(input.genoprobs = probs.t1) expect_equal(var(hom.t1$homoprob$probability), 0.187, tolerance = 1e-3) skip_if_not(capabilities("long.double")) expect_is(plot(hom.t1), "plotly") skip_if_not(capabilities("long.double")) expect_is(plot(hom.t1, stack = T), "plotly") skip_if_not(capabilities("long.double")) expect_is(plot(hom.t1, use.plotly = FALSE), "ggplot") })
args <- commandArgs(TRUE)
if (length(args) == 0) {
  stop("Error, require params: infercnv.obj")
}
infercnv_obj_file = args[1]

pdf(paste0(infercnv_obj_file, '.dropout.pdf'))

infercnv_obj = readRDS(infercnv_obj_file)

library(edgeR)
library(fitdistrplus)
library(infercnv)

get_parameters <- function(group_name, expr.matrix) {
  params = list()
  params[['group_name']] = group_name
  lib.sizes <- colSums(expr.matrix)
  lib.med <- median(lib.sizes)
  norm.counts <- t(t(expr.matrix) / lib.sizes * lib.med)
  norm.counts <- norm.counts[rowSums(norm.counts > 0) > 1, ]
  mean_vs_p0_table = infercnv:::.get_mean_vs_p0_from_matrix(expr.matrix)
  logistic_params = infercnv:::.get_logistic_params(mean_vs_p0_table)
  params[['dropout.logistic.midpt']] = logistic_params$midpt
  params[['dropout.logistic.slope']] = logistic_params$slope
  mean_vs_p0_table = cbind(mean_vs_p0_table, logm = log(mean_vs_p0_table$m + 1))
  smoothScatter(mean_vs_p0_table$logm, mean_vs_p0_table$p0, main = group_name)
  points(mean_vs_p0_table$logm,
         infercnv:::.logistic(mean_vs_p0_table$logm,
                              logistic_params$midpt, logistic_params$slope),
         col = 'red')
  midpt_use = mean(mean_vs_p0_table$logm[mean_vs_p0_table$p0 > 0.48 &
                                           mean_vs_p0_table$p0 < 0.52])
  points(mean_vs_p0_table$logm,
         infercnv:::.logistic(mean_vs_p0_table$logm,
                              midpt_use, logistic_params$slope),
         col = 'magenta')
  s = smooth.spline(mean_vs_p0_table$logm, mean_vs_p0_table$p0)
  r = range(mean_vs_p0_table$logm)
  x = seq(r[1], r[2], 0.1)
  points(x, predict(s, x)$y, col = 'orange')
  return(params)
}

all_groups = c(infercnv_obj@observation_grouped_cell_indices,
               infercnv_obj@reference_grouped_cell_indices)
all_groups[['combined_normal']] <- unlist(infercnv_obj@reference_grouped_cell_indices)

for (group in names(all_groups)) {
  group_idxs = all_groups[[ group ]]
  expr.data = infercnv_obj@expr.data[, group_idxs]
  params = get_parameters(group, expr.data)
  params = t(as.data.frame(params))
  print(params)
}
context("GEV functions") my_tol <- 1e-5 pqgev_test_fn <- function(x, p) { loc <- x[1] scale <- x[2] shape <- x[3] qs <- qgev(p = p, loc = loc, scale = scale, shape = shape) ps <- pgev(qs, loc = loc, scale = scale, shape = shape) return(list(p = p, ps = ps)) } test_function <- function(x, test_string) { testthat::test_that(test_string, { testthat::expect_equal(x$p, x$ps, tolerance = my_tol) }) } ep <- 1e-10 loc_check <- 0 scale_check <- 2 shape_check <- c(-1, -0.5, -0.1, -ep, 0, ep, 0.1, 0.5, 1) par_vals <- cbind(loc_check, scale_check, shape_check) p_vals <- c(0.01, 0.1, 0.5, 0.9, 0.99) for (i in 1:nrow(par_vals)) { test_string <- paste("gev shape = ", par_vals[i, 3]) x <- pqgev_test_fn(x = par_vals[i, ], p = p_vals) test_function(x, test_string) } seed <- 28082017 rqgev_test_fn <- function(x) { loc <- x[, 1] scale <- x[, 2] shape <- x[, 3] n <- length(loc) set.seed(seed) qs <- rgev(n = n, loc = loc, scale = scale, shape = shape) set.seed(seed) us <- stats::runif(length(loc)) ps <- pgev(qs, loc = loc, scale = scale, shape = shape) return(list(us = us, ps = ps)) } test_function <- function(x, test_string) { testthat::test_that(test_string, { testthat::expect_equal(x$us, x$ps, tolerance = my_tol) }) } ep <- 1e-10 loc_check <- 0 scale_check <- 2 shape_check <- c(-1, -0.5, -0.1, -ep, 0, ep, 0.1, 0.5, 1) par_vals <- cbind(loc_check, scale_check, shape_check) test_string <- "rgev and pgev" x <- rqgev_test_fn(x = par_vals) test_function(x, test_string)
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

library(workloopR)
library(magrittr)
library(purrr)

workloop_trials_list <-
  system.file(
    "extdata/wl_duration_trials",
    package = 'workloopR') %>%
  read_ddf_dir(phase_from_peak = TRUE)

workloop_trials_list[1:2]

analyzed_wl_list <-
  system.file(
    "extdata/wl_duration_trials",
    package = 'workloopR') %>%
  read_analyze_wl_dir(sort_by = 'file_id',
                      phase_from_peak = TRUE,
                      cycle_def = 'lo',
                      keep_cycles = 3)

analyzed_wl_list[1:2]

analyzed_wl_list %>%
  summarize_wl_trials

non_ddf_list <-
  system.file(
    "extdata/twitch_csv",
    package = 'workloopR') %>%
  list.files(full.names = TRUE) %>%
  map(read.csv) %>%
  map(as_muscle_stim, type = "twitch")

non_ddf_list <-
  non_ddf_list %>%
  map(~{
    attr(.x, "stimulus_width") <- 0.2
    attr(.x, "stimulus_offset") <- 0.1
    return(.x)
  }) %>%
  map(fix_GR, 2)

file_ids <- paste0("0", 1:4, "-", 2:5, "mA-twitch.csv")

non_ddf_list <-
  non_ddf_list %>%
  map2(file_ids, ~{
    attr(.x, "file_id") <- .y
    return(.x)
  })

non_ddf_list

non_ddf_list %>%
  map_dfr(isometric_timing)
wave_match = function(wvl1, flx1, targetwvl){ wvlrng = which((targetwvl >= min(wvl1)) & (targetwvl <= max(wvl1))) f = splinefun(wvl1, flx1, method = 'natural') return(f(targetwvl[wvlrng])) }
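## Self-contained sketch of wave_match(): resample a synthetic spectrum onto
## a finer target wavelength grid; only target wavelengths falling inside
## range(wvl1) are returned.
wvl <- seq(4000, 7000, by = 10)
flx <- 2 + sin(wvl / 500)
target <- seq(3900, 7100, by = 1)
flx_matched <- wave_match(wvl, flx, target)
length(flx_matched)  # one interpolated flux value per in-range target wavelength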
context("full_record") skip_on_cran() test_that("full_record tsn", { vcr::use_cassette("full_record_tsn", { aa <- full_record(tsn = 202385) }) expect_is(aa, "list") expect_is(aa$publicationList, "list") expect_is(aa$coreMetadata, "list") }) test_that("full_record lsid", { vcr::use_cassette("full_record_lsid", { bb <- full_record(lsid = "urn:lsid:itis.gov:itis_tsn:180543") }) expect_is(bb, "list") expect_is(bb$publicationList, "list") expect_is(bb$coreMetadata, "list") }) test_that("full_record fails well", { expect_error(full_record(4, 5), "only one of") vcr::use_cassette("full_record-errors", { expect_error(full_record("asdfadf")) }) })
pdf_info_via_xpdf <- function(file, options = NULL) { outfile <- tempfile("pdfinfo") on.exit(unlink(outfile)) status <- system2("pdfinfo", c(options, shQuote(normalizePath(file))), stdout = outfile) tags <- c("Title", "Subject", "Keywords", "Author", "Creator", "Producer", "CreationDate", "ModDate", "Tagged", "Form", "Pages", "Encrypted", "Page size", "File size", "Optimized", "PDF version") re <- sprintf("^(%s)", paste(sprintf("%-16s", sprintf("%s:", tags)), collapse = "|")) lines <- readLines(outfile, warn = FALSE) ind <- grepl(re, lines) tags <- sub(": *", "", substring(lines[ind], 1L, 16L)) info <- split(sub(re, "", lines), cumsum(ind)) names(info) <- tags fmt <- "%a %b %d %X %Y" if (!is.null(d <- info$CreationDate)) info$CreationDate <- strptime(d, fmt) if (!is.null(d <- info$ModDate)) info$ModDate <- strptime(d, fmt) if (!is.null(p <- info$Pages)) info$Pages <- as.integer(p) info } pdf_info_via_gs <- function(file) { file <- normalizePath(file) gs_cmd <- tools::find_gs_cmd() out <- system2(gs_cmd, c("-dNODISPLAY -q", sprintf("-sFile=%s", shQuote(file)), system.file("ghostscript", "pdf_info.ps", package = "tm")), stdout = TRUE) out <- out[cumsum(out == "") == 2L][-1L] val <- sub("^[^:]+:[[:space:]]*", "", out) names(val) <- sub(":.*", "", out) val <- as.list(val) if (!is.null(d <- val$CreationDate)) val$CreationDate <- PDF_Date_to_POSIXt(d) if (!is.null(d <- val$ModDate)) val$ModDate <- PDF_Date_to_POSIXt(d) val } PDF_Date_to_POSIXt <- function(s) { s <- sub("^D:", "", s) s <- gsub("'", "", s) if (nchar(s) <= 14L) { s <- sprintf("%s%s", s, substring(" 0101000000", nchar(s) + 1L, 14L)) strptime(s, "%Y%m%d%H%M%S") } else if (substring(s, 15L, 15L) == "Z") { strptime(substring(s, 1L, 14L), "%Y%m%d%H%M%S") } else { strptime(s, "%Y%m%d%H%M%S%z") } } pdf_text_via_gs <- function(file) { file <- normalizePath(file) gs_cmd <- tools::find_gs_cmd() tf <- tempfile("pdf") on.exit(unlink(tf)) res <- system2(gs_cmd, c("-q -dNOPAUSE -dBATCH -P- -dSAFER -sDEVICE=ps2write", sprintf("-sOutputFile=%s", tf), "-c save pop -f", shQuote(file))) txt <- system2(gs_cmd, c("-q -dNODISPLAY -P- -dSAFER -dDELAYBIND -dWRITESYSTEMDICT -dSIMPLE", "-c save -f ps2ascii.ps", tf, "-c quit"), stdout = TRUE) if (any(grepl("Error handled by opdfread.ps", txt))) { stop(paste(c("Ghostscript failed, with output:", txt), collapse = "\n")) } strsplit(paste(txt, collapse = "\n"), "\f")[[1L]] }
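## Quick checks of PDF_Date_to_POSIXt() on made-up strings covering the two
## explicit branches it handles (a trailing "Z" vs. a numeric UTC offset):
PDF_Date_to_POSIXt("D:20210305120000Z")
PDF_Date_to_POSIXt("D:20210305120000+01'00'")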
downloadChEBI <- function(release = "latest", woAssociations = FALSE) { chebi_download <- tempdir() download.file( "ftp://ftp.ebi.ac.uk/pub/databases/chebi/archive/", paste0(chebi_download, "releases.txt"), quiet = TRUE, method = "libcurl" ) releases <- gsub("rel", "", read.table( paste0(chebi_download, "releases.txt"), quote = "\"", comment.char = "" )[, 9]) message("Validating ChEBI release number ... ", appendLF = FALSE) if (release == "latest") { release <- max(releases) } else { release <- releases[match(release, releases)] } message("OK") ftp <- paste0( "ftp://ftp.ebi.ac.uk/pub/databases/chebi/archive/rel", release, "/Flat_file_tab_delimited/" ) message("Downloading compounds ... ", appendLF = FALSE) download.file(paste0(ftp, "compounds.tsv.gz"), paste0(chebi_download, "compounds.tsv"), quiet = TRUE) compounds <- as.data.frame.array(read.delim2(paste0(chebi_download, "compounds.tsv"))) message("DONE", appendLF = TRUE) message("Downloading synonyms ... ", appendLF = FALSE) download.file(paste0(ftp, "names.tsv.gz"), paste0(chebi_download, "names.tsv"), quiet = TRUE) names <- suppressWarnings(as.data.frame.array(read.delim2(paste0( chebi_download, "names.tsv" )))) message("DONE", appendLF = TRUE) message("Downloading formulas ... ", appendLF = FALSE) download.file(paste0(ftp, "chemical_data.tsv"), paste0(chebi_download, "formulas.tsv"), quiet = TRUE) formulas <- suppressWarnings(as.data.frame.array(read.delim2( paste0(chebi_download, "formulas.tsv") ))) message("DONE", appendLF = TRUE) message("Building ChEBI ... ", appendLF = TRUE) compounds <- compounds[compounds[, "STAR"] >= 3, ] latest <- compounds[, c("ID", "NAME")] old <- compounds[, c("ID", "PARENT_ID")] old <- merge( x = old, y = latest, by.x = "PARENT_ID", by.y = "ID" ) compounds <- rbind(latest, old[, c("ID", "NAME")]) compounds[compounds[, "NAME"] == "null", "NAME"] <- NA compounds <- compounds[complete.cases(compounds), ] DB <- suppressWarnings(( merge( compounds[, c("ID", "NAME")], names[, c("COMPOUND_ID", "SOURCE", "NAME")], by.x = "ID", by.y = "COMPOUND_ID", all.x = TRUE ) )) ChEBI <- unique(DB[, c("ID", "NAME.x")]) colnames(ChEBI) <- c("ID", "ChEBI") message(" KEGG Associations ... ", appendLF = FALSE) KEGG <- unique(DB[DB[, "SOURCE"] == "KEGG COMPOUND", c("ID", "NAME.y")]) KEGG <- KEGG[complete.cases(KEGG), ] colnames(KEGG) <- c("ID", "KEGG") message("DONE", appendLF = TRUE) message(" IUPAC Associations ... ", appendLF = FALSE) IUPAC <- unique(DB[DB[, "SOURCE"] == "IUPAC", c("ID", "NAME.y")]) IUPAC <- IUPAC[complete.cases(IUPAC), ] colnames(IUPAC) <- c("ID", "IUPAC") message("DONE", appendLF = TRUE) message(" MetaCyc Associations ... ", appendLF = FALSE) MetaCyc <- unique(DB[DB[, "SOURCE"] == "MetaCyc", c("ID", "NAME.y")]) MetaCyc <- MetaCyc[complete.cases(MetaCyc), ] colnames(MetaCyc) <- c("ID", "MetaCyc") message("DONE", appendLF = TRUE) message(" ChEMBL Associations ... ", appendLF = FALSE) ChEMBL <- unique(DB[DB[, "SOURCE"] == "ChEMBL", c("ID", "NAME.y")]) ChEMBL <- ChEMBL[complete.cases(ChEMBL), ] colnames(ChEMBL) <- c("ID", "ChEMBL") message("DONE", appendLF = TRUE) DB <- unique(merge(DB["ID"], ChEBI, by = "ID", all.x = TRUE)) DB <- unique(merge(DB, KEGG, by = "ID", all.x = TRUE)) DB <- unique(merge(DB, IUPAC, by = "ID", all.x = TRUE)) DB <- unique(merge(DB, MetaCyc, by = "ID", all.x = TRUE)) DB <- unique(merge(DB, ChEMBL, by = "ID", all.x = TRUE)) rm(ChEBI, ChEMBL, compounds, IUPAC, KEGG, latest, MetaCyc, names, old) if ("FORMULA" %in% unique(formulas[, "TYPE"])) { message(" Formula Associations ... 
", appendLF = FALSE) formula <- formulas[formulas[, "TYPE"] == "FORMULA", c("COMPOUND_ID", "CHEMICAL_DATA")] colnames(formula) <- c("ID", "FORMULA") DB <- merge(DB, formula, by = "ID", all.x = TRUE) DB <- merge(DB, DB[, c("ChEBI", "FORMULA")], by = "ChEBI", all.x = TRUE) DB[is.na(DB[, "FORMULA.x"]), "FORMULA.x"] <- "null" DB[is.na(DB[, "FORMULA.y"]), "FORMULA.y"] <- "null" DB[DB[, "FORMULA.x"] != "null" & DB[, "FORMULA.y"] == "null", "FORMULA.y"] <- DB[DB[, "FORMULA.x"] != "null" & DB[, "FORMULA.y"] == "null", "FORMULA.x"] DB[DB[, "FORMULA.y"] != "null" & DB[, "FORMULA.x"] == "null", "FORMULA.x"] <- DB[DB[, "FORMULA.y"] != "null" & DB[, "FORMULA.x"] == "null", "FORMULA.y"] DB <- unique(DB[DB[, "FORMULA.x"] != "null" & DB[, "FORMULA.y"] != "null", c("ID", "ChEBI", "KEGG", "IUPAC", "MetaCyc", "ChEMBL", "FORMULA.x")]) rm(formula) message("DONE", appendLF = TRUE) } else { message("NOT AVAILABLE FOR THIS RELEASE") } message("Downloading molecular weights ... ", appendLF = FALSE) if ("MASS" %in% unique(formulas[, "TYPE"])) { mass <- formulas[formulas[, "TYPE"] == "MASS", c("COMPOUND_ID", "CHEMICAL_DATA")] colnames(mass) <- c("ID", "MASS") DB <- merge(DB, mass, by = "ID", all.x = TRUE) DB <- merge(DB, DB[, c("ChEBI", "MASS")], by = "ChEBI", all.x = TRUE) DB[is.na(DB[, "MASS.x"]), "MASS.x"] <- "null" DB[is.na(DB[, "MASS.y"]), "MASS.y"] <- "null" DB[DB[, "MASS.x"] != "null" & DB[, "MASS.y"] == "null", "MASS.y"] <- DB[DB[, "MASS.x"] != "null" & DB[, "MASS.y"] == "null", "MASS.x"] DB[DB[, "MASS.y"] != "null" & DB[, "MASS.x"] == "null", "MASS.x"] <- DB[DB[, "MASS.y"] != "null" & DB[, "MASS.x"] == "null", "MASS.y"] DB <- unique(DB[, c("ID", "ChEBI", "KEGG", "IUPAC", "MetaCyc", "ChEMBL", "FORMULA.x", "MASS.x")]) rm(mass) message("DONE", appendLF = TRUE) } else { message("NOT AVAILABLE FOR THIS RELEASE") } message("Downloading monoisotopic molecular weights ... ", appendLF = FALSE) if ("MONOISOTOPIC MASS" %in% unique(formulas[, "TYPE"])) { mmass <- formulas[formulas[, "TYPE"] == "MONOISOTOPIC MASS", c("COMPOUND_ID", "CHEMICAL_DATA")] colnames(mmass) <- c("ID", "MONOISOTOPIC") DB <- merge(DB, mmass, by = "ID", all.x = TRUE) DB <- merge(DB, DB[, c("ChEBI", "MONOISOTOPIC")], by = "ChEBI", all.x = TRUE) DB[is.na(DB[, "MONOISOTOPIC.x"]), "MONOISOTOPIC.x"] <- "null" DB[is.na(DB[, "MONOISOTOPIC.y"]), "MONOISOTOPIC.y"] <- "null" DB[DB[, "MONOISOTOPIC.x"] != "null" & DB[, "MONOISOTOPIC.y"] == "null", "MONOISOTOPIC.y"] <- DB[DB[, "MONOISOTOPIC.x"] != "null" & DB[, "MONOISOTOPIC.y"] == "null", "MONOISOTOPIC.x"] DB[DB[, "MONOISOTOPIC.y"] != "null" & DB[, "MONOISOTOPIC.x"] == "null", "MONOISOTOPIC.x"] <- DB[DB[, "MONOISOTOPIC.y"] != "null" & DB[, "MONOISOTOPIC.x"] == "null", "MONOISOTOPIC.y"] DB <- unique(DB[, c( "ID", "ChEBI", "KEGG", "IUPAC", "MetaCyc", "ChEMBL", "FORMULA.x", "MASS.x", "MONOISOTOPIC.x" )]) rm(mmass) message("DONE", appendLF = TRUE) } else { message("NOT AVAILABLE FOR THIS RELEASE") } message("Downloading molecular charges ... 
", appendLF = FALSE) if ("CHARGE" %in% unique(formulas[, "TYPE"])) { charge <- formulas[formulas[, "TYPE"] == "CHARGE", c("COMPOUND_ID", "CHEMICAL_DATA")] colnames(charge) <- c("ID", "CHARGE") DB <- merge(DB, charge, by = "ID", all.x = TRUE) DB <- merge(DB, DB[, c("ChEBI", "CHARGE")], by = "ChEBI", all.x = TRUE) DB[is.na(DB[, "CHARGE.x"]), "CHARGE.x"] <- "null" DB[is.na(DB[, "CHARGE.y"]), "CHARGE.y"] <- "null" DB[DB[, "CHARGE.x"] != "null" & DB[, "CHARGE.y"] == "null", "CHARGE.y"] <- DB[DB[, "CHARGE.x"] != "null" & DB[, "CHARGE.y"] == "null", "CHARGE.x"] DB[DB[, "CHARGE.y"] != "null" & DB[, "CHARGE.x"] == "null", "CHARGE.x"] <- DB[DB[, "CHARGE.y"] != "null" & DB[, "CHARGE.x"] == "null", "CHARGE.y"] DB <- unique(DB[, c( "ID", "ChEBI", "KEGG", "IUPAC", "MetaCyc", "ChEMBL", "FORMULA.x", "MASS.x", "MONOISOTOPIC.x", "CHARGE.x" )]) message("DONE", appendLF = TRUE) } else { message("NOT AVAILABLE FOR THIS RELEASE") } DB[DB == "null"] <- NA DB <- unique(DB[complete.cases(DB[, c("ID", "ChEBI", "FORMULA.x", "MASS.x", "MONOISOTOPIC.x", "CHARGE.x")]), ]) colnames(DB) <- c( "ID", "ChEBI", "KEGG", "IUPAC", "MetaCyc", "ChEMBL", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" ) if (woAssociations == TRUE) { compounds <- unique(rbind( setNames(DB[, c("ChEBI", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE")], c( "NAME", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" )), setNames(DB[, c("KEGG", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE")], c( "NAME", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" )), setNames(DB[, c("IUPAC", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE")], c( "NAME", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" )), setNames(DB[, c("MetaCyc", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE")], c( "NAME", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" )), setNames(DB[, c("ChEMBL", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE")], c( "NAME", "FORMULA", "MASS", "MONOISOTOPIC", "CHARGE" )) )) compounds <- compounds[complete.cases(compounds), ] return(compounds) } else { return(DB) } }
importance <- function(x, scale=TRUE) { type=NULL; class=NULL; if (!inherits(x, "randomForest")) stop("x is not of class randomForest") classRF <- x$type != "regression" hasImp <- !is.null(dim(x$importance)) || ncol(x$importance) == 1 hasType <- !is.null(type) if (hasType && type == 1 && !hasImp) stop("That measure has not been computed") allImp <- is.null(type) && hasImp if (hasType) { if (!(type %in% 1:2)) stop("Wrong type specified") if (type == 2 && !is.null(class)) stop("No class-specific measure for that type") } imp <- x$importance if (hasType && type == 2) { if (hasImp) imp <- imp[, ncol(imp), drop=FALSE] } else { if (scale) { SD <- x$importanceSD imp[, -ncol(imp)] <- imp[, -ncol(imp), drop=FALSE] / ifelse(SD < .Machine$double.eps, 1, SD) } if (!allImp) { if (is.null(class)) { imp <- imp[, ncol(imp) - 1, drop=FALSE] } else { whichCol <- if (classRF) match(class, colnames(imp)) else 1 if (is.na(whichCol)) stop(paste("Class", class, "not found.")) imp <- imp[, whichCol, drop=FALSE] } } } imp<-imp[,2] imp } "irafnet_onetarget" <- function(x, y=NULL, xtest=NULL, ytest=NULL, ntree, mtry=if (!is.null(y) && !is.factor(y)) max(floor(ncol(x)/3), 1) else floor(sqrt(ncol(x))), replace=TRUE, classwt=NULL, cutoff, strata, sampsize = if (replace) nrow(x) else ceiling(.632*nrow(x)), nodesize = if (!is.null(y) && !is.factor(y)) 5 else 1, maxnodes=NULL, importance=FALSE, localImp=FALSE, nPerm=1, proximity, oob.prox=proximity, norm.votes=TRUE, do.trace=FALSE, keep.forest=!is.null(y) && is.null(xtest), corr.bias=FALSE, keep.inbag=FALSE, sw) { addclass <- is.null(y) classRF <- addclass || is.factor(y) if (!classRF && length(unique(y)) <= 5) { warning("The response has five or fewer unique values. Are you sure you want to do regression?") } if (classRF && !addclass && length(unique(y)) < 2) stop("Need at least two classes to do classification.") n <- nrow(x) p <- ncol(x) if (n == 0) stop("data (x) has 0 rows") x.row.names <- rownames(x) x.col.names <- if (is.null(colnames(x))) 1:ncol(x) else colnames(x) keep.forest <- keep.forest testdat <- !is.null(xtest) if (testdat) { if (ncol(x) != ncol(xtest)) stop("x and xtest must have same number of columns") ntest <- nrow(xtest) xts.row.names <- rownames(xtest) } if (mtry < 1 || mtry > p) warning("invalid mtry: reset to within valid range") mtry <- max(1, min(p, round(mtry))) if (!is.null(y)) { if (length(y) != n) stop("length of response must be the same as predictors") addclass <- FALSE } else { if (!addclass) addclass <- TRUE y <- factor(c(rep(1, n), rep(2, n))) x <- rbind(x, x) } if (any(is.na(x))) stop("NA not permitted in predictors") if (testdat && any(is.na(xtest))) stop("NA not permitted in xtest") if (any(is.na(y))) stop("NA not permitted in response") if (!is.null(ytest) && any(is.na(ytest))) stop("NA not permitted in ytest") ncat <- rep(1, p) xlevels <- as.list(rep(0, p)) maxcat <- max(ncat) if (maxcat > 32) stop("Can not handle categorical predictors with more than 32 categories.") if (classRF) { nclass <- length(levels(y)) if (any(table(y) == 0)) stop("Can't have empty classes in y.") if (!is.null(ytest)) { if (!is.factor(ytest)) stop("ytest must be a factor") if (!all(levels(y) == levels(ytest))) stop("y and ytest must have the same levels") } if (missing(cutoff)) { cutoff <- rep(1 / nclass, nclass) } else { if (sum(cutoff) > 1 || sum(cutoff) < 0 || !all(cutoff > 0) || length(cutoff) != nclass) { stop("Incorrect cutoff specified.") } if (!is.null(names(cutoff))) { if (!all(names(cutoff) %in% levels(y))) { stop("Wrong name(s) for cutoff") } 
cutoff <- cutoff[levels(y)] } } if (!is.null(classwt)) { if (length(classwt) != nclass) stop("length of classwt not equal to number of classes") if (!is.null(names(classwt))) { if (!all(names(classwt) %in% levels(y))) { stop("Wrong name(s) for classwt") } classwt <- classwt[levels(y)] } if (any(classwt <= 0)) stop("classwt must be positive") ipi <- 1 } else { classwt <- rep(1, nclass) ipi <- 0 } } else addclass <- FALSE if (missing(proximity)) proximity <- addclass if (proximity) { prox <- matrix(0.0, n, n) proxts <- if (testdat) matrix(0, ntest, ntest + n) else double(1) } else { prox <- proxts <- double(1) } if (localImp) { importance <- TRUE impmat <- matrix(0, p, n) } else impmat <- double(1) if (importance) { if (nPerm < 1) nPerm <- as.integer(1) else nPerm <- as.integer(nPerm) if (classRF) { impout <- matrix(0.0, p, nclass + 2) impSD <- matrix(0.0, p, nclass + 1) } else { impout <- matrix(0.0, p, 2) impSD <- double(p) names(impSD) <- x.col.names } } else { impout <- double(p) impSD <- double(1) } nsample <- if (addclass) 2 * n else n Stratify <- length(sampsize) > 1 if ((!Stratify) && sampsize > nrow(x)) stop("sampsize too large") if (Stratify && (!classRF)) stop("sampsize should be of length one") if (classRF) { if (Stratify) { if (missing(strata)) strata <- y if (!is.factor(strata)) strata <- as.factor(strata) nsum <- sum(sampsize) if (length(sampsize) > nlevels(strata)) stop("sampsize has too many elements.") if (any(sampsize <= 0) || nsum == 0) stop("Bad sampsize specification") if (!is.null(names(sampsize))) { sampsize <- sampsize[levels(strata)] } if (any(sampsize > table(strata))) stop("sampsize can not be larger than class frequency") } else { nsum <- sampsize } nrnodes <- 2 * trunc(nsum / nodesize) + 1 } else { nrnodes <- 2 * trunc(sampsize/max(1, nodesize - 4)) + 1 } if (!is.null(maxnodes)) { maxnodes <- 2 * maxnodes - 1 if (maxnodes > nrnodes) warning("maxnodes exceeds its max value.") nrnodes <- min(c(nrnodes, max(c(maxnodes, 1)))) } x <- t(x) storage.mode(x) <- "double" if (testdat) { xtest <- t(xtest) storage.mode(xtest) <- "double" if (is.null(ytest)) { ytest <- labelts <- 0 } else { labelts <- TRUE } } else { xtest <- double(1) ytest <- double(1) ntest <- 1 labelts <- FALSE } nt <- if (keep.forest) ntree else 1 rfout <- .C("regRF", x, as.double(y), as.integer(c(n, p)), as.integer(sampsize), as.integer(nodesize), as.integer(nrnodes), as.integer(ntree), as.integer(mtry), as.integer(c(importance, localImp, nPerm)), as.integer(ncat), as.integer(maxcat), as.integer(do.trace), as.integer(proximity), as.integer(oob.prox), as.integer(corr.bias), ypred = double(n), impout = impout, impmat = impmat, impSD = impSD, prox = prox, ndbigtree = integer(ntree), nodestatus = matrix(integer(nrnodes * nt), ncol=nt), leftDaughter = matrix(integer(nrnodes * nt), ncol=nt), rightDaughter = matrix(integer(nrnodes * nt), ncol=nt), nodepred = matrix(double(nrnodes * nt), ncol=nt), bestvar = matrix(integer(nrnodes * nt), ncol=nt), xbestsplit = matrix(double(nrnodes * nt), ncol=nt), mse = double(ntree), keep = as.integer(c(keep.forest, keep.inbag)), replace = as.integer(replace), testdat = as.integer(testdat), xts = xtest, ntest = as.integer(ntest), yts = as.double(ytest), labelts = as.integer(labelts), ytestpred = double(ntest), proxts = proxts, msets = double(if (labelts) ntree else 1), coef = double(2), oob.times = integer(n), inbag = if (keep.inbag) matrix(integer(n * ntree), n) else integer(1), sw = as.double(sw))[c(16:28, 36:41)] if (keep.forest) { max.nodes <- max(rfout$ndbigtree) 
rfout$nodestatus <- rfout$nodestatus[1:max.nodes, , drop=FALSE] rfout$bestvar <- rfout$bestvar[1:max.nodes, , drop=FALSE] rfout$nodepred <- rfout$nodepred[1:max.nodes, , drop=FALSE] rfout$xbestsplit <- rfout$xbestsplit[1:max.nodes, , drop=FALSE] rfout$leftDaughter <- rfout$leftDaughter[1:max.nodes, , drop=FALSE] rfout$rightDaughter <- rfout$rightDaughter[1:max.nodes, , drop=FALSE] } cl <- match.call() cl[[1]] <- as.name("randomForest") ypred <- rfout$ypred if (any(rfout$oob.times < 1)) { ypred[rfout$oob.times == 0] <- NA } out <- list(call = cl, type = "regression", predicted = structure(ypred, names=x.row.names), mse = rfout$mse, rsq = 1 - rfout$mse / (var(y) * (n-1) / n), oob.times = rfout$oob.times, importance = if (importance) matrix(rfout$impout, p, 2, dimnames=list(x.col.names, c("%IncMSE","IncNodePurity"))) else matrix(rfout$impout, ncol=1, dimnames=list(x.col.names, "IncNodePurity")), importanceSD=if (importance) rfout$impSD else NULL, localImportance = if (localImp) matrix(rfout$impmat, p, n, dimnames=list(x.col.names, x.row.names)) else NULL, proximity = if (proximity) matrix(rfout$prox, n, n, dimnames = list(x.row.names, x.row.names)) else NULL, ntree = ntree, mtry = mtry, forest = if (keep.forest) c(rfout[c("ndbigtree", "nodestatus", "leftDaughter", "rightDaughter", "nodepred", "bestvar", "xbestsplit")], list(ncat = ncat), list(nrnodes=max.nodes), list(ntree=ntree), list(xlevels=xlevels)) else NULL, coefs = if (corr.bias) rfout$coef else NULL, y = y, test = if(testdat) { list(predicted = structure(rfout$ytestpred, names=xts.row.names), mse = if(labelts) rfout$msets else NULL, rsq = if(labelts) 1 - rfout$msets / (var(ytest) * (n-1) / n) else NULL, proximity = if (proximity) matrix(rfout$proxts / ntree, nrow = ntest, dimnames = list(xts.row.names, c(xts.row.names, x.row.names))) else NULL) } else NULL, inbag = if (keep.inbag) matrix(rfout$inbag, nrow(rfout$inbag), dimnames=list(x.row.names, NULL)) else NULL) class(out) <- "randomForest" return(out) } "iRafNet_permutation" <- function(X,W, ntree,mtry,genes.name,perm) { p<-dim(X)[2] imp<-matrix(0,p,p) n<-dim(X)[1] index<-seq(1,p) vec1<-matrix(rep(genes.name,p),p,p) vec2<-t(vec1); vec1<-c(vec1); vec2<-c(vec2) set.seed(perm) label<-sample(n) for (j in 1:p){ y<-X[label,j]; weights.rf<-as.matrix(W[,j]); weights.rf[j]<-0 weights.rf<-weights.rf/sum(weights.rf); w.sorted<-sort(weights.rf,decreasing = FALSE,index.return=T) index<-w.sorted$ix x.sorted<-X[,index] w.sorted<-w.sorted$x rout<-irafnet_onetarget(x=x.sorted,y=as.double(y),importance=TRUE,mtry=round(sqrt(p-1)),ntree=1000, sw=as.double(w.sorted)) imp[index,j]<-c(importance(rout)) } imp<-c(imp); out<-cbind(as.character(vec1),as.character(vec2),as.data.frame(imp),stringsAsFactors=FALSE) out<-out[vec1!=vec2,] i<-sort(out[,3],decreasing=TRUE,index=TRUE) out<-out[i$ix,] return(out[,3]) }
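## Illustrative call of iRafNet_permutation() (not run): X stands for an
## n x p expression matrix and W for a p x p matrix of positive sampling
## weights; both are placeholders, and the compiled "regRF" routine must be
## available for this to run.
## null_imp <- iRafNet_permutation(X, W, ntree = 1000,
##                                 mtry = round(sqrt(ncol(X) - 1)),
##                                 genes.name = colnames(X), perm = 1)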
deploy_sigflow <- function() { }
require(sparseFLMM) dat1 <- data.frame(arg1 = 1:50) dat2 <- expand.grid(arg1 = 1:50, arg2 = 1:50) Bskew <- Predict.matrix( smooth.construct( s(arg1, arg2, bs = "symm", xt = list(skew = TRUE)), data = dat2, knots = NULL ), data = dat2 ) Bsymm <- Predict.matrix( smooth.construct( s(arg1, arg2, bs = "symm", xt = list(skew = FALSE)), data = dat2, knots = NULL ), data = dat2 ) set.seed(934811) dat2$yskew <- c(Bskew %*% rnorm(ncol(Bskew))) dat2$ysymm <- c(Bsymm %*% rnorm(ncol(Bsymm))) modpa <- gam( I(yskew + ysymm) ~ s(arg1, arg2, bs = "symm", xt = list(skew = TRUE)) + s(arg1, arg2, bs = "symm", xt = list(skew = FALSE)), data = dat2) preds <- predict(modpa, type = "terms") dat1 <- as.list(dat1) dat1$arg2 <- dat1$arg1 dat1$predskew <- matrix(preds[,1], nrow = length(dat1$arg1)) dat1$predsymm <- matrix(preds[,2], nrow = length(dat1$arg1)) cols <- hcl.colors(12, "RdBu") opar <- par(mfcol = c(2,2)) with(dat1, image(arg1, arg2, predsymm, asp = 1, main = "Symmetric part of y", col = cols)) with(dat1, image(arg1, arg2, asp = 1, main = "Fit via symm.smooth", matrix(dat2$ysymm, nrow = length(arg1)), col = cols)) with(dat1, image(arg1, arg2, predskew, asp = 1, main = "Skew-symmetric part of y", col = cols)) with(dat1, image(arg1, arg2, asp = 1, main = "Fit via symm.smooth", matrix(dat2$yskew, nrow = length(arg1)), col = cols)) par(opar) stopifnot(all.equal(dat1$predskew, - t(dat1$predskew))) stopifnot(all.equal(dat1$predsymm, t(dat1$predsymm))) modpac <- gam( I(yskew + ysymm) ~ s(arg1, arg2, bs = "symm", xt = list(skew = TRUE, cyclic = TRUE)) + s(arg1, arg2, bs = "symm", xt = list(skew = FALSE, cyclic = TRUE)), knots = list(arg1 = c(1, 50), arg2 = c(1,50)), data = dat2) plot(modpac, asp = 1, se = FALSE, pages = 1) predsc <- predict(modpac, type = "terms") dat1$predskewc <- matrix(predsc[,1], nrow = length(dat1$arg1)) dat1$predsymmc <- matrix(predsc[,2], nrow = length(dat1$arg1)) opar <- par(mfrow = c(1,2)) with(dat1, matplot(arg1, predsymmc[, c(1,10, 40)], t = "l", main = "symmetric smooth")) abline(h = dat1$predsymmc[1, c(1,10, 40)], col = "darkgrey") abline(v = c(1,50), col = "darkgrey") with(dat1, matplot(arg1, predskewc[, c(1,10, 40)], t = "l", main = "skew-symmetric smooth")) abline(h = dat1$predskewc[1, c(1,10, 40)], col = "darkgrey") abline(v = c(1,50), col = "darkgrey") par(opar) dat <- data.frame( x = 1:100 ) ps_obj <- with(dat, s(x, bs = "ps")) B <- Predict.matrix(smooth.construct(ps_obj, dat, NULL), dat) set.seed(3904) dat$y <- B %*% rnorm(ncol(B)) plot(dat, t = "l") mod0 <- gam( y ~ s(x, bs = "symm", xt = list(skew = TRUE)), knots = list(x = c(0,100)), dat = dat ) lines(dat$x, predict(mod0), col = "cornflowerblue", lty = "dashed") mod1 <- gam( y ~ s(x, bs = "symm"), knots = list(x=c(0,50)), dat = dat[1:50, ]) lines(dat[1:50, ]$x, predict(mod1), col = "darkred", lty = "dashed")
library(jtools)
context("summ")

set.seed(1)
output <- rpois(100, 5)
input <- log(output) + runif(100, 0, 1)
clusters <- sample(1:5, size = 100, replace = TRUE)
dat <- as.data.frame(cbind(output, input, clusters))
fitgf <- glm(output ~ input, data = dat, family = poisson)

set.seed(100)
exposures <- rpois(50, 50)
counts <- exposures - rpois(50, 25)
money <- (counts/exposures) + rnorm(50, sd = 1)
talent <- counts * .5 + rnorm(50, sd = 3)
wt <- runif(50, 0, 3)
poisdat <- as.data.frame(cbind(exposures, counts, talent, money, wt))
pmod <- glm(counts ~ talent * money, offset = log(exposures), data = poisdat,
            family = poisson)
pmod2 <- glm(counts ~ talent * money + offset(log(exposures)), data = poisdat,
             family = poisson)
pmodw <- glm(counts ~ talent + money, data = poisdat, weights = wt)

if (requireNamespace("survey")) {
  suppressMessages(library(survey, quietly = TRUE))
  data(api)
  dstrat <- svydesign(id = ~1, strata = ~stype, weights = ~pw, data = apistrat,
                      fpc = ~fpc)
  dstrat$variables$mealsdec <- dstrat$variables$meals/100
  regmodel <- svyglm(mealsdec ~ ell + api00, design = dstrat,
                     family = quasibinomial)
  regmodell <- svyglm(mealsdec ~ ell + api00, design = dstrat)
}

states <- as.data.frame(state.x77)
states$HSGrad <- states$`HS Grad`
set.seed(3)
states$wts <- runif(50, 0, 3)
fit <- lm(Income ~ HSGrad * Murder * Illiteracy, data = states)
fitw <- lm(Income ~ HSGrad * Murder * Illiteracy, data = states, weights = wts)

if (requireNamespace("lme4")) {
  library(lme4, quietly = TRUE)
  data(sleepstudy)
  mv <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
}

options("summ-stars" = TRUE)

test_that("standardize gives deprecated warning", {
  expect_warning(summ(fit, standardize = TRUE))
  expect_warning(summ(fitgf, standardize = TRUE))
  if (requireNamespace("lme4")) {
    expect_warning(summ(mv, standardize = TRUE))
  }
  if (requireNamespace("survey")) {
    expect_warning(summ(regmodel, standardize = TRUE))
  }
})

test_that("jsumm: GLMs work", {
  expect_is(summ(fitgf), "summ.glm")
  expect_is(summ(fitgf, scale = TRUE), "summ.glm")
  expect_is(summ(fitgf, center = TRUE), "summ.glm")
})

test_that("jsumm: GLMs w/ offsets work (arg)", {
  expect_is(summ(pmod), "summ.glm")
  expect_is(summ(pmod, scale = TRUE), "summ.glm")
  expect_is(summ(pmod, center = TRUE), "summ.glm")
})

test_that("jsumm: GLMs w/ offsets work (formula)", {
  expect_is(summ(pmod2), "summ.glm")
  expect_is(summ(pmod2, scale = TRUE), "summ.glm")
  expect_is(summ(pmod2, center = TRUE), "summ.glm")
})

test_that("jsumm: GLMs w/ weights work", {
  expect_is(summ(pmodw), "summ.glm")
  expect_is(summ(pmodw, scale = TRUE), "summ.glm")
  expect_is(summ(pmodw, center = TRUE), "summ.glm")
})

test_that("jsumm: partial correlations work", {
  expect_is(summ(fit, part.corr = TRUE), "summ.lm")
  expect_output(print(summ(fit, part.corr = TRUE)))
  expect_warning(summ(fit, part.corr = TRUE, robust = TRUE))
})

test_that("summ: knit_print works", {
  expect_is(jtools:::knit_print.summ.lm(summ(fit)), "knit_asis")
  expect_is(jtools:::knit_print.summ.glm(summ(fitgf)), "knit_asis")
  if (requireNamespace("lme4")) {
    expect_is(jtools:::knit_print.summ.merMod(summ(mv)), "knit_asis")
  }
  if (requireNamespace("survey")) {
    expect_is(jtools:::knit_print.summ.svyglm(summ(regmodel)), "knit_asis")
  }
})

options("summ-stars" = FALSE)

x1 <- rnorm(100)
x2 <- 2 * x1
y <- rnorm(100)
sing_dat <- as.data.frame(cbind(x1, x2, y))
sing_fit <- lm(y ~ x1 + x2, data = sing_dat)
sing_fitg <- glm(y ~ x1 + x2, data = sing_dat)
int_fit <- lm(y ~ 1, data = sing_dat)
int_fitg <- glm(y ~ 1, data = sing_dat)

test_that("summ handles singular and intercept-only models", {
  expect_is(summ(sing_fit), "summ.lm")
  expect_is(summ(sing_fitg), "summ.glm")
  expect_is(summ(int_fit), "summ.lm")
  expect_is(summ(int_fitg), "summ.glm")
})

if (requireNamespace("survey")) {
  test_that("jsumm: non-linear svyglm models work", {
    expect_is(summ(regmodel), "summ.svyglm")
  })
  test_that("jsumm: svyglm vifs work", {
    expect_is(summ(regmodel, vifs = TRUE), "summ.svyglm")
  })
  test_that("jsumm: svyglm linear model check works", {
    expect_warning(summ(regmodel, model.check = TRUE))
  })
  test_that("jsumm: svyglm CIs work", {
    expect_is(summ(regmodel, confint = TRUE), "summ.svyglm")
    expect_output(print(summ(regmodel, confint = TRUE)))
  })
  test_that("jsumm: svyglm dropping pvals works", {
    expect_is(summ(regmodel, pvals = FALSE), "summ.svyglm")
    expect_output(print(summ(regmodel, pvals = FALSE)))
  })
  test_that("jsumm: svyglm odds ratios", {
    expect_warning(summ(regmodel, odds.ratio = TRUE))
    expect_is(summ(regmodel, exp = TRUE), "summ.svyglm")
    expect_output(print(summ(regmodel, exp = TRUE)))
  })
}

if (requireNamespace("lme4")) {
  gm <- glmer(incidence ~ period + (1 | herd), family = poisson,
              data = cbpp, offset = log(size))
  test_that("jsumm: merMod CIs work", {
    expect_is(s <- summ(mv, confint = TRUE), "summ.merMod")
    expect_output(print(s))
    expect_is(s <- summ(gm, confint = TRUE), "summ.merMod")
    expect_output(print(s))
  })
  test_that("jsumm: merMod dropping pvals works", {
    expect_is(s <- summ(mv, pvals = FALSE), "summ.merMod")
    expect_output(print(s))
    expect_is(s <- summ(gm, pvals = FALSE), "summ.merMod")
    expect_output(print(s))
  })
  test_that("summ: all merMod p-value calculation options work", {
    expect_is(s <- summ(mv, t.df = "s"), "summ.merMod")
    expect_output(print(s))
    expect_is(s <- summ(mv, t.df = "k-r"), "summ.merMod")
    expect_output(print(s))
    expect_is(s <- summ(mv, t.df = "resid"), "summ.merMod")
    expect_output(print(s))
    expect_is(s <- summ(mv, t.df = 1), "summ.merMod")
    expect_output(print(s))
  })
  test_that("jsumm and merMod objects: everything works", {
    expect_is(suppressWarnings(summ(mv, center = TRUE, n.sd = 2, pvals = FALSE)),
              "summ.merMod")
    expect_is(summ(mv, scale = TRUE, n.sd = 2, pvals = FALSE), "summ.merMod")
    expect_warning(summ(mv, robust = TRUE))
  })
}

test_that("jsumm: lm CIs work", {
  expect_is(summ(fit, confint = TRUE), "summ.lm")
  expect_output(print(summ(fit, confint = TRUE)))
})

test_that("jsumm: glm CIs work", {
  expect_is(summ(fitgf, confint = TRUE), "summ.glm")
  expect_output(print(summ(fitgf, confint = TRUE)))
})

test_that("jsumm: lm dropping pvals works", {
  expect_is(summ(fit, pvals = FALSE), "summ.lm")
  expect_output(print(summ(fit, pvals = FALSE)))
})

test_that("jsumm: glm dropping pvals works", {
  expect_is(summ(fitgf, pvals = FALSE), "summ.glm")
  expect_output(print(summ(fitgf, pvals = FALSE)))
})

test_that("jsumm and scale_lm: scaling works", {
  expect_is(summ(fitgf, scale = TRUE, n.sd = 2), "summ.glm")
  expect_is(summ(fit, scale = TRUE, n.sd = 2), "summ.lm")
})

test_that("jsumm and center_lm: centering works", {
  expect_is(summ(fitgf, center = TRUE, n.sd = 2), "summ.glm")
  expect_is(summ(fit, center = TRUE, n.sd = 2), "summ.lm")
})

test_that("jsumm can scale weighted lms", {
  expect_is(summ(fitw, scale = TRUE, n.sd = 2, robust = "HC3"), "summ.lm")
  expect_is(summ(fitw, center = TRUE, robust = "HC3"), "summ.lm")
})

test_that("jsumm: lm robust SEs work", {
  expect_is(summ(fit, robust = TRUE), "summ.lm")
  expect_is(summ(fit, robust = "HC4m"), "summ.lm")
  expect_output(print(summ(fit, robust = "HC4m")))
})

test_that("jsumm: lm partial corrs works", {
  expect_is(summ(fit, part.corr = TRUE), "summ.lm")
  expect_output(print(summ(fit, part.corr = TRUE)))
})

test_that("jsumm: warn with partial corrs and robust SEs", {
  expect_warning(summ(fit, robust = "HC3", part.corr = TRUE))
})

test_that("jsumm: glm robust SEs work", {
  expect_is(summ(fitgf, robust = "HC3"), "summ.glm")
  expect_output(print(summ(fitgf, robust = "HC4m")))
})

test_that("jsumm: lm cluster-robust SEs work", {
  expect_is(summ(fit, robust = "HC3", cluster = "Population"), "summ.lm")
  expect_output(print(summ(fit, robust = "HC3", cluster = "Population")))
  expect_error(summ(fit, robust = "HC4m", cluster = "Population"))
})

test_that("jsumm: glm cluster-robust SEs work", {
  expect_is(summ(fitgf, robust = "HC3", cluster = clusters), "summ.glm")
  expect_output(print(summ(fitgf, robust = TRUE, cluster = clusters)))
  expect_error(summ(fitgf, robust = "HC4m", cluster = clusters))
})

test_that("jsumm: Printing isn't borked", {
  expect_error(print(summ(fitgf, vifs = TRUE, robust = TRUE)))
  expect_output(print(summ(fitgf, scale = TRUE)))
  if (requireNamespace("survey")) {
    expect_output(print(summ(regmodel, scale = TRUE, n.sd = 2)))
    expect_output(print(summ(regmodel, vifs = TRUE)))
    expect_output(print(summ(regmodell, scale = TRUE, n.sd = 2)))
    expect_output(print(summ(regmodell, vifs = TRUE)))
  }
  expect_output(print(summ(fit, scale = TRUE, n.sd = 2)))
  expect_output(print(summ(fit, vifs = TRUE)))
  if (requireNamespace("lme4")) {
    expect_output(print(summ(mv, scale = TRUE, n.sd = 2, pvals = FALSE)))
  }
})

set_summ_defaults(digits = 4, model.info = FALSE, model.fit = FALSE,
                  pvals = FALSE, robust = TRUE, confint = TRUE,
                  ci.width = .90, vifs = TRUE, table.format = "grid")

test_that("set_summ_defaults changes options", {
  expect_equal(getOption("jtools-digits"), 4)
  expect_equal(getOption("summ-model.info"), FALSE)
  expect_equal(getOption("summ-model.fit"), FALSE)
  expect_equal(getOption("summ-pvals"), FALSE)
  expect_equal(getOption("summ-robust"), TRUE)
  expect_equal(getOption("summ-confint"), TRUE)
  expect_equal(getOption("summ-ci.width"), .90)
  expect_equal(getOption("summ-vifs"), TRUE)
  expect_equal(getOption("summ.table.format"), "grid")
})

set_summ_defaults(digits = NULL, model.info = NULL, model.fit = NULL,
                  pvals = NULL, robust = NULL, confint = NULL,
                  ci.width = NULL, vifs = NULL)
library(oce)
test_that("[[\"?\"]] is handled by all object classes", {
    objectNames <- c("adp", "adv", "amsr", "argo", "bremen", "cm", "coastline",
                     "ctd", "echosounder", "satellite", "g1sst", "gps", "ladp",
                     "landsat", "lisst", "lobo", "met", "odf", "rsk", "satellite",
                     "sealevel", "section", "tidem", "topo", "windrose", "xbt")
    for (objectName in objectNames) {
        o <- new(objectName)
        expect_equal(4, length(o[["?"]]))
    }
})
test_that("get/set/delete data", {
    data("ctd")
    S <- oceGetData(ctd, "salinity")
    ctd2 <- oceSetData(ctd, name="fake", value=2*S,
                       unit=list(unit=expression(), scale=""),
                       originalName="FAKE")
    expect_equal(2*S, ctd2[["fake"]])
    expect_equal(S, ctd@data$salinity)
    expect_equal(S, ctd2@data$salinity)
    d <- oceDeleteData(ctd, "fake")
    expect_false("fake" %in% names(d[["data"]]))
    expect_false("fake" %in% names(d@data))
})
test_that("get/set/delete metadata", {
    data("ctd")
    type <- oceGetMetadata(ctd, name="type")
    expect_equal(type, "SBE")
    ctd2 <- oceSetMetadata(ctd, name="type", value="fake")
    expect_equal(oceGetMetadata(ctd2, "type"), "fake")
    expect_equal(ctd2[["metadata"]]$type, "fake")
    ctd3 <- oceDeleteMetadata(ctd2, "type")
    expect_false("fake" %in% names(ctd3[["metadata"]]))
})
test_that("data", {
    data("ctd")
    d <- oceGetData(ctd, "salinity")
    expect_equal(d, ctd[["salinity"]])
    expect_equal(d, ctd@data$salinity)
})
test_that("retrieve units", {
    data("ctd")
    expect_equal(ctd[["temperatureUnit"]], list(unit=expression(degree*C), scale="IPTS-68"))
    expect_equal(ctd[["temperature unit"]], expression(degree*C))
    expect_equal(ctd[["temperature scale"]], "IPTS-68")
    expect_equal(ctd[["pressureUnit"]], list(unit=expression(dbar), scale=""))
    expect_equal(ctd[["pressure unit"]], expression(dbar))
    expect_equal(ctd[["pressure scale"]], "")
})
test_that("alter units", {
    data("ctd")
    ctd[["metadata"]]$units$salinity <- list(unit=expression(foo), scale="bar")
    expect_equal(ctd[["salinityUnit"]], list(unit=expression(foo), scale="bar"))
})
test_that("three methods for specifying units", {
    data(ctd)
    freezing <- swTFreeze(ctd)
    ctd <- oceSetData(ctd, "freezing", freezing,
                      list(unit=expression(degree*C), scale="ITS-90"))
    feet <- 3.28084 * swDepth(ctd) # 1 m = 3.28084 ft
    ctd <- oceSetData(ctd, "depthInFeet", feet, expression(feet))
    expect_identical(ctd[["units"]]$depthInFeet, list(unit=expression(feet), scale=""))
    fathoms <- feet / 6
    ctd <- oceSetData(ctd, "depthInFathoms", fathoms, "fathoms")
    expect_identical(ctd[["units"]]$depthInFathoms, list(unit=expression(fathoms), scale=""))
})
test_that("can use original names", {
    data("ctd")
    expect_equal(diff(ctd[["timeS"]]), diff(as.numeric(ctd[["time"]])))
    expect_equal(length(ctd[["time"]]), length(ctd[["pressure"]]))
    expect_equal(ctd[["pressure"]], ctd[["pr"]])
    expect_equal(ctd[["depth"]], ctd[["depS"]])
    expect_equal(ctd[["temperature"]], T90fromT68(ctd[["t068"]]))
    expect_equal(ctd[["salinity"]], ctd[["sal00"]])
})
test_that("alter ctd profiles within a section", {
    data("section")
    section[["station", 1]][["S2"]] <- 2 * section[["station", 1]][["salinity"]]
    expect_equal(section[["station", 1]][["S2"]],
                 2 * section[["station", 1]][["salinity"]])
})
test_that("accessor operations (ctd)", {
    data(ctd)
    S <- ctd[["salinity"]]
    expect_equal(head(S), c(29.9210, 29.9205, 29.9206, 29.9219, 29.9206, 29.9164))
    ctd[["salinity"]] <- S + 0.01
    SS <- ctd[["salinity"]]
    expect_equal(head(SS), 0.01 + c(29.9210, 29.9205, 29.9206, 29.9219, 29.9206, 29.9164))
    ctd[["SS"]] <- SS
    expect_equal(head(ctd[["SS"]]), 0.01 + c(29.9210, 29.9205, 29.9206, 29.9219, 29.9206, 29.9164))
})
test_that("accessor operations, specifying data or metadata", {
    data(ctd)
    expect_equal(ctd[["longitude"]], ctd[["longitude", "metadata"]])
    expect_null(ctd[["longitude", "data"]])
    expect_equal(ctd[["temperature"]], ctd[["temperature", "data"]])
    expect_equal(ctd[["salinity"]], ctd[["salinity", "data"]])
    expect_equal(ctd[["salinity"]], ctd[["sal00", "data"]])
    o <- new("oce")
    o <- oceSetMetadata(o, "foo", "metadataBar")
    o <- oceSetData(o, "foo", "dataBar")
    expect_equal(o[["foo"]], "metadataBar")
    expect_equal(o[["foo", "metadata"]], "metadataBar")
    expect_equal(o[["foo", "data"]], "dataBar")
    expect_error(o[["foo", "unknown"]], "second arg must be")
})
test_that("derived quantities handled properly (ctd)", {
    data(ctd)
    expect_null(ctd[["noSuchThing"]])
    thetaByAccessor <- ctd[["theta"]]
    thetaByFunction <- swTheta(ctd)
    expect_equal(thetaByAccessor, thetaByFunction)
    ctd[["theta"]] <- thetaByAccessor
    expect_equal(ctd[["theta"]], thetaByAccessor)
    expect_equal(ctd[["theta"]], swTheta(ctd))
    ctd[["S"]] <- ctd[["S"]] + 0.01
    expect_equal(head(swTheta(ctd, eos="unesco")),
                 c(14.2208818496, 14.2262540208, 14.2248015615,
                   14.2218758247, 14.2263218577, 14.2328135332))
    expect_equal(swTheta(ctd, eos="unesco"),
                 swTheta(ctd[["salinity"]], ctd[["temperature"]], ctd[["pressure"]],
                         eos="unesco"))
    expect_equal(swTheta(ctd, eos="gsw"),
                 swTheta(ctd[["salinity"]], ctd[["temperature"]], ctd[["pressure"]],
                         longitude=ctd[["longitude"]], latitude=ctd[["latitude"]],
                         eos="gsw"))
})
test_that("accessor operations (adp)", {
    data(adp)
    v <- adp[["v"]]
    expect_equal(v[1:5, 1, 1],
                 c(-0.11955770778, -0.09925398341, 0.10203801933,
                   0.09613003081, 0.24394126236))
    expect_null(adp[["noSuchThing"]])
    adp[["somethingNew"]] <- 1:4
    expect_true("somethingNew" %in% names(adp[["data"]]))
    expect_equal(adp[["somethingNew"]], 1:4)
})
test_that("renaming items in the data slot", {
    data(ctd)
    bad <- oceRenameData(ctd, "salinity", "saltiness", note="a bad idea!")
    expect_error(plot(bad), "data slot lacks 'salinity")
})
test_that("argo original names (issue 1640)", {
    data(argo)
    expect_equal(argo[["temperature"]], argo[["TEMP"]])
})
FCVARlagSelect <- function(x, kmax, r, order, opt) {
  cap_T <- nrow(x)
  p <- ncol(x)
  printWN <- 0
  print2screen <- opt$print2screen
  opt$print2screen <- 0
  opt$plotRoots <- 0
  opt$CalcSE <- 0
  D <- matrix(0, nrow = kmax+1, ncol = 2)
  loglik <- matrix(0, nrow = kmax+1, ncol = 1)
  LRtest <- matrix(0, nrow = kmax+1, ncol = 1)
  pvLRtest <- matrix(0, nrow = kmax+1, ncol = 1)
  aic <- matrix(0, nrow = kmax+1, ncol = 1)
  bic <- matrix(0, nrow = kmax+1, ncol = 1)
  pvMVq <- matrix(0, nrow = kmax+1, ncol = 1)
  pvWNQ <- matrix(0, nrow = kmax+1, ncol = p)
  pvWNLM <- matrix(0, nrow = kmax+1, ncol = p)
  for (k in 0:kmax) {
    message(sprintf('Estimating for k = %d and r = %d.', k, r))
    results <- FCVARestn(x, k, r, opt)
    message(sprintf('Finished Estimation for k = %d and r = %d.', k, r))
    loglik[k+1] <- results$like
    D[k+1, ] <- results$coeffs$db
    aic[k+1] <- -2*loglik[k+1] + 2*results$fp
    bic[k+1] <- -2*loglik[k+1] + results$fp*log(cap_T - opt$N)
    MVWNtest_stats <- MVWNtest(results$Residuals, order, printWN)
    pvWNQ[k+1, ] <- MVWNtest_stats$pvQ
    pvWNLM[k+1, ] <- MVWNtest_stats$pvLM
    pvMVq[k+1, ] <- MVWNtest_stats$pvMVQ
    if (k > 0) {
      LRtest[k+1] <- 2*(loglik[k+1] - loglik[k])
      pvLRtest[k+1] <- 1 - stats::pchisq(LRtest[k+1], p^2)
    }
  }
  i_aic <- which.min(aic)
  i_bic <- which.min(bic)
  FCVARlagSelectStats <- list(
    D = D,
    loglik = loglik,
    LRtest = LRtest,
    pvLRtest = pvLRtest,
    i_aic = i_aic,
    aic = aic,
    i_bic = i_bic,
    bic = bic,
    pvMVq = pvMVq,
    pvWNQ = pvWNQ,
    pvWNLM = pvWNLM,
    kmax = kmax,
    r = r,
    p = p,
    cap_T = cap_T,
    order = order,
    opt = opt
  )
  class(FCVARlagSelectStats) <- 'FCVAR_lags'
  opt$print2screen <- print2screen
  if (opt$print2screen) {
    summary(object = FCVARlagSelectStats)
  }
  return(FCVARlagSelectStats)
}
summary.FCVAR_lags <- function(object, ...) {
  yesNo <- c('No','Yes')
  cat(sprintf('\n--------------------------------------------------------------------------------\n'))
  cat(sprintf(' Lag Selection Results \n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf('Dimension of system: %6.0f Number of observations in sample: %6.0f \n',
              object$p, object$cap_T))
  cat(sprintf('Order for WN tests: %6.0f Number of observations for estimation: %6.0f \n',
              object$order, object$cap_T - object$opt$N))
  cat(sprintf('Restricted constant: %6s Initial values: %6.0f\n',
              yesNo[object$opt$rConstant+1], object$opt$N))
  cat(sprintf('Unrestricted constant:%6s Level parameter: %6s\n',
              yesNo[object$opt$unrConstant+1], yesNo[object$opt$levelParam+1]))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf('Parameter Estimates and Information Criteria:\n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf(' k r d b LogL LR pv AIC BIC'))
  cat(sprintf('\n'))
  for (k in seq(object$kmax, 0, by = -1)) {
    cat(sprintf('%2.0f %2.0f %4.3f %4.3f %7.2f %6.2f %5.3f %8.2f',
                k, object$r, object$D[k+1, 1], object$D[k+1, 2],
                object$loglik[k+1], object$LRtest[k+1],
                object$pvLRtest[k+1], object$aic[k+1]))
    if (k+1 == object$i_aic) { cat(sprintf('*')) } else { cat(sprintf(' ')) }
    cat(sprintf(' %8.2f', object$bic[k+1]))
    if (k+1 == object$i_bic) { cat(sprintf('*')) } else { cat(sprintf(' ')) }
    cat(sprintf('\n'))
  }
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf('Tests for Serial Correlation of Residuals: \n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf(' k pmvQ'))
  for (i in 1:object$p) {
    cat(sprintf(' pQ%1.0f pLM%1.0f', i, i))
  }
  cat(sprintf('\n'))
  for (k in seq(object$kmax, 0, by = -1)) {
    cat(sprintf('%2.0f ', k))
    cat(sprintf(' %4.2f', object$pvMVq[k+1, ]))
    for (i in 1:object$p) {
      cat(sprintf(' %4.2f %4.2f', object$pvWNQ[k+1, i], object$pvWNLM[k+1, i]))
    }
    cat(sprintf('\n'))
  }
  cat(sprintf('--------------------------------------------------------------------------------\n'))
}
FCVARrankTests <- function(x, k, opt) {
  cap_T <- nrow(x) - opt$N
  p <- ncol(x)
  tempPrint2Screen <- opt$print2screen
  bHat <- matrix(0, nrow = p+1, ncol = 1)
  dHat <- matrix(0, nrow = p+1, ncol = 1)
  LogL <- matrix(0, nrow = p+1, ncol = 1)
  LRstat <- matrix(0, nrow = p+1, ncol = 1)
  pv <- matrix(NA, nrow = p+1, ncol = 1)
  opt$print2screen <- 0
  opt$plotRoots <- 0
  opt$CalcSE <- 0
  if (opt$rConstant | opt$levelParam) {
    consT <- 1
  } else {
    consT <- 0
  }
  for (r in 0:p) {
    message(sprintf('Estimating for k = %d and r = %d.', k, r))
    results <- FCVARestn(x, k, r, opt)
    message(sprintf('Finished Estimation for k = %d and r = %d.', k, r))
    dHat[r+1] <- results$coeffs$db[1]
    bHat[r+1] <- results$coeffs$db[2]
    LogL[r+1] <- results$like
  }
  for (r in 0:(p-1)) {
    LRstat[r+1] <- -2*(LogL[r+1] - LogL[p+1])
    p_val <- NULL
    if (bHat[r+1] > 0 & bHat[r+1] < 2 &
        ((!opt$rConstant & !opt$unrConstant & !opt$levelParam) |
         (opt$rConstant & !opt$unrConstant & opt$restrictDB) |
         (opt$levelParam & !opt$unrConstant & opt$restrictDB))) {
      p_val <- fracdist::fracdist_values(iq = p - r, iscon = consT,
                                         bb = bHat[r+1], stat = LRstat[r+1])
    } else {
      warning(sprintf('P-values not calculated for the rank test with rank %d.\n', r),
              'P-values are only calculated if:\n',
              '1. there are no deterministic terms, or\n',
              '2. there is only restricted constant and d = b, or\n',
              '3. there is only a level parameter and d = b.\n')
    }
    if (!is.null(p_val)) {
      pv[r+1] <- p_val
    }
  }
  rankTestStats <- list(
    dHat = dHat,
    bHat = bHat,
    LogL = LogL,
    LRstat = LRstat,
    pv = pv,
    k = k,
    p = p,
    cap_T = cap_T,
    opt = opt
  )
  class(rankTestStats) <- 'FCVAR_ranks'
  opt$print2screen <- tempPrint2Screen
  if (opt$print2screen) {
    summary(object = rankTestStats)
  }
  return(rankTestStats)
}
summary.FCVAR_ranks <- function(object, ...) {
  yesNo <- c('No','Yes')
  cat(sprintf('\n--------------------------------------------------------------------------------\n'))
  cat(sprintf(' Likelihood Ratio Tests for Cointegrating Rank \n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf('Dimension of system: %6.0f Number of observations in sample: %6.0f \n',
              object$p, object$cap_T + object$opt$N))
  cat(sprintf('Number of lags: %6.0f Number of observations for estimation: %6.0f \n',
              object$k, object$cap_T))
  cat(sprintf('Restricted constant: %6s Initial values: %6.0f\n',
              yesNo[object$opt$rConstant+1], object$opt$N))
  cat(sprintf('Unrestricted constant: %6s Level parameter: %6s\n',
              yesNo[object$opt$unrConstant+1], yesNo[object$opt$levelParam+1]))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf('Rank d b Log-likelihood LR statistic P-value\n'))
  for (i in 1:object$p) {
    if (!is.na(object$pv[i])) {
      cat(sprintf('%2.0f %5.3f %5.3f %15.3f %13.3f %8.3f\n',
                  i-1, object$dHat[i], object$bHat[i], object$LogL[i],
                  object$LRstat[i], object$pv[i]))
    } else {
      cat(sprintf('%2.0f %5.3f %5.3f %15.3f %13.3f ----\n',
                  i-1, object$dHat[i], object$bHat[i], object$LogL[i],
                  object$LRstat[i]))
    }
  }
  # After the loop, i == object$p, so this prints the full-rank (r = p) row,
  # for which no LR statistic or p-value is defined.
  cat(sprintf('%2.0f %5.3f %5.3f %15.3f ---- ----\n',
              i, object$dHat[i+1], object$bHat[i+1], object$LogL[i+1]))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
}
FCVARbootRank <- function(x, k, opt, r1, r2, B) {
  cap_T <- nrow(x) - opt$N
  data <- x[1:(k+1), ]
  LR <- matrix(0, nrow = B, ncol = 1)
  print2screen <- opt$print2screen
  opt$print2screen <- 0
  opt$CalcSE <- 0
  opt$plotRoots <- 0
  mBS <- FCVARestn(x, k, r1, opt)
  mUNR <- FCVARestn(x, k, r2, opt)
  H <- list(LRstat = NA, pvBS = NA)
  H$LRstat <- -2*(mBS$like - mUNR$like)
  show_iters <- 10
  for (j in 1:B) {
    if (round((j+1)/show_iters) == (j+1)/show_iters) {
      message(sprintf('Completed bootstrap replication %d of %d.', j, B))
    }
    xBS <- FCVARsimBS(data, mBS, cap_T)
    BSs <- rbind(data, xBS)
    mUNRbs <- FCVARestn(BSs, k, r2, opt)
    mRES <- FCVARestn(BSs, k, r1, opt)
    LR[j] <- -2*(mRES$like - mUNRbs$like)
  }
  LRbs <- LR[order(LR)]
  H$pvBS <- sum(LRbs > H$LRstat)/B
  opt$print2screen <- print2screen
  if (opt$print2screen) {
    cat(sprintf('Bootstrap rank test results:'))
    cat(sprintf('\nUnrestricted log-likelihood: %3.3f\nRestricted log-likelihood: %3.3f\n',
                mUNR$like, mBS$like))
    cat(sprintf('Test results:\nLR statistic: \t %3.3f\nP-value (BS): \t %1.3f\n',
                H$LRstat, H$pvBS))
  }
  FCVARbootRank_stats <- list(
    LRbs = LRbs,
    H = H,
    mBS = mBS,
    mUNR = mUNR
  )
  return(FCVARbootRank_stats)
}
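## Usage sketch (hedged), not from the package source: the intended workflow
## pairs lag selection with rank tests at the selected lag. It assumes the
## FCVAR package's bundled example data `votingJNP2014` and the defaults
## returned by FCVARoptions(); it runs (kmax + 1) full estimations, so it is slow.
if (requireNamespace("FCVAR", quietly = TRUE)) {
  opt <- FCVAR::FCVARoptions()
  data("votingJNP2014", package = "FCVAR")
  x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
  lagStats <- FCVAR::FCVARlagSelect(x, kmax = 3, r = 3, order = 12, opt = opt)
  kHat <- lagStats$i_bic - 1  # i_bic indexes k = 0, ..., kmax
  rankStats <- FCVAR::FCVARrankTests(x, k = kHat, opt = opt)
}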
as.phylo.bammdata <- function(x, ...) {
    if (!inherits(x, 'bammdata')) {
        stop("Object x must be of class bammdata\n");
    }
    newphylo <- list();
    newphylo$edge <- x$edge;
    newphylo$Nnode <- x$Nnode;
    newphylo$tip.label <- x$tip.label;
    newphylo$edge.length <- x$edge.length;
    class(newphylo) <- 'phylo';
    attributes(newphylo)$order <- attributes(x)$order;
    if (attributes(newphylo)$order != "cladewise") {
        newphylo <- reorder(newphylo);
    }
    return(newphylo);
}
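## Usage sketch (hedged): convert an event-mapped 'bammdata' object back to a
## plain ape 'phylo', e.g. for functions that only accept 'phylo'. It assumes
## the example data sets `whales` and `events.whales` shipped with BAMMtools.
if (requireNamespace("BAMMtools", quietly = TRUE) &&
    requireNamespace("ape", quietly = TRUE)) {
    data(whales, events.whales, package = "BAMMtools")
    ed <- BAMMtools::getEventData(whales, events.whales, burnin = 0.25)
    tr <- as.phylo.bammdata(ed)
    ape::is.ultrametric(tr)
}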
if (!isGeneric('linkOTB')) {
  setGeneric('linkOTB', function(x, ...)
    standardGeneric('linkOTB'))
}
linkOTB <- function(bin_OTB = NULL,
                    root_OTB = NULL,
                    type_OTB = NULL,
                    searchLocation = NULL,
                    ver_select = FALSE,
                    quiet = TRUE,
                    returnPaths = TRUE) {
  if (is.null(searchLocation)) {
    if (Sys.info()["sysname"] == "Windows") {
      searchLocation <- "C:"
    } else {
      searchLocation <- "~"
    }
  }
  params_OTB <- findOTB(searchLocation = searchLocation, quiet = quiet)
  if (params_OTB[[1]][1] != FALSE) {
    if (Sys.info()["sysname"] != "Windows") {
      if (nrow(params_OTB) == 1) {
        pathOTB <- params_OTB[1]
        otbCmd <- params_OTB[2]
      } else if (nrow(params_OTB) > 1 & is.numeric(ver_select) & ver_select > 0) {
        if (!quiet) {
          cat("You have more than one valid OTB version\n")
          print(params_OTB, right = FALSE, row.names = TRUE)
        }
        cat("You have chosen version: ", ver_select, "\n")
        if (is.null(type_OTB)) {
          pathOTB <- params_OTB$binDir[[ver_select]]
          otbCmd <- params_OTB$otbCmd[[ver_select]]
        }
      } else if (nrow(params_OTB) > 1 & (!ver_select)) {
        if (!quiet) {
          cat("You have more than one valid OTB version\n")
          print(params_OTB, right = FALSE, row.names = TRUE)
        }
        ver <- getrowotbVer(params_OTB$binDir)
        pathOTB <- params_OTB$binDir[[ver]]
        otbCmd <- params_OTB$otbCmd[[ver]]
        if (!quiet) {
          cat("\nSelect: ", ver)
        }
      } else if (nrow(params_OTB) > 1 & ver_select) {
        cat("You have more than one valid OTB version\n")
        print(params_OTB, right = FALSE, row.names = TRUE)
        if (is.null(type_OTB)) {
          ver <- as.numeric(readline(prompt = "Please choose one: "))
          pathOTB <- params_OTB$binDir[[ver]]
          otbCmd <- params_OTB$otbCmd[[ver]]
        }
      }
    } else {
      if (nrow(params_OTB) == 1) {
        pathOTB <- setenvOTB(bin_OTB = params_OTB$binDir[1],
                             root_OTB = params_OTB$baseDir[2])
      } else if (nrow(params_OTB) > 1 & ver_select) {
        if (!quiet) {
          cat("You have more than one valid OTB version\n")
          print(params_OTB[1], right = FALSE, row.names = TRUE)
        }
        if (is.null(type_OTB)) {
          ver <- as.numeric(readline(prompt = "Please choose one: "))
          pathOTB <- setenvOTB(bin_OTB = params_OTB$binDir[[ver]],
                               root_OTB = params_OTB$baseDir[[ver]])
        } else {
          pathOTB <- setenvOTB(bin_OTB = params_OTB[params_OTB["installationType"] == type_OTB][1],
                               root_OTB = params_OTB[params_OTB["installationType"] == type_OTB][2])
        }
      } else if (nrow(params_OTB) > 1 & is.numeric(ver_select) & ver_select > 0) {
        if (!quiet) {
          cat("You have more than one valid OTB version\n")
          print(params_OTB, right = FALSE, row.names = TRUE)
        }
        cat("You have chosen version: ", ver_select, "\n")
        if (is.null(type_OTB)) {
          pathOTB <- params_OTB$binDir[[ver_select]]
          otbCmd <- params_OTB$otbCmd[[ver_select]]
        }
      } else if (nrow(params_OTB) > 1 & (!ver_select)) {
        if (!quiet) {
          cat("You have more than one valid OTB version\n")
          print(params_OTB, right = FALSE, row.names = TRUE)
        }
        ver <- getrowotbVer(params_OTB$binDir)
        pathOTB <- params_OTB$binDir[[ver]]
        otbCmd <- params_OTB$otbCmd[[ver]]
        if (!quiet) cat("\nSelect: ", ver)
      }
    }
    otb <- list()
    names(pathOTB) <- NULL
    otb$pathOTB <- pathOTB
    otb$version <- params_OTB
    otb$exist <- TRUE
  } else {
    otb <- list()
    otb$exist <- FALSE
    returnPaths <- TRUE
  }
  if (returnPaths) return(otb)
}
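## Usage sketch (hedged): probe the file system for an Orfeo Toolbox install
## and guard downstream calls on the `exist` flag. Wrapped in if (FALSE)
## because it scans the file system and may prompt for a version choice.
if (FALSE) {
  otb <- linkOTB(searchLocation = "~", quiet = TRUE)
  if (otb$exist) print(otb$pathOTB)
}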
library(ggplot2)
library(data.table)

a <- c(2.5, 2, 1, 1.5)
d <- c(-1.5, -1, -0.5, 0)
theta <- seq(-4, 4, 0.01)

ccnrm <- function(theta, a, d) {
  exp(d + a * theta)
}

df <- sapply(1:length(d), function(i) ccnrm(theta, a[i], d[i]))
df <- data.frame(1, df)
denom <- apply(df, 1, sum)
df <- apply(df, 2, function(x) x / denom)
df1 <- melt(data.frame(df, theta), id.vars = "theta")

ggplot(data = df1, aes(x = theta, y = value, col = variable)) +
  geom_line() +
  xlab("Ability") +
  ylab("Category probability") +
  xlim(-4, 4) +
  ylim(0, 1) +
  theme_bw() +
  theme(
    text = element_text(size = 14),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank()
  ) +
  ggtitle("Category probabilities") +
  scale_color_manual("",
    values = c("black", "red", "yellow", "green", "blue"),
    labels = paste0("P(Y = ", 0:4, ")")
  )

df2 <- data.frame(exp = as.matrix(df) %*% 0:4, theta)

ggplot(data = df2, aes(x = theta, y = exp)) +
  geom_line() +
  xlab("Ability") +
  ylab("Expected item score") +
  xlim(-4, 4) +
  ylim(0, 4) +
  theme_bw() +
  theme(
    text = element_text(size = 14),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank()
  ) +
  ggtitle("Expected item score")
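## Sanity checks on the construction above (added for illustration): the
## category probabilities are a softmax over exp(d + a * theta) with category
## 0 as baseline, so each row of `df` must sum to one, and the expected item
## score must lie in [0, 4].
stopifnot(all(abs(rowSums(df) - 1) < 1e-12))
stopifnot(all(df2$exp >= 0 & df2$exp <= 4))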
expected <- eval(parse(text="FALSE")); test(id=0, code={ argv <- eval(parse(text="list(list(list(1)))")); do.call(`is.na`, argv); }, o=expected);
CalculateSampleCovariance <- function(x, y, verbose = TRUE) {
  n <- length(x)
  if (n <= 1 || n != length(y)) {
    stop("Arguments x and y have invalid lengths: ",
         length(x), " and ", length(y), ".")
  }
  if (TRUE %in% is.na(x) || TRUE %in% is.na(y)) {
    stop(" Arguments x and y must not have missing values.")
  }
  covariance <- var(x, y)
  if (verbose)
    cat("Covariance = ", round(covariance, 4), ".\n", sep = "")
  return(covariance)
}
str_match_perl <- function(string, pattern){
  parsed <- regexpr(pattern, string, perl = TRUE)
  names <- attr(parsed, "capture.names")
  captured.text <- substr(string, parsed, parsed + attr(parsed, "match.length") - 1)
  captured.text[captured.text == ""] <- NA
  captured.groups <- do.call(rbind, lapply(seq_along(string), function(i){
    if (is.na(parsed[i]) || parsed[i] == -1) return(rep(NA, length(names)))
    st <- attr(parsed, "capture.start")[i, ]
    substring(string[i], st, st + attr(parsed, "capture.length")[i, ] - 1)
  }))
  result <- cbind(captured.text, captured.groups)
  colnames(result) <- c("", names)
  result
}
str_match_all_perl <- function(string, pattern){
  parsed <- gregexpr(pattern, string, perl = TRUE)
  lapply(seq_along(parsed), function(i){
    r <- parsed[[i]]
    starts <- attr(r, "capture.start")
    if (r[1] == -1) return(matrix(nrow = 0, ncol = 1 + ncol(starts)))
    names <- attr(r, "capture.names")
    lengths <- attr(r, "capture.length")
    full <- substring(string[i], r, r + attr(r, "match.length") - 1)
    subs <- substring(string[i], starts, starts + lengths - 1)
    m <- matrix(c(full, subs), ncol = length(names) + 1)
    colnames(m) <- c("", names)
    m
  })
}
google <- function(src, ...){
  ## Comment-prefix pattern. The original string was truncated at the '#' by
  ## comment stripping; "\\W*#\\W*" is a reconstruction (an assumption)
  ## consistent with how `pre` is used in the patterns and gsub() calls below.
  pre <- "\\W*#\\W*"
  lines.pat <- paste("(?:", pre, ".*?\n)+", sep = "")
  google.doc.pattern <-
    paste("(?<description>", lines.pat, ")",
          pre, "Args:\\W*\n",
          "(?<args>", lines.pat, ")",
          pre, "Returns:\\W*\n",
          "(?<value>", lines.pat, ")",
          sep = "")
  lines <- paste(src, collapse = "\n")
  parsed <- str_match_perl(lines, google.doc.pattern)
  if (is.na(parsed[1])) return(list())
  docs <- list(description = gsub(pre, "", parsed[, "description"]),
               value = gsub(pre, "", parsed[, "value"]))
  arg.pat <- paste(pre, "(?<name>[a-z]+): ",
                   "(?<content>.*?\n",
                   "(?:", pre, "[^:]*?\n)*)",
                   sep = "")
  m <- str_match_all_perl(parsed[, "args"], arg.pat)[[1]]
  argsList <- as.list(gsub(paste("\n", pre, sep = ""), "\n", m[, "content"]))
  names(argsList) <- sprintf("item{%s}", m[, "name"])
  lapply(c(docs, argsList), function(x) gsub("\n$", "", x))
}
.parsers <- list(google = inlinedocs::forfun(google))
.dontcheck <- TRUE
.result <-
  list(google = list(description = "Extract docs from google header comments.",
                     `item{src}` = "lines of code of the function source.",
                     value = "An inner Documentation List."),
       str_match_all_perl = list(),
       str_match_perl = list(),
       CalculateSampleCovariance = list(
         description = "Computes the sample covariance between two vectors.",
         `item{x}` = "One of two vectors whose sample covariance is to be calculated.",
         `item{y}` = "The other vector. x and y must have the same length, greater than one,\nwith no missing values.",
         `item{verbose}` = "If TRUE, prints sample covariance; if not, not. Default is TRUE.",
         value = "The sample covariance between x and y."))
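## Worked example (hedged): feed google() the source lines of a function whose
## header comments follow the Google R style. Because the comment-prefix
## pattern `pre` above is a reconstruction, the exact matching behaviour is an
## assumption; the input lines below are illustrative.
src <- c(
  "CalculateSampleCovariance <- function(x, y, verbose = TRUE) {",
  "  # Computes the sample covariance between two vectors.",
  "  #",
  "  # Args:",
  "  #   x: One of two vectors whose sample covariance is to be calculated.",
  "  #   y: The other vector.",
  "  # Returns:",
  "  #   The sample covariance between x and y.",
  "  ...",
  "}")
google(src)  # list with description, value, and item{x}/item{y} entries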
mat_peptide.F <- function(Feat, SQuali, SQualiN){
  Item <- c("/allele=", "/citation=", "/db_xref=", "/EC_number=",
            "/experiment=", "/function=", "/gene=", "/gene_synonym=",
            "/inference=", "/locus_tag=", "/map=", "/note=",
            "/old_locus_tag=", "/product=", "/pseudogene=", "/standard_name=")
  ItemN <- c("allele", "citation", "db_xref", "EC_number", "experiment",
             "function", "gene", "gene_synonym", "inference", "locus_tag",
             "map", "note", "old_locus_tag", "product", "pseudogene",
             "standard_name")
  Feat[length(Feat)] <- gsub("\\\",$", "", Feat[length(Feat)])
  mat_peptide <- data.frame("Location" = "mat_peptide",
                            "Qualifier" = gsub(".*mat_peptide +([^.]+)\"*", "\\1",
                                               Feat[1], perl = T),
                            stringsAsFactors = F)
  for (i in 2:length(Feat)) {
    for (j in 1:length(Item)) {
      if (length(grep(Item[j], Feat[i], perl = T)) == 1) {
        mat_peptide <- rbind(mat_peptide,
                             c(ItemN[j], gsub(".*=([^.]+)\"*", "\\1", Feat[i])))
        if ((length(grep("\\\\\"$|\"\\\", $|\\d$", Feat[i])) == 0 & i != length(Feat)) == T) {
          t <- i+1
          while (length(grep("\\\\\"$|\"\\\", $|\\d$", Feat[t])) == 0 && t <= length(Feat)) {
            mat_peptide[dim(mat_peptide)[1], 2] <- paste(mat_peptide[dim(mat_peptide)[1], 2],
                                                         gsub("\\s", " ", Feat[t]), sep = " ")
            t <- t+1
          }
          if (length(grep("\\\\\"$|\"\\\", $|\\d$", Feat[t])) == 1) {
            mat_peptide[dim(mat_peptide)[1], 2] <- paste(mat_peptide[dim(mat_peptide)[1], 2],
                                                         gsub("\\s", " ", Feat[t]), sep = " ")
          }
        }
      }
    }
    for (k in 1:length(SQuali)) {
      if (length(grep(SQuali[k], Feat[i], perl = T)) == 1) {
        mat_peptide <- rbind(mat_peptide, c(SQuali[k], SQualiN[k]))
      }
    }
  }
  mat_peptide <- apply(mat_peptide, 2, function(x){ gsub(" {2,}", " ", x, perl = TRUE) })
  mat_peptide <- apply(mat_peptide, 2, function(x){ gsub("\"", "", x, fixed = T) })
  mat_peptide <- apply(mat_peptide, 2, function(x){ gsub("\\", "", x, fixed = T) })
  mat_peptide <- apply(mat_peptide, 2, function(x){ gsub("[^[:alnum:][:space:][]'.,:_<>()-]", "", x, perl = TRUE) })
  return(mat_peptide)
}
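## Usage sketch (hedged): `Feat` holds the lines of one mat_peptide feature
## block from a GenBank flat file, with embedded quotes escaped as in the
## corpus this parser targets; SQuali/SQualiN are parallel vectors of
## value-less qualifiers and their display names. The record is illustrative.
Feat <- c(
  "     mat_peptide     266..806",
  "                     /gene=\\\"ORF1ab\\\"",
  "                     /product=\\\"leader protein\\\"")
mat_peptide.F(Feat, SQuali = "/ribosomal_slippage", SQualiN = "ribosomal_slippage")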
context("edge_case") test_that("edge cases work or throw proper errors", { set.seed(1) dds <- makeExampleDESeqDataSet(n=1) sizeFactors(dds) <- rep(1,ncol(dds)) dispersions(dds) <- .5 dds <- nbinomWaldTest(dds) res <- results(dds) dds <- nbinomLRT(dds, reduced=~1) res <- results(dds) set.seed(1) dds <- makeExampleDESeqDataSet(n=100) design(dds) <- ~ 1 expect_warning({dds <- DESeq(dds)}) res <- results(dds) dds <- makeExampleDESeqDataSet(n=50,m=4) dds2 <- DESeqDataSetFromMatrix( counts(dds), colData(dds), design(dds) ) mcols(dds2)$foo <- paste( "bar", 1:nrow(dds2) ) dds2 <- DESeq(dds2) results(dds2) expect_true(class(mcols(mcols(dds2))$type) == "character") dds3 <- DESeqDataSetFromMatrix( counts(dds), DataFrame(row.names=colnames(dds)), ~ 1 ) dds3$test <- 1:ncol(dds3) dds3 <- estimateSizeFactors(dds3) expect_true(class(mcols(colData(dds3))$type) == "character") dds <- makeExampleDESeqDataSet(n=50,m=4) levels(dds$condition) <- c("A_1","B_2") dds$exp_cond <- dds$condition design(dds) <- ~ exp_cond dds <- DESeq(dds) results(dds) dds <- makeExampleDESeqDataSet(n=50,m=4) colData(dds)$condition[4] <- NA expect_error(DESeq(dds)) })
context("predictor pre-processing") library(testthat) library(sparsediscrim) library(modeldata) data(scat, package = "modeldata") test_that("formula method", { expect_warning( mod <- lda_diag(Species ~ ., data = scat[1:90, ]), "had zero variance" ) expect_equal( mod$N, sum(complete.cases(scat[1:90, ])) ) missing_rows <- which(!complete.cases(scat[-(1:90), -1])) pred_cls <- predict(mod, newdata = scat[-(1:90), -1]) pred_prb <- predict(mod, newdata = scat[-(1:90), -1], type = "prob") expect_true( all(!is.na(pred_prb[-missing_rows,])) ) expect_true( all(is.na(pred_cls[missing_rows])) ) expect_true( all(!is.na(pred_cls[-missing_rows])) ) expect_true( all(is.na(pred_cls[missing_rows])) ) }) test_that("x/y method", { mod <- lda_diag(x = scat[1:90, 6:12], y = scat$Species[1:90]) expect_equal( mod$N, sum(complete.cases(scat[1:90, 6:12])) ) missing_rows <- which(!complete.cases(scat[-(1:90), 6:12])) pred_cls <- predict(mod, newdata = scat[-(1:90), 6:12]) pred_prb <- predict(mod, newdata = scat[-(1:90), 6:12], type = "prob") expect_true( all(!is.na(pred_prb[-missing_rows,])) ) expect_true( all(is.na(pred_cls[missing_rows])) ) expect_true( all(!is.na(pred_cls[-missing_rows])) ) expect_true( all(is.na(pred_cls[missing_rows])) ) })
context("emis_post") df <- as.data.frame(matrix(1:100, ncol =10, nrow = 10)) wCO <- emis_order(x = df, lt_emissions = "2020-02-19 00:00", start_utc_time = "2020-02-20 00:00", desired_length = 78)
autoTranslate = function(genes,
                         targetGenes,
                         possibleOrigins = NULL,
                         possibleTargets = NULL,
                         returnAllPossible = FALSE,
                         db = homologene::homologeneData){
  pairwise = db$Taxonomy %>% unique %>% utils::combn(2) %>%
    {cbind(., .[c(2,1),], rbind(db$Taxonomy %>% unique, db$Taxonomy %>% unique))}
  if (!is.null(possibleOrigins)){
    possibleOrigins[possibleOrigins == 'human'] = 9606
    possibleOrigins[possibleOrigins == 'mouse'] = 10090
    pairwise = pairwise[, pairwise[1,] %in% possibleOrigins, drop = FALSE]
  } else{
    possibleOrigins = db$Taxonomy %>% unique
  }
  if (!is.null(possibleTargets)){
    possibleTargets[possibleTargets == 'human'] = 9606
    possibleTargets[possibleTargets == 'mouse'] = 10090
    pairwise = pairwise[, pairwise[2,] %in% possibleTargets, drop = FALSE]
  } else{
    possibleTargets = db$Taxonomy %>% unique
  }
  possibleOriginData = db %>%
    dplyr::filter(Taxonomy %in% possibleOrigins &
                    (Gene.Symbol %in% genes | Gene.ID %in% genes)) %>%
    dplyr::group_by(Taxonomy)
  possibleOriginCounts = possibleOriginData %>% dplyr::summarise(n = dplyr::n())
  possibleTargetData = db %>%
    dplyr::filter(Taxonomy %in% possibleTargets &
                    (Gene.Symbol %in% targetGenes | Gene.ID %in% targetGenes)) %>%
    dplyr::group_by(Taxonomy)
  possibleTargetCounts = possibleTargetData %>% dplyr::summarise(n = dplyr::n())
  pairwise = pairwise[, pairwise[1,] %in% possibleOriginCounts$Taxonomy, drop = FALSE]
  pairwise = pairwise[, pairwise[2,] %in% possibleTargetCounts$Taxonomy, drop = FALSE]
  pairwise %>% apply(2, function(taxes){
    homologene(genes, inTax = taxes[1], outTax = taxes[2])
  }) %>% {.[purrr::map_int(., nrow) > 0]} -> possibleTranslations
  possibleTranslations %>% sapply(function(trans){
    sum(c(trans[,2], trans[,4]) %in% targetGenes)
  }) -> translationCounts
  if (!returnAllPossible){
    translationCounts %>% which.max %>% {possibleTranslations[[.]]} -> possibleTranslations
    if (sum(translationCounts > 0) > 1){
      bestMatch = translationCounts %>% which.max
      nextBest = max(translationCounts[-bestMatch])
      warning('There are other pairings, best of which has ', nextBest,
              ' matching genes')
    }
  } else{
    possibleTranslations = possibleTranslations[translationCounts != 0]
  }
  return(possibleTranslations)
}
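## Usage sketch (hedged): let autoTranslate() pick the taxon pairing that best
## explains two symbol lists; the gene symbols below are illustrative.
if (requireNamespace("homologene", quietly = TRUE)) {
  library(homologene)  # homologene() is called unqualified inside autoTranslate()
  library(magrittr)    # `%>%` is also used unqualified in the function body
  mouseGenes <- c("Eno2", "Mog", "Gfap")
  humanGenes <- c("ENO2", "MOG", "GFAP")
  autoTranslate(mouseGenes, humanGenes,
                possibleOrigins = "mouse", possibleTargets = "human")
}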
context("Test deletion of cached files and cached file metadata") test_that("clear_cached_files gives an error if no metadata file exists", { skip_on_cran() galah_config(caching = TRUE, cache_directory = tempdir()) suppressWarnings(file.remove(file.path(tempdir(), 'metadata.rds'))) expect_message(clear_cached_files()) galah_config(caching = FALSE) })
phi.fun.1d <- function(rho, lambda, psi1, psi1.apriori, psi2,
                       psi2.apriori, theta.apriori) {
    pdm.maker.psi1 <- function(psi1) {
        jj.omega_x <- diag(psi1[1], nrow = 1)
        rownames(jj.omega_x) <- "x"
        colnames(jj.omega_x) <- "x"
        jj.omega_t <- diag(psi1[2], ncol = 1)
        rownames(jj.omega_t) <- "A"
        colnames(jj.omega_t) <- "A"
        sigma1squared <- psi1[3]
        return(list(omega_x = jj.omega_x, omega_t = jj.omega_t,
                    sigma1squared = sigma1squared))
    }
    pdm.maker.psi2 <- function(psi2) {
        jj.omegastar_x <- diag(psi2[1], ncol = 1)
        rownames(jj.omegastar_x) <- "x"
        colnames(jj.omegastar_x) <- "x"
        sigma2squared <- psi2[2]
        return(list(omegastar_x = jj.omegastar_x, sigma2squared = sigma2squared))
    }
    jj.mean <- theta.apriori$mean
    jj.V_theta <- theta.apriori$sigma
    jj.discard.psi1 <- pdm.maker.psi1(psi1)
    jj.omega_t <- jj.discard.psi1$omega_t
    jj.omega_x <- jj.discard.psi1$omega_x
    jj.sigma1squared <- jj.discard.psi1$sigma1squared
    jj.discard.psi2 <- pdm.maker.psi2(psi2)
    jj.omegastar_x <- jj.discard.psi2$omegastar_x
    jj.sigma2squared <- jj.discard.psi2$sigma2squared
    jj.omega_t.upper <- chol(jj.omega_t)
    jj.omega_t.lower <- t(jj.omega_t.upper)
    jj.omega_x.upper <- chol(jj.omega_x)
    jj.omega_x.lower <- t(jj.omega_x.upper)
    jj.a <- solve(solve(jj.V_theta) + 2 * jj.omega_t,
                  solve(jj.V_theta, jj.mean))
    jj.b <- t(2 * solve(solve(jj.V_theta) + 2 * jj.omega_t) %*% jj.omega_t)
    jj.c <- jj.sigma1squared/sqrt(det(diag(nrow = nrow(jj.V_theta)) +
                                      2 * jj.V_theta %*% jj.omega_t))
    names(jj.c) <- "ht.fun.precalc"
    jj.A <- solve(jj.V_theta + solve(jj.omega_t)/4)
    jj.A.upper <- chol(jj.A)
    jj.A.lower <- t(jj.A.upper)
    list(rho = rho, lambda = lambda, psi1 = psi1, psi1.apriori = psi1.apriori,
         psi2 = psi2, psi2.apriori = psi2.apriori, theta.apriori = theta.apriori,
         omega_x = jj.omega_x, omega_t = jj.omega_t, omegastar_x = jj.omegastar_x,
         sigma1squared = jj.sigma1squared, sigma2squared = jj.sigma2squared,
         omega_x.upper = jj.omega_x.upper, omega_x.lower = jj.omega_x.lower,
         omega_t.upper = jj.omega_t.upper, omega_t.lower = jj.omega_t.lower,
         a = jj.a, b = jj.b, c = jj.c, A = jj.A,
         A.upper = jj.A.upper, A.lower = jj.A.lower)
}
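## Usage sketch (hedged): hyperparameters for a one-dimensional calibration
## problem in the style of the calibrator package. The prior values below are
## illustrative placeholders, not recommended settings.
theta.apriori <- list(mean = c(A = 0),
                      sigma = matrix(1, 1, 1, dimnames = list("A", "A")))
phi <- phi.fun.1d(rho = 1, lambda = 1,
                  psi1 = c(1, 0.5, 0.3), psi1.apriori = NULL,
                  psi2 = c(1, 0.2), psi2.apriori = NULL,
                  theta.apriori = theta.apriori)
phi$c  # sigma1squared scaled by the determinant term ("ht.fun.precalc")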