spFeatureSelection <- function( task, wrapper, measure, norm.method = 'standardize', num.features.selected, ... ){ model <- spFSR.default( task = task, wrapper = wrapper, measure = measure, norm.method = norm.method, num.features.selected = num.features.selected, ... ) model }
trunc_mat <- function(x, n = NULL){ rows <- nrow(x) if (!is.na(rows) && rows == 0) return() if (is.null(n)) { if (is.na(rows) || rows > 100) { n <- 10 } else { n <- rows } } df <- as.data.frame(head(x, n)) if (nrow(df) == 0) return() mat <- format(df, justify = "left") width <- getOption("width") values <- c(format(rownames(mat))[[1]], unlist(mat[1, ])) names <- c("", colnames(mat)) w <- pmax(nchar(values), nchar(names)) cumw <- cumsum(w + 1) too_wide <- cumw[-1] > width if (all(too_wide)) { too_wide[1] <- FALSE df[[1]] <- substr(df[[1]], 1, width) } shrunk <- format(df[, !too_wide, drop = FALSE]) needs_dots <- is.na(rows) || rows > n if (needs_dots) { dot_width <- pmin(w[-1][!too_wide], 3) dots <- vapply(dot_width, function(i) paste(rep(".", i), collapse = ""), FUN.VALUE = character(1)) shrunk <- rbind(shrunk, .. = dots) } print(shrunk) if (any(too_wide)) { vars <- colnames(mat)[too_wide] types <- vapply(df[too_wide], type_sum, character(1)) var_types <- paste0(vars, " (", types, ")", collapse = ", ") cat(snap_wrap("Variables not shown: ", var_types), "\n", sep = "") } } snap_wrap <- function(..., indent = 0, width = getOption("width")){ x <- paste0(..., collapse = "") wrapped <- strwrap(x, indent = indent, exdent = indent + 5, width = width) paste0(wrapped, collapse = "\n") } type_sum <- function(x) UseMethod("type_sum") type_sum.default <- function(x) unname(abbreviate(class(x)[1], 4)) type_sum.character <- function(x) "chr" type_sum.Date <- function(x) "date" type_sum.factor <- function(x) "fctr" type_sum.integer <- function(x) "int" type_sum.logical <- function(x) "lgl" type_sum.array <- function(x){ paste0(NextMethod(), "[", paste0(dim(x), collapse = ","), "]") } type_sum.matrix <- function(x){ paste0(NextMethod(), "[", paste0(dim(x), collapse = ","), "]") } type_sum.numeric <- function(x) "dbl" type_sum.POSIXt <- function(x) "time" obj_type <- function(x) { if (!is.object(x)) { paste0("<", type_sum(x), if (!is.array(x)) paste0("[", length(x), "]"), ">") } else if (!isS4(x)) { paste0("<S3:", paste0(class(x), collapse = ", "), ">") } else { paste0("<S4:", paste0(is(x), collapse = ", "), ">") } }
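# Illustrative usage sketch (not part of the original source): exercising the helpers defined above.
# trunc_mat() prints the first rows of a data frame, dropping columns that would overflow the console
# width; obj_type() gives a compact type descriptor.
trunc_mat(mtcars, n = 5)   # prints 5 rows plus a ".." row and lists any columns too wide to display
obj_type(1:5)              # "<int[5]>"
obj_type(letters)          # "<chr[26]>"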
.validInput <- function(input = NULL, name = NULL, valid = NULL){ valid <- unique(valid) if(is.character(valid)){ valid <- tolower(valid) }else{ stop("Validator must be a character!") } if(!is.character(name)){ stop("name must be a character!") } if("null" %in% tolower(valid)){ valid <- c("null", valid[which(tolower(valid) != "null")]) } av <- FALSE for(i in seq_along(valid)){ vi <- valid[i] if(vi == "integer" | vi == "wholenumber"){ if(all(is.numeric(input))){ cv <- min(abs(c(input%%1, input%%1-1)), na.rm = TRUE) < .Machine$double.eps^0.5 }else{ cv <- FALSE } }else if(vi == "null"){ cv <- is.null(input) }else if(vi == "bool" | vi == "boolean" | vi == "logical"){ cv <- is.logical(input) }else if(vi == "numeric"){ cv <- is.numeric(input) }else if(vi == "vector"){ cv <- is.vector(input) }else if(vi == "matrix"){ cv <- is.matrix(input) }else if(vi == "sparsematrix"){ cv <- is(input, "dgCMatrix") }else if(vi == "character"){ cv <- is.character(input) }else if(vi == "factor"){ cv <- is.factor(input) }else if(vi == "rlecharacter"){ cv1 <- is(input, "Rle") if(cv1){ cv <- is(input@values, "factor") || is(input@values, "character") }else{ cv <- FALSE } }else if(vi == "palette"){ cv <- all(.isColor(input)) }else if(vi == "timestamp"){ cv <- is(input, "POSIXct") }else if(vi == "dataframe" | vi == "data.frame" | vi == "df"){ cv1 <- is.data.frame(input) cv2 <- is(input, "DataFrame") cv <- any(cv1, cv2) }else if(vi == "fileexists"){ cv <- all(file.exists(input)) }else if(vi == "direxists"){ cv <- all(dir.exists(input)) }else if(vi == "granges" | vi == "gr"){ cv <- is(input, "GRanges") }else if(vi == "grangeslist" | vi == "grlist"){ cv <- .isGRList(input) }else if(vi == "list" | vi == "simplelist"){ cv1 <- is.list(input) cv2 <- is(input, "SimpleList") cv <- any(cv1, cv2) }else if(vi == "bsgenome"){ cv1 <- is(input, "BSgenome") cv2 <- tryCatch({ library(input) eval(parse(text=input)) }, error = function(e){ FALSE }) cv <- any(cv1, cv2) }else if(vi == "se" | vi == "summarizedexperiment"){ cv <- is(input, "SummarizedExperiment") }else if(vi == "seurat" | vi == "seuratobject"){ cv <- is(input, "Seurat") }else if(vi == "txdb"){ cv <- is(input, "TxDb") }else if(vi == "orgdb"){ cv <- is(input, "OrgDb") }else if(vi == "bsgenome"){ cv <- is(input, "BSgenome") }else if(vi == "parallelparam"){ cv <- is(input, "BatchtoolsParam") }else if(vi == "archrproj" | vi == "archrproject"){ cv <- is(input, "ArchRProject") }else{ stop("Validator is not currently supported by ArchR!") } if(cv){ av <- TRUE break } } if(av){ return(invisible(TRUE)) }else{ stop("Input value for '", name,"' is not a ", paste(valid, collapse="," ), ", (",name," = ",class(input),") please supply valid input!") } } .isWholenumber <- function(x, tol = .Machine$double.eps^0.5){ abs(x - round(x)) < tol } .isColor <- function(x = NULL){ unlist(lapply(x, function(y) tryCatch(is.matrix(col2rgb(y)), error = function(e) FALSE))) } .isDiscrete <- function(x = NULL){ is.factor(x) || is.character(x) || is.logical(x) } .isGRList <- function(x){ isList <- grepl("list", class(x), ignore.case=TRUE) if(!isList){ FALSE }else{ allGR <- all(unlist(lapply(x, function(x) is(x, "GRanges") ))) if(allGR){ TRUE }else{ FALSE } } } validBSgenome <- function(genome = NULL, masked = FALSE){ .validInput(input = genome, name = "genome", valid = c("character", "bsgenome")) .validInput(input = masked, name = "masked", valid = c("boolean")) stopifnot(!is.null(genome)) if(inherits(genome, "BSgenome")){ return(genome) }else if(is.character(genome)){ genome <- tryCatch({ 
.requirePackage(genome) bsg <- eval(parse(text = genome)) if(inherits(bsg, "BSgenome")){ return(bsg) }else{ stop("genome is not a BSgenome valid class!") } }, error = function(x){ BSgenome::getBSgenome(genome, masked = masked) }) return(genome) }else{ stop("Cannot validate BSgenome options are a valid BSgenome or character for getBSgenome") } } .validTxDb <- function(TxDb = NULL){ stopifnot(!is.null(TxDb)) if(inherits(TxDb, "TxDb")){ return(TxDb) }else if(is.character(TxDb)){ return(getTxDb(TxDb)) }else{ stop("Cannot validate TxDb options are a valid TxDb or character for getTxDb") } } .validOrgDb <- function(OrgDb = NULL){ stopifnot(!is.null(OrgDb)) if(inherits(OrgDb, "OrgDb")){ return(OrgDb) }else if(is.character(OrgDb)){ return(getOrgDb(OrgDb)) }else{ stop("Cannot validate OrgDb options are a valid OrgDb or character for getOrgDb") } } .validGRanges <- function(gr = NULL){ stopifnot(!is.null(gr)) if(inherits(gr, "GRanges")){ return(gr) }else{ stop("Error cannot validate genomic range!") } } .validGeneAnnotation <- function(geneAnnotation = NULL){ if(!inherits(geneAnnotation, "SimpleList")){ if(inherits(geneAnnotation, "list")){ geneAnnotation <- as(geneAnnotation, "SimpleList") }else{ stop("geneAnnotation must be a list/SimpleList of 3 GRanges for : Genes GRanges, Exons GRanges and TSS GRanges!") } } if(identical(sort(tolower(names(geneAnnotation))), c("exons", "genes", "tss"))){ gA <- SimpleList() gA$genes <- .validGRanges(geneAnnotation[[grep("genes", names(geneAnnotation), ignore.case = TRUE)]]) gA$exons <- .validGRanges(geneAnnotation[[grep("exons", names(geneAnnotation), ignore.case = TRUE)]]) gA$TSS <- .validGRanges(geneAnnotation[[grep("TSS", names(geneAnnotation), ignore.case = TRUE)]]) }else{ stop("geneAnnotation must be a list/SimpleList of 3 GRanges for : Genes GRanges, Exons GRanges and TSS GRanges!") } gA } .validGenomeAnnotation <- function(genomeAnnotation = NULL){ if(!inherits(genomeAnnotation, "SimpleList")){ if(inherits(genomeAnnotation, "list")){ genomeAnnotation <- as(genomeAnnotation, "SimpleList") }else{ stop("genomeAnnotation must be a list/SimpleList of 3 GRanges for : blacklist GRanges, chromSizes GRanges and genome BSgenome package string (ie hg38 or BSgenome.Hsapiens.UCSC.hg38)!") } } if(identical(sort(tolower(names(genomeAnnotation))), c("blacklist", "chromsizes", "genome"))){ gA <- SimpleList() gA$blacklist <- .validGRanges(genomeAnnotation[[grep("blacklist", names(genomeAnnotation), ignore.case = TRUE)]]) if(genomeAnnotation[[grep("genome", names(genomeAnnotation), ignore.case = TRUE)]]=="nullGenome"){ gA$genome <- "nullGenome" }else{ bsg <- validBSgenome(genomeAnnotation[[grep("genome", names(genomeAnnotation), ignore.case = TRUE)]]) gA$genome <- bsg@pkgname } gA$chromSizes <- .validGRanges(genomeAnnotation[[grep("chromsizes", names(genomeAnnotation), ignore.case = TRUE)]]) }else{ stop("genomeAnnotation must be a list/SimpleList of 3 GRanges for : blacklist GRanges, chromSizes GRanges and genome BSgenome package string (ie hg38 or BSgenome.Hsapiens.UCSC.hg38)!") } gA } .validGeneAnnoByGenomeAnno <- function(geneAnnotation, genomeAnnotation){ allSeqs <- unique(paste0(seqnames(genomeAnnotation$chromSizes))) geneSeqs <- unique(paste0(seqnames(geneAnnotation$genes))) if(!all(geneSeqs %in% allSeqs)){ geneNotIn <- geneSeqs[which(geneSeqs %ni% allSeqs)] message("Found Gene Seqnames not in GenomeAnnotation chromSizes, Removing : ", paste0(geneNotIn, collapse=",")) geneAnnotation$genes <- .subsetSeqnamesGR(geneAnnotation$genes, names = allSeqs) } exonSeqs <- 
unique(paste0(seqnames(geneAnnotation$exons))) if(!all(exonSeqs %in% allSeqs)){ exonNotIn <- exonSeqs[which(exonSeqs %ni% allSeqs)] message("Found Exon Seqnames not in GenomeAnnotation chromSizes, Removing : ", paste0(exonNotIn, collapse=",")) geneAnnotation$exons <- .subsetSeqnamesGR(geneAnnotation$exons, names = allSeqs) } TSSSeqs <- unique(paste0(seqnames(geneAnnotation$TSS))) if(!all(TSSSeqs %in% allSeqs)){ TSSNotIn <- TSSSeqs[which(TSSSeqs %ni% allSeqs)] message("Found TSS Seqnames not in GenomeAnnotation chromSizes, Removing : ", paste0(TSSNotIn, collapse=",")) geneAnnotation$TSS <- .subsetSeqnamesGR(geneAnnotation$TSS, names = allSeqs) } geneAnnotation } .validArchRProject <- function(ArchRProj = NULL){ if(!inherits(ArchRProj, "ArchRProject")){ stop("Not a valid ArchRProject as input!") }else{ ArchRProj } }
test_that("select_chr works a single match per pattern", { x <- c("ax", "bx") expect_equal( select_chr(x, starts_with("b"), starts_with("a")), c("bx", "ax") ) }) test_that("select_chr works with a multiple matches of one pattern", { x <- c("ax", "ay") expect_equal(select_chr(x, starts_with("a")), x) }) test_that("select_chr works with less matching items than data items", { x <- c("ax", "bx") expect_equal(select_chr(x, starts_with("b")), "bx") }) test_that("select_chr works with ambiguous matches", { x <- c("ax", "bx") expect_equal(select_chr(x, matches("x")), x) }) test_that("select_chr works with a vector containing duplicated items", { x <- c("a", "a", "b") expect_equal(select_chr(x, "a"), "a") })
nested <- function(web, method="binmatnest2", rescale=FALSE, normalised=TRUE){ if (! any(method %in% c("binmatnest", "discrepancy", "binmatnest2", "discrepancy2", "NODF", "NODF2", "weighted NODF", "wine", "C score", "checker", "ALL"))) stop("Typo? Unknown method!") if ("ALL" %in% method) index <- c("binmatnest", "discrepancy", "binmatnest2", "discrepancy2", "NODF", "NODF2", "weighted NODF", "wine", "C score", "checker") else index <- method out <- NULL if ("binmatnest2" %in% index){ nessy <- try(nestedtemp(web)$statistic, silent=TRUE) nessy.value <- if (inherits(nessy, "try-error")) NA else nessy out <- c(out, binmatnest2 = nessy.value) } if ("binmatnest" %in% index){ nessy <- try(nestedness(web, null.models = FALSE)$temperature, silent=TRUE) nessy.value <- if (inherits(nessy, "try-error")) NA else nessy out <- c(out, binmatnest = nessy.value) } if ("discrepancy2" %in% index) { out <- c(out, "discrepancy2"=nesteddisc(web)$statistic) } if ("discrepancy" %in% index) out <- c(out, "discrepancy"=unname(discrepancy(web))) if ("C score" %in% index) out <- c(out, "C score"=C.score(web, normalise=normalised)) if ("checker" %in% index) out <- c(out, "checker"=nestedchecker(web)$C.score) if ("NODF2" %in% index) out <- c(out, "NODF2"=unname(nestednodf(web, order=TRUE)$statistic[3])) if ("NODF" %in% index) out <- c(out, "NODF"=unname(nestednodf(web, order=FALSE)$statistic[3])) if ("weighted NODF" %in% index) out <- c(out, "weighted NODF"=unname(nestednodf(web, order=TRUE, weighted=TRUE)$statistic[3])) if ("wine" %in% index) out <- c(out, "wine"=wine(web)$wine) if (rescale & ! "ALL" %in% method) warning("You requested rescaling, but you won't get it (unless you use method='ALL')!") if (rescale & "ALL" %in% method) out <- abs(c(100,100,0,0,0,0,0,0,0,0) - out) out }
CommT.viz <- function (in_df, title_str="a_project_name_here", alpha=0.05, legend_text, legend_pos) { annot_x_pos = legend_pos$annot_x_pos annot_y_pos = legend_pos$annot_y_pos xlim_thres = legend_pos$xlim_thres_pos color_specs = CommT.plotcolors(n=2) in_df[,ncol(in_df)+1] = "insign" label_sign = paste("gene", sprintf("%03d", which(legend_text < alpha)), sep="") in_df[which(in_df[,2]==label_sign),ncol(in_df)] = "signif" colnames(in_df)[ncol(in_df)] = "significance" KF_dist = gene_id = significance = NULL plot_handle = ggplot2::ggplot(data=in_df) + geom_density(aes(x=KF_dist, group=gene_id, color=factor(significance), line=2)) + xlim(0, xlim_thres) + theme_bw() + scale_colour_manual(name="significance", values=rev(color_specs)) + ggtitle(paste(title_str, ", alpha=", alpha, "\n", sep="")) + theme(legend.position = "none", plot.title = element_text(size = rel(1.5))) n_entries = length(legend_text) plot_handle = plot_handle + annotate("text", x=annot_x_pos, y=annot_y_pos, label=paste("gene", colnames(legend_text), sep=" "), color=color_specs[2], fontface="bold") for (i in 1:n_entries) { if (legend_text[i] < alpha) { plot_handle = plot_handle + annotate("text", x=annot_x_pos, y=annot_y_pos-(annot_y_pos*i/n_entries), label=paste(rownames(legend_text)[i], sprintf("%03f", legend_text[i]), sep=" "), color=color_specs[1], size=4) } if (legend_text[i] >= alpha) { plot_handle = plot_handle + annotate("text", x=annot_x_pos, y=annot_y_pos-(annot_y_pos*i/n_entries), label=paste(rownames(legend_text)[i], sprintf("%03f", legend_text[i]), sep=" "), color=color_specs[2], size=4) } } return(plot_handle) }
run_document_extractor <- function(corpus_object, max_html_docs = 400, ...) { app <- system.file("download", "app.R", package = "corporaexplorer") if (app == "") { stop("Could not find directory. Try re-installing `corporaexplorer`.", call. = FALSE) } if (class(corpus_object) != "corporaexplorerobject") { stop("'corpus_object' is not a corporaexplorerobject", call. = FALSE ) } if ("package:corporaexplorer" %in% search() == FALSE) { stop( "Load 'corporaexplorer' by running 'library(corporaexplorer)', then run 'run_document_extractor()'.", call. = FALSE ) } if (!is.numeric(max_html_docs)) { stop(sprintf("Invalid '%s' argument.", "max_html_docs"), call. = FALSE ) } shiny::shinyOptions("corporaexplorer_download_data" = corpus_object) shiny::shinyOptions("corporaexplorer_download_max_html" = max_html_docs) shiny::shinyAppFile(app, options = list(display.mode = "normal", ...)) }
get_fun_name = function(n=0) { n = n + 1 cur_call = sys.call(sys.parent(n)) fun_name = as.character(cur_call)[1] fun_name = extract_root_and_last_member(fun_name)[["name"]] return(fun_name) }
require(knitr)
opts_chunk$set(
  dev = "pdf", fig.path = "figures/", fig.height = 3, fig.width = 4,
  out.width = ".47\\textwidth", fig.keep = "high", fig.show = "hold",
  fig.align = "center", prompt = TRUE, comment = NA
)
print.pval = function(pval) {
  threshold = 0.0001
  return(ifelse(pval < threshold,
                paste("p<", sprintf("%.4f", threshold), sep = ""),
                ifelse(pval > 0.1,
                       paste("p=", round(pval, 2), sep = ""),
                       paste("p=", round(pval, 3), sep = ""))))
}
require(Sleuth3)
require(mosaic)
trellis.par.set(theme = col.mosaic())
set.seed(123)
knit_hooks$set(inline = function(x) {
  if (is.numeric(x)) return(knitr:::format_sci(x, 'latex'))
  x = as.character(x)
  h = knitr:::hilight_source(x, 'latex', list(prompt = FALSE, size = 'normalsize'))
  h = gsub("([_#$%&])", "\\\\\\1", h)
  h = gsub('(["\'])', '\\1{}', h)
  gsub('^\\\\begin\\{alltt\\}\\s*|\\\\end\\{alltt\\}\\s*$', '', h)
})
showOriginal = FALSE
showNew = TRUE
trellis.par.set(theme = col.mosaic())
options(digits = 3, show.signif.stars = FALSE)
summary(case0401)
favstats(Incidents ~ Launch, data = case0401)
histogram(~ Incidents | Launch, data = case0401)
t.test(Incidents ~ Launch, var.equal = TRUE, data = case0401)
C244 = factorial(24)/(factorial(4)*factorial(24-4)); C244
case0401$Incidents[c(1,2,4,24)]
with(case0401, t.test(Incidents[c(1,2,4,24)], Incidents[-c(1,2,4,24)], var.equal = TRUE))
case0401$Incidents[c(1,4,5,24)]
with(case0401, t.test(Incidents[c(1,4,5,24)], Incidents[-c(1,4,5,24)], var.equal = TRUE))
C1113 = factorial(5)/(factorial(3)*factorial(5-3))*1; C1113
C1123 = factorial(5)/(factorial(2)*factorial(5-2))*1*1; C1123
C0123 = 17*5*1*1; C0123
onep = (C1113+C1123+C0123)/C244; onep
result = t.test(Incidents ~ Launch, var.equal = TRUE, data = case0401)$statistic; result
nulldist = do(10000)*t.test(Incidents ~ shuffle(Launch), var.equal = TRUE, data = case0401)$statistic
histogram(~ t, groups = t >= result, v = result, data = nulldist)
tally(~ t >= result, format = "proportion", data = nulldist)
summary(case0402)
favstats(Time ~ Treatment, data = case0402)
bwplot(Treatment ~ Time, data = case0402)
densityplot(~ Time, groups = Treatment, auto.key = TRUE, data = case0402)
obsrank = rank(case0402$Time, ties.method = "average"); obsrank
mt = sum(obsrank[1:14]); mt
average = mean(obsrank); average
sd = sd(obsrank); sd
n = nrow(subset(case0402, Treatment == "Modified")); n
MEANT = n * average; MEANT
SDT = sd * sqrt((n^2)/(2*n)); SDT
z = (mt-MEANT)/SDT; z
p = pnorm(-abs(z)); p
wilcox.test(Time ~ Treatment, conf.int = TRUE, exact = TRUE, data = case0402)
heading <- function(...,
                    headingLevel = rock::opts$get("defaultHeadingLevel"),
                    output = "markdown",
                    cat = TRUE) {
  text <- paste0(..., collapse = "");
  if (output == "html") {
    res <- paste0("\n\n<h", headingLevel, "> ", text, "</h", headingLevel, ">\n\n");
  } else {
    res <- paste0("\n\n", repStr("#", headingLevel), " ", text, "\n\n");
  }
  if (cat) {
    cat(res);
  }
  return(invisible(res));
}
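# Illustrative usage sketch (assumes repStr() and rock::opts from the same package are available):
heading("Results", headingLevel = 2)                   # cats "\n\n## Results\n\n" and returns it invisibly
heading("Results", headingLevel = 2, output = "html")  # cats "\n\n<h2> Results</h2>\n\n"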
library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) par(mar = c(5, 12, 4, 2) + 0.1) library("vimp") set.seed(5678910) n <- 1000 p <- 2 s <- 1 x <- data.frame(replicate(p, runif(n, -1, 1))) y <- (x[,1])^2*(x[,1]+7/5) + (25/9)*(x[,2])^2 + rnorm(n, 0, 1) folds <- sample(rep(seq_len(2), length = length(y))) library("SuperLearner") library("ranger") est_1 <- vimp_rsquared(Y = y, X = x, indx = 1, run_regression = TRUE, SL.library = c("SL.ranger", "SL.mean"), V = 2, env = environment()) est_1 print(est_1) data("vrc01") library("dplyr") library("tidyselect") y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) geog_indx <- max(which(grepl("geog", names(X)))) set.seed(1234) for (i in seq_len(ncol(X) - geog_indx)) { lm_vim <- vim(Y = y, X = X, indx = geog_indx + i, run_regression = TRUE, SL.library = "SL.lm", type = "r_squared", cvControl = list(V = 2)) if (i == 1) { lm_mat <- lm_vim } else { lm_mat <- merge_vim(lm_mat, lm_vim) } } lm_mat SL.gbm.1 <- function(..., interaction.depth = 1) SL.gbm(..., interaction.depth = interaction.depth) SL.gam.3 <- function(..., deg.gam = 3) SL.gam(..., deg.gam = deg.gam) SL.gam.4 <- function(..., deg.gam = 4) SL.gam(..., deg.gam = deg.gam) SL.gam.5 <- function(..., deg.gam = 5) SL.gam(..., deg.gam = deg.gam) create.SL.glmnet <- function(alpha = c(0.25, 0.5, 0.75)) { for (mm in seq(length(alpha))) { eval(parse(file = "", text = paste('SL.glmnet.', alpha[mm], '<- function(..., alpha = ', alpha[mm], ') SL.glmnet(..., alpha = alpha)', sep = '')), envir = .GlobalEnv) } invisible(TRUE) } create.SL.glmnet() create.SL.randomForest <- function(tune = list(mtry = c(1, 5, 7), nodesize = c(1, 5, 10))) { tuneGrid <- expand.grid(tune, stringsAsFactors = FALSE) for (mm in seq(nrow(tuneGrid))) { eval(parse(file = "", text = paste("SL.randomForest.", mm, "<- function(..., mtry = ", tuneGrid[mm, 1], ", nodesize = ", tuneGrid[mm, 2], ") SL.randomForest(..., mtry = mtry, nodesize = nodesize)", sep = "")), envir = .GlobalEnv) } invisible(TRUE) } create.SL.randomForest() learners <- c("SL.glmnet", "SL.glmnet.0.25", "SL.glmnet.0.5", "SL.glmnet.0.75", "SL.randomForest", "SL.randomForest.1", "SL.randomForest.2", "SL.randomForest.3", "SL.randomForest.4", "SL.randomForest.5", "SL.randomForest.6", "SL.randomForest.7", "SL.randomForest.8", "SL.randomForest.9", "SL.gbm.1") learners.2 <- c("SL.ranger") V <- 2 sl_cvcontrol <- list(V = 2) set.seed(5678) start_time <- Sys.time() subtype_01_AE_vim <- vimp_rsquared(Y = y, X = X, indx = 5, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) end_time <- Sys.time() subtype_01_AE_vim head(subtype_01_AE_vim$full_fit[[1]]) head(subtype_01_AE_vim$red_fit[[1]]) ests <- subtype_01_AE_vim set.seed(1234) for (i in seq_len(ncol(X) - geog_indx - 1)) { this_vim <- vimp_rsquared(Y = y, X = X, indx = geog_indx + i + 1, run_regression = TRUE, SL.library = learners.2, V = V, cvControl = sl_cvcontrol, family = binomial()) ests <- merge_vim(ests, this_vim) } library("ggplot2") library("cowplot") theme_set(theme_cowplot()) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) est_plot_tib <- ests$mat %>% mutate( var_fct = rev(factor(s, levels = ests$mat$s, labels = all_vars[as.numeric(ests$mat$s) - geog_indx], ordered = TRUE)) ) est_plot_tib %>% ggplot(aes(x = est, y = 
var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") set.seed(91011) subtype_vim <- vimp_rsquared(Y = y, X = X, indx = 5:15, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol) geometry_vim <- vimp_rsquared(Y = y, X = X, indx = 16:21, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol) groups <- merge_vim(subtype_vim, geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)")
source('functions.R')
library(ggplot2)
library(scales)
library(grid)
load("output/users-data-US.rdata")
load("data/contributor-data.rdata")
contr <- merge(contr, users)
contr$cat <- "Both parties"
contr$cat[contr$amount_dem > 0 & contr$amount_rep == 0] <- "Only Democrats"
contr$cat[contr$amount_dem == 0 & contr$amount_rep > 0] <- "Only Republicans"
table(contr$theta > 0)
tab <- table(contr$theta > 0, contr$cat)
(tab["TRUE", "Both parties"] + tab["TRUE", "Only Republicans"]) / sum(tab["TRUE", ])
(tab["FALSE", "Both parties"] + tab["FALSE", "Only Democrats"]) / sum(tab["FALSE", ])
cor(contr$theta, contr$cfscore, use = 'complete.obs')
load("output/users-data-US.rdata")
load("data/ohio-data.rdata")
ohio <- merge(ohio.data, users)
ohio$party <- "Not registered"
ohio$party[ohio$PARTY_AFFILIATION == "D"] <- "Registered DEM"
ohio$party[ohio$PARTY_AFFILIATION == "R"] <- "Registered REP"
sum.regs <- apply(ohio[, 4:42], 1, function(x) sum(x %in% "D") - sum(x %in% "R"))
ohio$sum.regs <- sum.regs
ohio$regs.cat <- NA
ohio$regs.cat[ohio$sum.regs < (-5)] <- "<-5"
ohio$regs.cat[ohio$sum.regs <= (-3) & ohio$sum.regs >= (-5)] <- "[-3,-5]"
ohio$regs.cat[ohio$sum.regs == (-2)] <- "-2"
ohio$regs.cat[ohio$sum.regs == (-1)] <- "-1"
ohio$regs.cat[ohio$sum.regs == (0)] <- "0"
ohio$regs.cat[ohio$sum.regs == (2)] <- "+2"
ohio$regs.cat[ohio$sum.regs == (1)] <- "+1"
ohio$regs.cat[ohio$sum.regs > (5)] <- ">+5"
ohio$regs.cat[ohio$sum.regs >= (3) & ohio$sum.regs <= (5)] <- "[+3,+5]"
ohio$regs.cat <- factor(ohio$regs.cat, levels = c("<-5", "[-3,-5]", "-2", "-1", "0", "+1", "+2", "[+3,+5]", ">+5"))
plotdata <- ohio[, c("uid", "theta", "regs.cat")]
names(plotdata) <- c("uid", "estimates", "x")
plotdata$facet <- "Registration History"
plotdata <- rbind(plotdata, data.frame("uid" = ohio$uid, "estimates" = ohio$theta, x = ohio$party, facet = "2012"))
plotdata <- plotdata[plotdata$x %in% "Not registered" == FALSE, ]
levels(plotdata$x)[c(11, 12)] <- c("Dem.", "Rep.")
p <- ggplot(plotdata, aes(x = x, y = estimates))
pq <- p + geom_boxplot(outlier.colour = "grey", outlier.size = 1) +
  scale_x_discrete("Party") +
  scale_y_continuous(expression(paste(theta[i], ", Twitter-Based Ideology Estimates")), limits = c(-2.5, 2.5)) +
  theme(panel.border = element_rect(fill = NA), panel.background = element_blank(), legend.position = "none") +
  geom_hline(aes(yintercept = 0), linetype = 3) +
  facet_grid(~facet, scales = "free", space = "free")
pq
ggsave(filename = "plots/figure5.pdf", plot = pq, height = 3.5, width = 7)
GMask <- function(ES, Core, Y){ Y <- as.factor(Y) levels(Y) = c(1,2) ES[, Y == 1] = (ES[, Y == 1] >= Core[,1] & ES[, Y == 1] <= Core[,2]) & !(ES[, Y == 1] >= Core[,3] & ES[, Y == 1] <= Core[,4]) ES[, Y == 2] = (ES[, Y == 2] >= Core[,3] & ES[, Y == 2] <= Core[,4]) & !(ES[, Y == 2] >= Core[,1] & ES[, Y == 2] <= Core[,2]) mask = ES mode(mask) = "integer" return(mask) }
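# Illustrative usage sketch (not part of the original source; values are invented): rows are genes,
# columns are samples; Core holds per-gene interval bounds [lower1, upper1, lower2, upper2] for
# class 1 and class 2 respectively.
ES   <- matrix(c(1, 2, 2, 6,
                 3, 1, 7, 9), nrow = 2, byrow = TRUE)
Core <- matrix(c(0, 3, 4, 7,
                 0, 4, 6, 10), nrow = 2, byrow = TRUE)
Y <- c("A", "A", "B", "B")
GMask(ES, Core, Y)   # 1 where a value lies only in its own class's core interval, 0 otherwise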
rls_prm <- function(lambda) { list(lambda = lambda) }
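# Illustrative usage sketch (not in the original source): the forgetting factor is simply wrapped in
# a named list, ready to be passed to whichever recursive-least-squares routine expects it.
rls_prm(lambda = 0.99)   # list(lambda = 0.99)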
NULL scale_x_continuous <- function(name = waiver(), breaks = waiver(), minor_breaks = waiver(), labels = waiver(), limits = NULL, expand = waiver(), oob = censor, na.value = NA_real_, trans = "identity") { sc <- continuous_scale( c("x", "xmin", "xmax", "xend", "xintercept", "xmin_final", "xmax_final", "xlower", "xmiddle", "xupper"), "position_c", identity, name = name, breaks = breaks, minor_breaks = minor_breaks, labels = labels, limits = limits, expand = expand, oob = oob, na.value = na.value, trans = trans, guide = "none" ) sc$super <- ScaleContinuousPosition class(sc) <- class(ScaleContinuousPosition) sc } scale_y_continuous <- function(name = waiver(), breaks = waiver(), minor_breaks = waiver(), labels = waiver(), limits = NULL, expand = waiver(), oob = censor, na.value = NA_real_, trans = "identity") { sc <- continuous_scale( c("y", "ymin", "ymax", "yend", "yintercept", "ymin_final", "ymax_final", "lower", "middle", "upper"), "position_c", identity, name = name, breaks = breaks, minor_breaks = minor_breaks, labels = labels, limits = limits, expand = expand, oob = oob, na.value = na.value, trans = trans, guide = "none" ) sc$super <- ScaleContinuousPosition class(sc) <- class(ScaleContinuousPosition) sc } ScaleContinuousPosition <- gganimintproto("ScaleContinuousPosition", ScaleContinuous, map = function(self, x, limits = self$get_limits()) { scaled <- as.numeric(self$oob(x, limits)) ifelse(!is.na(scaled), scaled, self$na.value) } ) scale_x_log10 <- function(...) { scale_x_continuous(..., trans = log10_trans()) } scale_y_log10 <- function(...) { scale_y_continuous(..., trans = log10_trans()) } scale_x_reverse <- function(...) { scale_x_continuous(..., trans = reverse_trans()) } scale_y_reverse <- function(...) { scale_y_continuous(..., trans = reverse_trans()) } scale_x_sqrt <- function(...) { scale_x_continuous(..., trans = sqrt_trans()) } scale_y_sqrt <- function(...) { scale_y_continuous(..., trans = sqrt_trans()) }
context("Object antaresDataTable") l <- list.files() sapply(studyPathS, function(studyPath){ opts <- setSimulationPath(studyPath) mydata <- readAntares(areas = "all", showProgress = FALSE) test_that("subsetting an antaresDataTable returns an antaresDataTable", { subsetdt <- mydata[area == "a"] expect_is(subsetdt, "antaresDataTable") expect_true(all(c("timeStep", "opts", "synthesis", "type") %in% names(attributes(subsetdt)))) }) test_that("modifying columns of an antaresDataTable returns an antaresDataTable", { newdata <- copy(mydata) newdata[, load2 <- LOAD * 2] newdata[, LOAD := NULL] expect_is(newdata, "antaresDataTable") expect_true(all(c("timeStep", "opts", "synthesis", "type") %in% names(attributes(newdata)))) }) test_that("merge an antaresDataTable with a table returns an antaresDataTable", { newdata <- merge(mydata, opts$districtsDef, by = "area") expect_is(newdata, "antaresDataTable") expect_true(all(c("timeStep", "opts", "synthesis", "type") %in% names(attributes(newdata)))) }) })
context("generateParagraphCR") n <- 7 s <- generateParagraphCR(LETTERS[1:n]) test_that("generateParagraphCR", { expect_length(s, 1L) expect_equal(stringr::str_count(s, '\\\\cr'), n - 1L) })
fb_player_match_logs <- function(player_url, season_end_year, stat_type) { stat_types <- c("summary", "keepers", "passing", "passing_types", "gca", "defense", "possession", "misc") if(!stat_type %in% stat_types) stop("check stat type") main_url <- "https://fbref.com" player_page <- xml2::read_html(player_url) player_name <- player_page %>% rvest::html_node("h1") %>% rvest::html_text() %>% stringr::str_squish() main_cats <- player_page %>% rvest::html_nodes(" span_names <- main_cats %>% rvest::html_nodes("span") %>% rvest::html_text() main_cats <- main_cats[grep("Match Logs", span_names)] match_log_names <- main_cats %>% rvest::html_nodes("p") %>% rvest::html_text() match_log_names <- match_log_names %>% gsub("Match Logs \\(", "", .) %>% gsub("\\)", "", .) log_level1 <- main_cats %>% rvest::html_nodes("ul") all_logs <- data.frame() for(i in 1:length(log_level1)) { log_name <- match_log_names[i] log_urls <- log_level1[i] %>% rvest::html_nodes("li") %>% rvest::html_nodes("a") %>% rvest::html_attr("href") season <- log_level1[i] %>% rvest::html_nodes("li") %>% rvest::html_nodes("a") %>% rvest::html_text() each_log_df <- cbind(log_name=log_name, log_urls=log_urls, season=season) %>% data.frame() all_logs <- rbind(all_logs, each_log_df) } all_logs <- all_logs %>% dplyr::mutate(season_end = gsub(".*-", "", .data$season), stat = dplyr::case_when( log_name == "Summary" ~ "summary", log_name == "Goalkeeping" ~ "keepers", log_name == "Passing" ~ "passing", log_name == "Pass Types" ~ "passing_types", log_name == "Goal and Shot Creation" ~ "gca", log_name == "Defensive Actions" ~ "defense", log_name == "Possession" ~ "possession", log_name == "Miscellaneous Stats" ~ "misc", TRUE ~ NA_character_ )) log_url <- all_logs %>% dplyr::filter(.data$stat == stat_type, .data$season_end == season_end_year) %>% dplyr::pull(log_urls) if(length(log_url) == 0) stop(glue::glue("check stat type: {stat_type} or season end: {season_end_year} exists")) season <- all_logs %>% dplyr::filter(.data$stat == stat_type, .data$season_end == season_end_year) %>% dplyr::pull(season) stat_page <- xml2::read_html(paste0(main_url, log_url)) tab <- stat_page %>% rvest::html_nodes(".table_container") %>% rvest::html_nodes("table") %>% rvest::html_table() %>% data.frame() tab <- .clean_table_names(tab) tab <- tab %>% dplyr::filter(.data$Date != "") %>% dplyr::mutate(Squad = sub("^.*?([A-Z])", "\\1", .data$Squad), Opponent = sub("^.*?([A-Z])", "\\1", .data$Opponent), Player = player_name, Season = season) %>% dplyr::select(.data$Player, .data$Season, dplyr::everything(), -.data$`Match Report`) non_num_vars <- c("Player", "Season", "Date", "Day", "Comp", "Round", "Venue", "Result", "Squad", "Opponent", "Start", "Pos") cols_to_transform <- names(tab)[!names(tab) %in% non_num_vars] suppressWarnings( tab <- tab %>% dplyr::mutate_at(.vars = cols_to_transform, .funs = function(x) {gsub(",", "", x)}) %>% dplyr::mutate_at(.vars = cols_to_transform, .funs = function(x) {gsub("+", "", x)}) %>% dplyr::mutate_at(.vars = cols_to_transform, .funs = as.numeric) ) return(tab) }
library(testthat) library(checkmate) test_check("BatchExperiments")
BW2stageSRS <- function(X, psuID, lonely.SSU = "mean"){ M <- length(unique(psuID)) Ni <- table(psuID) Nbar <- length(X)/M ti <- by(X, INDICES = psuID, FUN = sum) S2Ui <- by(X, INDICES = psuID, FUN = var) S2Ui.miss <- is.na(S2Ui) if (lonely.SSU == "mean"){ S2Ui[S2Ui.miss] <- mean(S2Ui[!S2Ui.miss]) } else if (lonely.SSU == "zero"){ S2Ui[S2Ui.miss] <- 0 } else {stop("Illegal value of lonely.SSU: ", lonely.SSU, "\n")} tbarU <- mean(ti) tU <- M*tbarU S2U1 <- var(ti) B2 <- S2U1 / tbarU^2 ybarU <- mean(X) S2U <- var(X) W2 <- M * sum(Ni^2 * S2Ui) / tU^2 c("B2"=B2, "W2"=W2, "unit relvar"=S2U/ybarU^2, "B2+W2"=B2 + W2, "k"=(B2 + W2)/(S2U/ybarU^2), "delta" = B2 / (B2 + W2) ) }
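# Illustrative usage sketch (not part of the original source): a small synthetic two-stage population
# with 5 PSUs of 10 SSUs each, to show the between/within relvariance decomposition returned above.
set.seed(1)
psuID <- rep(1:5, each = 10)
X <- rgamma(50, shape = 2) + psuID   # adding a PSU effect inflates the between-cluster component B2
BW2stageSRS(X, psuID)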
repRev <- function(path = Sys.getenv("RevBayesPath"), viewCode = FALSE, coerce = FALSE, use_wd = TRUE) {
  while (TRUE) {
    ginput <- readline(prompt = "rb>>>")
    numberOfOpenBraces <- stringr::str_count(ginput, "\\{")
    numberOfClosedBraces <- stringr::str_count(ginput, "\\}")
    numberOfOpenParenthesis <- stringr::str_count(ginput, "\\(")
    numberOfClosedParenthesis <- stringr::str_count(ginput, "\\)")
    testBraces <- numberOfOpenBraces == numberOfClosedBraces
    testParenthesis <- numberOfOpenParenthesis == numberOfClosedParenthesis
    while (!(testBraces && testParenthesis)) {
      if ((numberOfOpenBraces < numberOfClosedBraces) | (numberOfOpenParenthesis < numberOfClosedParenthesis)) break
      ginput <- ginput %+% readline(prompt = "rb>>>")
      numberOfOpenBraces <- stringr::str_count(ginput, "\\{")
      numberOfClosedBraces <- stringr::str_count(ginput, "\\}")
      numberOfOpenParenthesis <- stringr::str_count(ginput, "\\(")
      numberOfClosedParenthesis <- stringr::str_count(ginput, "\\)")
      testBraces <- numberOfOpenBraces == numberOfClosedBraces
      testParenthesis <- numberOfOpenParenthesis == numberOfClosedParenthesis
    }
    if (ginput == "quit()" || ginput == "q()") {
      break
    }
    if (ginput == "clearRev()") {
      clearRev()
      next
    }
    if (str_detect(ginput, "clearRev\\(([0-9]+)\\)")) {
      clearRev(as.integer(str_extract(ginput, "[0-9]+")))
      next
    }
    if (ginput == "getRevVars()") {
      cat(getRevVars(), sep = "\n")
      next
    }
    if (str_detect(ginput, pattern = "^getRevVars\\(\".+\"\\)$")) {
      args <- str_remove_all(str_extract(ginput, "\\(.+\\)"), "\\(|\\)|\"")
      cat(getRevVars(args), sep = "\n")
      next
    }
    if (ginput == "getRevHistory()") {
      cat(getRevHistory(), sep = "\n")
      next
    } else {
      cat(doRev(ginput, viewCode = viewCode, coerce = coerce), sep = "\n\n")
    }
  }
}
context("test-rx_anything") test_that("anything rule works", { expect_equal(rx_anything() %>% as.character(), "(.*)") expect_equal(rx_anything(mode = "lazy") %>% as.character(), "(.*?)") expect_true(grepl(rx_anything(), "b")) expect_true(grepl(rx_anything(mode = "lazy"), "b")) expect_true(grepl(rx_anything(), "a!.>\\")) expect_true(grepl(rx_anything(mode = "lazy"), "a!.>\\")) expect_equal(rx_anything(.data = "^") %>% as.character(), "^(.*)") expect_equal(rx_anything(.data = "^", mode = "lazy") %>% as.character(), "^(.*?)") expect_error(rx_anything(mode = "whatever")) expect_true(nchar(unlist(regmatches("abc", gregexpr(rx_anything(), "abc")))) == 3) expect_true(compare(nchar(unlist(regmatches("abc", gregexpr(rx_anything(mode="lazy"), "abc")))), c(0,0,0))$equal) })
F.cr.model.avg <- function(fits = ls(pattern = "^fit"), what = "survival", fit.stat = "qaicc") { if(substring(what, 1, 1) == "s") { x.name <- "s.hat" x.se <- "se.s.hat" want.n.hat <- FALSE } else if(substring(what, 1, 1) == "c") { x.name <- "p.hat" x.se <- "se.p.hat" want.n.hat <- FALSE } else if(substring(what, 1, 1) == "n") { x.name <- "n.hat" x.se <- "se.n.hat" want.n.hat <- TRUE } else { stop(paste("Invalid option. Cannot model average '", what, "'.", sep = "")) } if( !(fit.stat == "aicc") & !(fit.stat == "qaicc")) { stop(paste("Invalid option. Cannot model average '", fit.stat, "'.", sep = "")) } n.mod <- length(fits) use.mod <- vector("logical", n.mod) nan.mod <- vector("numeric", n.mod) ns.mod <- vector("numeric", n.mod) for(li1 in 1:n.mod) { fit <- get(fits[li1], pos = .GlobalEnv) if("cr" %in% class(fit)) { if((fit$exit.code == 1) & (fit$cov.code == 0) & (fit$df > 0)) { use.mod[li1] <- TRUE nan.mod[li1] <- fit$aux$nan ns.mod[li1] <- fit$aux$ns } else { use.mod[li1] <- FALSE nan.mod[li1] <- NA ns.mod[li1] <- NA } } else { warning(paste("Object", fits[li1], "in fits is not a CR object and has been ignored.")) use.mod[li1] <- FALSE nan.mod[li1] <- NA ns.mod[li1] <- NA } } dum1 <- min(nan.mod, na.rm=TRUE) dum2 <- max(nan.mod, na.rm=TRUE) if(dum1 == dum2) { nan <- dum1 } else { stop(paste("Number of individuals differ among models. Cannot model average.")) } dum1 <- min(ns.mod, na.rm=TRUE) dum2 <- max(ns.mod, na.rm=TRUE) if(dum1 == dum2) { ns <- dum1 } else { stop(paste("Number of occasions differ among models. Cannot model average.")) } rm(dum1, dum2, nan.mod, ns.mod) dim.nms <- dimnames(fit$histories) n.mod.good <- sum(use.mod) if(n.mod.good < 1) { stop(paste("Number of good models equals 0.")) } if(want.n.hat) { nan <- 1 } n.stats <- nan*ns fits <- fits[use.mod] stats <- matrix(0, n.mod.good, n.stats) se.stats <- stats all.fit.stat <- vector("numeric", n.mod.good) for(li1 in 1:n.mod.good) { fit <- get(fits[li1], pos = .GlobalEnv) stats[li1, 1:n.stats] <- unlist(fit[x.name]) se.stats[li1, 1:n.stats] <- unlist(fit[x.se]) all.fit.stat[li1] <- unlist(fit[fit.stat]) } delta.AIC <- all.fit.stat - min(all.fit.stat) dum1 <- exp(-0.5*delta.AIC) dum2 <- sum(dum1) wi.array <- dum1/dum2 rm(dum1, dum2) a1 <- stats*wi.array theta.average <- apply(a1, 2, sum) var.theta <- se.stats^2 dum <- matrix(theta.average, nrow=n.mod.good, ncol=n.stats, byrow=TRUE) a2 <- sqrt(var.theta + (stats - dum)^2)*wi.array rm(dum) se.theta.average <- apply(a2, 2, sum) a2 <- se.stats*wi.array se.conditional.theta.average <- apply(a2, 2, sum) mod.selection.proportion <- (se.theta.average - se.conditional.theta.average)/ se.theta.average hat <- matrix(theta.average, nrow=nan, ncol=ns, byrow=FALSE) se.hat <- matrix(se.theta.average, nrow=nan, ncol=ns, byrow=FALSE) se.hat.conditional <- matrix(se.conditional.theta.average, nrow=nan, ncol=ns, byrow=FALSE) mod.selection.proportion <- matrix(mod.selection.proportion, nrow=nan, ncol=ns, byrow=FALSE) AIC.table <- data.frame(fits, all.fit.stat, delta.AIC, wi.array, stringsAsFactors=FALSE) AIC.table <- AIC.table[order(AIC.table[,3]),] names(AIC.table) <- c("model", fit.stat, paste("delta.", fit.stat, sep=""), paste(fit.stat, ".weight", sep="")) if(substring(what, 1, 1) == "s") { dimnames(hat) <- dim.nms dimnames(se.hat) <- dim.nms dimnames(se.hat.conditional) <- dim.nms dimnames(mod.selection.proportion) <- dim.nms results <- list(fit.table=AIC.table, s.hat=hat, se.s.hat=se.hat, se.s.hat.conditional=se.hat.conditional, mod.selection.proportion=mod.selection.proportion) } else 
if(substring(what, 1, 1) == "c") { dimnames(hat) <- dim.nms dimnames(se.hat) <- dim.nms dimnames(se.hat.conditional) <- dim.nms dimnames(mod.selection.proportion) <- dim.nms results <- list(fit.table=AIC.table, p.hat=hat, se.p.hat=se.hat, se.p.hat.conditional=se.hat.conditional, mod.selection.proportion=mod.selection.proportion) } else { names(hat) <- dim.nms[[2]] names(se.hat) <- dim.nms[[2]] names(se.hat.conditional) <- dim.nms[[2]] names(mod.selection.proportion) <- dim.nms[[2]] results <- list(fit.table=AIC.table, n.hat=hat, se.n.hat=se.hat, se.n.hat.conditional=se.hat.conditional, mod.selection.proportion=mod.selection.proportion) results$n.hat.lower <- results$n.hat - 1.96*results$se.n.hat results$n.hat.upper <- results$n.hat + 1.96*results$se.n.hat results$nhat.v.meth <- fit$nhat.v.meth + 3 results$n.hat.conf <- 0.95 results$intervals <- fit$intervals class(results) <- c("nhat", "cr") } results }
systemfitAR <- function(formula, method = "OLS", inst = NULL, data = list(), restrict.matrix = NULL, restrict.rhs = NULL, restrict.regMat = NULL, pooled = FALSE, control = systemfit.control( ... ), AR1 = FALSE, rho.sel = c("all", "mean"), model = c("static", "dynamic"), ...) { rho.sel <- match.arg(rho.sel) model <- match.arg(model) est <- systemfit(formula=formula, method="SUR", data=data) resids_all <- rho_ind <- NULL for (i in 1:length(est$eq)) { resids <- est$eq[[i]]$residuals resids_all <- c(resids_all, resids) res <- resids[-length(resids)] rho_ind <- c(rho_ind, sum(resids[-1] * res) / sum(res^2)) rho_ste_ind <- sqrt((1 - rho_ind^2) / length(resids)) } res_all <- resids_all[-length(resids_all)] rho_all <- sum(resids_all[-1] * res_all) / sum(res_all^2) rho_ste_all <- sqrt((1 - rho_all^2) / length(resids_all)) if (rho.sel == "all") { rho <- rho_all; rho_ste <- rho_ste_all } else { rho <- mean(rho_ind); rho_ste <- rho_ste_ind } if (AR1 & model == 'static') { if(inherits(formula, "formula")) {formula <- list(formula)} name.y <- name.x <- NULL; formu.adj <- list() for (i in 1:length(formula)) { name.y <- c(name.y, all.vars(formula[[i]])[ 1]) name.x <- c(name.x, all.vars(formula[[i]])[-1]) formu.adj[[i]] <- bsFormu(name.y=all.vars(formula[[i]])[1], name.x=c("intercept_adj", all.vars(formula[[i]])[-1]), intercept=FALSE) } nam.y <- unique(name.y); yy <- data[, nam.y] nam.x <- unique(name.x); xx <- data[, nam.x] if(length(nam.y) == 1) { adj.yy <- yy[-1] - rho * yy[-length(yy)] } else { adj.yy <- yy[-1,] - rho * yy[-nrow(yy),] } adj.intercept <- 1 - rho adj.xx <- xx[-1,] - rho * xx[-nrow(xx),] data.adj <- cbind(adj.yy, adj.intercept, adj.xx) names(data.adj) <- c(nam.y, "intercept_adj", nam.x) formula <- formu.adj data <- data.adj } if (AR1 & model == 'dynamic') { if(inherits(formula, "formula")) {formula <- list(formula)} name.y <- name.x <- NULL for (i in 1:length(formula)) { name.y <- c(name.y, all.vars(formula[[i]])[ 1]) name.x <- c(name.x, all.vars(formula[[i]])[-1]) } nam.y <- unique(name.y); yy <- data[, nam.y] nam.x <- unique(name.x); xx <- data[, nam.x] if(length(nam.y) == 1) { adj.yy <- yy[-1] - rho * yy[-length(yy)] } else { adj.yy <- yy[-1,] - rho * yy[-nrow(yy),] } adj.xx <- xx[-1,] - rho * xx[-nrow(xx),] data.adj <- cbind(adj.yy, adj.xx) names(data.adj) <- c(nam.y, nam.x) data <- data.adj } result <- systemfit(formula=formula, method=method, inst=inst, data=data, restrict.matrix=restrict.matrix, restrict.rhs=restrict.rhs, restrict.regMat=restrict.regMat, pooled=pooled, control=control) result$rho <- rho result$rho_ste <- rho_ste result$data <- data result$formula <- formula return(result) }
HHGtest <- function(Dx, Dy, nperm){ if (!requireNamespace("HHG", quietly = TRUE)) { stop("Package \"HHG\" needed for this function to work. Please install it.", call. = FALSE) } hhgObs <- HHG::hhg.test(Dx = Dx, Dy = Dy, nr.perm = 0)$sum.chisq if(nperm != 0 ){ permStats <- rep(NA, nperm) s <-lapply(1:nperm, function(x) c(sample(nrow(Dy)))) for(i in 1:nperm){ permStats[i] <- HHG::hhg.test(Dx = Dx, Dy = Dy[s[[i]], s[[i]]], nr.perm = 0)$sum.chisq } pVal <- (sum(permStats > hhgObs) + 1)/(nperm + 1) return(list(Stat = hhgObs, pValue = pVal, permStats = permStats)) } else{ return(list(Stat = hhgObs)) } }
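# Illustrative usage sketch (assumes the HHG package is installed; data are simulated):
set.seed(42)
x <- rnorm(30)
y <- x^2 + rnorm(30, sd = 0.2)
Dx <- as.matrix(dist(x))
Dy <- as.matrix(dist(y))
HHGtest(Dx, Dy, nperm = 99)   # observed sum.chisq statistic, permutation p-value and null statistics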
CIV <- function(level){ x <- NULL if(level==1){ x1 <- github.cssegisanddata.covid19(country = "Cote d'Ivoire") x2 <- ourworldindata.org(id = "CIV") x <- full_join(x1, x2, by = "date") } return(x) }
"lbh_selec_table2"
order_expkumg <- function(size,spec,a,b,c,k,n,alpha=0.05,...){ sample <- qexpkumg(initial_order(size,k,n),spec,a,b,c,...) pdf <- factorial(size)*cumprod(dexpkumg(sample,spec,a,b,c,...))[size] if(size>5){ return(list(sample=sample,pdf=pdf,ci_median=interval_median(size,sample,alpha))) } cat("---------------------------------------------------------------------------------------------\n") cat("We cannot report the confidence interval. The size of the sample is less or equal than five.\n") return(list(sample=sample,pdf=pdf)) }
library(apc) data <- data.asbestos.2013() apc.fit.table(data,"poisson.response")[1:4,1:6] data.trunc <- apc.data.list.subset(data,0,0,0,0,0,22,suppress.warning=TRUE) fit.ac <- apc.fit.model(data.trunc,"poisson.response","AC") forecast <- apc.forecast.ac(fit.ac) cat("Peak forecast","\n") print(forecast$response.forecast.per[1:6,]) v.WT2006 <- c( 2007, 1791, 1715, 1864, 2008, 1835, 1755, 1920, 2009, 1869, 1788, 1953, 2010, 1902, 1817, 1990, 2011, 1926, 1842, 2015, 2012, 1947, 1859, 2042, 2013, 1964, 1874, 2062, 2014, 1979, 1881, 2079, 2015, 1988, 1886, 2099, 2016, 1990, 1885, 2100, 2017, 1988, 1875, 2100, 2018, 1978, 1870, 2100, 2019, 1966, 1851, 2083, 2020, 1945, 1821, 2070, 2021, 1916, 1790, 2045, 2022, 1881, 1753, 2014, 2023, 1841, 1709, 1984, 2024, 1799, 1668, 1945, 2025, 1745, 1612, 1893, 2026, 1692, 1549, 1839, 2027, 1625, 1485, 1780, 2028, 1557, 1416, 1710, 2029, 1486, 1338, 1639, 2030, 1412, 1268, 1558) WT2006 <- matrix(data=v.WT2006, ncol=4,byrow=TRUE) v.WT2010 <- c( 2011, 1942, 1866, 2022, 2012, 1965, 1886, 2046, 2013, 1983, 1901, 2069, 2014, 1997, 1913, 2081, 2015, 2003, 1918, 2099, 2016, 2002, 1912, 2101, 2017, 2000, 1904, 2093, 2018, 1989, 1892, 2084, 2019, 1974, 1874, 2076, 2020, 1945, 1849, 2049, 2021, 1916, 1817, 2017, 2022, 1879, 1774, 1990, 2023, 1842, 1740, 1948, 2024, 1797, 1691, 1911, 2025, 1738, 1631, 1849, 2026, 1682, 1574, 1802, 2027, 1614, 1510, 1730, 2028, 1544, 1444, 1655, 2029, 1471, 1364, 1591, 2030, 1398, 1302, 1515) WT2010 <- matrix(data=v.WT2010, ncol=4,byrow=TRUE) data.trunc.2006 <- apc.data.list.subset(data,0,0,0,7,0,22, suppress.warning=TRUE) fit.ac.2006 <- apc.fit.model(data.trunc.2006, "poisson.response","AC") forecast.2006 <- apc.forecast.ac(fit.ac.2006) data.sum.per <- apc.data.sums(data.trunc)$sums.per plot(seq(1968,2013),data.sum.per,xlim=c(2002,2030),ylim=c(1400,2200), xlab="period",ylab="number of cases") apc.polygon(forecast$response.forecast.per.ic,2013,TRUE,TRUE, col.line=1,lwd.line=3) apc.polygon(forecast.2006$response.forecast.per.ic,2006,FALSE, lty.line=4,col.line=4,lwd.line=3) apc.polygon(WT2006[,2:4],2006,FALSE,lty.line=2,col.line=2,lwd.line=3) apc.polygon(WT2010[,2:4],2010,FALSE,lty.line=3,col.line=3,lwd.line=3) legend("topleft",lty=c(1,4,2,3),col=c(1,4,2,3),lwd=3, legend=c("AC 2013","AC 2006","HSE 2006","HSE 2010")) plot(seq(1968,2013),data.sum.per,xlim=c(2002,2030),ylim=c(1400,2200), xlab="period",ylab="number of cases") apc.polygon(forecast$response.forecast.per.ic,2013,TRUE,TRUE, col.line=1,lwd.line=3) apc.polygon(forecast.2006$response.forecast.per.ic,2006,FALSE, lty.line=4,col.line=1,lwd.line=3) apc.polygon(WT2006[,2:4],2006,FALSE,lty.line=2,col.line=1,lwd.line=3) apc.polygon(WT2010[,2:4],2010,FALSE,lty.line=3,col.line=1,lwd.line=3) legend("topleft",lty=c(1,4,2,3),col=1,lwd=3, legend=c("AC 2013","AC 2006","HSE 2006","HSE 2010"))
is.non.singular.matrix <- function( x, tol=1e-8 ) { if (!is.square.matrix( x ) ) stop( "argument x is not a square matrix" ) if ( !is.numeric( x ) ) stop( "argument x is not a numeric matrix" ) det.x <- det( x ) return( abs( det.x ) >= tol ) }
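# Illustrative usage sketch (assumes is.square.matrix() from the same package is available):
is.non.singular.matrix(diag(3))                       # TRUE: |det| = 1 exceeds the tolerance
is.non.singular.matrix(matrix(c(1, 2, 2, 4), 2, 2))   # FALSE: rows are linearly dependent, det = 0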
utils::globalVariables(c('nRootedShapes', 'nUnrootedShapes', 'unrootedKeys'), 'TreeTools') RootedTreeShape <- function (tree) { edge <- tree$edge nTip <- NTip(tree) edge <- Postorder(edge, renumber = FALSE, sizeSort = TRUE) .Int64(edge_to_rooted_shape(edge[, 1], edge[, 2], nTip)) } .Int64 <- function (n) { n <- as.integer64(n) if (length(n) == 2L) { n <- n[1] * 2147483647L + n[2] } n } RootedTreeWithShape <- function (shape, nTip, tipLabels) UseMethod('RootedTreeWithShape') RootedTreeWithShape.numeric <- function (shape, nTip, tipLabels = character(nTip)) { structure(list(edge = rooted_shape_to_edge(shape, nTip), Nnode = nTip - 1L, tip.label = tipLabels), class = 'phylo') } RootedTreeWithShape.integer64 <- function (shape, nTip, tipLabels = character(nTip)) { if (shape < 0) { stop("Shape may not be negative.") } else if (shape > 2L^31L - 1L) { stop("Shapes this large are not currently implemented. ", "Please contact maintainer for help.") } else { RootedTreeWithShape(as.integer(shape), nTip, tipLabels) } } UnrootedTreeWithShape <- function (shape, nTip, tipLabels = character(nTip)) { if (nTip > 30) { stop("Only trees with < 31 tips are presently handled") } nShapes <- nUnrootedShapes[nTip] if (shape >= as.integer(nShapes)) { stop("Shape must be between 0 and ", nShapes) } key <- UnrootedKeys(nTip)[shape + 1L] UnrootedTreeWithKey(key, nTip, tipLabels) } UnrootedTreeWithKey <- function (key, nTip, tipLabels = character(nTip)) { AddRoot <- function (x) { x$root.edge <- 1L x } SingleTaxonTree(tipLabels[1]) + SingleTaxonTree(tipLabels[2]) + AddRoot(RootedTreeWithShape(key, nTip - 2L, tipLabels[-c(1, 2)])) } UnrootedTreeShape <- function (tree) { which(UnrootedKeys(NTip(tree)) == UnrootedTreeKey(tree)) - 1L } UnrootedTreeKey <- function (tree, asInteger = FALSE) { tree <- Preorder(tree) edge <- Postorder(tree$edge, renumber = FALSE, sizeSort = TRUE) nTip <- NTip(tree) parent <- edge[, 1] child <- edge[, 2] nEdge <- length(child) unrooted <- nEdge %% 2L nodeFirst <- c(rep.int(c(TRUE, FALSE), nEdge / 2L), logical(as.integer(unrooted))) nodeSecond <- !nodeFirst nodeNumbers <- unique(parent) if (unrooted) { nodeFirst [nEdge - 0:2] <- FALSE nodeSecond[nEdge - 0:2] <- FALSE nodeNumbers <- nodeNumbers[-(nTip - 2L)] } RootedNumber <- function (nodeChildren) { RootedTreeShape(Postorder(DropTip(RootTree(tree, nodeChildren[1]), nodeChildren))) } basalTipEdges <- nEdge - (seq_len(4L - unrooted) - 1L) rootCandidate <- if (sum(child[basalTipEdges] <= nTip) == 2) { RootedNumber(child[basalTipEdges][child[basalTipEdges] <= nTip]) } else { integer64(0) } cherryNodes <- nodeNumbers[child[nodeFirst] <= nTip & child[nodeSecond] <= nTip] allKeys <- structure(c(vapply(cherryNodes, function (node) { RootedNumber(child[parent == node]) }, integer64(1)), rootCandidate), class = 'integer64') min(if (asInteger) as.integer(allKeys) else allKeys) } .UnrootedKeys <- function (nTip) { if (nTip > 28L) { stop("Too many shapes to calculate with ", nTip, " leaves.") } else if (nTip > length(unrootedKeys)) { shapes <- as.integer(structure( vapply(seq_len(as.integer(NRootedShapes(nTip))) - 1L, function (shape) UnrootedTreeKey(RootedTreeWithShape(shape, nTip)), integer64(1L)), class = 'integer64')) uniqueShapes <- unique(shapes) } else { uniqueShapes <- unrootedKeys[[nTip]] } sort(uniqueShapes) } UnrootedKeys <- addMemoization(.UnrootedKeys, envir = 'package:TreeTools') NUnrootedShapes <- function (nTip) { if (nTip > 60L) { stop("Too many shapes to represent as a 64-bit integer. 
", "Consult OEIS for value: https://oeis.org/A000672/b000672.txt") } nUnrootedShapes[nTip] } NRootedShapes <- function (nTip) { if (nTip > 55L) { stop("Too many shapes to represent as a 64-bit integer. ", "Consult OEIS for value: https://oeis.org/A001190/b001190.txt") } nRootedShapes[nTip] }
NULL ggThemeAssist <- function(text){ SubtitlesSupport <- any(names(formals(ggtitle)) == 'subtitle') if (grepl('^\\s*[[:alpha:]]+[[:alnum:]\\.]*\\s*$', paste0(text, collapse = ''))) { text <- gsub('\\s+', '', text) if (any(ls(envir = .GlobalEnv) == text)) { gg_original <- get(text, envir = .GlobalEnv) allowOneline <- TRUE } else { stop(paste0('I\'m sorry, I couldn\'t find object', text, '.')) } } else { gg_original <- try(eval(parse(text = text)), silent = TRUE) allowOneline <- FALSE if(class(gg_original)[1] == 'try-error') { stop(paste0('I\'m sorry, I was unable to parse the string you gave to me.\n', gg_original)) } } if (!is.ggplot(gg_original)) { stop('No ggplot2 object has been selected. Fool someone else!') } colours.available <- c(colours.available, getRGBHexColours(gg_original)) default <- updateDefaults(gg_original, default, linetypes = linetypes) ui <- miniPage( tags$script(jscodeWidth), tags$script(jscodeHeight), tags$style(type = "text/css", ".selectize-dropdown{ width: 200px !important; }"), gadgetTitleBar("ggplot Theme Assistant"), miniTabstripPanel(selected = 'Panel & Background', miniTabPanel("Settings", icon = icon('sliders'), plotOutput("ThePlot5", width = '100%', height = '45%'), miniContentPanel(scrollable = TRUE, fillRow(height = heading.height, width = '100%', headingOutput('Plot dimensions') ), fillRow(height = line.height, width = '100%', numericInput('plot.width', label = 'Width', min = 0, max = 10, step = 1, value = 10), numericInput('plot.height', label = 'Height', min = 0, max = 10, step = 1, value = 5) ), fillRow(height = heading.height, width = '100%', headingOutput("General options")), fillRow(height = heading.height, width = '100%', tags$div( title = 'If enabled, formatR will be used. Set options(ggThemeAssist.formatR = FALSE) to disable it permanently.', checkboxInput('formatR', 'Use FormatR', value = getOption("ggThemeAssist.formatR", default = TRUE)) ), if (allowOneline) { tags$div( title = 'If multiline support is enabled, a theme function is returned for each element. 
To set this option permanently set options(ggThemeAssist.multiline = TRUE).', checkboxInput('multiline', 'Multiline results', value = getOption("ggThemeAssist.multiline", default = FALSE)) ) } ) ) ), miniTabPanel("Panel & Background", icon = icon('sliders'), plotOutput("ThePlot2", width = '100%', height = '45%'), miniContentPanel(scrollable = TRUE, fillRow(height = heading.height, width = '100%', headingOutput('Plot Background'), headingOutput('Panel Background'), headingOutput('Grid Major'), headingOutput('Grid Minor') ), fillRow(height = line.height, width = '100%', selectizeInput('plot.background.fill', label = 'Fill', choices = NULL, width = input.width), selectizeInput('panel.background.fill', label = 'Fill', choices = NULL, width = input.width), "", "" ), fillRow(height = line.height, width = '100%', selectInput('plot.background.linetype', label = 'Type', choices = linetypes, selected = default$plot.background$linetype, width = input.width), selectInput('panel.background.linetype', label = 'Type', choices = linetypes, selected = default$panel.background$linetype, width = input.width), selectInput('panel.grid.major.type', label = 'Type', choices = linetypes, selected = default$panel.grid.major$linetype, width = input.width), selectInput('panel.grid.minor.type', label = 'Type', choices = linetypes, selected = default$panel.grid.minor$linetype, width = input.width) ), fillRow(height = line.height, width = '100%', numericInput('plot.background.size', label = 'Size', step = 0.1, value = default$plot.background$size, width = input.width), numericInput('panel.background.size', label = 'Size', step = 0.1, value = default$panel.background$size, width = input.width), numericInput('panel.grid.major.size', label = 'Size', step = 0.1, value = default$panel.grid.major$size, min = 0, width = input.width), numericInput('panel.grid.minor.size', label = 'Size', step = 0.1, value = default$panel.grid.minor$size, min = 0, width = input.width) ), fillRow(height = line.height, width = '100%', selectizeInput('plot.background.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('panel.background.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('panel.grid.major.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('panel.grid.minor.colour', label = 'Colour', choices = NULL, width = input.width) ) ) ), miniTabPanel("Axis", icon = icon('sliders'), plotOutput("ThePlot", width = '100%', height = '45%'), miniContentPanel(scrollable = TRUE, fillRow(height = heading.height, width = '100%', headingOutput('Axis text'), headingOutput('Axis text.x'), headingOutput('Axis text.y'), headingOutput('Axis line'), headingOutput('Axis ticks') ), fillRow(height = line.height, width = '100%', selectInput('axis.text.family', label = 'Family', choices = text.families, selected = default$axis.text$family, width = input.width), selectInput('axis.text.x.family', label = 'Family', choices = c('None' = 'NULL', text.families), selected = NULL, width = input.width), selectInput('axis.text.y.family', label = 'Family', choices = c('None' = 'NULL', text.families), selected = NULL, width = input.width), selectInput('axis.line.type', label = 'Type', choices = linetypes, selected = default$axis.line$linetype, width = input.width), selectInput('axis.ticks.type', label = 'Type', choices = linetypes, selected = default$axis.ticks$linetype, width = input.width) ), fillRow(height = line.height, width = '100%', selectInput('axis.text.face', label = 'Face', choices = 
text.faces, width = input.width, selected = default$axis.text$face), selectInput('axis.text.x.face', label = 'Face', choices = c('None' = 'NULL', text.faces), width = input.width, selected = NULL), selectInput('axis.text.y.face', label = 'Face', choices = c('None' = 'NULL', text.faces), width = input.width, selected = NULL), numericInput('axis.line.size', label = 'Size', step = 0.1, value = default$axis.line$size, min = 0,width = input.width), numericInput('axis.ticks.size', label = 'Size', step = 0.1, value = default$axis.ticks$size, min = 0,width = input.width) ), fillRow(height = line.height, width = '100%', numericInput('axis.text.size', label = 'Size', min = 1, max = 30, value = default$axis.text$size, step = 1, width = input.width), numericInput('axis.text.x.size', label = 'Size', min = 1, max = 30, value = NULL, step = 1, width = input.width), numericInput('axis.text.y.size', label = 'Size', min = 1, max = 30, value = NULL, step = 1, width = input.width), selectizeInput('axis.line.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('axis.ticks.colour', label = 'Colour', choices = NULL, width = input.width) ), fillRow(height = line.height, width = '100%', selectizeInput('axis.text.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('axis.text.x.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('axis.text.y.colour', label = 'Colour', choices = NULL, width = input.width), "", "" ), fillRow(height = line.height, width = '100%', numericInput('axis.text.hjust', 'Hjust', value = default$axis.text$hjust, step = 0.25, width = input.width), numericInput('axis.text.hjust.x', 'Hjust', value = NULL, step = 0.25, width = input.width), numericInput('axis.text.hjust.y', 'Hjust', value = NULL, step = 0.25, width = input.width), "", "" ), fillRow(height = line.height, width = '100%', numericInput('axis.text.vjust', 'Vjust', value = default$axis.text$vjust, step = 0.25, width = input.width), numericInput('axis.text.x.vjust', 'Vjust', value = NULL, step = 0.25, width = input.width), numericInput('axis.text.y.vjust', 'Vjust', value = NULL, step = 0.25, width = input.width), "", "" ), fillRow(height = line.height, width = '100%', numericInput('axis.text.angle', label = 'Angle', min = -180, max = 180, value = default$axis.text$angle, step = 5, width = input.width), numericInput('axis.text.x.angle', label = 'Angle', min = -180, max = 180, value = NULL, step = 5, width = input.width), numericInput('axis.text.y.angle', label = 'Angle', min = -180, max = 180, value = NULL, step = 5, width = input.width), "", "" ) )), miniTabPanel("Title, Label & Facet", icon = icon('sliders'), plotOutput("ThePlot4", width = '100%', height = '45%'), miniContentPanel(scrollable = TRUE, fillRow(height = heading.height, width = '100%', headingOutput('Labels'), headingOutput('Plot Title'), headingOutput('Axis Labels'), headingOutput('Facets Background'), headingOutput('Facets Text') ), fillRow(height = line.height, width = '100%', textInput('plot.title', label = 'Title', value = preserveNewlines(gg_original$labels$title), width = input.width), selectInput('plot.title.family', label = 'Family', choices = text.families, selected = default$plot.title$family, width = input.width), selectInput('axis.title.family', label = 'Family', choices = text.families, selected = default$axis.title$family, width = input.width), selectizeInput('strip.background.fill', label = 'Fill', choices = NULL, width = input.width), selectInput('strip.text.family', label = 
'Family', choices = text.families, selected = default$strip.text$family, width = input.width) ), fillRow(height = line.height, width = '100%', textInput('axis.title.x', label = 'x-Axis label', value = preserveNewlines(gg_original$labels$x), width = input.width), selectInput('plot.title.face', label = 'Face', choices = text.faces, width = input.width, selected = default$plot.title$face), selectInput('axis.title.face', label = 'Face', choices = text.faces, width = input.width, selected = default$axis.title$face), selectInput('strip.background.linetype', label = 'Type', choices = linetypes, selected = default$strip.background$linetype, width = input.width), selectInput('strip.text.face', label = 'Face', choices = text.faces, width = input.width, selected = default$strip.text$face) ), fillRow(height = line.height, width = '100%', textInput('axis.title.y', label = 'y-Axis label', value = preserveNewlines(gg_original$labels$y), width = input.width), numericInput('plot.title.size', label = 'Size', min = 1, max = 30, value = default$plot.title$size, step = 1, width = input.width), numericInput('axis.title.size', label = 'Size', min = 1, max = 30, value = default$axis.title$size, step = 1, width = input.width), numericInput('strip.background.size', label = 'Size', step = 0.1, value = default$strip.background$size, width = input.width), numericInput('strip.text.size', label = 'Size', min = 1, max = 30, value = default$strip.text$size, step = 1, width = input.width) ), fillRow(height = line.height, width = '100%', textInput('legend.colour.title', label = 'Colour', value = preserveNewlines(gg_original$labels$colour), width = input.width), selectizeInput('plot.title.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('axis.title.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('strip.background.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('strip.text.colour', label = 'Colour', choices = NULL, width = input.width) ), fillRow(height = line.height, width = '100%', textInput('legend.fill.title', label = 'Fill label', value = preserveNewlines(gg_original$labels$fill), width = input.width), numericInput('plot.title.hjust', 'Hjust', value = default$plot.title$hjust, step = 0.25, width = input.width), numericInput('axis.title.hjust', 'Hjust', value = default$axis.title$hjust, step = 0.25, width = input.width), '', numericInput('strip.text.hjust', 'Hjust', value = default$strip.text$hjust, step = 0.25, width = input.width) ), fillRow(height = line.height, width = '100%', textInput('legend.size.title', label = 'Size label', value = preserveNewlines(gg_original$labels$size), width = input.width), numericInput('plot.title.vjust', 'Vjust', value = default$plot.title$vjust, step = 0.25, width = input.width), numericInput('axis.title.vjust', 'Vjust', value = default$axis.title$vjust, step = 0.25, width = input.width), '', numericInput('strip.text.vjust', 'Vjust', value = default$strip.text$vjust, step = 0.25, width = input.width) ), fillRow(height = line.height, width = '100%', textInput('legend.alpha.title', label = 'Alpha label', value = preserveNewlines(gg_original$labels$alpha), width = input.width), numericInput('plot.title.angle', label = 'Angle', min = -180, max = 180, value = default$plot.title$angle, step = 5, width = input.width), numericInput('axis.title.angle', label = 'Angle', min = -180, max = 180, value = default$axis.title$angle, step = 5, width = input.width), '', numericInput('strip.text.angle', label = 
'Angle', min = -180, max = 180, value = default$strip.text$angle, step = 5, width = input.width) ), fillRow(height = line.height, width = '33%', textInput('legend.linetype.title', label = 'Linetype label', value = preserveNewlines(gg_original$labels$linetype), width = input.width) ), fillRow(height = line.height, width = '33%', textInput('legend.shape.title', label = 'Shape label', value = preserveNewlines(gg_original$labels$shape), width = input.width) ) ) ), miniTabPanel("Legend", icon = icon('sliders'), plotOutput("ThePlot3", width = '100%', height = '45%', click = 'legend.click'), miniContentPanel(scrollable = TRUE, fillRow(height = heading.height, width = '100%', headingOutput('Legend position'), headingOutput('Legend Title'), headingOutput('Legend Text'), headingOutput("Legend Background"), headingOutput("Legend Keys") ), fillRow(height = line.height, width = '100%', selectInput('legend.position', label = 'Position', choices = legend.positions, selected = default$legend.position, width = input.width), selectInput('legend.title.family', label = 'Family', choices = text.families, selected = default$legend.title$family, width = input.width), selectInput('legend.text.family', label = 'Family', choices = text.families, selected = default$legend.text$family, width = input.width), selectizeInput('legend.background.fill', label = 'Fill', choices = NULL, width = input.width), selectizeInput('legend.key.fill', label = 'Fill', choices = NULL, width = input.width) ), fillRow(height = line.height, width = '100%', selectInput('legend.direction', label = 'Direction', choices = legend.directions, selected = default$legend.direction, width = input.width), selectInput('legend.title.face', label = 'Face', choices = text.faces, selected = default$legend.title$face, width = input.width), selectInput('legend.text.face', label = 'Face', choices = text.faces, selected = default$legend.text$face, width = input.width), selectInput('legend.background.linetype', label = 'Type', choices = linetypes, selected = default$legend.background$linetype, width = input.width), selectInput('legend.key.linetype', label = 'Type', choices = linetypes, selected = default$legend.key$linetype, width = input.width) ), fillRow(height = line.height, width = '100%', conditionalPanel( condition = "input['legend.position'] == 'XY'", numericInput('legend.position.x', label = 'X Coord', min = 0, max = 1, value = default$legend.position.x, step = 0.01, width = input.width) ), numericInput('legend.title.size', label = 'Size', min = 1, max = 30, value = default$legend.title$size, step = 1, width = input.width), numericInput('legend.text.size', label = 'Size', min = 1, max = 30, value = default$legend.text$size, step = 1, width = input.width), numericInput('legend.background.size', label = 'Size', step = 0.1, value = default$legend.background$size, width = input.width), numericInput('legend.key.size', label = 'Size', step = 0.1, value = default$legend.key$size, width = input.width) ), fillRow(height = line.height, width = '100%', conditionalPanel( condition = "input['legend.position'] == 'XY'", numericInput('legend.position.y', label = 'Y Coord', min = 0, max = 1, value = default$legend.position.y, step = 0.01, width = input.width) ), selectizeInput('legend.title.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('legend.text.colour', label = 'Colour', choices = NULL, width = input.width), selectizeInput('legend.background.colour', label = 'Colour', choices = NULL, width = input.width), 
selectizeInput('legend.key.colour', label = 'Colour', choices = NULL, width = input.width) ) ) ), if (SubtitlesSupport) { miniTabPanel("Subtitle and Caption", icon = icon('sliders'), plotOutput("ThePlot6", width = '100%', height = '45%'), miniContentPanel(scrollable = TRUE, fillRow(width = '100%', height = heading.height, headingOutput('Subtitle') ), fillRow(width = '100%', height = line.height, tags$div(style="display:table; width:100%; margin:auto", tags$textarea(id="plot.subtitle.text", label="Subtitle", rows=3, cols=80, gg_original$labels$subtitle, style="width:inherit; font-size:9pt; padding:5px" ) ) ), fillRow(width = '100%', height = line.height, selectInput('plot.subtitle.family', label = 'Family', choices = text.families, selected = default$plot.subtitle$family, width = input.width2), selectInput('plot.subtitle.face', label = 'Face', choices = text.faces, width = input.width2, selected = default$plot.subtitle$face), numericInput('plot.subtitle.size', label = 'Size', min = 1, max = 30, value = default$plot.subtitle$size, step = 1, width = input.width2), selectizeInput('plot.subtitle.colour', label = 'Colour', choices = colours.available, selected = default$plot.subtitle$colour, width = input.width2, options = list(create = TRUE)), numericInput('plot.subtitle.hjust', 'Hjust', value = default$plot.subtitle$hjust, step = 0.25, width = input.width2) ), fillRow(width = '100%', height = heading.height, headingOutput('Caption') ), fillRow(width = '100%', height = line.height, tags$div(style="display:table; width:100%; margin:auto", tags$textarea(id="plot.caption.text", label="Subtitle", rows=3, cols=80, gg_original$labels$caption, style="width:inherit; font-size:9pt; padding:5px" ) ) ), fillRow(width = '100%', height = line.height, selectInput('plot.caption.family', label = 'Family', choices = text.families, selected = default$plot.caption$family, width = input.width2), selectInput('plot.caption.face', label = 'Face', choices = text.faces, width = input.width2, selected = default$plot.caption$face), numericInput('plot.caption.size', label = 'Size', min = 1, max = 30, value = default$plot.caption$size, step = 1, width = input.width2), selectizeInput('plot.caption.colour', label = 'Colour', choices = colours.available, selected = default$plot.caption$colour, width = input.width2, options = list(create = TRUE)), numericInput('plot.caption.hjust', 'Hjust', value = default$plot.caption$hjust, step = 0.25, width = input.width2) ) ) ) } )) server <- function(input, output, session) { colour.choices <- colours2RGB(colours.available) updateSelectizeInput(session = session, inputId = 'plot.background.fill', choices = colour.choices, selected = NA2text(default$plot.background$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'panel.background.fill', choices = colour.choices, selected = NA2text(default$panel.background$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'plot.background.colour', choices = colour.choices, selected = NA2text(default$plot.background$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'panel.background.colour', choices = colour.choices, 
selected = NA2text(default$panel.background$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'panel.grid.major.colour', choices = colour.choices, selected = NA2text(default$panel.grid.major$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'panel.grid.minor.colour', choices = colour.choices, selected = NA2text(default$panel.grid.minor$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.line.colour', choices = colour.choices, selected = NA2text(default$axis.line$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.ticks.colour', choices = colour.choices, selected = NA2text(default$axis.ticks$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.text.colour', choices = colour.choices, selected = NA2text(default$axis.text$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.text.x.colour', choices = colours2RGB(colours.available, Inherit = TRUE), selected = 'NULL', server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.text.y.colour', choices = colours2RGB(colours.available, Inherit = TRUE), selected = 'NULL', server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'plot.title.colour', choices = colour.choices, selected = NA2text(default$plot.title$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'axis.title.colour', choices = colour.choices, selected = NA2text(default$axis.title$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.background.fill', choices = colour.choices, selected = NA2text(default$legend.background$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.key.fill', choices = colour.choices, selected = NA2text(default$legend.key$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.title.colour', choices = colour.choices, selected = NA2text(default$legend.title$colour), server = TRUE, options = list(create = 
TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.text.colour', choices = colour.choices, selected = NA2text(default$legend.text$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.background.colour', choices = colour.choices, selected = NA2text(default$legend.background$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.key.colour', choices = colour.choices, selected = NA2text(default$legend.key$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'panel.background.fill', choices = colour.choices, selected = NA2text(default$panel.background$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'legend.key.colour', choices = colour.choices, selected = NA2text(default$legend.key$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'strip.background.fill', choices = colour.choices, selected = NA2text(default$strip.background$fill), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'strip.background.colour', choices = colour.choices, selected = NA2text(default$strip.background$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'strip.text.colour', choices = colour.choices, selected = NA2text(default$strip.text$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) if (SubtitlesSupport) { updateSelectizeInput(session = session, inputId = 'plot.subtitle.colour', choices = colour.choices, selected = NA2text(default$plot.subtitle$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) updateSelectizeInput(session = session, inputId = 'plot.caption.colour', choices = colour.choices, selected = NA2text(default$plot.caption$colour), server = TRUE, options = list(create = TRUE, labelField = 'name', searchField = 'colour', valueField = 'colour', render = jsColourSelector)) } gg_reactive <- reactive({ validate( need(is.validColour(input$plot.background.fill), ''), need(is.validColour(input$panel.background.fill), ''), need(is.validColour(input$plot.background.colour), ''), need(is.validColour(input$panel.background.colour), ''), need(is.validColour(input$panel.grid.major.colour), ''), need(is.validColour(input$panel.grid.minor.colour), ''), need(is.validColour(input$axis.line.colour), ''), need(is.validColour(input$axis.ticks.colour), ''), need(is.validColour(input$axis.text.colour), ''), 
need(is.validColour(input$axis.text.x.colour), ''), need(is.validColour(input$axis.text.y.colour), ''), need(is.validColour(input$plot.title.colour), ''), need(is.validColour(input$axis.title.colour), ''), need(is.validColour(input$legend.background.fill), ''), need(is.validColour(input$legend.key.fill), ''), need(is.validColour(input$legend.title.colour), ''), need(is.validColour(input$legend.text.colour), ''), need(is.validColour(input$legend.background.colour), ''), need(is.validColour(input$legend.key.colour), ''), need(is.validColour(input$strip.background.fill), ''), need(is.validColour(input$strip.background.colour), ''), need(is.validColour(input$strip.text.colour), '') ) if (SubtitlesSupport) { validate( need(is.validColour(input$plot.subtitle.colour), ''), need(is.validColour(input$plot.caption.colour), '') ) } gg <- gg_original + labs( title = checkInputText(input$plot.title), x = checkInputText(input$axis.title.x), y = checkInputText(input$axis.title.y), fill = checkInputText(input$legend.fill.title), linetype = checkInputText(input$legend.linetype.title), alpha = checkInputText(input$legend.alpha.title), size = checkInputText(input$legend.size.title), shape = checkInputText(input$legend.shape.title), colour = checkInputText(input$legend.colour.title) ) + theme( axis.text = element_text( size = input$axis.text.size, colour = input$axis.text.colour, face = input$axis.text.face, family = input$axis.text.family, angle = input$axis.text.angle, hjust = input$axis.text.hjust, vjust = input$axis.text.vjust, lineheight = input$axis.text.lineheight), axis.text.x = element_text( size = setNull(input$axis.text.x.size), colour = setNull(input$axis.text.x.colour), family = setNull(input$axis.text.x.family), angle = setNull(input$axis.text.x.angle), hjust = setNull(input$axis.text.x.hjust), vjust = setNull(input$axis.text.x.vjust) ), axis.text.y = element_text( size = setNull(input$axis.text.y.size), colour = setNull(input$axis.text.y.colour), family = setNull(input$axis.text.y.family), angle = setNull(input$axis.text.y.angle), hjust = setNull(input$axis.text.y.hjust), vjust = setNull(input$axis.text.y.vjust) ), axis.line = element_line( linetype = input$axis.line.type, colour = input$axis.line.colour, size = input$axis.line.size), axis.ticks = element_line( linetype = input$axis.ticks.type, colour = input$axis.ticks.colour, size = input$axis.ticks.size), axis.title = element_text( size = input$axis.title.size, colour = input$axis.title.colour, face = input$axis.title.face, family = input$axis.title.family, angle = input$axis.title.angle, hjust = input$axis.title.hjust, vjust = input$axis.title.vjust, lineheight = input$axis.title.lineheight), plot.title = element_text( size = input$plot.title.size, colour = input$plot.title.colour, face = input$plot.title.face, family = input$plot.title.family, angle = input$plot.title.angle, hjust = input$plot.title.hjust, vjust = input$plot.title.vjust, lineheight = input$plot.title.lineheight), plot.background = element_rect( fill = input$plot.background.fill, colour = input$plot.background.colour, size = input$plot.background.size, linetype = input$plot.background.linetype ), panel.background = element_rect( fill = input$panel.background.fill, colour = input$panel.background.colour, size = input$panel.background.size, linetype = input$panel.background.linetype ), strip.background = element_rect( fill = input$strip.background.fill, colour = input$strip.background.colour, size = input$strip.background.size, linetype = input$strip.background.linetype ), 
strip.text = element_text( size = input$strip.text.size, colour = input$strip.text.colour, face = input$strip.text.face, family = input$strip.text.family, angle = input$strip.text.angle, hjust = input$strip.text.hjust, vjust = input$strip.text.vjust), panel.grid.major = element_line( linetype = input$panel.grid.major.type, colour = input$panel.grid.major.colour, size = input$panel.grid.major.size), panel.grid.minor = element_line( linetype = input$panel.grid.minor.type, colour = input$panel.grid.minor.colour, size = input$panel.grid.minor.size), legend.text = element_text( size = input$legend.text.size, face = input$legend.text.face, colour = input$legend.text.colour, family = input$legend.text.family ), legend.title = element_text( size = input$legend.title.size, face = input$legend.title.face, colour = input$legend.title.colour, family = input$legend.title.family ), legend.background = element_rect( fill = input$legend.background.fill, colour = input$legend.background.colour, size = input$legend.background.size, linetype = input$legend.background.linetype ), legend.key = element_rect( fill = input$legend.key.fill, colour = input$legend.key.colour, size = input$legend.key.size, linetype = input$legend.key.linetype ), legend.position = (if (input$legend.position == 'XY') { c(input$legend.position.x, input$legend.position.y) } else { input$legend.position }), legend.direction = input$legend.direction ) if (SubtitlesSupport) { gg <- gg + labs( subtitle = if (input$plot.subtitle.text == '') {NULL} else {input$plot.subtitle.text}, caption = if (input$plot.caption.text == '') {NULL} else {input$plot.caption.text} ) + theme( plot.subtitle = element_text( size = input$plot.subtitle.size, colour = input$plot.subtitle.colour, face = input$plot.subtitle.face, family = input$plot.subtitle.family, hjust = input$plot.subtitle.hjust, lineheight = input$plot.subtitle.lineheight), plot.caption = element_text( size = input$plot.caption.size, colour = input$plot.caption.colour, face = input$plot.caption.face, family = input$plot.caption.family, hjust = input$plot.caption.hjust, lineheight = input$plot.caption.lineheight) ) } return(gg) }) observeEvent(input$legend.click, { x.click <- input$legend.click$x / (input$legend.click$domain$right - input$legend.click$domain$left) y.click <- input$legend.click$y / (input$legend.click$domain$top - input$legend.click$domain$bottom) if (hasLegend(gg_original)) { updateSelectInput(session, 'legend.position', selected = 'XY') updateSelectInput(session, 'legend.position.x', selected = round(x.click, 4)) updateSelectInput(session, 'legend.position.y', selected = round(y.click, 4)) } }) ThePlot <- renderPlot(width = function() { validate( need(is.numeric(input$plot.width), ''), need(is.numeric(input$plot.height), ''), need(!is.null(input$ViewerWidth), ''), need(is.validColour(input$legend.key.colour), '') ) min(input$plot.width / input$plot.height * input$ViewerWidth * 45 / 100, input$ViewerWidth ) }, { gg_reactive() }) output$ThePlot <- ThePlot output$ThePlot2 <- ThePlot output$ThePlot3 <- ThePlot output$ThePlot4 <- ThePlot output$ThePlot5 <- ThePlot output$ThePlot6 <- ThePlot observeEvent(input$done, { themeResult <- sapply(AvailableElements, compileResults, new = gg_reactive(), original = gg_original, std = default, USE.NAMES = FALSE) themeResult <- themeResult[!is.na(themeResult)] labelResult <- construcThemeString('labs', original = gg_original, new = gg_reactive(), std = default, category = 'labels') if((!is.null(themeResult) & length(themeResult) > 0) | 
!is.null(labelResult)) { if (!is.null(input$multiline)) { if (input$multiline) { oneline <- FALSE } else { oneline <- TRUE } } else { oneline <- TRUE } result <- formatResult(text = text, themestring = themeResult, labelstring = labelResult, oneline = oneline, formatR = input$formatR) rstudioapi::insertText(result) } invisible(stopApp()) }) observeEvent(input$cancel, { invisible(stopApp()) }) } viewer <- dialogViewer(dialogName = 'ggThemeAssist', width = 990, height = 900) runGadget(ui, server, stopOnCancel = FALSE, viewer = viewer) } ggThemeAssistGadget <- function(plot) { if (missing(plot)) { stop('You must provide a ggplot2 plot object.', call. = FALSE) } plot <- deparse(substitute(plot)) if (grepl('^\\s*[[:alpha:]]+[[:alnum:]\\.]*\\s*$', paste0(plot, collapse = ''))) { ggThemeAssist(plot) } else { stop('You must provide a ggplot2 plot object.', call. = FALSE) } } ggThemeAssistAddin <- function() { context <- rstudioapi::getActiveDocumentContext() text <- context$selection[[1]]$text if (nchar(text) == 0) { stop('Please highlight a ggplot2 plot before selecting this addin.') } ggThemeAssist(text) }
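# Hedged usage sketch (added example, not part of the original source): launch the
# gadget defined above on a ggplot object. Requires an interactive RStudio session
# with ggplot2 available; the object name `p` is purely illustrative.
if (interactive()) {
  p <- ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) + ggplot2::geom_point()
  ggThemeAssistGadget(p)  # the generated theme()/labs() code is inserted on "Done"
}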
sgo_etrs_laea <- function(x) UseMethod("sgo_etrs_laea") sgo_etrs_laea.sgo_points <- function(x) { if (!x$epsg %in% c(4258, 4937, 4936)) stop("This routine only supports ETRS89 coordinates.") if (x$epsg == 4936) x <- sgo_cart_lonlat(x) x.3d <- x$dimension == "XYZ" if (x.3d) { core.cols <- .sgo_points.3d.core } else { core.cols <- .sgo_points.2d.core } additional.elements <- !names(x) %in% core.cols num.elements <- sum(additional.elements, na.rm=TRUE) ellipsoid <- lonlat.datum[lonlat.datum$datum==x$datum, "ellipsoid"] params <- lonlat.ellipsoid[lonlat.ellipsoid$ellipsoid==ellipsoid, c("a","e2")] a <- params$a e2 <- params$e2 e <- sqrt(e2) FE <- (4321000); FN <- (3210000) phi0 <- 52 / RAD.TO.GRAD lambda0 <- 10 / RAD.TO.GRAD phi <- x$y / RAD.TO.GRAD lambda <- x$x / RAD.TO.GRAD lambda.delta <- lambda - lambda0 cos.lambda.delta <- cos(lambda.delta) sin.phi <- sin(phi) sin2.phi <- sin.phi * sin.phi sin.phi0 <- sin(phi0) sin2.phi0 <- sin.phi0 * sin.phi0 splat0 <- 1 - e2 * sin2.phi0 q.phi <- (1 - e2) * (sin.phi / (1 - e2 * sin2.phi) - 1/(2 * e) * log((1 - e * sin.phi) / (1 + e * sin.phi))) q.phi0 <- (1 - e2) * (sin.phi0 / splat0 - 1/(2 * e) * log((1 - e * sin.phi0) / (1 + e * sin.phi0))) q.phi.p <- (1 - e2) * (1 / (1-e2) - 1/(2 * e) * log((1 - e ) / (1 + e))) beta <- asin(q.phi / q.phi.p) beta0 <- asin(q.phi0 / q.phi.p) cos.beta <- cos(beta) cos.beta0 <- cos(beta0) sin.beta <- sin(beta) sin.beta0 <- sin(beta0) Rq <- a * sqrt(q.phi.p / 2) D <- a * (cos(phi0) / sqrt(splat0)) / (Rq * cos.beta0) B <- Rq * sqrt(2 / (1 + sin.beta0 * sin.beta + (cos.beta0 * cos.beta * cos.lambda.delta))) E <- FE + B * D * cos.beta * sin(lambda.delta) N <- FN + (B / D) * (cos.beta0 * sin.beta - sin.beta0 * cos.beta * cos.lambda.delta) en <- list(x=E, y=N) if (num.elements > 0) en <- c(en, x[additional.elements]) structure(c(en, epsg = 3035, datum = .epsgs[.epsgs$epsg == 3035, "datum"], dimension = "XY"), class = "sgo_points") } sgo_laea_etrs <- function(x) UseMethod("sgo_laea_etrs") sgo_laea_etrs.sgo_points <- function(x) { if (x$epsg != 3035) stop("This routine only supports coordinates in EPSG:3035.") core.cols <- .sgo_points.2d.core additional.elements <- !names(x) %in% core.cols num.elements <- sum(additional.elements, na.rm=TRUE) ellipsoid <- lonlat.datum[lonlat.datum$datum==x$datum, "ellipsoid"] params <- lonlat.ellipsoid[lonlat.ellipsoid$ellipsoid==ellipsoid, c("a","e2")] a <- params$a e2 <- params$e2 e <- sqrt(e2) e4 <- e2 * e2 e6 <- e4 * e2 FE <- (4321000); FN <- (3210000) phi0 <- 52 / RAD.TO.GRAD lambda0 <- 10 / RAD.TO.GRAD E <- x$x N <- x$y E.delta <- E - FE N.delta <- N - FN sin.phi0 <- sin(phi0) sin2.phi0 <- sin.phi0 * sin.phi0 splat0 <- 1 - e2 * sin2.phi0 q.phi0 <- (1 - e2) * (sin.phi0 / splat0 - 1/(2 * e) * log((1 - e * sin.phi0) / (1 + e * sin.phi0))) q.phi.p <- (1 - e2) * (1 / (1-e2) - 1/(2 * e) * log((1 - e ) / (1 + e))) beta0 <- asin(q.phi0 / q.phi.p) cos.beta0 <- cos(beta0) sin.beta0 <- sin(beta0) Rq <- a * sqrt(q.phi.p / 2) D <- a * (cos(phi0) / sqrt(splat0)) / (Rq * cos.beta0) Dtimes.N.delta <- D * N.delta E.delta.divD <- E.delta / D rho <- sqrt(E.delta.divD * E.delta.divD + Dtimes.N.delta * Dtimes.N.delta) C <- 2 * asin(rho / (2 * Rq)) sin.C <- sin(C) cos.C <- cos(C) beta.prime <- asin(cos.C * sin.beta0 + ((Dtimes.N.delta * sin.C * cos.beta0) / rho)) lambda <- lambda0 + atan2(E.delta * sin.C, (D * rho * cos.beta0 * cos.C - D * Dtimes.N.delta * sin.beta0 * sin.C)) phi <- beta.prime + (e2 / 3 + 31 * e4 / 180 + 517 * e6 / 5040) * sin(2 * beta.prime) + (23 * e4 / 360 + 251 * e6 / 3780) * sin(4 * 
beta.prime) + (761 * e6 / 45360) * sin(6 * beta.prime) xy <- list(x=lambda * RAD.TO.GRAD, y=phi * RAD.TO.GRAD) if (num.elements > 0) xy <- c(xy, x[additional.elements]) structure(c(xy, epsg = 4258, datum = .epsgs[.epsgs$epsg == 4258, "datum"], dimension = "XY"), class = "sgo_points") }
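# Hedged round-trip sketch (added example): project ETRS89 lon/lat to LAEA Europe
# (EPSG:3035) with the routines above and transform back. Assumes the package's
# sgo_points() constructor and its internal lookup tables (lonlat.datum,
# lonlat.ellipsoid, .epsgs) are available; the coordinates are illustrative only.
# p    <- sgo_points(list(10, 52), epsg = 4258)  # lon/lat near the projection centre
# laea <- sgo_etrs_laea(p)                       # close to the false origin (4321000, 3210000)
# back <- sgo_laea_etrs(laea)                    # recovers the input lon/lat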
test_that("can register for generics that don't exist", { withr::with_envvar(c(NOT_CRAN = ""), { expect_silent( s3_register("testthat::foobarbaz", "class", method = function(...) NULL) ) }) skip_if_not_installed("testthat", "3.0.4.9000") withr::with_envvar(c(NOT_CRAN = "true"), { expect_snapshot({ (expect_warning(s3_register("testthat::foobarbaz", "class", method = function(...) NULL))) }) }) })
context("canvasXpress Charts - Segregated Boxplot Tooltips") test_that("segregation boxplot tooltips", { tryCatch({ y = read.table("https://www.canvasxpress.org/data/cX-toothgrowth-dat.txt", header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE) x = read.table("https://www.canvasxpress.org/data/cX-toothgrowth-smp.txt", header = TRUE, sep = "\t", quote = "", row.names = 1, fill = TRUE, check.names = FALSE, stringsAsFactors = FALSE) }, error = function(e) { skip('Unable to read data files') }) x$dose[x$supp == "VC"] <- rep(c(1,2,3),10) x$dose[x$supp == "OJ"] <- rep(c(4,5),15) x$order[x$supp == "VC"] <- "G1" x$order[x$supp == "OJ"] <- "G2" result <- canvasXpress(data = y, smpAnnot = x, graphType = "Boxplot", graphOrientation = "vertical", colorBy = "supp", groupingFactors = list("dose", "supp"), segregateSamplesBy = list("supp"), smpOverlays = list("dose"), showBoxplotOriginalData = TRUE, showLegend = TRUE, smpLabelRotate = 90, stringSampleFactors = list("dose", "order"), title = "Boxplot Overlay Tooltips") check_ui_test(result) })
bcgam <- function(formula, family=gaussian(), data=NULL, nloop=10000, burnin=trunc(nloop/10)) { cl<-match.call() if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() if (is.null(family$family)) stop("'family' not recognized!") mf<-match.call(expand.dots=FALSE) m <- match(c("formula", "data"), names(mf), 0L) mf <- mf[c(1L, m)] mf[[1L]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) ynm <- names(mf)[1] mt <- attr(mf, "terms") y <- model.response(mf, "any") n <- length(y) if (family$family == "binomial") { if (class(y) == "factor") { y = ifelse(y == levels(y)[1], 0, 1) } } shapes1 <- NULL xmat <- NULL xmatnms <- NULL nums <- NULL ks <- list() sps <- NULL xid <- 1 for (i in 2:ncol(mf)) { if (is.numeric(attributes(mf[, i])$shape)) { shapes1 <- c(shapes1, attributes(mf[, i])$shape) xmat <- cbind(xmat, mf[, i]) xmatnms <- c(xmatnms, attributes(mf[, i])$nm) nums <- c(nums, attributes(mf[, i])$numknots) sps <- c(sps, attributes(mf[, i])$space) ks[[xid]] <- attributes(mf[, i])$knots xid <- xid + 1 } } if(is.null(xmatnms)){stop("No variables to be modeled nonparametrically!")} znms <- NULL zid <- NULL is_fac<-NULL vals<-NULL dist<-0 zid1<-NULL zid2<-NULL zmat<-NULL ind.nonpara<-NULL zmat<-NULL for (i in 2:ncol(mf)) { if (is.numeric(attributes(mf[, i])$shape)) { ind.nonpara<-c(ind.nonpara,i) } } ind.nonparaplusinter <- c(1,ind.nonpara) zmat.matrix<-model.matrix(mt,mf, contrasts) zmat<-as.matrix(zmat.matrix[,-ind.nonparaplusinter]) znms<-colnames(zmat.matrix)[-ind.nonparaplusinter] getdefault.values<-function(x.var){ ans<-NULL if(is.factor(x.var)){ans <- x.var[which.max(table(x.var))]} else{ sort.x <- sort(x.var) pos.value <- ceiling(length(sort.x)/2) ans <- sort.x[pos.value]} ans } default.plotvalues<- data.frame(lapply(mf[,-1],getdefault.values)) colnames(default.plotvalues)[1:length(xmatnms)] <- xmatnms if (family$family == "binomial" | family$family == "poisson") { wt.iter = TRUE } else { wt.iter = FALSE } if (is.null(shapes1)) { nloop <- 0 } xmat0 <- xmat shapes0 <- shapes1 nums0 <- nums ks0 <- ks sps0 <- sps xmatnms0 <- xmatnms nloop <- nloop burnin <- burnin ans <- bcgam.fit(y, xmat0, shapes0, zmat, nums0, ks0, sps0, nloop,burnin, family, xmatnms0, znms ) class(ans) <- "bcgam" ans$xmat <- xmat0 ans$xmatnms <- xmatnms0 ans$nloop <- nloop ans$burnin <- burnin ans$shapes <- shapes0 ans$y <- y ans$default.plotvalues=default.plotvalues ans$data <- mf ans$family <- family$family ans$ind_nonparam <- ind.nonpara ans$ks <- ks0 ans$sps <- sps0 ans$levels <- .getXlevels(mt, mf) ans$call <- cl ans$terms <- mt ans }
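# Hedged usage sketch (added example): fit a Bayesian constrained GAM on simulated
# data. Assumes the package's shape helpers (e.g. sm.incr()) and the bcgam.fit()
# backend used above are available; nloop is kept small only to keep the
# illustration quick.
# set.seed(1)
# n <- 100
# x <- runif(n)
# z <- factor(sample(c("a", "b"), n, replace = TRUE))
# y <- 3 * x^2 + (z == "b") + rnorm(n, sd = 0.3)
# fit <- bcgam(y ~ sm.incr(x) + z, family = gaussian(), nloop = 2000)
# summary(fit)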
owd <- function(dir = '.') {
  if (length(dir) != 1L) stop('Only one directory path may be specified.')
  if (!dir.exists(dir)) stop('Directory not found.')
  if (file.access(dir, 4L) != 0L) stop('You do not have permission to access this directory.')

  dir <- normalizePath(dir)

  if (.Platform[['OS.type']] == 'unix') {
    # macOS uses `open`; other unix-alikes use `xdg-open`
    command <- if (Sys.info()[['sysname']] == 'Darwin') 'open' else 'xdg-open'
    return(system2(command, dir, stdout = FALSE, stderr = FALSE, wait = FALSE))
  } else if (Sys.info()[['sysname']] == 'Windows') {
    return(shell.exec(dir))
  } else {
    stop('Operating system not supported.')
  }
}
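# Usage examples (added): open a directory in the system's file browser with the
# helper defined above; guarded so nothing launches during non-interactive use.
if (interactive()) {
  owd()           # current working directory
  owd(tempdir())  # any existing, readable directory path
}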
SNPs_scatter_plot = function(A, betas.Gy, num_X){
  plotList   <- list()
  label.plot <- colnames(A)
  if (nrow(A) == length(betas.Gy)) {
    dta.plot <- as.data.frame(cbind(A, betas.Gy))
    for (i in 1:num_X) {
      # build each panel inside local() so the plot keeps its own value of `i`;
      # otherwise ggplot's lazy evaluation would draw every panel from the
      # final column of A
      plotList[[i]] <- local({
        i <- i
        ggplot(dta.plot, aes(x = dta.plot[, i], y = betas.Gy)) +
          geom_point() +
          geom_hline(yintercept = 0, color = 'red') +
          xlab(expression(hat(alpha))) +
          ylab(expression(hat(beta))) +
          theme_bw()
      })
    }
    p = ggarrange(plotlist = plotList, ncol = 2, nrow = ceiling(num_X/2),
                  common.legend = TRUE, legend = "bottom",
                  labels = label.plot, font.label = list(size = 12))
    return(p)
  } else {
    stop("The number of SNPs in A matrix is not the same as the number of SNPs in betas.Gy vector.")
  }
}
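# Hedged usage sketch (added example): scatter plots of simulated instrument-exposure
# effects (columns of A) against outcome effects (betas.Gy). Assumes ggplot2 and
# ggpubr are attached, as the function above requires; names and values are
# simulated for illustration only.
# set.seed(42)
# A        <- matrix(rnorm(200), ncol = 2, dimnames = list(NULL, c("X1", "X2")))
# betas.Gy <- 0.3 * A[, 1] + rnorm(100, sd = 0.1)
# SNPs_scatter_plot(A, betas.Gy, num_X = 2)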
anm.LVe.tck<-function(){ local({ have_ttk <- as.character(tcl("info", "tclversion")) >= "8.5" if(have_ttk) { tkbutton <- ttkbutton tkcheckbutton <- ttkcheckbutton tkentry <- ttkentry tkframe <- ttkframe tklabel <- ttklabel tkradiobutton <- ttkradiobutton } tclServiceMode(FALSE) dialog.sd <- function(){ tt <- tktoplevel() tkwm.title(tt,"Lotka Volterra exploitation") nh.entry <- tkentry(tt, textvariable=Nh, width = 10) np.entry <- tkentry(tt, textvariable=Np, width = 10) rh.entry <- tkentry(tt, textvariable=Rh, width= 10) con.entry <- tkentry(tt, textvariable=Con, width = 10 ) p.entry <- tkentry(tt, textvariable=P, width = 10) dp.entry<-tkentry(tt, textvariable=Dp, width = 10) int.entry<-tkentry(tt, textvariable=Int, width = 10) Ts.entry<-tkentry(tt, textvariable=Ts, width = 10) done <- tclVar(0) show.circle<-tclVar(0) reset <- function() { tclvalue(Nh)<-"300" tclvalue(Np)<-"50" tclvalue(Rh)<-"0.7" tclvalue(Con)<-"0.4" tclvalue(P)<-"0.006" tclvalue(Dp)<-"0.2" tclvalue(Int)<-"0.1" tclvalue(Ts)<-"seq(0,200)" tclvalue(show.circle)<-"0" } reset.but <- tkbutton(tt, text="Reset", command=reset) submit.but <- tkbutton(tt, text="Submit",command=function()tclvalue(done)<-1) c.cbut <- tkcheckbutton(tt, text="Circle", variable=show.circle) build <- function() { nh <-tclvalue(Nh) np <-tclvalue(Np) rh <-tclvalue(Rh) con <-tclvalue(Con) p <-tclvalue(P) d.p <-tclvalue(Dp) interval <-tclvalue(Int) time<-parse(text=tclvalue(Ts))[[1]] circle<-as.logical(tclObj(show.circle)) substitute(anm.LVexp(nh=as.numeric(nh),np=as.numeric(np),rh=as.numeric(rh),con=as.numeric(con),p=as.numeric(p),d.p=as.numeric(d.p),interval=as.numeric(interval),time=time,circle=circle)) } tkgrid(tklabel(tt,text="Lotka Volterra exploitation"),columnspan=4) tkgrid(tklabel(tt,text="")) tkgrid(tklabel(tt,text=""), tklabel(tt,text="Prey"),tklabel(tt,text=""),tklabel(tt,text="Predator")) tkgrid(tklabel(tt,text=" n.h", width = 5), nh.entry,tklabel(tt,text="n.p"), np.entry) tkgrid(tklabel(tt,text=" r.h", width = 5), rh.entry,tklabel(tt,text="c"), con.entry) tkgrid(tklabel(tt,text=" p", width = 5), p.entry,tklabel(tt,text="d.p"), dp.entry) tkgrid(tklabel(tt,text="")) tkgrid(tklabel(tt,text=""),tklabel(tt,text="Time seq."), Ts.entry) tkgrid(tklabel(tt,text=""),tklabel(tt,text="Anim. int."),int.entry) tkgrid(tklabel(tt,text="")) tkgrid(c.cbut) tkgrid(tklabel(tt,text="")) tkgrid(submit.but, tklabel(tt,text=""),tklabel(tt,text=""),reset.but, sticky="e") tkbind(tt, "<Destroy>", function()tclvalue(done)<-2) tkwait.variable(done) if(tclvalue(done)=="2") stop("aborted") tkdestroy(tt) cmd <- build() eval.parent(cmd) invisible(tclServiceMode(TRUE)) } Nh<-tclVar("300") Np<-tclVar("50") Rh<-tclVar("0.7") Con<-tclVar("0.4") P<-tclVar("0.006") Dp<-tclVar("0.2") Int<-tclVar("0.1") Ts<-tclVar("seq(0,200)") dialog.sd() }) }
`coef.rda` <- function (object, norm = FALSE, ...) {
  if (is.null(object$CCA))
    stop("unconstrained models do not have coefficients")
  Q <- object$CCA$QR
  u <- object$CCA$u
  if (nrow(Q$qr) < nrow(u) && inherits(object$na.action, "exclude"))
    u <- u[-object$na.action, , drop = FALSE]
  b <- qr.coef(Q, u)
  if (norm)
    b <- sqrt(colSums(qr.X(Q)^2)) * b
  b
}
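# Hedged usage sketch (added example): extract constraint coefficients from a
# constrained ordination. Assumes vegan's rda() and its bundled dune data are
# available, which is where this method is normally dispatched from.
# library(vegan)
# data(dune, dune.env)
# m <- rda(dune ~ A1 + Management, data = dune.env)
# coef(m)               # raw regression coefficients
# coef(m, norm = TRUE)  # scaled by the column norms of the constraining matrix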
library(patternplot)
library(jpeg)
library(ggplot2)

Orange     <- readJPEG(system.file("img", "oranges.jpg",      package = "patternplot"))
Strawberry <- readJPEG(system.file("img", "strawberries.jpg", package = "patternplot"))
Watermelon <- readJPEG(system.file("img", "watermelons.jpg",  package = "patternplot"))

data  <- read.csv(system.file("extdata", "fruits.csv", package = "patternplot"))
x     <- data$Fruit
y     <- data$Weight
group <- data$Store

pattern.type <- list(Orange, Strawberry, Watermelon)
imageboxplot(data, x, y, group = NULL, pattern.type = pattern.type,
             frame.color = c('orange', 'darkred', 'darkgreen'),
             legend.label = "", ylab = 'Weight, Pounds')
copy_labels <- function(from, to, .strict = TRUE) {
  UseMethod("copy_labels")
}

copy_labels.default <- function(from, to, .strict = TRUE) {
  if (!is.atomic(from))
    stop("`from` should be a vector or a data.frame",
         call. = FALSE, domain = "R-labelled")
  if (!is.atomic(to))
    stop("`to` should be a vector", call. = FALSE, domain = "R-labelled")
  var_label(to) <- var_label(from)
  to
}

copy_labels.haven_labelled <- function(from, to, .strict = TRUE) {
  if (mode(from) != mode(to) & .strict)
    stop("`from` and `to` should be of same type",
         call. = FALSE, domain = "R-labelled")
  var_label(to) <- var_label(from)
  if (mode(from) == mode(to)) {
    val_labels(to) <- val_labels(from)
    na_range(to) <- na_range(from)
    na_values(to) <- na_values(from)
  }
  to
}

copy_labels.data.frame <- function(from, to, .strict = TRUE) {
  if (!is.data.frame(to))
    stop("`to` should be a data frame", call. = FALSE, domain = "R-labelled")
  for (var in names(to))
    if (var %in% names(from))
      to[[var]] <- copy_labels(from[[var]], to[[var]], .strict = .strict)
  to
}

copy_labels_from <- function(to, from, .strict = TRUE) {
  copy_labels(from, to, .strict = .strict)
}
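# Hedged usage sketch (added example): restore labels after an operation that
# drops them. Assumes the labelled() constructor and the var_label()/val_labels()
# accessors used by the methods above are available.
# x <- labelled(c(1, 2, 2), c(yes = 1, no = 2), label = "Smoker")
# y <- as.numeric(x)           # plain numeric, labels lost
# y <- copy_labels(x, y)       # variable and value labels copied back
# y <- copy_labels_from(y, x)  # same result with pipe-friendly argument order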
assign("idwST", function(formula, data, newdata, n.neigh, C, factor.p, progress=TRUE){ s = cbind(coordinates(data),data["t"]@data) s0 = cbind(coordinates(newdata),newdata["t"]@data) z = extractFormula(formula, data, newdata)$z S <- scale(s) dist.newdata <- rdist(S, cbind(standardize(s0[,1],mean(s[,1]),sd(s[,1])),standardize(s0[,2],mean(s[,2]),sd(s[,2])), C*standardize(s0[,3],mean(s[,3]),sd(s[,3])))) Pred <- as.numeric(NA,length= nrow(coordinates(newdata))) idw0 <- function(z, dist.newdata, n.neigh, factor.p){ vec.orden <- order(dist.newdata) vc <- vec.orden[1:n.neigh] dist.vec.cerca <- dist.newdata[vc] Lambda <- dist.vec.cerca^(-factor.p)/sum(dist.vec.cerca^(-factor.p)) pred <- t(Lambda)%*%z[vc] pred } if(progress) pb <- txtProgressBar(min = 0, max = nrow(coordinates(newdata)), char = "=", style = 3) for (i in 1:nrow(coordinates(newdata))){ Pred[i] <- idw0(z, dist.newdata=dist.newdata[,i], n.neigh, factor.p=factor.p) if(progress) setTxtProgressBar(pb, i) } if(progress) close(pb) idw.pred <- data.frame(s0,Pred,NA) names(idw.pred) <- c("x","y","t","var1.pred","var1.var") idw.pred } )
context(paste("Symbolic differentiation rules v", packageVersion("Deriv"), sep="")) lc_orig=Sys.getlocale(category = "LC_COLLATE") Sys.setlocale(category = "LC_COLLATE", locale = "C") num_test_deriv <- function(fun, larg, narg, h=1.e-5, tolerance=2000*h^2) { if (length(names(larg)) == 0) stop(sprintf("No argument for function %s() to differentiate. There must be at leat one argument.", fun)) if (h <= 0) stop("Parameter h must be positive") larg_ph=larg_mh=larg larg_ph[[narg]]=larg_ph[[narg]]+h larg_mh[[narg]]=larg_mh[[narg]]-h f_ph=do.call(fun, larg_ph) f_mh=do.call(fun, larg_mh) dnum=(f_ph-f_mh)/(2*h) sym_larg=larg sym_larg[[narg]]=as.symbol(narg) flang=as.symbol(fun) dsym=try(do.call(as.function(c(sym_larg, list(Deriv(as.call(c(flang, sym_larg)), narg)))), larg, quote=TRUE)) if (inherits(dsym, "try-error")) { stop(sprintf("failed to calculate symbolic derivative of '%s'", format1(as.call(c(flang, sym_larg))))) } expect_equal(as.vector(dnum), as.vector(dsym), tolerance=tolerance, info=sprintf("%s by %s", format1(as.call(c(flang, larg))), narg)) } f=function(x) {} expect_equal_deriv <- function(t, r, nmvar="x") { test=substitute(t) ref=substitute(r) ans=Deriv(test, nmvar, cache.exp=FALSE) eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref)))))) ans=Deriv(format1(test), nmvar, cache.exp=FALSE) eval(bquote(expect_equal(.(ans), format1(quote(.(ref)))))) ans=Deriv(call("~", test), nmvar, cache.exp=FALSE) eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref)))))) ans=Deriv(as.expression(test), nmvar, cache.exp=FALSE) eval(bquote(expect_equal(format1(.(ans)), format1(expression(.(ref)))))) body(f)=test ans=Deriv(f, nmvar, cache.exp=FALSE) body(f)=ref eval(bquote(expect_equal(quote(.(ans)), quote(.(f)), check.environment=FALSE))) x=seq(0.1, 1, len=10) h=1.e-7 suppressWarnings(f1 <- try(sapply(x-h, function(val) eval(test, list(x=val))), silent=TRUE)) suppressWarnings(f2 <- try(sapply(x+h, function(val) eval(test, list(x=val))), silent=TRUE)) if (!inherits(f1, "try-error") && !inherits(f2, "try-error")) { numder=(f2-f1)/h/2 refder=sapply(x, function(val) eval(ref, list(x=val))) i=is.finite(refder) & is.finite(numder) expect_gt(sum(i), 0, label=sprintf("length of central diff for %s", format1(test))) expect_equal(numder[i], refder[i], tolerance=5.e-8, label=sprintf("Central diff. 
of '%s'", format1(test)), expected.label=sprintf("'%s'", format1(ref))) } } expect_equal_format1 <- function(t, r) { eval(bquote(expect_equal(format1(.(t)), format1(.(r))))) } test_that("elementary functions", { expect_equal(Deriv("x", "x"), "1") expect_equal(Deriv(quote(x), "x"), 1) expect_equal(Deriv(quote((x)), "x"), 1) expect_equal_deriv(x**2, 2*x) expect_equal_deriv(x**n, n*x^(n-1)) expect_equal_deriv(2**x, 0.693147180559945 * 2^x) expect_equal_deriv(sin(x), cos(x)) expect_equal_deriv(cos(x), -sin(x)) expect_equal_deriv(tan(x), 1/cos(x)^2) expect_equal_deriv(asin(x), 1/sqrt(1 - x^2)) expect_equal_deriv(acos(x), -(1/sqrt(1 - x^2))) expect_equal_deriv(atan(x), 1/(1+x^2)) expect_equal_deriv(atan2(x, y), y/(x^2+y^2)) expect_equal_deriv(atan2(0.5, x), -(0.5/(0.25 + x^2))) expect_equal_deriv(exp(x), exp(x)) expect_equal_deriv(expm1(x), exp(x)) expect_equal_deriv(log(x), 1/x) expect_equal_deriv(log1p(x), 1/(1+x)) expect_equal_deriv(abs(x), sign(x)) expect_equal_deriv(sign(x), 0) expect_equal_deriv(sinh(x), cosh(x)) expect_equal_deriv(cosh(x), sinh(x)) expect_equal_deriv(tanh(x), 1-tanh(x)^2) }) if (getRversion() >= "3.1.0") { test_that("trigonometric functions with pi", { expect_equal_deriv(sinpi(x), pi*cospi(x)) expect_equal_deriv(cospi(x), -(pi*sinpi(x))) expect_equal_deriv(tanpi(x), pi/cospi(x)**2) }) } test_that("special functions", { expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y))) expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y") expect_equal_deriv(besselI(x, 0), besselI(x, 1)) expect_equal_deriv(besselI(x, 0, FALSE), besselI(x, 1)) expect_equal_deriv(besselI(x, 0, TRUE), besselI(x, 1, TRUE)-besselI(x, 0, TRUE)) expect_equal_deriv(besselI(x, 1), 0.5 * (besselI(x, 0) + besselI(x, 2))) expect_equal_deriv(besselI(x, 1, FALSE), 0.5 * (besselI(x, 0) + besselI(x, 2))) expect_equal_deriv(besselI(x, 1, TRUE), 0.5 * (besselI(x, 0, TRUE) + besselI(x, 2, TRUE))-besselI(x, 1, TRUE)) expect_equal_deriv(besselI(x, n), if (n == 0) besselI(x, 1) else 0.5 * (besselI(x, 1 + n) + besselI(x, n - 1))) expect_equal_deriv(besselI(x, n, TRUE), (if (n == 0) besselI(x, 1, TRUE) else 0.5 * (besselI(x, 1 + n, TRUE) + besselI(x, n - 1, TRUE)))-besselI(x, n, TRUE)) expect_equal_deriv(besselK(x, 0), -besselK(x, 1)) expect_equal_deriv(besselK(x, 0, FALSE), -besselK(x, 1)) expect_equal_deriv(besselK(x, 0, TRUE), besselK(x, 0, TRUE)-besselK(x, 1, TRUE)) expect_equal_deriv(besselK(x, 1), -(0.5 * (besselK(x, 0) + besselK(x, 2)))) expect_equal_deriv(besselK(x, 1, FALSE), -(0.5 * (besselK(x, 0) + besselK(x, 2)))) expect_equal_deriv(besselK(x, 1, TRUE), besselK(x, 1, TRUE)-0.5 * (besselK(x, 0, TRUE) + besselK(x, 2, TRUE))) expect_equal_deriv(besselK(x, n), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1)))) expect_equal_deriv(besselK(x, n, FALSE), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1)))) expect_equal_deriv(besselK(x, n, TRUE), besselK(x, n, TRUE)+if (n == 0) -besselK(x, 1, TRUE) else -(0.5 * (besselK(x, 1 + n, TRUE) + besselK(x, n - 1, TRUE)))) expect_equal_deriv(besselJ(x, 0), -besselJ(x, 1)) expect_equal_deriv(besselJ(x, 1), 0.5 * (besselJ(x, 0) - besselJ(x, 2))) expect_equal_deriv(besselJ(x, n), if (n == 0) -besselJ(x, 1) else 0.5 * (besselJ(x, n - 1) - besselJ(x, 1 + n))) expect_equal_deriv(besselY(x, 0), -besselY(x, 1)) expect_equal_deriv(besselY(x, 1), 0.5 * (besselY(x, 0) - besselY(x, 2))) expect_equal_deriv(besselY(x, n), if (n == 0) -besselY(x, 1) else 0.5 * (besselY(x, n - 1) - besselY(x, 
1 + n))) expect_equal_deriv(gamma(x), digamma(x) * gamma(x)) expect_equal_deriv(lgamma(x), digamma(x)) expect_equal_deriv(digamma(x), trigamma(x)) expect_equal_deriv(trigamma(x), psigamma(x, 2L)) expect_equal_deriv(psigamma(x), psigamma(x, 1L)) expect_equal_deriv(psigamma(x, n), psigamma(x, 1L+n)) expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y))) expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y") expect_equal_deriv(lbeta(x, y), digamma(x) - digamma(x + y)) expect_equal_deriv(lbeta(x, y), digamma(y) - digamma(x + y), "y") }) test_that("probability densities", { expect_equal_deriv(dbinom(5,3,x), 3 * ((3 - 5 * x) * dbinom(5, 2, x)/(1 - x)^2)) expect_equal_deriv(dnorm(x, m=0.5), -(dnorm(x, 0.5, 1) * (x - 0.5))) }) test_that("normal quantile", { expect_equal_deriv(qnorm(x, mu, lower.tail=FALSE), -(1/dnorm(qnorm(x, mean = mu, sd = 1, lower.tail = FALSE, log.p = FALSE), mean = mu, sd = 1))) expect_equal_deriv(qnorm(x, mu, lower.tail=TRUE), 1/dnorm(qnorm(x, mean = mu, sd = 1, lower.tail = TRUE, log.p = FALSE), mean = mu, sd = 1)) expect_equal_deriv(qnorm(x, mu, log.p=TRUE), exp(x)/dnorm(qnorm(x, mean = mu, sd = 1, lower.tail = TRUE, log.p = TRUE), mean = mu, sd = 1)) expect_equal_deriv(qnorm(x, mu, log.p=FALSE), 1/dnorm(qnorm(x, mean = mu, sd = 1, lower.tail = TRUE, log.p = FALSE), mean = mu, sd = 1)) }) a=0.1 test_that("chain rule: multiply by a const", { expect_equal_deriv(a*x, a) expect_equal_deriv(a[1]*x, a[1]) expect_equal_deriv(a[[1]]*x, a[[1]]) expect_equal_deriv(a$b*x, a$b) expect_equal_deriv((a*x)**2, 2*(a^2*x)) expect_equal_deriv((a*x)**n, a*n*(a*x)^(n-1)) expect_equal_deriv(sin(a*x), a*cos(a*x)) expect_equal_deriv(cos(a*x), -(a*sin(a*x))) expect_equal_deriv(tan(a*x), a/cos(a*x)^2) expect_equal_deriv(exp(a*x), a*exp(a*x)) expect_equal_deriv(log(a*x), 1/x) }) test_that("particular cases", { expect_equal_deriv(log(x, x), 0) expect_equal_deriv(x^n+sin(n*x), n * (cos(n * x) + x^(n - 1))) expect_equal_deriv(x*(1-x), 1-2*x) expect_equal_deriv(x^x, x^x+x^x*log(x)) }) test_that("indexing", { expect_equal_deriv(a[['b']], 0) }) test_that("matrix calculus", { expect_equal_deriv(solve(matrix(c(1, x, x**2, x**3), nrow=2, ncol=2)), -solve(matrix(c(1, x, x^2, x^3), nrow = 2, ncol = 2)) %*% matrix(c(0, 1, 2 * x, 3 * x^2), nrow = 2, ncol = 2, byrow = , dimnames = ) %*% solve(matrix(c(1, x, x^2, x^3), nrow = 2, ncol = 2))) }) test_that("language constructs", { expect_equal_deriv(ifelse(x>0, x^2, x^3), ifelse(test=x>0, yes=2*x, no=3*x^2)) expect_equal_deriv(with(list(c=2), x^c), with(data=list(c = 2), expr=c * x^(c - 1))) }) g <- function(x, m=0, s=1) exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi) g1c <- Deriv(g, "x") g1n <- Deriv(g, "x", cache.exp=FALSE) g2c <- Deriv(g1c, "x") g2n <- Deriv(g1n, "x", cache.exp=FALSE) m <- 0.5 s <- 3. 
x=seq(-2, 2, len=11) f <- function(a) (1+a)^(1/a) f1c <- Deriv(f) f2c <- Deriv(f1c) f3c <- Deriv(f2c) f1 <- Deriv(f, cache.exp=FALSE) f2 <- Deriv(f1, cache.exp=FALSE) f3 <- Deriv(f2, cache.exp=FALSE) a=seq(0.01, 2, len=11) test_that("expression cache test", { expect_equal_deriv(exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi), -(exp(-(0.5 * ((x - m)^2/s^2))) * (x - m)/(s^3 * sqrt(2 * pi)))) expect_equal(g2n(x, m, s), g2c(x, m, s)) expect_equal(f3(a), f3c(a)) }) test_that("reused variables", { expect_equal(Deriv(~{sum=x; sum=sum*(1+x); sum=sum*y}, c("x", "y")), quote(c(x = y * (1 + 2 * x), y = x * (1 + x)))) }) f <- function(x){ t<-x^2; log(t) } g <- function(x) cos(f(x)) test_that("composite function", { expect_equal(Deriv(g,"x"), function (x) -(2 * (sin(f(x))/x)), check.environment=FALSE) }) ifel <- ifelse drule[["ifel"]]<-alist(test=NULL, yes=(test)*1, no=(!test)*1) suppressWarnings(rm(t)) expect_equal(Deriv(~ifel(abs(t)<0.1, t**2, abs(t)), "t"), quote({ .e2 <- abs(t) < 0.1 (!.e2) * sign(t) + 2 * (t * .e2) })) rm("ifel", envir=drule) test_that("error reporting", { expect_error(Deriv(rnorm), "is not in derivative table", fixed=TRUE) expect_error(Deriv(~rnorm(x), "x"), "is not in derivative table", fixed=TRUE) expect_error(Deriv(~x+rnorm(x), "x"), "is not in derivative table", fixed=TRUE) }) set.seed(7) test_that("central differences", { for (nm_f in ls(drule)) { fargs=head(as.list(args(nm_f)), -1L) fargs[["..."]]=NULL ilo=sapply(fargs, isTRUE) | sapply(fargs, isFALSE) rule <- drule[[nm_f]] larg <- fargs narg <- length(larg) if (nm_f == "rep.int") { larg["x"]=pi larg["times"]=2 } else if (nm_f == "rep.int") { larg["x"]=pi larg["length.out"]=2 } else { larg[] <- runif(narg) } if (nm_f == "det") { larg[["x"]]=as.matrix(larg[["x"]]) } else if (nm_f == "acosh") { larg[["x"]]=1+larg[["x"]] } else if (nm_f == "diag" || nm_f == "matrix") { larg[["nrow"]]=larg[["ncol"]]=1L if (nm_f == "matrix") { larg[["dimnames"]]=NULL ilo=ilo[-which(names(ilo) %in% "dimnames")] } } if (any(ilo)) logrid=do.call(expand.grid, rep(list(c(TRUE, FALSE)), sum(ilo))) for (arg in names(rule)) { if (is.null(rule[[arg]]) || arg == "_missing") next if (is.null(fargs) || !any(ilo)) { tmp=try(num_test_deriv(nm_f, larg, narg=arg), silent=TRUE) if (inherits(tmp, "try-error")) { stop(sprintf("Failed num. deriv test on '%s(%s)'", nm_f, paste(names(larg), larg, sep="=", collapse=", "))) } } else { apply(logrid, 1, function(lv) { lolarg=larg lolarg[ilo]=lv if (nm_f == "qnorm" && isTRUE(lolarg[["log.p"]])) { lolarg[["p"]]=log(lolarg[["p"]]) } suppressWarnings(num_test_deriv(nm_f, lolarg, narg=arg)) }) } } } }) tmp <- Deriv(Deriv(quote(dnorm(x ** 2 - x)), "x"), "x") test_that("dsym cleaning after nested call", { expect_identical(Deriv(quote(.e1*x), "x"), quote(.e1)) }) fsq <- function(x) x^2 fsc <- function(x, y) sin(x) * cos(y) f_ <- Deriv(fsc) fc <- function(x, h=0.1) if (abs(x) < h) 0.5*h*(x/h)**2 else abs(x)-0.5*h myfun <- function(x, y=TRUE) NULL dmyfun <- function(x, y=TRUE) NULL drule[["myfun"]] <- alist(x=dmyfun(x, y), y=NULL) theta <- list(m=0.1, sd=2.) 
x <- names(theta) names(x)=rep("theta", length(theta)) set.seed(777) ncomp=2 a=runif(ncomp) a=a/sum(a) m=rnorm(ncomp) s=runif(ncomp) pn=function(x, a, m, s, log=FALSE) { n=length(a) structure(vapply(seq(n), function(i) a[i]*dnorm(x, m[i], s[i], log), double(length(x))), dim=c(length(x), n)) } p=function(x, a, m, s) rowSums(pn(x, a, m, s)) dp=Deriv(p, "x") test_that("doc examples", { expect_equal_format1(Deriv(fsq), function (x) 2 * x) expect_equal_format1(Deriv(fsc), function (x, y) c(x = cos(x) * cos(y), y = -(sin(x) * sin(y)))) expect_equal(f_(3, 4), c(x=0.6471023, y=0.1068000), tolerance = 1.e-7) expect_equal(Deriv(~ fsc(x, y^2), "y"), quote(-(2 * (y * sin(x) * sin(y^2))))) expect_equal(Deriv(quote(fsc(x, y^2)), c("x", "y"), cache.exp=FALSE), quote(c(x = cos(x) * cos(y^2), y = -(2 * (y * sin(x) * sin(y^2)))))) expect_equal(Deriv(expression(sin(x^2) * y), "x"), expression(2 * (x * y * cos(x^2)))) expect_equal(Deriv("sin(x^2) * y", "x"), "2 * (x * y * cos(x^2))") expect_equal(Deriv(fc, "x", cache=FALSE), function(x, h=0.1) if (abs(x) < h) x/h else sign(x), check.environment=FALSE) expect_equal(Deriv(~myfun(z^2, FALSE), "z"), quote(2 * (z * dmyfun(z^2, FALSE)))) expect_equal(Deriv(~exp(-(x-theta$m)**2/(2*theta$sd)), x, cache.exp=FALSE), quote(c(theta_m = exp(-((x - theta$m)^2/(2 * theta$sd))) * (x - theta$m)/theta$sd, theta_sd = 2 * (exp(-((x - theta$m)^2/(2 * theta$sd))) * (x - theta$m)^2/(2 * theta$sd)^2)))) expect_equal(dp(0, a, m, s), -0.9547048, tolerance=1.e-6) }) drule[["myfun"]] <- NULL f=cos g = function(f) Deriv(f) test_that("renaming primitive", { expect_identical(g(f), Deriv(cos)) }) f <- function(x, y) x + y res=c(x=1, y=1) fd=as.function(alist(x=, y=, res)) body(fd)=res f2=function(x, y) c(x, y)^2 test_that("multivar diff", { expect_identical(Deriv(f), fd) expect_equal(Deriv(f2, cache=FALSE), function (x, y) c(x = c(2, 0) * c(x, y), y = c(0, 2) * c(x, y)), check.environment=FALSE) }) Sys.setlocale(category = "LC_COLLATE", locale = lc_orig)
`calcPredictionErrorCV` <- function(gp, X, newTheta, varMatrix, nugget) {
  r = calcCorOneObs(X, gp$beta, gp$a, newTheta)
  return((gp$sig2 + nugget) - gp$sig2 * r %*% solve(varMatrix) %*% t(r) * gp$sig2)
}
chart.Weights.pso <- function(object, ..., neighbors = NULL, main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") if(plot.type %in% c("bar", "barplot")){ barplotWeights(object=object, ..., main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, legend.loc=legend.loc, cex.legend=cex.legend, colorset=colorset) } else if(plot.type == "line"){ columnnames = names(object$weights) numassets = length(columnnames) constraints <- get_constraints(object$portfolio) if(is.null(xlab)) minmargin = 3 else minmargin = 5 if(main=="") topmargin=1 else topmargin=4 if(las > 1) { bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab if(bottommargin > 10 ) { bottommargin<-10 columnnames<-substr(columnnames,1,19) } } else { bottommargin = minmargin } par(mar = c(bottommargin, 4, topmargin, 2) +.1) if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ ylim <- range(object$weights) } else { ylim <- range(c(constraints$min, constraints$max)) } plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) if(!any(is.infinite(constraints$min))){ points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) } if(!any(is.infinite(constraints$max))){ points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) } axis(2, cex.axis = cex.axis, col = element.color) axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) box(col = element.color) } } chart.Weights.optimize.portfolio.pso <- chart.Weights.pso chart.Scatter.pso <- function(object, ..., neighbors=NULL, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") R <- object$R if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") xtract = extractStats(object) columnnames = colnames(xtract) return.column = pmatch(return.col,columnnames) if(is.na(return.column)) { return.col = paste(return.col,return.col,sep='.') return.column = pmatch(return.col,columnnames) } risk.column = pmatch(risk.col,columnnames) if(is.na(risk.column)) { risk.col = paste(risk.col,risk.col,sep='.') risk.column = pmatch(risk.col,columnnames) } if(is.na(return.column) | is.na(risk.column)){ return.col <- gsub("\\..*", "", return.col) risk.col <- gsub("\\..*", "", risk.col) warning(return.col,' or ', risk.col, ' do not match extractStats output of $objective_measures slot') wts_index <- grep("w.", columnnames) wts <- xtract[, wts_index] if(is.na(return.column)){ tmpret <- applyFUN(R=R, weights=wts, FUN=return.col) xtract <- cbind(tmpret, xtract) colnames(xtract)[which(colnames(xtract) == "tmpret")] <- return.col } if(is.na(risk.column)){ tmprisk <- applyFUN(R=R, weights=wts, FUN=risk.col) xtract <- cbind(tmprisk, xtract) colnames(xtract)[which(colnames(xtract) == "tmprisk")] <- risk.col } columnnames = colnames(xtract) return.column = pmatch(return.col,columnnames) if(is.na(return.column)) { return.col = paste(return.col,return.col,sep='.') return.column = pmatch(return.col,columnnames) } risk.column = pmatch(risk.col,columnnames) if(is.na(risk.column)) { 
risk.col = paste(risk.col,risk.col,sep='.') risk.column = pmatch(risk.col,columnnames) } } if(chart.assets){ arguments <- NULL if(is.null(arguments)){ tmp.args <- unlist(lapply(object$portfolio$objectives, function(x) x$arguments), recursive=FALSE) tmp.args <- tmp.args[!duplicated(names(tmp.args))] if(!is.null(tmp.args$portfolio_method)) tmp.args$portfolio_method <- "single" arguments <- tmp.args } asset_ret <- scatterFUN(R=R, FUN=return.col, arguments) asset_risk <- scatterFUN(R=R, FUN=risk.col, arguments) xlim <- range(c(xtract[,risk.column], asset_risk)) ylim <- range(c(xtract[,return.column], asset_ret)) } else { asset_ret <- NULL asset_risk <- NULL } plot(xtract[,risk.column],xtract[,return.column], xlab=risk.col, ylab=return.col, col="darkgray", axes=FALSE, xlim=xlim, ylim=ylim, ...) if(length(names(object)[which(names(object)=='constrained_objective')])) { result.slot<-'constrained_objective' } else { result.slot<-'objective_measures' } objcols<-unlist(object[[result.slot]]) names(objcols)<-name.replace(names(objcols)) return.column = pmatch(return.col,names(objcols)) if(is.na(return.column)) { return.col = paste(return.col,return.col,sep='.') return.column = pmatch(return.col,names(objcols)) } risk.column = pmatch(risk.col,names(objcols)) if(is.na(risk.column)) { risk.col = paste(risk.col,risk.col,sep='.') risk.column = pmatch(risk.col,names(objcols)) } if(is.na(return.column) | is.na(risk.column)){ return.col <- gsub("\\..*", "", return.col) risk.col <- gsub("\\..*", "", risk.col) opt_weights <- object$weights ret <- as.numeric(applyFUN(R=R, weights=opt_weights, FUN=return.col)) risk <- as.numeric(applyFUN(R=R, weights=opt_weights, FUN=risk.col)) points(risk, ret, col="blue", pch=16) text(x=risk, y=ret, labels="Optimal",col="blue", pos=4, cex=0.8) } else { points(objcols[risk.column], objcols[return.column], col="blue", pch=16) text(x=objcols[risk.column], y=objcols[return.column], labels="Optimal",col="blue", pos=4, cex=0.8) } if(chart.assets){ points(x=asset_risk, y=asset_ret) text(x=asset_risk, y=asset_ret, labels=colnames(R), pos=4, cex=0.8) } axis(1, cex.axis = cex.axis, col = element.color) axis(2, cex.axis = cex.axis, col = element.color) box(col = element.color) } chart.RiskReward.optimize.portfolio.pso <- chart.Scatter.pso charts.pso <- function(pso, return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="PSO.Portfolios", xlim=NULL, ylim=NULL, ...){ op <- par(no.readonly=TRUE) layout(matrix(c(1,2)),heights=c(2,2),widths=1) par(mar=c(4,4,4,2)) chart.Scatter.pso(object=pso, return.col=return.col, risk.col=risk.col, chart.assets=chart.assets, element.color=element.color, cex.axis=cex.axis, main=main, xlim=xlim, ylim=ylim, ...=...) par(mar=c(2,4,0,2)) chart.Weights.pso(object=pso, neighbors=neighbors, las=3, xlab=NULL, cex.lab=1, element.color=element.color, cex.axis=cex.axis, ...=..., main="") par(op) } plot.optimize.portfolio.pso <- function(x, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="PSO.Portfolios", xlim=NULL, ylim=NULL){ charts.pso(pso=x, return.col=return.col, risk.col=risk.col, chart.assets=FALSE, cex.axis=cex.axis, element.color=element.color, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...=...) }
pctp <- function(q, a, b, gamma, lower.tail = TRUE) {
  if (mode(c(q, a, b, gamma)) != "numeric")
    stop("non-numeric argument to mathematical function")
  if (gamma <= 2 * a)
    stop("gamma must be greater than 2a")
  q <- as.vector(q)
  maxX <- q[1]
  n <- length(q)
  for (i in 1:n) {
    q[i] <- floor(q[i])
    if (q[i] > maxX) maxX <- q[i]
  }
  icomplex <- sqrt(as.complex(-1))
  # Probability at zero, computed on the log scale for numerical stability
  lf0 <- 2 * Re(cgamma(gamma - a + b * icomplex, log = TRUE)) - lgamma(gamma) - lgamma(gamma - 2 * a)
  pmfAux <- exp(lf0)
  i <- 1
  Fd <- c(pmfAux)
  digits <- getOption("digits")
  # Accumulate the CDF recursively until maxX is reached or the mass is exhausted
  while (i <= maxX + 1 && Fd[i] < (1 - 10^-digits)) {
    pmfAux <- exp(log(pmfAux) + log((a + i - 1)^2 + b^2) - log(gamma + i - 1) - log(i))
    Fd <- c(Fd, Fd[[i]] + pmfAux)
    i <- i + 1
  }
  result <- vector(mode = "numeric", length = n)
  for (i in 1:n) {
    if (q[i] < 0) result[i] <- 0 else result[i] <- Fd[q[i] + 1]
    if (!lower.tail) {
      result[i] <- 1 - result[i]
    }
  }
  return(result)
}
pcbp <- function(q, b, gamma, lower.tail = TRUE) {
  if (mode(c(q, b, gamma)) != "numeric")
    stop("non-numeric argument to mathematical function")
  if (gamma <= 0)
    stop("gamma must be greater than 0")
  q <- as.vector(q)
  maxX <- q[1]
  n <- length(q)
  for (i in 1:n) {
    q[i] <- floor(q[i])
    if (q[i] > maxX) maxX <- q[i]
  }
  icomplex <- sqrt(as.complex(-1))
  lf0 <- 2 * Re(cgamma(gamma + b * icomplex, log = TRUE)) - lgamma(gamma) - lgamma(gamma)
  pmfAux <- exp(lf0)
  i <- 1
  Fd <- c(pmfAux)
  while (i <= maxX + 1) {
    pmfAux <- exp(log(pmfAux) + log((i - 1)^2 + b^2) - log(gamma + i - 1) - log(i))
    Fd <- c(Fd, Fd[[i]] + pmfAux)
    i <- i + 1
  }
  result <- vector(mode = "numeric", length = n)
  for (i in 1:n) {
    if (q[i] < 0) result[i] <- 0 else result[i] <- Fd[q[i] + 1]
    if (!lower.tail) {
      result[i] <- 1 - result[i]
    }
  }
  return(result)
}
test_that("line_push() adds indentation", { out <- line_push("foo", "bar", width = 4, indent = 2) expect_identical(out, c("foo", " bar")) }) test_that("line_push() doesn't make a new line if current is only spaces", { expect_identical(line_push(" ", "foo", width = 2L), " foo") }) test_that("line_push() trims trailing spaces", { expect_identical(line_push("foo ", "bar", width = 1L), c("foo", "bar")) }) test_that("line_push() doesn't trim trailing spaces on sticky inputs", { expect_identical(line_push("tag", " = ", sticky = TRUE, width = 3L, indent = 2L), "tag = ") }) test_that("sticky input sticks", { expect_identical(line_push("foo ", "bar", sticky = TRUE, width = 1L), "foo bar") }) test_that("line_push() respects boundaries", { expect_identical(line_push("foo, ", "bar", boundary = 4L, width = 1L, indent = 2L), c("foo,", " bar")) expect_identical(line_push("foo, ", "bar", sticky = TRUE, boundary = 4L, width = 1L, indent = 2L), c("foo,", " bar")) expect_identical(line_push("foo, bar", "baz", boundary = 4L, width = 1L, indent = 2L), c("foo, bar", " baz")) }) test_that("line_push() handles the nchar(line) == boundary case", { expect_identical(line_push(" tag = ", "bar", sticky = TRUE, boundary = 8L, width = 3L, indent = 2L), " tag = bar") }) test_that("line_push() strips ANSI codes before computing overflow", { local_options(crayon.enabled = TRUE) if (!has_crayon()) { skip("test needs crayon") } expect_identical(length(line_push("foo", open_blue(), width = 3L)), 2L) expect_identical(length(line_push("foo", open_blue(), width = 3L, has_colour = TRUE)), 1L) }) test_that("can push several lines (useful for default base deparser)", { expect_identical(new_lines()$push(c("foo", "bar"))$get_lines(), "foobar") }) test_that("control flow is deparsed", { expect_identical(fn_call_deparse(expr(function(a, b) 1)), "function(a, b) 1") expect_identical(fn_call_deparse(expr(function(a = 1, b = 2) { 3; 4; 5 })), c("function(a = 1, b = 2) {", " 3", " 4", " 5", "}")) expect_identical(while_deparse(quote(while(1) 2)), "while (1) 2") expect_identical(for_deparse(quote(for(a in 2) 3)), "for (a in 2) 3") expect_identical(repeat_deparse(quote(repeat 1)), "repeat 1") expect_identical(if_deparse(quote(if (1) 2 else { 3 })), c("if (1) 2 else {", " 3", "}")) }) test_that("functions defs increase indent", { ctxt <- new_lines(width = 3L) expect_identical(sexp_deparse(quote(function() 1), ctxt), c("function()", " 1")) ctxt <- new_lines(width = 3L) expect_identical(sexp_deparse(function() 1, ctxt), c("<function()", " 1>")) }) test_that("blocks are deparsed", { expect_identical(braces_deparse(quote({1; 2; { 3; 4 }})), c("{", " 1", " 2", " {", " 3", " 4", " }", "}")) expect_identical_(sexp_deparse(quote({{ 1 }})), c("{", " {", " 1", " }", "}")) ctxt <- new_lines(width = 3L) expected_lines <- c("{", " 11111", " 22222", " {", " 33333", " 44444", " }", "}") expect_identical(braces_deparse(quote({11111; 22222; { 33333; 44444 }}), ctxt), expected_lines) }) test_that("multiple openers on the same line only trigger one indent", { ctxt <- new_lines(width = 3L) expect_identical(sexp_deparse(quote(function() { 1 }), ctxt), c("function()", " {", " 1", " }")) ctxt <- new_lines(width = 12L) expect_identical(sexp_deparse(quote(function() { 1 }), ctxt), c("function() {", " 1", "}")) }) test_that("multiple openers on the same line are correctly reset", { expect_identical(sexp_deparse(quote({ 1(2()) })), c("{", " 1(2())", "}")) }) test_that("parentheses are deparsed", { expect_identical(parens_deparse(quote((1))), "(1)") 
expect_identical(parens_deparse(quote(({ 1; 2 }))), c("({", " 1", " 2", "})")) expect_identical(sexp_deparse(quote(({({ 1 })}))), c("({", " ({", " 1", " })", "})")) }) test_that("spaced operators are deparsed", { expect_identical(spaced_op_deparse(quote(1 ? 2)), "1 ? 2") expect_identical(spaced_op_deparse(quote(1 <- 2)), "1 <- 2") expect_identical(spaced_op_deparse(quote(1 <<- 2)), "1 <<- 2") expect_identical(spaced_op_deparse(quote(`=`(1, 2))), "1 = 2") expect_identical(spaced_op_deparse(quote(1 := 2)), "1 := 2") expect_identical(spaced_op_deparse(quote(1 ~ 2)), "1 ~ 2") expect_identical(spaced_op_deparse(quote(1 | 2)), "1 | 2") expect_identical(spaced_op_deparse(quote(1 || 2)), "1 || 2") expect_identical(spaced_op_deparse(quote(1 & 2)), "1 & 2") expect_identical(spaced_op_deparse(quote(1 && 2)), "1 && 2") expect_identical(spaced_op_deparse(quote(1 > 2)), "1 > 2") expect_identical(spaced_op_deparse(quote(1 >= 2)), "1 >= 2") expect_identical(spaced_op_deparse(quote(1 < 2)), "1 < 2") expect_identical(spaced_op_deparse(quote(1 <= 2)), "1 <= 2") expect_identical(spaced_op_deparse(quote(1 == 2)), "1 == 2") expect_identical(spaced_op_deparse(quote(1 != 2)), "1 != 2") expect_identical(spaced_op_deparse(quote(1 + 2)), "1 + 2") expect_identical(spaced_op_deparse(quote(1 - 2)), "1 - 2") expect_identical(spaced_op_deparse(quote(1 * 2)), "1 * 2") expect_identical(spaced_op_deparse(quote(1 / 2)), "1 / 2") expect_identical(spaced_op_deparse(quote(1 %% 2)), "1 %% 2") expect_identical(spaced_op_deparse(quote(1 %>% 2)), "1 %>% 2") expect_identical(sexp_deparse(quote({ 1; 2 } + { 3; 4 })), c("{", " 1", " 2", "} + {", " 3", " 4", "}")) }) test_that("unspaced operators are deparsed", { expect_identical(unspaced_op_deparse(quote(1:2)), "1:2") expect_identical(unspaced_op_deparse(quote(1^2)), "1^2") expect_identical(unspaced_op_deparse(quote(a$b)), "a$b") expect_identical(unspaced_op_deparse(quote(a@b)), "a@b") expect_identical(unspaced_op_deparse(quote(a::b)), "a::b") expect_identical(unspaced_op_deparse(quote(a:::b)), "a:::b") }) test_that("operands are wrapped in parentheses to ensure correct predecence", { expect_identical_(sexp_deparse(expr(1 + !!quote(2 + 3))), "1 + (2 + 3)") expect_identical_(sexp_deparse(expr((!!quote(1^2))^3)), "(1^2)^3") skip_on_cran() skip_if(getRversion() < "4.0.0") expect_identical_(sexp_deparse(quote(function() 1 ? 2)), "(function() 1) ? 2") expect_identical_(sexp_deparse(expr(!!quote(function() 1) ? 2)), "(function() 1) ? 
2") }) test_that("unary operators are deparsed", { expect_identical(unary_op_deparse(quote(?1)), "?1") expect_identical(unary_op_deparse(quote(~1)), "~1") expect_identical(unary_op_deparse(quote(!1)), "!1") expect_identical_(unary_op_deparse(quote(!!1)), "!!1") expect_identical_(unary_op_deparse(quote(!!!1)), "!!!1") expect_identical_(unary_op_deparse(quote(`!!`(1))), "!!1") expect_identical_(unary_op_deparse(quote(`!!!`(1))), "!!!1") expect_identical(unary_op_deparse(quote(+1)), "+1") expect_identical(unary_op_deparse(quote(-1)), "-1") }) test_that("brackets are deparsed", { expect_identical(sexp_deparse(quote(1[2])), c("1[2]")) expect_identical(sexp_deparse(quote(1[[2]])), c("1[[2]]")) ctxt <- new_lines(width = 1L) expect_identical(sexp_deparse(quote(1[2]), ctxt), c("1[", " 2]")) ctxt <- new_lines(width = 1L) expect_identical(sexp_deparse(quote(1[[2]]), ctxt), c("1[[", " 2]]")) }) test_that("calls are deparsed", { expect_identical(call_deparse(quote(foo(bar, baz))), "foo(bar, baz)") expect_identical(call_deparse(quote(foo(one = bar, two = baz))), "foo(one = bar, two = baz)") }) test_that("call_deparse() respects boundaries", { ctxt <- new_lines(width = 1L) expect_identical(call_deparse(quote(foo(bar, baz)), ctxt), c("foo(", " bar,", " baz)")) ctxt <- new_lines(width = 7L) expect_identical(call_deparse(quote(foo(bar, baz)), ctxt), c("foo(", " bar,", " baz)")) ctxt <- new_lines(width = 8L) expect_identical(call_deparse(quote(foo(bar, baz)), ctxt), c("foo(bar,", " baz)")) ctxt <- new_lines(width = 1L) expect_identical(call_deparse(quote(foo(one = bar, two = baz)), ctxt), c("foo(", " one = bar,", " two = baz)")) }) test_that("call_deparse() handles multi-line arguments", { ctxt <- new_lines(width = 1L) expect_identical(sexp_deparse(quote(foo(one = 1, two = nested(one = 1, two = 2))), ctxt), c("foo(", " one = 1,", " two = nested(", " one = 1,", " two = 2))")) ctxt <- new_lines(width = 20L) expect_identical(sexp_deparse(quote(foo(one = 1, two = nested(one = 1, two = 2))), ctxt), c("foo(one = 1, two = nested(", " one = 1, two = 2))")) }) test_that("call_deparse() delimits CAR when needed", { fn_call <- quote(function() x + 1) call <- expr((!!fn_call)()) expect_identical(call_deparse(call), "(function() x + 1)()") roundtrip <- parse_expr(expr_deparse(call)) exp <- call2(call("(", fn_call)) expect_equal(zap_srcref(roundtrip), zap_srcref(exp)) call <- expr((!!quote(f + g))(x)) expect_identical(call_deparse(call), "`+`(f, g)(x)") expect_identical(parse_expr(expr_deparse(call)), call) call <- expr((!!quote(+f))(x)) expect_identical(call_deparse(call), "`+`(f)(x)") expect_identical(parse_expr(expr_deparse(call)), call) call <- expr((!!quote(while (TRUE) NULL))(x)) expect_identical(call_deparse(call), "`while`(TRUE, NULL)(x)") expect_identical(parse_expr(expr_deparse(call)), call) call <- expr(foo::bar(x)) expect_identical(call_deparse(call), "foo::bar(x)") expect_identical(parse_expr(expr_deparse(call)), call) }) test_that("literal functions are deparsed", { expect_identical_(sexp_deparse(function(a) 1), "<function(a) 1>") expect_identical_(sexp_deparse(expr(foo(!!function(a) 1))), "foo(<function(a) 1>)") }) test_that("literal dots are deparsed", { dots <- (function(...) env_get(, "..."))(NULL) expect_identical_(sexp_deparse(expr(foo(!!dots))), "foo(<...>)") }) test_that("environments are deparsed", { expect_identical(sexp_deparse(expr(foo(!! 
env()))), "foo(<environment>)") }) test_that("atomic vectors are deparsed", { expect_identical(sexp_deparse(set_names(c(TRUE, FALSE, TRUE), c("", "b", ""))), "<lgl: TRUE, b = FALSE, TRUE>") expect_identical(sexp_deparse(set_names(1:3, c("", "b", ""))), "<int: 1L, b = 2L, 3L>") expect_identical(sexp_deparse(set_names(c(1, 2, 3), c("", "b", ""))), "<dbl: 1, b = 2, 3>") expect_identical(sexp_deparse(set_names(as.complex(1:3), c("", "b", ""))), "<cpl: 1+0i, b = 2+0i, 3+0i>") expect_identical(sexp_deparse(set_names(as.character(1:3), c("", "b", ""))), "<chr: \"1\", b = \"2\", \"3\">") expect_identical(sexp_deparse(set_names(as.raw(1:3), c("", "b", ""))), "<raw: 01, b = 02, 03>") }) test_that("boundaries are respected when deparsing vectors", { ctxt <- new_lines(width = 1L) vec <- set_names(1:3, c("", "b", "")) expect_identical_(sexp_deparse(expr(foo(!!vec)), ctxt), c("foo(", " <int:", " 1L,", " b = 2L,", " 3L>)")) ctxt <- new_lines(width = 12L) expect_identical(sexp_deparse(list(c("foo", "bar", "baz")), ctxt), c("<list: <chr:", " \"foo\",", " \"bar\",", " \"baz\">>")) }) test_that("scalar atomic vectors are simply printed", { expect_identical(sexp_deparse(TRUE), "TRUE") expect_identical(sexp_deparse(1L), "1L") expect_identical(sexp_deparse(1), "1") expect_identical(sexp_deparse(1i), "0+1i") expect_identical(sexp_deparse("1"), "\"1\"") }) test_that("scalar raw vectors are printed in long form", { expect_identical(sexp_deparse(as.raw(1)), "<raw: 01>") }) test_that("literal lists are deparsed", { expect_identical(sexp_deparse(list(TRUE, b = 2L, 3, d = "4", as.raw(5))), "<list: TRUE, b = 2L, 3, d = \"4\", <raw: 05>>") }) test_that("long vectors are truncated by default", { expect_identical(sexp_deparse(1:10), "<int: 1L, 2L, 3L, 4L, 5L, ...>") expect_identical(sexp_deparse(as.list(1:10)), "<list: 1L, 2L, 3L, 4L, 5L, ...>") }) test_that("long vectors are truncated when max_elements = 0L", { lines <- new_lines(max_elements = 0L) expect_identical(sexp_deparse(1:10, lines), "<int: ...>") lines <- new_lines(max_elements = 0L) expect_identical(sexp_deparse(as.list(1:10), lines), "<list: ...>") }) test_that("long vectors are not truncated when max_elements = NULL", { lines <- new_lines(max_elements = NULL) expect_identical(sexp_deparse(1:10, lines), "<int: 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L>") lines <- new_lines(max_elements = NULL) expect_identical(sexp_deparse(as.list(1:10), lines), "<list: 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L>") }) test_that("other objects are deparsed with base deparser", { expect_identical_(sexp_deparse(expr(foo((!!base::list)(1, 2)))), "foo(.Primitive(\"list\")(1, 2))") expect_identical_(sexp_deparse(expr(foo((!!base::`if`)(1, 2)))), "foo(.Primitive(\"if\")(1, 2))") }) test_that("S3 objects are deparsed", { skip_on_cran() expr <- expr(list(!!factor(1:3), !!structure(list(), class = c("foo", "bar", "baz")))) expect_identical(sexp_deparse(expr), "list(<fct>, <foo>)") }) test_that("successive indentations on a single line are only counted once", { ctxt <- new_lines(5L) broken_output <- c("<list:", " <chr:", " foo = \"bar\",", " baz = \"bam\">>") expect_identical(sexp_deparse(list(c(foo = "bar", baz = "bam")), ctxt), broken_output) ctxt <- new_lines(12L) unbroken_output <- c("<list: <chr:", " foo = \"bar\",", " baz = \"bam\">>") expect_identical(sexp_deparse(list(c(foo = "bar", baz = "bam")), ctxt), unbroken_output) }) test_that("successive indentations close off properly", { expect_identical(sexp_deparse(quote(1(2(), 3(4())))), "1(2(), 3(4()))") 
expect_identical(sexp_deparse(quote(1(2(), 3(4()))), new_lines(width = 1L)), c("1(", " 2(),", " 3(", " 4()))")) expect_identical(sexp_deparse(expr(c((1), function() { 2 }))), c("c((1), function() {", " 2", "})")) }) test_that("empty quosures are deparsed", { expect_identical(strip_style(quo_deparse(quo())), "^") }) test_that("missing values are deparsed", { expect_identical(expr_deparse(NA), "NA") expect_identical(expr_deparse(NaN), "NaN") expect_identical(expr_deparse(NA_integer_), "NA_integer_") expect_identical(expr_deparse(NA_real_), "NA_real_") expect_identical(expr_deparse(NA_complex_), "NA_complex_") expect_identical(expr_deparse(NA_character_), "NA_character_") expect_identical(expr_deparse(c(NaN, 2, NA)), "<dbl: NaN, 2, NA>") expect_identical(expr_deparse(c(foo = NaN)), "<dbl: foo = NaN>") expect_identical(sexp_deparse(c(name = NA)), "<lgl: name = NA>") expect_identical(sexp_deparse(c(NA, "NA")), "<chr: NA, \"NA\">") expect_identical(sexp_deparse(quote(call(NA))), "call(NA)") expect_identical(sexp_deparse(quote(call(NA_integer_))), "call(NA_integer_)") expect_identical(sexp_deparse(quote(call(NA_real_))), "call(NA_real_)") expect_identical(sexp_deparse(quote(call(NA_complex_))), "call(NA_complex_)") expect_identical(sexp_deparse(quote(call(NA_character_))), "call(NA_character_)") }) test_that("needs_backticks() detects non-syntactic symbols", { expect_true(all(map_lgl(reserved_words, needs_backticks))) expect_false(any(map_lgl(c(".", "a", "Z"), needs_backticks))) expect_true(all(map_lgl(c("1", ".1", "~", "!"), needs_backticks))) expect_true(all(map_lgl(c("_", "_foo", "1foo"), needs_backticks))) expect_true(all(map_lgl(c(".fo!o", "b&ar", "baz <- _baz", "~quux.", "h~unoz_"), needs_backticks))) expect_false(any(map_lgl(c(".foo", "._1", "bar", "baz_baz", "quux.", "hunoz_", "..."), needs_backticks))) expect_false(needs_backticks(expr())) }) test_that("expr_text() and expr_name() interpret unicode tags ( expect_identical(expr_text(quote(`<U+006F>`)), "o") expect_identical(expr_name(quote(`~f<U+006F><U+006F>`)), "~foo") expect_identical(as_label(quote(`~f<U+006F><U+006F>`)), "~foo") }) test_that("expr_text() deparses non-syntactic symbols with backticks ( expect_identical(expr_text(sym("~foo")), "`~foo`") expect_identical(expr_text(sym("~f<U+006F><U+006F>")), "`~foo`") expect_identical(expr_text(call("~foo")), "`~foo`()") }) test_that("expr_text() deparses empty arguments", { expect_identical(expr_text(expr()), "") expect_identical(quo_text(expr()), "") expect_identical(quo_text(quo()), "") }) test_that("expr_name() deparses empty arguments", { expect_identical(expr_name(expr()), "") expect_identical(quo_name(quo()), "") expect_identical(names(quos_auto_name(quos(, ))), "<empty>") expect_identical(as_label(expr()), "<empty>") }) test_that("expr_deparse() handles newlines in strings ( x <- "foo\n" expect_identical(expr_deparse(x), "\"foo\\n\"") expect_output(expr_print(x), "foo\\n", fixed = TRUE) roundtrip <- parse_expr(expr_deparse(x)) expect_identical(x, roundtrip) }) test_that("expr_deparse() handles ANSI escapes in strings", { expect_identical(expr_deparse("\\"), deparse("\\")) expect_identical(expr_deparse("\\a"), deparse("\\a")) expect_identical(expr_deparse("\\b"), deparse("\\b")) expect_identical(expr_deparse("\\f"), deparse("\\f")) expect_identical(expr_deparse("\\n"), deparse("\\n")) expect_identical(expr_deparse("\\r"), deparse("\\r")) expect_identical(expr_deparse("\\t"), deparse("\\t")) expect_identical(expr_deparse("\\v"), deparse("\\v")) 
expect_identical(expr_deparse("\\0"), deparse("\\0")) }) test_that("as_label() and expr_name() handles .data pronoun", { expect_identical(expr_name(quote(.data[["bar"]])), "bar") expect_identical(quo_name(quo(.data[["bar"]])), "bar") expect_identical(as_label(quote(.data[["bar"]])), "bar") expect_identical(as_label(quo(.data[["bar"]])), "bar") }) test_that("as_label() handles literals", { expect_identical(as_label(1:2), "<int>") expect_identical(as_label(c(1, 2)), "<dbl>") expect_identical(as_label(letters), "<chr>") expect_identical(as_label(base::list), "<fn>") expect_identical(as_label(base::mean), "<fn>") }) test_that("as_label() handles objects", { skip_on_cran() expect_identical(as_label(mtcars), "<df[,11]>") expect_identical(as_label(structure(1, class = "foo")), "<foo>") }) test_that("bracket deparsing is a form of argument deparsing", { expect_identical(expr_deparse(quote(foo[bar, , baz()])), "foo[bar, , baz()]") expect_identical(expr_deparse(quote(foo[[bar, , baz()]])), "foo[[bar, , baz()]]") skip_on_cran() expect_identical(expr_deparse(call("[", iris, missing_arg(), drop = FALSE)), "<df[,5]>[, drop = FALSE]") }) test_that("non-syntactic symbols are deparsed with backticks", { expect_identical(expr_deparse(quote(`::foo`)), "`::foo`") expect_identical(expr_deparse(quote(x(`_foo`))), "x(`_foo`)") expect_identical(expr_deparse(quote(x[`::foo`])), "x[`::foo`]") }) test_that("symbols with unicode are deparsed consistently ( skip_if(getRversion() < "3.2") expect_identical(expr_text(sym("\u00e2a")), "\u00e2a") expect_identical(expr_deparse(sym("\u00e2a")), "\u00e2a") expect_identical(expr_text(sym("a\u00e2")), "a\u00e2") expect_identical(expr_deparse(sym("a\u00e2")), "a\u00e2") }) test_that("formal parameters are backticked if needed", { expect_identical(expr_deparse(function(`^`) {}), c("<function(`^`) { }>")) }) test_that("empty blocks are deparsed on the same line", { expect_identical(expr_deparse(quote({ })), "{ }") }) test_that("top-level S3 objects are deparsed", { skip_on_cran() f <- structure(function() { }, class = "lambda") expect_identical(expr_deparse(f), "<lambda>") }) test_that("as_label() supports symbols, calls, and literals", { expect_identical(as_label(quote(foo)), "foo") expect_identical(as_label(quote(foo(bar))), "foo(bar)") expect_identical(as_label(1L), "1L") expect_identical(as_label("foo"), "\"foo\"") expect_identical(as_label(function() NULL), "<fn>") expect_identical(as_label(expr(function() { a; b })), "function() ...") expect_identical(as_label(1:2), "<int>") expect_identical(as_label(env()), "<env>") }) test_that("as_label() supports special objects", { expect_match(as_label(quote(foo := bar)), ":=") expect_identical(as_label(quo(foo)), "foo") expect_identical(as_label(quo(foo(!!quo(bar)))), "foo(bar)") expect_identical(as_label(~foo), "~foo") expect_identical(as_label(NULL), "NULL") }) test_that("as_name() supports quosured symbols and strings", { expect_identical(as_name(quo(foo)), "foo") expect_identical(as_name(quo("foo")), "foo") expect_error(as_name(quo(foo())), "Can't convert a call to a string") }) test_that("named empty lists are marked as named", { expect_identical(expr_deparse(set_names(list(), chr())), "<named list>") }) test_that("infix operators are sticky", { expect_identical(expr_deparse(quote(foo %>% bar), width = 3L), c("foo %>%", " bar")) expect_identical(expr_deparse(quote(foo + bar), width = 3L), c("foo +", " bar")) }) test_that("argument names are backticked if needed ( expect_identical(expr_deparse(quote(list(`a b` = 1))), "list(`a b` 
= 1)") }) test_that("`next` and `break` are deparsed", { expect_equal(expr_deparse(quote({ next; (break) })), c("{", " next", " (break)", "}")) expect_equal(expr_deparse(quote(a <- next <- break)), c("a <- next <- break")) }) test_that("double colon is never wrapped ( expect_identical( expr_deparse(quote(some.very.long::construct), width = 20), "some.very.long::construct" ) expect_identical( expr_deparse(quote(id_function <- base::identity), width = 15), c( "id_function <-", " base::identity" ) ) expect_identical( expr_deparse(quote(id_fun <- base::identity), width = 20), "id_fun <- base::identity" ) }) test_that("triple colon is never wrapped ( expect_identical( expr_deparse(quote(some.very.long:::construct), width = 20), "some.very.long:::construct" ) expect_identical( expr_deparse(quote(id_function <- base:::identity), width = 15), c( "id_function <-", " base:::identity" ) ) expect_identical( expr_deparse(quote(id_fun <- base:::identity), width = 20), "id_fun <- base:::identity" ) }) test_that("backslashes in strings are properly escaped ( expect_equal( expr_deparse(sym("a\\b")), "`a\\\\b`" ) expect_equal( parse_expr(expr_deparse(sym("a\\b"))), sym("a\\b") ) expect_equal( expr_deparse(quote(c("a\\b" = "c\\d"))), "c(`a\\\\b` = \"c\\\\d\")" ) expect_equal( expr_deparse(c("a\\b" = "c\\d")), "<chr: a\\\\b = \"c\\\\d\">" ) expect_equal( expr_deparse(list("a\\b" = "c\\d")), "<list: a\\\\b = \"c\\\\d\">" ) }) test_that("formulas are deparsed ( expect_equal( expr_deparse(~foo), "<formula>" ) expect_equal( expr_deparse(quote(~foo)), "~foo" ) expect_equal( expr_deparse(quote(~+foo)), "~ +foo" ) expect_equal( expr_deparse(quote(~foo())), "~ foo()" ) }) test_that("matrices and arrays are formatted ( mat <- matrix(1:3) expect_equal(as_label(mat), "<int[,1]>") expect_equal(expr_deparse(mat), "<int[,1]: 1L, 2L, 3L>") mat2 <- matrix(1:4, 2) expect_equal(as_label(mat2), "<int[,2]>") expect_equal(expr_deparse(mat2), "<int[,2]: 1L, 2L, 3L, 4L>") arr <- array(1:3, c(1, 1, 3)) expect_equal(as_label(arr), "<int[,1,3]>") expect_equal(expr_deparse(arr), "<int[,1,3]: 1L, 2L, 3L>") }) test_that("infix operators are labelled ( expect_equal( as_label(quote({ 1; 2} + 3)), "... + 3" ) expect_equal( as_label(quote(`+`(1, 2, 3))), "`+`(1, 2, 3)" ) expect_equal( as_label(quote( arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg )), "... + arg" ) expect_equal( as_label(quote(X[key1 == "val1" & key2 == "val2"]$key3 & foobarbaz(foobarbaz(foobarbaz(foobarbaz(foobarbaz(foobarbaz(foobarbaz())))))))), "X[key1 == \"val1\" & key2 == \"val2\"]$key3 & ..." ) expect_equal( as_label(quote(X[key1 == "val1"]$key3 & foobarbaz(foobarbaz()))), "X[key1 == \"val1\"]$key3 & foobarbaz(foobarbaz())" ) expect_equal( as_label(quote(nchar(chr, type = "bytes", allowNA = TRUE) == 1)), "nchar(chr, type = \"bytes\", allowNA = TRUE) == 1" ) expect_equal( as_label(quote(very_long_expression[with(subsetting), -1] - another_very_long_expression[with(subsetting), -1] )), "very_long_expression[with(subsetting), -1] - ..." ) lhs_perfect_fit <- sym(paste(rep("a", 56), collapse = "")) lhs_no_fit <- sym(paste(rep("a", 57), collapse = "")) expect_equal( as_label(expr(!!lhs_perfect_fit + 1)), "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 1" ) expect_equal( as_label(expr(!!lhs_perfect_fit + 10)), "... + 10" ) expect_equal( as_label(expr(1 + !!lhs_perfect_fit)), "1 + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" ) expect_equal( as_label(expr(10 + !!lhs_perfect_fit)), "10 + ..." 
) expect_equal( as_label(expr(!!lhs_no_fit + 1)), "... + 1" ) expect_equal( as_label(expr(!!lhs_no_fit + !!lhs_no_fit)), "... + ..." ) }) test_that("binary op without arguments", { expect_equal( expr_deparse(quote(`+`())), "`+`()" ) expect_equal( expr_deparse(quote(`$`())), "`$`()" ) expect_equal( expr_deparse(quote(`~`())), "`~`()" ) })
PE <- function(U, b) {
  if (missing(U))
    stop("The membership degree matrix U must be given")
  if (is.null(U))
    stop("The membership degree matrix U is empty")
  U <- as.matrix(U)
  if (any(is.na(U)))
    stop("The membership degree matrix U must not contain NA values")
  if (!is.numeric(U))
    stop("The membership degree matrix U is not numeric")
  if (missing(b)) {
    b <- exp(1)
  }
  if (!is.numeric(b)) {
    b <- exp(1)
    cat("The logarithmic base b is not numeric: the default value b=exp(1) will be used ", fill = TRUE)
  }
  if (b <= 1) {
    b <- exp(1)
    cat("The logarithmic base b must be >1: the default value b=exp(1) will be used ", fill = TRUE)
  }
  part.ent <- partEntropy(U = U, b = b, n = nrow(U))
  return(part.ent)
}
NULL setMethod( f = "smooth_savitzky", signature = signature(object = "GammaSpectrum"), definition = function(object, m = 3, p = 2, ...) { x <- get_counts(object) z <- savitzky(x, m = m, p = p) methods::initialize(object, count = z) } ) setMethod( f = "smooth_savitzky", signature = signature(object = "GammaSpectra"), definition = function(object, m = 3, p = 2, ...) { spc <- lapply(X = object, FUN = smooth_savitzky, m = m, p = p) .GammaSpectra(spc) } ) savitzky <- function(x, m, p = 2) { m <- as.integer(m)[[1L]] if (m %% 2 == 0) stop(sQuote("m"), " must be an odd integer.", call. = FALSE) k <- (m - 1) / 2 i <- seq(from = -k, to = k, by = 1) j <- utils::head(utils::tail(seq_along(x), n = -k), n = -k) conv <- coef_savitzky(m, p) smoothed <- vapply( X = j, FUN = function(j, i, conv, data) { sum(conv * data[j + i]) }, FUN.VALUE = double(1), i = i, conv = conv, data = x ) x[j] <- smoothed x } coef_savitzky <- function(m, p = 2) { k <- (m - 1) / 2 z <- seq(from = -k, to = k, by = 1) J <- vapply(X = c(0, p), FUN = function(p, z) z^p, z, FUN.VALUE = double(m)) (solve(t(J) %*% J) %*% t(J))[1, , drop = TRUE] }
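# Illustrative usage sketch (not part of the original source): the plain helpers
# savitzky() and coef_savitzky() defined above are self-contained, so they can be
# exercised on a simulated noisy signal without the GammaSpectrum/GammaSpectra classes.
set.seed(42)
signal <- sin(seq(0, 2 * pi, length.out = 101)) + rnorm(101, sd = 0.1)
smoothed <- savitzky(signal, m = 7, p = 2)   # 7-point window, quadratic fit
head(cbind(raw = signal, smoothed = smoothed))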
test_that("reverse_geocode works", { set_key("dummy_api_key") data(poi) nrows <- 3 (x <- sf::st_sf( id = 1:nrows, geometry = sf::st_sfc(lapply(1:nrows, function(x) sf::st_geometrycollection())) )) expect_error(reverse_geocode(poi = x), "'poi' has empty entries in the geometry column.") expect_error(reverse_geocode(poi = c(1, 2, 3)), "'poi' must be an sf object.") expect_error(reverse_geocode(poi = c("character", NA)), "'poi' must be an sf object.") expect_error(reverse_geocode(poi = poi, results = -100), "'results' must be in the valid range from 1 to 100.") expect_error(reverse_geocode(poi = poi, sf = NA), "'sf' must be a 'boolean' value.") expect_error(reverse_geocode(poi = poi, results = "-100"), "'results' must be of type 'numeric'.") with_mock( "hereR:::.async_request" = function(url, rps) { hereR:::mock$reverse_geocode_response }, reverse <- reverse_geocode(poi = poi, results = 3, sf = TRUE), expect_s3_class(reverse, c("sf", "data.frame"), exact = TRUE), expect_true(all(sf::st_geometry_type(reverse) == "POINT")) ) with_mock( "hereR:::.async_request" = function(url, rps) { hereR:::mock$reverse_geocode }, reverse <- reverse_geocode(poi = poi, results = 3, sf = FALSE), expect_s3_class(reverse, "data.frame", exact = TRUE), expect_type(reverse[["lat_position"]], "double"), expect_type(reverse[["lng_position"]], "double"), expect_type(reverse[["lat_access"]], "double"), expect_type(reverse[["lng_access"]], "double") ) })
`kendall.post` <- function(Y, group, nperm=999, mult="holm") { mult <- match.arg(mult, c("sidak", p.adjust.methods)) Y <- as.matrix(Y) n <- nrow(Y) p <- ncol(Y) if(p < 2) stop("there is only one variable in the data matrix") R <- apply(Y,2,rank) if(missing(group)) group <- rep(1,p) if(length(group) != p){ stop("the number of species in the vector differs from the total number of species") } group <- as.factor(group) gr.lev <- levels(group) ngr <- nlevels(group) gr <- as.list(1:ngr) n.per.gr <- vector(length=ngr) for(i in 1:ngr){ gr[[i]] <- which(group==gr.lev[i]) n.per.gr[i] <- length(gr[[i]]) } counter <- as.list(1:ngr) for(i in 1:ngr){ counter[[i]] <- vector(length = n.per.gr[i]) } W.gr <- vector("list",ngr) if(ngr > 1) spear.gr <- vector("list",ngr) for(i in 1:ngr){ p.i <- n.per.gr[i] if(p.i < 2) stop(gettextf("there is only one variable in group %d", gr.lev[i])) R.gr <- R[,gr[[i]]] spear.mat <- cor(R.gr) diag(spear.mat) <- NA spear.mean <- apply(spear.mat, 1, mean, na.rm=TRUE) W.var <- ((p.i-1)*spear.mean+1)/p.i for(j in 1:p.i){ R.gr.mod <- R.gr[,-j] counter[[i]][j] <- 1 for(k in 1:nperm){ var.perm <- sample(R.gr[,j]) spear.mat.perm <- cor(cbind(R.gr.mod, var.perm)) diag(spear.mat.perm) <- NA spear.mean.j.perm <- mean(spear.mat.perm[p.i,], na.rm=TRUE) W.perm <- ((p.i-1)*spear.mean.j.perm+1)/p.i if(W.perm >= W.var[j]) counter[[i]][j] <- counter[[i]][j]+1 } } W.gr[[i]] <- W.var if(ngr > 1) spear.gr[[i]] <- spear.mean } for(i in 1:ngr) { counter[[i]] <- counter[[i]]/(nperm+1) } vec <- counter[[1]] if(ngr > 1) { for(i in 2:ngr) vec = c(vec, counter[[i]]) } if(length(vec) != p) stop("error in putting together vector 'vec'") if(mult == "sidak") { vec.corr = NA for(i in 1:p) vec.corr = c(vec.corr, (1-(1-vec[i])^p)) vec.corr <- vec.corr[-1] } else { vec.corr <- p.adjust(vec, method=mult) } if(ngr > 1) { vec.gr <- vector("list",ngr) end <- 0 for(i in 1:ngr){ beg <- end+1 end <- end + n.per.gr[i] vec.gr[[i]] <- vec.corr[beg:end] } } if(ngr == 1) { table <- rbind(spear.mean, W.var, counter[[1]], vec.corr) rownames(table) <- c("Spearman.mean", "W.per.species", "Prob", "Corrected prob") } else { table <- as.list(1:ngr) for(i in 1:ngr) { table[[i]] <- rbind(spear.gr[[i]], W.gr[[i]], counter[[i]], vec.gr[[i]]) rownames(table[[i]]) <- c("Spearman.mean", "W.per.species", "Prob", "Corrected prob") colnames(table[[i]]) <- colnames(table[[i]], do.NULL = FALSE, prefix = "Spec") } } if(ngr == 1) { out <- list(A_posteriori_tests=table, Correction.type=mult) } else { out <- list(A_posteriori_tests_Group=table, Correction.type=mult) } class(out) <- "kendall.post" out }
BootstrapESConfInterval <- function(Ra, number.resamples, cl) {
  if (nargs() < 3) {
    stop("Too few arguments")
  }
  if (nargs() > 3) {
    stop("Too many arguments")
  }
  profit.loss.data <- as.vector(Ra)
  unsorted.loss.data <- -profit.loss.data
  losses.data <- sort(unsorted.loss.data)
  n <- length(losses.data)
  if (is.vector(cl) & (length(cl) != 1)) {
    stop("Confidence level must be a scalar")
  }
  if (length(number.resamples) != 1) {
    stop("Number of resamples must be a scalar")
  }
  if (cl >= 1) {
    stop("Confidence level must be less than 1")
  }
  if (cl <= 0) {
    stop("Confidence level must be greater than 0")
  }
  if (number.resamples <= 0) {
    stop("Number of resamples must be greater than 0")
  }
  # Bootstrap the historical-simulation ES and report a 90% percentile interval
  es <- bootstrap(losses.data, number.resamples, HSES, cl)$thetastar
  y <- quantile(es, c(.05, .95))
  return(y)
}
library(hamcrest)

test.colNamesPreserved <- function() {
  m <- matrix(1:10 / 5, ncol = 2)
  colnames(m) <- c("a", "b")
  mf <- format(m, digits = 2)
  assertThat(colnames(mf), identicalTo(c("a", "b")))
}
win2F<-function(m1.1,m2.1,m3.1=NA,m4.1=NA,m1.2,m2.2,m3.2=NA,m4.2=NA, s1.1=NA,s2.1=NA,s3.1=NA,s4.1=NA,s1.2=NA,s2.2=NA,s3.2=NA,s4.2=NA, r12=NULL, r13=NULL, r14=NULL, r15=NULL, r16=NULL, r17=NULL, r18=NULL, r23=NULL, r24=NULL, r25=NULL, r26=NULL, r27=NULL, r28=NULL, r34=NULL, r35=NULL, r36=NULL, r37=NULL, r38=NULL, r45=NULL, r46=NULL, r47=NULL, r48=NULL, r56=NULL, r57=NULL, r58=NULL, r67=NULL, r68=NULL, r78=NULL, r=NULL, s = NULL, n, alpha=.05) { V1<-V2<-V3<-V4<-V5<-V6<-V7<-V8<-dv<-id<-iv1<-iv2<-NULL levels<-NA levels[is.na(m4.1) & is.na(m3.1)]<-2 levels[is.na(m4.1) & !is.na(m3.1)]<-3 levels[!is.na(m4.1)]<-4 oldoption<-options(contrasts=c("contr.helmert", "contr.poly")) oldoption on.exit(options(oldoption)) if (levels=="2"){ if (!is.null(s)){ s1.1<-s; s2.1<-s;s1.2<-s;s2.2<-s var1<-s^2; var2<-s^2;var3<-s^2;var4<-s^2} if (is.null(s)){var1<-s1.1^2; var2<-s2.1^2;var3<-s1.2^2;var4<-s2.2^2} if (!is.null(r)){r12<-r;r13<-r;r14<-r; r23<-r;r24<-r; r34<-r} cov12<-r12*s1.1*s2.1;cov13<-r13*s1.1*s1.2;cov14<-r14*s1.1*s2.2; cov23<-r23*s2.1*s1.2;cov24<-r24*s2.1*s2.2; cov34<-r34*s2.1*s2.2; out <- MASS::mvrnorm(n, mu = c(m1.1,m2.1,m1.2,m2.2), Sigma = matrix(c(var1,cov12,cov13, cov14, cov12,var2,cov23, cov24, cov13, cov23,var3,cov34, cov14, cov24, cov34, var4), ncol = 4), empirical = TRUE) out<-as.data.frame(out) out<-dplyr::rename(out, y1 = V1, y2 = V2, y3 = V3, y4 = V4) out$id <- rep(1:nrow(out)) out$id<-as.factor(out$id) out<-tidyr::gather(out,key="time",value="dv",-id) out$time<-as.factor(out$time) out$time<-as.numeric(out$time) out$iv1<-NA out$iv1[out$time==1|out$time==3]<-1 out$iv1[out$time==2|out$time==4]<-2 out$iv2<-NA out$iv2[out$time==1|out$time==2]<-1 out$iv2[out$time==3|out$time==4]<-2 out$iv1<-as.ordered(out$iv1) out$iv2<-as.ordered(out$iv2) model<-ez::ezANOVA(data=out, dv=dv, wid=id, within = iv1+iv2, type=3, detailed=TRUE) dfA<-model$ANOVA$DFn[2] dfB<-model$ANOVA$DFn[3] dfAB<-model$ANOVA$DFn[4] dfWA<-model$ANOVA$DFd[2] dfWB<-model$ANOVA$DFd[3] dfWAB<-model$ANOVA$DFd[4] SSA<-model$ANOVA$SSn[2] SSB<-model$ANOVA$SSn[3] SSAB<-model$ANOVA$SSn[4] SSWA<-model$ANOVA$SSd[2] SSWB<-model$ANOVA$SSd[3] SSWAB<-model$ANOVA$SSd[4] eta2A<-SSA/(SSA+SSWA) eta2B<-SSB/(SSB+SSWB) eta2AB<-SSAB/(SSAB+SSWAB) f2A<-eta2A/(1-eta2A) f2B<-eta2B/(1-eta2B) f2AB<-eta2AB/(1-eta2AB) lambdaA<-f2A*dfWA lambdaB<-f2B*dfWB lambdaAB<-f2AB*dfWAB minusalpha<-1-alpha FtA<-stats::qf(minusalpha, dfA, dfWA) FtB<-stats::qf(minusalpha, dfB, dfWB) FtAB<-stats::qf(minusalpha, dfAB, dfWAB) powerA<-round(1-stats::pf(FtA, dfA,dfWA,lambdaA),3) powerB<-round(1-stats::pf(FtB, dfB,dfWB,lambdaB),3) powerAB<-round(1-stats::pf(FtAB, dfAB,dfWAB,lambdaAB),3) eta2A<-round((eta2A),3) eta2B<-round((eta2B),3) eta2AB<-round((eta2AB),3) message("Partial eta-squared Factor A = ", eta2A) message("Power Factor A (Unadjusted) for n = ",n," is ", powerA) message("Partial eta-squared Factor B = ", eta2B) message("Power Factor B (Unadjusted) for n = ",n," is ", powerB) message("Partial eta-squared AxB = ", eta2AB) message("Power AxB (Unadjusted) for n = ",n," is ", powerAB) message("Both Factors Have 2 levels - There is no adjustment when levels = 2") result <- data.frame(matrix(ncol = 7)) colnames(result) <- c("n", "eta2 A","Power A", "eta2 B", "Power B", "eta2 AxB","Power AxB") result[, 1]<-n result[, 2]<-eta2A result[, 3]<-powerA result[, 4]<-eta2B result[, 5]<-powerB result[, 6]<-eta2AB result[, 7]<-powerAB output<-na.omit(result) rownames(output)<- c() } if (levels=="3"){ if (!is.null(s)){ s1.1<-s; s2.1<-s;s3.1<-s;s1.2<-s;s2.2<-s;s3.2<-s var1<-s^2; 
var2<-s^2;var3<-s^2;var4<-s^2;var5<-s^2;var6<-s^2} if (is.null(s)){var1<-s1.1^2; var2<-s2.1^2;var3<-s3.1^2;var4<-s1.2^2;var5<-s2.2^2; var6<-s3.2^2} if (!is.null(r)){r12<-r;r13<-r;r14<-r;r15<-r;r16<-r; r23<-r;r24<-r;r25<-r;r26<-r; r34<-r;r35<-r;r36<-r; r45<-r;r46<-r; r56<-r} cov12<-r12*s1.1*s2.1;cov13<-r13*s1.1*s3.1;cov14<-r14*s1.1*s1.2;cov15<-r15*s1.1*s2.2;cov16<-r16*s1.1*s3.2; cov23<-r23*s2.1*s3.1;cov24<-r24*s2.1*s1.2;cov25<-r25*s2.1*s2.2;cov26<-r26*s2.1*s3.2; cov34<-r34*s3.1*s1.2;cov35<-r35*s3.1*s2.2;cov36<-r36*s3.1*s3.2; cov45<-r45*s1.2*s2.2;cov46<-r46*s1.2*s3.2; cov56<-r56*s2.2*s3.2 out <- MASS::mvrnorm(n, mu = c(m1.1,m2.1,m3.1,m1.2,m2.2,m3.2), Sigma = matrix(c(var1,cov12,cov13, cov14, cov15, cov16, cov12,var2,cov23, cov24, cov25, cov26, cov13, cov23,var3,cov34, cov35, cov36, cov14, cov24, cov34, var4, cov45, cov46, cov15, cov25, cov35, cov45, var5, cov56, cov16, cov26, cov36, cov46, cov56, var6), ncol = 6), empirical = TRUE) out<-as.data.frame(out) out<-dplyr::rename(out, y1 = V1, y2 = V2, y3 = V3, y4 = V4, y5 = V5, y6 = V6) out$id <- rep(1:nrow(out)) out$id<-as.factor(out$id) out<-tidyr::gather(out,key="time",value="dv",-id) out$time<-as.factor(out$time) out$time<-as.numeric(out$time) out$iv1<-NA out$iv1[out$time==1|out$time==4]<-1 out$iv1[out$time==2|out$time==5]<-2 out$iv1[out$time==3|out$time==6]<-3 out$iv2<-NA out$iv2[out$time==1|out$time==2|out$time==3]<-1 out$iv2[out$time==4|out$time==5|out$time==6]<-2 out$iv1<-as.ordered(out$iv1) out$iv2<-as.ordered(out$iv2) model<-ez::ezANOVA(data=out, dv=dv, wid=id, within = iv1+iv2, type=3, detailed=TRUE) dfA<-model$ANOVA$DFn[2] dfB<-model$ANOVA$DFn[3] dfAB<-model$ANOVA$DFn[4] dfWA<-model$ANOVA$DFd[2] dfWB<-model$ANOVA$DFd[3] dfWAB<-model$ANOVA$DFd[4] SSA<-model$ANOVA$SSn[2] SSB<-model$ANOVA$SSn[3] SSAB<-model$ANOVA$SSn[4] SSWA<-model$ANOVA$SSd[2] SSWB<-model$ANOVA$SSd[3] SSWAB<-model$ANOVA$SSd[4] eta2A<-SSA/(SSA+SSWA) eta2B<-SSB/(SSB+SSWB) eta2AB<-SSAB/(SSAB+SSWAB) f2A<-eta2A/(1-eta2A) f2B<-eta2B/(1-eta2B) f2AB<-eta2AB/(1-eta2AB) lambdaA<-f2A*dfWA lambdaB<-f2B*dfWB lambdaAB<-f2AB*dfWAB minusalpha<-1-alpha FtA<-stats::qf(minusalpha, dfA, dfWA) FtB<-stats::qf(minusalpha, dfB, dfWB) FtAB<-stats::qf(minusalpha, dfAB, dfWAB) powerA<-round(1-stats::pf(FtA, dfA,dfWA,lambdaA),3) powerB<-round(1-stats::pf(FtB, dfB,dfWB,lambdaB),3) powerAB<-round(1-stats::pf(FtAB, dfAB,dfWAB,lambdaAB),3) ggeA<-round(model$`Sphericity Corrections`$GGe[1],3) ggeAB<-round(model$`Sphericity Corrections`$GGe[2],3) hfeA<-round(model$`Sphericity Corrections`$HFe[1],3) hfeAB<-round(model$`Sphericity Corrections`$HFe[2],3) hfeA[hfeA>1]<-1 hfeAB[hfeAB>1]<-1 ggdfA<-ggeA*dfA ggdfAB<-ggeAB*dfAB ggdfWA<-ggeA*dfWA ggdfWAB<-ggeAB*dfWAB hfdfA<-hfeA*dfA hfdfAB<-hfeAB*dfAB hfdfWA<-hfeA*dfWA hfdfWAB<-hfeAB*dfWAB lambdaggA<-f2A*ggdfWA lambdaggAB<-f2AB*ggdfWAB lambdahfA<-f2A*hfdfWA lambdahfAB<-f2AB*hfdfWAB FtggA<-stats::qf(minusalpha, ggdfA, ggdfWA) FtggAB<-stats::qf(minusalpha, ggdfAB, ggdfWAB) FthfA<-stats::qf(minusalpha, hfdfA, hfdfWA) FthfAB<-stats::qf(minusalpha, hfdfAB, hfdfWAB) powerggA<-round(1-stats::pf(FtggA, ggdfA,ggdfWA,lambdaggA),3) powerggAB<-round(1-stats::pf(FtggAB, ggdfAB,ggdfWAB,lambdaggAB),3) powerhfA<-round(1-stats::pf(FthfA, hfdfA,hfdfWA,lambdahfA),3) powerhfAB<-round(1-stats::pf(FthfAB, hfdfAB,hfdfWAB,lambdahfAB),3) eta2A<-round((eta2A),3) eta2B<-round((eta2B),3) eta2AB<-round((eta2AB),3) message("Partial eta-squared Factor A = ", eta2A) message("Power Factor A (Unadjusted) for n = ",n," is ", powerA) message("Power Factor A H-F Adjusted (Epsilon = ",hfeA ,") for n 
= ",n, " is ", powerhfA) message("Power Factor A G-G Adjusted (Epsilon = ", ggeA,") for n = ",n, " is ", powerggA) message("Partial eta-squared Factor B = ", eta2B) message("Power Factor B (Unadjusted) for n = ",n," is ", powerB) message("Power Factor B Adjusted - There is no adjustment when levels = 2") message("Partial eta-squared AxB = ", eta2AB) message("Power AxB (Unadjusted) for n = ",n," is ", powerAB) message("Power AxB H-F Adjusted (Epsilon = ",hfeAB ,") for n = ",n, " is ", powerhfAB) message("Power AxB G-G Adjusted (Epsilon = ", ggeAB,") for n = ",n, " is ", powerggAB) result <- data.frame(matrix(ncol = 15)) colnames(result) <- c("n", "eta2 A","Power A (Unadujsted)", "HF epsilon A", "Power A (HF)","GG Epsilon A","Power A (GG)","eta2 B", "Power B", "eta2 AxB","Power AxB(Unadjusted)","HF epsilon AxB", "Power AxB(HF)","GG Epsilon AB","Power AxB(GG)") result[, 1]<-n result[, 2]<-eta2A result[, 3]<-powerA result[, 4]<-hfeA result[, 5]<-powerhfA result[, 6]<-ggeA result[, 7]<-powerggA result[, 8]<-eta2B result[, 9]<-powerB result[, 10]<-eta2AB result[, 11]<-powerAB result[, 12]<-hfeAB result[, 13]<-powerhfAB result[, 14]<-ggeAB result[, 15]<-powerggAB output<-na.omit(result) rownames(output)<- c() } if (levels=="4"){ if (!is.null(s)){ s1.1<-s; s2.1<-s;s3.1<-s;s4.1<-s;s1.2<-s;s2.2<-s;s3.2<-s;s4.2<-s var1<-s^2; var2<-s^2;var3<-s^2;var4<-s^2;var5<-s^2;var6<-s^2;var7<-s^2;var8<-s^2} if (is.null(s)){var1<-s1.1^2; var2<-s2.1^2;var3<-s3.1^2;var4<-s4.1^2;var5<-s1.2^2;var6<-s2.2^2;var7<-s3.2^2;var8<-s4.2^2} if (!is.null(r)){r12<-r;r13<-r;r14<-r;r15<-r;r16<-r;r17<-r;r18<-r;r23<-r;r24<-r;r25<-r;r26<-r;r27<-r;r28<-r r34<-r;r35<-r;r36<-r;r37<-r;r38<-r;r45<-r;r46<-r;r47<-r;r48<-r;r56<-r;r57<-r;r58<-r r67<-r;r68<-r;r78<-r} cov12<-r12*s1.1*s2.1;cov13<-r13*s1.1*s3.1;cov14<-r14*s1.1*s4.1;cov15<-r15*s1.1*s1.2;cov16<-r16*s1.1*s2.2;cov17<-r17*s1.1*s3.2;cov18<-r18*s1.1*s4.2 cov23<-r23*s2.1*s3.1;cov24<-r24*s2.1*s4.1;cov25<-r25*s2.1*s1.2;cov26<-r26*s2.1*s2.2;cov27<-r27*s2.1*s3.2;cov28<-r28*s2.1*s4.2 cov34<-r34*s3.1*s4.1;cov35<-r35*s3.1*s1.2;cov36<-r36*s3.1*s2.2;cov37<-r37*s3.1*s3.2;cov38<-r38*s3.1*s4.2 cov45<-r45*s4.1*s1.2;cov46<-r46*s4.1*s2.2;cov47<-r47*s4.1*s3.2;cov48<-r48*s4.1*s4.2 cov56<-r56*s1.2*s2.2;cov57<-r57*s1.2*s3.2;cov58<-r58*s1.2*s4.2 cov67<-r67*s2.2*s3.2;cov68<-r68*s2.2*s4.2 cov78<-r78*s3.2*s4.2 out <- MASS::mvrnorm(n, mu = c(m1.1,m2.1,m3.1,m4.1,m1.2,m2.2,m3.2,m4.2), Sigma = matrix(c(var1,cov12,cov13, cov14, cov15, cov16, cov17, cov18, cov12,var2,cov23, cov24, cov25, cov26, cov27, cov28, cov13, cov23,var3,cov34, cov35, cov36, cov37, cov38, cov14, cov24, cov34, var4, cov45, cov46, cov47, cov48, cov15, cov25, cov35, cov45, var5, cov56, cov57, cov58, cov16, cov26, cov36, cov46, cov56, var6, cov67, cov68, cov17, cov27, cov37, cov47, cov57, cov67, var7, cov78, cov18, cov28, cov38, cov48, cov58, cov68, cov78, var8), ncol = 8), empirical = TRUE) out<-as.data.frame(out) out<-dplyr::rename(out, y1 = V1, y2 = V2, y3 = V3, y4 = V4, y5 = V5, y6 = V6, y7 = V7, y8 = V8) out$id <- rep(1:nrow(out)) out$id<-as.factor(out$id) out<-tidyr::gather(out,key="time",value="dv",-id) out$time<-as.factor(out$time) out$time<-as.numeric(out$time) out$iv1<-NA out$iv1[out$time==1|out$time==5]<-1 out$iv1[out$time==2|out$time==6]<-2 out$iv1[out$time==3|out$time==7]<-3 out$iv1[out$time==4|out$time==8]<-4 out$iv2<-NA out$iv2[out$time==1|out$time==2|out$time==3|out$time==4]<-1 out$iv2[out$time==5|out$time==6|out$time==7|out$time==8]<-2 out$iv1<-as.ordered(out$iv1) out$iv2<-as.ordered(out$iv2) model<-ez::ezANOVA(data=out, dv=dv, 
wid=id, within = iv1+iv2, type=3, detailed=TRUE) dfA<-model$ANOVA$DFn[2] dfB<-model$ANOVA$DFn[3] dfAB<-model$ANOVA$DFn[4] dfWA<-model$ANOVA$DFd[2] dfWB<-model$ANOVA$DFd[3] dfWAB<-model$ANOVA$DFd[4] SSA<-model$ANOVA$SSn[2] SSB<-model$ANOVA$SSn[3] SSAB<-model$ANOVA$SSn[4] SSWA<-model$ANOVA$SSd[2] SSWB<-model$ANOVA$SSd[3] SSWAB<-model$ANOVA$SSd[4] eta2A<-SSA/(SSA+SSWA) eta2B<-SSB/(SSB+SSWB) eta2AB<-SSAB/(SSAB+SSWAB) f2A<-eta2A/(1-eta2A) f2B<-eta2B/(1-eta2B) f2AB<-eta2AB/(1-eta2AB) lambdaA<-f2A*dfWA lambdaB<-f2B*dfWB lambdaAB<-f2AB*dfWAB minusalpha<-1-alpha FtA<-stats::qf(minusalpha, dfA, dfWA) FtB<-stats::qf(minusalpha, dfB, dfWB) FtAB<-stats::qf(minusalpha, dfAB, dfWAB) powerA<-round(1-stats::pf(FtA, dfA,dfWA,lambdaA),3) powerB<-round(1-stats::pf(FtB, dfB,dfWB,lambdaB),3) powerAB<-round(1-stats::pf(FtAB, dfAB,dfWAB,lambdaAB),3) ggeA<-round(model$`Sphericity Corrections`$GGe[1],3) ggeAB<-round(model$`Sphericity Corrections`$GGe[2],3) hfeA<-round(model$`Sphericity Corrections`$HFe[1],3) hfeAB<-round(model$`Sphericity Corrections`$HFe[2],3) hfeA[hfeA>1]<-1 hfeAB[hfeAB>1]<-1 ggdfA<-ggeA*dfA ggdfAB<-ggeAB*dfAB ggdfWA<-ggeA*dfWA ggdfWAB<-ggeAB*dfWAB hfdfA<-hfeA*dfA hfdfAB<-hfeAB*dfAB hfdfWA<-hfeA*dfWA hfdfWAB<-hfeAB*dfWAB lambdaggA<-f2A*ggdfWA lambdaggAB<-f2AB*ggdfWAB lambdahfA<-f2A*hfdfWA lambdahfAB<-f2AB*hfdfWAB FtggA<-stats::qf(minusalpha, ggdfA, ggdfWA) FtggAB<-stats::qf(minusalpha, ggdfAB, ggdfWAB) FthfA<-stats::qf(minusalpha, hfdfA, hfdfWA) FthfAB<-stats::qf(minusalpha, hfdfAB, hfdfWAB) powerggA<-round(1-stats::pf(FtggA, ggdfA,ggdfWA,lambdaggA),3) powerggAB<-round(1-stats::pf(FtggAB, ggdfAB,ggdfWAB,lambdaggAB),3) powerhfA<-round(1-stats::pf(FthfA, hfdfA,hfdfWA,lambdahfA),3) powerhfAB<-round(1-stats::pf(FthfAB, hfdfAB,hfdfWAB,lambdahfAB),3) eta2A<-round((eta2A),3) eta2B<-round((eta2B),3) eta2AB<-round((eta2AB),3) message("Partial eta-squared Factor A = ", eta2A) message("Power Factor A (Unadjusted) for n = ",n," is ", powerA) message("Power Factor A H-F Adjusted (Epsilon = ",hfeA ,") for n = ",n, " is ", powerhfA) message("Power Factor A G-G Adjusted (Epsilon = ", ggeA,") for n = ",n, " is ", powerggA) message("Partial eta-squared Factor B = ", eta2B) message("Power Factor B (Unadjusted) for n = ",n," is ", powerB) message("Power Factor B Adjusted - There is no adjustment when levels = 2") message("Partial eta-squared AxB = ", eta2AB) message("Power AxB (Unadjusted) for n = ",n," is ", powerAB) message("Power AxB H-F Adjusted (Epsilon = ",hfeAB ,") for n = ",n, " is ", powerhfAB) message("Power AxB G-G Adjusted (Epsilon = ", ggeAB,") for n = ",n, " is ", powerggAB) result <- data.frame(matrix(ncol = 15)) colnames(result) <- c("n", "eta2 A","Power A (Unadujsted)", "HF epsilon A", "Power A (HF)","GG Epsilon A","Power A (GG)","eta2 B", "Power B", "eta2 AxB","Power AxB(Unadjusted)","HF epsilon AxB", "Power AxB(HF)","GG Epsilon AB","Power AxB(GG)") result[, 1]<-n result[, 2]<-eta2A result[, 3]<-powerA result[, 4]<-hfeA result[, 5]<-powerhfA result[, 6]<-ggeA result[, 7]<-powerggA result[, 8]<-eta2B result[, 9]<-powerB result[, 10]<-eta2AB result[, 11]<-powerAB result[, 12]<-hfeAB result[, 13]<-powerhfAB result[, 14]<-ggeAB result[, 15]<-powerggAB output<-na.omit(result) rownames(output)<- c() } invisible(output)}
library(survival)

attach(graft.vs.host)
plot(survfit(Surv(time, dead) ~ gvhd))
survdiff(Surv(time, dead) ~ gvhd)
summary(coxph(Surv(time, dead) ~ gvhd))
summary(coxph(Surv(time, dead) ~ gvhd + log(index) + donage + rcpage + preg))

attach(melanom)
cox1 <- coxph(Surv(days, status == 1) ~ log(thick) + sex + strata(ulc))
new <- data.frame(sex = 2, thick = c(0.1, 0.2, 0.5))
svfit <- survfit(cox1, newdata = new)
plot(svfit[2], ylim = c(.985, 1))

summary(coxph(Surv(obsmonths, dead) ~ age + sex, data = stroke))
summary(coxph(Surv(obsmonths, dead) ~ sex, data = stroke))
with(stroke, tapply(age, sex, mean))

stroke.trim <- function(t1, t2)
  subset(transform(stroke,
                   entry = t1,
                   exit = pmin(t2, obsmonths),
                   dead = dead & obsmonths <= t2),
         entry < exit)
stroke2 <- do.call(rbind, mapply(stroke.trim,
                                 c(0, 0.5, 2, 12), c(0.5, 2, 12, Inf),
                                 SIMPLIFY = FALSE))
summary(coxph(Surv(entry, exit, dead) ~ age + sex, data = stroke2))

rm(list = ls())
while (search()[2] != "package:ISwR") detach()
varMase <- function(y, pi, pi2 = NULL, method = "LinHB", N = NULL) {
  n <- length(y)
  if (method == "LinHT" & is.null(pi2)) {
    message("For LinHT variance estimator, need to provide second order inclusion probabilities matrix.")
    return(NULL)
  }
  if (method == "LinHB") {
    # Approximation using only first-order inclusion probabilities:
    # weighted squared residuals of the expansion values y/pi
    a <- n / (n - 1) * (1 - pi)
    e <- as.vector(pi^(-1) * y) - c(sum(a)^(-1) * (pi^(-1) * a) %*% y)
    varEst <- sum(a * e^2)
  }
  if (method == "LinHH") {
    # Variance of the expansion values n*y/pi around the estimated total
    t <- pi^(-1) %*% y
    varEst <- 1 / (n * (n - 1)) * t(as.vector(n * y * pi^(-1)) - as.numeric(t)) %*%
      (as.vector(n * y * pi^(-1)) - as.numeric(t))
  }
  if (method == "LinHTSRS") {
    # Horvitz-Thompson variance under simple random sampling (with finite population correction)
    if (is.null(N)) {
      N <- sum(pi^(-1))
    }
    varEst <- (N - n) * (N / n) * var(y)
  }
  if (method == "LinHT") {
    # Full Horvitz-Thompson estimator using second-order inclusion probabilities
    a <- (pi2 - pi %*% t(pi)) * pi2^(-1) * (pi %*% t(pi))^(-1) * y %*% t(y)
    varEst <- sum(a)
  }
  return(varEst)
}
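# Illustrative usage sketch (simulated data, not from the original source): compare
# the estimators defined above on a sample with equal first-order inclusion
# probabilities. LinHT is omitted because it also needs a matrix of second-order
# inclusion probabilities (pi2).
set.seed(1)
N_pop <- 1000; n_samp <- 100
y_samp  <- rnorm(n_samp, mean = 50, sd = 10)
pi_samp <- rep(n_samp / N_pop, n_samp)
varMase(y = y_samp, pi = pi_samp, method = "LinHB")
varMase(y = y_samp, pi = pi_samp, method = "LinHH")
varMase(y = y_samp, pi = pi_samp, method = "LinHTSRS", N = N_pop)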
print.summary.smoothagesccs <- function(x, digits = max(getOption('digits')-3, 3), signif.stars = getOption("show.signif.stars"), ...) { if (!is.null(x$call)) { cat("Call:\n") dput(x$call) cat("\n") } if (!is.null(x$fail)) { cat(" Coxreg failed.", x$fail, "\n") return() } savedig <- options(digits = digits) on.exit(options(savedig)) omit <- x$na.action if (!is.null(x$nevent)) cat(", number of events=", x$nevent, "\n") else cat("\n") if (length(omit)) cat(" (", naprint(omit), ")\n", sep="") if (nrow(x$coef)==0) { cat (" Null model\n") return() } if(!is.null(x$coefficients)) { cat("\n") printCoefmat(x$coefficients, digits=digits, signif.stars=signif.stars, ...) } if(!is.null(x$conf.int)) { cat("\n") print(x$conf.int) } cat("\n") if (!is.null(x$concordance)) { cat("Concordance=", format(round(x$concordance[1],3)), " (se =", format(round(x$concordance[2], 3)),")\n") } cat("spline based age relative incidence function:", "\n", "Smoothing parameter =", x$smoothingpara, "\n", "Cross validation score =", x$cv, "\n") invisible() }
import_chron_excel <- function(path, ...) {
  data <- readxl::read_excel(path = path, ...)
  data
}

import_chron_csv <- function(path, delim, ...) {
  if (delim == ",") {
    data <- readr::read_csv(file = path, ...)
  }
  if (delim == ";") {
    data <- readr::read_csv2(file = path, ...)
  }
  data
}

import_chron_delim <- function(path, delim, ...) {
  if (delim %in% c("\t", " ")) {
    if (delim == "\t") {
      data <- readr::read_tsv(file = path, ...)
    }
    if (delim == " ") {
      data <- readr::read_table2(file = path, ...)
    }
  } else {
    data <- readr::read_delim(file = path, delim = delim, ...)
  }
  data
}
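# Illustrative usage sketch (hypothetical toy file, not from the original docs):
# write a tiny comma-delimited chronology table to a temporary path and read it
# back with import_chron_csv(). Requires the readr package; column names are
# made up for the example.
tmp <- tempfile(fileext = ".csv")
writeLines(c("id,depth,age", "1,10,100", "2,20,250"), tmp)
chron <- import_chron_csv(path = tmp, delim = ",")
chron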
svocc.step <- function (object, model, trace = 1, steps = 1000, criter = c("AIC", "BIC", "cAUC"), test = FALSE, k = 2, control, ...) { if (missing(control)) control <- object$control if (!is.null(control)) { old.optim.control <- getOption("detect.optim.control") old.optim.method <- getOption("detect.optim.method") old.dc.control <- getOption("detect.dc.control") old.mcmc.control <- getOption("detect.mcmc.control") options("detect.optim.control"=control$optim.control) options("detect.optim.method"=control$optim.method) options("detect.dc.control"=control$dc.control) options("detect.mcmc.control"=control$mcmc.control) on.exit(options("detect.optim.control"=old.optim.control), add=TRUE) on.exit(options("detect.optim.method"=old.optim.method), add=TRUE) on.exit(options("detect.dc.control"=old.dc.control), add=TRUE) on.exit(options("detect.mcmc.control"=old.mcmc.control), add=TRUE) } if (missing(model)) stop("'model' argument must be supplied") model <- match.arg(model, c("sta","det")) cut.string <- function(string) { if (length(string) > 1) string[-1] <- paste("\n", string[-1], sep = "") string } step.results <- function(models, fit, object) { change <- sapply(models, "[[", "change") rdf <- sapply(models, "[[", "df.resid") ddf <- c(NA, diff(rdf)) AIC <- sapply(models, "[[", criter) heading <- c("Single visit site-occupancy model\n Stepwise Model Path", "\nInitial Model:", deparse(as.vector(formula(object))), "\nFinal Model:", deparse(as.vector(formula(fit))), "\n") aod <- data.frame(Step = I(change), Df = ddf, "Resid. Df" = rdf, AIC = AIC, check.names = FALSE) if (criter != "AIC") colnames(aod)[colnames(aod) == "AIC"] <- criter attr(aod, "heading") <- heading fit$anova <- aod fit } criter <- match.arg(criter) Terms <- terms(object, model) object$formula[[model]] <- Terms fdrop <- numeric(0) fadd <- attr(Terms, "factors") forward <- FALSE models <- vector("list", steps) fit <- object n <- object$nobs bAIC <- extractAIC(fit, k = k, ...) edf <- bAIC[1] if (criter == "AIC") bAIC <- bAIC[2] if (criter == "BIC") bAIC <- BIC(fit) if (criter == "cAUC") bAIC <- 1 - AUC(fit) nm <- 1 Terms <- fit$terms[[model]] if (trace) cat("Start: ", criter, "=", format(round(bAIC, 2)), "\n", cut.string(deparse(as.vector(formula(fit)$full))), "\n\n", sep = "") models[[nm]] <- list(deviance = extractAIC(fit, k = k, ...), df.resid = n - edf, change = "", AIC = bAIC) if (criter != "AIC") names(models[[nm]])[names(models[[nm]]) == "AIC"] <- criter backward <- TRUE while (steps > 0) { steps <- steps - 1 AIC <- bAIC ffac <- attr(Terms, "factors") scope <- factor.scope(ffac, list(add = fadd, drop = fdrop)) aod <- NULL change <- NULL if (backward && length(scope$drop)) { aod <- drop1.svocc(fit, scope$drop, model, criter = criter, k = k, test = test, control=NULL, ...) if (inherits(aod, "try-error")) return(aod) rn <- row.names(aod) row.names(aod) <- c(rn[1], paste("-", rn[-1], sep = " ")) if (any(aod$Df == 0, na.rm = TRUE)) { zdf <- aod$Df == 0 & !is.na(aod$Df) change <- rev(rownames(aod)[zdf])[1] } } if (is.null(change)) { attr(aod, "heading") <- NULL nzdf <- if (!is.null(aod$Df)) aod$Df != 0 | is.na(aod$Df) aod <- aod[nzdf, ] if (is.null(aod) || ncol(aod) == 0) break nc <- match(criter, names(aod)) nc <- nc[!is.na(nc)][1] o <- order(aod[, nc]) if (trace) print(aod[o, ]) if (o[1] == 1) break change <- rownames(aod)[o[1]] } ttt <- if (model == "sta") paste(". ~ .", change, "| .") else paste(". ~ . 
| .", change) fit <- update.svisit(fit, as.formula(ttt), evaluate = FALSE) fit <- eval.parent(fit) if (length(fit$fitted) != n) stop("number of rows in use has changed: remove missing values?") Terms <- terms(fit, model) bAIC <- extractAIC(fit, k = k, ...) edf <- bAIC[1] if (criter == "AIC") bAIC <- bAIC[2] if (criter == "BIC") bAIC <- BIC(fit) if (criter == "cAUC") bAIC <- 1 - AUC(fit) if (trace) cat("\nStep: ", criter, "=", format(round(bAIC, 2)), "\n", cut.string(deparse(as.vector(formula(fit)$full))), "\n\n", sep = "") if (bAIC >= AIC + 1e-07) break nm <- nm + 1 models[[nm]] <- list(deviance = extractAIC(fit, k = k, ...), df.resid = n - edf, change = change, AIC = bAIC) if (criter != "AIC") names(models[[nm]])[names(models[[nm]]) == "AIC"] <- criter } step.results(models = models[seq(nm)], fit, object) }
test_that("model fitting", { skip_if_not(TEST_MODEL_FITTING) with_parallel({ model <- "EarthModel" expect_output(test_model_binary(model)) expect_output(test_model_factor(model)) expect_output(test_model_numeric(model)) expect_output(test_model_ordered(model)) expect_error(test_model_Surv(model)) }) })
.reAttDF <- function(df1, df0) {
  # copy onto df1 any attribute of df0 that df1 does not already carry
  att_names <- names(attributes(df0))
  for (nm in att_names) {
    if (!(nm %in% names(attributes(df1)))) {
      attr(df1, nm) <- attr(df0, nm)
    }
  }
  return(df1)
}
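# A minimal usage sketch (illustration only, not from the original source):
# attributes of the old frame that are missing from the new one are carried over.
df_old <- data.frame(a = 1:3)
attr(df_old, "source") <- "survey 2020"   # hypothetical custom attribute
df_new <- data.frame(a = 4:6)
df_new <- .reAttDF(df_new, df_old)
attr(df_new, "source")   # "survey 2020"; attributes already on df_new are left alone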
lazy.toc <- function(type=c("contents", "figures", "tables"), add=FALSE, desc="", withPage=TRUE, sec_unit=c("chapter", "section", "subsection", "subsubsection", "part")){ reportFormat <- getOption("lazyReportFormat") if (!reportFormat %in% c("latex", "html", "markdown")) stop("option(\"lazyReportFormat\") must be either 'latex', 'html', 'markdown'") if (reportFormat == "latex"){ fncall <- paste("%%", paste(deparse(match.call()), collapse=" ")) type <- match.arg(type, c("contents", "figures", "tables")) sec_unit <- match.arg(sec_unit, c("chapter", "section", "subsection", "subsubsection", "part")) if (!add){ code <- switch(type, "contents" = "\\tableofcontents", "figures" = "\\listoffigures", "tables" = "\\listoftables") } else{ code <- switch(type, "contents" = "toc", "figures" = "lof", "tables" = "lot") if (withPage) code <- paste("\\addcontentsline{", code, "}{", sec_unit, "}{", desc, "}", sep="") else code <- paste("\\addtocontents{", code, "}{", desc, "}", sep="") } code <- paste(fncall, "\n", code, "\n\n") } if (reportFormat %in% c("html", "markdown")){ code <- "" warning("Tables of contents are not available in HTML or markdown reports. Nothing has been done") } return(code) }
upsetplot.csAnno <- function(x, order_by = "freq", vennpie = FALSE,
                             vp = list(x = .6, y = .7, width = .8, height = .8)) {
  y <- x@detailGenomicAnnotation
  nn <- names(y)
  y <- as.matrix(y)
  res <- tibble::tibble(anno = lapply(1:nrow(y), function(i) nn[y[i, ]]))
  g <- ggplot(res, aes_(x = ~anno)) +
    geom_bar() +
    xlab(NULL) + ylab(NULL) +
    theme_minimal() +
    ggupset::scale_x_upset(n_intersections = 20, order_by = order_by)
  if (!vennpie) return(g)
  f <- function() vennpie(x, cex = .9)
  p <- ggplotify::as.ggplot(f) + coord_fixed()
  ggplotify::as.ggplot(g) +
    ggimage::geom_subview(subview = p, x = vp$x, y = vp$y,
                          width = vp$width, height = vp$height)
}
coef.PTReg <- function(object, ...) {
  beta_estimate <- object$beta
  alpha_estimate <- object$alpha
  intercept_estimate <- object$intercept
  return(list(intercept = intercept_estimate, alpha = alpha_estimate, beta = beta_estimate))
}
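# Hypothetical usage sketch (added): assumes `fit` is a fitted object of class
# "PTReg"; the method simply repackages the stored estimates.
# fit <- PTReg(...)                      # hypothetical fitted model
# est <- coef(fit)
# est$intercept; est$alpha; est$beta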
test_WordCorr_df <- tibble::tribble( ~user_id, ~status_id, ~created_at, ~screen_name, ~text, ~hashtags, ~location, ~key, ~query, as.character(12344), as.character(098098), as.POSIXct("2021-04-07 01:15:33"), as.character("cool123"), "I am happy and joyful", as.character("dog"), as.character("Phoenix AZ"), as.character("dude123 2021-04-07 01:15:33"), as.character(" as.character(987234), as.character(90898), as.POSIXct("2021-04-07 01:16:43"), as.character("sweet123"), "I am sad and annoyed", as.character("dog"), as.character("Denver CO"), as.character("sweet123 2021-04-07 01:16:43"), as.character(" as.character(23443), as.character(5645), as.POSIXct("2021-04-08 01:17:41"), as.character("happy123"), "I am supremely happy and gratefully annoyed", as.character("cat"), as.character("Ouray CO"), as.character("happy123 2021-04-08 01:17:41"), as.character(" as.character(098787), as.character(8765), as.POSIXct("2021-04-09 06:17:45"), as.character("yota123"), "I am super duper happy and joyful", as.character("kittie"), as.character("Tucson AZ"), as.character("yota123 2021-04-09 06:17:45"), as.character(" ) incorrect_WordCorr_df <- tibble::tribble( ~item1, ~item2, ~c, "joyful", "happy", as.double(0.577), "happy", "joyful", as.double(0.577), "annoyed", "happy", as.double(-0.577), "happy", "annoyed", as.double(-0.577), "annoyed", "joyful", as.double(-1.000), "joyful", "annoyed", as.double(-1.000) ) test_WordCorr_Tidy_df <- saotd::tweet_tidy(DataFrame = test_WordCorr_df) test <- saotd::word_corr(DataFrameTidy = test_WordCorr_Tidy_df, number = 2) %>% dplyr::mutate(correlation = round(x = correlation, digits = 3)) p <- saotd::word_corr_network(WordCorr = test, Correlation = .1) testthat::test_that("The word_corr_network function is working as properly", { testthat::expect_error(object = saotd::word_corr_network(WordCorr = text), "The input for this function is a Correlation data frame.") testthat::expect_error(object = saotd::word_corr_network(WordCorr = test, Correlation = 0), "A correlation value between 0 and 1 must be selected.") testthat::expect_error(object = saotd::word_corr_network(WordCorr = test, Correlation = 1.1), "A correlation value between 0 and 1 must be selected.") }) testthat::test_that("The word_corr_network retunrs ggplot object", { testthat::expect_type(object = p, type = "list") })
skip_if_no_key <- function() { skip_if( condition = !fredr_has_key(), message = "FRED API key is not set." ) }
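# Typical-usage sketch (added): call the helper at the top of a test so it is
# skipped when no FRED API key is configured. The fredr() call below is an
# assumption about intended use, not part of the original file.
library(testthat)
library(fredr)
test_that("a series can be fetched", {
  skip_if_no_key()   # skipped unless fredr_set_key() has been called
  expect_s3_class(fredr(series_id = "GNPCA"), "data.frame")
})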
context("dendlist") test_that("dendlist works", { dend <- iris[, -5] %>% dist() %>% hclust() %>% as.dendrogram() dend2 <- iris[, -5] %>% dist() %>% hclust(method = "single") %>% as.dendrogram() expect_error(dendlist(1:4, 5, a = dend)) expect_equal(dendlist(dend) %>% class(), "dendlist") expect_equal(dendlist(dend) %>% length(), 1L) expect_equal(dendlist(dend, dend) %>% length(), 2L) expect_equal(dendlist(dend, dend, dendlist(dend)) %>% length(), 3L) expect_identical( dendlist(dend, dend2), dendlist(dend) %>% dendlist(dend2) ) expect_identical( dendlist(dend), dendlist(dendlist(dend)) ) expect_identical( dendlist(dend, dend), dendlist(dendlist(dend, dend)) ) expect_error( dendlist(unclass(dendlist(dend, dend))) ) expect_equal( suppressWarnings(length(dendlist(dendlist(list()), dend))), 1 ) expect_true(suppressWarnings(is.dendlist(dendlist()))) expect_true(is.dendlist(dendlist(dend))) }) test_that("all.equal.dendlist works", { dend <- iris[, -5] %>% dist() %>% hclust() %>% as.dendrogram() expect_true( all.equal(dendlist(dend, dend)) ) expect_true( all.equal(dendlist(dend, dend, dend)) ) expect_true( all.equal(dendlist(dend, dend), dendlist(dend, dend)) ) p_dend <- prune(dend, "1") expect_true( is.character( all.equal(dendlist(dend, p_dend)) ) ) })
beta2.calc <- function(x, y, n, j, k, e1, e2) {
  aa <- wmat2(x, y, n, j, k, e1, e2)
  W <- aa$w
  bb <- rvec2(x, y, n, j, k, e1, e2)
  R <- bb$r
  beta <- solve(W, R)
  list(B = beta)
}
library(ggplot2) library(grid) library(pdp) library(png) library(randomForest) trn <- pdp::boston set.seed(101) rfo <- randomForest(cmedv ~ ., data = trn) rescale <- function(x, a, b) { ((x - min(x)) / (max(x) - min(x))) * (b - a) + a } pd <- partial(rfo, pred.var = c("lstat", "rm"), chull = FALSE, progress = "text", grid.resolution = 100) hex <- data.frame(x = 1.35 * 1 * c(-sqrt(3) / 2, 0, rep(sqrt(3) / 2, 2), 0, rep(-sqrt(3) / 2, 2)), y = 1.35 * 1 * c(0.5, 1, 0.5, -0.5, -1, -0.5, 0.5)) pd_hex <- pd pd_hex$lstat <- rescale(pd_hex$lstat, a = min(hex$x), b = max(hex$x)) pd_hex$rm <- rescale(pd_hex$rm, a = min(hex$y), b = max(hex$y)) pd_hex <- pd_hex[mgcv::in.out(as.matrix(hex), as.matrix(pd_hex[, 1L:2L])), ] make_pdp_sticker <- function(option, ...) { ggplot(pd_hex, aes(lstat, rm)) + geom_polygon(data = hex, aes(x, y), color = "black", fill = grey(0.25), size = 3) + geom_tile(data = pd_hex, aes(x = lstat, y = rm, z = yhat, fill = yhat)) + viridis::scale_fill_viridis(option = option, ...) + geom_polygon(data = hex, aes(x, y), color = "black", fill = "transparent", size = 3) + geom_contour(aes(z = yhat), color = "black") + annotate(geom = "text", x = 0, y = -0.15, color = "white", size = 18, label = "pdp") + coord_equal(xlim = range(hex$x), ylim = range(hex$y)) + scale_x_continuous(expand = c(0.04, 0)) + scale_y_reverse(expand = c(0.04, 0)) + theme(axis.line = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), axis.title.x = element_blank(), axis.title.y = element_blank(), legend.position = "none", plot.background = element_blank(), panel.background = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) } logos <- lapply(LETTERS[1L:5L], FUN = make_pdp_sticker, begin = 0.2) png("man/figures/pdp-logos.png", width = 900, height = 500, bg = "transparent", type = "cairo-png") grid.arrange(grobs = logos, ncol = 3) dev.off() pdp_logo <- make_pdp_sticker(option = "C", begin = 0.2) print(pdp_logo) png("man/figures/pdp-logo.png", width = 181, height = 209, bg = "transparent", type = "cairo-png") print(pdp_logo) dev.off() svg("man/figures/pdp-logo.svg", width = 181 / 72, height = 209 / 72, bg = "transparent") print(pdp_logo) dev.off()
compute_contourLength <- function(gpRealizations, threshold, nRealizations, verb = 1) {
  imageSize <- sqrt(ncol(gpRealizations))
  ll <- rep(0, nRealizations)
  for (j in seq(nRealizations)) {
    indxSP <- contourLines(matrix(gpRealizations[j, ], ncol = imageSize),
                           levels = c(threshold))
    nConnexComponent <- length(indxSP)
    ll[j] <- 0
    for (i in seq(nConnexComponent))
      ll[j] <- ll[j] + poly_length(x = indxSP[[i]]$x, y = indxSP[[i]]$y)
    if (verb > 0 && j %% 100 == 0)
      cat("Computed length for realizations ", j, " of ", nRealizations, "\n")
  }
  return(ll)
}
print.turbo <- function(x, ...) { if(all(!is.na(x$errors))) { cat("All algorithms failed: \n") cat(error(x)) } else { tab <- with(x, data.frame(method, value.objfn, itr, fpeval, objfeval, convergence, elapsed.time=runtime[,"elapsed"], row.names=seq_along(x$method))) tab <- tab[!x$fail,] print(tab, ...) if(any(!is.na(x$errors))) cat("\nAcceleration scheme", paste(paste(seq_along(x$method)[x$fail], " (", x$method[x$fail], ")", sep=""), collapse=", "), "failed\n") } } pars <- function(x, ...) { UseMethod("pars") } pars.turbo <- function(x, ...) { if(length(x$method)==1) { return(as.vector(x$par)) } else { ret <- x$pars rownames(ret) <- x$method colnames(ret) <- paste("p", 1:ncol(ret), sep="") return(ret[!x$fail,]) if(any(!is.na(x$errors))) cat("\nAcceleration scheme", paste(seq_along(x$method)[x$fail], collapse=", "), "failed\n") } } error <- function(x, ...) { UseMethod("error") } error.turbo <- function(x, ...) { nmethod <- length(x$method) which.error <- which(!is.na(x$errors)) if(length(which.error) == 0) { print("There were no errors") } else { for(i in which.error) { cat(paste("method ", i, " (", x$method[i], "):", sep=""), x$errors[i]) } } } plot.turbo <- function(x, which.methods = seq_along(x$method), method.names = x$method[which.methods], xlim, ylim, ...) { if(!x$control.run$keep.objfval) stop("plot methods only defined when control.run$keep.objfval=TRUE") trace <- x$trace.objfval[which.methods] select <- which((!x$fail)[which.methods]) times <- lapply(select, function(u) c(trace[[u]]$time.before.iter["elapsed"], trace[[u]]$time.before.iter["elapsed"] + 1:x$itr[u]*trace[[u]]$time.per.iter["elapsed"])) max.time <- max(sapply(select, function(u) max(times[[u]]))) lower <- max(sapply(select, function(u) min(times[[u]]))) if(missing(xlim)) xlim <- c(lower, max.time) if(missing(ylim)) ylim <- range(unlist(sapply(select, function(u) trace[[u]]$trace[times[[u]] >= xlim[1] & times[[u]] <= xlim[2]]))) plot(unlist(sapply(select, function(u) times[[u]])), unlist(sapply(select, function(u) trace[[u]]$trace)), type="n", xlab="run time (sec.)", ylab="Objective function value", main="Trace of Objective Function Value", xlim=xlim, ylim=ylim, ...) for(k in seq_along(select)) { lines(times[[select[k]]], trace[[select[k]]]$trace, col=k) } legend("topright", as.character(method.names[select]), col=seq_along(select), lwd=1) } grad.numDeriv <- grad grad <- function(x, ...) { UseMethod("grad") } grad.turbo <- function(x, objfn=x$objfn, which.methods = seq_along(x$method), method.names = x$method[which.methods], ...) { if(is.null(objfn)) stop("objfn must be provided to compute gradient") subs <- (!x$fail)[which.methods] select.methods <- which.methods[subs] mat <- matrix(NA, length(select.methods), ncol(x$par)) rownames(mat) <- method.names[subs] for(k in seq_along(select.methods)) { mat[k,] <- grad.numDeriv(objfn, x$par[select.methods[k],], method="Richardson", method.args=list(r=2), ...) } return(mat) } hessian.numDeriv <- hessian hessian <- function(x, ...) { UseMethod("hessian") } hessian.turbo <- function(x, objfn=x$objfn, which.methods = seq_along(x$method), method.names = x$method[which.methods], ...) { if(is.null(objfn)) stop("objfn must be provided to compute hessian") subs <- (!x$fail)[which.methods] select.methods <- which.methods[subs] lst <- vector("list", length(select.methods)) names(lst) <- method.names[subs] for(k in seq_along(select.methods)) { lst[[k]] <- hessian.numDeriv(objfn, x$par[select.methods[k],], method="Richardson", method.args=list(r=2), ...) 
} return(lst) } stderror <- function(x, ...) { UseMethod("stderror") } stderror.turbo <- function(x, objfn=x$objfn, which.methods = seq_along(x$method), method.names = x$method[which.methods], ...) { if(is.null(objfn)) stop("objfn must be provided to compute standard error") hesses <- hessian(x, objfn=objfn, which.methods=which.methods, method.names=method.names) ret <- t(sapply(seq_along(hesses), function(u) sqrt(diag(solve(hesses[[u]]))))) rownames(ret) <- names(hesses) return(ret) }
ui_significance <- function() { ns <- NS("signif") tabPanel( "Significance testing", value = "signific", h5("Test significance for variables not used in clustering"), verticalLayout( selectizeInput( ns("signif_var"), "Select variable", choices = NULL, options = list( onInitialize = I('function(){this.setValue("");}') ) ) %>% shinyhelper::helper(type = "markdown", content = "signif_help"), splitLayout( plotOutput(ns("signif_boxplot"), height = 700), htmlOutput(ns("signif_results"), container = pre), cellWidths = c("25%", "75%") ) ) ) } server_significance <- function(id, all_data, cluster_labels, cluster_colors, unselected_vars) { moduleServer(id, function(input, output, session) { observeEvent(unselected_vars(), { updateSelectizeInput(session, "signif_var", choices = unselected_vars()) }) output$signif_boxplot <- renderPlot({ req(input$signif_var) isolate({ all_df <- all_data() }) df <- all_df[, input$signif_var] df$Cluster <- as.factor(cluster_labels()) facet_boxplot(df, "Cluster", input$signif_var, boxplot_colors = cluster_colors, plot_points = FALSE) }, height = 150) output$signif_results <- renderPrint({ req(input$signif_var) isolate({ all_df <- all_data() }) clusters <- cluster_labels() num_clusters <- length(unique(clusters)) if (num_clusters > 2) { dunn.test::dunn.test( x = dplyr::pull(all_df, input$signif_var), g = clusters, method = "bh" ) } else { all_df$Cluster <- clusters t_test_formula <- stats::as.formula(paste(input$signif_var, " ~ Cluster")) stats::t.test(t_test_formula, data = all_df) } }) }) }
l_toR <- function(x, cast = as.character) {
  if (!is.function(cast)) stop('cast is expected to be a function')
  if (!grepl(' ', x)) cast(x) else cast(unlist(strsplit(x, " ", fixed = TRUE)))
}
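# Quick illustration (added): values with embedded spaces are split, then cast.
l_toR("3.14")                           # "3.14" (single value, default character cast)
l_toR("1 2 3", cast = as.numeric)       # c(1, 2, 3)
l_toR("TRUE FALSE", cast = as.logical)  # c(TRUE, FALSE)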
context("Odd Log-Logistic Generalized Gamma distribution"); require(ollggamma); test_that("dollggamma integrates to 1", { result = integrate(function(x) dollggamma(x, 1, 1, 1, 1), 0, Inf)$value; expect_equal(result, 1, tolerance=0.001); result = integrate(function(x) dollggamma(x, 0.8, 0.2, 1, 0.5), subdivisions=1000000, 0, 1000000)$value; expect_equal(result, 1, tolerance=0.001); result = integrate(function(x) dollggamma(x, 0.5, 0.5, 0.2, 1.2), 0, Inf)$value; expect_equal(result, 1, tolerance=0.001); result = integrate(function(x) dollggamma(x, 0.5, 5, 2, 1), 0, Inf)$value; expect_equal(result, 1, tolerance=0.001); result = integrate(function(x) dollggamma(x, 0.5, 1, 2, 1), 0, Inf)$value; expect_equal(result, 1, tolerance=0.001); result = integrate(function(x) dollggamma(x, 5, 1, 0.5, 1), 0, Inf)$value; expect_equal(result, 1, tolerance=0.001); }); test_that("pollggamma correctly reflects dollggamma", { expected = integrate(function(x) dollggamma(x, 1, 1, 1, 1), 0, 10)$value; result = pollggamma(10, 1, 1, 1, 1); expect_equal(result, expected); result = pollggamma(1e-10, 1, 1, 1, 1.5); expect_equal(result, 0); result = pollggamma(20, 1, 1, 1, 1.5); expect_equal(result, 1); expected = integrate(function(x) dollggamma(x, 0.8, 0.5, 1, 1.2), 0, 10)$value; result = pollggamma(10, 0.8, 0.5, 1, 1.2); expect_equal(result, expected, tolerance=1e-5); result = pollggamma(0, 0.8, 0.5, 1, 1.2); expect_equal(result, 0); result = pollggamma(1000, 0.8, 0.5, 1, 1.2); expect_equal(result, 1); expected = integrate(function(x) dollggamma(x, 1.2, 0.5, 1.5, 0.5), 0, 10)$value; result = pollggamma(10, 1.2, 0.5, 1.5, 0.5); expect_equal(result, expected, tolerance=1e-5); result = pollggamma(0, 1.2, 0.5, 1.5, 0.5); expect_equal(result, 0); result = pollggamma(10000, 1.2, 0.5, 1.5, 0.5); expect_equal(result, 1); }); test_that("qollggamma correctly inverts pollggamma", { a = 1; b = 1; k = 1; lambda = 1; expected = c(1, 10); expect_equal(qollggamma(pollggamma(expected, a, b, k, lambda), a, b, k, lambda), expected); a = 0.5; b = 1.5; k = 1.5; lambda = 1.2; expected = c(0.5, 1, 3); expect_equal(qollggamma(pollggamma(expected, a, b, k, lambda), a, b, k, lambda), expected); a = 1.5; b = 0.5; k = 3; lambda = 0.5; expected = c(0.5, 1, 3); expect_equal(qollggamma(pollggamma(expected, a, b, k, lambda), a, b, k, lambda), expected); }); test_that("random number generation", { set.seed(72); a = 1; b = 1; k = 1; lambda = 1; r = rollggamma(100000, a, b, k, lambda); e = ecdf(r); x = seq(0.001, 10, length=10000); maxError = max(abs(e(x) - pollggamma(x, a, b, k, lambda))); expect_lte(maxError, 1e-2); a = 0.3; b = 1.5; k = 1.5; lambda = 1.2; r = rollggamma(100000, a, b, k, lambda); e = ecdf(r); x = seq(0.001, 10, length=10000); maxError = max(abs(e(x) - pollggamma(x, a, b, k, lambda))); expect_lte(maxError, 1e-2); a = 1.5; b = 5; k = 0.3; lambda = 0.5; r = rollggamma(100000, a, b, k, lambda); e = ecdf(r); x = seq(0.001, 10, length=10000); maxError = max(abs(e(x) - pollggamma(x, a, b, k, lambda))); expect_lte(maxError, 1e-2); a = 3; b = 1.8; k = 0.5; lambda = 1.5; r = rollggamma(100000, a, b, k, lambda); e = ecdf(r); x = seq(0.001, 10, length=10000); maxError = max(abs(e(x) - pollggamma(x, a, b, k, lambda))); expect_lte(maxError, 1e-2); set.seed(NULL); });
test_that("works as expected", { x = data.frame( c3 = factor( c( 'A', NA, 'C' ) ), c4 = factor( c( 'B', 'B', 'B' ) ), stringsAsFactors = TRUE ) expect_equal( as.character( coalf( x$c3, x$c4 ) ), c( 'A', 'B', 'C' ) ) })
"student_success"
"print.permutest.coca" <- function(x, digits = max(3, getOption("digits") - 3), ...) { ptest.stats <- rbind(x$permstat, x$inertia, x$fitax, x$pcent.fit, x$pval) rownames(ptest.stats) <- c("Stat.", "Inertia", "Fit", "% fit", "P-value") colnames(ptest.stats) <- paste("COCA", 1:x$n.axes, sep = " ") cat("\nPermutation test for predictive co-correspondence analysis:\n\n") writeLines(strwrap(pasteCall(x$call))) cat("\nPermutation test results:\n\n") printCoefmat(t(ptest.stats), digits = digits, na.print = "") cat("\n") invisible(x) }
mapAgeToAgeGroup <- function(x) { cut(x, breaks = c(-1, 4, 14, 34, 59, 79, 120), labels = c( "A00-A04", "A05-A14", "A15-A34", "A35-A59", "A60-A79", "A80+" )) }
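# Example of the binning (added for illustration):
mapAgeToAgeGroup(c(2, 10, 25, 45, 70, 90))
# -> A00-A04 A05-A14 A15-A34 A35-A59 A60-A79 A80+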
add_license <- function(author, year = format(Sys.Date(), "%Y"), open_source_content = TRUE, content_license = "CC BY 4.0", open_source_data = TRUE, data_license = "CC0", open_source_code = TRUE, code_license = "MIT"){ lesson_file_check() if(file.exists(file.path(getOption("swirlify_course_dir_path"), "LICENSE.txt")) && interactive()){ prompt_result <- readline(paste0("LICENSE.txt already exists for ", getOption("swirlify_course_name"), "\n", "Are you sure you want to overwrite it? Y/n ")) if(prompt_result != "Y"){ return(invisible(file.path(getOption("swirlify_course_dir_path"), "LICENSE.txt"))) } } if(!open_source_content && !open_source_data && !open_source_code){ cat(whisker.render("All code and content contained within this course is Copyright {{{year}}} {{{author}}}. All rights reserved.", list(author=author, year = year)), file = file.path(getOption("swirlify_course_dir_path"), "LICENSE.txt")) return(invisible(file.path(getOption("swirlify_course_dir_path"), "LICENSE.txt"))) } license_text <- "Copyright {{{year}}} {{{author}}}" if(open_source_content){ license_text <- paste0(license_text, "\n\nThe content of this course including but not limited to contents of the lesson.yaml files enclosed are licensed {{{content_license}}}. For more information please visit {{{content_license_url}}}") } if(open_source_data){ if(data_license != "CC0") stop(paste0("An invalid value: '", data_license, "' was provided for the add_license function.")) license_text <- paste0(license_text, "\n\nThe datasets contained in this course are dedicated to the public domain under the CC0 license. For more information please visit https://creativecommons.org/publicdomain/zero/1.0/") } if(open_source_code){ license_text <- paste0(license_text, "\n\nThe software contained in this course is subject to the following license:\n\n", whisker.render("{{{software_license}}}", list(software_license=get_license(code_license)))) } cat(whisker.render(license_text, list(year=year, author=author, content_license=content_license, content_license_url=get_cc_url(content_license) )), file = file.path(getOption("swirlify_course_dir_path"), "LICENSE.txt") ) } get_cc_url <- function(x){ cc_urls <- list( "CC BY 4.0" = "http://creativecommons.org/licenses/by/4.0/", "CC BY-SA 4.0" = "http://creativecommons.org/licenses/by-sa/4.0/", "CC BY-ND 4.0" = "http://creativecommons.org/licenses/by-nd/4.0/", "CC BY-NC 4.0" = "http://creativecommons.org/licenses/by-nc/4.0/", "CC BY-NC-SA 4.0" = "http://creativecommons.org/licenses/by-nc-sa/4.0/", "CC BY-NC-ND 4.0" = "http://creativecommons.org/licenses/by-nc-nd/4.0/", "CC0" = "https://creativecommons.org/publicdomain/zero/1.0/" ) result <- cc_urls[[x]] if(is.null(result)){ stop(paste0("An invalid value: '", x, "' was provided for the add_license function.")) } else { result } } get_license <- function(x){ license_dict <- list( "MIT" = 'Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL {{{author}}} BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the name of {{{author}}} shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from {{{author}}}.', "GPL3" = 'The programs included in this course are free software: you can redistribute them and/or modify them under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.', "CC0" = 'The code contained in this course is dedicated to the public domain under the CC0 license. For more information please visit https://creativecommons.org/publicdomain/zero/1.0/') result <- license_dict[[x]] if(is.null(result)){ stop(paste0("An invalid value: '", x, "' was provided for the add_license function.")) } else { result } }
touchi.tail <- function(rec1, rec2, r1, r2 = NULL, dist.type = "euclid") {
  d <- length(rec1)
  if (dist.type == "euclid") {
    if (length(rec2) == 2 * d) {
      # ball of radius r1 around the point rec1 vs. the rectangle rec2
      point <- rec1
      rec <- rec2
      dist <- 0
      for (i in 1:d) {
        if (point[i] > rec[2 * i]) {
          dist <- dist + (point[i] - rec[2 * i])^2
        } else if (point[i] < rec[2 * i - 1]) {
          dist <- dist + (point[i] - rec[2 * i - 1])^2
        }
      }
      dist <- sqrt(dist)
      if (dist > r1) tulos <- 0 else tulos <- 1
    } else {
      # ball vs. ball
      dista <- sqrt(sum((rec1 - rec2)^2))
      if (dista > r1 + r2) tulos <- 0 else tulos <- 1
    }
  } else {
    # box (coordinate-wise) metric
    if (length(rec2) == 2 * d) {
      tulos <- 1
      i <- 1
      while ((i <= d) && (tulos == 1)) {
        ala <- max(rec1[i] - r1, rec2[2 * i - 1])
        yla <- min(rec1[i] + r1, rec2[2 * i])
        if (yla < ala) tulos <- 0
        i <- i + 1
      }
    } else {
      tulos <- 1
      i <- 1
      while ((i <= d) && (tulos == 1)) {
        ala <- max(rec1[i] - r1, rec2[i] - r2)
        yla <- min(rec1[i] + r1, rec2[i] + r2)
        if (yla < ala) tulos <- 0
        i <- i + 1
      }
    }
  }
  return(tulos)
}
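# Small worked examples (added): rec2 is treated as a rectangle when it has
# length 2 * d, otherwise as a second ball of radius r2.
touchi.tail(c(0, 0), c(1, 1), r1 = 1, r2 = 0.5)   # two balls: dist sqrt(2) <= 1.5 -> 1
touchi.tail(c(0, 0), c(2, 3, 2, 3), r1 = 1)       # ball vs. rectangle [2,3] x [2,3] -> 0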
setClass("EXAMPLE", representation(slot1="numeric", slot2="character", myslot3="data.frame"), contains = "character" )
expected <- eval(parse(text="structure(c(8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14), class = \"anyC\")")); test(id=0, code={ argv <- eval(parse(text="list(structure(c(8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14, 8, 10, 12, 14), class = \"anyC\"), value = \"anyC\")")); do.call(`class<-`, argv); }, o=expected);
x <- factor(c(1, 4, 2, 2, 3, 3, 5, 5, 6, 6), labels = letters[1:6])
p <- c(5, 3, 2, 1, 4, 6)
test_that("permuteLevels examples work", {
  expect_s3_class(permuteLevels(x, perm = p), "factor")
  expect_equal(levels(permuteLevels(x, perm = p)), c("e", "c", "b", "a", "d", "f"))
  expect_equal(permuteLevels(permuteLevels(x, p), p, invert = TRUE), x)
})
context("RANDOM_SVD") opt.save <- options(bigmemory.typecast.warning = FALSE, bigmemory.default.shared = TRUE) TOL <- 1e-4 sampleScale <- function() { tmp <- sample(list(c(TRUE, FALSE), c(TRUE, TRUE), c(FALSE, FALSE)))[[1]] list(center = tmp[1], scale = tmp[2]) } N <- 1501 M <- 781 x <- matrix(rnorm(N*M, sd = 5), N) test_that("equality with prcomp", { for (t in ALL.TYPES) { X <- as.big.matrix(x, type = t) k <- sample(c(2, 20), 1) sc <- sampleScale() test <- big_randomSVD(X = X, fun.scaling = big_scale(center = sc$center, scale = sc$scale), K = k, use.Eigen = (runif(1) > 0.5)) pca <- prcomp(X[,], center = sc$center, scale. = sc$scale) expect_equal(diffPCs(test$u %*% diag(test$d), pca$x), 0, tolerance = TOL) expect_equal(diffPCs(test$v, pca$rotation), 0, tolerance = TOL) } }) test_that("equality with prcomp with half of the data", { ind <- sample(N, N/2) for (t in ALL.TYPES) { X <- as.big.matrix(x, type = t) k <- sample(c(2, 20), 1) sc <- sampleScale() test <- big_randomSVD(X = X, ind.train = ind, fun.scaling = big_scale(center = sc$center, scale = sc$scale), K = k, use.Eigen = (runif(1) > 0.5)) pca <- prcomp(X[ind, ], center = sc$center, scale. = sc$scale) expect_equal(diffPCs(test$u %*% diag(test$d), pca$x), 0, tolerance = TOL) expect_equal(diffPCs(test$v, pca$rotation), 0, tolerance = TOL) } }) options(opt.save)
readTau <- function(subtype="paper") { files <- c(paper="tau_data_1995-2000.mz", historical="tau_xref_history_country.mz") file <- toolSubtypeSelect(subtype,files) x <- read.magpie(file) x[x==-999] <- NA return(x) }
ols_plot_comp_plus_resid <- function(model, print_plot = TRUE) { check_model(model) x <- NULL y <- NULL pl <- cpout(model) myplots <- list() for (i in seq_len(pl$lmc)) { k <- cpdata(pl$data, pl$mc, pl$e, i) p <- eval(substitute(ggplot(k, aes(x = x, y = y)) + geom_point(colour = "blue", size = 2) + xlab(pl$nam[i]) + ylab(paste0("Residual + Component (", pl$indvar, ")")) + stat_smooth(method = "lm", se = FALSE), list(i = i))) myplots[[i]] <- p } if (print_plot) { marrangeGrob(myplots, nrow = 2, ncol = 2) } else { return(myplots) } } cpdata <- function(data, mc, e, i) { x <- data[[i]] y <- ((mc[i] * data[i]) + e)[[1]] data.frame(x = x, y = y) } cpout <- function(model) { e <- residuals(model) mc <- coefficients(model)[-1] data <- as.data.frame(model.matrix(model))[, -1] lmc <- length(mc) nam <- names(data) indvar <- names(model.frame(model))[1] list(e = e, mc = mc, data = data, lmc = lmc, nam = nam, indvar = indvar) } ols_rpc_plot <- function(model) { .Deprecated("ols_plot_comp_plus_resid()") }
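# Usage sketch (added), assuming the surrounding olsrr-style package provides
# check_model() and the ggplot2/gridExtra imports used above:
model <- lm(mpg ~ disp + hp + wt, data = mtcars)
ols_plot_comp_plus_resid(model)                                   # arranges panels on a 2 x 2 grid
cr_plots <- ols_plot_comp_plus_resid(model, print_plot = FALSE)   # list of ggplot objects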
godfrey_orme <- function(mainlm, hettest, B = 1000L, alternative = c("greater", "less", "two.sided"), seed = 1234, ...) { alternative <- match.arg(alternative, c("greater", "less", "two.sided")) if (is.character(hettest)) { hettestfunc <- get(hettest) } else { stop("hettest must be a character naming a function") } arguments <- list(...) invisible(list2env(arguments, envir = environment())) if ("alternative" %in% names(formals(hettest))) arguments$alternative <- alternative processmainlm(m = mainlm) if (hettest == "wilcox_keselman" && p > 2) { stop("`wilcox_keselman` cannot be used in godfrey_orme when model has more than two covariates") } else if (hettest == "zhou_etal" && p > 2) { if (exists("method", where = environment(), inherits = FALSE)) { if (method %in% c("covariate-specific", "hybrid")) { stop("`zhou_etal` cannot be used in godfrey_orme with method \"covariate-specific\" or \"hybrid\" when model has more than two covariates") } } } n <- length(e) if (exists("deflator", where = environment(), inherits = FALSE)) { if (is.character(deflator)) arguments$deflator <- which(colnames(X) == deflator) } teststat <- do.call(what = hettest, args = append(list("mainlm" = list("y" = y, "X" = X), "statonly" = TRUE), arguments)) if (!is.na(seed)) set.seed(seed) estar <- replicate(B, sample(e, size = n, replace = TRUE), simplify = FALSE) betahat <- solve(crossprod(X)) %*% t(X) %*% y ystar <- lapply(estar, function(u) X %*% betahat + u) statgen <- vapply(1:B, function(b) do.call(what = hettest, args = append(list("mainlm" = list("X" = X, "y" = ystar[[b]]), "statonly" = TRUE), arguments)), NA_real_) if (alternative == "greater") { statcount <- sum(statgen >= teststat) } else if (alternative == "less") { statcount <- sum(statgen <= teststat) } else if (alternative == "two.sided") { statcount <- min(sum(statgen >= teststat), sum(statgen <= teststat)) } pval <- statcount / B * ifelse(alternative == "two.sided", 2, 1) rval <- structure(list(statistic = teststat, parameter = B, p.value = pval, null.value = "Homoskedasticity", alternative = alternative, method = "Nonpar. Bootstrap"), class = "htest") broom::tidy(rval) }
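# Usage sketch (added): bootstrap p-value for an existing heteroskedasticity test.
# "breusch_pagan" is assumed to be a test function with this interface (a `mainlm`
# argument and a `statonly` switch) available in the surrounding package.
fit <- lm(dist ~ speed, data = cars)
godfrey_orme(mainlm = fit, hettest = "breusch_pagan", B = 199, seed = 42)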
setwd("D:/libraries/RTutor/RTutor") library(testex) sources = testex_sources(ex.files = "testex/tests.r") et = testex_create(sources) saveRDS(et, "testex/et.Rds")
library(testthat) library(synthACS) context("new attr - bottom of recursion") test_that("mapply - works as designed (df)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) df2 <- data.table::setDT(df) attr <- data.frame(pct= 0.5, level= "new") a_name <- "var_new" dat2 <- synthACS:::add_synth_attr_level(df, "p", attr= attr, attr_name= a_name) dat3 <- synthACS:::add_synth_attr_level(df2, "p", attr= attr, attr_name= a_name) expect_true(all(names(df) %in% names(dat2))) expect_true(all(names(dat2) %in% c(a_name, names(df)))) expect_equal(sum(df$p), sum(dat2$p) / attr[[1]]) expect_equal(df$p, dat2$p / attr[[1]]) expect_equal(nrow(df), nrow(dat2)) expect_equal(ncol(df), ncol(dat2) - 1) expect_true(all(names(df2) %in% names(dat3))) expect_true(all(names(dat3) %in% c(a_name, names(df2)))) expect_equal(sum(df2$p), sum(dat3$p) / attr[[1]]) expect_equal(df2$p, dat2$p / attr[[1]]) expect_equal(nrow(df2), nrow(dat3)) expect_equal(ncol(df2), ncol(dat3) - 1) expect_equal(dat2, dat3) }) context("one level up -- add_synth_attr") test_that("lapply - bug catches", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) ST <- data.frame(attr_cnts= as.character(c(0.1,0.1,0.1)), lev= c("employed", "unemp", "not_in_labor_force")) ST2 <- data.frame(abc= letters[1:3], attr_cnts= c(60, 10, 30), lev= c("employed", "unemp", "not_in_labor_force")) ST3 <- data.frame(lev= c("employed", "unemp", "not_in_labor_force"), attr_cnts= c(60, 10, 30)) ST4 <- data.frame(lvl= c("employed", "unemp", "not_in_labor_force"), attr_cnts= c(60, 10, 30)) expect_error(synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= ST, attr_name= "variable")) expect_error(synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= ST2, attr_name= "variable")) expect_error(synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= ST3, attr_name= "variable")) expect_error(synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= ST4, attr_name= "variable")) }) test_that("lapply - valid output with percentages (df)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) sym_tbl <- data.frame(attr_cnts= c(0.6, 0.1, 0.3), lev= c("employed", "unemp", "not_in_labor_force")) dat <- synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= sym_tbl, attr_name= "variable") expect_equal(nrow(df) * nrow(sym_tbl), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(sym_tbl$lev %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% sym_tbl$lev)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(sort(as.vector(tapply(dat$p, dat$variable, sum))), sort(sym_tbl[,1])) expect_equal(as.vector(tapply(dat$p, dat$variable, sum)), c(0.6, 0.3, 0.1)) }) test_that("lapply - valid output with percentages (dt)", { set.seed(567L) df <- data.frame(gender= 
factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) data.table::setDT(df) sym_tbl <- data.frame(attr_cnts= c(0.6, 0.1, 0.3), lev= c("employed", "unemp", "not_in_labor_force")) data.table::setDT(sym_tbl) dat <- synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= sym_tbl, attr_name= "variable") expect_equal(nrow(df) * nrow(sym_tbl), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(sym_tbl$lev %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% sym_tbl$lev)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(sort(as.vector(tapply(dat$p, dat$variable, sum))), c(0.1, 0.3, 0.6)) expect_equal(as.vector(tapply(dat$p, dat$variable, sum)), c(0.6, 0.3, 0.1)) }) test_that("lapply - valid output with counts -- DF", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) sym_tbl <- data.frame(attr_cnts= c(60, 10, 30), lev= c("employed", "unemp", "not_in_labor_force")) dat <- synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= sym_tbl, attr_name= "variable") expect_equal(nrow(df) * nrow(sym_tbl), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(sym_tbl$lev %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% sym_tbl$lev)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(sort(as.vector(tapply(dat$p, dat$variable, sum))), sort(sym_tbl[,1]) / 100) expect_equal(as.vector(tapply(dat$p, dat$variable, sum)), c(0.6, 0.3, 0.1)) }) test_that("lapply - valid output with counts -- DT", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), edu= factor(sample(c("hs", "col", "grad"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) data.table::setDT(df) sym_tbl <- data.frame(attr_cnts= c(60, 10, 30), lev= c("employed", "unemp", "not_in_labor_force")) data.table::setDT(sym_tbl) dat <- synthACS:::add_synth_attr(l= df, prob_name= "p", sym_tbl= sym_tbl, attr_name= "variable") expect_equal(nrow(df) * nrow(sym_tbl), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(sym_tbl$lev %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% sym_tbl$lev)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(sort(as.vector(tapply(dat$p, dat$variable, sum))), c(0.1, 0.3, 0.6)) expect_equal(as.vector(tapply(dat$p, dat$variable, sum)), c(0.6, 0.3, 0.1)) }) context("conditional splitting & recursion") test_that("conditional split -- fully specified conditioning (DF)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), 
size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) dat <- synthACS:::cond_var_split(df, "p", attr_name= "variable", conditional_vars= cond_v, sym_tbl= sym_tbl) expect_equal(nrow(df) * length(levels), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(levels %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% levels)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(dat$p, dat$pov, sum), tapply(df$p, df$pov, sum)) expect_equal(tapply(dat$p, list(dat$pov, dat$gender), sum), tapply(df$p, list(df$pov, df$gender), sum)) }) test_that("conditional split -- fully specified conditioning (DT)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) data.table::setDT(df); data.table::setDT(sym_tbl) dat <- synthACS:::cond_var_split(df, "p", attr_name= "variable", conditional_vars= cond_v, sym_tbl= sym_tbl) expect_equal(nrow(df) * length(levels), nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("variable", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(levels %in% levels(factor(dat$variable)))) expect_true(all(levels(factor(dat$variable)) %in% levels)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(dat$p, dat$pov, sum), tapply(df$p, df$pov, sum)) expect_equal(tapply(dat$p, list(dat$pov, dat$gender), sum), tapply(df$p, list(df$pov, df$gender), sum)) }) test_that("conditional splitting -- differential conditioning", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), edu= factor(sample(c("LT_college", "BA_degree"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) levels <- c("low", "middle", "high") ST2 <- data.frame(gender= c(rep("male", 3), rep("female", 6)), edu= c(rep(NA, 3), rep(c("LT_college", "BA_degree"), each= 3)), attr_pct= c(0.1, 0.8, 0.1, 10, 80, 10, 5, 70, 25), levels= rep(levels, 3)) dat <- synthACS:::cond_var_split(df, prob_name= "p", attr_name= "SES", conditional_vars= c("gender", "edu"), sym_tbl= ST2) expect_equal(nrow(df) * 3, nrow(dat)) expect_equal(ncol(df), ncol(dat) - 1) expect_true(all(names(df) %in% names(dat))) expect_true(all(names(dat) %in% c("SES", names(df)))) expect_equal(sum(dat$p), 1) expect_equal(sum(dat$p), sum(df$p)) expect_true(all(levels %in% levels(factor(dat$SES)))) 
expect_true(all(levels(factor(dat$variable)) %in% levels)) expect_equal(tapply(dat$p, dat$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(dat$p, dat$edu, sum), tapply(df$p, df$edu, sum)) expect_equal(tapply(dat$p, list(dat$edu, dat$gender), sum), tapply(df$p, list(df$edu, df$gender), sum)) }) context("Synthetic new attribute (top level)") test_that("error checking", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), edu= factor(sample(c("LT_college", "BA_degree"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) levels <- c("low", "middle", "high") ST <- data.frame(gender= c(rep("male", 3), rep("female", 6)), attr_pct= c(0.1, 0.8, 0.1, 10, 80, 10, 5, 70, 25), levels= rep(levels, 3)) ST2 <- data.frame(gender= c(rep("male", 3), rep("female", 6)), edu= c(rep(NA, 3), rep(c("LT_college", "BA_degree"), each= 3)), attr_pct= c(0.1, 0.8, 0.1, 10, 80, 10, 5, 70, 25), levels= rep(levels, 3)) ST3 <- ST2; names(ST3) <- c("abc", "def", "attr_pct", "levels") expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= NULL, sym_tbl = NULL)) class(df) <- c(class(df), "micro_synthetic") expect_error(synthetic_new_attribute(df= df, prob_name= "pp", attr_name= "variable", conditional_vars= NULL, sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= 123L, attr_name= "variable", conditional_vars= NULL, sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= 123L, conditional_vars= NULL, sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= c("p", "p2"), attr_name= "variable", conditional_vars= NULL, sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= c("v1", "v2"), conditional_vars= NULL, sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= "abc", sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender","abc"), sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= "gender", sym_tbl = NULL)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender", "edu"), sym_tbl = ST)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= "gender", sym_tbl = ST2)) expect_error(synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender", "edu"), sym_tbl = ST3)) }) test_that("new attr (top level) - standard conditioning (DF)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) class(df) <- c("data.frame", "micro_synthetic") cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) syn <- synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender", "pov"), sym_tbl= sym_tbl) expect_true(is.micro_synthetic(syn)) expect_true(is.data.frame(syn)) expect_equal(sum(syn$p), 1) 
expect_equal(sum(syn$p), sum(df$p)) expect_equal(tapply(syn$p, syn$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(syn$p, syn$pov, sum), tapply(df$p, df$pov, sum)) expect_equal(nrow(df) * length(levels), nrow(syn)) expect_equal(ncol(df), ncol(syn) - 1) expect_true(all(names(df) %in% names(syn))) expect_true(all(names(syn) %in% c("variable", names(df)))) expect_true(all(levels %in% levels(factor(syn$variable)))) expect_true(all(levels(factor(syn$variable)) %in% levels)) }) test_that("new attr (top level) - standard conditioning (DT)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) class(df) <- c("data.frame", "micro_synthetic") cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) data.table::setDT(df); data.table::setDT(sym_tbl) syn <- synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender", "pov"), sym_tbl= sym_tbl) expect_true(is.micro_synthetic(syn)) expect_true(is.data.frame(syn)) expect_equal(sum(syn$p), 1) expect_equal(sum(syn$p), sum(df$p)) expect_equal(tapply(syn$p, syn$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(syn$p, syn$pov, sum), tapply(df$p, df$pov, sum)) expect_equal(nrow(df) * length(levels), nrow(syn)) expect_equal(ncol(df), ncol(syn) - 1) expect_true(all(names(df) %in% names(syn))) expect_true(all(names(syn) %in% c("variable", names(df)))) expect_true(all(levels %in% levels(factor(syn$variable)))) expect_true(all(levels(factor(syn$variable)) %in% levels)) }) test_that("new attr (top level) - differential conditioning", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), edu= factor(sample(c("LT_college", "BA_degree"), size= 100, replace=T)), p= runif(100)) df$p <- df$p / sum(df$p) class(df) <- c("data.frame", "micro_synthetic") levels <- c("low", "middle", "high") ST2 <- data.frame(gender= c(rep("male", 3), rep("female", 6)), edu= c(rep(NA, 3), rep(c("LT_college", "BA_degree"), each= 3)), attr_pct= c(0.1, 0.8, 0.1, 10, 80, 10, 5, 70, 25), levels= rep(levels, 3)) syn <- synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= c("gender", "edu"), sym_tbl= ST2) expect_true(is.micro_synthetic(syn)) expect_true(is.data.frame(syn)) expect_equal(sum(syn$p), 1) expect_equal(sum(syn$p), sum(df$p)) expect_equal(tapply(syn$p, syn$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(syn$p, syn$edu, sum), tapply(df$p, df$edu, sum)) expect_equal(nrow(df) * length(levels), nrow(syn)) expect_equal(ncol(df), ncol(syn) - 1) expect_true(all(names(df) %in% names(syn))) expect_true(all(names(syn) %in% c("variable", names(df)))) expect_true(all(levels %in% levels(factor(syn$variable)))) expect_true(all(levels(factor(syn$variable)) %in% levels)) }) test_that("new attr (top level) -- unconditionally", { set.seed(567L) df <- data.frame(gender= factor(sample(c("m", "f"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("below poverty", "at above poverty"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) 
class(df) <- c("data.frame", "micro_synthetic") levels <- c("low", "middle", "high") ST2 <- data.frame(attr_pct= c(0.1, 0.8, 0.1), levels= levels) syn <- synthetic_new_attribute(df= df, prob_name= "p", attr_name= "variable", conditional_vars= NULL, sym_tbl= ST2) expect_true(is.micro_synthetic(syn)) expect_true(is.data.frame(syn)) expect_equal(sum(syn$p), 1) expect_equal(sum(syn$p), sum(df$p)) expect_equal(tapply(syn$p, syn$gender, sum), tapply(df$p, df$gender, sum)) expect_equal(tapply(syn$p, syn$pov, sum), tapply(df$p, df$pov, sum)) expect_equal(nrow(df) * length(levels), nrow(syn)) expect_equal(ncol(df), ncol(syn) - 1) expect_true(all(names(df) %in% names(syn))) expect_true(all(names(syn) %in% c("variable", names(df)))) expect_true(all(levels %in% levels(factor(syn$variable)))) expect_true(all(levels(factor(syn$variable)) %in% levels)) }) context("Synthetic new attribute -- in parallel") test_that("can add extra attributes in parallel (DF)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) class(df) <- c("data.frame", "micro_synthetic") cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) df_list <- replicate(10, df, simplify= FALSE) st_list <- replicate(10, sym_tbl, simplify= FALSE) syn <- all_geog_synthetic_new_attribute(df_list, prob_name= "p", attr_name= "variable", conditional_vars= cond_v,st_list= st_list) expect_true( all(unlist(lapply(syn, function(l) is.micro_synthetic(l[[2]])))) ) expect_true( all(unlist(lapply(syn, function(l) is.data.frame(l[[2]])))) ) expect_true(all(unlist(lapply(syn, function(l) sum(l[[2]]$p) == 1)))) expect_equal(lapply(syn, function(l) {tapply(l[[2]]$p, l[[2]]$gender, sum)}), lapply(df_list, function(l) {tapply(l$p, l$gender, sum)})) expect_equal(lapply(syn, function(l) {tapply(l[[2]]$p, l[[2]]$pov, sum)}), lapply(df_list, function(l) {tapply(l$p, l$pov, sum)})) }) test_that("can add extra attributes in parallel (DT)", { set.seed(567L) df <- data.frame(gender= factor(sample(c("male", "female"), size= 100, replace=T)), age= factor(sample(1:5, size= 100, replace=T)), pov= factor(sample(c("lt_pov", "gt_eq_pov"), size= 100, replace=T, prob= c(.15,.85))), p= runif(100)) df$p <- df$p / sum(df$p) class(df) <- c("data.frame", "micro_synthetic") cond_v <- c("gender", "pov") levels <- c("employed", "unemp", "not_in_LF") sym_tbl <- data.frame(gender= rep(rep(c("male", "female"), each= 3), 2), pov= rep(c("lt_pov", "gt_eq_pov"), each= 6), cnts= c(52, 8, 268, 72, 12, 228, 1338, 93, 297, 921, 105, 554), lvls= rep(levels, 4)) data.table::setDT(df); data.table::setDT(sym_tbl) df_list <- replicate(10, df, simplify= FALSE) st_list <- replicate(10, sym_tbl, simplify= FALSE) syn <- all_geog_synthetic_new_attribute(df_list, prob_name= "p", attr_name= "variable", conditional_vars= cond_v,st_list= st_list) expect_true( all(unlist(lapply(syn, function(l) is.micro_synthetic(l[[2]])))) ) expect_true( all(unlist(lapply(syn, function(l) is.data.frame(l[[2]])))) ) expect_true(all(unlist(lapply(syn, function(l) sum(l[[2]]$p) == 1)))) expect_equal(lapply(syn, function(l) {tapply(l[[2]]$p, l[[2]]$gender, sum)}), lapply(df_list, function(l) 
{tapply(l$p, l$gender, sum)})) expect_equal(lapply(syn, function(l) {tapply(l[[2]]$p, l[[2]]$pov, sum)}), lapply(df_list, function(l) {tapply(l$p, l$pov, sum)})) })
fun_factory <- function(mother_fun, ...) { fun_params <- list(...) lapply(fun_params, force) new_fun <- mother_fun environment(new_fun) <- new.env() for (i in 1:length(fun_params)) assign(names(fun_params)[i], fun_params[[i]], envir = environment(new_fun)) return(new_fun) } genRandomFuns <- function(portfolio_fun, params_grid, name = "portfolio", N_funs = NULL) { N_combinations <- prod(sapply(params_grid, length)) if (is.null(N_funs)) stop("Number of functions to be generated \"N_funs\" has to be specified") if (N_funs > N_combinations) { warning("\nToo many functions requested for only ", N_combinations, " possible combinations: using instead N_funs = ", N_combinations, ".") N_funs <- N_combinations } else message("Generating ", N_funs, " functions out of a total of ", N_combinations, " possible combinations.") list_random_funs <- vector("list", N_funs) list_random_params <- vector("list", N_funs) for (i in 1:N_funs) { while (any(sapply(list_random_params, identical, params_realiz <- lapply(params_grid, FUN = sample, 1, replace = TRUE)))) TRUE list_random_params[[i]] <- params_realiz list_random_funs[[i]] <- do.call(fun_factory, c("mother_fun" = portfolio_fun, params_realiz)) attr(list_random_funs[[i]], "params") <- params_realiz names(list_random_funs)[i] <- paste0(name, " (", paste(names(params_grid), params_realiz, sep = "=", collapse = ", "), ")") } attr(list_random_funs, "params_grid") <- params_grid return(list_random_funs) } plotPerformanceVsParams <- function(bt_all_portfolios, params_subset = NULL, name_performance = "Sharpe ratio", summary_fun = median) { res_summary <- backtestSummary(bt_all_portfolios, summary_fun = summary_fun) score_all_funs <- summaryTable(res_summary, measures = name_performance, type = "simple") N_portfolios <- length(attr(bt_all_portfolios, "portfolio_index")) if (N_portfolios == 0) stop("No portfolio found in backtest!") params_portfolio_funs_list <- lapply(bt_all_portfolios[1:N_portfolios], attr, "params") if (any(sapply(params_portfolio_funs_list, is.null))) stop("Backtest does not contain the attribute \"params\"!") params_portfolio_funs <- do.call(rbind.data.frame, params_portfolio_funs_list) portfolio_data <- cbind(params_portfolio_funs, score = score_all_funs[1:N_portfolios]) for (i in seq_along(params_subset)) portfolio_data <- portfolio_data[portfolio_data[[names(params_subset[i])]] %in% params_subset[[i]], ] params_grid <- params_portfolio_funs_list[[1]] params_grid[] <- NA for (i in 1:length(params_grid)) params_grid[[i]] <- sort(unique(sapply(params_portfolio_funs_list, function(x) x[[i]]))) if (!is.null(params_subset)) { if (!all(names(params_subset) %in% names(params_grid))) stop("Argument \"params_subset\" contains parameters not contained in the backtest.") for (name in names(params_subset)) if (!all(params_subset[[name]] %in% params_grid[[name]])) stop("Element ", name, " of argument \"params_subset\" is not contained in the backtest.") params_grid <- modifyList(params_grid, params_subset) } message("Parameter grid:") message(paste(paste(" ", paste(names(params_grid), params_grid, sep = " = ")), collapse = "\n")) N_grid <- sapply(params_grid, length) idx_fixed <- which(N_grid == 1) idx_numeric <- setdiff(which(lapply(params_grid, class) == "numeric"), idx_fixed) idx_factor <- setdiff(which(lapply(params_grid, class) %in% c("character", "factor", "logical")), idx_fixed) if (!setequal(union(union(idx_fixed, idx_numeric), idx_factor), 1:length(params_grid))) stop("Error in the partitioning of the elements of params into fixed, numeric, and 
factor.") message(sprintf("\nParameter types: %d fixed, %d variable numeric, and %d variable non-numeric.", length(idx_fixed), length(idx_numeric), length(idx_factor))) title_name <- ifelse(length(idx_fixed) == 0, name_performance, paste(name_performance, "for configuration:", paste(names(params_grid[idx_fixed]), params_grid[idx_fixed], sep = "=", collapse = ", "))) switch(as.character(length(idx_numeric)), "0" = stop("No numeric parameter to plot!"), "1" = { p <- ggplot(portfolio_data, aes(x = .data[[names(params_grid[idx_numeric])]], y = .data$score)) + geom_point() + geom_line() + ggtitle(title_name) + xlab(names(params_grid[idx_numeric])) + ylab(name_performance) if (length(idx_factor) >= 1) p <- p + aes(col = .data[[names(params_grid[idx_factor[1]])]]) + guides(col = guide_legend(title = names(params_grid[idx_factor[1]]))) if (length(idx_factor) >= 2) p <- p + aes(shape = .data[[names(params_grid)[idx_factor[2]]]]) + guides(shape = guide_legend(title = names(params_grid)[idx_factor[2]])) if (length(idx_factor) == 3) p <- p + facet_wrap(as.formula(paste("~", names(params_grid[idx_factor[3]]))), labeller = labeller(.cols = label_both)) if (length(idx_factor) == 4) p <- p + facet_grid(as.formula(paste(names(params_grid[idx_factor[3]]), "~", names(params_grid[idx_factor[4]]))), labeller = labeller(.cols = label_both, .rows = label_both)) if (length(idx_factor) > 4) stop("Cannot deal with one numeric parameter and more than 4 non-numeric parameters.") }, "2" = { p <- ggplot(portfolio_data, aes(x = .data[[names(params_grid[idx_numeric[1]])]], y = .data[[names(params_grid[idx_numeric[2]])]], fill = .data$score)) + geom_tile() + scale_fill_viridis_c(name = name_performance, na.value = "transparent") + ggtitle(title_name) + xlab(names(params_grid[idx_numeric[1]])) + ylab(names(params_grid[idx_numeric[2]])) if (length(idx_factor) == 1) p <- p + facet_wrap(as.formula(paste("~", names(params_grid[idx_factor[1]]))), labeller = labeller(.cols = label_both)) if (length(idx_factor) == 2) p <- p + facet_grid(as.formula(paste(names(params_grid[idx_factor[1]]), "~", names(params_grid[idx_factor[2]]))), labeller = labeller(.cols = label_both, .rows = label_both)) if (length(idx_factor) > 2) stop("Cannot deal with 2 numeric parameters and more than 2 non-numeric parameters.") }, stop("Cannot deal with more than 2 numeric parameters.")) return(p) }
check_valid_device <- function(x, ...) {
  res <- checkmate::check_string(x, min.chars = 1, ...)
  if (!isTRUE(res)) {
    res
  } else if (nchar(x) > 25) {
    "Must have fewer than 26 characters"
  } else {
    TRUE
  }
}

test_valid_device <- checkmate::makeTestFunction(check_valid_device)
assert_valid_device <- checkmate::makeAssertionFunction(check_valid_device)
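## Hedged usage sketch (added; not part of the original source), with arbitrary
## example inputs. The generated helpers behave like the usual checkmate
## check/test/assert triple:
check_valid_device("sensor-01")        # TRUE
check_valid_device(strrep("x", 30))    # "Must have fewer than 26 characters"
test_valid_device("")                  # FALSE (fails min.chars = 1)
try(assert_valid_device(42))           # error: not a string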
# Mean, second moment E[YY'], and covariance of a p-variate normal N(mu, Sigma)
# restricted to the box [lower, upper]. Dispatches to Kan-type routines for
# p < 10 and Vaida-type routines for larger p; meanvarNuni, withinfs, Kan.* and
# Vaida.* are helper functions defined elsewhere in the package.
meanvarN7 = function(lower = rep(-Inf, length(mu)), upper = rep(Inf, length(mu)), mu, Sigma){
  p = length(mu)
  if(p == 1){
    out = meanvarNuni(a = lower, b = upper, mu = mu, Sigma = Sigma)
    return(out)
  }
  if(p < 10){
    if(all(is.infinite(lower))){
      if(all(is.infinite(upper))){
        return(list(mean = mu, EYY = Sigma + mu%*%t(mu), varcov = Sigma))
      }else{
        bool = is.infinite(upper)
        if(sum(bool) > 0){
          out = withinfs(upper = upper, mu = mu, Sigma = Sigma, bool = bool)
        }else{
          out = Kan.RC(b = upper, mu = mu, Sigma = Sigma)
        }
      }
    }else{
      if(all(is.infinite(upper))){
        bool = is.infinite(lower)
        if(sum(bool) > 0){
          out = withinfs(upper = -lower, mu = -mu, Sigma = Sigma, bool = bool)
          out$mean = -out$mean
        }else{
          out = Kan.RC(b = -lower, mu = -mu, Sigma = Sigma)
          out$mean = -out$mean
        }
      }else{
        if(all(is.finite(c(lower, upper)))){
          out = Kan.IC(a = lower, b = upper, mu = mu, Sigma = Sigma)
        }else{
          bool = is.infinite(lower) & is.infinite(upper)
          if(sum(bool) > 0){
            out = withinfs(lower, upper, mu, Sigma, bool)
          }else{
            out = Kan.LRIC(a = lower, b = upper, mu = mu, Sigma = Sigma)
          }
        }
      }
    }
  }else{
    if(all(is.infinite(lower))){
      if(all(is.infinite(upper))){
        return(list(mean = mu, EYY = Sigma + mu%*%t(mu), varcov = Sigma))
      }else{
        bool = is.infinite(upper)
        if(sum(bool) > 0){
          out = withinfs(upper = upper, mu = mu, Sigma = Sigma, bool = bool)
        }else{
          out = Vaida.RC(b = upper, mu = mu, Sigma = Sigma)
        }
      }
    }else{
      if(all(is.infinite(upper))){
        bool = is.infinite(lower)
        if(sum(bool) > 0){
          out = withinfs(upper = -lower, mu = -mu, Sigma = Sigma, bool = bool)
          out$mean = -out$mean
        }else{
          out = Vaida.RC(b = -lower, mu = -mu, Sigma = Sigma)
          out$mean = -out$mean
        }
      }else{
        if(all(is.finite(c(lower, upper)))){
          out = Vaida.IC(a = lower, b = upper, mu = mu, Sigma = Sigma)
        }else{
          bool = is.infinite(lower) & is.infinite(upper)
          if(sum(bool) > 0){
            out = withinfs(lower, upper, mu, Sigma, bool)
          }else{
            out = Vaida.LRIC(a = lower, b = upper, mu = mu, Sigma = Sigma)
          }
        }
      }
    }
  }
  return(out)
}
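## Hedged usage sketch (added; not part of the original source). With unbounded
## limits no truncation is applied, so only this file's own code path is
## exercised (no Kan.*/Vaida.* helpers needed) and the moments reduce to the
## untruncated normal moments:
mu_ex <- c(1, -2)
Sigma_ex <- matrix(c(2, 0.5, 0.5, 1), 2, 2)
out_ex <- meanvarN7(mu = mu_ex, Sigma = Sigma_ex)   # lower/upper default to -Inf/Inf
stopifnot(all.equal(out_ex$mean, mu_ex), all.equal(out_ex$varcov, Sigma_ex))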
predictBoostMLR <- function(Object, x, tm, id, y, M, importance = FALSE, eps = 1e-5,
                            setting_seed = FALSE, seed_value = 100L, ...) {
  user.option <- list(...)
  dt_Add <- is.hidden.predict.dt_Add(user.option)
  importance_Coef <- is.hidden.importance_Coef(user.option)
  if (missing(tm) && missing(x)) {
    stop("tm and x both missing")
  }
  if (!missing(tm) && missing(id)) {
    stop("id is missing")
  }
  CrossSectional <- FALSE
  if (missing(tm) && !missing(x)) {
    if (!missing(id)) {
      if (!(length(sort(unique(id))) == nrow(x))) {
        stop("tm is missing")
      }
    } else {
      id <- 1:nrow(x)
    }
    # cross-sectional data: one dummy time point per observation (row of x)
    tm <- rep(0, nrow(x))
    CrossSectional <- TRUE
  }
  if (missing(x) && !missing(tm)) {
    x_miss <- TRUE
    All_RawX <- TRUE
    x <- cbind(rep(1, length(tm)))
    if (length(sort(unique(id))) == nrow(x)) {
      CrossSectional <- TRUE
    }
  } else {
    x_miss <- FALSE
  }
  Time_Varying <- Object$Time_Varying
  if (!missing(tm) && Time_Varying == FALSE && CrossSectional == FALSE) {
    if (x_miss) {
      x <- tm
    } else {
      x <- x
    }
  }
  if (any(is.na(id))) {
    stop("missing values encountered in id: remove observations with missing values")
  }
  if (!missing(y)) {
    if (any(is.na(y))) {
    }
    testFlag <- TRUE
    y_Names <- colnames(y)
  } else {
    testFlag <- FALSE
    L <- Object$Grow_Object$Dimensions$L
    y <- matrix(0, nrow = nrow(x), ncol = L)
    y_Names <- paste("y", 1:L, sep = "")
  }
  if (!is.matrix(y)) {
    y <- data.matrix(y)
  }
  Time_Unmatch <- Object$Grow_Object$Time_Unmatch
  N <- nrow(x)
  if (!is.null(dt_Add)) {
    if (!is.list(dt_Add)) {
      stop("dt_Add must be a list")
    }
    K_Add <- length(dt_Add)
    nullObj <- lapply(1:K_Add, function(kk) {
      nc_K_Add <- ncol(dt_Add[[kk]])
      if (nc_K_Add != 3) {
        stop("Each element of dt_Add must be a dataset with 3 columns arranged in the order id, time, x")
      }
      NULL
    })
    Ord_id_tm <- Order_Time(ID = id, Time = tm)
    id <- id[Ord_id_tm]
    tm <- tm[Ord_id_tm]
    x <- x[Ord_id_tm, , drop = FALSE]
    y <- y[Ord_id_tm, , drop = FALSE]
    x_Add_New <- matrix(NA, nrow = N, ncol = K_Add)
    x_Names_Add <- rep(NA, K_Add)
    Time_Add_New <- matrix(NA, nrow = N, ncol = K_Add)
    Time_Names_Add <- rep(NA, K_Add)
    for (kk in 1:K_Add) {
      Ord_id_tm_Add <- Order_Time(ID = dt_Add[[kk]][, 1], Time = dt_Add[[kk]][, 2])
      dt_Add[[kk]] <- dt_Add[[kk]][Ord_id_tm_Add, , drop = FALSE]
      id_Add <- dt_Add[[kk]][, 1]
      x_Names_Add[kk] <- names(dt_Add[[kk]][, 3, drop = FALSE])
      Time_Names_Add[kk] <- names(dt_Add[[kk]][, 2, drop = FALSE])
      if (any(is.na(id_Add))) {
        stop("Missing values observed for id in dt_Add")
      }
      unq_id_Add <- unique(id_Add)
      n_Add <- length(unq_id_Add)
      # carry the most recent (last observed, not later than tm) value of each
      # additional covariate forward to every observation of the same subject
      nullObj <- unlist(lapply(1:n_Add, function(i) {
        Which_id <- which(unq_id_Add[i] == id)
        ni <- length(Which_id)
        if (ni > 0) {
          Which_id_Add <- which(id_Add == unq_id_Add[i])
          ni_Add <- length(Which_id_Add)
          tm_Add <- dt_Add[[kk]][Which_id_Add, 2]
          x_Add <- dt_Add[[kk]][Which_id_Add, 3]
          for (j in 1:ni) {
            for (jj in 1:ni_Add) {
              if ((!is.na(tm_Add[jj]) && !is.na(tm[Which_id[j]]))) {
                if (tm_Add[jj] <= tm[Which_id[j]]) {
                  x_Add_New[Which_id[j], kk] <<- x_Add[jj]
                  Time_Add_New[Which_id[j], kk] <<- tm_Add[jj]
                }
              }
            }
          }
        }
        NULL
      }))
    }
    colnames(x_Add_New) <- x_Names_Add
    x <- cbind(x, x_Add_New)
    colnames(Time_Add_New) <- Time_Names_Add
  } else {
    Time_Add_New <- matrix(0, nrow = N, ncol = 1)
    colnames(Time_Add_New) <- "Time_Add"
  }
  if (is.character(id)) {
    id <- as.numeric(id)
  }
  if (is.factor(id)) {
    id <- as.numeric(levels(id))[id]
  }
  sort_id <- is.hidden.sort_id(user.option)
  if (sort_id) {
    unq_id <- sort_unique_C_NA(id)
  } else {
    unq_id <- unique_C_NA(id)
  }
  Ord_id_tm <- Order_Time(ID = id, Time = tm, unq_id = unq_id)
  id <- id[Ord_id_tm]
  tm <- tm[Ord_id_tm]
  x <- x[Ord_id_tm, , drop = FALSE]
  y <- y[Ord_id_tm, , drop = FALSE]
  if (!is.matrix(x)) {
    x <- data.matrix(x)
  }
  x_Names <- colnames(x)
  K <- ncol(x)
  if (is.null(x_Names)) {
    x_Names <- paste("x", 1:K, sep = "")
  }
  if (!identical(x_Names, Object$x_Names)) {
    stop("Covariates from the grow and predict functions do not match")
  }
  if (missing(M)) {
    M <- Object$Grow_Object$Regulate$M
  }
  L <- ncol(y)
  if (is.null(y_Names)) {
    y_Names <- paste("y", 1:L, sep = "")
  }
  H <- Object$Grow_Object$Dimensions$H
  Dk <- Object$Grow_Object$Dimensions$Dk
  x_Mean <- Object$Grow_Object$Data$x_Mean
  x_Std_Error <- Object$Grow_Object$Data$x_Std_Error
  y_Mean <- Object$Grow_Object$Data$y_Mean
  y_Std_Error <- Object$Grow_Object$Data$y_Std_Error
  unq_tm <- Object$Grow_Object$Index$unq_tm
  unq_x <- Object$Grow_Object$Index$unq_x
  Bt <- Object$Grow_Object$BS$Bt
  Bx <- Object$Grow_Object$BS$Bx
  nu <- Object$Grow_Object$Regulate$nu
  Beta <- Object$Grow_Object$Beta_Estimate$Beta
  Beta_Hat_List <- Object$Grow_Object$Beta_Estimate$Beta_Hat_List
  UseRaw <- Object$UseRaw
  vimpFlag <- (importance == TRUE && testFlag == TRUE)
  vimpFlag_Coef <- (importance_Coef == TRUE && testFlag == TRUE)
  obj_C <- predict_BoostMLR_C(x, tm, id, y, x_Mean, x_Std_Error, y_Mean, y_Std_Error,
                              K, L, H, Dk, unq_id, unq_tm, unq_x, Bt, Bx, UseRaw,
                              Time_Add_New, Time_Unmatch, Beta, Beta_Hat_List,
                              testFlag, M, nu, Time_Varying, vimpFlag, vimpFlag_Coef,
                              eps, setting_seed, seed_value)
  Error_Rate <- obj_C$Error_Rate
  colnames(Error_Rate) <- y_Names
  vimp <- obj_C$vimp
  vimp_Coef <- obj_C$vimp_Coef
  if (vimpFlag) {
    names(vimp) <- y_Names
    for (l in 1:L) {
      rownames(vimp[[l]]) <- x_Names
      if (H == 1) {
        vimp[[l]] <- vimp[[l]][, 1, drop = FALSE]
      }
      if (H == 1) {
        colnames(vimp[[l]]) <- "Main_Eff"
      } else {
        colnames(vimp[[l]]) <- c("Main_Eff", paste("Int_Eff.", 1:H, sep = ""))
      }
    }
  }
  if (vimpFlag_Coef) {
    names(vimp_Coef) <- y_Names
    for (l in 1:L) {
      rownames(vimp_Coef[[l]]) <- x_Names
      if (H == 1) {
        vimp_Coef[[l]] <- vimp_Coef[[l]][, 1, drop = FALSE]
      }
      if (H == 1) {
        colnames(vimp_Coef[[l]]) <- "Main_Eff"
      } else {
        colnames(vimp_Coef[[l]]) <- c("Main_Eff", paste("Int_Eff.", 1:H, sep = ""))
      }
    }
  }
  mu <- obj_C$Org_mu
  colnames(mu) <- y_Names
  if (testFlag) {
    mu_Mopt <- obj_C$Org_mu_Mopt
    colnames(mu_Mopt) <- y_Names
  } else {
    mu_Mopt <- NA
  }
  Pred_Object <- obj_C$Pred_Object
  Pred_Object$Dimensions <- obj_C$Dimensions
  Pred_Object$Index <- obj_C$Index
  Pred_Object$BS <- obj_C$BS
  Pred_Object$UseRaw <- UseRaw
  Pred_Object$Time_Varying <- Time_Varying
  Pred_Object$Beta_Hat_List <- Beta_Hat_List
  obj <- list(Data = obj_C$Data,
              x_Names = x_Names,
              y_Names = y_Names,
              mu = mu,
              mu_Mopt = mu_Mopt,
              Error_Rate = Error_Rate,
              Mopt = obj_C$Mopt,
              nu = nu,
              rmse = obj_C$rmse,
              vimp = vimp,
              vimp_Coef = vimp_Coef,
              Pred_Object = Pred_Object)
  class(obj) <- c("BoostMLR", "predict")
  invisible(obj)
}
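## Hedged usage sketch (added; not part of the original source). Assuming a
## model was grown with the package's grow function (here assumed to be
## BoostMLR()) on longitudinal data, prediction on new data could look like the
## commented lines below; `fit`, `x_new`, `tm_new`, `id_new`, `y_new` are
## hypothetical placeholders.
# pred <- predictBoostMLR(Object = fit, x = x_new, tm = tm_new, id = id_new,
#                         y = y_new, importance = TRUE)
# pred$Error_Rate   # test error per response across boosting iterations
# pred$vimp         # variable importance per response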
source("https://raw.githubusercontent.com/Flavjack/inti/master/pkgdown/favicon/docs.r")

table <- "https://docs.google.com/spreadsheets/d/1QziIXGOwb8cl3GaARJq6Ez6aU7vND_UHKJnFcAKx0VI/edit"

# gsheet2tbl() comes from the gsheet package and %>% from magrittr; both are
# assumed to be loaded by the sourced docs.r setup script.
table %>%
  gsheet2tbl() %>%
  knitr::kable(caption = 'Description of each module in GerminaQuant to evaluate and analyze the germination process.')

knitr::include_graphics('files/impdt.png')
knitr::include_graphics('files/dtdown.png')
knitr::include_graphics('files/stat.png')
knitr::include_graphics('files/plot.png')
knitr::include_graphics('files/gtime.png')

if (!file.exists("files/pkgs.bib")) { write_bib(c(.packages()), 'files/pkgs.bib') }
autotriples <- function(x, lags = 1:2, h,
                        type = c("levels", "persp", "image", "lines", "points")) {
  panel <- list(
    levels = function(x) contour(x, xlab = xlab, ylab = ylab),
    persp  = function(x) persp(x, xlab = xlab, ylab = ylab, zlab = zlab),
    image  = function(x) image(x, xlab = xlab, ylab = ylab),
    lines  = function(x) scatterplot3d::scatterplot3d(X, xlab = xlab, ylab = ylab, zlab = zlab,
                                                      main = "directed lines", type = "l"),
    points = function(x) scatterplot3d::scatterplot3d(X, xlab = xlab, ylab = ylab, zlab = zlab,
                                                      main = "cloud", pch = 1))
  type <- match.arg(type)
  X <- embedd(x, lags = c(-lags, 0))
  if (missing(h)) h <- sm::hnorm(X[, 1])
  xlab <- paste("lag", lags[1])
  ylab <- paste("lag", lags[2])
  zlab <- "lag 0"
  mod <- sm::sm.regression(X[, 1:2], X[, 3], h = rep(h, 2), display = "none")
  panel[[type]](mod$estimate)
  invisible(NULL)
}
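## Hedged usage sketch (added; not part of the original source). Assuming
## embedd() (tseriesChaos), sm, and scatterplot3d are available, the lag-2
## structure of a chaotic logistic-map series could be inspected with:
# x <- numeric(500); x[1] <- 0.2
# for (t in 2:500) x[t] <- 4 * x[t - 1] * (1 - x[t - 1])
# autotriples(x, lags = 1:2, type = "image")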
svc <- paws::iotwireless()

test_that("list_destinations", {
  expect_error(svc$list_destinations(), NA)
})

test_that("list_destinations", {
  expect_error(svc$list_destinations(MaxResults = 20), NA)
})

test_that("list_device_profiles", {
  expect_error(svc$list_device_profiles(), NA)
})

test_that("list_device_profiles", {
  expect_error(svc$list_device_profiles(MaxResults = 20), NA)
})

test_that("list_partner_accounts", {
  expect_error(svc$list_partner_accounts(), NA)
})

test_that("list_partner_accounts", {
  expect_error(svc$list_partner_accounts(MaxResults = 20), NA)
})

test_that("list_service_profiles", {
  expect_error(svc$list_service_profiles(), NA)
})

test_that("list_service_profiles", {
  expect_error(svc$list_service_profiles(MaxResults = 20), NA)
})

test_that("list_wireless_devices", {
  expect_error(svc$list_wireless_devices(), NA)
})

test_that("list_wireless_devices", {
  expect_error(svc$list_wireless_devices(MaxResults = 20), NA)
})

test_that("list_wireless_gateway_task_definitions", {
  expect_error(svc$list_wireless_gateway_task_definitions(), NA)
})

test_that("list_wireless_gateway_task_definitions", {
  expect_error(svc$list_wireless_gateway_task_definitions(MaxResults = 20), NA)
})

test_that("list_wireless_gateways", {
  expect_error(svc$list_wireless_gateways(), NA)
})

test_that("list_wireless_gateways", {
  expect_error(svc$list_wireless_gateways(MaxResults = 20), NA)
})
shift.down <- function(A, rows = 1, fill = 0) {
  if (!is.matrix(A)) {
    stop("argument A is not a matrix")
  }
  if (!is.numeric(A)) {
    stop("argument A is not a numeric matrix")
  }
  if (rows < 0)
    stop("argument rows is negative")
  if (rows != trunc(rows))
    stop("argument rows is not an integer")
  if (!is.numeric(fill))
    stop("argument fill is not numeric")
  # note: 1:nrow(A) - 1 evaluates to 0:(nrow(A) - 1); the zero index is dropped,
  # so the subscript keeps rows 1 to nrow(A) - 1 of A
  if (rows > 0)
    return(shift.down(rbind(rep(fill, ncol(A)), A[1:nrow(A) - 1, ]), rows - 1, fill))
  return(A)
}
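## Hedged usage sketch (added; not part of the original source): shifting a
## 3 x 3 matrix down by one row pads the top with the fill value.
A <- matrix(1:9, nrow = 3, byrow = TRUE)
shift.down(A, rows = 1, fill = 0)
#      [,1] [,2] [,3]
# [1,]    0    0    0
# [2,]    1    2    3
# [3,]    4    5    6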
library(googleAuthR)

# install searchConsoleR from GitHub if it is not already available
if (!require(searchConsoleR)) {
  if (!require(devtools)) install.packages("devtools")
  devtools::install_github("MarkEdmondson1234/searchConsoleR")
}
library(searchConsoleR)

website <- "http://copenhagenish.me"
start <- Sys.Date() - 3
end <- Sys.Date() - 3
download_dimensions <- c('date', 'query')
type <- c('web')

gar_auth()

data <- search_analytics(siteURL = website, startDate = start, endDate = end,
                         dimensions = download_dimensions, searchType = type)

filename <- paste("search_analytics", Sys.Date(),
                  paste(download_dimensions, collapse = "", sep = ""),
                  type, ".csv", sep = "-")
write.csv(data, filename)
library(stringr)  # for str_sub()

# Returns the input string together with every string obtainable from it by
# deleting one, two, or three characters.
deletion3 <- function(string) {
  len <- nchar(string)
  i <- 1:len
  j <- 1:(len - 1)
  k <- 1:(len - 2)
  bucket <- c()
  bucket <- append(bucket, string)
  # all single-character deletions
  del1 <- unique(as.vector(sapply(string, function(x)
    paste0(str_sub(x, 1, i - 1), str_sub(x, i + 1, nchar(x))))))
  bucket <- append(bucket, del1)
  # all two-character deletions
  del2 <- unique(as.vector(sapply(del1, function(x)
    paste0(str_sub(x, 1, j - 1), str_sub(x, j + 1, nchar(x))))))
  bucket <- append(bucket, del2)
  # all three-character deletions
  del3 <- unique(as.vector(sapply(del2, function(x)
    paste0(str_sub(x, 1, k - 1), str_sub(x, k + 1, nchar(x))))))
  bucket <- append(bucket, del3)
  bucket
}
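## Hedged usage sketch (added; not part of the original source): for a short
## word the bucket contains the word itself plus every unique 1-, 2-, and
## 3-character deletion.
deletion3("cats")
# "cats" "ats" "cts" "cas" "cat" "ts" "as" "at" "cs" "ct" "ca" "s" "t" "a" "c"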