get_type <- function(x) { s4basic_get_type(x) } get_args <- function (x) { s4basic_get_args(x) } get_hash <- function (x) { s4basic_hash(x) } get_str <- function(x) { s4basic_str(x) } free_symbols <- function(x) { s4basic_free_symbols(x) } function_symbols <- function(x) { s4basic_function_symbols(x) } get_name <- function(x) { s4basic_function_getname(x) } get_prec <- function(x) { s4basic_realmpfr_get_prec(x) }
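## Usage sketch for the accessors above (hedged: they wrap compiled
## s4basic_* bindings as in the 'symengine' package, so this only runs with
## that package loaded; symengine::S is assumed here for constructing input).
if (FALSE) {
  ex <- symengine::S("x + y")
  get_type(ex)       # internal type of the expression, e.g. "Add"
  get_str(ex)        # string form, "x + y"
  free_symbols(ex)   # the free symbols x and y
}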
showCols1 <- function(bg = "gray", cex = 0.75, srt = 30) { m <- ceiling(sqrt(n <- length(cl <- colors()))) length(cl) <- m*m; cm <- matrix(cl, m) require("graphics") op <- par(mar=rep(0,4), ann=FALSE, bg = bg); on.exit(par(op)) plot(1:m,1:m, type="n", axes=FALSE) text(col(cm), rev(row(cm)), cm, col = cl, cex=cex, srt=srt) } showCols1() showCols2 <- function(bg = "grey", cex = 0.75, rot = 30) { m <- ceiling(sqrt(n <- length(cl <- colors()))) length(cl) <- m*m; cm <- matrix(cl, m) require("grid") grid.newpage(); vp <- viewport(width = .92, height = .92) grid.rect(gp=gpar(fill=bg)) grid.text(cm, x = col(cm)/m, y = rev(row(cm))/m, rot = rot, vp=vp, gp=gpar(cex = cex, col = cm)) } showCols2() showCols2(bg = "gray33") plotCol <- function(col, nrow=1, ncol=ceiling(length(col) / nrow), txt.col="black") { stopifnot(nrow >= 1, ncol >= 1) if(length(col) > nrow*ncol) warning("some colors will not be shown") require(grid) grid.newpage() gl <- grid.layout(nrow, ncol) pushViewport(viewport(layout=gl)) ic <- 1 for(i in 1:nrow) { for(j in 1:ncol) { pushViewport(viewport(layout.pos.row=i, layout.pos.col=j)) grid.rect(gp= gpar(fill=col[ic])) grid.text(col[ic], gp=gpar(col=txt.col)) upViewport() ic <- ic+1 } } upViewport() invisible(gl) } plotCol(c(paste0("darkorange", c("",1:2)), paste0("darkgoldenrod", 1:2), "orange", "orange1", "sandybrown", "tan1", "tan2"), nrow=2) nearRcolor <- function(rgb, cSpace = c("hsv", "rgb255", "Luv", "Lab"), dist = switch(cSpace, "hsv" = 0.10, "rgb255" = 30, "Luv" = 15, "Lab" = 12)) { if(is.character(rgb)) rgb <- col2rgb(rgb) stopifnot(length(rgb <- as.vector(rgb)) == 3) Rcol <- col2rgb(.cc <- colors()) uniqC <- !duplicated(t(Rcol)) Rcol <- Rcol[, uniqC] ; .cc <- .cc[uniqC] cSpace <- match.arg(cSpace) convRGB2 <- function(Rgb, to) t(convertColor(t(Rgb), from="sRGB", to=to, scale.in=255)) TransF <- switch(cSpace, "rgb255" = identity, "hsv" = rgb2hsv, "Luv" = function(RGB) convRGB2(RGB, "Luv"), "Lab" = function(RGB) convRGB2(RGB, "Lab")) d <- sqrt(colSums((TransF(Rcol) - as.vector(TransF(rgb)))^2)) iS <- sort.list(d[near <- d <= dist]) setNames(.cc[near][iS], format(zapsmall(d[near][iS]), digits=3)) } nearRcolor(col2rgb("tan2"), "rgb") nearRcolor(col2rgb("tan2"), "hsv") nearRcolor(col2rgb("tan2"), "Luv") nearRcolor(col2rgb("tan2"), "Lab") nearRcolor("tan2") plotCol(nearRcolor("deepskyblue", "rgb", dist=50)) plotCol(nearRcolor("deepskyblue", dist=.1)) plotCol(nearRcolor("tomato", "rgb", dist= 50), nrow=3) plotCol(nearRcolor("tomato", "hsv", dist=.12), nrow=3) plotCol(nearRcolor("tomato", "Luv", dist= 25), nrow=3) plotCol(nearRcolor("tomato", "Lab", dist= 18), nrow=3)
context('transform-fitted') model = modelTest test_that('NULL', { out = transformFitted(NULL, model) expect_true(all(is.na(out))) }) test_that('matrix', { refdata = model.data(model) mat = matrix(refdata$Value, nrow = nobs(model), ncol = nClusters(model)) colnames(mat) = clusterNames(model) out = transformFitted(mat, model, clusters = NULL) expect_is(out, 'matrix') expect_true(noNA(out)) expect_true(nrow(out) == nobs(model)) expect_true(ncol(out) == nClusters(model)) }) test_that('matrix in swapped column order', { refmat = matrix(rep(1:2, each = nobs(model)), ncol = 2) colnames(refmat) = clusterNames(model) mat = refmat[ , c(2, 1)] out = transformFitted(mat, model, clusters = NULL) expect_equal(out, refmat) }) test_that('matrix cluster', { mat = matrix(rep(1:2, each = nobs(model)), ncol = 2) colnames(mat) = clusterNames(model) out = transformFitted(mat, model, clusters = trajectoryAssignments(model)) expect_true(is.numeric(out)) expect_length(out, nobs(model)) expect_equal(out, as.integer(trajectoryAssignments(model)[make.idRowIndices(model)])) }) test_that('list', { mat = matrix(rep(1:2, each = nobs(model)), ncol = 2) colnames(mat) = clusterNames(model) lis = list( data.frame(Fit = rep(2, nobs(model))), data.frame(Fit = rep(1, nobs(model))) ) %>% set_names(rev(clusterNames(model))) out = transformFitted(lis, model, clusters = NULL) expect_equal(out, mat) }) test_that('data.frame', { df = data.frame( Fit = rep(1:2, each = nobs(model)), Cluster = rep(clusterNames(model), each = nobs(model)) ) out = transformFitted(df, model, clusters = NULL) expect_is(out, 'matrix') expect_true(all(out[, 1] == 1)) expect_true(all(out[, 2] == 2)) }) test_that('reversed data.frame', { df = data.frame( Fit = rep(2:1, each = nobs(model)), Cluster = rep(rev(clusterNames(model)), each = nobs(model)) ) out = transformFitted(df, model, clusters = NULL) expect_equal(colnames(out), clusterNames(model)) expect_true(all(out[, 1] == 1)) expect_true(all(out[, 2] == 2)) })
rotonto <- function(x,y,scale=FALSE,signref=TRUE,reflection=TRUE,weights=NULL,centerweight=FALSE,...) { xorig <- x yorig <- y reflect=0 k <- nrow(x) m <- ncol(x) xrows <- rowSums(x) yrows <- rowSums(y) xbad <- which(as.logical(is.na(xrows) + is.nan(xrows))) ybad <- which(as.logical(is.na(yrows) + is.nan(yrows))) bad <- sort(unique(c(xbad,ybad))) docenter <- FALSE if (length(centerweight) == 1) { if (centerweight) docenter <- TRUE centerweight <- weights } else { docenter <- TRUE } if (length(bad)) { message("some landmarks are missing and ignored for calculating the transform") x <- x[-bad,,drop=FALSE] y <- y[-bad,,drop=FALSE] if (!is.null(weights)) weights <- weights[-bad] if (!is.logical(centerweight[1])) centerweight <- centerweight[-bad] } if (!is.null(weights)) weights <- weights/sum(weights) if (!is.null(centerweight)) centerweight <- centerweight/sum(centerweight) if (nrow(x) > 1) { X <- scale(x, scale=FALSE) Y <- scale(y, scale=FALSE) } else { X <- x Y <- y } if (docenter && !is.null(centerweight)) { xcent <- apply(X,2,weighted.mean,w=centerweight) ycent <- apply(Y,2,weighted.mean,w=centerweight) X <- scale(X,scale=F,center=xcent) Y <- scale(Y,scale=F,center=ycent) } if (!is.null(weights)) { Dn <- diag(weights) X1 <- Dn%*%X Y1 <- Dn%*%Y XY <- crossprod(X1,Y1) } else { XY <- crossprod(X,Y) } sv1 <- svd(XY) gamm <- tcrossprod(sv1$v,sv1$u) if(sign(det(gamm))<1) { reflect <- 1 if (signref && reflection) cat("reflection involved\n") if (!reflection) { u <- sv1$u v <- sv1$v chk1 <- Re(prod(eigen(v)$values)) chk2 <- Re(prod(eigen(u)$values)) if ((chk1 < 0) && (chk2 > 0)) { v[, dim(v)[2]] <- v[, dim(v)[2]] * (-1) gamm <- v %*% t(u) } if ((chk2 < 0) && (chk1 > 0)) { u[, dim(u)[2]] <- u[, dim(u)[2]] * (-1) gamm <- v %*% t(u) } } } trans <- x[1,]-X[1,] transy <- y[1,]-Y[1,] del <- sv1$d Yorig <- sweep(yorig,2,transy) ctrace <- function(MAT) sum(diag(crossprod(MAT))) if (scale) { if (!is.null(weights)) bet <- sum(del)/ctrace(Y1) else bet <- sum(del)/ctrace(Y) yrot <- bet*Yorig%*%gamm } else { bet <- 1 yrot <- Yorig%*%gamm } Y <- yrot yrot <- t(t(yrot)+trans) matlist <- list(yrot=yrot,Y=Y,X=X) matlist$X <- sweep(xorig,2,trans) out <- list(yrot=matlist$yrot,Y=matlist$Y,X=matlist$X,trans=trans,transy=transy,gamm=gamm,bet=bet,reflect=reflect) class(out) <- "rotonto" return(out) } rotreverse <- function(mat,rot)UseMethod("rotreverse") rotreverse.matrix <- function(mat,rot){ hmat <- solve(getTrafo4x4(rot)) out <-homg2mat(hmat%*%mat2homg(mat)) return(out) } rotreverse.mesh3d <- function(mat,rot) { x <- rotreverse(vert2points(mat),rot) mat$vb[1:3,] <- t(x) if (!is.null(mat$normals)) mat <- vcgUpdateNormals(mat) return(mat) } getTrafo4x4 <- function(x)UseMethod("getTrafo4x4") getTrafo4x4.rotonto <- function(x) { m <- ncol(x$gamm) hgamm <- rbind(cbind(x$gamm,0),0);hgamm[m+1,m+1] <- 1 htrans <- diag(m+1);htrans[1:m,m+1] <- c(-x$transy) htrans2 <- diag(m+1);htrans2[1:m,m+1] <- c(x$trans) scale <- diag(m+1);diag(scale)[1:m] <- x$bet hall <- htrans2%*%scale%*%t(hgamm)%*%htrans return(hall) } mat2homg <- function(x) { x <- rbind(t(x),1) return(x) } homg2mat <- function(x) { m <- nrow(x) x <- t(x[1:(m-1),]) return(x) }
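## Usage sketch for rotonto()/rotreverse(): superimpose landmark matrix y
## onto x and map points back again. Only base R is needed; the example
## data are synthetic (a translated copy, so the fit recovers x exactly).
if (FALSE) {
  set.seed(1)
  x <- matrix(rnorm(30), 10, 3)
  y <- x + 2                           # translated copy of x
  fit <- rotonto(x, y, scale = TRUE)
  all.equal(fit$yrot, x)               # TRUE up to numerical error
  back <- rotreverse(fit$yrot, fit)    # undo the transform: back == y
}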
rtext <- R6::R6Class( classname = "rtext", active = NULL, inherit = rtext_tokenize, lock_objects = TRUE, class = TRUE, portable = TRUE, lock_class = FALSE, cloneable = TRUE, parent_env = asNamespace('rtext'), private = list(), public = list() )
layer_embedding <- function(object, input_dim, output_dim, embeddings_initializer = "uniform", embeddings_regularizer = NULL, activity_regularizer = NULL, embeddings_constraint = NULL, mask_zero = FALSE, input_length = NULL, batch_size = NULL, name = NULL, trainable = NULL, weights = NULL) { create_layer(keras$layers$Embedding, object, list( input_dim = as.integer(input_dim), output_dim = as.integer(output_dim), embeddings_initializer = embeddings_initializer, embeddings_regularizer = embeddings_regularizer, activity_regularizer = activity_regularizer, embeddings_constraint = embeddings_constraint, mask_zero = mask_zero, input_length = if (!is.null(input_length)) as.integer(input_length) else NULL, batch_size = as_nullable_integer(batch_size), name = name, trainable = trainable, weights = weights )) }
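## Usage sketch, assuming the 'keras' R package with a working TensorFlow
## backend: embed a 1000-word vocabulary into 64 dimensions for length-10
## integer sequences (the standard Keras embedding setup).
if (FALSE) {
  library(keras)
  model <- keras_model_sequential() %>%
    layer_embedding(input_dim = 1000, output_dim = 64, input_length = 10)
  # input shape (batch, 10) of integer word ids -> output (batch, 10, 64)
}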
kmdist <- function(data, struct) { if (!inherits(data, "matrix")) { stop(sprintf("%s must be of class %s.", dQuote("data"), dQuote("matrix"))) } if (any(data != 1*as.logical(data))) { stop(sprintf("%s must be a binary matrix.", dQuote("data"))) } if (!inherits(struct, "matrix")) { stop(sprintf("%s must be of class %s.", dQuote("struct"), dQuote("matrix"))) } if (any(struct != 1*as.logical(struct))) { stop(sprintf("%s must be a binary matrix.", dQuote("struct"))) } if (dim(data)[2] != dim(struct)[2]) { stop(sprintf("%s and %s have different item numbers!", dQuote("data"), dQuote("struct"))) } distvec <- rep(0, dim(struct)[2]+1) names(distvec) <- 0:dim(struct)[2] apply(data, MARGIN = 1, function(rp) { d <- min(apply(struct, MARGIN = 1, kmsetdistance, rp)) distvec[d+1] <<- distvec[d+1] + 1 }) distvec }
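## Usage sketch for kmdist(): tabulate, over all response patterns in
## `data`, the minimal symmetric set distance to a knowledge structure
## `struct`. Note the kmsetdistance() helper is assumed to come from the
## same package as kmdist() and is not defined here.
if (FALSE) {
  data   <- matrix(c(1,0,0, 1,1,0, 0,1,1), nrow = 3, byrow = TRUE)
  struct <- matrix(c(0,0,0, 1,0,0, 1,1,1), nrow = 3, byrow = TRUE)
  kmdist(data, struct)   # named counts of distances 0, 1, ..., ncol(struct)
}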
error_message <- function(h,NL){ if (sum(h)> NL) { h.string <- as.character(h) for (cd in 1:length(h.string)) { if (cd==1){ s<-""; s <- paste(h.string[cd],sep = "+")} if ( !cd==1)s <- paste(s,h.string[cd],sep = "+") } sum.of.h <- s sum.of.h <- paste(sum.of.h,"=",as.character( sum(h) ) ) small_margin() plot(0,0,type="n", axes=FALSE,xlim=c(0,1),ylim =c(0,1),xaxt="n", yaxt="n",xlab="Please fix inconsistent data",ylab="",main="Error: Inconsistent Data \n In a baseball game, \n a batter's number of hits cannot be greater than his number of at-bats") graphics::text(0.5,0.8,c("*Currently, the sum of the number of hits is greater than the number of lesions; \n\n", expression(paste(h1+h2+h3+... , " > Number of Lesions")) ),col="blue",cex = 1.4 ) graphics::text(0.5,0.65,paste("In the current input data, the values are as follows: " ),col="black",cex = 1.5 ) graphics::text(0.5,0.5,paste( sum.of.h , " > ",NL,sep = ""),col="red",cex = 2) graphics::text(0.5,0.3,c("* Please fix the data so that the inequality is reversed \n", expression(paste(h1+h2+h3+... , " < Number of lesions"))),col="blue",cex = 1.5 ) graphics::text(0.5,0.1,c("* Should decrease the number of hits or \n should increase the number of lesions"),col="blue",cex = 1.5 ) } }
tar_test("tar_make_interactive()", { dir.create(path_store_default()) code <- c("list(", "tar_target(x, \"a\"),", "tar_target(y, \"b\")", ")") tar_make_interactive(code) expect_true(file.exists("_targets")) expect_equal(tar_option_get("envir")$x, "a") expect_equal(tar_option_get("envir")$y, "b") }) tar_test("tar_make_interactive() with dynamic branching", { dir.create(path_store_default()) code <- c( "list(", "tar_target(x, letters[seq_len(2)]),", "tar_target(y, toupper(x), pattern = map(x))", ")" ) tar_make_interactive(code) expect_true(file.exists("_targets")) expect_equal(tar_option_get("envir")$x, letters[seq_len(2)]) y <- tar_option_get("envir")$y expect_equal(unname(tar_option_get("envir")$y), LETTERS[seq_len(2)]) y <- tar_option_get("envir")$y y1 <- names(y[y == "A"]) y2 <- names(y[y == "B"]) expect_equal(unname(tar_option_get("envir")[[y1]]), "A") expect_equal(unname(tar_option_get("envir")[[y2]]), "B") })
NULL tt_available <- function(auth = github_pat()) { tt_year <- sort(tt_years(), decreasing = TRUE) datasets <- setNames(vector("list", length(tt_year)), tt_year) for(year in tt_year){ datasets[[as.character(year)]] <- tt_datasets(year, auth = auth) } structure(datasets, class = c("tt_dataset_table_list") ) } tt_datasets <- function(year, auth = github_pat()) { if (!year %in% tt_years()) { stop( paste0( "Invalid `year` provided to list available tidytuesday datasets.", "\n\tUse one of the following years: ", paste(tt_years(), collapse = ", "), "." ) ) } files <- github_sha(file.path("data", year)) readme <- grep( pattern = "readme", files$path, value = TRUE, ignore.case = TRUE ) readme_html <- github_html(file.path("data", year, readme), auth = auth) readme_html <- read_html(gsub( "\\n", "", gsub( x = as.character(readme_html), pattern = "<a href=\\\"(\\d+)(-\\d+-\\d+)(\\/readme.+)*\\\">", replacement = paste0("<a href=\\\"https:\\/\\/github.com\\/", "rfordatascience\\/tidytuesday\\/tree\\/master\\/", "data\\/\\1\\/\\1\\2\\\">"), perl = TRUE ) )) datasets <- readme_html %>% html_table() %>% `[[`(1) structure(datasets, .html = readme_html, class = "tt_dataset_table") } NULL print.tt_dataset_table <- function(x, ..., is_interactive = interactive()) { if(is_interactive){ make_tt_dataset_html(x, file = tmpHTML <- tempfile(fileext = ".html")) html_viewer(tmpHTML) }else { print(data.frame(unclass(x))) } invisible(x) } make_tt_dataset_html <- function(x, file = tempfile(fileext = ".html")){ readme <- attr(x,".html") write_html(readme, file = file) invisible(readme) } print.tt_dataset_table_list <- function(x, ...,is_interactive = interactive()) { if (is_interactive) { make_tt_dataset_list_html(x, file = tmpHTML <- tempfile(fileext = ".html")) html_viewer(tmpHTML) } else { names(x) %>% purrr::map( function(.x, x) { list( table = data.frame(unclass(x[[.x]])), year = .x ) }, x = x ) %>% purrr::walk( function(.x) { cat(paste0("Year: ", .x$year, "\n\n")) print(.x$table) cat("\n\n") } ) } invisible(x) } make_tt_dataset_list_html <- function(x, file = tempfile(fileext = ".html")){ readme <- names(x) %>% purrr::map_chr( function(.x, x) { year_table <- attr(x[[.x]],".html") %>% html_node("table") paste("<h2>",.x,"</h2>", as.character(year_table), "") }, x = x ) %>% paste(collapse = "") readme <- paste( "<article class='markdown-body entry-content' itemprop='text'>", paste("<h1>TidyTuesday Datasets</h1>", readme),"</article>" ) %>% read_html() %>% github_page() write_html(readme, file = file) invisible(readme) }
hello <- function() 'Hello World'
IR_bands <- function(std = "ISO") { if (std == "ISO") { list(NIR(std), MIR(std), FIR(std)) } else if (std == "CIE") { list(IRA(std), IRB(std), IRC(std)) } else { warning("'std' argument value not implemented.") list() } }
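## Usage sketch, assuming the waveband constructors NIR()/MIR()/FIR() and
## IRA()/IRB()/IRC() (as in the 'photobiologyWavebands' package) are in scope.
if (FALSE) {
  IR_bands("ISO")   # list of ISO infrared bands: NIR, MIR, FIR
  IR_bands("CIE")   # list of CIE infrared bands: IRA, IRB, IRC
  IR_bands("foo")   # warning + empty list
}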
if (FALSE){ source("rreport.s") getReference("sae", "ser.adv withdr") getReference("Osae", "ser.advWITHDR") getReference("Osae", "ser.advCARDIO") getReference("sae", "ser.adv cardio") getRefsByKey("sae") getLabelsByKey("sae") getReferenceString("Osae") getReferenceObject() } getReferenceObject <- function(){ refD <- options("rreport.reference.list")[[1]] if (is.null(refD)){ refD <- data.frame(marker=c(), keyword=c(), label=c()) } refD } putReferenceObject <- function(refD){ options(rreport.reference.list=refD) } print.latexReference <- function(refD){ if (is.null(refD)){ cat("The list of markers has not been created. Use function getReferenceObject() to create it\n") }else{ print(refD) } } updateMarkers <- function(newMarker, keyword="", label=""){ refD = getReferenceObject() if (newMarker %in% refD$marker){ stop(paste("Duplicated marker", newMarker)) } newM <-data.frame(marker=newMarker, keyword=keyword, label=label) newM$marker <- as.character(newM$marker) newM$keyword <- as.character(newM$keyword) newM$label <- as.character(newM$label) refD = rbind(refD, newM) for (n in names(refD)) refD[[n]] <- as.character(refD[[n]]) putReferenceObject(refD) } generateRef <- function(){ generate <- function(){paste("marker",abs(round(rnorm(1)*(10^8))), sep="")} existingMarkers <- getRefsByKey() newMarker <- generate() while (newMarker %in% existingMarkers){ newMarker <- generate() } newMarker } getReference <- function(keyword="", label=""){ newMarker <- generateRef() updateMarkers(newMarker = newMarker, keyword=keyword, label=label) newMarker } getRefsByKey <- function(keyword=NULL){ refD = getReferenceObject() if (!is.null(keyword)){ refD$marker[refD$keyword==keyword] }else{ refD$marker } } getLabelsByKey <- function(keyword=NULL){ refD = getReferenceObject() if (!is.null(keyword)){ refD$label[refD$keyword==keyword] }else{ refD$label } } getReferenceString <- function(keyword){ markers <- getRefsByKey(keyword) labels <- getLabelsByKey(keyword) keys <- paste(labels," in section ", "\\ref{", markers, "}", " (page ", "\\pageref{",markers,"}",")", sep="") paste("See", paste(keys, collapse=", ")) }
pos.to.env <- function(x) { if(x == -1) { return(parent.frame(n = 2)) } if(x < 1) { stop("invalid 'pos' argument") } env <- .GlobalEnv while(x > 1) { if(identical(env, emptyenv())) { stop("invalid 'pos' environment") } env <- parent.env(env) x <- x - 1 } env }
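## Quick illustration of pos.to.env(): position 1 is the global environment,
## larger positions walk down the search() path, and -1 means the caller's
## environment (base R's pos.to.env behaves the same way).
if (FALSE) {
  identical(pos.to.env(1), .GlobalEnv)   # TRUE
  environmentName(pos.to.env(2))         # first attached package on search()
  f <- function() pos.to.env(-1)
  identical(f(), globalenv())            # TRUE when f() is called at top level
}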
library("testthat") library("mgcv") library("gratia") context("Testing smooth_samples() methods") set.seed(12398) dat <- gamSim(1, n = 400, dist = "normal", scale = 2, verbose = FALSE) m1 <- gam(y ~ s(x0), data = dat, method = "REML") m2 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, method = "REML") set.seed(34786) dat2 <- gamSim(4, verbose = FALSE) m3 <- gam(y ~ fac + s(x2, by = fac) + s(x0), data = dat2, method = "REML") set.seed(42) cont_by_data <- gamSim(3, n = 400, verbose = FALSE) cont_by_gam <- gam(y ~ s(x2, by = x1), data = cont_by_data) test_that("smooth_samples works for a continuous by GAM", { expect_silent(sm <- smooth_samples(cont_by_gam, n = 5, n_vals = 100, seed = 42)) expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 500L) expect_identical(NCOL(sm), 7L) }) test_that("smooth_samples works for a simple GAM", { expect_silent(sm <- smooth_samples(m1, n = 5, n_vals = 100, seed = 42)) expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 500L) expect_identical(NCOL(sm), 7L) }) test_that("smooth_samples works for a multi-smooth GAM", { expect_silent(sm <- smooth_samples(m2, n = 5, n_vals = 100, seed = 42)) expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 7L) }) test_that("smooth_samples works for a multi-smooth factor by GAM", { expect_silent(sm <- smooth_samples(m3, n = 5, n_vals = 100, seed = 42)) expect_s3_class(sm, c("smooth_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 8L) }) test_that("smooth_samples() fails if not suitable method available", { expect_error(smooth_samples(1:10), "Don't know how to sample from the posterior of <integer>", fixed = TRUE) }) test_that("smooth_samples sets seed when seed not provided", { expect_silent(smooth_samples(m2, seed = NULL)) }) test_that("smooth_samples works with term provided", { expect_silent(sm <- smooth_samples(m2, term = "s(x2)", seed = 42)) }) test_that("smooth_samples errors with invalid term provided", { expect_error(sm <- smooth_samples(m2, term = "s(x10)", seed = 42), "None of the terms matched a smooth.", fixed = TRUE) }) context("Testing fitted_samples() methods") test_that("fitted_samples works for a simple GAM", { expect_silent(sm <- fitted_samples(m1, n = 5, seed = 42)) expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "fitted")) }) test_that("fitted_samples works for a multi-smooth GAM", { expect_silent(sm <- fitted_samples(m2, n = 5, seed = 42)) expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "fitted")) }) test_that("fitted_samples works for a multi-smooth factor by GAM", { expect_silent(sm <- fitted_samples(m3, n = 5, seed = 42)) expect_s3_class(sm, c("fitted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "fitted")) }) test_that("fitted_samples sets seed when seed not provided", { expect_silent(fitted_samples(m2, seed = NULL)) }) test_that("fitted_samples() fails if not 
suitable method available", { expect_error(fitted_samples(1:10), "Don't know how to sample from the posterior of <integer>", fixed = TRUE) }) context("Testing predicted_samples() methods") test_that("predicted_samples works for a simple GAM", { expect_silent(sm <- predicted_samples(m1, n = 5, seed = 42)) expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "response")) }) test_that("predicted_samples works for a multi-smooth GAM", { expect_silent(sm <- predicted_samples(m2, n = 5, seed = 42)) expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "response")) }) test_that("predicted_samples works for a multi-smooth factor by GAM", { expect_silent(sm <- predicted_samples(m3, n = 5, seed = 42)) expect_s3_class(sm, c("predicted_samples", "posterior_samples", "tbl_df", "tbl", "data.frame")) expect_identical(NROW(sm), 2000L) expect_identical(NCOL(sm), 3L) expect_named(sm, expected = c("row", "draw", "response")) }) test_that("predicted_samples sets seed when seed not provided", { expect_silent(predicted_samples(m2, seed = NULL)) }) test_that("predicted_samples() fails if not suitable method available", { expect_error(predicted_samples(1:10), "Don't know how to sample from the posterior of <integer>", fixed = TRUE) }) context("Testing posterior_samples() methods") test_that("posterior_samples() fails if not suitable method available", { expect_error(posterior_samples(1:10), "Don't know how to sample from the posterior of <integer>", fixed = TRUE) })
report_add_slide = function (obnd, template = NULL, elements = NULL, verbose = TRUE){ msgs = c() isgood = TRUE general_types = c("imagefile", "ggplot", "table", "flextable", "flextable_object") if(obnd[["isgood"]]){ if(obnd[["rpttype"]] != "PowerPoint"){ isgood = FALSE msgs = c(msgs, paste0("The provided onbrand object is for rpttype >", obnd[["rpttype"]], "<")) msgs = c(msgs, "and should be PowerPoint") } } else { isgood = FALSE msgs = c(msgs, "Bad onbrand object supplied") } if(is.null(template)){ isgood = FALSE msgs = c(msgs, "No slide template was provided") } if(is.null(elements)){ isgood = FALSE msgs = c(msgs, "No slide elements were provided") } if(isgood){ if(template %in% names(obnd[["meta"]][["rpptx"]][["templates"]])){ td = obnd[["meta"]][["rpptx"]][["templates"]][[template]] if(all(names(elements) %in% names(td))){ for(phname in names(elements)){ allowed_types = c(td[[phname]][["content_type"]], general_types) if(!(elements[[phname]][["type"]] %in% allowed_types)){ isgood = FALSE msgs = c(msgs, paste0("The content type >", elements[[phname]][["type"]], "< is not allowed with")) msgs = c(msgs, paste0("the layout template >", template, "<")) msgs = c(msgs, paste0("The allowed values of type are:")) msgs = c(msgs, paste0(" ", paste(allowed_types, collapse=", "))) } } } else { isgood = FALSE msgs = c(msgs, paste0("The following placeholder elements were specified but")) msgs = c(msgs, paste0("not found in the mapping file: ")) msgs = c(msgs, paste0(" ", paste(names(elements)[!(names(elements) %in% names(td))], collapse =", "))) } } else { isgood = FALSE msgs = c(msgs, paste0("The slide template >", template, "< was not found")) msgs = c(msgs, paste0("in the mapping file.")) } } if(isgood){ obnd[["rpt"]] = officer::add_slide(x = obnd[["rpt"]], layout = template, master = obnd[["meta"]][["rpptx"]][["master"]]) for(phname in names(elements)){ if(is.null(td[[phname]][["ph_label"]])){ msgs = c(msgs, paste0("The named placeholder >", phname,"< is defined in the onbrand mapping file but is NULL")) msgs = c(msgs, "This can happen when a reporting workflow has a placeholder that is not implemented in") msgs = c(msgs, "a specific template. This element will be skipped and not added to the report.") } else { obnd = add_pptx_ph_content(obnd = obnd, content = elements[[phname]][["content"]], content_type = elements[[phname]][["type"]], ph_label = td[[phname]][["ph_label"]], verbose = verbose) } } } if(!isgood){ obnd[["isgood"]] = FALSE msgs = c(msgs, paste0("mapping file: ", obnd[["mapping"]])) msgs = c(msgs, "onbrand::report_add_slide()") } if(verbose & !is.null(msgs)){ message(paste(msgs, collapse="\n")) } if(!isgood){ stop("Unable to add PowerPoint Slide. See the messages above for details.") } obnd[["msgs"]] = msgs obnd}
context("Conditional Passage Time") br_function <- function(samc, col) { Q <- samc$q_matrix Q <- as.matrix(Q) qj <- Q[-col, col] Qj <- Q[-col, -col] I <- diag(nrow(Qj)) r <- samc@data@t_abs r <- r[-col] R <- cbind(r, qj) f <- solve(I - Qj) b <- as.matrix(f %*% R) bdg <- Matrix::sparseMatrix(i = 1:nrow(b), j = 1:nrow(b), x = b[, 2], index1 = TRUE) bdg <- as.matrix(bdg) res <- solve(bdg) %*% f %*% bdg %*% rep(1, nrow(bdg)) return(as.numeric(res)) } for(test in testlist) { if (!(test$id %in% c(1, 2))) next samc_obj <- test$samc samc_p <- samc(samc_obj$p_matrix) test_that("Testing cond_passage(samc, dest)", { base_result <- br_function(samc_obj, col_vec[1]) r1 <- cond_passage(samc_p, dest = col_vec[1]) r2 <- cond_passage(samc_p, dest = as.character(col_vec[1])) r1 <- r1[-col_vec[1]] r2 <- r2[-col_vec[1]] expect_equal(dim(r1), dim(base_result)) expect_equal(as.vector(r1), as.vector(base_result)) expect_equal(r1, r2) }) test_that("Testing cond_passage(samc, origin, dest)", { vector_result <- cond_passage(samc_p, row_vec, col_vec) vector_result_char <- cond_passage(samc_p, as.character(row_vec), as.character(col_vec)) expect_equal(vector_result, vector_result_char) for (i in 1:length(row_vec)) { base_result <- cond_passage(samc_obj, dest = col_vec[i]) r <- cond_passage(samc_p, origin = row_vec[i], dest = col_vec[i]) expect_equal(r, unname(base_result[row_vec[i]])) expect_equal(vector_result[i], r) } }) }
codisp <- function(x, y, coords, nclass = 13) { if (length(x) != length(y)) stop("'x' and 'y' must have the same length") if (!is.numeric(x)) stop("'x' must be a numeric vector") if (!is.numeric(y)) stop("'y' must be a numeric vector") OK <- complete.cases(x, y) x <- x[OK] y <- y[OK] n <- length(x) dnames <- colnames(cbind(x, y)) coords <- as.matrix(coords) p <- ncol(coords) if (p < 2) stop("'coords' must be a matrix with two columns") if (p > 2) warning("only the first two columns of 'coords' are considered") p <- 2 xpos <- coords[,1] ypos <- coords[,2] cnames <- colnames(coords)[1:p] ndist <- n * (n - 1) / 2 if (is.null(nclass)) nclass = as.integer(1.5 + 3.3 * log10(ndist)) dims <- c(n, p, nclass) now <- proc.time() z <- .C("codisp_coef", x = as.double(x), y = as.double(y), xpos = as.double(xpos), ypos = as.double(ypos), dims = as.integer(dims), upper.bounds = double(nclass), card = double(nclass), coef = double(nclass)) speed <- proc.time() - now o <- list(coef = z$coef, upper.bounds = z$upper.bounds, card = z$card) o$dims <- dims o$data.names <- dnames o$speed <- speed class(o) <- "codisp" return(o) } print.codisp <- function(x, digits = 4, ...) { z <- cbind(x$upper.bounds, x$card, x$coef) nclass <- x$dims[3] dimnames(z) <- list(1:nclass, c("Upper Bounds", "Cardinality", "Coefficient")) print(z, digits = digits) invisible(x) } plot.codisp <- function(x, ...) { nclass <- x$dims[3] incr <- x$upper.bounds[1] from <- incr / 2 midpoints <- seq(from = from, by = incr, length = nclass) y <- x$coef dnames <- paste(x$data.names, collapse = " and ", sep = "") plot(midpoints, y, type = "b", xlab = "distances", ylab = "codispersion coefficient") title(main = paste("Codispersion between ", dnames, sep = "")) invisible(x) }
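## Usage sketch for codisp(): codispersion between two spatial variables.
## Note it calls compiled C code ("codisp_coef", as in the 'SpatialPack'
## package), so it only runs with that shared library loaded.
if (FALSE) {
  set.seed(123)
  coords <- cbind(runif(50), runif(50))
  x <- rnorm(50)
  y <- 0.5 * x + rnorm(50)
  z <- codisp(x, y, coords)
  print(z)   # upper bounds, cardinalities, coefficients per distance class
  plot(z)    # coefficient against distance-class midpoints
}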
boostmtree <- function(x, tm, id, y, family = c("Continuous","Binary","Nominal","Ordinal"), y_reference = NULL, M = 200, nu = 0.05, na.action = c("na.omit","na.impute")[2], K = 5, mtry = NULL, nknots = 10, d = 3, pen.ord = 3, lambda, rho, lambda.max = 1e6, lambda.iter = 2, svd.tol = 1e-6, forest.tol = 1e-3, verbose = TRUE, cv.flag = FALSE, eps = 1e-5, mod.grad = TRUE, NR.iter = 3, ...) { if(Sys.info()["sysname"] == "Windows") { options(rf.cores = 1, mc.cores = 1) } if(length(family) != 1){ stop("Specify any one of the four families") } if(any(is.na( match(family,c("Continuous","Binary","Nominal","Ordinal"))))){ stop("family must be any one from Continuous, Binary, Nominal or Ordinal") } univariate <- FALSE if (missing(tm)) { id <- 1:nrow(x) } id.unq <- sort(unique(id)) n <- length(id.unq) if (length(id.unq) == length(id)) { univariate <- TRUE tm <- rep(0, n) d <- -1 } if(family == "Continuous"){ Q_set <- NA n.Q <- 1 } else { if(!is.numeric(y)){ y <- as.numeric(factor(y)) } y.unq <- sort(unique(y)) Q <- length(y.unq) if(family == "Nominal"){ if(is.null(y_reference)){ y_reference <- min(y.unq) } else { if(length(y_reference) != 1 || is.na(match(y_reference,y.unq))){ stop(paste("y_reference must take any one of the following:",y.unq,sep=" ")) } } Q_set <- setdiff(y.unq,y_reference) } if(family == "Ordinal") { Q_set <- setdiff(y.unq,max(y.unq)) } if(family == "Binary"){ Q_set <- setdiff(y.unq,min(y.unq)) } n.Q <- length(Q_set) } if (univariate) { mod.grad <- FALSE rho <- rep(0,n.Q) lambda.mat <- phi.mat <- rho.mat <- NULL } user.option <- list(...) if (any(is.na(id)) || any(is.na(y)) || any(is.na(tm))) { stop("missing values encountered y or id or tm: remove observations with missing values") } x <- as.data.frame(x) X <- do.call(rbind, lapply(1:n, function(i) { x[id == id.unq[i],, drop = FALSE][1,, drop = FALSE]})) x <- do.call(rbind, lapply(1:n, function(i) {x[id == id.unq[i],, drop = FALSE]})) id <- unlist(lapply(1:n,function(i){ id[id == id.unq[i] ] })) tm <- unlist(lapply(1:n,function(i){ tm[id == id.unq[i] ] })) y <- unlist(lapply(1:n,function(i){ y[id == id.unq[i] ] })) if(any(is.na(X))){ RemoveMiss.Obj <- RemoveMiss.Fun(X) X <- RemoveMiss.Obj$X id_all_na <- RemoveMiss.Obj$id.remove id_any_na <- which(unlist(lapply(1:nrow(X),function(i){ any(is.na(X[i,])) } ))) if(na.action == "na.omit"){ id_na <- id_any_na } else { id_na <- id_all_na } if( length(id_na) > 0 ){ id.remove <- id.unq[id_na] id.unq <- setdiff(id.unq,id.remove) n <- length(id.unq) tm <- unlist(lapply(1:n,function(i){ tm[id == id.unq[i] ] })) y <- unlist(lapply(1:n,function(i){ y[id == id.unq[i] ] })) x <- do.call(rbind, lapply(1:n, function(i) {x[id == id.unq[i],, drop = FALSE]})) id <- unlist(lapply(1:n,function(i){ id[id == id.unq[i] ] })) } } p <- ncol(X) xvar.names <- colnames(X) if(family == "Continuous"){ Ymean <- mean(y, na.rm = TRUE) Ysd <- sd(y, na.rm = TRUE) if (Ysd < 1e-6) { Ysd <- 1 } } else{ Ymean <- 0 Ysd <- 1 } ni <- unlist(lapply(1:n, function(i) {sum(id == id.unq[i])})) tm.unq <- sort(unique(tm)) n.tm <- length(tm.unq) tm.id <- lapply(1:n, function(i) { tm.i <- tm[id == id.unq[i]] match(tm.i, tm.unq) }) tm.list <- lapply(1:n, function(i) {tm[id == id.unq[i]]}) if (nknots < 0) { warning("bsplines require a positive number of knots: eliminating b-spline fitting") d <- 0 } if (d >= 1) { if (n.tm > 1) { bs.tm <- bs(tm.unq, df = nknots + d, degree = d) X.tm <- cbind(1, bs.tm) attr(X.tm, "knots") <- attr(bs.tm, "knots") attr(X.tm, "Boundary.knots") <- attr(bs.tm, "Boundary.knots") } else { X.tm <- cbind(1, 
cbind(tm.unq)) } } else { X.tm <- cbind(rep(1, n.tm)) lambda <- rep(0,n.Q) } df.D <- ncol(X.tm) D <- lapply(1:n, function(i) { cbind(X.tm[tm.id[[i]],, drop = FALSE]) }) nu <- {if (length(nu) > 1) nu else rep(nu, 2)} if (sum(!(0 < nu & nu <= 1)) > 0) { stop("regularization parameter (nu) must be in (0,1]") } nu.vec <- c(nu[1], rep(nu[2], df.D - 1)) ntree <- is.hidden.ntree(user.option) bootstrap <- is.hidden.bootstrap(user.option) bst.frac <- is.hidden.bst.frac(user.option) samp.mat <- is.hidden.samp.mat(user.option) if(bootstrap == "none"){ bootstrap <- "by.user" bst.frac <- 1 } if(bootstrap == "by.user"){ if(missing(bst.frac)){ bst.frac <- 0.632 } if(is.null(samp.mat)){ samp.mat <- matrix(NA,nrow = n,ncol = M) for(i in 1:M){ samp.value <- (sample(1:n,floor(bst.frac*n),replace = FALSE)) samp.value <- sort(c(samp.value,sample(samp.value, n - length(samp.value),replace = TRUE))) samp.value <- unlist(lapply(1:n,function(i){ sum(samp.value == i) })) samp.mat[,i] <- samp.value } } } nsplit <- is.hidden.nsplit(user.option) samptype <- is.hidden.samptype(user.option) if (ntree == 1) { nodesize <- max(1, round(n/(2 * K))) if(is.null(mtry)){ mtry <- p } } else { nodedepth <- max(0, log(max(0, K), base = 2)) nodesize <- 1 mtry <- NULL if (missing(lambda) || lambda < 0) { lambda <- rep(0,n.Q) } } xvar.wt <- is.hidden.xvar.wt(user.option) case.wt <- is.hidden.case.wt(user.option) seed.value <- is.hidden.seed.value(user.option) if (ntree > 1) { if (univariate) { learnerUsed <- "forest learner" } else { learnerUsed <- "mforest learner" } } else { if (df.D == 1) { learnerUsed <- "tree learner" } else { learnerUsed <- "mtree-Pspline learner" } } lambda.est.flag <- FALSE if(!missing(lambda)){ if(length(lambda) == 1){ lambda <- rep(lambda,n.Q) } } pen.lsq.matx <- penBSderiv(df.D - 1, pen.ord) if (!univariate && ntree == 1 && (missing(lambda) || lambda < 0)) { if (df.D >= (pen.ord + 2)) { lambda.est.flag <- TRUE pen.mix.matx <- penBS(df.D - 1, pen.ord) svd.pen <- svd(pen.mix.matx) d.zap <- svd.pen$d < svd.tol d.sqrt <- sqrt(svd.pen$d) d.sqrt[d.zap] <- 0 d.inv.sqrt <- 1 / sqrt(svd.pen$d) d.inv.sqrt[d.zap] <- 0 pen.inv.sqrt.matx <- svd.pen$v %*% (t(svd.pen$v) * d.inv.sqrt) lambda <- rep(0,n.Q) } else { warning("not enough degrees of freedom to estimate lambda: setting lambda to zero\n") lambda <- rep(0,n.Q) } } Yq <- lapply(1:n.Q,function(q){ if(family == "Continuous"){ out <- y } if(family == "Nominal" || family == "Binary"){ out <- ifelse(y == Q_set[q],1,0) } if(family == "Ordinal"){ out <- ifelse(y <= Q_set[q],1,0) } out }) Yorg <- lapply(1:n.Q,function(q){ lapply(1:n, function(i) { Yq[[q]][id == id.unq[i]] }) }) Y <- lapply(1:n.Q,function(q){ lapply(1:n, function(i) {(Yq[[q]][id == id.unq[i]] - Ymean) / Ysd}) }) if (ntree == 1) { baselearner <- membership.list <- gamma.list <- lapply(1:n.Q,function(q){ lapply(1:M,function(m){ NULL }) }) } else { membership.list <- gamma.list <- NULL baselearner <- vector("list", length = M) } if (!univariate) { lambda.mat <- phi.mat <- rho.mat <- matrix(NA,nrow = M,ncol = n.Q) } lambda.initial <- Ysd^2 rho.fit.flag <- TRUE rho.tree.grad <- 0 rho.hide <- is.hidden.rho(user.option) if (!is.null(rho.hide) && (rho.hide >= 0 && rho.hide < 1)) { rho.fit.flag <- FALSE rho <- rep(rho.hide,n.Q) } else { rho <- rep(0,n.Q) } sigma <- phi <- rep(1,n.Q) if (!lambda.est.flag) { sigma <- unlist(lapply(1:n.Q,function(q){ sigma.robust(lambda[q], rho[q]) })) } Y.names <- paste("Y", 1:df.D, sep = "") rfsrc.f <- as.formula(paste("Multivar(", paste(Y.names, collapse = ","), paste(") ~ ."), sep 
= "")) cv.flag <- cv.flag && (ntree == 1) cv.lambda.flag <- cv.flag && is.hidden.CVlambda(user.option) && lambda.est.flag cv.rho.flag <- cv.flag && is.hidden.CVrho(user.option) && rho.fit.flag l_pred_db <- lapply(1:n.Q,function(q){ lapply(1:n,function(i){ rep(0,ni[i]) }) }) if (cv.flag) { mu.cv.list <- lapply(1:n.Q,function(q){ vector("list", M) }) if(family == "Nominal"){ l_pred_ref.cv <- lapply(1:n,function(i){ rep(log(1/Q),ni[i]) }) } else { l_pred_ref.cv <- lapply(1:n,function(i){ rep(0,ni[i]) }) } l_pred.cv <- lapply(1:n.Q,function(q){ lapply(1:n, function(i) { l_pred_ref.cv[[i]] + l_pred_db[[q]][[i]] }) }) mu.cv <- lapply(1:n.Q,function(q){ lapply(1:n,function(i){ GetMu(Linear_Predictor = l_pred.cv[[q]][[i]],Family = family) }) }) l_pred_db.i <- lapply(1:n.Q,function(q){ lapply(1:n, function(i) { lapply(1:n, function(j) { rep(0,ni[j]) }) }) }) gamma.i.list <- lapply(1:n.Q,function(q){ lapply(1:M,function(m){ vector("list", length = n) }) }) err.rate <- lapply(1:n.Q,function(q){ err.rate_mat <- matrix(NA, M, 2) colnames(err.rate_mat) <- c("l1", "l2") err.rate_mat }) Mopt <- rmse <- rep(NA,n.Q) if(family == "Continuous"){ Ymean.i <- lapply(1:n.Q,function(q){ sapply(1:n, function(i) { mean(unlist(Yorg[[q]][-i]), na.rm = TRUE) }) }) Ysd.i <- lapply(1:n.Q,function(q){ sapply(1:n, function(i) { sd.i <- sd(unlist(Yorg[[q]][-i]), na.rm = TRUE) if (sd.i < 1e-6) { 1 } else { sd.i } }) }) } else{ Ymean.i <- lapply(1:n.Q,function(q){ unlist(lapply(1:n,function(i){ Ymean })) }) Ysd.i <- lapply(1:n.Q,function(q){ unlist(lapply(1:n,function(i){ Ysd })) }) } } else { err.rate <- rmse <- Mopt <- NULL } if(family == "Continuous"){ NR.iter <- 1 } if(family == "Nominal"){ l_pred_ref <- lapply(1:n,function(i){ rep(log(1/Q),ni[i]) }) } else { l_pred_ref <- lapply(1:n,function(i){ rep(0,ni[i]) }) } if (verbose) pb <- txtProgressBar(min = 0, max = M, style = 3) for (m in 1:M) { if(m == 1){ l_pred <- lapply(1:n.Q,function(q){ lapply(1:n, function(i) { l_pred_ref[[i]] + l_pred_db[[q]][[i]] }) }) mu <- lapply(1:n.Q,function(q){ lapply(1:n,function(i){ GetMu(Linear_Predictor =l_pred[[q]][[i]],Family = family) }) }) } if (verbose) setTxtProgressBar(pb, m) if (verbose && m == M) cat("\n") for(q in 1:n.Q) { VMat <- lapply(1:n,function(i){ VarTemp <- matrix(rho[q]*phi[q],ni[i],ni[i]) diag(VarTemp) <- phi[q] VarTemp }) inv.VMat <- lapply(1:n,function(i){ out <- tryCatch({ qr.solve(VMat[[i]])}, error = function(ex){NULL}) if(is.null(out)){ out <- diag(phi[q],nrow(VMat[[i]])) } out }) H_Mu <- lapply(1:n,function(i){ Transform_H(Mu = mu[[q]][[i]], Family = family) }) if (mod.grad == FALSE) { gm.mod <- t(matrix(unlist(lapply(1:n, function(i) { t(D[[i]])%*%H_Mu[[i]]%*%inv.VMat[[i]]%*%(Y[[q]][[i]] - mu[[q]][[i]]) })), nrow = df.D)) } else { gm.mod <- t(matrix(unlist(lapply(1:n, function(i) { t(D[[i]])%*%H_Mu[[i]]%*%(Y[[q]][[i]] - mu[[q]][[i]]) })), nrow = df.D)) } incoming.data <- cbind(gm.mod, X) names(incoming.data) = c(Y.names, names(X)) if (ntree > 1) { rfsrc.obj <- rfsrc(rfsrc.f, data = incoming.data, mtry = mtry, nodedepth = nodedepth, nodesize = nodesize, nsplit = nsplit, importance = "none", bootstrap = bootstrap, samptype = samptype, ntree = ntree, xvar.wt = xvar.wt, case.wt = case.wt, forest.wt = TRUE, memebership = TRUE) Kmax <- max(rfsrc.obj$leaf.count, na.rm = TRUE) baselearner[[m]] <- list(forest = rfsrc.obj) } else { rfsrc.obj <- rfsrc(rfsrc.f, data = incoming.data, ntree = 1, mtry = mtry, nodesize = nodesize, nsplit = nsplit, importance = "none", bootstrap = bootstrap, samptype = samptype, samp = 
if(bootstrap == "by.user") samp.mat[,m,drop = FALSE] else NULL, xvar.wt = xvar.wt, case.wt = case.wt, membership = TRUE, na.action = na.action, nimpute = 1, seed = seed.value) baselearner[[q]][[m]] <- rfsrc.obj result.pred <- predict.rfsrc(rfsrc.obj, membership = TRUE, ptn.count = K, importance = "none") membership <- membership.org <- c(result.pred$ptn.membership) membership.list[[q]][[m]] <- membership.org membership <- as.numeric(factor(membership)) ptn.id <- unique(membership) Kmax <- length(ptn.id) } if (ntree == 1) { if (lambda.est.flag) { transf.data <- papply(1:n, function(i) { if (ni[i] > 1) { ci <- rho.inv.sqrt(ni[i], rho[q]) R.inv.sqrt <- (diag(1, ni[i]) - matrix(ci, ni[i], ni[i])) / sqrt(1 - rho[q]) V.inv.sqrt <- phi[q]^(-1/2)*R.inv.sqrt } else { R.inv.sqrt <- cbind(1) V.inv.sqrt <- phi[q]^(-1/2)*R.inv.sqrt } if (cv.lambda.flag) { Ynew <- V.inv.sqrt %*% (Y[[q]][[i]] - mu.cv[[q]][[i]]) } else { Ynew <- V.inv.sqrt %*% (Y[[q]][[i]] - mu[[q]][[i]]) } mu.2 <- GetMu_Lambda(Linear_Predictor = 2*l_pred[[q]][[i]],Family = family) LambdaD <- Transform_H(mu.2,Family = family)%*%D[[i]] Xnew <- V.inv.sqrt %*% LambdaD[, 1, drop = FALSE] Znew <- V.inv.sqrt %*% LambdaD[, -1, drop = FALSE] %*% pen.inv.sqrt.matx list(Ynew = Ynew, Xnew = Xnew, Znew = Znew) }) lambda.hat <- lambda.initial for (k in 1:lambda.iter) { blup.obj <- blup.solve(transf.data, membership, lambda.hat, Kmax) lambda.obj <- lapply(1:Kmax, function(k) { pt.k <- (membership == k) Z <- do.call(rbind, lapply(which(pt.k), function(j) {transf.data[[j]]$Znew})) X <- do.call(rbind, lapply(which(pt.k), function(j) {transf.data[[j]]$Xnew})) Y <- unlist(lapply(which(pt.k), function(j) {transf.data[[j]]$Ynew})) ZZ <- t(Z) %*% Z rss <- (Y - X %*% c(blup.obj[[k]]$fix.eff))^2 robust.pt <- (rss <= quantile(rss, .99, na.rm = TRUE)) rss <- sum(rss[robust.pt], na.rm = TRUE) resid <- (Y - X %*% c(blup.obj[[k]]$fix.eff) - Z %*% c(blup.obj[[k]]$rnd.eff))^2 resid <- resid[robust.pt] return(list(trace.Z = sum(diag(ZZ)), rss = rss, resid = resid)) }) num <- sum(unlist(lapply(1:Kmax, function(k) {lambda.obj[[k]]$trace.Z})), na.rm = TRUE) den <- sum(unlist(lapply(1:Kmax, function(k) {lambda.obj[[k]]$rss})), na.rm = TRUE) N <- sum(unlist(lapply(1:Kmax, function(k) {lambda.obj[[k]]$resid})), na.rm = TRUE) if (!is.na(den) && den > (.99 * N)) { lambda.hat <- num / (den - .99 * N) } else { lambda.hat <- min(lambda.hat, lambda.max) } lambda.hat <- min(lambda.hat, lambda.max) } lambda[q] <- lambda.hat sigma[q] <- sigma.robust(lambda[q], rho[q]) } gamma <- lapply(1:Kmax, function(k) { pt.k <- (membership == k) if (sum(pt.k) > 0) { which.pt.k <- which(pt.k == TRUE) seq.pt.k <- seq(length(which.pt.k) ) gamma.NR.update <- rep(0,df.D) for(Iter in 1:NR.iter){ mu.NR.update <- lapply(which.pt.k,function(i){ l_pred_gamma <- l_pred[[q]][[i]] + c(D[[i]]%*%gamma.NR.update) out <- GetMu(Linear_Predictor = l_pred_gamma, Family = family) out }) CalD.i <- lapply(seq.pt.k,function(i){ out_H_Mat <- Transform_H(Mu = mu.NR.update[[i]], Family = family) out <- out_H_Mat%*%D[[ which.pt.k[i] ]] out }) HesMat.temp <- Reduce("+",lapply(seq.pt.k,function(i){ t(CalD.i[[i]])%*%inv.VMat[[ which.pt.k[i] ]]%*%CalD.i[[i]] })) HesMat <- HesMat.temp + (lambda[q]*pen.lsq.matx) ScoreVec.temp <- Reduce("+",lapply(seq.pt.k,function(i){ t(CalD.i[[i]])%*%inv.VMat[[ which.pt.k[i] ]]%*%(Y[[q]][[ which.pt.k[i] ]] - mu.NR.update[[ i ]] ) })) ScoreVec <- ScoreVec.temp - (lambda[q]*(pen.lsq.matx%*%gamma.NR.update)) qr.obj <- tryCatch({qr.solve(HesMat, ScoreVec)}, error = function(ex){NULL}) if 
(!is.null(qr.obj)) { qr.obj <- qr.obj } else { qr.obj <- rep(0, df.D) } gamma.NR.update <- gamma.NR.update + qr.obj } gamma.NR.update } else { rep(0, df.D) } }) gamma.matx <- matrix(0, Kmax, df.D + 1) gamma.matx[, 1] <- sort(unique(membership.org)) gamma.matx[, 2:(df.D+1)] <- matrix(unlist(gamma), ncol = df.D, byrow = TRUE) gamma.list[[q]][[m]] <- gamma.matx bhat <- t(matrix(unlist(lapply(1:n, function(i) { gamma[[membership[i]]]})), nrow = df.D) * nu.vec) l_pred_db[[q]] <- lapply(1:n, function(i) { l_pred_db_Temp <- l_pred_db[[q]][[i]] + c(D[[i]] %*% bhat[i, ]) if(family == "Ordinal" && q > 1){ l_pred_db_Temp <- ifelse(l_pred_db_Temp < l_pred_db[[q-1]][[i]],l_pred_db[[q-1]][[i]],l_pred_db_Temp) } l_pred_db_Temp }) if (cv.flag) { oob <- which(rfsrc.obj$inbag == 0) l_pred_db.i[[q]] <- lapply(1:n,function(i) { if( any(i == oob )){ mem.i <- membership[i] l_pred.ij <- l_pred_db.i[[q]][[i]] gamma.i <- lapply(1:Kmax, function(k) { pt.k <- (membership == k) which.pt.k <- setdiff(which(pt.k == TRUE) ,i) if (sum(pt.k) > 0 && length(which.pt.k) > 0) { seq.pt.k <- seq(length(which.pt.k) ) gamma.NR.update <- rep(0,df.D) for(Iter in 1:NR.iter){ mu.NR.update <- lapply(which.pt.k,function(j){ l_pred_gamma <- l_pred.ij[[j]] + c(D[[j]]%*%gamma.NR.update) out <- GetMu(Linear_Predictor = l_pred_gamma, Family = family) out }) CalD.i <- lapply(seq.pt.k,function(j){ out_H_Mat <- Transform_H(Mu = mu.NR.update[[j]], Family = family) out <- out_H_Mat%*%D[[ which.pt.k[j] ]] out }) HesMat.temp <- Reduce("+",lapply(seq.pt.k,function(j){ t(CalD.i[[j]])%*%inv.VMat[[ which.pt.k[j] ]]%*%CalD.i[[j]] })) HesMat <- HesMat.temp + (lambda[q]*pen.lsq.matx) ScoreVec.temp <- Reduce("+",lapply(seq.pt.k,function(j){ t(CalD.i[[j]])%*%inv.VMat[[ which.pt.k[j] ]]%*%(Y[[q]][[ which.pt.k[j] ]] - mu.NR.update[[ j ]] ) })) ScoreVec <- ScoreVec.temp - (lambda[q]*(pen.lsq.matx%*%gamma.NR.update)) qr.obj <- tryCatch({qr.solve(HesMat, ScoreVec)}, error = function(ex){NULL}) if (!is.null(qr.obj)) { qr.obj <- qr.obj } else { qr.obj <- rep(0, df.D) } gamma.NR.update <- gamma.NR.update + qr.obj } gamma.NR.update } else { rep(0, df.D) } }) gamma.matx.i <- matrix(0, Kmax, df.D + 1) gamma.matx.i[, 1] <- sort(unique(membership.org)) gamma.matx.i[, 2:(df.D+1)] <- matrix(unlist(gamma.i), ncol = df.D, byrow = TRUE) gamma.i.list[[q]][[m]][[i]] <<- gamma.matx.i l_pred_db.ij_Temp <- lapply(1:n,function(j){ which.j <- which(gamma.matx.i[, 1] == membership.org[j]) l_pred_db.ij_Temp <- l_pred.ij[[j]] + c(D[[j]] %*% (gamma.matx.i[which.j, -1] * nu.vec)) }) }else { l_pred_db.ij_Temp <- l_pred_db.i[[q]][[i]] } l_pred_db.ij_Temp }) if(family == "Ordinal" && q > 1){ for(i in 1:n){ for(j in 1:n){ l_pred_db.ij_Temp <- l_pred_db.i[[q]][[i]][[j]] l_pred_db.ij_Temp <- ifelse(l_pred_db.ij_Temp < l_pred_db.i[[q-1]][[i]][[j]],l_pred_db.i[[q-1]][[i]][[j]],l_pred_db.ij_Temp) l_pred_db.i[[q]][[i]][[j]] <- l_pred_db.ij_Temp } } } } } else{ forest.wt <- rfsrc.obj$forest.wt Xnew <- mclapply(1:n, function(i) { rmi <- rho.inv(ni[i], rho) Wi <- diag(1, ni[i]) - matrix(rmi, ni[i], ni[i]) t(D[[i]]) %*% Wi %*% D[[i]] }) bhat <- do.call("cbind", mclapply(1:n, function(i) { fwt.i <- forest.wt[i, ] fwt.i[fwt.i <= forest.tol] <- 0 pt.i <- (fwt.i != 0) if (sum(pt.i) > 0) { fwt.i <- fwt.i / sum(fwt.i) YnewSum <- colSums(fwt.i[pt.i] * gm.mod[pt.i,, drop = FALSE]) XnewSum <- Reduce("+", lapply(which(pt.i), function(j) {fwt.i[j] * Xnew[[j]]})) XnewSum <- XnewSum + sigma * pen.lsq.matx qr.obj <- tryCatch({qr.solve(XnewSum, YnewSum)}, error = function(ex){NULL}) if (!is.null(qr.obj)) { qr.obj 
} else { rep(0, df.D) } } else { rep(0, df.D) } })) bhat <- t(bhat * nu.vec) mu <- lapply(1:n, function(i) {mu[[i]] + D[[i]] %*% bhat[i, ]}) baselearner[[m]] <- c(baselearner[[m]], list(gm = gm.mod), list(Xnew = Xnew), list(pen = sigma * pen.lsq.matx)) } } if(cv.flag){ if(family == "Nominal"){ l_pred_ref.cv <- lapply(1:n,function(i){ log((1 + (Reduce("+",lapply(1:n.Q,function(q){ exp(l_pred_db.i[[q]][[i]][[i]]) }))))^{-1}) }) } for(q in 1:n.Q){ l_pred.cv <- lapply(1:n,function(i){ l_pred_ref.cv[[i]] + l_pred_db.i[[q]][[i]][[i]]}) mu.cv[[q]] <- lapply(1:n,function(i){ GetMu(Linear_Predictor = l_pred.cv[[i]],Family = family) }) l_pred.cv.org <- lapply(1:n,function(i){l_pred.cv[[i]] * Ysd.i[[q]][i] + Ymean.i[[q]][i]}) mu.cv.org <- lapply(1:n,function(i){ GetMu(Linear_Predictor = l_pred.cv.org[[i]],Family = family) }) mu.cv.list[[q]][[m]] <- mu.cv.org err.rate[[q]][m, ] <- c(l1Dist(Yorg[[q]], mu.cv.org), l2Dist(Yorg[[q]], mu.cv.org)) } } else { if(family == "Nominal"){ l_pred_ref <- lapply(1:n,function(i){ log((1 + (Reduce("+",lapply(1:n.Q,function(q){ exp(l_pred_db[[q]][[i]]) }))))^{-1}) }) } for(q in 1:n.Q){ l_pred[[q]] <- lapply(1:n,function(i){ l_pred_ref[[i]] + l_pred_db[[q]][[i]] }) mu[[q]] <- lapply(1:n,function(i){ GetMu(Linear_Predictor = l_pred[[q]][[i]],Family = family) }) } } for(q in 1:n.Q){ if (!univariate && rho.fit.flag) { if (cv.rho.flag) { resid.data <- data.frame(y = unlist(lapply(1:n, function(i) {Y[[q]][[i]] - mu.cv[[q]][[i]]})), x, tm = unlist(lapply(1:n, function(i) {tm[id == id.unq[i]]})), id = unlist(lapply(1:n, function(i) {rep(id.unq[i], ni[i])}))) } else { resid.data <- data.frame(y = unlist(lapply(1:n, function(i) {Y[[q]][[i]] - mu[[q]][[i]]})), x, tm = unlist(lapply(1:n, function(i) {tm[id == id.unq[i]]})), id = unlist(lapply(1:n, function(i) {rep(id.unq[i], ni[i])}))) } gls.obj <- tryCatch({gls(y ~ ., data = resid.data, correlation = corCompSymm(form = ~ 1 | id))}, error = function(ex){NULL}) if (is.null(gls.obj)) { gls.obj <- tryCatch({gls(y ~ 1, data = resid.data, correlation = corCompSymm(form = ~ 1 | id))}, error = function(ex){NULL}) } if (!is.null(gls.obj)) { phi[q] <- gls.obj$sigma^2 rho_temp <- as.numeric(coef(gls.obj$modelStruct$corStruc, unconstrained = FALSE)) rho[q] <- max(min(0.999, rho_temp, na.rm = TRUE), -0.999) } } if (!univariate) { phi.mat[m,q] <- phi[q] * Ysd^2 rho.mat[m,q] <- rho[q] } if (!univariate) { sigma[q] <- sigma.robust(lambda[q], rho[q]) lambda.mat[m,q] <- lambda[q] } } } if (cv.flag) { nullObj <- lapply(1:n.Q,function(q){ diff.err <- abs(err.rate[[q]][, "l2"] - min(err.rate[[q]][, "l2"], na.rm = TRUE)) diff.err[is.na(diff.err)] <- 1 if (sum(diff.err < Ysd * eps) > 0) { Mopt[q] <<- min(which(diff.err < eps)) } else { Mopt[q] <<- M } rmse[q] <<- err.rate[[q]][Mopt[q], "l2"] mu[[q]] <<- lapply(1:n,function(i){mu.cv.list[[q]][[ Mopt[q] ]][[i]]}) NULL }) }else { mu <- lapply(1:n.Q,function(q){ lapply(1:n,function(i){ l_pred_temp <- c(l_pred[[q]][[i]] * Ysd + Ymean) GetMu(Linear_Predictor = l_pred_temp,Family = family) }) }) } y <- lapply(1:n, function(i) {y[id == id.unq[i]]}) if(family == "Ordinal"){ Prob_class <- lapply(1:(n.Q+1),function(q){ if(q == 1){ out <- lapply(1:n,function(i){ mu[[q]][[i]] }) } if(q == (n.Q+1)){ out <- lapply(1:n,function(i){ 1 - mu[[q-1]][[i]] }) } if(q > 1 && q < (n.Q+1) ){ out <- lapply(1:n,function(i){ mu[[q]][[i]] - mu[[q-1]][[i]] }) } out }) } else { Prob_class <- NULL } obj <- list(x = X, xvar.names = xvar.names, time = lapply(1:n, function(i) {tm[id == id.unq[i]]}), id = id, y = y, Yorg = if(family == 
"Nominal" || family == "Ordinal") Yorg else unlist(Yorg,recursive=FALSE), family = family, ymean = Ymean, ysd = Ysd, na.action = na.action, n = n, ni = ni, n.Q = n.Q, Q_set = Q_set, y.unq = if(family != "Continuous") y.unq else NA, y_reference = y_reference, tm.unq = tm.unq, gamma = gamma.list, mu = if(family == "Nominal" || family == "Ordinal") mu else unlist(mu,recursive=FALSE), Prob_class = Prob_class, lambda = if(family == "Nominal" || family == "Ordinal") lambda.mat else as.vector(lambda.mat), phi = if(family == "Nominal" || family == "Ordinal") phi.mat else as.vector(phi.mat), rho = if(family == "Nominal" || family == "Ordinal") rho.mat else as.vector(rho.mat), baselearner = baselearner, membership = membership.list, X.tm = X.tm, D = D, d = d, pen.ord = pen.ord, K = K, M = M, nu = nu, ntree = ntree, cv.flag = cv.flag, err.rate = if (!is.null(err.rate)) { if(family == "Nominal" || family == "Ordinal") lapply(1:n.Q,function(q){ err.rate[[q]] / Ysd }) else err.rate[[1]]/Ysd } else NULL, rmse = if (!is.null(rmse)) unlist(lapply(1:n.Q,function(q){ rmse[q] / Ysd })) else NULL, Mopt = Mopt, gamma.i.list = if(cv.flag) gamma.i.list else NULL, forest.tol = forest.tol) class(obj) <- c("boostmtree", "grow", learnerUsed) invisible(obj) }
apg_lookup <- function(taxa, rank = "family") { if (!rank %in% c('family', 'order')) { stop("rank must be one of family or order") } af <- taxize_ds$apg_families if (rank == "family") { x <- af[af$family %in% taxa,]$synonym if (length(x) == 0) { message("no match found...") out <- NA_character_ } else { if (is.na(x)) { message("name is the same...") out <- taxa } else { message("new name...") out <- x } } } else { ao <- taxize_ds$apg_orders x <- ao[ao$order %in% taxa,]$synonym if (length(x) == 0) { message("no match found...") out <- NA_character_ } else { if (is.na(x)) { message("name is the same...") out <- taxa } else { message("new name...") out <- x } } } return(out) }
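## Usage sketch for apg_lookup(), assuming the taxize_ds dataset with APG
## family/order synonym tables (as shipped with the 'taxize' package).
if (FALSE) {
  apg_lookup("Malvaceae", rank = "family")   # accepted name, or NA if no match
  apg_lookup("Poales", rank = "order")
}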
demo_data combs <- combi(data = demo_data, signalthr = 450, combithr = 1) combs_with(markers = c("Marker1", "Marker2"), markers_table = combs)
makeCOVbin<-function(X, K, cutoffs, adjust0bin=TRUE,...){ X = X[!is.na(X)] data_temp<-data.frame(X=X) range_temp<-range(X) cutoffsOrig = cutoffs if(min(cutoffs)>=range_temp[1]){ cutoffs<-c(range_temp[1]-stats::sd(X)*0.1,cutoffs) } if(max(cutoffs)<=range_temp[2]){ cutoffs<-c(cutoffs,range_temp[2]+stats::sd(X)*0.1) } cut_temp<-cut(X, breaks=cutoffs) if(adjust0bin){ if(sum(table(cut_temp)==0)!=0){ cutoffs<-cutoffs[-which(table(cut_temp)==0)] cut_temp<-cut(X, breaks=cutoffs) } } tab<-plyr::ddply(data_temp,plyr::.(cut_temp), plyr::summarise, mid_COV=round(stats::median(X, na.rm=T),2),.drop=FALSE) cutoffs[c(1,length(cutoffs))] = range_temp LU_temp = cbind(cutoffs[-length(cutoffs)],cutoffs[-1]) colnames(LU_temp)<-c("lower_COV","upper_COV") mid_LU<-apply(LU_temp,1,mean) tab<-data.frame(tab,n_bin=c(table(cut_temp)),LU_temp,mid_LU=mid_LU) return(list(COV_bin=cut_temp,COVbin_summary=tab)) }
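## Usage sketch for makeCOVbin(): bin a covariate at given cutoffs and
## summarise each bin (needs the 'plyr' package, as used above).
if (FALSE) {
  set.seed(1)
  X <- rnorm(200, mean = 50, sd = 10)
  res <- makeCOVbin(X, K = 4, cutoffs = c(40, 50, 60))
  res$COVbin_summary   # per-bin n, median, and lower/upper bounds
  table(res$COV_bin)
}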
print.frontier <- function( x, digits = NULL, ... ) { if( is.null( digits ) ) { digits <- max( 3, getOption( "digits" ) - 3 ) } cat( "\nCall:\n" ) cat( deparse( x$call ) ) cat( "\n\n" ) cat( "Maximum likelihood estimates\n" ) print.default( format( coef( x ), digits = digits ), print.gap = 2, quote = FALSE ) invisible( x ) }
lgb.unloader <- function(restore = TRUE, wipe = FALSE, envir = .GlobalEnv) { try(detach("package:lightgbm", unload = TRUE), silent = TRUE) if (wipe) { boosters <- Filter( f = function(x) { inherits(get(x, envir = envir), "lgb.Booster") } , x = ls(envir = envir) ) datasets <- Filter( f = function(x) { inherits(get(x, envir = envir), "lgb.Dataset") } , x = ls(envir = envir) ) rm(list = c(boosters, datasets), envir = envir) gc(verbose = FALSE) } if (restore) { library(lightgbm) } return(invisible(NULL)) }
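## Usage sketch: detach lightgbm, optionally wiping lgb.Booster/lgb.Dataset
## handles from an environment first, then re-attach the package.
if (FALSE) {
  library(lightgbm)
  lgb.unloader(restore = TRUE, wipe = TRUE, envir = .GlobalEnv)
}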
expected <- "./Pkgs" test(id=9, code={ argv <- structure(list(".", "Pkgs"), .Names = c("", "")) do.call('file.path', argv); }, o = expected);
imd <- function(data=NULL,chr=NULL,position,extra=NULL){ arguments <- as.list(match.call()) chr = eval(arguments$chr, data) position = eval(arguments$position, data) extra = eval(arguments$extra, data) if (is.null(chr)){ chr<-c(rep("N",length(position))) } if (is.null(extra)){ data <- data.frame(chr,position) } else { data <- data.frame(chr,position,extra) } data$chr <- factor(data$chr,levels=c(1:22,"X","Y","M","N")) data <- data[order(data$chr,data$position),] data$number <- 1:nrow(data) data$distance <- ave(data$position,factor(data$chr), FUN=function(x) c(diff(x),NA)) data$log10distance<-round(log10(data$distance),digits=3) data<-data[complete.cases(data),] return(data) }
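## Usage sketch for imd(): per-chromosome inter-mutation distances on a
## small synthetic mutation table (the column names here are illustrative).
if (FALSE) {
  muts <- data.frame(chr = c(1, 1, 1, 2, 2),
                     position = c(100, 200, 1500, 50, 5000))
  imd(data = muts, chr = chr, position = position)
  # returns chr/position plus running number, distance, and log10 distance
}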
require(BiasedUrn) require(stats) ComparePlot <- function(m1, m2, n, odds) { xmin <- minHypergeo(m1, m2, n) xmax <- maxHypergeo(m1, m2, n) x <- xmin : xmax wnc <- dWNCHypergeo(x, m1, m2, n, odds) fnc <- dFNCHypergeo(x, m1, m2, n, odds) hyp <- dhyper(x, m1, m2, n) plot (x, wnc, type="l", col="blue", main = "Hypergeometric distributions", sub = "Blue = Wallenius, Red = Fisher, Green = Central", xlab = "x", ylab = "Probability") points (x, fnc, type="l", col="red") points (x, hyp, type="l", col="green") } ComparePlot(80, 60, 100, 0.5)
test_that("wk_rcrd works", { xy_rcrd <- structure(list(x = as.numeric(1:3), y = c(2, 2, 2)), class = "wk_rcrd") expect_identical(length(xy_rcrd), 3L) expect_identical( xy_rcrd[2], structure(list(x = 2, y = 2), class = "wk_rcrd") ) expect_identical(xy_rcrd[[2]], xy_rcrd[2]) expect_error(xy_rcrd$x, "is not meaningful") expect_identical(names(xy_rcrd), NULL) expect_identical(is.na(xy_rcrd), c(FALSE, FALSE, FALSE)) expect_identical(is.na(xy_rcrd[NA_integer_]), TRUE) expect_identical(is.na(xy_rcrd[integer(0)]), logical(0)) expect_identical(expect_output(print(xy_rcrd), "wk_rcrd"), xy_rcrd) expect_output(print(xy_rcrd[integer(0)]), "wk_rcrd") expect_output(expect_identical(str(xy_rcrd), xy_rcrd), "wk_rcrd") expect_output(expect_identical(str(xy_rcrd[integer(0)]), xy_rcrd[integer(0)]), "wk_rcrd\\[0\\]") expect_output(print(wk_set_crs(xy_rcrd, 1234)), "CRS=EPSG:1234") expect_length(format(xy_rcrd), 2) expect_length(as.character(xy_rcrd), 2) old_opt <- options(max.print = 1000) expect_output( print(structure(list(x = 1:1001), class = "wk_rcrd")), "Reached max.print" ) options(old_opt) xy_rcrd2 <- xy_rcrd names(xy_rcrd2) <- NULL expect_identical(xy_rcrd2, xy_rcrd) expect_error(names(xy_rcrd) <- "not null", "must be NULL") expect_identical(validate_wk_rcrd(xy_rcrd), xy_rcrd) expect_identical( rep(xy_rcrd, 2), structure(list(x = as.numeric(c(1:3, 1:3)), y = rep(2, 6)), class = "wk_rcrd") ) expect_identical( rep(xy_rcrd, 2), c(xy_rcrd, xy_rcrd) ) expect_error(c(xy_rcrd, 2), "Can't combine") expect_identical( as.matrix(xy_rcrd), matrix(c(1, 2, 3, 2, 2, 2), ncol = 2, dimnames = list(NULL, c("x", "y"))) ) expect_identical( as.data.frame(xy_rcrd), data.frame(x = c(1, 2, 3), y = c(2, 2, 2)) ) expect_identical( data.frame(col_name = xy_rcrd), new_data_frame(list(col_name = xy_rcrd)) ) }) test_that("geodesic gets printed for geodesic rcrd objects", { x_geod <- new_wk_rcrd( list(x = double()), template = structure(list(), class = c("some_wk_rcrd", "wk_rcrd")) ) s3_register("wk::wk_is_geodesic", "some_wk_rcrd", function(x) TRUE) expect_output(print(x_geod), "geodesic some_wk_rcrd") }) test_that("rep_len() works for wk_rcrd", { skip_if_not(packageVersion("base") >= "3.6") xy_rcrd <- structure(list(x = as.numeric(1:3), y = c(2, 2, 2)), class = "wk_rcrd") expect_identical( rep_len(xy_rcrd, 6), structure(list(x = as.numeric(c(1:3, 1:3)), y = rep(2, 6)), class = "wk_rcrd") ) }) test_that("c() for wk_rcrd handles crs attributes", { expect_identical( wk_crs(c(xy(0, 1, crs = wk_crs_inherit()), xy(0, 1, crs = 1234))), 1234 ) expect_error( wk_crs(c(xy(0, 1), xy(0, 1, crs = 1234))), "are not equal" ) })
context("Empty columns") test_that("Drops empty columns of data.tables and data.frames", { library(data.table) DT1 <- data.table(x1 = 1:5, x2 = sample(c(letters[1:5], NA), size = 5), x3 = runif(5), x4 = sample(c(rcauchy(4), NA)), x5 = c(TRUE, FALSE, TRUE, NA, TRUE), x6 = rep(NA_character_, 5), x7 = rep(NA_integer_, 5)) DF1 <- as.data.frame(DT1) output <- drop_empty_cols(DT1, copy = TRUE) expect_false(any(c("x6", "x7") %in% names(output))) expect_true(all(c("x6", "x7") %in% names(DT1))) output_no_copy <- drop_empty_cols(DT1) expect_false(any(c("x6", "x7") %in% names(output_no_copy))) expect_false(any(c("x6", "x7") %in% names(DT1))) output_DF <- drop_empty_cols(DF1) expect_false(any(c("x6", "x7") %in% names(output_DF))) expect_true(all(c("x6", "x7") %in% names(DF1))) }) test_that("Error handling", { expect_error(drop_empty_cols("x"), regexp = "must be a data\\.(frame|table)") expect_warning(drop_empty_cols(data.frame(x = c(1, 2)), copy = FALSE), regexp = "copy.*is FALSE, but .* not a data\\.table.") }) test_that("No empty cols", { DFi <- data.frame(x = c(1, 2), y = c(3, 4)) expect_identical(drop_empty_cols(DFi), DFi) })
tam_pv_mcmc_sample_beta_variance_lm_beta <- function(Y0, theta0, pweights0, D, use_lm=TRUE) { if (use_lm){ formula_theta <- theta0 ~ 0 + Y0 mod2 <- stats::lm( formula=formula_theta, weights=pweights0 ) beta2 <- mod2$coef beta2 <- matrix( beta2, ncol=D ) res2 <- stats::resid(mod2) } if (! use_lm){ mod <- stats::lm.wfit(y=theta0, x=Y0, w=pweights0) beta2 <- matrix( mod$coefficients, ncol=D) res2 <- matrix( mod$residuals, ncol=D ) } beta2[ is.na(beta2) ] <- 0 res <- list( beta2=beta2, res2=res2) return(res) }
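# Example (hypothetical data): weighted least-squares latent regression with
# D = 2 dimensions and an intercept-plus-covariate design matrix Y0
set.seed(1)
Y0 <- cbind(1, rnorm(50))
theta0 <- matrix(rnorm(100), nrow = 50, ncol = 2)
res <- tam_pv_mcmc_sample_beta_variance_lm_beta(Y0 = Y0, theta0 = theta0,
           pweights0 = rep(1, 50), D = 2)
res$beta2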
time_factor <- 1 if (is_on_github_actions()) time_factor <- 4 test_that("cache_mem: handling missing values", { d <- cache_mem() expect_true(is.key_missing(d$get("abcd"))) d$set("a", 100) expect_identical(d$get("a"), 100) expect_identical(d$get("y", missing = NULL), NULL) expect_error( d$get("y", missing = stop("Missing key")), "^Missing key$" ) d <- cache_mem(missing = NULL) expect_true(is.null(d$get("abcd"))) d$set("a", 100) expect_identical(d$get("a"), 100) expect_identical(d$get("y", missing = -1), -1) expect_error( d$get("y", missing = stop("Missing key")), "^Missing key$" ) d <- cache_mem(missing = stop("Missing key")) expect_error(d$get("abcd"), "^Missing key$") d$set("x", NULL) d$set("a", 100) expect_identical(d$get("a"), 100) expect_error(d$get("y"), "^Missing key$") expect_identical(d$get("y", missing = NULL), NULL) expect_true(is.key_missing(d$get("y", missing = key_missing()))) expect_error( d$get("y", missing = stop("Missing key 2")), "^Missing key 2$" ) expr <- rlang::quo(stop("Missing key")) d <- cache_mem(missing = !!expr) expect_error(d$get("y"), "^Missing key$") expect_error(d$get("y"), "^Missing key$") }) test_that("cache_mem: pruning respects max_n", { delay <- 0.001 * time_factor d <- cache_mem(max_n = 3) d$set("a", rnorm(100)); Sys.sleep(delay) d$set("b", rnorm(100)); Sys.sleep(delay) d$set("c", rnorm(100)); Sys.sleep(delay) d$set("d", rnorm(100)); Sys.sleep(delay) d$set("e", rnorm(100)); Sys.sleep(delay) expect_identical(sort(d$keys()), c("c", "d", "e")) }) test_that("cache_mem: pruning respects max_size", { delay <- 0.001 * time_factor d <- cache_mem(max_size = object.size(123) * 3) d$set("a", rnorm(100)); Sys.sleep(delay) d$set("b", rnorm(100)); Sys.sleep(delay) d$set("c", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("c")) d$set("d", rnorm(100)); Sys.sleep(delay) expect_length(d$keys(), 0) d$set("e", 2); Sys.sleep(delay) d$set("f", 3); Sys.sleep(delay) expect_identical(sort(d$keys()), c("e", "f")) }) test_that("cache_mem: max_size=Inf", { mc <- cachem::cache_mem(max_size = Inf) mc$set("a", 123) expect_identical(mc$get("a"), 123) mc$prune() expect_identical(mc$get("a"), 123) }) test_that("cache_mem: pruning respects both max_n and max_size", { delay <- 0.001 * time_factor d <- cache_mem(max_n = 3, max_size = object.size(123) * 3) d$set("a", rnorm(100)); Sys.sleep(delay) d$set("b", rnorm(100)); Sys.sleep(delay) d$set("c", rnorm(100)); Sys.sleep(delay) d$set("d", rnorm(100)); Sys.sleep(delay) d$set("e", rnorm(100)); Sys.sleep(delay) d$set("f", 1); Sys.sleep(delay) d$set("g", 1); Sys.sleep(delay) d$set("h", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("f", "g", "h")) d$set("i", c(2, 3)); Sys.sleep(delay) expect_identical(sort(d$keys()), c("h", "i")) }) test_that('cache_mem: pruning with evict="lru"', { delay <- 0.001 * time_factor d <- cache_mem(max_n = 2) d$set("a", 1); Sys.sleep(delay) d$set("b", 1); Sys.sleep(delay) d$set("c", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "c")) d$get("b") d$set("d", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "d")) d$get("b") d$set("e", 2); Sys.sleep(delay) d$get("b") d$set("f", 3); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "f")) d <- cache_mem(max_n = 2, evict = "lru") d$set("a", 1); Sys.sleep(delay) d$set("b", 1); Sys.sleep(delay) d$set("c", 1); Sys.sleep(delay) d$set("b", 2); Sys.sleep(delay) d$set("d", 2); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "d")) }) test_that('cache_mem: pruning with evict="fifo"', { delay <- 0.001 * time_factor d <- 
cache_mem(max_n = 2, evict = "fifo") d$set("a", 1); Sys.sleep(delay) d$set("b", 1); Sys.sleep(delay) d$set("c", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "c")) d$get("b") d$set("d", 1); Sys.sleep(delay) expect_identical(sort(d$keys()), c("c", "d")) d$get("b") d$set("e", 2); Sys.sleep(delay) d$get("b") d$set("f", 3); Sys.sleep(delay) expect_identical(sort(d$keys()), c("e", "f")) d <- cache_mem(max_n = 2, evict = "fifo") d$set("a", 1); Sys.sleep(delay) d$set("b", 1); Sys.sleep(delay) d$set("c", 1); Sys.sleep(delay) d$set("b", 2); Sys.sleep(delay) d$set("d", 2); Sys.sleep(delay) expect_identical(sort(d$keys()), c("b", "d")) }) test_that("Pruning by max_age", { skip_on_cran() d <- cache_mem(max_age = 0.25*time_factor) d$set("a", 1) expect_identical(d$get("a"), 1) Sys.sleep(0.3*time_factor) expect_identical(d$get("a"), key_missing()) expect_identical(d$get("x"), key_missing()) d <- cache_mem(max_age = 0.25*time_factor) d$set("a", 1) expect_identical(d$get("a"), 1) Sys.sleep(0.3*time_factor) d$set("b", 1) expect_identical(d$keys(), "b") d <- cache_mem(max_age = 0.25*time_factor) d$set("a", 1) expect_identical(d$get("a"), 1) expect_true(d$exists("a")) expect_false(d$exists("b")) Sys.sleep(0.15*time_factor) d$set("b", 1) expect_true(d$exists("a")) expect_true(d$exists("b")) Sys.sleep(0.15*time_factor) expect_false(d$exists("a")) expect_true(d$exists("b")) d <- cache_mem(max_age = 0.25*time_factor) d$set("a", 1) expect_identical(d$keys(), "a") Sys.sleep(0.15*time_factor) d$set("b", 1) Sys.sleep(0.15*time_factor) expect_identical(d$keys(), "b") d <- cache_mem(max_age = 0.25*time_factor) d$set("a", 1) expect_identical(d$size(), 1L) Sys.sleep(0.15*time_factor) d$set("b", 1) expect_identical(d$size(), 2L) Sys.sleep(0.15*time_factor) expect_identical(d$size(), 1L) }) test_that("Removed objects can be GC'd", { mc <- cache_mem() e <- new.env() finalized <- FALSE reg.finalizer(e, function(x) finalized <<- TRUE) mc$set("e", e) rm(e) mc$set("x", 1) gc() expect_false(finalized) expect_true(is.environment(mc$get("e"))) }) test_that("Pruned objects can be GC'd", { delay <- 0.001 * time_factor mc <- cache_mem(max_size = object.size(new.env()) + object.size(1234)) e <- new.env() finalized <- FALSE reg.finalizer(e, function(x) finalized <<- TRUE) mc$set("e", e) rm(e) mc$set("x", 1) gc() expect_false(finalized) expect_true(is.environment(mc$get("e"))) Sys.sleep(delay) mc$get("x") Sys.sleep(delay) mc$set("y", 2) gc() expect_true(finalized) expect_true(is.key_missing(mc$get("e"))) })
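# A minimal usage sketch of the API exercised above: cache_mem() returns an
# in-memory key-value cache with $get()/$set() methods and size-based pruning
# m <- cachem::cache_mem(max_size = 1024 * 1024)
# m$set("key", 42)
# m$get("key")     # 42
# m$get("nope")    # key_missing() sentinel unless 'missing' was customized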
context("spec (responses): 1.39") test_sample_responses("1.39")
bprobgHsDiscr2SS <- function(params, respvec, VC, ps, AT = FALSE){ p1 <- p2 <- pdf1 <- pdf2 <- c.copula.be2 <- c.copula.be1 <- c.copula2.be1be2 <- NA eta1 <- VC$X1%*%params[1:VC$X1.d2] eta2 <- VC$X2%*%params[(VC$X1.d2+1):(VC$X1.d2+VC$X2.d2)] etad <- etas <- l.ln <- NULL if(is.null(VC$X3)){ sigma2.st <- etas <- params[(VC$X1.d2 + VC$X2.d2 + 1)] teta.st <- etad <- params[(VC$X1.d2 + VC$X2.d2 + 2)] } if(!is.null(VC$X3)){ sigma2.st <- etas <- VC$X3%*%params[(VC$X1.d2+VC$X2.d2+1):(VC$X1.d2+VC$X2.d2+VC$X3.d2)] teta.st <- etad <- VC$X4%*%params[(VC$X1.d2+VC$X2.d2+VC$X3.d2+1):(VC$X1.d2+VC$X2.d2+VC$X3.d2+VC$X4.d2)] } sstr1 <- esp.tr(sigma2.st, VC$margins[2]) sigma2.st <- sstr1$vrb.st sigma2 <- sstr1$vrb eta2 <- eta.tr(eta2, VC$margins[2]) dHs <- distrHsDiscr(respvec$y2, eta2, sigma2, sigma2.st, nu = 1, nu.st = 1, margin2=VC$margins[2], naive = FALSE, y2m = VC$y2m, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr) pdf2 <- dHs$pdf2 p2 <- dHs$p2 derpdf2.dereta2 <- dHs$derpdf2.dereta2 derpdf2.dersigma2.st <- dHs$derpdf2.dersigma2.st derp2.dersigma.st <- dHs$derp2.dersigma.st derp2.dereta2 <- dHs$derp2.dereta2 der2p2.dereta2eta2 <- dHs$der2p2.dereta2eta2 der2pdf2.dereta2 <- dHs$der2pdf2.dereta2 der2p2.dersigma2.st2 <- dHs$der2p2.dersigma2.st2 der2pdf2.dersigma2.st2 <- dHs$der2pdf2.dersigma2.st2 der2p2.dereta2dersigma2.st <- dHs$der2p2.dereta2dersigma2.st der2pdf2.dereta2dersigma2.st <- dHs$der2pdf2.dereta2dersigma2.st pd1 <- probm(eta1, VC$margins[1], bc = TRUE, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr) p1 <- 1 - pd1$pr resT <- teta.tr(VC, teta.st) teta.st <- resT$teta.st teta <- resT$teta C1 <- mm(BiCDF(p1[VC$inde], p2, VC$nC, teta, VC$dof), min.pr = VC$min.pr, max.pr = VC$max.pr ) C2 <- mm(BiCDF(p1[VC$inde], mm(p2-pdf2, min.pr = VC$min.pr, max.pr = VC$max.pr), VC$nC, teta, VC$dof), min.pr = VC$min.pr, max.pr = VC$max.pr ) A <- mm(C1 - C2, min.pr = VC$min.pr, max.pr = VC$max.pr) B <- mm(pdf2 - A, min.pr = VC$min.pr, max.pr = VC$max.pr) l.par1 <- log(p1) l.par1[VC$inde] <- log(B) l.par <- VC$weights*l.par1 dH1 <- copgHs(p1[VC$inde], p2, eta1=NULL, eta2=NULL, teta, teta.st, VC$BivD, VC$dof, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr) dH2 <- copgHs(p1[VC$inde], mm(p2-pdf2, min.pr = VC$min.pr, max.pr = VC$max.pr), eta1=NULL, eta2=NULL, teta, teta.st, VC$BivD, VC$dof, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr) c.copula.be1.C1 <- dH1$c.copula.be1 c.copula.be1.C2 <- dH2$c.copula.be1 c.copula.be2.C1 <- dH1$c.copula.be2 c.copula.be2.C2 <- dH2$c.copula.be2 derp2m1.dereta2 <- derp2.dereta2 - derpdf2.dereta2 derp2m1.dersigma.st <- derp2.dersigma.st - derpdf2.dersigma2.st c.copula.theta.C1 <- dH1$c.copula.theta c.copula.theta.C2 <- dH2$c.copula.theta derp1.dereta1 <- pd1$derp1.dereta1 Cc <- c.copula.be1.C1 - c.copula.be1.C2 C <- Cc*derp1.dereta1[VC$inde] Cs <- c.copula.theta.C1 - c.copula.theta.C2 Cssb2 <- c.copula.be2.C1*derp2.dereta2 - c.copula.be2.C2*derp2m1.dereta2 CssSI <- c.copula.be2.C1*derp2.dersigma.st - c.copula.be2.C2*derp2m1.dersigma.st dl.dbe11 <- 1/p1*derp1.dereta1 dl.dbe11[VC$inde] <- -C/B dl.dbe1 <- VC$weights*dl.dbe11 dl.dteta.st <- VC$weights[VC$inde]*(-Cs/B) dl.dbe2 <- VC$weights[VC$inde]*( (derpdf2.dereta2 - Cssb2)/B ) dl.dsigma.st <- VC$weights[VC$inde]*( (derpdf2.dersigma2.st - CssSI)/B ) c.copula2.be1.C1 <- dH1$c.copula2.be1 c.copula2.be1.C2 <- dH2$c.copula2.be1 c.copula2.be2.C1 <- dH1$c.copula2.be2 c.copula2.be2.C2 <- dH2$c.copula2.be2 c.copula2.be1be2.C1 <- dH1$c.copula2.be1be2 c.copula2.be1be2.C2 <- dH2$c.copula2.be1be2 
c.copula2.be2th.C1 <- dH1$c.copula2.be2th c.copula2.be2th.C2 <- dH2$c.copula2.be2th der2p1.dereta1eta1 <- pd1$der2p1.dereta1eta1 derC.dereta1 <- (c.copula2.be1.C1 - c.copula2.be1.C2)*derp1.dereta1[VC$inde]^2 + Cc*der2p1.dereta1eta1[VC$inde] c.copula2.theta.C1 <- dH1$bit1.th2ATE c.copula2.theta.C2 <- dH2$bit1.th2ATE c.copula.thet.C1 <- dH1$c.copula.thet c.copula.thet.C2 <- dH2$c.copula.thet derteta.derteta.st <- dH1$derteta.derteta.st der2teta.derteta.stteta.st <- dH1$der2teta.derteta.stteta.st derCs.dertheta.st <- (c.copula2.theta.C1 - c.copula2.theta.C2)*derteta.derteta.st^2 + (c.copula.thet.C1 - c.copula.thet.C2)*der2teta.derteta.stteta.st derA.dereta2 <- c.copula.be2.C1*derp2.dereta2 - c.copula.be2.C2*derp2m1.dereta2 derB.dereta2 <- derpdf2.dereta2 - derA.dereta2 der2p2m1.dereta2eta2 <- der2p2.dereta2eta2 - der2pdf2.dereta2 derCssb2.dereta2 <- c.copula2.be2.C1*derp2.dereta2^2 + c.copula.be2.C1*der2p2.dereta2eta2 - (c.copula2.be2.C2*derp2m1.dereta2^2 + c.copula.be2.C2*der2p2m1.dereta2eta2) derA.dersigma2.st <- c.copula.be2.C1*derp2.dersigma.st - c.copula.be2.C2*derp2m1.dersigma.st derB.dersigma2.st <- derpdf2.dersigma2.st - derA.dersigma2.st der2p2m1.dersigma2.st2 <- der2p2.dersigma2.st2 - der2pdf2.dersigma2.st2 derCssSI.dersigma2.st <- c.copula2.be2.C1*derp2.dersigma.st^2 + c.copula.be2.C1*der2p2.dersigma2.st2 - (c.copula2.be2.C2*derp2m1.dersigma.st^2 + c.copula.be2.C2*der2p2m1.dersigma2.st2) derC.dereta2 <- (c.copula2.be1be2.C1*derp2.dereta2 - c.copula2.be1be2.C2*derp2m1.dereta2)*derp1.dereta1[VC$inde] derC.dersigma2.st <- (c.copula2.be1be2.C1*derp2.dersigma.st - c.copula2.be1be2.C2*derp2m1.dersigma.st)*derp1.dereta1[VC$inde] c.copula2.be1th.C1 <- dH1$c.copula2.be1th c.copula2.be1th.C2 <- dH2$c.copula2.be1th derC.dertheta.st <- (c.copula2.be1th.C1 - c.copula2.be1th.C2)*derp1.dereta1[VC$inde] der2p2m1.dereta2dersigma2.st <- der2p2.dereta2dersigma2.st - der2pdf2.dereta2dersigma2.st derCssb2.dersigma2.st <- c.copula2.be2.C1*derp2.dereta2*derp2.dersigma.st + c.copula.be2.C1*der2p2.dereta2dersigma2.st - (c.copula2.be2.C2*derp2m1.dereta2*derp2m1.dersigma.st + c.copula.be2.C2*der2p2m1.dereta2dersigma2.st) derCs.dereta2 <- c.copula2.be2th.C1*derp2.dereta2 - c.copula2.be2th.C2*derp2m1.dereta2 derCs.dersigma2.st <- c.copula2.be2th.C1*derp2.dersigma.st - c.copula2.be2th.C2*derp2m1.dersigma.st d2l.be1.be11 <- -1/p1^2*derp1.dereta1*derp1.dereta1 + 1/p1*der2p1.dereta1eta1 d2l.be1.be11[VC$inde] <- -C^2/B^2 - derC.dereta1/B d2l.be1.be1 <- -VC$weights*d2l.be1.be11 d2l.rho.rho <- -VC$weights[VC$inde]*( -Cs^2/B^2 - derCs.dertheta.st/B ) d2l.be2.be2 <- -VC$weights[VC$inde]*( -derB.dereta2/B^2*(derpdf2.dereta2-Cssb2) + ( der2pdf2.dereta2 - derCssb2.dereta2)/B ) d2l.sigma.sigma <- -VC$weights[VC$inde]*( - derB.dersigma2.st/B^2*(derpdf2.dersigma2.st-CssSI) + ( der2pdf2.dersigma2.st2 - derCssSI.dersigma2.st)/B ) d2l.be1.be2 <- -VC$weights[VC$inde]*( ( derB.dereta2/B^2)*C - derC.dereta2/B ) d2l.be1.sigma <- -VC$weights[VC$inde]*( ( derB.dersigma2.st/B^2)*C - derC.dersigma2.st/B ) d2l.be1.rho <- -VC$weights[VC$inde]*( -C*Cs/B^2 - derC.dertheta.st/B ) d2l.be2.sigma <- -VC$weights[VC$inde]*( -derB.dersigma2.st/B^2*(derpdf2.dereta2-Cssb2) + ( der2pdf2.dereta2dersigma2.st - derCssb2.dersigma2.st)/B ) d2l.be2.rho <- -VC$weights[VC$inde]*( derB.dereta2/B^2*Cs - derCs.dereta2/B ) d2l.rho.sigma <- -VC$weights[VC$inde]*( derB.dersigma2.st/B^2*Cs - derCs.dersigma2.st/B ) if( is.null(VC$X3) ){ be1.be1 <- crossprod(VC$X1*c(d2l.be1.be1),VC$X1) be2.be2 <- crossprod(VC$X2*c(d2l.be2.be2),VC$X2) be1.be2 <- 
crossprod(VC$X1[VC$inde,]*c(d2l.be1.be2),VC$X2) be1.rho <- t(t(rowSums(t(VC$X1[VC$inde,]*c(d2l.be1.rho))))) be1.sigma <- t(t(rowSums(t(VC$X1[VC$inde,]*c(d2l.be1.sigma))))) be2.rho <- t(t(rowSums(t(VC$X2*c(d2l.be2.rho))))) be2.sigma <- t(t(rowSums(t(VC$X2*c(d2l.be2.sigma))))) H <- rbind( cbind( be1.be1 , be1.be2 , be1.sigma, be1.rho ), cbind( t(be1.be2) , be2.be2 , be2.sigma, be2.rho ), cbind( t(be1.sigma), t(be2.sigma), sum(d2l.sigma.sigma), sum(d2l.rho.sigma) ), cbind( t(be1.rho) , t(be2.rho), sum(d2l.rho.sigma), sum(d2l.rho.rho) ) ) G <- -c( colSums( c(dl.dbe1)*VC$X1 ) , colSums( c(dl.dbe2)*VC$X2 ) , sum( dl.dsigma.st ), sum( dl.dteta.st ) ) } if( !is.null(VC$X3) ){ be1.be1 <- crossprod(VC$X1*c(d2l.be1.be1),VC$X1) be2.be2 <- crossprod(VC$X2*c(d2l.be2.be2),VC$X2) be1.be2 <- crossprod(VC$X1[VC$inde,]*c(d2l.be1.be2),VC$X2) be1.rho <- crossprod(VC$X1[VC$inde,]*c(d2l.be1.rho), VC$X4) be1.sigma <- crossprod(VC$X1[VC$inde,]*c(d2l.be1.sigma),VC$X3) be2.rho <- crossprod(VC$X2*c(d2l.be2.rho), VC$X4) be2.sigma <- crossprod(VC$X2*c(d2l.be2.sigma),VC$X3) sigma.sigma <- crossprod(VC$X3*c(d2l.sigma.sigma),VC$X3) sigma.rho <- crossprod(VC$X3*c(d2l.rho.sigma),VC$X4) rho.rho <- crossprod(VC$X4*c(d2l.rho.rho), VC$X4) H <- rbind( cbind( be1.be1 , be1.be2 , be1.sigma , be1.rho ), cbind( t(be1.be2) , be2.be2 , be2.sigma , be2.rho ), cbind( t(be1.sigma), t(be2.sigma), sigma.sigma , sigma.rho ), cbind( t(be1.rho) , t(be2.rho) , t(sigma.rho), rho.rho ) ) G <- -c( colSums( c(dl.dbe1)*VC$X1 ) , colSums( c(dl.dbe2)*VC$X2 ) , colSums( c(dl.dsigma.st)*VC$X3 ) , colSums( c(dl.dteta.st)*VC$X4 ) ) } res <- -sum(l.par) if(VC$extra.regI == "pC") H <- regH(H, type = 1) S.h <- ps$S.h if( length(S.h) != 1){ S.h1 <- 0.5*crossprod(params,S.h)%*%params S.h2 <- S.h%*%params } else S.h <- S.h1 <- S.h2 <- 0 S.res <- res res <- S.res + S.h1 G <- G + S.h2 H <- H + S.h if(VC$extra.regI == "sED") H <- regH(H, type = 2) list(value=res, gradient=G, hessian=H, S.h=S.h, S.h1=S.h1, S.h2=S.h2, l=S.res, l.par=l.par, ps = ps, etas = etas, eta1=eta1, eta2=eta2, etad=etad, dl.dbe1=dl.dbe1, dl.dbe2=dl.dbe2, dl.dsigma.st = dl.dsigma.st, dl.dteta.st = dl.dteta.st, BivD=VC$BivD, p1 = 1-p1, p2 = p2, pdf1 = pdf1, pdf2 = pdf2, c.copula.be2 = c.copula.be2, c.copula.be1 = c.copula.be1, c.copula2.be1be2 = c.copula2.be1be2, theta.star = teta.st) }
NULL fms <- function(config = list()) { svc <- .fms$operations svc <- set_config(svc, config) return(svc) } .fms <- list() .fms$operations <- list() .fms$metadata <- list( service_name = "fms", endpoints = list("*" = list(endpoint = "fms.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "fms.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "fms.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "fms.{region}.sc2s.sgov.gov", global = FALSE)), service_id = "FMS", api_version = "2018-01-01", signing_name = "fms", json_version = "1.1", target_prefix = "AWSFMS_20180101" ) .fms$service <- function(config = list()) { handlers <- new_handlers("jsonrpc", "v4") new_service(.fms$metadata, handlers, config) }
library(CloneSeeker) suppressWarnings( RNGversion("3.5.0") ) set.seed(461283) sampleSimplex(4, 3) all( apply(sampleSimplex(4, 3), 1, sum) == 1) generateSimplex(5, 3) try( sampleSimplex(0) ) try( sampleSimplex(-1) ) try( sampleSimplex(4, 0) ) try( sampleSimplex(4, -1) )
setClass("ExomeDepth", representation(test = "numeric", reference = "numeric", formula = "character", expected = "numeric", phi = "numeric", likelihood = "matrix", annotations = "data.frame", CNV.calls = "data.frame")) setMethod("initialize", "ExomeDepth", function(.Object, data = NULL, test, reference, formula = 'cbind(test, reference) ~ 1', phi.bins = 1, prop.tumor = 1, subset.for.speed = NULL, verbose = TRUE) { if (length(test) != length(reference)) stop("Length of test and numeric must match") if (sum(test > 5) < 5) { message('It looks like the test samples has only ', sum(test > 5), ' bins with more than 5 reads. The coverage is too small to perform any meaningful inference so no likelihood will be computed.') return(.Object) } n.data.points <- length(test) if (is.null(data)) data <- data.frame(intercept = rep(1, length(test))) data$test <- test data$reference <- reference if (!is.null(subset.for.speed)) { if ( (class(subset.for.speed) == 'numeric') && (length(subset.for.speed) == 1)) {subset.for.speed <- seq(from = 1, to = nrow(data), by = floor( nrow(data) / subset.for.speed ) )} subset.for.speed <- subset.for.speed[ subset.for.speed %in% 1:nrow(data) ] data.for.fit <- data[ subset.for.speed, , drop = FALSE] } else { data.for.fit <- data } if (verbose) message('Now fitting the beta-binomial model on a data frame with ', nrow(data.for.fit), ' rows : this step can take a few minutes.') if (phi.bins == 1) { mod <- aod::betabin( data = data.for.fit, formula = as.formula(formula), random = ~ 1, link = 'logit', warnings = FALSE) .Object@phi <- rep(mod@param[[ 'phi.(Intercept)']], n.data.points) } else { if (!is.null(subset.for.speed)) {stop('Subset for speed option is not compatible with variable phi. This will be fixed later on but for now please adapt your code.')} ceiling.bin <- quantile(reference, probs = c( 0.85, 1) ) bottom.bins <- seq(from = 0, to = ceiling.bin[1], by = ceiling.bin[1]/(phi.bins-1)) complete.bins <- as.numeric(c(bottom.bins, ceiling.bin[2] + 1)) data$depth.quant <- factor(sapply(reference, FUN = function(x) {sum (x >= complete.bins)})) my.tab <- table(data$depth.quant) if (length(my.tab) != phi.bins) { stop('Binning did not happen properly') } mod <- aod::betabin (data = data.for.fit, formula = as.formula(formula), random = as.formula('~ depth.quant'), link = 'logit', warnings = FALSE) phi.estimates <- as.numeric([email protected]) data$phi <- phi.estimates[ data$depth.quant ] fc <- approxfun (x = c(complete.bins[ 1:phi.bins] + complete.bins[ 2:(phi.bins+1)])/2, y = phi.estimates, yleft = phi.estimates[1], yright = phi.estimates[phi.bins]) data$phi.linear <- fc (reference) .Object@phi <- data$phi.linear } .Object@formula <- formula .Object@test <- test .Object@reference <- reference my.coeffs <- [email protected] if (is.null(subset.for.speed)) { .Object@expected <- aod::fitted(mod) } else { intercept <- my.coeffs[[ '(Intercept)' ]] .Object@expected <- rep(intercept, times = nrow(data)) if (length(my.coeffs) > 1) { for (na in names(my.coeffs)[ -1 ]) { .Object@expected = .Object@expected + my.coeffs [[ na ]]*data[, na] } } .Object@expected <- exp(.Object@expected)/ (1 + exp(.Object@expected)) } .Object@annotations <- data.frame() if (verbose) message('Now computing the likelihood for the different copy number states') if (prop.tumor < 1) message('Proportion of tumor DNA is ', prop.tumor) .Object@likelihood <- .Call("get_loglike_matrix", phi = .Object@phi, expected = .Object@expected, total = as.integer(.Object@reference + .Object@test), observed = 
as.integer(.Object@test), mixture = prop.tumor) .Object }) if (!isGeneric("show")) { if (exists("show", mode = "function")) fun <- show else fun <- function(object) standardGeneric("show") setGeneric("show", fun) } setMethod("show", "ExomeDepth", function(object) { cat('Number of data points: ', length(object@test), '\n') cat('Formula: ', object@formula, '\n') cat('Phi parameter (range if multiple values have been set): ', range(object@phi), '\n') if (ncol(object@likelihood) == 3) cat("Likelihood computed\n") else cat("Likelihood not computed\n") }) setGeneric("TestCNV", def = function(x, chromosome, start, end, type) standardGeneric('TestCNV')) setMethod("TestCNV", "ExomeDepth", function(x, chromosome, start, end, type) { if (! type %in% c('deletion', 'duplication')) stop("type must be either duplication or deletion\n") if (length(chromosome) != 1 || length(start) != 1 || length(end) != 1 || length(type) != 1) stop("The arguments chromosome, start, end and type must all be of length 1") if (class(chromosome) == 'factor') chromosome <- as.character(chromosome) if (class(chromosome) != 'character') stop('The input chromosome must be a character or a factor') which.exons <- which((x@annotations$chromosome == chromosome) & (x@annotations$start >= start) & (x@annotations$end <= end)) if (type == 'deletion') log.ratio <- sum(x@likelihood[ which.exons, 1] - x@likelihood[ which.exons, 2]) if (type == 'duplication') log.ratio <- sum(x@likelihood[ which.exons, 3] - x@likelihood[ which.exons, 2]) return (log.ratio) }) setGeneric("CallCNVs", def = function(x, chromosome, start, end, name, transition.probability = 0.0001, expected.CNV.length = 50000) standardGeneric('CallCNVs')) setMethod("CallCNVs", "ExomeDepth", function( x, chromosome, start, end, name, transition.probability, expected.CNV.length) { if (length(x@phi) == 0) { message('The vector phi does not seem initialized. This may be because the read count is too low and the test vector cannot be processed. No calling will happen') x@CNV.calls <- data.frame() return(x) } if ( length(start) != length(chromosome) || length(end) != length(chromosome) || length(name) != length(chromosome) ) stop('Chromosome, start and end vectors must have the same lengths.\n') if (nrow(x@likelihood) != length(chromosome) ) stop('The annotation vectors must have the same length as the data in the ExomeDepth x') chr.names.used <- unique(as.character(chromosome)) chr.levels <- c(as.character(seq(1, 22)), chr.names.used[! chr.names.used %in% as.character(seq(1, 22)) ] ) chr.levels <- chr.levels[ chr.levels %in% chr.names.used ] x@annotations <- data.frame(name = name, chromosome = factor(chromosome, levels = chr.levels), start = start, end = end) my.new.order <- order(x@annotations$chromosome, 0.5*(x@annotations$start + x@annotations$end) ) if (sum( my.new.order != 1:nrow(x@annotations) ) > 0) { message('Positions of exons seem non-ordered, so ExomeDepth will reorder the data according to chromosome and position') x@test <- x@test[ my.new.order ] x@reference <- x@reference[ my.new.order ] x@annotations <- x@annotations[ my.new.order, ] x@likelihood <- x@likelihood[ my.new.order, ] } cor.test.reference <- cor(x@test, x@reference) message('Correlation between reference and test counts is ', signif(cor.test.reference, 5)) message('To get a meaningful result, this correlation should really be above 0.97. If this is not the case, consider the output of ExomeDepth as less reliable (i.e. 
most likely a high false positive rate)') total <- x@test + x@reference transitions <- matrix(nrow = 3, ncol = 3, c( 1. - transition.probability, transition.probability/2., transition.probability/2., 0.5, 0.5, 0., 0.5, 0, 0.5), byrow = TRUE) my.chromosomes <- unique(x@annotations$chromosome) final <- data.frame() shift <- 0 for (chrom in my.chromosomes) { good.pos <- which (x@annotations$chromosome == chrom) loc.annotations <- x@annotations[ good.pos , ] loc.expected <- x@expected[ good.pos ] loc.test <- x@test[ good.pos ] loc.total <- total[ good.pos ] positions <- loc.annotations$start end.positions <- loc.annotations$end loc.likelihood <- rbind(c(- Inf, 0, -Inf), x@likelihood[good.pos, c(2, 1, 3)],c(-100,0,-100)) my.calls <- viterbi.hmm (transitions, loglikelihood = loc.likelihood, positions = as.integer(c(positions[1] - 2*expected.CNV.length, positions,end.positions[length(end.positions)]+2*expected.CNV.length)), expected.CNV.length = expected.CNV.length) my.calls$calls$start.p <- my.calls$calls$start.p -1 my.calls$calls$end.p <- my.calls$calls$end.p -1 loc.likelihood <- loc.likelihood[ -c(1,nrow(loc.likelihood)), c(2,1, 3), drop = FALSE ] if (nrow(my.calls$calls) > 0) { my.calls$calls$start <- loc.annotations$start[ my.calls$calls$start.p ] my.calls$calls$end <- loc.annotations$end[ my.calls$calls$end.p ] my.calls$calls$chromosome <- as.character(loc.annotations$chromosome[ my.calls$calls$start.p ]) my.calls$calls$id <- paste('chr', my.calls$calls$chromosome, ':', my.calls$calls$start, '-', my.calls$calls$end, sep = '') my.calls$calls$type <- c('deletion', 'duplication')[ my.calls$calls$type ] my.calls$calls$BF <- NA my.calls$calls$reads.expected <- NA my.calls$calls$reads.observed <- NA for (ir in 1:nrow(my.calls$calls)) { if (my.calls$calls$type[ir] == 'duplication') my.calls$calls$BF[ir] <- sum(loc.likelihood [ my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir],3 ] - loc.likelihood [ my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir],2 ]) if (my.calls$calls$type[ir] == 'deletion') my.calls$calls$BF[ir] <- sum(loc.likelihood [ my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir], 1 ] - loc.likelihood [ my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir],2 ]) my.calls$calls$reads.expected[ ir ] <- sum( loc.total [my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir] ] * loc.expected [my.calls$calls$start.p[ir] : my.calls$calls$end.p[ ir ] ]) my.calls$calls$reads.observed[ ir ] <- sum( loc.test [my.calls$calls$start.p[ir] : my.calls$calls$end.p[ir] ] ) } my.calls$calls$reads.expected <- as.integer( my.calls$calls$reads.expected) my.calls$calls$reads.ratio <- signif(my.calls$calls$reads.observed / my.calls$calls$reads.expected, 3) my.calls$calls$BF <- signif( log10(exp(1))*my.calls$calls$BF, 3) my.calls$calls$start.p <- my.calls$calls$start.p + shift my.calls$calls$end.p <- my.calls$calls$end.p + shift if (nrow(final) == 0) {final <- my.calls$calls} else {final <- rbind.data.frame(final, my.calls$calls)} message('Number of calls for chromosome ', chrom, ' : ', nrow(my.calls$calls)) } shift <- shift + length(good.pos) } x@CNV.calls <- final return (x) }) somatic.CNV.call <- function(normal, tumor, prop.tumor = 1, chromosome, start, end, names) { message('Warning: this function is largely untested and experimental') message('Initializing the exomeDepth object') myTest <- new('ExomeDepth', test= tumor, reference = normal, prop.tumor = prop.tumor, formula = 'cbind(test, reference) ~ 1') message('Now calling the CNVs') myTest <- CallCNVs(x = myTest, transition.probability = 
10^-4, chromosome = chromosome, start = start, end = end, name = names) return (myTest) }
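# A minimal usage sketch (simulated read counts; real input is per-exon counts
# for a test and a reference sample, plus matching exon annotations):
# set.seed(1)
# reference <- rpois(500, lambda = 100)
# test <- rpois(500, lambda = 100)
# ed <- new('ExomeDepth', test = test, reference = reference,
#           formula = 'cbind(test, reference) ~ 1')
# ed <- CallCNVs(x = ed, transition.probability = 10^-4,
#                chromosome = rep('1', 500),
#                start = seq(1, by = 1000, length.out = 500),
#                end = seq(1000, by = 1000, length.out = 500),
#                name = paste0('exon', 1:500))
# ed@CNV.calls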
utils::globalVariables(c("i", "%dopar%")) breakpoints <- function(obj, ...) { UseMethod("breakpoints") } breakpoints.Fstats <- function(obj, ...) { RVAL <- list(breakpoints = obj$breakpoint, RSS = obj$RSS, nobs = obj$nobs, nreg = obj$nreg, call = match.call(), datatsp = obj$datatsp) class(RVAL) <- "breakpoints" return(RVAL) } breakpoints.formula <- function(formula, h = 0.15, breaks = c("BIC", "LWZ", "RSS", "all"), data = list(), hpc = c("none", "foreach"), ...) { mf <- model.frame(formula, data = data) y <- model.response(mf) modelterms <- terms(formula, data = data) X <- model.matrix(modelterms, data = data) RVAL <- breakpoints.matrix(X, y, h = h, breaks = breaks, hpc = hpc, ...) n <- nrow(X) if(is.ts(data)) { if(NROW(data) == n) datatsp <- tsp(data) else datatsp <- c(1/n, 1, n) } else { env <- environment(formula) if(missing(data)) data <- env orig.y <- eval(attr(terms(formula), "variables")[[2]], data, env) if(is.ts(orig.y) & (NROW(orig.y) == n)) datatsp <- tsp(orig.y) else datatsp <- c(1/n, 1, n) } RVAL$datatsp <- datatsp return(RVAL) } breakpoints.matrix <- function(obj, y, h = 0.15, breaks = c("BIC", "LWZ", "RSS", "all"), hpc = c("none", "foreach"), ...) { X <- obj n <- nrow(X) k <- ncol(X) breakstat <- NULL intercept_only <- isTRUE(all.equal(as.vector(X), rep(1L, n))) if(is.null(h)) h <- k + 1 if(h < 1) h <- floor(n*h) if(h <= k) stop("minimum segment size must be greater than the number of regressors") if(h > floor(n/2)) stop("minimum segment size must be smaller than half the number of observations") if (!is.numeric(breaks)) { breakstat <- match.arg(breaks) breaks <- ceiling(n/h) - 2 } else { if (length(breaks) > 1) stop("Argument 'breaks' takes a single number or method for optimal break estimation") if (breaks %% 1 != 0) stop("Please enter an integer number of breaks") if(breaks < 1) { breaks <- 1 warning("number of breaks must be at least 1") } if(breaks > ceiling(n/h) - 2) { breaks0 <- breaks breaks <- ceiling(n/h) - 2 warning(sprintf("requested number of breaks = %i too large, changed to %i", breaks0, breaks)) } } hpc <- match.arg(hpc) if(hpc == "foreach") { if(requireNamespace("foreach")) { `%dopar%` <- foreach::`%dopar%` } else { warning("High perfomance computing (hpc) support with 'foreach' package is not available, foreach is not installed.") hpc <- "none" } } if (getOption("strucchange.use_armadillo", FALSE)) { res = .sc_cpp_construct_rss_table(y,X,n,h,breaks,intercept_only,sqrt(.Machine$double.eps)/ncol(X),getOption("strucchange.armadillo_rcond_min",sqrt(.Machine$double.eps))) RSS.table = res$RSS.table dimnames(RSS.table) = list(as.character(h:(n-h)), as.vector(rbind(paste("break", 1:breaks, sep = ""),paste("RSS", 1:breaks, sep = "")))) RSS.triang = res$RSS.triang RSS <- function(i, j) .sc_cpp_rss(RSS.triang, i, j) extend.RSS.table <- function(RSS.table, breaks) { if (2*breaks > ncol(RSS.table)) { RSS.table = .sc_cpp_extend_rss_table(rss_table = RSS.table, rss_triang = RSS.triang, n = n, h=h, breaks = breaks) dimnames(RSS.table) = list(as.character(h:(n-h)), as.vector(rbind(paste("break", 1:breaks, sep = ""),paste("RSS", 1:breaks, sep = "")))) } RSS.table } } else { RSSi <- function(i) { ssr <- if(intercept_only) { (y[i:n] - cumsum(y[i:n])/(1L:(n-i+1L)))[-1L] * sqrt(1L + 1L/(1L:(n-i))) } else { recresid(X[i:n,,drop = FALSE],y[i:n], ...) 
} c(rep(NA, k), cumsum(ssr^2)) } RSS.triang <- if(hpc == "none") sapply(1:(n-h+1), RSSi) else foreach::foreach(i = 1:(n-h+1)) %dopar% RSSi(i) RSS <- function(i,j) RSS.triang[[i]][j - i + 1] index <- h:(n-h) break.RSS <- sapply(index, function(i) RSS(1,i)) RSS.table <- cbind(index, break.RSS) rownames(RSS.table) <- as.character(index) extend.RSS.table <- function(RSS.table, breaks) { if((breaks*2) > ncol(RSS.table)) { for(m in (ncol(RSS.table)/2 + 1):breaks) { my.index <- (m*h):(n-h) my.RSS.table <- RSS.table[,c((m-1)*2 - 1, (m-1)*2)] my.RSS.table <- cbind(my.RSS.table, NA, NA) for(i in my.index) { pot.index <- ((m-1)*h):(i - h) break.RSS <- sapply(pot.index, function(j) my.RSS.table[as.character(j), 2] + RSS(j+1,i)) opt <- which.min(break.RSS) my.RSS.table[as.character(i), 3:4] <- c(pot.index[opt], break.RSS[opt]) } RSS.table <- cbind(RSS.table, my.RSS.table[,3:4]) } colnames(RSS.table) <- as.vector(rbind(paste("break", 1:breaks, sep = ""), paste("RSS", 1:breaks, sep = ""))) } return(RSS.table) } RSS.table <- extend.RSS.table(RSS.table, breaks) } extract.breaks <- function(RSS.table, breaks) { if((breaks*2) > ncol(RSS.table)) stop("compute RSS.table with enough breaks before") index <- RSS.table[, 1, drop = TRUE] break.RSS <- sapply(index, function(i) RSS.table[as.character(i),breaks*2] + RSS(i + 1, n)) opt <- index[which.min(break.RSS)] if(breaks > 1) { for(i in ((breaks:2)*2 - 1)) opt <- c(RSS.table[as.character(opt[1]),i], opt) } names(opt) <- NULL return(opt) } opt <- extract.breaks(RSS.table, breaks) if(is.ts(y) && NROW(y) == n) { datatsp <- tsp(y) } else { datatsp <- c(1/n, 1, n) } RVAL <- list(breakpoints = opt, RSS.table = RSS.table, RSS.triang = RSS.triang, RSS = RSS, extract.breaks = extract.breaks, extend.RSS.table = extend.RSS.table, nobs = n, nreg = k, y = y, X = X, call = match.call(), datatsp = datatsp) class(RVAL) <- c("breakpointsfull", "breakpoints") RVAL$breakpoints <- breakpoints(RVAL, breaks=breakstat)$breakpoints return(RVAL) } breakpoints.breakpointsfull <- function(obj, breaks = c("BIC", "LWZ", "RSS", "all"), ...) { if (is.numeric(breaks)) { if (length(breaks) > 1) stop("This function is for extracting a single break") if (breaks %% 1 != 0) stop("Please enter an integer number of breaks") } else { breakstat <- match.arg(breaks) sbp <- summary(obj) if (breakstat == "all") breaks <- ncol(sbp$breakpoints) else breaks <- which.min(sbp$RSS[breakstat,]) - 1 } if(breaks < 1) { breakpoints <- NA RSS <- obj$RSS(1, obj$nobs) } else { RSS.tab <- obj$extend.RSS.table(obj$RSS.table, breaks) breakpoints <- obj$extract.breaks(RSS.tab, breaks) bp <- c(0, breakpoints, obj$nobs) RSS <- sum(apply(cbind(bp[-length(bp)]+1,bp[-1]), 1, function(x) obj$RSS(x[1], x[2]))) } fvals = fitted(obj, breaks=breaks, bp=breakpoints) mss = sum((fvals - mean(fvals))^2) r.squared = mss/(mss + RSS) RVAL <- list(breakpoints = breakpoints, RSS = RSS, nobs = obj$nobs, nreg = obj$nreg, call = match.call(), datatsp = obj$datatsp, r.squared = r.squared, MSS = mss) class(RVAL) <- "breakpoints" return(RVAL) } print.breakpoints <- function(x, format.times = NULL, ...) 
{ if(is.null(format.times)) format.times <- ((x$datatsp[3] > 1) & (x$datatsp[3] < x$nobs)) if(any(is.na(x$breakpoints))) lbp <- 0 else lbp <- length(x$breakpoints) cat(paste("\n\t Optimal ", lbp + 1, "-segment partition: \n\n", sep = "")) cat("Call:\n") print(x$call) cat("\nBreakpoints at observation number:\n") cat(x$breakpoints,"\n") cat("\nCorresponding to breakdates:\n") cat(breakdates(x, format.times = format.times),"\n") } breakdates <- function(obj, format.times = FALSE, ...) { UseMethod("breakdates") } breakdates.breakpoints <- function(obj, format.times = FALSE, breaks = NULL, ...) { if(inherits(obj, "breakpointsfull") && !is.null(breaks)) obj <- breakpoints(obj, breaks = breaks) if(is.null(format.times)) format.times <- ((obj$datatsp[3] > 1) & (obj$datatsp[3] < obj$nobs)) format.time <- function(timevec, freq) { first <- floor(timevec + .001) second <- floor(freq * (timevec - first) + 1 + .5 + .001) RVAL <- cbind(first, second) dummy <- function(x) paste(x[1], "(", x[2], ")", sep = "") RVAL <- apply(RVAL, 1, dummy) return(RVAL) } if(is.na(obj$breakpoints)[1]) breakdates <- NA else { breakdates <- (obj$breakpoints - 1)/obj$datatsp[3] + obj$datatsp[1] if(format.times) breakdates <- format.time(breakdates, obj$datatsp[3]) } return(breakdates) } breakfactor <- function(obj, breaks = NULL, labels = NULL, ...) { if("breakpointsfull" %in% class(obj)) obj <- breakpoints(obj, breaks = breaks) breaks <- obj$breakpoints if(all(is.na(breaks))) return(factor(rep("segment1", obj$nobs))) nbreaks <- length(breaks) fac <- rep(1:(nbreaks + 1), c(breaks[1], diff(c(breaks, obj$nobs)))) if(is.null(labels)) labels <- paste("segment", 1:(nbreaks+1), sep = "") fac <- factor(fac, labels = labels, ...) return(fac) } lines.breakpoints <- function(x, breaks = NULL, lty = 2, ...) { if("breakpointsfull" %in% class(x)) x <- breakpoints(x, breaks = breaks) abline(v = breakdates(x), lty = lty, ...) } summary.breakpoints <- function(object, ...) { print(object) cat(paste("\nRSS:", format(object$RSS),"MSS:", format(object$MSS), "\n")) cat(paste("Multiple R-squared:", format(object$r.squared),"\n")) } summary.breakpointsfull <- function(object, breaks = NULL, sort = TRUE, format.times = NULL, ...) { if(is.null(format.times)) format.times <- ((object$datatsp[3] > 1) & (object$datatsp[3] < object$nobs)) if(is.null(breaks)) breaks <- ncol(object$RSS.table)/2 n <- object$nobs RSS <- c(object$RSS(1, n), rep(NA, breaks)) R.sq <- c(breakpoints(object, breaks = 0)$r.squared, rep(NA, breaks)) BIC <- c(n * (log(RSS[1]) + 1 - log(n) + log(2*pi)) + log(n) * (object$nreg + 1), rep(NA, breaks)) names(RSS) <- as.character(0:breaks) bp <- breakpoints(object, breaks = breaks) bd <- breakdates(bp, format.times = format.times, breaks=breaks) RSS[breaks + 1] <- bp$RSS R.sq[breaks + 1] <- bp$r.squared BIC[breaks + 1] <- AIC(bp, k = log(n)) bp <- bp$breakpoints if(breaks > 1) { for(m in (breaks-1):1) { bp <- rbind(NA, bp) bd <- rbind(NA, bd) bpm <- breakpoints(object, breaks = m) if(sort) { pos <- apply(outer(bpm$breakpoints, bp[nrow(bp),], FUN = function(x,y) abs(x - y)), 1, which.min) if(length(pos) > length(unique(pos))) { warning("sorting not possible", call. 
= FALSE) sort <- FALSE } } if(!sort) pos <- 1:m bp[1,pos] <- bpm$breakpoints bd[1,pos] <- breakdates(bpm, format.times = format.times) RSS[m+1] <- bpm$RSS R.sq[m+1] <- bpm$r.squared BIC[m+1] <- AIC(bpm, k = log(n)) }} else { bp <- as.matrix(bp) bd <- as.matrix(bd) } rownames(bp) <- as.character(1:breaks) colnames(bp) <- rep("", breaks) rownames(bd) <- as.character(1:breaks) colnames(bd) <- rep("", breaks) LWZ = LWZ.breakpointsfull(object) RSS <- rbind(RSS, BIC, LWZ, R.sq) rownames(RSS) <- c("RSS", "BIC", "LWZ", "R.sq") RVAL <- list(breakpoints = bp, breakdates = bd, RSS = RSS, call = object$call) class(RVAL) <- "summary.breakpointsfull" return(RVAL) } print.summary.breakpointsfull <- function(x, digits = max(2, getOption("digits") - 3), ...) { bp <- x$breakpoints breaks <- ncol(bp) bd <- x$breakdates RSS <- x$RSS bp[is.na(bp)] <- "" bd[is.na(bd)] <- "" rownames(bp) <- paste("m = ", rownames(bp), " ", sep = "") rownames(bd) <- paste("m = ", rownames(bd), " ", sep = "") RSS <- rbind(0:(ncol(RSS) - 1), format(RSS, digits = digits)) rownames(RSS) <- c("m","RSS", "BIC", "LWZ", "R.sq") colnames(RSS) <- rep("", breaks + 1) cat("\n\t Optimal (m+1)-segment partition: \n\n") cat("Call:\n") print(x$call) cat("\nBreakpoints at observation number:\n") print(bp, quote = FALSE) cat("\nCorresponding to breakdates:\n") print(bd, quote = FALSE) cat("\nFit:\n") print(RSS, quote = FALSE) } plot.breakpointsfull <- function(x, breaks = NULL, ...) { rval <- summary(x, breaks = breaks) plot(rval, ...) invisible(rval) } plot.summary.breakpointsfull <- function(x, type = "b", col = c(1,4,5), legend = TRUE, xlab = "Number of breakpoints", ylab = "", main = "BIC, LWZ and Residual Sum of Squares", ...) { breaks <- as.numeric(colnames(x$RSS)) RSS <- x$RSS["RSS",] BIC <- x$RSS["BIC",] LWZ <- x$RSS["LWZ",] plot(breaks, BIC, ylab = "", ylim=c(min(c(BIC, LWZ)), max(c(BIC, LWZ))), xlab = xlab, main = main, type = type, col = col[1], ...) points(breaks, LWZ, col=col[3], type=type) onew <- par("new") par(new = TRUE) plot(breaks, RSS, type = type, axes = FALSE, col = col[2], xlab = "", ylab = "") if(legend) legend("topright", c("BIC", "RSS", "LWZ"), lty = rep(1, 3), col = col, bty = "n") axis(4) par(new = onew) invisible(x) } logLik.breakpoints <- function(object, ...) { n <- object$nobs df <- (object$nreg + 1) * (length(object$breakpoints[!is.na(object$breakpoints)]) + 1) logL <- -0.5 * n * (log(object$RSS) + 1 - log(n) + log(2 * pi)) attr(logL, "df") <- df class(logL) <- "logLik" return(logL) } logLik.breakpointsfull <- function(object, breaks = NULL, ...) { bp <- breakpoints(object, breaks = breaks) logL <- logLik(bp) return(logL) } AIC.breakpointsfull <- function(object, breaks = NULL, ..., k = 2) { if(is.null(breaks)) breaks <- 0:(ncol(object$RSS.table)/2) RVAL <- NULL for(m in breaks) RVAL <- c(RVAL, AIC(breakpoints(object, breaks = m), k = k)) names(RVAL) <- breaks return(RVAL) } LWZ <- function(object, ...) { UseMethod("LWZ") } LWZ.breakpointsfull <- function(object, ...) { return(AIC.breakpointsfull(object, ..., k=0.299 * log(object$nobs)^2.1)) } LWZ.breakpoints <- function(object, ...) 
{ return(AIC(object, k = 0.299 * log(object$nobs)^2.1)) } pargmaxV <- function(x, xi = 1, phi1 = 1, phi2 = 1) { phi <- xi * (phi2/phi1)^2 G1 <- function(x, xi = 1, phi = 1) { x <- abs(x) frac <- xi/phi rval <- - exp(log(x)/2 - x/8 - log(2*pi)/2) - (phi/xi * (phi + 2*xi)/(phi+xi)) * exp((frac * (1 + frac) * x/2) + pnorm(-(0.5 + frac) * sqrt(x), log.p = TRUE)) + exp(log(x/2 - 2 + ((phi + 2 * xi)^2)/((phi + xi)*xi)) + pnorm(-sqrt(x)/2, log.p = TRUE)) rval } G2 <- function(x, xi = 1, phi = 1) { x <- abs(x) frac <- xi^2/phi rval <- 1 + sqrt(frac) * exp(log(x)/2 - (frac*x)/8 - log(2*pi)/2) + (xi/phi * (2*phi + xi)/(phi + xi)) * exp(((phi + xi) * x/2) + pnorm(-(phi + xi/2)/sqrt(phi) * sqrt(x), log.p = TRUE)) - exp(log(((2*phi + xi)^2)/((phi+xi)*phi) - 2 + frac*x/2) + pnorm(-sqrt(frac) * sqrt(x)/2 , log.p = TRUE)) rval } ifelse(x < 0, G1(x, xi = xi, phi = phi), G2(x, xi = xi, phi = phi)) } confint.breakpointsfull <- function(object, parm = NULL, level = 0.95, breaks = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, ...) { X <- object$X y <- object$y n <- object$nobs a2 <- (1 - level)/2 if(!is.null(parm) & !is.null(breaks)) warning("`parm' and `breaks' are both specified: `breaks' is used") else if(!is.null(parm)) breaks <- parm myfun <- function(x, level = 0.975, xi = 1, phi1 = 1, phi2 = 1) (pargmaxV(x, xi = xi, phi1 = phi1, phi2 = phi2) - level) myprod <- function(delta, mat) as.vector(crossprod(delta, mat) %*% delta) bp <- breakpoints(object, breaks = breaks)$breakpoints if(any(is.na(bp))) stop("cannot compute confidence interval when `breaks = 0'") nbp <- length(bp) upper <- rep(0, nbp) lower <- rep(0, nbp) bp <- c(0, bp, n) res <- residuals(object, breaks = breaks) sigma1 <- sigma2 <- sum(res^2)/n Q1 <- Q2 <- crossprod(X)/n if(is.null(vcov.)) Omega1 <- Omega2 <- sigma1 * Q1 else { y.nb <- rowSums(X) + res fm <- lm(y.nb ~ 0 + X) if(sandwich) { Omega1 <- Omega2 <- n * crossprod(Q1, vcov.(fm)) %*% Q1 } else { Omega1 <- Omega2 <- vcov.(fm) } } xi <- 1 X2 <- X[(bp[1]+1):bp[2],,drop = FALSE] y2 <- y[(bp[1]+1):bp[2]] fm2 <- lm(y2 ~ 0+ X2) beta2 <- coef(fm2) if(het.reg) Q2 <- crossprod(X2)/nrow(X2) if(het.err) { sigma2 <- sum(residuals(fm2)^2)/nrow(X2) if(is.null(vcov.)) Omega2 <- sigma2 * Q2 else { if(sandwich) Omega2 <- nrow(X2) * crossprod(Q2, vcov.(fm2)) %*% Q2 else Omega2 <- vcov.(fm2) } } for(i in 2:(nbp+1)) { X1 <- X2 y1 <- y2 beta1 <- beta2 sigma1 <- sigma2 Q1 <- Q2 Omega1 <- Omega2 X2 <- X[(bp[i]+1):bp[i+1],,drop = FALSE] y2 <- y[(bp[i]+1):bp[i+1]] fm2 <- lm(y2 ~ 0 + X2) beta2 <- coef(fm2) delta <- beta2 - beta1 if(het.reg) Q2 <- crossprod(X2)/nrow(X2) if(het.err) { sigma2 <- sum(residuals(fm2)^2)/nrow(X2) if(is.null(vcov.)) Omega2 <- sigma2 * Q2 else { if(sandwich) Omega2 <- nrow(X2) * crossprod(Q2, vcov.(fm2)) %*% Q2 else Omega2 <- vcov.(fm2) } } Oprod1 <- myprod(delta, Omega1) Oprod2 <- myprod(delta, Omega2) Qprod1 <- myprod(delta, Q1) Qprod2 <- myprod(delta, Q2) if(het.reg) xi <- Qprod2/Qprod1 if(!is.null(vcov.)) phi1 <- sqrt(Oprod1/Qprod1) else phi1 <- sqrt(sigma1) if(!is.null(vcov.)) phi2 <- sqrt(Oprod2/Qprod2) else phi2 <- sqrt(sigma2) p0 <- pargmaxV(0, phi1 = phi1, phi2 = phi2, xi = xi) if(is.nan(p0) || p0 < a2 || p0 > (1-a2)) { warning(paste("Confidence interval", as.integer(i-1), "cannot be computed: P(argmax V <= 0) =", round(p0, digits = 4))) upper[i-1] <- NA lower[i-1] <- NA } else { ub <- lb <- 0 while(pargmaxV(ub, phi1 = phi1, phi2 = phi2, xi = xi) < (1 - a2)) ub <- ub + 1000 while(pargmaxV(lb, phi1 = phi1, phi2 = phi2, xi = xi) > a2) lb <- lb - 1000 
upper[i-1] <- uniroot(myfun, c(0, ub), level = (1-a2), xi = xi, phi1 = phi1, phi2 = phi2)$root lower[i-1] <- uniroot(myfun, c(lb, 0), level = a2, xi = xi, phi1 = phi1, phi2 = phi2)$root upper[i-1] <- upper[i-1] * phi1^2 / Qprod1 lower[i-1] <- lower[i-1] * phi1^2 / Qprod1 } } bp <- bp[-c(1, nbp+2)] bp <- cbind(bp - ceiling(upper), bp, bp - floor(lower)) a2 <- round(a2 * 100, digits = 1) colnames(bp) <- c(paste(a2, "%"), "breakpoints", paste(100 - a2, "%")) rownames(bp) <- 1:nbp RVAL <- list(confint = bp, nobs = object$nobs, nreg = object$nreg, call = match.call(), datatsp = object$datatsp) class(RVAL) <- "confint.breakpoints" return(RVAL) } breakdates.confint.breakpoints <- function(obj, format.times = FALSE, ...) { bp <- list(breakpoints = NA, nobs = obj$nobs, datatsp = obj$datatsp) class(bp) <- "breakpoints" RVAL <- obj$confint for(i in 1:3) { bp$breakpoints <- obj$confint[,i] RVAL[,i] <- breakdates(bp, format.times = format.times, ...) } bp$breakpoints <- c(1, obj$nobs) startend <- breakdates(bp, format.times = NULL, ...) nbp <- nrow(obj$confint) if(any(obj$confint < 1) | any(obj$confint > obj$nobs)) warning(paste("Confidence intervals outside data time interval\n\t from ", startend[1], " to ", startend[2], " (", obj$nobs, " observations)", sep = ""), call. = FALSE) if(any(obj$confint[-1,1] < obj$confint[-nbp,3])) warning("Overlapping confidence intervals", call. = FALSE) return(RVAL) } print.confint.breakpoints <- function(x, format.times = NULL, ...) { if(is.null(format.times)) format.times <- ((x$datatsp[3] > 1) & (x$datatsp[3] < x$nobs)) nbp <- nrow(x$confint) cat("\n\t Confidence intervals for breakpoints") cat(paste("\n\t of optimal ", nbp + 1, "-segment partition: \n\n", sep = "")) cat("Call:\n") print(x$call) cat("\nBreakpoints at observation number:\n") print(x$confint, quote = FALSE) cat("\nCorresponding to breakdates:\n") print(breakdates(x, format.times = format.times, ...), quote = FALSE) } lines.confint.breakpoints <- function(x, col = 2, angle = 90, length = 0.05, code = 3, at = NULL, breakpoints = TRUE, ...) { nbp <- nrow(x$confint) x <- breakdates(x) if(breakpoints) abline(v = x[,2], lty = 2) if(is.null(at)) { at <- par("usr")[3:4] at <- diff(at)/1.08 * 0.02 + at[1] } if(length(at) < nbp) at <- rep(at, length.out = nbp) arrows(x[,1], at, x[,3], at, col = col, angle = angle, length = length, code = code, ...) } coef.breakpointsfull <- function(object, breaks = NULL, names = NULL, ...) { X <- object$X y <- object$y n <- object$nobs bp <- obp <- breakpoints(object, breaks = breaks)$breakpoints if(any(is.na(bp))) { nbp <- 0 bp <- c(0, n) } else { nbp <- length(bp) bp <- c(0, bp, n) } if(!is.null(names)) { if(length(names) == 1) names <- paste(names, 1:(nbp+1)) else if(length(names) != (nbp+1)) names <- NULL } if(is.null(names)) { bd1 <- structure(list(breakpoints = bp[-(nbp+2)] + 1, nobs = n, datatsp = object$datatsp), class = "breakpoints") bd2 <- structure(list(breakpoints = bp[-1], nobs = n, datatsp = object$datatsp), class = "breakpoints") bd1 <- breakdates(bd1, format.times = NULL) bd2 <- breakdates(bd2, format.times = NULL) names <- paste(bd1, "-", bd2) } rval <- NULL for(i in 1:(nbp+1)) { X2 <- X[(bp[i]+1):bp[i+1],,drop = FALSE] y2 <- y[(bp[i]+1):bp[i+1]] rval <- rbind(rval, lm.fit(X2, y2)$coef) } rownames(rval) <- names return(rval) } fitted.breakpointsfull <- function(object, breaks = NULL, bp=NULL, ...) 
{ X <- object$X y <- object$y n <- object$nobs if (is.null(bp)) bp <- obp <- breakpoints(object, breaks = breaks)$breakpoints else obp <- bp if(any(is.na(bp))) { nbp <- 0 bp <- c(0, n) } else { nbp <- length(bp) bp <- c(0, bp, n) } rval <- NULL for(i in 1:(nbp+1)) { X2 <- X[(bp[i]+1):bp[i+1],,drop = FALSE] y2 <- y[(bp[i]+1):bp[i+1]] rval <- c(rval, lm.fit(X2, y2)$fitted.values) } rval <- ts(as.vector(rval)) tsp(rval) <- object$datatsp return(rval) } residuals.breakpointsfull <- function(object, breaks = NULL, ...) { X <- object$X y <- object$y n <- object$nobs bp <- obp <- breakpoints(object, breaks = breaks)$breakpoints if(any(is.na(bp))) { nbp <- 0 bp <- c(0, n) } else { nbp <- length(bp) bp <- c(0, bp, n) } rval <- NULL for(i in 1:(nbp+1)) { X2 <- X[(bp[i]+1):bp[i+1],,drop = FALSE] y2 <- y[(bp[i]+1):bp[i+1]] rval <- c(rval, lm.fit(X2, y2)$residuals) } rval <- ts(as.vector(rval)) tsp(rval) <- object$datatsp return(rval) } vcov.breakpointsfull <- function(object, breaks = NULL, names = NULL, het.reg = TRUE, het.err = TRUE, vcov. = NULL, sandwich = TRUE, ...) { X <- object$X y <- object$y n <- object$nobs bp <- breakpoints(object, breaks = breaks)$breakpoints if(any(is.na(bp))) { nbp <- 0 bp <- c(0, n) } else { nbp <- length(bp) bp <- c(0, bp, n) } if(!is.null(names)) { if(length(names) == 1) names <- paste(names, 1:(nbp+1)) else if(length(names) != (nbp+1)) names <- NULL } if(is.null(names)) { bd1 <- structure(list(breakpoints = bp[-(nbp+2)] + 1, nobs = n, datatsp = object$datatsp), class = "breakpoints") bd2 <- structure(list(breakpoints = bp[-1], nobs = n, datatsp = object$datatsp), class = "breakpoints") bd1 <- breakdates(bd1, format.times = NULL) bd2 <- breakdates(bd2, format.times = NULL) names <- paste(bd1, "-", bd2) } res <- residuals(object, breaks = breaks) sigma2 <- sum(res^2)/n Q2 <- crossprod(X)/n if(is.null(vcov.)) Omega2 <- sigma2 * solve(Q2) / n else { y.nb <- rowSums(X) + res fm <- lm(y.nb ~ 0 + X) if(sandwich) { Omega2 <- vcov.(fm) } else { modelv <- summary(fm)$cov.unscaled Omega2 <- n * modelv %*% vcov.(fm) %*% modelv } } rownames(Omega2) <- colnames(Omega2) <- colnames(X) rval <- list() for(i in 1:(nbp+1)) { X2 <- X[(bp[i]+1):bp[i+1],,drop = FALSE] y2 <- y[(bp[i]+1):bp[i+1]] fm2 <- lm(y2 ~ 0 + X2) if(het.reg) Q2 <- crossprod(X2)/nrow(X2) if(het.err) { sigma2 <- sum(residuals(fm2)^2)/nrow(X2) if(is.null(vcov.)) Omega2 <- sigma2 * solve(Q2) / nrow(X2) else { if(sandwich) { Omega2 <- vcov.(fm2) } else { modelv <- summary(fm2)$cov.unscaled Omega2 <- n * modelv %*% vcov.(fm2) %*% modelv } } rownames(Omega2) <- colnames(Omega2) <- colnames(X) } rval[[i]] <- Omega2 } names(rval) <- names return(rval) } df.residual.breakpointsfull <- function(object, ...) { rval <- table(breakfactor(object, ...)) - object$nreg names(rval) <- rownames(coef(object, ...)) return(rval) } magnitude <- function(object, ...) { UseMethod("magnitude") } magnitude.breakpointsfull <- function(object, interval = 0.1, breaks = NULL, component = "trend", ...) 
{ X <- object$X[,!colnames(object$X) %in% "(Intercept)", drop=FALSE] y <- object$y component <- component[!component %in% "(Intercept)"] if (interval <= 0 || interval > length(y)) stop("Requested interval for magnitude computation out of valid range") if (interval < 1) interval <- floor(length(y)*interval) bp <- breakpoints(object, breaks=breaks)$breakpoints nrbp <- length(bp) if (nrbp < 2 && is.na(bp)) stop("There are no breakpoints to calculate magnitudes for!") if (!any(colnames(object$X) %in% component)) stop(paste("The specified component", component, "is missing")) co <- coef(object, breaks=breaks) Mag <- matrix(NA, nrbp, 6) for (i in 1:nrbp) { interval_start <- max(bp[i]-interval, 1) interval_end <- min(bp[i]+interval, nrow(X)) fit_prev <- co[i, "(Intercept)"] fit_next <- co[i+1, "(Intercept)"] for (comp in component) { fit_prev <- X[interval_start:interval_end,comp] * co[i, comp] + fit_prev fit_next <- X[interval_start:interval_end,comp] * co[i+1, comp] + fit_next } Mag[i, 1] <- co[i, "(Intercept)"] Mag[i, 2] <- co[i+1, "(Intercept)"] for (comp in component) { Mag[i, 1] <- X[bp[i], comp] * co[i, comp] + Mag[i, 1] Mag[i, 2] <- X[bp[i]+1,comp] * co[i+1, comp] + Mag[i, 2] } Mag[i, 3] <- Mag[i, 2] - Mag[i, 1] Mag[i, 4] <- sqrt(mean((fit_next - fit_prev)^2)) Mag[i, 5] <- mean(abs(fit_next - fit_prev)) Mag[i, 6] <- mean(fit_next - fit_prev) colnames(Mag) = c("before", "after", "diff", "RMSD", "MAD", "MD") } index <- which.max(abs(Mag[, 3])) m.x <- rep(bp[index], 2) m.y <- c(Mag[index, 1], Mag[index, 2]) Magnitude <- Mag[index, 3] Time <- bp[index] Result <- list(Mag=Mag, m.x=m.x, m.y=m.y, Magnitude=Magnitude, Time=Time) class(Result) <- "magnitude" return(Result) }
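# A minimal usage sketch (Nile annual flow series from base R's datasets
# package; assumes recresid() and the methods above are available, as in
# strucchange):
# bp <- breakpoints(Nile ~ 1, h = 0.15, breaks = "BIC")
# summary(bp)    # RSS, BIC, LWZ and R.sq per number of breaks
# plot(bp)       # information criteria vs. number of breakpoints
# confint(bp)    # interval estimates for the break locations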
mlb_batting_orders <- function (game_pk, type = "starting") { api_call <- paste0("http://statsapi.mlb.com/api/v1.1/game/", game_pk, "/feed/live") list <- jsonlite::fromJSON(api_call, flatten = TRUE) home_team <- tibble::tibble( homeTeam = list$gameData$teams$home$name, homeTeamId = list$gameData$teams$home$id) away_team <- tibble::tibble( awayTeam = list$gameData$teams$away$name, awayTeamId = list$gameData$teams$away$id) home_players <- tibble::tibble( playerid = names(list[["liveData"]][["boxscore"]][["teams"]][["home"]][["players"]])) away_players <- tibble::tibble( playerid = names(list[["liveData"]][["boxscore"]][["teams"]][["away"]][["players"]])) home_players <- unique(home_players$playerid) home_players <- purrr::map_df(home_players, function(x){ helper_players(list = list, team = "home", playerid = x) }) home_players <- home_players %>% dplyr::mutate( batting_order = as.character(.data$batting_order), batting_position_num = as.character(.data$batting_position_num)) home_players <- dplyr::bind_rows(home_players) %>% dplyr::mutate( team = "home", teamName = home_team$homeTeam, teamID = home_team$homeTeamId) %>% dplyr::arrange(.data$batting_order) away_players <- unique(away_players$playerid) away_players <- purrr::map_df(away_players, function(x){ helper_players(list = list, team = "away", playerid = x) }) away_players <- away_players %>% dplyr::mutate( batting_order = as.character(.data$batting_order), batting_position_num = as.character(.data$batting_position_num)) away_players <- dplyr::bind_rows(away_players) %>% dplyr::mutate( team = "away", teamName = away_team$awayTeam, teamID = away_team$awayTeamId) %>% dplyr::arrange(.data$batting_order) final_batting_order_table <- dplyr::bind_rows(away_players, home_players) %>% dplyr::select(-.data$link, -.data$code, -.data$name, -.data$type) %>% dplyr::arrange(.data$team, .data$batting_order, .data$batting_position_num) %>% dplyr::filter(!is.na(.data$batting_order)) if (type == "starting") { final_batting_order_table <- final_batting_order_table %>% dplyr::filter(.data$batting_position_num == 0) } return(final_batting_order_table) } get_batting_orders <- mlb_batting_orders helper_players <- function(list, team = "home", playerid) { person <- list[["liveData"]][["boxscore"]][["teams"]][[team]][["players"]][[playerid]][["person"]] %>% dplyr::bind_rows() position <- list[["liveData"]][["boxscore"]][["teams"]][[team]][["players"]][[playerid]][["position"]] %>% dplyr::bind_rows() batting_position <- list[["liveData"]][["boxscore"]][["teams"]][[team]][["players"]][[playerid]][["battingOrder"]] final_table <- bind_cols(person, position) final_table <- final_table %>% dplyr::mutate( batting_order = ifelse(is.null(batting_position), NA, substr(batting_position, 1, 1)), batting_position_num = ifelse(is.null(batting_position), NA, as.numeric(substr(batting_position, 2, 3)))) return(final_table) }
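# A minimal usage sketch (game_pk is a hypothetical MLB Stats API game id;
# requires internet access). Any type other than "starting" returns every
# player with a batting-order entry:
# starters <- mlb_batting_orders(game_pk = 566001, type = "starting")
# everyone <- mlb_batting_orders(game_pk = 566001, type = "all")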
tobit.bsreg <- function(target, dataset, threshold = 0.05, wei = NULL) { threshold <- log(threshold) dm <- dim(dataset) if ( is.null(dm) ) { n <- length(dataset) p <- 1 } else { n <- dm[1] p <- dm[2] } if ( p > n ) { res <- paste("The number of variables is higher than the sample size. No backward procedure was attempted") } else { if ( any(is.na(dataset)) ) { warning("The dataset contains missing values (NA) and they were replaced automatically by the variable (column) median (for numeric) or by the most frequent level (mode) if the variable is a factor") if ( is.matrix(dataset) ) { dataset <- apply( dataset, 2, function(x){ x[which(is.na(x))] = median(x, na.rm = TRUE) ; return(x) } ) } else { poia <- unique( which( is.na(dataset), arr.ind = TRUE )[, 2] ) for( i in poia ) { xi <- dataset[, i] if( is.numeric(xi) ) { xi[ which( is.na(xi) ) ] <- median(xi, na.rm = TRUE) } else if ( is.factor( xi ) ) { xi[ which( is.na(xi) ) ] <- levels(xi)[ which.max( as.vector( table(xi) ) )] } dataset[, i] <- xi } } } runtime <- proc.time() dataset <- as.data.frame(dataset) if (p == 1) { ini <- survival::survreg( target ~., data = dataset, weights = wei, dist = "gaussian" ) mod <- anova(ini) stat <- mod[2, 2] dof <- mod[2, 1] } else { ini <- survival::survreg( target ~., data = dataset, weights = wei, dist = "gaussian" ) dofini <- length( ini$coefficients ) stat <- dof <- numeric(p) for (i in 1:p) { mod <- survival::survreg( target ~., data = dataset[, -i ,drop = FALSE], weights = wei, dist = "gaussian" ) stat[i] <- 2 * abs(logLik(mod) - logLik(ini) ) dof[i] <- dofini - length( mod$coefficients ) } } mat <- cbind(1:p, pchisq( stat, dof, lower.tail = FALSE, log.p = TRUE), stat ) colnames(mat) <- c("variable", "log.p-values", "statistic" ) rownames(mat) <- 1:p sel <- which.max( mat[, 2] ) info <- matrix( c(0, -10, -10) , ncol = 3 ) if ( mat[sel, 2] < threshold ) { runtime <- proc.time() - runtime res <- list(runtime = runtime, info = matrix(0, 0, 3), mat = mat, ci_test = "testIndTobit", final = ini ) } else { info[1, ] <- mat[sel, ] mat <- mat[-sel, , drop = FALSE] dat <- dataset[, -sel ,drop = FALSE] i <- 1 while ( info[i, 2] > threshold & NCOL(dat) > 0 ) { i <- i + 1 k <- p - i + 1 ini <- survival::survreg( target ~., data = dat, weights = wei, dist = "gaussian" ) if ( k == 1 ) { mod <- survival::survreg(target ~ 1, data = dat, weights = wei, dist = "gaussian") stat <- 2 * abs( logLik(ini) - logLik(mod) ) dof <- length( ini$coefficients ) - length( mod$coefficients ) pval <- pchisq( stat, dof, lower.tail = FALSE, log.p = TRUE) if (pval > threshold ) { final <- "No variables were selected" info <- rbind(info, c(mat[, 1], pval, stat) ) dat <- dataset[, -info[, 1], drop = FALSE ] mat <- matrix(nrow = 0, ncol = 3) } else { info <- rbind(info, c(0, -10, -10)) final <- ini mat[, 2:3] <- c(pval, stat) } } else { stat <- dof <- numeric(k) for (j in 1:k) { mod <- survival::survreg( target ~., data = dat[, -j, drop = FALSE], weights = wei, dist = "gaussian" ) stat[j] <- 2 * abs( logLik(mod) - logLik(ini) ) dof[j] <- length( ini$coefficients ) - length( mod$coefficients ) } mat[, 2:3] <- cbind( pchisq( stat, dof, lower.tail = FALSE, log.p = TRUE), stat ) sel <- which.max( mat[, 2] ) if ( mat[sel, 2] < threshold ) { final <- ini info <- rbind(info, c(0, -10, -10) ) } else { info <- rbind(info, mat[sel, ] ) mat <- mat[-sel, ,drop = FALSE] dat <- dataset[, -info[, 1], drop = FALSE ] } } } info <- info[ info[, 1] > 0, , drop = FALSE ] res <- list(runtime = runtime, info = info, mat = mat, ci_test = "testIndTobit", final = 
final ) } } }
ltdl <- function(A) {
  # Factor a symmetric positive-definite matrix as A = t(L) %*% D %*% L,
  # with L unit lower-triangular and D diagonal.
  if (!is.matrix(A)) A <- as.matrix(A)
  n <- ncol(A)
  L <- matrix(0, n, n)
  D <- L
  for (i in seq(from = n, to = 1, by = -1)) {
    D[i, i] <- A[i, i]
    L[i, 1:i] <- A[i, 1:i] / sqrt(as.numeric(A[i, i]))
    for (j in seq_len(i - 1)) {  # seq_len(i - 1) is empty at i == 1, so no update runs on the final step
      A[j, 1:j] <- A[j, 1:j] - L[i, 1:j] * as.numeric(L[i, j])
    }
    L[i, 1:i] <- L[i, 1:i] / as.numeric(L[i, i])
  }
  out <- list(L, D)
  names(out) <- c("L", "D")
  return(out)
}
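# Self-contained sanity check: the factorization should reproduce a small
# symmetric positive-definite matrix as A = t(L) %*% D %*% L.
local({
  A <- matrix(c(4, 2, 2, 3), 2, 2)
  f <- ltdl(A)
  stopifnot(isTRUE(all.equal(t(f$L) %*% f$D %*% f$L, A)))
})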
plot_selected_workouts <- function(x, session, what, sumX, threshold = TRUE, smooth = FALSE, trend = TRUE, dates = TRUE, changepoints = FALSE, n_changepoints = 6, print_changepoints = FALSE, unit_reference_sport = NULL, moving_threshold = NULL, desampling = 1, k = 200, y_axis_range = NULL, options = NULL) { opts <- if (is.null(options)) trops() else options if (isTRUE(length(session) == 0)) { return(plotly_empty(type = "scatter", mode= "markers")) } if(what == 'altitude') { y_axis_range[[1]] <- y_axis_range[[1]] * 0.80 y_axis_range[[2]] <- y_axis_range[[2]] * 1.2 } if(what == 'pace') { y_axis_range <- y_axis_range[c(2,1)] } sports <- get_sport(x)[session] var_name_units <- lab_sum(feature = what, data = sumX, transform_feature = FALSE) var_units <- lab_sum(feature = what, data = sumX, whole_text = FALSE, transform_feature = FALSE) x <- x[session] units <- get_units(x) if (is.null(session)) { session <- seq_along(x) } if (is.null(unit_reference_sport)) { unit_reference_sport <- find_unit_reference_sport(x) } un <- collect_units(units, unit_reference_sport) for (va in unique(un$variable)) { units$unit[units$variable == va] <- un$unit[un$variable == va] } if (is.null(moving_threshold)) { moving_threshold <- c(cycling = 2, running = 1, swimming = 0.5) speed_unit <- un$unit[un$variable == "speed"] if (speed_unit != "m_per_s") { conversion <- match.fun(paste("m_per_s", speed_unit, sep = "2")) moving_threshold <- conversion(moving_threshold) } } x <- change_units(x, units$variable, units$unit, units$sport) if (threshold) { dots <- list() if (all(c("variable", "lower", "upper", "sport") %in% names(dots))) { th <- generate_thresholds(dots$variable, dots$lower, dots$upper, dots$sport) } else { th <- generate_thresholds() th <- change_units(th, variable = units$variable, unit = units$unit, sport = units$sport) } x <- threshold(x, th$variable, th$lower, th$upper, th$sport) } var_name_units <- unique(var_name_units) plot_stored <- list() images <- list() smoothed_values <- list(maximum = numeric(), minimum = numeric()) n_plot <- 0 shapes <- list() changepoint_y_values <- c() step_size <- 1 / length(unique(session)) start <- 0 for (i in seq_along(session)) { df_subset <- x[[i]] df_subset <- df_subset[, what] dates <- index(df_subset) df_subset <- data.frame(df_subset) names(df_subset) <- what df_subset$Index <- dates df_subset$id <- csession <- session[i] csport <- get_sport(x[i]) df_subset$SessionID <- paste0(paste(csession, csport, sep = ": "), "\n", format(df_subset$Index, "%Y-%m-%d")) df_subset$numericDate <- as.numeric(df_subset$Index) n_plot <- n_plot + 1 colnames(df_subset)[which(colnames(df_subset) == what)] <- "Value" not_na <- !is.na(df_subset[, "Value"]) non_na_values <- sum(not_na) has_values <- non_na_values > 100 df_subset <- if (has_values) df_subset[not_na, ] else df_subset annotations_list <- list(text = paste("Session:", csession), xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "center", x = 0.5, y = 1, showarrow = FALSE) axis_list <- list(zeroline = FALSE, fixedrange = TRUE, tickangle = 0) if (has_values) { if (changepoints) { n_sessions <- length(df_subset$Value) - 5 m.binseg <- cpt.mean(df_subset$Value[6:n_sessions], method = "BinSeg", penalty = "BIC", minseglen = length(df_subset$Value) / 100, Q = n_changepoints) x_values <- c(1, cpts(m.binseg) + 5, length(df_subset$Value)) y_values <- coef(m.binseg)$mean if (print_changepoints) { print(df_subset$Index[cpts(m.binseg)]) print(coef(m.binseg)) } line <- list(type = "line", line = list(color = 
opts$workouts_changepoint_colour, dash = "dot"),
                     xref = paste0("x", n_plot), yref = paste0("y", n_plot))
        line_v <- list(type = "line",
                       line = list(color = opts$workouts_changepoint_colour, dash = "dot"),
                       xref = paste0("x", n_plot), yref = paste0("y", n_plot))
        # use a separate loop index here so the rollmedian window width `k`
        # (a function argument, used further below) stays intact
        for (cp in 1:(length(x_values) - 1)) {
          line[["x0"]] <- df_subset$Index[x_values[cp]]
          line[["x1"]] <- df_subset$Index[x_values[cp + 1]]
          line[c("y0", "y1")] <- y_values[cp]
          changepoint_y_values <- c(changepoint_y_values, y_values[cp])
          shapes[[length(shapes) + 1]] <- line
        }
        for (cp in 2:(length(x_values) - 1)) {
          line_v[["x0"]] <- df_subset$Index[x_values[cp]]
          line_v[["x1"]] <- df_subset$Index[x_values[cp]]
          line_v[c("y0", "y1")] <- range(df_subset$Value)
          changepoint_y_values <- c(changepoint_y_values, y_values[cp])
          shapes[[length(shapes) + 1]] <- line_v
        }
      }
      sampled_rows <- sort(sample(index(df_subset), size = length(index(df_subset)) * desampling))
      col <- ifelse(csport == "running", opts$summary_plots_selected_colour_run,
                    ifelse(csport == "cycling", opts$summary_plots_selected_colour_ride,
                           opts$summary_plots_selected_colour_swim))
      ceg <- what == "cumulative_elevation_gain"
      if (ceg) {
        hovertext <- paste(round(df_subset$Value, 2), var_units)
        a <- plot_ly(df_subset[sampled_rows, ], x = ~ Index, y = ~ Value,
                     hoverinfo = "text", text = hovertext[sampled_rows],
                     type = "scatter", mode = "lines", showlegend = FALSE,
                     alpha = 1, color = I(col))
        a <- add_lines(a, x = ~ Index, y = I(0), type = 'scatter', mode = 'lines',
                       fill = 'tonexty', fillcolor = I(col), hoverinfo = "none",
                       alpha = 0.2, showlegend = FALSE)
      } else {
        a <- plot_ly()
      }
      if (smooth & !ceg) {
        smoothed_data <- rollmedian(zoo(x = df_subset$Value, order.by = df_subset$Index),
                                    k = k, align = "center")
        smoothed_values$minimum <- c(smoothed_values$minimum, min(smoothed_data))
        smoothed_values$maximum <- c(smoothed_values$maximum, max(smoothed_data))
        a <- a %>% add_lines(data = df_subset, x = index(smoothed_data),
                             y = coredata(smoothed_data), hoverinfo = "text",
                             text = paste(round(smoothed_data, 2), var_units),
                             color = I(col), showlegend = FALSE, alpha = 1)
      }
      a <- a %>% layout(annotations = annotations_list, xaxis = axis_list,
                        yaxis = c(axis_list, list(range = y_axis_range)))
    } else {
      maximal_range <- c(-1, 1)
      df_subset$Value <- 0
      a <- plot_ly(df_subset, x = ~ Index, y = ~ Value, hoverinfo = "none",
                   type = "scatter", mode = "none", showlegend = FALSE) %>%
        layout(annotations = annotations_list, xaxis = axis_list,
               yaxis = c(axis_list, list(range = y_axis_range, showticklabels = TRUE)))
    }
    plot_stored[[as.character(i)]] <- a
    sport_image <- switch(csport,
                          "running" = "running.png",
                          "cycling" = "cycling.png",
                          "swimming" = "swimming.png")
    images[[csession]] <- list(source = sport_image, xref = "paper", yref = "paper",
                               x = start + step_size / 10, y = 1,
                               sizex = 0.07, sizey = 0.07, opacity = 0.3)
    start <- start + step_size
  }
  y <- list(title = var_units, fixedrange = TRUE)
  x <- list(title = NULL, fixedrange = TRUE)
  return(subplot(plot_stored, nrows = 1, titleY = FALSE, margin = 0.003) %>%
           config(displayModeBar = FALSE) %>%
           layout(showlegend = FALSE, xaxis = x, yaxis = y, images = images,
                  hovermode = "x", shapes = shapes, dragmode = "pan",
                  plot_bgcolor = "rgba(0, 0, 0, 0)",
                  paper_bgcolor = "rgba(0, 0, 0, 0)"))
}
library(dplyr) context("Test tag.search()") test_that("Test input dataset structure and search results feedbacks", { d = tibble(groupname = c("control","control", "lab1", "lab1"), mass = c(100, 200, 150, 250)) expect_error(tag.search(d, delta = 50), regexp = "A variable/column named `group` is required") d = tibble(group = c("control","control", "lab1", "lab1"), `m/z` = c(100, 200, 150, 250)) expect_error(tag.search(d, delta = 50), regexp = "A variable/column named `mass` is required") d = tibble(group = c("ctrl","ctrl", "lab1", "lab1"), mass = c(100, 200, 150, 250)) expect_error(tag.search(d, delta = 50), regexp = "`control` is required in the `group` column.") d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 200, 150, 250)) expect_error(tag.search(d), regexp = "Mass shift input required.") d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 100, 150, 250), intensity = c(10, 20, 30, 40), peptide = c("a", "b", "c", "d")) expected.input = d %>% select(group, mass) %>% distinct() %>% left_join(d, by = c("group", "mass")) expected.output = tag.search(expected.input, delta = 50)[[1]] expect_equal(tag.search(d, delta = 50)[[1]], expected.output) d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 100, 150, 250)) tag.search(d, delta = 50) expect_output(tag.search(d, delta = 50)[[1]]$intensity) expect_equal(tag.search(d, delta = 50)[[1]]$intensity %>% class(), "numeric") }) test_that("Test cases when no search found", { d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 180, 150, 250), intensity = c(500, 600, 200, 300)) tag.search(d, delta = 40) expect_message(tag.search(d, delta = 40)) d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 180, 150, 250)) expect_equal(tag.search(d, delta = 50)[[2]], "Found paired peaks (mass differentiate by expected delta) , but not matched peaks (of the same mass).") d = tibble(group = c("control","control", "lab1", "lab1"), mass = c(100, 150, 150, 250)) expect_equal(tag.search(d, delta = 50)[[2]], "Found both paired peaks (mass differentiate by expected delta) and matched peaks (of the same mass).") d = tibble(group = c("control","control", "lab1", "lab1", "lab2", "lab2", "lab2"), mass = c(100, 150, 150, 250, 150, 200, 250)) d %>% tag.search(delta = c(50, 100)) expect_output(d %>% tag.search(delta = c(50, 100))) })
Pinhull <- function(pts, ppts) { ncol <- length(ppts) out <- matrix(FALSE, ncol=ncol, nrow=nrow(pts)) colnames (out) <- names(ppts) events <- data.frame(EID=1:nrow(pts), X=pts[, 1], Y=pts[, 2]) for (i in seq_len(ncol)) { ppol <- data.frame(ppts[[i]], PID=i, POS=seq_len(nrow(ppts[[i]]))) names(ppol)[1:2] <- c("X", "Y") eids <- PBSmapping::findPolys(events, ppol)$EID out[eids, i] <- TRUE } out }
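# Usage sketch (assumes the PBSmapping package is installed): membership of
# random points in one convex hull; 'hullA' is an illustrative name only.
if (FALSE) {
  set.seed(1)
  pts <- cbind(runif(20), runif(20))
  hull <- pts[chull(pts), ]        # polygon vertices of the convex hull
  Pinhull(pts, list(hullA = hull)) # logical matrix, one column per polygon
}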
profileGroupLabels <- function(x0, x1, labels, y0=100, y1=98, label.offset=2, label.cex=0.75) {
  # the bracket endpoints and labels must all have the same length
  if (length(unique(c(length(x0), length(x1), length(labels)))) != 1)
    stop('start positions, stop positions, and number of labels must be equal', call. = FALSE)
  n.groups <- length(x0)
  label.centers <- (x0 + x1) / 2
  segments(x0=x0, x1=x1, y0=y0, y1=y0)
  arrows(x0=c(x0, x1), x1=c(x0, x1), y0=c(y0, y0), y1=y1, length=0.1)
  text(x=label.centers, y=y0 + label.offset, labels=labels, cex=label.cex)
}

plotMultipleSPC <- function(spc.list, group.labels,
                            args = rep(list(NA), times = length(spc.list)),
                            merged.legend = NULL,
                            # NOTE: these colour defaults are assumed (a viridis-like ramp);
                            # the original default values were truncated in the source
                            merged.colors = c("#440154FF", "#39568CFF", "#238A8DFF", "#55C667FF", "#FDE725FF"),
                            merged.legend.title = merged.legend,
                            # NOTE: arrow.offset, bracket.base.depth, label.offset and label.cex
                            # are used in the body but were missing from the truncated signature;
                            # the defaults below are assumptions
                            arrow.offset = 2, bracket.base.depth,
                            label.offset = 2, label.cex = 0.75, ...) {
  n.groups <- length(spc.list)
  spc.lengths <- sapply(spc.list, length)
  n.pedons <- sum(spc.lengths)
  group.starts <- c(1, 1 + cumsum(spc.lengths[-n.groups]))
  group.ends <- cumsum(spc.lengths)
  yy <- unlist(sapply(spc.list, function(i) profileApply(i, max)))
  tick.heights <- yy[c(group.starts, group.ends)] + arrow.offset
  unique.args <- unique( c( names(unlist(args)), names(list(...)) ) )
  if(! 'max.depth' %in% unique.args){
    max.depth <- max(sapply(spc.list, max), na.rm = TRUE)
    max.depth <- max.depth + (max.depth / 5)  # pad the plotted depth by 20%
    args[[1]]$max.depth <- max.depth
  }
  if(missing(bracket.base.depth)) {
    bracket.base.depth <- max(sapply(spc.list, max), na.rm = TRUE) + 10
  }
  if(! is.null(merged.legend)) {
    cr <- colorRamp(merged.colors, space = 'Lab', interpolate = 'spline')
    .mapColor <- function(x, r, col.ramp) {
      c.rgb <- cr(scales::rescale(x, from = r, to = c(0,1)))
      cc <- which(complete.cases(c.rgb))
      cols <- rep(NA, times = nrow(c.rgb))
      cols[cc] <- rgb(c.rgb[cc, ], maxColorValue=255)
      return(cols)
    }
    combined.data <- na.omit( unlist( lapply(spc.list, function(i) i[[merged.legend]]) ) )
    combined.range <- range(combined.data, na.rm = TRUE)
    for(i in 1:length(spc.list)) {
      spc_i <- spc.list[[i]]
      arg_i <- args[[i]]
      if(!is.null(spc_i[[merged.legend]])) {
        horizons(spc_i)[['.color']] <- .mapColor(spc_i[[merged.legend]], combined.range, cr)
        arg_i$color = '.color'
        arg_i$show.legend = FALSE
        spc.list[[i]] <- spc_i
        args[[i]] <- arg_i
      }
    }
    pretty.vals <- pretty(combined.data, n = 8)
    legend.data <- list(
      legend = pretty.vals,
      col = rgb( cr( .rescaleRange(pretty.vals, x0 = 0, x1 = 1) ), maxColorValue=255)
    )
  }
  do.call( what = plotSPC, args = c( x = spc.list[[1]], n = n.pedons, na.omit(args[[1]]), ...) )
  if(n.groups > 1) {
    for(i in 2:n.groups) {
      this.obj <- spc.list[[i]]
      this.args <- na.omit(args[[i]])
      suppressMessages(
        do.call( what = plotSPC,
                 args = c(x=this.obj, x.idx.offset=group.ends[i-1], add=TRUE,
                          plot.depth.axis=FALSE, this.args) )
      )
    }
  }
  profileGroupLabels( x0 = group.starts, x1 = group.ends, labels = group.labels,
                      y0 = bracket.base.depth, y1 = tick.heights,
                      label.offset = label.offset, label.cex = label.cex )
  if(! is.null(merged.legend)) {
    mtext(side=3, text = merged.legend.title, font=2, line=1.6)
    legend('bottom', legend=legend.data$legend, col=legend.data$col, bty='n',
           pch=15, horiz=TRUE, xpd=TRUE, inset=c(0, 0.99))
  }
}
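# Usage sketch (assumes the aqp package and its sp4 example data are
# available): plot two profile groups side by side with a bracket label
# under each group.
if (FALSE) {
  library(aqp)
  data(sp4)
  depths(sp4) <- id ~ top + bottom
  plotMultipleSPC(list(sp4[1:3, ], sp4[4:6, ]),
                  group.labels = c("Group 1", "Group 2"))
}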
test_that("Test whether the tree_metrics works", { data("pc_tree") to_test <- tree_metrics(pc_tree) expect_equal(round(to_test$Height, 4), 6.0365, info = "Height") expect_equal(round(to_test$Crown_area, 4), 28.5489, info = "Crown_area") expect_equal(round(to_test$DBH, 4), 0.2002, info = "DBH") })
NULL "wvs_usa_regions"
uci_ucinewgame <- function(engine){ return(uci_cmd(engine,"ucinewgame")) }
xgx_dirs2char <- function(dirs, include_time = TRUE) { if (typeof(dirs)!="list") { stop("dirs variable must be a list") } missing_filenames <- setdiff(c("parent_dir", "rscript_dir", "rscript_name", "results_dir", "filename"), names(dirs)) if (length(missing_filenames) > 0) { stop(paste("Fields missing from dirs = ", missing_filenames)) } output <- paste0(dirs$parent_dir, "\n", file.path(dirs$rscript_dir, dirs$rscript_name), "\n", file.path(dirs$results_dir, dirs$filename)) if (include_time) { output <- paste0(output, "\n", "Created: ", Sys.time()) } return(output) }
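# Example with hypothetical paths: build the annotation string without the
# timestamp so the output is deterministic.
if (FALSE) {
  dirs <- list(parent_dir = "/projects/study1", rscript_dir = "scripts",
               rscript_name = "analysis.R", results_dir = "results",
               filename = "fig1.png")
  cat(xgx_dirs2char(dirs, include_time = FALSE))
}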
prefit <- function(data, distr, method = c("mle", "mme", "qme", "mge"), feasible.par, memp=NULL, order=NULL, probs=NULL, qtype=7, gof=NULL, fix.arg=NULL, lower, upper, weights=NULL, silent=TRUE, ...) { if (!is.character(distr)) distname <- substring(as.character(match.call()$distr), 2) else distname <- distr method <- match.arg(method, c("mle", "mme", "qme", "mge")) if(method != "qme" && !is.null(probs)) stop("probs is not needed") if(method != "mme" && (!is.null(memp) || !is.null(order))) stop("memp, order are not needed") if(method != "mge" && !is.null(gof)) stop("gof is not needed") ddistname <- paste0("d", distname) if (!exists(ddistname, mode="function")) stop(paste("The ", ddistname, " function must be defined")) pdistname <- paste0("p", distname) if (!exists(pdistname, mode="function") && method == "mge") stop(paste("The ", pdistname, " function must be defined")) qdistname <- paste0("q",distname) if (!exists(qdistname, mode="function") && method == "qme") stop(paste("The ", qdistname, " function must be defined")) mdistname <- paste0("m",distname) if (!exists(mdistname, mode="function") && method == "mme") stop(paste("The ", mdistname, " function must be defined")) if(is.null(probs) && method == "qme") stop("probs must be provided") if(is.null(order) && method == "mme") stop("order must be provided") if(missing(feasible.par)) stop("feasible values must be provided") if(missing(lower) || missing(upper)) stop("bounds (yet infinite) must be provided") if(is.list(feasible.par)) feasible.par <- unlist(feasible.par) npar <- length(feasible.par) lower <- as.double(rep_len(lower, npar)) upper <- as.double(rep_len(upper, npar)) if(all(is.infinite(lower)) && all(is.infinite(upper))) { bnd <- detectbound(distname, feasible.par, data, fix.arg=fix.arg) }else { bnd <- rbind(lower, upper) colnames(bnd) <- names(feasible.par) rownames(bnd) <- c("lowb", "uppb") } if(!silent) print(bnd) translist <- invlist <- NULL for(i in 1:NCOL(bnd)) { if(bnd["lowb", i] == -Inf && bnd["uppb", i] == Inf) { translist <- c(translist, list(function(x) x)) invlist <- c(invlist, list(function(x) x)) }else if(bnd["lowb", i] == 0 && bnd["uppb", i] == Inf) { translist <- c(translist, list(T0Inf)) invlist <- c(invlist, list(iT0Inf)) }else if(bnd["lowb", i] == 1 && bnd["uppb", i] == Inf) { translist <- c(translist, list(T1Inf)) invlist <- c(invlist, list(iT1Inf)) }else if(bnd["lowb", i] == 0 && bnd["uppb", i] == 1) { translist <- c(translist, list(T01)) invlist <- c(invlist, list(iT01)) }else if(bnd["lowb", i] == -1 && bnd["uppb", i] == 0) { translist <- c(translist, list(Tm10)) invlist <- c(invlist, list(iTm10)) }else { print(bnd) stop("unknown parameter domain") } } if(!silent) print(translist) if(!is.null(weights)) { if(any(weights < 0)) stop("weights should be a vector of numerics greater than 0") if(length(weights) != NROW(data)) stop("weights should be a vector with a length equal to the observation number") if(method == "mge") stop("weights is not allowed for maximum GOF estimation") } if(method == "mle") { if(is.null(weights)) weights <- rep(1, NROW(data)) fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), function(i) translist[[i]](par[[i]])) -sum( weights * log(do.call(ddistnam, c(list(obs), lpar, as.list(fix.arg)) ) ) ) } } if(method == "qme" && is.null(weights)) { DIFF2Q <- function(par, fix.arg, prob, obs, qdistnam, qtype) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), 
function(i) translist[[i]](par[[i]])) qtheo <- do.call(qdistnam, c(list(prob), lpar, as.list(fix.arg)) ) qemp <- as.numeric(quantile(obs, probs=prob, type=qtype)) (qemp - qtheo)^2 } fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) sum( sapply(probs, function(p) DIFF2Q(par, fix.arg, p, obs, qdistnam, qtype)) ) } if(method == "qme" && !is.null(weights)) { DIFF2Q <- function(par, fix.arg, prob, obs, qdistnam, qtype) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), function(i) translist[[i]](par[[i]])) qtheo <- do.call(qdistnam, c(list(prob), lpar, as.list(fix.arg)) ) qemp <- as.numeric(wtd.quantile(x=obs, weights=weights, probs=prob)) (qemp - qtheo)^2 } fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) sum( sapply(probs, function(p) DIFF2Q(par, fix.arg, p, obs, qdistnam, qtype)) ) } if(method == "mme" && is.null(weights)) { DIFF2 <- function(par, fix.arg, order, obs, mdistnam, memp, weights) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), function(i) translist[[i]](par[[i]])) momtheo <- do.call(mdistnam, c(list(order), lpar, as.list(fix.arg)) ) momemp <- as.numeric(memp(obs, order)) (momemp - momtheo)^2 } fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) sum( sapply(order, function(o) DIFF2(par, fix.arg, o, obs, mdistnam, memp)) ) } if(method == "mme" && !is.null(weights)) { DIFF2 <- function(par, fix.arg, order, obs, mdistnam, memp, weights) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), function(i) translist[[i]](par[[i]])) momtheo <- do.call(mdistnam, c(list(order), lpar, as.list(fix.arg)) ) momemp <- as.numeric(memp(obs, order, weights)) (momemp - momtheo)^2 } fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) sum( sapply(order, function(o) DIFF2(par, fix.arg, o, obs, mdistnam, memp, weights)) ) } if(method == "mge") { fnobj <- function(par, fix.arg, obs, ddistnam, qdistnam, pdistnam, mdistnam, qtype, memp, gof) { if(!is.list(par)) par <- as.list(par) lpar <- lapply(1:length(par), function(i) translist[[i]](par[[i]])) n <- length(obs) s <- sort(obs) theop <- do.call(pdistnam, c(list(s), lpar, as.list(fix.arg)) ) obspu <- seq(1,n)/n obspl <- seq(0,n-1)/n if (gof == "CvM") 1/(12*n) + sum( ( theop - (2 * 1:n - 1)/(2 * n) )^2 ) else if (gof == "KS") max(pmax(abs(theop-obspu),abs(theop-obspl))) else if (gof == "AD") - n - mean( (2 * 1:n - 1) * (log(theop) + log(1 - rev(theop))) ) else if (gof == "ADR") n/2 - 2 * sum(theop) - mean ( (2 * 1:n - 1) * log(1 - rev(theop)) ) else if (gof == "ADL") -3*n/2 + 2 * sum(theop) - mean ( (2 * 1:n - 1) * log(theop) ) else if (gof == "AD2R") 2 * sum(log(1 - theop)) + mean ( (2 * 1:n - 1) / (1 - rev(theop)) ) else if (gof == "AD2L") 2 * sum(log(theop)) + mean ( (2 * 1:n - 1) / theop ) else if (gof == "AD2") 2*sum(log(theop) + log(1 - theop)) + mean(((2*1:n - 1) / theop) + ((2*1:n - 1) / (1 - rev(theop)))) } } ltrans.par <- sapply(1:length(feasible.par), function(i) invlist[[i]](feasible.par[[i]])) if(!silent) { cat("before transform\n") print(unlist(feasible.par)) cat("after transform\n") print(unlist(ltrans.par)) } if(method == "mle") test1 <- try(fnobj(par=ltrans.par, fix.arg = fix.arg, obs=data, ddistnam = ddistname), silent=silent) if(method == "qme") test1 <- try(fnobj(par=ltrans.par, fix.arg = fix.arg, obs=data, qdistnam=qdistname, qtype=qtype), silent=silent) if(method == "mme") test1 <- 
try(fnobj(par=ltrans.par, fix.arg = fix.arg, obs=data, mdistnam=mdistname, memp=memp), silent=silent)
  if(method == "mge")
    test1 <- try(fnobj(par=ltrans.par, fix.arg = fix.arg, obs=data, pdistnam=pdistname, gof=gof), silent=silent)
  if(inherits(test1, "try-error") || !silent)
    print(test1)
  owarn <- options(warn=ifelse(silent, -1, 0))
  if(method == "mle")
    opttryerror <- try(opt <- optim(par=ltrans.par, fn=fnobj, fix.arg=fix.arg, obs=data,
                                    ddistnam=ddistname, hessian=FALSE, method="BFGS", ...), silent=silent)
  if(method == "qme")
    opttryerror <- try(opt <- optim(par=ltrans.par, fn=fnobj, fix.arg=fix.arg, obs=data,
                                    qdistnam=qdistname, qtype=qtype, hessian=FALSE, method="BFGS", ...), silent=silent)
  if(method == "mme")
    opttryerror <- try(opt <- optim(par=ltrans.par, fn=fnobj, fix.arg=fix.arg, obs=data,
                                    mdistnam=mdistname, memp=memp, hessian=FALSE, method="BFGS", ...), silent=silent)
  if(method == "mge")
    opttryerror <- try(opt <- optim(par=ltrans.par, fn=fnobj, fix.arg=fix.arg, obs=data,
                                    pdistnam=pdistname, gof=gof, hessian=FALSE, method="BFGS", ...), silent=silent)
  on.exit(options(owarn), add=TRUE)
  if(inherits(opttryerror, "try-error"))
    stop("unsuccessful pre-fitting process")
  if(!silent) print(opt)
  if(opt$convergence %in% 0:1) {
    prefitpar <- unlist(sapply(1:length(opt$par), function(i) translist[[i]](opt$par[i])))
  } else {
    prefitpar <- rep(NA, length(opt$par))
  }
  names(prefitpar) <- names(feasible.par)
  as.list(prefitpar)
}
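# Usage sketch (mirrors the fitdistrplus-style interface; assumes the
# package's internal transform helpers such as T0Inf are loaded): rough MLE
# starting values for a gamma fit over unbounded transformed parameters.
if (FALSE) {
  set.seed(123)
  x <- rgamma(100, shape = 2, rate = 0.5)
  prefit(x, "gamma", "mle", feasible.par = list(shape = 1, rate = 1),
         lower = -Inf, upper = Inf)
}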
archmCopulaSim <- function (n, alpha = NULL, type = archmList()) { type <- match.arg(type) Type <- as.integer(type) if (is.null(alpha)) alpha = archmParam(type)$param ans <- rarchmCopula(n = n, alpha = alpha, type = type) control = list(alpha = alpha[[1]], copula = "archm", type = type) attr(ans, "control")<-unlist(control) ans } archmCopulaFit <- function(u, v = NULL, type = archmList(), ...) { type = match.arg(type) Type = as.integer(type) U = u V = v if (is.list(u)) { U = u[[1]] V = u[[2]] } if (is.matrix(u)) { U = u[, 1] V = u[, 2] } alpha = archmParam(type)$param fun = function(x, type, U, V) { -mean( log(darchmCopula(u = U, v = V, alpha = x, type = type)) ) } range = archmRange(type) fit = nlminb(start = alpha, objective = fun, lower = range[1], upper = range[2], type = type, U = U, V = V, ...) fit }
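# Usage sketch (assumes the fCopulae-style helpers rarchmCopula() and
# darchmCopula() are available; the type numbering "1" is taken from that
# package's Archimedean copula list and is an assumption here):
if (FALSE) {
  u <- archmCopulaSim(n = 1000, alpha = 2, type = "1")
  archmCopulaFit(u, type = "1")
}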
plot.importance <- function(x, Style="BPIC", ...) {
  if(missing(x)) stop("The x argument is required.")
  if(!Style %in% c("BPIC", "Concordance", "Discrep", "L-criterion"))
    stop("Style is unrecognized.")
  if(!inherits(x, "importance")) stop("x must be of class importance.")
  if(identical(Style, "BPIC"))
    dotchart(x[,1], main="Variable Importance", xlab="BPIC", pch=20)
  else if(identical(Style, "Concordance"))
    dotchart(x[,2], main="Variable Importance", xlab="Concordance", pch=20)
  else if(identical(Style, "Discrep"))
    dotchart(x[,3], main="Variable Importance", xlab="Discrepancy Statistic", pch=20)
  else
    dotchart(x[,4], main="Variable Importance", xlab="L-criterion", pch=20)
  return(invisible())
}
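# Usage sketch ('imp' stands in for an object of class "importance", e.g. the
# output of a variable-importance routine; it is not constructed here):
if (FALSE) {
  plot(imp, Style = "BPIC")
  plot(imp, Style = "L-criterion")
}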
factoGraph <- function(res, file = "", dim = 1:2, hab = NULL, ellipse = TRUE,
                       Iselec = "contrib", Vselec = "cos2", Rselec = "cos2",
                       Cselec = "cos2", Mselec = "cos2", Icoef = 1, Vcoef = 1,
                       Rcoef = 1, Ccoef = 1, Mcoef = 1, figure.title = "Figure",
                       graph = TRUE, cex = 0.7, codeGraphInd = NULL,
                       codeGraphVar = NULL, codeGraphCA = NULL, options = NULL) {
  if(!is.character(file)) {
    return(warning("the parameter 'file' must be a character string giving the name of the .Rmd file to write to"))
  }
  analyse = whichFacto(res)
  if(!analyse %in% c("PCA", "CA", "CaGalt", "MCA", "MFA", "DMFA", "FAMD", "GPA", "HCPC")) {
    return(warning("the parameter 'res' must be an object of class 'PCA', 'CA', 'CaGalt', 'MCA', 'MFA', 'DMFA', 'FAMD', 'GPA' or 'HCPC'"))
  }
  param = getParam(res)
  i = 1
  switch(analyse,
    PCA = {
      graphInd(res, file = file, dim = dim, Iselec = Iselec, Icoef = Icoef,
               figure.title = paste(figure.title, i, sep = "."), graph = graph,
               cex = cex, codeGraphInd = codeGraphInd, options = options)
      if((hab != "none") %dim0% TRUE & !is.null(param$quali.sup)) {
        writeRmd(file = file)
        i = i + 1
        graphHab(res, file = file, dim = dim, hab = hab, ellipse = ellipse,
                 Iselec = Iselec, Icoef = Icoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
      i = i + 1
      graphVar(res, file = file, dim = dim, Vselec = Vselec, Vcoef = Vcoef,
               figure.title = paste(figure.title, i, sep = "."), graph = graph,
               cex = cex, codeGraphVar = codeGraphVar, options = options)
      if(!is.null(param$quali.sup)) {
        i = i + 1
        graphSup(res, file = file, dim = dim, Mselec = Mselec, Mcoef = Mcoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
    },
    CA = {
      graphCA(res, file = file, dim = dim, Rselec = Rselec, Cselec = Cselec,
              Rcoef = Rcoef, Ccoef = Ccoef,
              figure.title = paste(figure.title, i, sep = "."), graph = graph,
              cex = cex, codeGraphCA = codeGraphCA, options = options)
      if((hab != "none") %dim0% TRUE & !is.null(param$quali.sup)) {
        writeRmd(file = file)
        i = i + 1
        graphHab(res, file = file, dim = dim, hab = hab, ellipse = ellipse,
                 Rselec = Rselec, Cselec = Cselec, Rcoef = Rcoef, Ccoef = Ccoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
      if(!is.null(param$quanti.sup)) {
        i = i + 1
        graphSup(res, file = file, dim = dim, Mselec = Mselec, Mcoef = Mcoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
    },
    CaGalt = {},
    MCA = {
      graphInd(res, file = file, dim = dim, Iselec = Iselec, Icoef = Icoef,
               figure.title = paste(figure.title, i, sep = "."), graph = graph,
               cex = cex, codeGraphInd = codeGraphInd, options = options)
      if((hab != "none") %dim0% TRUE & !is.null(param$quali.sup)) {
        writeRmd(file = file)
        i = i + 1
        graphHab(res, file = file, dim = dim, hab = hab, ellipse = ellipse,
                 Iselec = Iselec, Icoef = Icoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
      i = i + 1
      graphVar(res, file = file, dim = dim, Vselec = Vselec, Vcoef = Vcoef,
               figure.title = paste(figure.title, i, sep = "."), graph = graph,
               cex = cex, codeGraphVar = codeGraphVar, options = options)
      if(!is.null(param$quanti.sup)) {
        i = i + 1
        graphSup(res, file = file, dim = dim, Mselec = Mselec, Mcoef = Mcoef,
                 figure.title = paste(figure.title, i, sep = "."), graph = graph,
                 cex = cex, options = options)
      }
    },
    MFA = {}, HMFA = {}, DMFA = {}, FAMD = {}, GPA = {}, HCPC = {})
}
local_bru_testthat_setup() test_that("bru: clinear component", { skip_on_cran() local_bru_safe_inla() set.seed(123) input.df <- data.frame(x = cos(1:100)) input.df <- within(input.df, y <- 5 + 2 * x + rnorm(100, mean = 0, sd = 0.1)) fit <- bru( y ~ myLin(main = x, model = "clinear", range = c(0, Inf)) + Intercept(1), family = "gaussian", data = input.df ) expect_equal( fit$summary.random[["myLin"]][1, "mean"], 2.002517, tolerance = midtol ) expect_equal( fit$summary.random[["myLin"]][1, "sd"], 0.013, tolerance = hitol ) pr <- predict( fit, data.frame(x = c(1, 2)), ~ myLin + 2, n.samples = 5, seed = 1L ) expect_equal(pr[, "mean"], c(4.0, 6.0), tolerance = midtol) })
knitr::opts_chunk$set(eval = TRUE, message = FALSE, results = 'asis', comment='') options(width = 120) library(arsenal) df1 <- data.frame(id = paste0("person", 1:3), a = c("a", "b", "c"), b = c(1, 3, 4), c = c("f", "e", "d"), row.names = paste0("rn", 1:3), stringsAsFactors = FALSE) df2 <- data.frame(id = paste0("person", 3:1), a = c("c", "b", "a"), b = c(1, 3, 4), d = paste0("rn", 1:3), row.names = paste0("rn", c(1,3,2)), stringsAsFactors = FALSE) comparedf(df1, df2) summary(comparedf(df1, df2)) summary(comparedf(df1, df2, by = "id")) data(mockstudy) mockstudy2 <- muck_up_mockstudy() summary(comparedf(mockstudy, mockstudy2, by = "case")) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = "case")) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case") )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c(arm = "Arm", fu.stat = "fu stat", fu.time = "fu_time") )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE, tol.num.val = 10 )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE, tol.num.val = 10, tol.factor = "labels" )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE, tol.num.val = 10, tol.factor = "labels", factor.as.char = TRUE )) summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE, tol.num.val = 10, tol.factor = "labels", factor.as.char = TRUE, tol.char = "case" )) my.tol <- function(x, y, tol) { tol.NA(x, y, x > y) } date.df1 <- data.frame(dt = as.Date(c("2017-09-07", "2017-08-08", "2017-07-09", NA))) date.df2 <- data.frame(dt = as.Date(c("2017-10-01", "2017-08-08", "2017-07-10", "2017-01-01"))) n.diffs(comparedf(date.df1, date.df2)) n.diffs(comparedf(date.df1, date.df2, tol.date = my.tol)) n.diffs(comparedf(date.df2, date.df1, tol.date = my.tol)) tol.minus9 <- function(x, y, tol) { idx1 <- is.na(x) & !is.na(y) & y == -9 idx2 <- tol.num.absolute(x, y, tol) return(!idx1 & idx2) } summary(comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE, tol.num.val = 10, tol.factor = "labels", factor.as.char = TRUE, tol.char = "case", tol.num = tol.minus9 )) cmp <- comparedf(mockstudy, mockstudy2, by = "case", tol.vars = c("._ ", "case"), int.as.num = TRUE) n.diffs(cmp) head(diffs(cmp)) diffs(cmp, by.var = TRUE) diffs(cmp, vars = c("ps", "ast"), by.var = TRUE) diffs(cmp, vars = c("ps", "ast")) obj <- comparedf(mockstudy, mockstudy2, by = "case") print(obj$frame.summary) print(obj$vars.summary)
runn_mean<-function(vec,runn_mean,na.rm=FALSE,exclude_central_value=FALSE,FUN=mean) { runn_mean<-min(c(runn_mean,floor(length(vec)/2))) if(identical(FUN,mean)) if(na.rm==TRUE) FUN<-function(x) mean(x,na.rm=TRUE) else FUN<-function(x) mean(x,na.rm=FALSE) ww <- vec rr <- vec for (dd in 1:length(ww)) { if (dd < ceiling(runn_mean/2)) { if(!exclude_central_value) rr[dd] <- sapply(list(ww[1:(dd + floor(runn_mean/2))]),FUN) if(exclude_central_value) rr[dd] <- sapply(list(ww[(1:(dd + floor(runn_mean/2)))[which(!(1:(dd + floor(runn_mean/2)))==dd)]]),FUN) } if ((dd >= ceiling(runn_mean/2)) & (dd <= length(ww) - ceiling(runn_mean/2))) { if(!exclude_central_value) rr[dd] <- sapply(list(ww[(dd - floor(runn_mean/2)):(dd + floor(runn_mean/2))]),FUN) if(exclude_central_value) rr[dd] <- sapply(list(ww[((dd - floor(runn_mean/2)):(dd + floor(runn_mean/2)))[ which(!((dd - floor(runn_mean/2)):(dd + floor(runn_mean/2)))==dd)]]),FUN) } if (dd > (length(ww) - ceiling(runn_mean/2))) { if(!exclude_central_value) rr[dd] <- sapply(list(ww[(dd - floor(runn_mean/2)):length(ww)]),FUN) if(exclude_central_value) rr[dd] <- sapply(list(ww[((dd - floor(runn_mean/2)):length(ww))[ which(!((dd - floor(runn_mean/2)):length(ww))==dd)]]),FUN) } } return(rr)} runn_mean_pred<-function(indep,dep,pred,runn_mean=11,na.rm=FALSE,exclude_central_value=FALSE,FUN=mean) { runn_mean<-min(c(runn_mean,floor(length(indep)/2))) runny<-runn_mean(dep,runn_mean,na.rm=na.rm,exclude_central_value=exclude_central_value,FUN=FUN) pred_fun<-function(pr) { if(is.na(pr)) return(NA) if(!(min(indep,na.rm=TRUE)<=pr&pr<=max(indep,na.rm=TRUE))) return(NA) start_y<-runny[max(which(indep<=pr))] end_y<-runny[min(which(indep>=pr))] start_x<-indep[max(which(indep<=pr))] end_x<-indep[min(which(indep>=pr))] if(end_x==start_x) out<-runny[which(indep==pr)] else out<-start_y+(end_y-start_y)*((pr-start_x)/(end_x-start_x)) return(out) } return(list(x=pred,predicted=sapply(pred,pred_fun))) }
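# Self-contained example: an 11-point running mean of a noisy sine, plus
# linear interpolation of the smoothed curve at arbitrary positions.
if (FALSE) {
  x <- seq(0, 2 * pi, length.out = 100)
  y <- sin(x) + rnorm(100, sd = 0.2)
  sm <- runn_mean(y, runn_mean = 11)
  runn_mean_pred(indep = x, dep = y, pred = c(1.5, 3.0), runn_mean = 11)$predicted
}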
getEllipseMatrix <- function(mat, center=c(0,0), traits, sample.line=NULL, n.points=200){ if( is.matrix(mat) ){ ellCtr <- ellipse::ellipse(x = mat, centre = center, which = traits, n.points = n.points) return( ellCtr ) } else{ if( !is.null( sample.line ) ){ ss <- 1:length(mat) ii <- sample(ss, size = sample.line) qq <- 1:length(ii) mat.list <- mat[ii] ellCtr <- lapply(qq, function(x) ellipse::ellipse(x = mat[[x]], centre = center, which = traits, n.points = n.points) ) } if( is.null( sample.line ) ){ ss <- 1:length(mat) ellCtr <- lapply(ss, function(x) ellipse::ellipse(x = mat[[x]], centre = center, which = traits, n.points = n.points) ) } all.points <- do.call(rbind, ellCtr) xlim <- range(all.points[,1]) ylim <- range(all.points[,2]) return( list(limits=data.frame("xlim"=xlim, "ylim"=ylim), ellCtr=ellCtr) ) } }
bt.cluster.risk.parity.weights.test <- function() { load.packages('quantmod') tickers = spl('GLD,UUP,SPY,QQQ,IWM,EEM,EFA,IYR,USO,TLT') map = spl('Gold GLD,US Dollar UUP,S&P500 SPY,Nasdaq QQQ,Small Cap IWM,EmergingM EEM,InternationalM EFA,Real Estate IYR,Oil USO,Treasurys TLT') names(map) = tickers data <- new.env() getSymbols(tickers, src = 'yahoo', from = '1900-01-01', env = data, auto.assign = T) for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T) for(i in ls(data)) data[[ map[i] ]] = data[[i]] rm(list=tickers, envir=data) bt.prep(data, align='remove.na', dates = '2011:12::2012') periodicity = 'months' lookback.len = 250 cluster.group <<- cluster.group.kmeans.90 obj = portfolio.allocation.helper(data$prices, periodicity = periodicity, lookback.len = lookback.len, min.risk.fns = list( EW=equal.weight.portfolio, RP=risk.parity.portfolio(), ERC=equal.risk.contribution.portfolio, G2.EW = distribute.weights(equal.weight.portfolio, cluster.group), G2.RP=distribute.weights(risk.parity.portfolio(), cluster.group), G2.MV=distribute.weights(min.var.portfolio, cluster.group), G2.ERC=distribute.weights(equal.risk.contribution.portfolio, cluster.group) ), adjust2positive.definite = F, custom.stats.fn = portfolio.allocation.custom.stats.clusters ) clusters = coredata(obj$clusters$EW)[13,] temp = clusters temp[] = 0 temp[clusters == clusters[names(clusters) == 'Treasurys TLT']] = 1 temp[clusters == clusters[names(clusters) == 'US Dollar UUP']] = 2 temp[clusters == clusters[names(clusters) == 'EmergingM EEM']] = 3 temp[clusters == clusters[names(clusters) == 'Gold GLD']] = 4 clusters = temp png(filename = 'plot1.png', width = 1200, height = 600, units = 'px', pointsize = 12, bg = 'white') layout(matrix(1:2,nc=2)) plot.cluster.weights(coredata(obj$weights$ERC)[13,], clusters, main='ERC Weights') plot.cluster.weights(coredata(obj$risk.contributions$ERC)[13,], clusters, main='ERC Risk Contributions') dev.off() png(filename = 'plot2.png', width = 1200, height = 600, units = 'px', pointsize = 12, bg = 'white') layout(matrix(1:2,nc=2)) plot.cluster.weights(coredata(obj$weights$G2.ERC)[13,], clusters, main='Cluster ERC Weights') plot.cluster.weights(coredata(obj$risk.contributions$G2.ERC)[13,], clusters, main='Cluster ERC Risk Contributions') dev.off() } bt.cluster.risk.parity.10.major.assets <- function() { tickers = spl('SPY,EFA,EWJ,EEM,IYR,RWX,IEF,TLT,DBC,GLD') dates='2004:12::' name = 'ETFs AAA' load.packages('quantmod') data <- new.env() getSymbols(tickers, src = 'yahoo', from = '2000-01-01', env = data, auto.assign = T) for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T) bt.prep(data, align='keep.all', dates=dates, fill.gaps=T) periodicity = 'weeks' lookback.len = 250 cluster.group <<- cluster.group.kmeans.90 obj = portfolio.allocation.helper(data$prices, periodicity = periodicity, lookback.len = lookback.len, min.risk.fns = list( EW=equal.weight.portfolio, RP=risk.parity.portfolio(), ERC=equal.risk.contribution.portfolio, Dynamic.EW = distribute.weights(equal.weight.portfolio, cluster.group), Dynamic.RP=distribute.weights(risk.parity.portfolio(), cluster.group), Dynamic.ERC=distribute.weights(equal.risk.contribution.portfolio, cluster.group) ), adjust2positive.definite = F, custom.stats.fn = portfolio.allocation.custom.stats ) models = create.strategies(obj, data, dates=(lookback.len):nrow(data$prices))$models title = paste(name, '(' ,periodicity, ',' , lookback.len, 'days )') stats = bt.summary.report(models, title, data, obj, control = list( 
plot.weight.transition.maps = F,
      plot.risk.contribution.transition.maps = F) )
}

bt.cluster.risk.parity.dow.30 <- function() {
  load.packages('quantmod')
  dates='1995::'
  name = 'Dow Jones 30'
  data = load.dow.jones(align='keep.all', dates=dates)
  sectors = data$sectors
  tickers = data$symbolnames
  periodicity = 'weeks'
  lookback.len = 250
  cluster.group <<- cluster.group.kmeans.90
  obj = portfolio.allocation.helper(data$prices,
    periodicity = periodicity, lookback.len = lookback.len,
    min.risk.fns = list(EW=equal.weight.portfolio,
      RP=risk.parity.portfolio(),
      ERC=equal.risk.contribution.portfolio,
      Static.EW = distribute.weights(equal.weight.portfolio, static.group(as.numeric(sectors))),
      Static.RP=distribute.weights(risk.parity.portfolio(), static.group(sectors)),
      Static.ERC=distribute.weights(equal.risk.contribution.portfolio, static.group(sectors)),
      Dynamic.EW = distribute.weights(equal.weight.portfolio, cluster.group),
      Dynamic.RP=distribute.weights(risk.parity.portfolio(), cluster.group),
      Dynamic.ERC=distribute.weights(equal.risk.contribution.portfolio, cluster.group)
    ),
    adjust2positive.definite = F,
    custom.stats.fn = portfolio.allocation.custom.stats
  )
  models = create.strategies(obj, data, dates=(lookback.len):nrow(data$prices))$models
  title = paste(name, '(' ,periodicity, ',' , lookback.len, 'days )')
  stats = bt.summary.report(models, title, data, obj,
    control = list( plot.weight.transition.maps = F,
      plot.risk.contribution.transition.maps = F) )
}

load.dow.jones <- function(align='remove.na', dates = NULL) {
  tickers = spl('XLY,XLP,XLE,XLF,XLV,XLI,XLB,XLK,XLU')
  tickers.desc = spl('ConsumerCyclicals,ConsumerStaples,Energy,Financials,HealthCare,Industrials,Materials,Technology,Utilities')
  sector.map = c()
  for(i in 1:len(tickers)) {
    sector.map = rbind(sector.map, cbind(sector.spdr.components(tickers[i]), tickers.desc[i]) )
  }
  colnames(sector.map) = spl('ticker,sector')
  load.packages('quantmod')
  tickers = dow.jones.components()
  sectors = factor(sector.map[ match(tickers, sector.map[,'ticker']), 'sector'])
  names(sectors) = tickers
  data <- new.env()
  getSymbols(tickers, src = 'yahoo', from = '1900-01-01', env = data, auto.assign = T)
  for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)
  bt.prep(data, align=align, dates=dates)
  data$sectors = sectors[data$symbolnames]
  return(data)
}

portfolio.allocation.custom.stats.clusters <- function(x,ia) {
  risk.contributions = portfolio.risk.contribution(x, ia)
  clusters = cluster.group(ia)
  return(list(
    risk.contributions = risk.contributions,
    clusters = clusters,
    ncluster = max(clusters)
  ))
}

bt.summary.report <- function(models, title, data, obj=NULL,
  control = list(
    plot.weight.transition.maps = F,
    plot.risk.contribution.transition.maps = !is.null(obj)
  )
) {
  if(is.null(control$plot.weight.transition.maps))
    control$plot.weight.transition.maps = F
  if(is.null(control$plot.risk.contribution.transition.maps))
    control$plot.risk.contribution.transition.maps = !is.null(obj)
  filename = title
  filename.pdf = paste(filename, '.pdf', sep='')
  filename.csv = paste(filename, '.csv', sep='')
  pdf(file = filename.pdf, width=8.5, height=11)
  layout(1:2)
  plotbt(models, plotX = T, log = 'y', LeftMargin = 3, main = title)
  mtext('Cumulative Performance', side = 2, line = 1)
  out = plotbt.strategy.sidebyside(models, return.table=T)
  cdi = custom.composite.diversification.indicator(obj, plot.main = F, plot.table = F)
  out = rbind(colMeans(cdi, na.rm=T), out)
  rownames(out)[1] = 'Composite Diversification Indicator(CDI)'
  y = 100 * sapply(models, compute.turnover, data)
  out = rbind(y, out)
rownames(out)[1] = 'Portfolio Turnover'
  performance.barchart.helper(out,
    'Sharpe,Cagr,DVR,MaxDD,Volatility,Portfolio Turnover,Composite Diversification Indicator(CDI)',
    c(T,T,T,T,F,F,T))
  if(control$plot.weight.transition.maps) {
    layout(1:4)
    for(m in names(models)) {
      plotbt.transition.map(models[[m]]$weight, name=m)
      legend('topright', legend = m, bty = 'n')
    }
  }
  if(control$plot.risk.contribution.transition.maps) {
    dates = index(data$prices)[obj$period.ends]
    layout(1:4)
    for(m in names(models)) {
      plotbt.transition.map(make.xts(obj$risk.contributions[[m]], dates),
                            name=paste('Risk Contributions',m))
      legend('topright', legend = m, bty = 'n')
    }
  }
  dev.off()
  load.packages('abind')
  append=FALSE
  cat(title, '\n', file=filename.csv, append=append)
  write.table(out, sep=',', row.names = TRUE, col.names = NA, file=filename.csv, append=TRUE)
  cat('\n\n', file=filename.csv, append=TRUE)
  if(F) {
    out = abind(lapply(models, function(m) m$equity))
    colnames(out) = names(models)
    write.xts(make.xts(out, index(models[[1]]$equity)), filename.csv, append=TRUE)
  }
  return(out)
}

pie.labels.fix <- function (x, y, angles, labels, radius = 1, ...) {
  par(xpd = TRUE)
  xylim <- par("usr")
  plotdim <- par("pin")
  yradius <- radius * (xylim[4] - xylim[3])/(xylim[2] - xylim[1]) * plotdim[1]/plotdim[2]
  xc <- cos(angles) * radius + x
  yc <- sin(angles) * yradius + y
  text(xc, yc, labels, ...)
  par(xpd = FALSE)
}

plot.cluster.weights <- function(weight, clusters, main='') {
  load.packages('RColorBrewer,plotrix')
  clusters = sort(clusters)
  weight = weight[names(clusters)]
  weight.cluster = tapply(weight,clusters,sum)
  counts = tapply(names(clusters),clusters,len)
  ncluster = len(counts)
  require(RColorBrewer)
  colors = colorRampPalette(brewer.pal(iif(ncluster>9,9,ncluster),'Set1'))(ncluster)
  cols = c()
  for(i in 1:ncluster)
    cols = c(cols, col.add.alpha(colors[i], seq(200,100,length.out = counts[i])))
  if(F) {
    plot(-1:1, -1:1, main=main, type='n', yaxt = 'n', xaxt = 'n', xlab = '', ylab = '', axes = F)
    bisect.angles = floating.pie(0,0,weight, col=cols, border='white', radius=0.9, cex=0.8)
    pie.labels(0,0,bisect.angles,names(weight),radius=1,bg=0,border=F, srt=bisect.angles)
  }
  par(mar = c(2,2,2,2))
  pie(weight, col=cols, border='white', radius=0.9, main=main)
  require(plotrix)
  bisect.angles = floating.pie(0,0,weight.cluster,radius=0.5,col=colors,border='white')
  pie.labels.fix(0,0,bisect.angles,paste(round(100*weight.cluster,0),'%',sep=''),radius=0.2)
}
gpat_read_distmtx = function(x){
  # drop the first (label) column and coerce the rest to a 'dist' object
  as.dist(suppressMessages(readr::read_csv(x, progress = FALSE))[, -1])
}
context("Scenario of un wanted inputs") test_that("NA values are avoided",{ expect_that(dBETA(NA,0.1,3), throws_error("NA or Infinite or NAN values in the Input")) }) test_that("Infinite values are avoided",{ expect_that(dBETA(Inf,0.1,3), throws_error("NA or Infinite or NAN values in the Input")) }) test_that("NAN values are avoided",{ expect_that(dBETA(NaN,0.1,3), throws_error("NA or Infinite or NAN values in the Input")) }) context("Scenario of invalid inputs") test_that("Greater than 1",{ expect_that(dBETA(3,0.1,3), throws_error("Invalid values in the input")) }) test_that("Lesser than 1",{ expect_that(dBETA(-3,0.1,3), throws_error("Invalid values in the input")) }) context("Scenario of shape parameters") test_that("shape parameter b",{ expect_that(dBETA(0.1,5,-4), throws_error("Shape parameters cannot be less than or equal to zero")) }) test_that("shape parameter a",{ expect_that(dBETA(0.1,-5,4), throws_error("Shape parameters cannot be less than or equal to zero")) })
context("Checking misc: proper handling of missing values") source("settings.r") test_that("rma.glmm() handles NAs correctly.", { skip_on_cran() dat <- data.frame(ni = rep(20, 10), xi = c(NA, 4, 0, 0, 2, 2, 3, 8, 9, 2), mod1 = c(0, NA, 0, 0, 0, 0, 0, 1, 1, 1), mod2 = c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)) expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1, data=dat)) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) expect_equivalent(res$k.yi, 8) expect_equivalent(length(res$yi), 8) expect_equivalent(length(res$vi), 8) expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1, data=dat, add=0)) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) expect_equivalent(res$k.yi, 6) expect_equivalent(length(res$yi), 6) expect_equivalent(length(res$vi), 6) expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) expect_warning(res <- rma.glmm(measure="PLO", xi=xi, ni=ni, mods = ~ mod1 + mod2, data=dat, add=0)) expect_equivalent(res$k, 8) expect_equivalent(length(res$xi), 8) expect_equivalent(length(res$mi), 8) expect_equivalent(nrow(res$X), 8) expect_equivalent(res$k.yi, 6) expect_equivalent(length(res$yi), 6) expect_equivalent(length(res$vi), 6) expect_equivalent(res$k.f, 10) expect_equivalent(length(res$xi.f), 10) expect_equivalent(length(res$mi.f), 10) expect_equivalent(nrow(res$X.f), 10) expect_equivalent(length(res$yi.f), 10) expect_equivalent(length(res$vi.f), 10) }) rm(list=ls())
svmEval <- function(X, grp, train, kfold=10, gamvec=seq(0,10,by=1),
                    kernel="radial", degree=3, plotit=TRUE, legend=TRUE,
                    legpos="bottomright", ...){
  evalSEfac <- function(pred, grptrain, spltr, grplev){
    kfold=max(spltr)
    k=length(grplev)
    misscli=rep(NA,k)
    for (i in 1:kfold){
      tab=table(grptrain[spltr==i], pred[spltr==i])
      misscli[i]=1-sum(diag(tab))/sum(tab)
    }
    list(mean=mean(misscli), se=sd(misscli)/sqrt(kfold), all=misscli)
  }
  ntrain=length(train)
  lgamvec=length(gamvec)
  trainerr=rep(NA,lgamvec)
  testerr=rep(NA,lgamvec)
  cvMean=rep(NA,lgamvec)
  cvSe=rep(NA,lgamvec)
  cverr=matrix(NA,nrow=kfold,ncol=lgamvec)
  for (j in 1:lgamvec){
    # pass the user-supplied polynomial degree through to svm()
    ressvm=svm(X[train,], factor(grp[train]), kernel=kernel, degree=degree, gamma=gamvec[j])
    pred=predict(ressvm, X[-train,])
    tab=table(grp[-train], pred)
    testerr[j] <- 1-sum(diag(tab))/sum(tab)
    pred=predict(ressvm, X[train,])
    tab=table(grp[train], pred)
    trainerr[j] <- 1-sum(diag(tab))/sum(tab)
    splt <- rep(1:kfold, length=ntrain)
    spltr <- sample(splt, ntrain)
    pred <- factor(rep(NA,ntrain), levels=levels(grp))
    for (i in 1:kfold){
      res <- svm(X[train[spltr!=i],], factor(grp[train[spltr!=i]]),
                 kernel=kernel, degree=degree, gamma=gamvec[j])
      pred[spltr==i] <- predict(res, X[train[spltr==i],])
    }
    resi=evalSEfac(pred, grp[train], spltr, levels(grp))
    cverr[,j] <- resi$all
    cvMean[j] <- resi$mean
    cvSe[j] <- resi$se
  }
  if (plotit){
    ymax=max(trainerr, testerr, cvMean+cvSe)
    vgamvec=seq(1,lgamvec)
    plot(vgamvec, trainerr, ylim=c(0,ymax), xlab="Gamma",
         ylab="Misclassification error", cex.lab=1.2, type="l", lty=2, xaxt="n", ...)
    axis(1, at=vgamvec, labels=gamvec)
    points(vgamvec, trainerr, pch=4)
    lines(vgamvec, testerr, lty=1, lwd=1.3)
    points(vgamvec, testerr, pch=1)
    lines(vgamvec, cvMean, lty=1)
    points(vgamvec, cvMean, pch=16)
    for (i in 1:lgamvec){
      segments(vgamvec[i], cvMean[i]-cvSe[i], vgamvec[i], cvMean[i]+cvSe[i])
      segments(vgamvec[i]-0.2, cvMean[i]-cvSe[i], vgamvec[i]+0.2, cvMean[i]-cvSe[i])
      segments(vgamvec[i]-0.2, cvMean[i]+cvSe[i], vgamvec[i]+0.2, cvMean[i]+cvSe[i])
    }
    abline(h=min(cvMean)+cvSe[which.min(cvMean)], lty=3, lwd=1.2)
    if (legend){
      legend(legpos, c("Test error","CV error","Training error"),
             lty=c(1,1,2), lwd=c(1.3,1,1), pch=c(1,16,4))
    }
  }
  list(trainerr=trainerr, testerr=testerr, cvMean=cvMean, cvSe=cvSe,
       cverr=cverr, gamvec=gamvec)
}
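# Usage sketch (assumes e1071 is attached for svm()): tune the RBF gamma on a
# two-class subset of iris with a held-out test set.
if (FALSE) {
  library(e1071)
  X <- as.matrix(iris[1:100, 1:4])
  grp <- factor(iris$Species[1:100])   # setosa vs versicolor
  res <- svmEval(X, grp, train = sample(100, 70), gamvec = c(0.01, 0.1, 1, 5))
}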
search_catalogues <- function(string, refresh = FALSE) {
  stopifnot(!missing(string))
  if (isFALSE(refresh)) {
    df <- abs_lookup_table
  } else {
    df <- scrape_abs_catalogues()
  }
  # compare as versions, not strings
  if (utils::packageVersion("dplyr") > "1.0.4") {
    out <- df %>%
      dplyr::filter(dplyr::if_any(.cols = dplyr::everything(),
                                  ~grepl(string, .x, perl = TRUE, ignore.case = TRUE)))
  } else {
    matches <- purrr::map_dfr(df, grepl, pattern = string, perl = TRUE, ignore.case = TRUE) %>%
      rowSums()
    out <- dplyr::tibble(sum_true = matches) %>%
      dplyr::bind_cols(df) %>%
      dplyr::filter(.data$sum_true >= 1) %>%
      dplyr::select(-.data$sum_true)
  }
  out <- out %>% dplyr::mutate(url = as.character(url))
  return(out)
}
"demo_data"
mean(Sepal.Length ~ Species, data = iris) var(Sepal.Length ~ Species, data = iris) sd(Sepal.Length ~ Species, data = iris) favstats(Sepal.Length ~ Species, data = iris) df_stats(Sepal.Length ~ Species, data = iris, mean, var, sd)
context("test-ldhap") test_that("ldhap throws an error", { skip_on_cran() expect_error(LDhap(c("r3", "rs4", "rs148890987"), "CEU", token = Sys.getenv("LDLINK_TOKEN"))) }) test_that("ldhap works", { skip_on_cran() expect_named(LDhap(c("rs3", "rs4", "rs148890987"), "CEU", token = Sys.getenv("LDLINK_TOKEN"))) })
kmsymmsetdiff <- function(x, y) { (1 * (x | y) - 1 * (x & y)) } kmsetdistance <- function(x, y) { sum(1 * (x | y) - 1 * (x & y)) }
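# Self-contained example: element-wise symmetric difference of two logical
# membership vectors, and its size (the Hamming distance between the sets).
local({
  x <- c(TRUE, TRUE, FALSE)
  y <- c(TRUE, FALSE, FALSE)
  stopifnot(identical(kmsymmsetdiff(x, y), c(0, 1, 0)))
  stopifnot(kmsetdistance(x, y) == 1)
})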
layout_with_drl <- function(graph, use.seed = FALSE, seed=matrix(runif(vcount(graph)*2), ncol=2), options=drl_defaults$default, weights=E(graph)$weight, fixed=NULL, dim=2) { if (!is_igraph(graph)) { stop("Not a graph object") } if (dim != 2 && dim != 3) { stop("`dim' must be 2 or 3") } use.seed <- as.logical(use.seed) seed <- as.matrix(seed) options.tmp <- drl_defaults$default options.tmp[names(options)] <- options options <- options.tmp if (!is.null(weights)) { weights <- as.numeric(weights) } if (!is.null(fixed)) { fixed <- as.logical(fixed) } on.exit(.Call(C_R_igraph_finalizer)) if (dim==2) { res <- .Call(C_R_igraph_layout_drl, graph, seed, use.seed, options, weights, fixed) } else { res <- .Call(C_R_igraph_layout_drl_3d, graph, seed, use.seed, options, weights, fixed) } res } with_drl <- function(...) layout_spec(layout_with_drl, ...) igraph.drl.default <- list(edge.cut=32/40, init.iterations=0, init.temperature=2000, init.attraction=10, init.damping.mult=1.0, liquid.iterations=200, liquid.temperature=2000, liquid.attraction=10, liquid.damping.mult=1.0, expansion.iterations=200, expansion.temperature=2000, expansion.attraction=2, expansion.damping.mult=1.0, cooldown.iterations=200, cooldown.temperature=2000, cooldown.attraction=1, cooldown.damping.mult=.1, crunch.iterations=50, crunch.temperature=250, crunch.attraction=1, crunch.damping.mult=0.25, simmer.iterations=100, simmer.temperature=250, simmer.attraction=.5, simmer.damping.mult=0) igraph.drl.coarsen <- list(edge.cut=32/40, init.iterations=0, init.temperature=2000, init.attraction=10, init.damping.mult=1.0, liquid.iterations=200, liquid.temperature=2000, liquid.attraction=2, liquid.damping.mult=1.0, expansion.iterations=200, expansion.temperature=2000, expansion.attraction=10, expansion.damping.mult=1.0, cooldown.iterations=200, cooldown.temperature=2000, cooldown.attraction=1, cooldown.damping.mult=.1, crunch.iterations=50, crunch.temperature=250, crunch.attraction=1, crunch.damping.mult=0.25, simmer.iterations=100, simmer.temperature=250, simmer.attraction=.5, simmer.damping.mult=0) igraph.drl.coarsest <- list(edge.cut=32/40, init.iterations=0, init.temperature=2000, init.attraction=10, init.damping.mult=1.0, liquid.iterations=200, liquid.temperature=2000, liquid.attraction=2, liquid.damping.mult=1.0, expansion.iterations=200, expansion.temperature=2000, expansion.attraction=10, expansion.damping.mult=1.0, cooldown.iterations=200, cooldown.temperature=2000, cooldown.attraction=1, cooldown.damping.mult=.1, crunch.iterations=200, crunch.temperature=250, crunch.attraction=1, crunch.damping.mult=0.25, simmer.iterations=100, simmer.temperature=250, simmer.attraction=.5, simmer.damping.mult=0) igraph.drl.refine <- list(edge.cut=32/40, init.iterations=0, init.temperature=50, init.attraction=.5, init.damping.mult=1.0, liquid.iterations=0, liquid.temperature=2000, liquid.attraction=2, liquid.damping.mult=1.0, expansion.iterations=50, expansion.temperature=500, expansion.attraction=.1, expansion.damping.mult=.25, cooldown.iterations=50, cooldown.temperature=250, cooldown.attraction=1, cooldown.damping.mult=.1, crunch.iterations=50, crunch.temperature=250, crunch.attraction=1, crunch.damping.mult=0.25, simmer.iterations=0, simmer.temperature=250, simmer.attraction=.5, simmer.damping.mult=0) igraph.drl.final <- list(edge.cut=32/40, init.iterations=0, init.temperature=50, init.attraction=.5, init.damping.mult=0, liquid.iterations=0, liquid.temperature=2000, liquid.attraction=2, liquid.damping.mult=1.0, expansion.iterations=50, 
expansion.temperature=2000, expansion.attraction=2, expansion.damping.mult=1.0, cooldown.iterations=50, cooldown.temperature=200, cooldown.attraction=1, cooldown.damping.mult=.1, crunch.iterations=50, crunch.temperature=250, crunch.attraction=1, crunch.damping.mult=0.25, simmer.iterations=25, simmer.temperature=250, simmer.attraction=.5, simmer.damping.mult=0) drl_defaults <- list( coarsen = igraph.drl.coarsen, coarsest = igraph.drl.coarsest, default = igraph.drl.default, final = igraph.drl.final, refine = igraph.drl.refine )
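# Usage sketch (assumes igraph is attached): lay out a random graph with DrL,
# then refine the result by feeding it back in via use.seed.
if (FALSE) {
  g <- sample_gnp(100, 0.05)
  xy <- layout_with_drl(g, options = drl_defaults$default)
  xy2 <- layout_with_drl(g, use.seed = TRUE, seed = xy,
                         options = drl_defaults$refine)
  plot(g, layout = xy2)
}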
webmockr_reset <- function() { stub_registry_clear() request_registry_clear() invisible(NULL) }
CEEMDANelm <- function(data, stepahead=10, num.IMFs=emd_num_imfs(length(data)), s.num=4L, num.sift=50L, ensem.size=250L, noise.st=0.2){ n.IMF <- num.IMFs AllIMF <- ceemdan(ts(data), num_imfs = n.IMF, ensemble_size = ensem.size, noise_strength = noise.st, S_number = s.num, num_siftings = num.sift, rng_seed = 0L, threads = 0L) data_trn <- ts(head(data, round(length(data) - stepahead))) data_test <- ts(tail(data, stepahead)) IMF_trn <- AllIMF[-c(((length(data)-stepahead)+1):length(data)),] Fcast_AllIMF <- NULL for (IMF in 1:ncol(IMF_trn)) { IndIMF <- NULL IndIMF <- IMF_trn[ ,IMF] CEEMDANELMFit <- nnfor::elm(as.ts(IndIMF), keep = NULL, difforder = NULL, outplot = c( FALSE), sel.lag = c(FALSE), direct = c(FALSE), allow.det.season = c(FALSE)) CEEMDANELM_fcast=forecast::forecast(CEEMDANELMFit, h=stepahead) CEEMDANELM_fcast_Mean=CEEMDANELM_fcast$mean Fcast_AllIMF <- cbind(Fcast_AllIMF, as.matrix(CEEMDANELM_fcast_Mean)) } FinalCEEMDANELM_fcast <- ts(rowSums(Fcast_AllIMF, na.rm = T)) MAE_CEEMDANELM=mean(abs(data_test - FinalCEEMDANELM_fcast)) MAPE_CEEMDANELM=mean(abs(data_test - FinalCEEMDANELM_fcast)/data_test) rmse_CEEMDANELM=sqrt(mean((data_test - FinalCEEMDANELM_fcast)^2)) Plot_IMFs <- AllIMF AllIMF_plots <- plot(Plot_IMFs) return(list(TotalIMF = n.IMF, data_test=data_test, AllIMF_forecast=Fcast_AllIMF, FinalCEEMDANELM_forecast=FinalCEEMDANELM_fcast, MAE_CEEMDANELM=MAE_CEEMDANELM, MAPE_CEEMDANELM=MAPE_CEEMDANELM, rmse_CEEMDANELM=rmse_CEEMDANELM, AllIMF_plots=AllIMF_plots)) }
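# Usage sketch (assumes the Rlibeemd, nnfor and forecast packages are
# installed): decompose-and-forecast the AirPassengers series, holding out
# the last 12 observations for the accuracy measures.
if (FALSE) {
  fit <- CEEMDANelm(as.numeric(AirPassengers), stepahead = 12)
  fit$rmse_CEEMDANELM
}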
context("bundles") test_that("pack bundle", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_equal(dir(path_bundles), basename(res$path)) expect_equal(basename(res$path), paste0(res$id, ".zip")) path2 <- paste0(path, "-moved") file.rename(path, path2) workdir <- tempfile() zip <- orderly_bundle_run(res$path, workdir, echo = FALSE) expect_equal(dir(workdir), basename(zip$path)) expect_equal(dir(workdir), zip$filename) orderly_bundle_import(zip$path, root = path2) expect_equal(orderly_list_archive(path2), data_frame(name = "example", id = res$id)) con <- orderly_db("destination", root = path2) on.exit(DBI::dbDisconnect(con)) db_rds <- orderly_db("rds", root = path2) rvd <- DBI::dbReadTable(con, "report_version_data") expect_equal(nrow(rvd), 1) expect_equal(rvd$report_version, res$id) expect_equal(rvd$database, "source") expect_equal(rvd$query, "SELECT name, number FROM thing") expect_equal(rvd$hash, db_rds$list()) d <- db_rds$get(rvd$hash) expect_is(d, "data.frame") expect_equal(names(d), c("name", "number")) }) test_that("can run a bundle in place if wanted", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_equal(dir(path_bundles), basename(res$path)) expect_equal(basename(res$path), paste0(res$id, ".zip")) l1 <- orderly_bundle_list(path_bundles) expect_equal(l1$id, res$id) expect_equal(l1$status, "incomplete") expect_equal(l1$name, "example") expect_equal(l1$parameters, I(list(NULL))) zip <- orderly_bundle_run(res$path, path_bundles, echo = FALSE) expect_true(same_path(zip$path, res$path)) expect_equal(zip$filename, basename(zip$path)) l2 <- orderly_bundle_list(path_bundles) l1$status <- "complete" expect_equal(l1, l2) }) test_that("pack a bundle that requires parameters", { path_src <- test_prepare_orderly_example("demo") path_bundles <- tempfile() path_workdir <- tempfile() res <- orderly_bundle_pack( path_bundles, "other", parameters = list(nmin = 0.5), root = path_src) info <- orderly_bundle_info(res$path) expect_equal(info$parameters, list(nmin = 0.5)) expect_true(all(info$data$data$extract$number >= 0.5)) zip <- orderly_bundle_run(res$path, path_workdir, echo = FALSE) orderly_bundle_import(zip$path, root = path_src) dat <- readRDS(path_orderly_run_rds( file.path(path_src, "archive", "other", res$id))) expect_match(dat$meta$data$query, "number > 0.5", fixed = TRUE) expect_equal(dat$meta$parameters, list(nmin = 0.5)) }) test_that("list a directory of bundles", { path <- test_prepare_orderly_example("demo") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() path_work <- tempfile() res1 <- orderly_bundle_pack( path_bundles, "other", parameters = list(nmin = 0), root = path) res2 <- orderly_bundle_pack( path_bundles, "other", parameters = list(nmin = 0.5), root = path) info1 <- orderly_bundle_list(path_bundles) expect_equal(info1$id, c(res1$id, res2$id)) expect_equal(info1$status, rep("incomplete", 2)) expect_equal(info1$name, rep("other", 2)) expect_equal(info1$parameters, I(list(list(nmin = 0), list(nmin = 0.5)))) expect_is(info1$time, "POSIXt") zip1 <- orderly_bundle_run(res1$path, path_bundles, echo = FALSE) info2 <- orderly_bundle_list(path_bundles) info1$status[[1]] <- "complete" expect_equal(info2, info1) }) test_that("can't run a bundle twice", { path <- 
test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_equal(dir(path_bundles), basename(res$path)) expect_equal(basename(res$path), paste0(res$id, ".zip")) zip <- orderly_bundle_run(res$path, path_bundles, echo = FALSE) expect_error(orderly_bundle_run(res$path, path_bundles, echo = FALSE), sprintf("Bundle '%s' has already been run", res$id)) }) test_that("Can't import a bundle twice", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_equal(dir(path_bundles), basename(res$path)) expect_equal(basename(res$path), paste0(res$id, ".zip")) zip <- orderly_bundle_run(res$path, path_bundles, echo = FALSE) orderly_bundle_import(zip$path, root = path) expect_error( orderly_bundle_import(zip$path, root = path), sprintf("example:%s already exists", res$id)) }) test_that("Can't extract a bundle onto itself", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) dir.create(file.path(path_bundles, res$id)) expect_error( orderly_bundle_run(res$path, path_bundles, echo = FALSE), sprintf("Can't unpack bundle '%s' here; it has already been extracted", res$id)) }) test_that("can run a bundle with dependencies", { path <- orderly::orderly_example("demo") orderly_run_internal("other", parameters = list(nmin = 0), root = path, commit = TRUE, echo = FALSE) path_bundles <- tempfile() path_work <- tempfile() res1 <- orderly_bundle_pack(path_bundles, "use_dependency", root = path) expect_true( file.path(res1$id, "pack/incoming.csv") %in% zip::zip_list(res1$path)$filename) res2 <- orderly_bundle_run(res1$path, path_work, echo = FALSE) orderly_bundle_import(res2$path, root = path) con <- orderly_db("destination", root = path) on.exit(DBI::dbDisconnect(con)) depends <- DBI::dbReadTable(con, "depends") expect_equal(depends$report_version, res2$id) expect_equal(depends$as, "incoming.csv") }) test_that("can run a bundle with global file dependencies", { path <- orderly::orderly_example("demo") path_bundles <- tempfile() path_work <- tempfile() res1 <- orderly_bundle_pack(path_bundles, "global", root = path) expect_true( file.path(res1$id, "pack/data.csv") %in% zip::zip_list(res1$path)$filename) res2 <- orderly_bundle_run(res1$path, path_work, echo = FALSE) orderly_bundle_import(res2$path, root = path) con <- orderly_db("destination", root = path) on.exit(DBI::dbDisconnect(con)) fig <- DBI::dbReadTable(con, "file_input_global") expect_equal(fig$filename, "data.csv") }) test_that("can't cope with connections", { path <- orderly::orderly_example("demo") path_bundles <- tempfile() expect_error( orderly_bundle_pack(path_bundles, "connection", root = path), "Cannot use 'connection:' with a bundle") }) test_that("can't import an unrun bundle", { path <- orderly::orderly_example("minimal") path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_error( orderly_bundle_import(res$path, root = path), "This does not look like a complete bundle (one that has been run)", fixed = TRUE) }) test_that("sensible error when given junk input", { path <- orderly::orderly_example("minimal") tmp <- tempfile(fileext = ".zip") zip_dir(file.path(path, "src"), tmp) expect_error( orderly_bundle_import(tmp, root = path), 
"Failed to extract bundle info from '.*'") }) test_that("can run a bundle from a relative path", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) writeLines( "readme", file.path(path, "src", "example", "README.md")) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) workdir <- tempfile() withr::with_dir(dirname(workdir), orderly_bundle_run(res$path, basename(workdir), echo = FALSE)) expect_equal(length(dir(workdir)), 1) }) test_that("Can rename a bundle before import", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) ans <- orderly_bundle_run(res$path, echo = FALSE) tmp <- tempfile() on.exit(unlink(tmp), add = TRUE) file_copy(ans$path, tmp) expect_true(orderly_bundle_import(tmp, root = path)) expect_equal(orderly_list_archive(path), data_frame(name = "example", id = res$id)) }) test_that("failed bundle run writes out failed rds", { path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) append_lines( c("f <- function() g()", "g <- function() h()", "h <- function() stop('some error')", "f()"), file.path(path, "src", "example", "script.R")) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) expect_equal(dir(path_bundles), basename(res$path)) expect_equal(basename(res$path), paste0(res$id, ".zip")) path2 <- paste0(path, "-moved") file.rename(path, path2) workdir <- tempfile() expect_error(orderly_bundle_run(res$path, workdir, echo = FALSE), "some error") id <- list.files(workdir) path_rds <- path_orderly_fail_rds(file.path(workdir, id, "pack")) expect_true(file.exists(path_rds)) failed_rds <- readRDS(path_rds) expect_equal(names(failed_rds), c("session_info", "time", "env", "error", "meta", "archive_version")) expect_equal(failed_rds$error$error$message, "some error") expect_true(length(failed_rds$error$trace) > 5) expect_match(failed_rds$error$trace[length(failed_rds$error$trace) - 3], "f()") expect_match(failed_rds$error$trace[length(failed_rds$error$trace) - 2], "g()") expect_match(failed_rds$error$trace[length(failed_rds$error$trace) - 1], "h()") expect_match(failed_rds$error$trace[length(failed_rds$error$trace)], 'stop\\("some error"\\)') }) test_that("zip list helper safely lists", { skip_on_cran() path <- test_prepare_orderly_example("minimal") on.exit(unlink(path, recursive = TRUE)) path_bundles <- tempfile() res <- orderly_bundle_pack(path_bundles, "example", root = path) l1 <- zip::zip_list(res$path)$filename l2 <- zip_list2(res$path)$filename l3 <- zip_list_base(res$path)$filename expect_equal(l1, l2) expect_setequal(l3, l1) }) test_that("fall back error handling", { skip_on_cran() skip_if_not_installed("mockery") d <- data.frame(filename = "x") mock_zip_zip_list <- mockery::mock( d, stop("some zip error"), stop("some zip error")) mock_base_zip_list <- mockery::mock( d, stop("some base error")) mockery::stub(zip_list2, "zip::zip_list", mock_zip_zip_list) mockery::stub(zip_list2, "zip_list_base", mock_base_zip_list) expect_equal(zip_list2("path"), d) mockery::expect_called(mock_zip_zip_list, 1) mockery::expect_called(mock_base_zip_list, 0) expect_equal(zip_list2("path"), d) mockery::expect_called(mock_zip_zip_list, 2) mockery::expect_called(mock_base_zip_list, 1) expect_error(zip_list2("path"), "some zip error") mockery::expect_called(mock_zip_zip_list, 3) mockery::expect_called(mock_base_zip_list, 
2) expect_equal(mockery::mock_args(mock_zip_zip_list), rep(list(list("path")), 3)) expect_equal(mockery::mock_args(mock_base_zip_list), rep(list(list("path")), 2)) })
search_taxonomy <- function(query, tsn = NULL, gbif = NULL, eol = NULL,
  col = NULL, bold = NULL, ncbi = NULL, verbose = TRUE, ...) {
  req <- Filter(
    Negate(is.null),
    list(tsn = tsn, gbif = gbif, eol = eol, col = col, bold = bold, ncbi = ncbi)
  )
  if (length(req)) {
    if (length(req) > 1) {
      stop("Queries with multiple criteria are not allowed.")
    } else query <- req
  } else {
    stopifnot(is.character(query))
    query <- list(q = query)
  }
  taxa <- resp_to_df(get_gen(endpoints()$taxonomy, query = query,
    verbose = verbose, ...)$body)
  if (length(taxa)) {
    nodes <- do.call(rbind, lapply(taxa$id, function(x)
      get_from_fkey_flt(endpoints()$node, taxonomy_id = x,
        verbose = verbose, ...)))
    network_ids <- nodes$network_id
  } else {
    if (verbose) message("No taxon found.")
    return(data.frame())
  }
  # network_ids is a plain vector, so count networks with length(unique()),
  # not nrow() (which returns NULL for vectors)
  if (verbose) message(sprintf("Found %s taxa involved in %s network(s)",
    nrow(taxa), length(unique(network_ids))))
  class(nodes) <- append(class(nodes), "mgSearchTaxonomy")
  nodes
}
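# Illustrative queries (not from the original source); these hit the live
# Mangal API, so they are left commented out:
# search_taxonomy("Acer")        # free-text search on taxon names
# search_taxonomy(tsn = 28731)   # lookup by ITIS TSN (value is hypothetical)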
local_edition(3)

test_that("Metadata is available before pre_knit", {
  message_pre_knit = 'pre_knit handles metadata'
  fmt <- md_document()
  fmt$pre_knit <- function(input, ...) {
    if (identical(rmarkdown::metadata, list(foo = 'bar'))) {
      message(message_pre_knit)
    }
  }
  input_file = tempfile(fileext = '.md')
  writeLines('---\nfoo: bar\n---', input_file)
  expect_message(render(input_file, fmt, quiet = TRUE), message_pre_knit)
})

test_that("file_scope split correctly input file", {
  # a minimal four-line document: two parts for file_scope_fun to split
  rmd <- local_rmd_file(c("# A", "content a", "# B", "content b"))
  file_scope_fun <- function(file) {
    x <- xfun::read_utf8(file)
    list(
      list(name = "A", content = x[1:2]),
      list(name = "B", content = x[3:4])
    )
  }
  splitted <- file_scope_split(rmd, file_scope_fun)
  expect_true(all(file.exists(splitted)))
  on.exit(unlink(splitted), add = TRUE, after = FALSE)
  expect_match(splitted, "[.]split[.]md$")
  expect_snapshot_file(splitted[1])
  expect_snapshot_file(splitted[2])
})
fui_el <- list()

fui_el$button <- function(...) { div(class = "ui button", ...) }
fui_el$container <- function(...) { div(class = "ui container", ...) }
fui_el$divider <- function(...) { div(class = "ui divider", ...) }
fui_el$emoji <- function(emoji, ...) { tags$em("data-emoji" = emoji, ...) }
fui_el$flag <- function(country, ...) { tags$i(class = paste(country, "flag"), ...) }
fui_el$header <- function(..., html_tag = shiny::div) { html_tag(class = "ui header", ...) }
fui_el$icon <- function(icon, ...) { tags$i(class = paste(icon, "icon"), ...) }
fui_el$image <- function(...) { div(class = "ui image", ...) }
fui_el$input <- function(...) { div(class = "ui input", ...) }
fui_el$label <- function(...) { div(class = "ui label", ...) }
fui_el$list <- function(..., html_tag = shiny::div) { html_tag(class = "ui list", ...) }
fui_el$item <- function(..., html_tag = shiny::div) { html_tag(class = "item", ...) }
fui_el$dimmer <- function(...) { div(class = "ui dimmer", ...) }
fui_el$loader <- function(...) { div(class = "ui loader", ...) }
fui_el$placeholder <- function(...) { div(class = "ui placeholder", ...) }
fui_el$rail <- function(...) { div(class = "ui rail", ...) }
fui_el$reveal <- function(...) { div(class = "ui reveal", ...) }
fui_el$segment <- function(...) { div(class = "ui segment", ...) }
fui_el$steps <- function(...) { div(class = "ui steps", ...) }
fui_el$step <- function(...) { div(class = "step", ...) }
fui_el$text <- function(...) { span(class = "ui text", ...) }
fui_el$breadcrumb <- function(...) { div(class = "ui breadcrumb", ...) }
fui_el$form <- function(...) { div(class = "ui form", ...) }
fui_el$fields <- function(...) { div(class = "fields", ...) }
fui_el$field <- function(...) { div(class = "field", ...) }
fui_el$grid <- function(...) { div(class = "ui grid", ...) }
fui_el$row <- function(...) { div(class = "row", ...) }
fui_el$column <- function(...) { div(class = "column", ...) }
fui_el$menu <- function(...) { div(class = "ui menu", ...) }
fui_el$message <- function(...) { div(class = "ui message", ...) }
fui_el$table <- function(...) { tags$table(class = "ui table", ...) }
fui_el$advert <- function(...) { div(class = "ui ad", ...) }
fui_el$cards <- function(...) { div(class = "ui cards", ...) }
fui_el$card <- function(...) { div(class = "ui card", ...) }
fui_el$comments <- function(...) { div(class = "ui comments", ...) }
fui_el$comment <- function(...) { div(class = "ui comment", ...) }
fui_el$feed <- function(...) { div(class = "ui feed", ...) }
fui_el$event <- function(...) { div(class = "event", ...) }
fui_el$items <- function(...) { div(class = "ui items", ...) }
fui_el$statistics <- function(...) { div(class = "ui statistics", ...) }
fui_el$statistic <- function(...) { div(class = "ui statistic", ...) }

fui_el <- fui_el[sort(names(fui_el))]
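# Minimal usage sketch (not from the original source): the constructors wrap
# shiny tag builders, so with shiny attached they compose like ordinary tags;
# left commented since it assumes an interactive shiny session:
# fui_el$button(fui_el$icon("check"), "Save")
# fui_el$grid(fui_el$row(fui_el$column("left"), fui_el$column("right")))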
print.nbg_fit <- function(x, ...) {
  if (x$num_patients > 0) {
    treated <- data.frame(
      Patient = seq_along(x$doses),
      Dose = x$doses,
      Toxicity = x$tox,
      Weight = x$weights
    )
    print(treated)
  } else {
    cat('No patients have been treated.\n')
  }
  cat('\n')
  df <- data.frame(
    Dose = factor(x$dose_indices),
    N = sapply(seq_along(x$dose_indices), function(i) sum(x$doses == i)),
    Tox = sapply(seq_along(x$dose_indices), function(i) sum(x$tox[x$doses == i])),
    ProbTox = x$prob_tox,
    MedianProbTox = x$median_prob_tox,
    ProbMTD = x$prob_mtd
  )
  print(df, digits = 3)
  cat('\n')
  cat(paste0('The model targets a toxicity level of ', x$dat$target, '.'))
  cat('\n')
  cat(paste0('The dose with estimated toxicity probability closest to target is ',
             x$recommended_dose, '.'))
  cat('\n')
  cat(paste0('The dose most likely to be the MTD is ', x$modal_mtd_candidate, '.'))
  cat('\n')
  cat(paste0('Model entropy: ', format(round(x$entropy, 2), nsmall = 2)))
}
context("Checking repo characteristics") loggerOptions <- futile.logger::logger.options() if (!identical(loggerOptions, list())){ origLogThreshold <- loggerOptions[[1]][['threshold']] } else { origLogThreshold <- futile.logger::INFO } futile.logger::flog.threshold(0) test_that("Files in extdata/ and testdata/ should be smaller than 10MB on disk" , {dataDirs <- c("extdata", "testdata", "inst/extdata", "inst/testdata") results <- lapply(dataDirs, function(dirName){ dirPath <- paste0(find.package("uptasticsearch"), "/", dirName) infoDF <- file.info(list.files(path = dirPath , full.names = TRUE , recursive = TRUE)) return(data.table::data.table(infoDF, keep.rownames = TRUE))}) infoDT <- data.table::rbindlist(results) sizesInMB <- infoDT[, size] / (1024^2) expect_true(nrow(infoDT) != 0 && max(sizesInMB) < 10)} ) futile.logger::flog.threshold(origLogThreshold) rm(list = ls())
github_api_repo_unwatch = function(repo) {
  ghclass_api_v3_req(
    endpoint = "DELETE /repos/:owner/:repo/subscription",
    owner = get_repo_owner(repo),
    repo = get_repo_name(repo)
  )
}

repo_unwatch = function(repo) {
  arg_is_chr(repo)
  res = purrr::map(
    repo,
    function(repo) {
      res = purrr::safely(github_api_repo_unwatch)(repo)
      status_msg(
        res,
        "Unwatched repo {.val {repo}}.",
        "Failed to unwatch repo {.val {repo}}."
      )
    }
  )
  invisible(res)
}
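# Illustrative call (not from the original source): requires a GitHub token
# that ghclass can find, so it is left commented; the repo name is hypothetical.
# repo_unwatch("some-org/some-repo")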
individual_claim_history <- function( majRev, minRev, claim_size, no_pmt, payment_delays, payment_sizes, occurrence, notidel, k1, k2, inflated, base_inflation_vector, keep_all) { base_inflation <- function(q) { if (q == 0) { return(1) } base_inflation_index <- cumprod(1 + base_inflation_vector) if (q >= length(base_inflation_vector)) { q <- length(base_inflation_vector) } max(base_inflation_index[floor(q)], 1) * (1 + base_inflation_vector[ceiling(q)])^(q - floor(q)) } base_inflation <- Vectorize(base_inflation) Ptimes <- cumsum(payment_delays) rev_atP <- minRev$minRev_atP rev_atP <- ifelse(rev_atP == 1, "PMi", "P") if (majRev$majRev_atP == 1) { rev_atP[no_pmt - 1] <- "PMa" if (minRev$minRev_atP[no_pmt - 1] == 1) { no_minRev <- minRev$minRev_freq_atP if (minRev$minRev_atP[no_pmt] == 0) { minRev$minRev_time_atP <- minRev$minRev_time_atP[-no_minRev] minRev$minRev_factor_atP <- minRev$minRev_factor_atP[-no_minRev] } else { minRev$minRev_time_atP <- minRev$minRev_time_atP[-(no_minRev - 1)] minRev$minRev_factor_atP <- minRev$minRev_factor_atP[-(no_minRev - 1)] } minRev$minRev_atP[no_pmt - 1] <- 0 minRev$minRev_freq_atP <- minRev$minRev_freq_atP - 1 } } majRev_freqtatP <- majRev$majRev_time if (majRev$majRev_atP == 1) { majRev_freqtatP <- majRev_freqtatP[-majRev$majRev_freq] } minRev_freqtatP <- minRev$minRev_time_notatP txn_delay <- sort(c(Ptimes, majRev_freqtatP, minRev_freqtatP)) txn_time <- txn_delay + occurrence + notidel txn_type <- rep(NA, length(txn_delay)) txn_type[txn_delay %in% Ptimes] <- rev_atP txn_type[txn_delay %in% majRev_freqtatP] <- "Ma" txn_type[txn_delay %in% minRev_freqtatP] <- "Mi" stopifnot(txn_type %in% c("P", "PMi", "PMa", "Mi", "Ma")) no_txn <- length(txn_delay) c_left <- x_left <- y_left <- c_right <- x_right <- y_right <- rep(NA, no_txn) p_index <- no_pmt Ma_index <- majRev$majRev_freq Mi_index_atP <- minRev$minRev_freq_atP Mi_index_notatP <- minRev$minRev_freq_notatP Ma_multp <- majRev$majRev_factor Mi_multp_atP <- minRev$minRev_factor_atP Mi_multp_notatP <- minRev$minRev_factor_notatP time_unit <- SynthETIC::return_parameters()[2] for (i in no_txn:2) { if (i == no_txn) { if (inflated == FALSE) { c_right[no_txn] <- claim_size x_right[no_txn] <- 0 y_right[no_txn] <- claim_size } else { c_right[no_txn] <- sum(payment_sizes) x_right[no_txn] <- 0 y_right[no_txn] <- sum(payment_sizes) } } if (startsWith(txn_type[i], "P")) { c_right[i - 1] <- c_left[i] <- c_right[i] - payment_sizes[p_index] if (txn_type[i] == "PMa") { sset <- txn_time[which(txn_type[1:(i - 1)] %in% c("Ma", "Mi", "PMa", "PMi"))] next_rev_time <- max(sset[length(sset)], 0) discount <- base_inflation(next_rev_time * time_unit * 4) / base_inflation(txn_time[i] * time_unit * 4) y_left[i] <- y_right[i] * discount k1_inv <- 1 / k1 y_left[i] <- y_left[i] / Ma_multp[Ma_index] y_right[i - 1] <- y_left[i] <- max(y_left[i], k1_inv * c_left[i]) x_right[i - 1] <- x_left[i] <- y_left[i] - c_left[i] Ma_index <- Ma_index - 1 } else if (txn_type[i] == "PMi") { sset <- txn_time[which(txn_type[1:(i - 1)] %in% c("Ma", "Mi", "PMa", "PMi"))] next_rev_time <- max(sset[length(sset)], 0) discount <- base_inflation(next_rev_time * time_unit * 4) / base_inflation(txn_time[i] * time_unit * 4) y_left[i] <- y_right[i] * discount k2_inv <- 1 / k2 x_left[i] <- (y_left[i] - c_left[i]) / Mi_multp_atP[Mi_index_atP] y_left[i] <- x_left[i] + c_left[i] y_right[i - 1] <- y_left[i] <- max(y_left[i], k2_inv * c_left[i]) x_right[i - 1] <- x_left[i] <- y_left[i] - c_left[i] Mi_index_atP <- Mi_index_atP - 1 } else { y_right[i - 1] <- y_left[i] <- 
y_right[i] x_right[i - 1] <- x_left[i] <- y_left[i] - c_left[i] } p_index <- p_index - 1 } else { c_right[i - 1] <- c_left[i] <- c_right[i] if (txn_type[i] == "Ma") { sset <- txn_time[which(txn_type[1:(i - 1)] %in% c("Ma", "Mi", "PMa", "PMi"))] next_rev_time <- max(sset[length(sset)], 0) discount <- base_inflation(next_rev_time * time_unit * 4) / base_inflation(txn_time[i] * time_unit * 4) y_left[i] <- y_right[i] * discount k1_inv <- 1 / k1 y_left[i] <- y_left[i] / Ma_multp[Ma_index] y_right[i - 1] <- y_left[i] <- max(y_left[i], k1_inv * c_left[i]) x_right[i - 1] <- x_left[i] <- y_left[i] - c_left[i] Ma_index <- Ma_index - 1 } else { sset <- txn_time[which(txn_type[1:(i - 1)] %in% c("Ma", "Mi", "PMa", "PMi"))] next_rev_time <- max(sset[length(sset)], 0) discount <- base_inflation(next_rev_time * time_unit * 4) / base_inflation(txn_time[i] * time_unit * 4) y_left[i] <- y_right[i] * discount k2_inv <- 1 / k2 x_left[i] <- (y_left[i] - c_left[i]) / Mi_multp_notatP[Mi_index_notatP] y_left[i] <- x_left[i] + c_left[i] y_right[i - 1] <- y_left[i] <- max(y_left[i], k2_inv * c_left[i]) x_right[i - 1] <- x_left[i] <- y_left[i] - c_left[i] Mi_index_notatP <- Mi_index_notatP - 1 } } } stopifnot(Ma_index == 1 && Mi_index_atP == 0 && Mi_index_notatP == 0 && p_index == 0) y_left[1] <- y_right[1] x_left[1] <- x_right[1] c_left[1] <- c_right[1] <- 0 if (keep_all == TRUE) { result <- list( txn_delay = txn_delay, txn_time = txn_time, txn_type = txn_type, cumpaid_left = c_left, cumpaid_right = c_right, OCL_left = x_left, OCL_right = x_right, incurred_left = y_left, incurred_right = y_right, minRev = minRev, majRev = majRev) } else { result <- list( txn_delay = txn_delay, txn_time = txn_time, txn_type = txn_type, cumpaid_right = c_right, OCL_right = x_right, incurred_right = y_right, minRev = minRev, majRev = majRev) } return(result) } claim_history <- function( claims, majRev_list, minRev_list, k1 = 0.95, k2 = 0.95, base_inflation_vector = NULL, keep_all = FALSE ) { I <- length(claims$frequency_vector) time_unit <- SynthETIC::return_parameters()[2] max_quarters <- floor(I * time_unit * 4) * 2 if (is.null(base_inflation_vector)) { base_inflation_vector <- rep(0, times = max_quarters) inflated <- FALSE } else if (length(base_inflation_vector) == 1) { base_inflation_vector <- rep(base_inflation_vector, times = max_quarters) inflated <- TRUE } else if (length(base_inflation_vector) != max_quarters) { stop("base_inflation_vector is of a wrong size. Either input a single value or a vector of appropriate length") } else { inflated <- TRUE } full_history <- vector("list", I) for (i in 1:I) { full_history[[i]] <- vector("list", claims$frequency_vector[i]) for (j in 1:claims$frequency_vector[i]) { if (inflated == TRUE) { payment_sizes <- claims$payment_inflated_list[[i]][[j]] } else { payment_sizes <- claims$payment_size_list[[i]][[j]] } full_history[[i]][[j]] <- individual_claim_history( majRev = majRev_list[[i]][[j]], minRev = minRev_list[[i]][[j]], claim_size = claims$claim_size[[i]][j], no_pmt = claims$no_payments_list[[i]][j], payment_delays = claims$payment_delay_list[[i]][[j]], payment_sizes = payment_sizes, occurrence = claims$occurrence_list[[i]][j], notidel = claims$notification_list[[i]][j], k1 = k1, k2 = k2, inflated = inflated, base_inflation_vector = base_inflation_vector, keep_all = keep_all ) } } full_history }
gl.check.lambda.alt1 <- function(l1, l2 = NULL, l3 = NULL, l4 = NULL,
  param = "fmkl", vect = FALSE) {
  if (vect == TRUE && length(l1) == 1) {
    stop("You need to provide a vector of parameters")
  }
  lambdas <- .gl.parameter.tidy(lambda1 = l1, lambda2 = l2, lambda3 = l3,
    lambda4 = l4, param = param)
  l4 <- lambdas[4]
  l3 <- lambdas[3]
  l2 <- lambdas[2]
  l1 <- lambdas[1]
  switch(param,
    FKML = , fkml = , freimer = , frm = , FMKL = , fmkl = {
      ret <- l2 > 0
    },
    ramberg = , ram = , RS = , rs = {
      ret <- rep(0, length(l1))
      con1 <- (l3 < -1) * (l4 > 1)
      con2 <- (l3 > 1) * (l4 < -1)
      con3 <- (l4 > 1) * (l3 > -1) * (l3 < 0) *
        (((1 - l3)^(1 - l3) * (l4 - 1)^(l4 - 1))/((l4 - l3)^(l4 - l3)) < -l3/l4)
      con4 <- (l3 < 0) * (l4 <= 0)
      con5 <- (l3 == 0) * (l4 < 0)
      con6 <- (l3 > 1) * (l4 > -1) * (l4 < 0) *
        (((1 - l4)^(1 - l4) * (l3 - 1)^(l3 - 1))/((l3 - l4)^(l3 - l4)) < -l4/l3)
      # use base R is.na() here; which.na() is not a base function
      con6[is.na(con6)] <- 0
      con3[is.na(con3)] <- 0
      ret[(l2 < 0)] <- ((con1 + con2 + con3 + con4 + con5 + con6) > 0)[(l2 < 0)]
      con7 <- (l3 > 0) * (l4 >= 0)
      con8 <- (l3 == 0) * (l4 > 0)
      ret[l2 > 0] <- (con7 + con8)[(l2 > 0)]
    })
  ret <- as.logical(ret * ((is.finite(l1) * is.finite(l2) * is.finite(l3) *
    is.finite(l4)) == 1))
  return(ret)
}
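# Illustrative checks (not from the original source); they assume the gld
# package's internal .gl.parameter.tidy() is available, so they are commented:
# gl.check.lambda.alt1(0, 1, -0.1, -0.2, param = "fmkl")   # TRUE: lambda2 > 0
# gl.check.lambda.alt1(0, -1, -0.1, -0.2, param = "fmkl")  # FALSE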
eval_sub_criteria <- function(x, ...) UseMethod("eval_sub_criteria")

eval_sub_criteria.sub_criteria <- function(x, x_pos = seq_len(max(attr_eval(x))),
  y_pos = rep(1L, length(x_pos)), check_duplicates = TRUE, ...) {
  curr_ds_len <- length(y_pos)
  matches <- sapply(seq_along(x), function(j) {
    a <- x[[j]]
    x <- a[[1]]
    if (inherits(x, "sub_criteria")) {
      return(unlist(eval_sub_criteria(x = x, x_pos = x_pos, y_pos = y_pos,
        check_duplicates = check_duplicates), use.names = FALSE))
    }
    if (inherits(x, "d_attribute")) {
      x <- lapply(x, function(x) if (length(x) == 1) rep(x, curr_ds_len) else x)
      y <- lapply(x, function(x) x[y_pos])
      x <- lapply(x, function(x) x[x_pos])
    } else {
      if (length(x) == 1) x <- rep(x, curr_ds_len)
      y <- x[y_pos]
      x <- x[x_pos]
    }
    f1 <- a[[2]]
    lgk <- try(f1(x, y), silent = TRUE)
    if (inherits(lgk, "try-error") || !is.logical(lgk)) {
      if (inherits(lgk, "try-error")) {
        err <- attr(lgk, "condition")$message
      } else {
        err <- "Output is not a `logical` object"
      }
      err <- paste0("Unable to evaluate `match_funcs`:\n",
        "i - Each function in `match_funcs` must have the following syntax and output.\n",
        "i - Syntax ~ `function(x, y, ...)`.\n",
        "i - Output ~ `TRUE` or `FALSE`.\n",
        "X - Issue with `match_funcs`: ", err, ".")
      stop(err, call. = FALSE)
    }
    out1 <- lgk
    out1[is.na(lgk)] <- 0
    if (!length(out1) %in% c(1, curr_ds_len)) {
      err <- paste0("Output length of `match_funcs` must be 1 or the same as `criteria`:\n",
        "i - Unexpected length for `match_funcs`:\n",
        "i - Expecting a length of 1 or ", curr_ds_len, ".\n",
        "X - Length is ", length(out1), ".")
      stop(err, call. = FALSE)
    }
    if (isFALSE(check_duplicates)) {
      f2 <- a[[3]]
      lgk <- try(f2(x, y), silent = TRUE)
      if (inherits(lgk, "try-error") || !is.logical(lgk)) {
        if (inherits(lgk, "try-error")) {
          err <- attr(lgk, "condition")$message
        } else {
          err <- "Output is not a `logical` object"
        }
        err <- paste0("Unable to evaluate `equal_funcs`:\n",
          "i - Each function in `equal_funcs` must have the following syntax and output.\n",
          "i - Syntax ~ `function(x, y, ...)`.\n",
          "i - Output ~ `TRUE` or `FALSE`.\n",
          "X - Issue with `equal_funcs`: ", err, ".")
        stop(err, call. = FALSE)
      }
      lgk <- as.numeric(lgk)
      out2 <- lgk
      out2[is.na(lgk)] <- 0
      if (length(out2) == 1) out2 <- rep(out2, curr_ds_len)
      if (!length(out2) %in% c(1, curr_ds_len)) {
        err <- paste0("Output length of `equal_funcs` must be 1 or the same as `criteria`:\n",
          "i - Unexpected length for `equal_funcs`:\n",
          "i - Expecting a length of 1 or ", curr_ds_len, ".\n",
          "X - Length is ", length(out2), ".")
        stop(err, call. = FALSE)
      }
      out1 <- c(out1, out2)
    }
    return(out1)
  })
  if (isFALSE(is.matrix(matches))) {
    matches <- t(as.matrix(matches))
  }
  operator <- attr(x, "operator")
  if (operator == "or") {
    if (isFALSE(check_duplicates)) {
      set_match <- rowSums(matches)
      m2 <- rowSums(matches) == ncol(matches)
      lgk <- which(seq_len(nrow(matches)) >= nrow(matches)/2)
      set_match[lgk] <- m2[lgk]
      rm(m2); rm(lgk)
    } else {
      set_match <- rowSums(matches)
    }
    set_match[set_match > 0] <- 1
  } else if (operator == "and") {
    set_match <- rowSums(matches) == ncol(matches)
    set_match <- as.numeric(set_match)
  }
  if (isFALSE(check_duplicates)) {
    set_match.rf <- set_match[((length(set_match)/2) + 1):length(set_match)]
    set_match <- set_match[1:(length(set_match)/2)]
    x <- list(logical_test = set_match, equal_test = set_match.rf)
  } else {
    x <- list(logical_test = set_match)
  }
  rm(list = ls()[ls() != "x"])
  return(x)
}
rob.int <- function(x, rc, l.grid = 1000, tol = 1e-4, var.range = NULL){ lo <- l.grid margin <- x$VC$margins[2] params <- x$coefficients min.dn <- 1e-160 min.pr <- x$VC$min.pr max.pr <- x$VC$max.pr j <- 1; inds <- posi <- NULL if(is.null(var.range)){ rlo <- (max(x$VC$y1) - min(x$VC$y1))/lo if(rlo > 1) lo <- round(lo*rlo) if( margin %in% c("N","N2","GU","rGU","LO","LN") ) seq.y <- seq(min(x$VC$y1) - ((max(x$VC$y1) - min(x$VC$y1))/2), max(x$VC$y1) + ((max(x$VC$y1) - min(x$VC$y1))/2), length.out = lo) if( margin %in% c("WEI","iG","GA","DAGUM","SM","FISK","GP","GPII","GPo") ) seq.y <- seq(1e-12, max(x$VC$y1) + ((max(x$VC$y1) - min(x$VC$y1))/2), length.out = lo) if( margin %in% c("TW") ) seq.y <- seq(0, max(x$VC$y1) + ((max(x$VC$y1) - min(x$VC$y1))/2), length.out = lo) if( margin %in% c("BE") ) seq.y <- seq(1e-12, 0.999999, length.out = lo) }else{ rlo <- (var.range[2] - var.range[1])/lo if(rlo > 1) lo <- round(lo*rlo) seq.y <- seq(var.range[1], var.range[2], length.out = lo) } n <- x$VC$n if(is.null(x$VC$X2)){x$VC$X2 <- matrix(1, n, 1); x$VC$X2.d2 <- 1} if(is.null(x$VC$X3)){x$VC$X3 <- matrix(1, n, 1); x$VC$X3.d2 <- 1} eta <- eta.tr(x$VC$X1%*%params[1:x$VC$X1.d2], margin) ss <- esp.tr(x$VC$X2%*%params[(1+x$VC$X1.d2):(x$VC$X1.d2+x$VC$X2.d2)], margin) sigma2 <- ss$vrb sigma2.st <- ss$vrb.st if( margin %in% c("DAGUM","SM","TW") ){ nus <- enu.tr(x$VC$X3%*%params[(1+x$VC$X1.d2+x$VC$X2.d2):(x$VC$X1.d2+x$VC$X2.d2+x$VC$X3.d2)], margin) nu <- nus$vrb nu.st <- nus$vrb.st } else nu <- nu.st <- 1 for(i in 1:lo){ ires <- intB(seq.y[i], eta, sigma2, sigma2.st, nu, nu.st, margin, rc, min.dn, min.pr, max.pr) if(min(ires) == max(ires) && min(ires) < tol) inds[i] <- TRUE else inds[i] <- FALSE if(i > 1 && inds[i] != inds[i-1]) { if(j==1){ posi[j] <- i-1; j <- j + 1} else posi[2] <- i} } if((is.null(posi) || length(posi) < 2) && margin %in% c("N","N2","GU","rGU","LO","LN")) stop("Increase the tolerance value or try a different range.") if((is.null(posi) || length(posi) < 2) && margin %in% c("WEI","iG","GA","DAGUM","TW","SM","FISK","GP","GPII","GPo") ){ posi <- NULL; j <- 1 for(i in 1:lo){ ires <- intB(seq.y[i], eta, sigma2, sigma2.st, nu, nu.st, margin, rc, min.dn, min.pr, max.pr) if(min(ires) == max(ires) && min(ires) < tol) inds[i] <- TRUE else inds[i] <- FALSE if(i > 2 && inds[i] != inds[i-1]) { if(j == 1) posi[2] <- i; j <- j + 1} if(!is.null(posi[2])) posi[1] <- 1 } if(is.null(posi) || length(posi) < 2) stop("Increase the tolerance value or try a different range.") } bs <- c(seq.y[posi[1]], seq.y[posi[2]]) names(bs) <- c("lB", "uB") bs }
model_unlock <- function(id, folder = ".", file = ".lock", oncluster = TRUE) {
  lfile <- path(folder, file)
  if (!oncluster) {
    if (!file.exists(lfile)) stop("Lock file does not exist!")
    load(lfile)
    row <- which(as.integer(lock_queue[, 1]) == id)
    if (length(row) == 0) stop("Could not find a process with the given id!")
    if (length(row) > 1) stop("More than one lock entry for the given id!")
    if (dim(lock_queue)[1] > 1) {
      lock_queue <- lock_queue[-row, , drop = FALSE]
      save(lock_queue, file = lfile)
    } else {
      unlink(lfile)
    }
    message("...entry removed from queue!")
  } else {
    if (!system(paste0("rm -r ", lfile), intern = FALSE,
                ignore.stdout = TRUE, ignore.stderr = TRUE)) {
      message("The model was unlocked")
    } else {
      stop("Lock file does not exist!")
    }
  }
}
tparams_mean <- function(value, ...) {
  stopifnot(is.matrix(value))
  check(new_tparams_mean(value, n_samples = ncol(value), ...), ...)
}

new_tparams_mean <- function(value, n_samples, ...) {
  l <- c(list(value = value, n_samples = n_samples),
         do.call("new_id_attributes", list(...)))
  class(l) <- "tparams_mean"
  return(l)
}

check.tparams_mean <- function(object, ...) {
  id_args <- list(...)
  check(do.call("new_id_attributes", id_args))
  for (v in c("strategy_id", "patient_id", "state_id")) {
    if (nrow(object$value) != length(id_args[[v]])) {
      stop("The length of each ID variable must equal the number of rows in 'value'.",
           call. = FALSE)
    }
  }
  return(object)
}

summary.tparams_mean <- function(object, probs = c(0.025, 0.975), ...) {
  q <- apply(object$value, 1, stats::quantile, probs = probs)
  if (is.matrix(q)) {
    q <- t(q)
  } else {
    q <- as.matrix(q)
    colnames(q) <- paste0(probs * 100, "%")
  }
  data.table(
    make_id_data_table(object),
    mean = apply(object$value, 1, mean),
    sd = apply(object$value, 1, stats::sd),
    q
  )
}

print.tparams_mean <- function(x, ...) {
  cat("A \"tparams_mean\" object \n\n")
  cat("Summary of means:\n")
  print(summary(x, ...))
  invisible(x)
}
cov(mtcars$mpg, mtcars$wt)
cov(mtcars$wt, mtcars$mpg)
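# Both calls return the same value: sample covariance is symmetric in its
# arguments, since sum((x - mean(x)) * (y - mean(y))) / (n - 1) is unchanged
# when x and y are swapped.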
structure(list(url = "https://api.twitter.com/2/tweets/counts/all?query=%23commtwitter&start_time=2021-03-01T00%3A00%3A00Z&end_time=2021-06-05T00%3A00%3A00Z&granularity=day&next_token=1jzu9lk96azp0b3x7888hrpw8fftxsmypwnzctcz0vi5", status_code = 200L, headers = structure(list(date = "Thu, 24 Jun 2021 18:30:40 UTC", server = "tsa_f", `content-type` = "application/json; charset=utf-8", `cache-control` = "no-cache, no-store, max-age=0", `content-length` = "380", `x-access-level` = "read", `x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip", `x-xss-protection` = "0", `x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1624560215", `content-disposition` = "attachment; filename=json.json", `x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "284", `strict-transport-security` = "max-age=631138519", `x-connection-hash` = "63ce28965bd1a27b2f5c45eb027ca859f6f7f5156939af01614fd6f0e8bb1556"), class = c("insensitive", "list")), all_headers = list(list(status = 200L, version = "HTTP/2", headers = structure(list(date = "Thu, 24 Jun 2021 18:30:40 UTC", server = "tsa_f", `content-type` = "application/json; charset=utf-8", `cache-control` = "no-cache, no-store, max-age=0", `content-length` = "380", `x-access-level` = "read", `x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip", `x-xss-protection` = "0", `x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1624560215", `content-disposition` = "attachment; filename=json.json", `x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "284", `strict-transport-security` = "max-age=631138519", `x-connection-hash` = "63ce28965bd1a27b2f5c45eb027ca859f6f7f5156939af01614fd6f0e8bb1556"), class = c("insensitive", "list")))), cookies = structure(list(domain = c(".twitter.com", ".twitter.com"), flag = c(TRUE, TRUE), path = c("/", "/"), secure = c(TRUE, TRUE), expiration = structure(c(1687631316, 1687631316), class = c("POSIXct", "POSIXt")), name = c("personalization_id", "guest_id"), value = c("REDACTED", "REDACTED")), row.names = c(NA, -2L), class = "data.frame"), content = 
charToRaw("{\"data\":[{\"end\":\"2021-03-05T00:00:00.000Z\",\"start\":\"2021-03-04T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-06T00:00:00.000Z\",\"start\":\"2021-03-05T00:00:00.000Z\",\"tweet_count\":2},{\"end\":\"2021-03-07T00:00:00.000Z\",\"start\":\"2021-03-06T00:00:00.000Z\",\"tweet_count\":6},{\"end\":\"2021-03-08T00:00:00.000Z\",\"start\":\"2021-03-07T00:00:00.000Z\",\"tweet_count\":0},{\"end\":\"2021-03-09T00:00:00.000Z\",\"start\":\"2021-03-08T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-10T00:00:00.000Z\",\"start\":\"2021-03-09T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-11T00:00:00.000Z\",\"start\":\"2021-03-10T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-12T00:00:00.000Z\",\"start\":\"2021-03-11T00:00:00.000Z\",\"tweet_count\":1},{\"end\":\"2021-03-13T00:00:00.000Z\",\"start\":\"2021-03-12T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-14T00:00:00.000Z\",\"start\":\"2021-03-13T00:00:00.000Z\",\"tweet_count\":0},{\"end\":\"2021-03-15T00:00:00.000Z\",\"start\":\"2021-03-14T00:00:00.000Z\",\"tweet_count\":1},{\"end\":\"2021-03-16T00:00:00.000Z\",\"start\":\"2021-03-15T00:00:00.000Z\",\"tweet_count\":0},{\"end\":\"2021-03-17T00:00:00.000Z\",\"start\":\"2021-03-16T00:00:00.000Z\",\"tweet_count\":5},{\"end\":\"2021-03-18T00:00:00.000Z\",\"start\":\"2021-03-17T00:00:00.000Z\",\"tweet_count\":5},{\"end\":\"2021-03-19T00:00:00.000Z\",\"start\":\"2021-03-18T00:00:00.000Z\",\"tweet_count\":14},{\"end\":\"2021-03-20T00:00:00.000Z\",\"start\":\"2021-03-19T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-21T00:00:00.000Z\",\"start\":\"2021-03-20T00:00:00.000Z\",\"tweet_count\":0},{\"end\":\"2021-03-22T00:00:00.000Z\",\"start\":\"2021-03-21T00:00:00.000Z\",\"tweet_count\":2},{\"end\":\"2021-03-23T00:00:00.000Z\",\"start\":\"2021-03-22T00:00:00.000Z\",\"tweet_count\":2},{\"end\":\"2021-03-24T00:00:00.000Z\",\"start\":\"2021-03-23T00:00:00.000Z\",\"tweet_count\":3},{\"end\":\"2021-03-25T00:00:00.000Z\",\"start\":\"2021-03-24T00:00:00.000Z\",\"tweet_count\":21},{\"end\":\"2021-03-26T00:00:00.000Z\",\"start\":\"2021-03-25T00:00:00.000Z\",\"tweet_count\":8},{\"end\":\"2021-03-27T00:00:00.000Z\",\"start\":\"2021-03-26T00:00:00.000Z\",\"tweet_count\":2},{\"end\":\"2021-03-28T00:00:00.000Z\",\"start\":\"2021-03-27T00:00:00.000Z\",\"tweet_count\":1},{\"end\":\"2021-03-29T00:00:00.000Z\",\"start\":\"2021-03-28T00:00:00.000Z\",\"tweet_count\":1},{\"end\":\"2021-03-30T00:00:00.000Z\",\"start\":\"2021-03-29T00:00:00.000Z\",\"tweet_count\":5},{\"end\":\"2021-03-31T00:00:00.000Z\",\"start\":\"2021-03-30T00:00:00.000Z\",\"tweet_count\":1},{\"end\":\"2021-04-01T00:00:00.000Z\",\"start\":\"2021-03-31T00:00:00.000Z\",\"tweet_count\":2},{\"end\":\"2021-04-02T00:00:00.000Z\",\"start\":\"2021-04-01T00:00:00.000Z\",\"tweet_count\":6},{\"end\":\"2021-04-03T00:00:00.000Z\",\"start\":\"2021-04-02T00:00:00.000Z\",\"tweet_count\":6},{\"end\":\"2021-04-04T00:00:00.000Z\",\"start\":\"2021-04-03T00:00:00.000Z\",\"tweet_count\":4}],\"meta\":{\"total_tweet_count\":116,\"next_token\":\"1jzu9lk96azp0b3x7888hrpw8fftsbrvyadx5sfpjdz1\"}}"), date = structure(1624559440, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0, namelookup = 0.000119, connect = 0.000122, pretransfer = 0.000781, starttransfer = 0.24946, total = 0.249629)), class = "response")
knitPDFMiktex <- function( Rnw, main, cmd="texify --pdf", options="--tex-option=-synctex=-1 --tex-option=-interaction=nonstopmode", includedir="--tex-option=-include-directory=", stylepath=FALSE, source.code=NULL, make=1, preview=NULL, patchLog = TRUE, sleep = 0, weave = knitr::knit, ...) SweavePDFMiktex( Rnw = Rnw, main = main, cmd = cmd, options = options, includedir = includedir, stylepath = stylepath, source.code = source.code, make = make, preview = preview, patchLog = patchLog, sleep = sleep, weave = weave, ...) SweavePDFMiktex <- function( Rnw, main, cmd="texify --pdf", options="--tex-option=-synctex=-1 --tex-option=-interaction=nonstopmode", includedir="--tex-option=-include-directory=", stylepath=FALSE, source.code=NULL, make=1, preview=NULL, patchLog = TRUE, sleep = 0, ...) { if (!is.null(source.code) && file.exists(source.code)) try(source(source.code, local=TRUE)) if (sub(".*\\.tex$", "TeX", Rnw, ignore.case = TRUE) == "TeX") outputname <- Rnw else outputname <- SweaveAll(Rnw, make=make, stylepath=stylepath, ...)[1] if (missing(main)) main <- outputname cmd <- paste(cmd, " ", options, " ", includedir, Rtexinputs(), " ", main, sep="") cat(cmd, "\n") consoleLog <- try(system(cmd, intern = TRUE)) status <- attr(consoleLog, "status") if (patchLog && !inherits(consoleLog, "try-error")) { tempLog <- tempfile(fileext = ".log") writeLines(consoleLog, tempLog) patchLog(tempLog) consoleLog <- readLines(tempLog) } cat(consoleLog, sep="\n") if(!is.null(status) && status) Sys.sleep(sleep) message(patchSynctex(sub("\\.tex$", ".synctex", main, ignore.case = TRUE), patchLog = patchLog)) if (!is.null(preview)) { pdf <- sub("\\.tex$", ".pdf", main, ignore.case = TRUE) cmd <- sprintf(preview, pdf) cat(cmd, "\n") system(cmd, wait=FALSE, invisible=FALSE) } } knitPDF <- function( Rnw, main, texinputs=NULL, source.code=NULL, make=1, links = NULL, preview = NULL, patchLog = TRUE, weave = knitr::knit, ... ) SweavePDF( Rnw = Rnw, main = main, texinputs = texinputs, source.code = source.code, make = make, links = links, preview = preview, patchLog = patchLog, weave = weave, ...) SweavePDF <- function( Rnw, main, texinputs=NULL, source.code=NULL, make=1, links = NULL, preview = NULL, patchLog = TRUE, ... ) { if (!is.null(source.code) && file.exists(source.code)) try(source(source.code, local=TRUE)) if (sub(".*\\.tex$", "TeX", Rnw, ignore.case = TRUE) == "TeX") outputname <- Rnw else outputname <- SweaveAll(Rnw, make=make, ...)[1] if (missing(main)) main <- outputname consoleLog <- try(texi2dvi(main, pdf=TRUE, texinputs=texinputs, links=links)) if (patchLog && !inherits(consoleLog, "try-error")) { tempLog <- tempfile(fileext = ".log") writeLines(consoleLog, tempLog) patchLog(tempLog) consoleLog <- readLines(tempLog) } cat(consoleLog, sep = "\n") message(patchSynctex(sub("\\.tex$", ".synctex", main, ignore.case=TRUE), patchLog = patchLog)) pdf <- sub("\\.tex$", ".pdf", main, ignore.case = TRUE) if (!is.null(preview)) { cmd <- sprintf(preview, pdf) cat(cmd, "\n") system(cmd, wait=FALSE, invisible=FALSE) } } knitDVIPDFM <- function(Rnw, main, latex = "latex", latexOpts = "-synctex=1 -interaction=nonstopmode", dvipdfm = "dvipdfm", dvipdfmOpts = "", texinputs=NULL, source.code=NULL, make=1, preview = NULL, patchLog = TRUE, weave = knitr::knit, ... ) SweaveDVIPDFM(Rnw = Rnw, main=main, latex = latex, latexOpts = latexOpts, dvipdfm = dvipdfm, dvipdfmOpts = dvipdfmOpts, texinputs=texinputs, source.code=source.code, make=make, preview = preview, patchLog = patchLog, weave = weave, ... 
) SweaveDVIPDFM <- function(Rnw, main, latex = "latex", latexOpts = "-synctex=1 -interaction=nonstopmode", dvipdfm = "dvipdfm", dvipdfmOpts = "", texinputs=NULL, source.code=NULL, make=1, preview = NULL, patchLog = TRUE, ... ) { if (!is.null(source.code) && file.exists(source.code)) try(source(source.code, local=TRUE)) if (sub(".*\\.tex$", "TeX", Rnw, ignore.case = TRUE) == "TeX") outputname <- Rnw else outputname <- SweaveAll(Rnw, make=make, ...)[1] otexinputs <- Sys.getenv("TEXINPUTS", unset = NA) if(is.na(otexinputs)) { on.exit(Sys.unsetenv("TEXINPUTS")) otexinputs <- "." } else on.exit(Sys.setenv(TEXINPUTS = otexinputs)) Sys.setenv(TEXINPUTS = paste(otexinputs, Rtexinputs(), "", sep = .Platform$path.sep)) if (missing(main)) main <- outputname cmd <- paste(shQuote(latex), latexOpts, shQuote(main)) consoleLog <- try(system(cmd, intern = TRUE)) if (patchLog && !inherits(consoleLog, "try-error")) { tempLog <- tempfile(fileext = ".log") writeLines(consoleLog, tempLog) patchLog(tempLog) consoleLog <- readLines(tempLog) } cat(consoleLog, sep = "\n") dvi <- sub("\\.tex$", ".dvi", main, ignore.case = TRUE) message(patchSynctex(sub("\\.tex$", ".synctex", main, ignore.case=TRUE), fromDVI = dvi, patchLog = patchLog)) cmd <- paste(shQuote(dvipdfm), dvipdfmOpts, shQuote(dvi)) system(cmd) if (!is.null(preview)) { pdf <- sub("\\.tex$", ".pdf", main, ignore.case = TRUE) cmd <- sprintf(preview, pdf) cat(cmd, "\n") system(cmd, wait=FALSE, invisible=FALSE) } } rawToLines <- function(raw) { temp <- tempfile() on.exit(unlink(temp)) writeBin(raw, temp) readLines(temp, warn=FALSE) } pdfEOF <- function(con) { seek(con, -1024, "end") tail <- rawToChar(readBin(con, "raw", 1024), multiple=TRUE) stop <- rev(which(tail == "F")) for (last in stop) { if (all(tail[last - 4:0] == c("%", "%", "E", "O", "F"))) return( last - 4 + seek(con, 0) - 1024 - 1 ) } stop("%%EOF marker not found") } pdfStartxrefs <- function(con, eof=pdfEOF(con)) { seek(con, eof - 20) tail <- rawToLines(readBin(con, "raw", 20)) startxref <- which(tail == "startxref") as.numeric(tail[startxref+1]) } pdfXrefsCompressed <- function(pdfname) { con <- file(pdfname, "rb") on.exit(close(con)) seek(con, pdfStartxrefs(con, pdfEOF(con))) xrefs <- rawToLines(readBin(con, "raw", 50)) grepl(" obj <<$", xrefs[1]) } pdfXrefblock <- function(con, start=pdfStartxrefs(con, eof), eof=pdfEOF(con)) { seek(con, start) xrefs <- rawToLines(readBin(con, "raw", eof - start)) trailer <- which(xrefs == "trailer")[1] if (is.na(trailer)) stop("PDF file can't be read--may be using compression.") tail <- xrefs[trailer:length(xrefs)] xrefs <- xrefs[1:(trailer-1)] line <- 2 offsets <- numeric(0) generations <- numeric(0) free <- logical(0) maxobj <- 0 while (line <= length(xrefs)) { head <- scan(temp <- textConnection(xrefs[line]), what=list(first=0, count=0), quiet = TRUE) close(temp) newmax <- head$first + head$count if (newmax > maxobj) { length(offsets) <- newmax length(generations) <- newmax length(free) <- newmax } if (head$count > 0) { body <- scan(temp <- textConnection(xrefs[line + 1:head$count]), what = list(offsets=0, generations=0, free="n"), quiet = TRUE) close(temp) inds <- head$first + 1:head$count offsets[inds] <- body$offsets generations[inds] <- body$generations free[inds] <- body$free == "f" } line <- line + head$count + 1 } stop <- which(tail == "startxref")[1] tail <- tail[2:(stop-1)] tail[1] <- sub("^<< *", "", tail[1]) tail[length(tail)] <- sub(" *>>$", "", tail[length(tail)]) list(xrefs = data.frame(offsets = offsets, generations = generations, free=free), 
dict = tail) } dictVal <- function(dict, key) { dict <- unlist(strsplit(paste(dict, collapse=" "), "/")) dict <- grep(paste("^", key, sep=""), dict, value=TRUE) return(substr(dict, nchar(key)+1, nchar(dict))) } pdfXrefblocks <- function(con, collapse = TRUE) { result <- list() eof <- pdfEOF(con) start <- pdfStartxrefs(con, eof) rows <- NULL repeat { block <- pdfXrefblock(con, start, eof) if (collapse) { if (is.null(rows)) { rows <- as.numeric(dictVal(block$dict, "Size")) NAs <- rep(NA_real_, rows) xref <- data.frame(offsets = NAs, generations = NAs, free= as.logical(NAs)) } replace <- ((1:rows) <= nrow(block$xrefs)) & is.na(xref$offsets) xref[replace,] <- block$xrefs[replace,] } else result <- c(result, list(block)) start <- as.numeric(dictVal(block$dict, "Prev")) if (!length(start)) break } if (collapse) return(xref) else return(result) } pdfFindobj <- function(con, pattern) { xrefs <- pdfXrefblocks(con) xrefs <- subset(xrefs, !xrefs$free) o <- order(xrefs$offsets) xrefs <- xrefs[o,] result <- character(0) for (i in 1:nrow(xrefs)) { seek(con, xrefs$offsets[i]) head <- readChar(con, nchar(pattern) + 30) head <- sub("^[[:space:][:digit:]]*obj[[:space:]]*", "", head) if (grepl(pattern, head)) { seek(con, xrefs$offsets[i]) obj <- rawToLines(readBin(con, "raw", xrefs$offsets[i+1]-xrefs$offsets[i])) stop <- grep("endobj", obj) obj <- paste(obj[1:stop], collapse=" ") obj <- sub("^[[:space:][:digit:]]*obj[[:space:]]*", "", obj) obj <- sub("[[:space:]]*endobj.*", "", obj) result <- c(result, obj) } } result } pdfobjs <- function(file, pattern) { if (is.character(file)) { file <- file(file, "rb") on.exit(close(file)) } if (!inherits(file, "connection")) stop("'file' must be a character string or connection") if (!isOpen(file, "rb")) { open(file, "rb") on.exit(close(file)) } pdfFindobj(file, pattern) } pdfStreams <- function(file, pattern) { streamhead <- streams <- pdfobjs(file, "<<\n/Length[[:space:]]+[[:digit:]]+[[:space:]]*\n>>\nstream\n") streams <- sub("^<<[[:space:]]*/Length[[:space:]]+[[:digit:]]+[[:space:]]*>>[[:space:]]*stream[[:space:]]*", "", streams) streams <- sub("[[:space:]]*endstream$", "", streams) streams <- grep(pattern, streams, value=TRUE) streams } syncFiles <- function(lines) { inputs <- grep("^Input:", lines) filenames <- sub("^Input:[[:digit:]]*:","",lines[inputs]) filepaths <- dirname(filenames) filenames <- basename(filenames) nodot <- grep("^[^.]*$", filenames) filenames[nodot] <- paste(filenames[nodot], ".tex", sep="") filenums <- sub("^Input:","",lines[inputs]) filenums <- as.numeric(sub(":.*","",filenums)) o <- order(filenums) data.frame(tag=filenums[o], path=filepaths[o], name=filenames[o]) } parseConcords <- function(lines) { parseConcord <- function(split) { oldname <- split[2] newsrc <- split[3] if (length(split) == 4) { ofs <- 0 vi <- 4 } else { ofs <- as.integer(sub("^ofs ([0-9]+)", "\\1", split[4])) vi <- 5 } values <- as.integer(strsplit(split[vi], " ")[[1]]) firstline <- values[1] rledata <- matrix(values[-1], nrow=2) rle <- structure(list(lengths=rledata[1,], values=rledata[2,]), class="rle") diffs <- inverse.rle(rle) concord <- c(firstline, firstline + cumsum(diffs)) list(oldname=oldname, newsrc=newsrc, concord=concord, ofs=ofs) } concords <- strsplit(lines, ":") concords <- lapply(concords, parseConcord) names(concords) <- sapply(concords, function(x) x$oldname) concords } grepConcords <- function(pdfname) { if (!exists("grepRaw")) return(character(0)) size <- file.info(pdfname)$size if (is.na(size)) stop(pdfname, " not found") buffer <- readBin(pdfname, 
"raw", size) result <- grepRaw("concordance:[^:\n[:space:]]+:[^:\n[:space:]]+:(ofs [[:digit:]]+:)?[[:digit:]][-[:digit:] ]*", buffer, fixed=FALSE, all=TRUE, value=TRUE) if (!length(result)) character(0) else sapply(result, rawToChar) } patchSynctex <- function(f, newname=f, uncompress="pdftk %s output %s uncompress", fromDVI = NULL, patchLog = TRUE) { basename <- tools::file_path_sans_ext(f) compressed <- FALSE if (!file.exists(f)) { f <- paste(f, ".gz", sep="") if (file.exists(f)) { compressed <- TRUE force(newname) f <- gzfile(f) } } lines <- try(readLines(f, warn=FALSE), silent=TRUE) if (inherits(lines, "try-error")) return(paste(f,"cannot be read, no patching done.")) files <- syncFiles(lines) if (is.null(fromDVI)) { pdfname <- file.path(files$path[1], paste(sub(".tex", "", files$name[1]), ".pdf", sep="")) concords <- parseConcords(grepConcords(pdfname)) if (!length(concords)) concords <- readConcords(list.files(as.character(files$path[1]), pattern = "-concordance.tex$")) if (!length(concords)) { if (pdfXrefsCompressed(pdfname)) { if (missing(uncompress) && nchar(Sys.which("pdftk")) == 0) return(paste("No patches made:\n", pdfname, "\nis compressed and no uncompressor was found.")) oldname <- pdfname pdfname <- tempfile() system(sprintf(uncompress, oldname, pdfname)) } concords <- parseConcords(pdfStreams(pdfname, "^concordance:")) if (!length(concords)) concords <- parseConcords(pdfobjs(pdfname, "^concordance:")) } } else { specials <- DVIspecials(fromDVI) concordind <- grep("^concordance:", specials) concords <- parseConcords(specials[concordind]) if (length(concords)) { specials[concordind] <- NA } } if (patchLog) patchLog(paste0(basename, ".log"), concords = concords) re <- "^([vhxkgr$[(])([[:digit:]]+),([[:digit:]]+)([^[:digit:]].*)" srcrefind <- grep(re, lines) srcrefs <- lines[srcrefind] ops <- sub(re, "\\1", srcrefs) tags <- sub(re, "\\2", srcrefs) linenums <- sub(re, "\\3", srcrefs) rest <- sub(re, "\\4", srcrefs) linenums <- as.integer(linenums) changed <- rep(FALSE, length(tags)) newtags <- c() maxtag <- max(files$tag) for (i in seq_along(concords)) { n <- names(concords)[i] ofs <- concords[[i]]$ofs concord <- concords[[i]]$concord newsrc <- concords[[i]]$newsrc if (!(newsrc %in% names(newtags))) { maxtag <- maxtag + 1 newtags <- c(newtags, maxtag) names(newtags)[length(newtags)] <- newsrc } tag <- files$tag[files$name == n] if (length(tag) == 1) { subset <- (tags == tag) & (linenums > ofs) & (linenums <= ofs + length(concord)) linenums[subset] <- concord[linenums[subset] - ofs] tags[subset] <- newtags[newsrc] changed[subset] <- TRUE } } if (any(changed)) { newrefs <- ifelse(changed, paste(ops, tags, ",", linenums, rest, sep=""), srcrefs) lines[srcrefind] <- newrefs firstInput <- grep("^Input:", lines)[1] lines <- c(lines[1:firstInput], paste("Input:", newtags, ":", names(newtags), sep=""), lines[(firstInput+1):length(lines)]) } con <- if (compressed) gzfile(newname, "wb") else file(newname, "wb") on.exit(close(con)) writeLines(lines, con, sep="\xa") changes <- sum(changed) + length(newtags) msg <- paste(changes, "patches made.") if (!changes) msg <- paste(msg, "Did you set \\SweaveOpts{concordance=TRUE}?") msg }
library(testthat)
test_check("RSiteCatalyst", filter = "queuepathing")
standard_error.parameters_skewness <- function(model, ...) {
  attributes(model)$SE
}
standard_error.parameters_kurtosis <- standard_error.parameters_skewness
OptSig.Weight <- function(df1, df2, m, delta = 2, p = 0.5, k = 1, Figure = TRUE) {
  D <- Folded.Normal(m, delta, Figure)
  ff1 <- D$x
  w <- D$w
  stat <- numeric()
  for (i in seq_along(ff1)) {
    tem <- OptSig.F(df1, df2, ncp = ff1[i], p, k, Figure = FALSE)$alpha.opt
    stat <- c(stat, tem)
  }
  alphas <- sum(w * stat)
  cr1 <- qf(1 - alphas, df1 = df1, df2 = df2)
  return(list(alpha.opt = alphas, crit.opt = cr1))
}
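# Illustrative call (not from the original source); the argument values are
# hypothetical and it assumes the OptSig package's Folded.Normal() and
# OptSig.F() helpers are available, so it is left commented:
# OptSig.Weight(df1 = 1, df2 = 98, m = 0, delta = 2, Figure = FALSE)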
Prod2 <- function(blk, p, A, B, options = 0) {
  iscellA <- is.list(A)
  iscellB <- is.list(B)
  if (!iscellA && !iscellB) {
    if (blk[[p, 1]] == "s") {
      C <- A %*% B
      if (options == 1) {
        C <- .5 * (C + t(C))
      }
    } else if (blk[[p, 1]] == "q" || blk[[p, 1]] == "l" || blk[[p, 1]] == "u") {
      C <- A * B
    }
  } else {
    stop("Error in Prod2: A, B must be matrices")
  }
  return(C)
}
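# Small runnable sketch (not from the original source): for a semidefinite
# block ("s"), Prod2 reduces to an ordinary, optionally symmetrised, product.
blk <- matrix(list(), 1, 2)
blk[[1, 1]] <- "s"
blk[[1, 2]] <- 3
A <- matrix(1:9, 3, 3)
B <- diag(3)
Prod2(blk, 1, A, B, options = 1)  # 0.5 * (A + t(A)) since B is the identity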
insertTableCaption <- function(captionText = "",
                               captionName = "tab.cap",
                               prefix = getOption(paste0(optionName, "_prefix"), "Table %s: "),
                               suffix = getOption(paste0(optionName, "_suffix"), ""),
                               optionName = paste0("setCaptionNumbering_", captionName),
                               resetCounterTo = NULL) {
  return(
    insertNumberedCaption(
      captionText = captionText,
      captionName = captionName,
      prefix = prefix,
      suffix = suffix,
      optionName = optionName,
      resetCounterTo = resetCounterTo
    )
  )
}
context("checkListElementClass") test_that("checkListElementClass", { checkListElementClass(list(1, 5), cl="numeric") expect_error(checkListElementClass(list(1, "a"), cl="numeric"), "numeric") xs = list("a", "b") checkListElementClass(xs, "character") expect_error(checkListElementClass(xs, "integer"), "character") })
dots_grob = function(data, x, y, name = NULL, gp = gpar(), vp = NULL, dotsize = 1.07, stackratio = 1, binwidth = NA, layout = "bin", orientation = "vertical" ) { datas = data %>% arrange_at(x) %>% group_by_at(c("group", y)) %>% group_split() gTree( datas = datas, dotsize = dotsize, stackratio = stackratio, binwidth = binwidth, layout = layout, orientation = orientation, name = name, gp = gp, vp = vp, cl = "dots_grob" ) } makeContent.dots_grob = function(x) { grob_ = x datas = grob_$datas orientation = grob_$orientation dotsize = grob_$dotsize binwidth = grob_$binwidth layout = grob_$layout define_orientation_variables(orientation) dot_size_ratio = 1.07 font_size_ratio = 1.43/dot_size_ratio stackratio = grob_$stackratio heightratio = convertUnit(unit(dotsize * stackratio, "native"), "native", axisFrom = x, axisTo = y, typeFrom = "dimension", valueOnly = TRUE) if (is.unit(binwidth)) { binwidth = convertUnit(binwidth, "native", axisFrom = x, typeFrom = "dimension", valueOnly = TRUE) } if (length(binwidth) == 2) { user_min_binwidth = min(binwidth) user_max_binwidth = max(binwidth) binwidth = NA } else { user_min_binwidth = 0 user_max_binwidth = Inf } if (isTRUE(is.na(binwidth))) { binwidths = map_dbl_(datas, function(d) { maxheight = max(d[[ymax]] - d[[ymin]]) find_dotplot_binwidth(d[[x]], maxheight, heightratio, stackratio) }) binwidth = max(min(binwidths, user_max_binwidth), user_min_binwidth) } children = do.call(gList, lapply(datas, function(d) { dot_positions = bin_dots( d$x, d$y, binwidth = binwidth, heightratio = heightratio, stackratio = stackratio, layout = layout, side = d$side[[1]], orientation = orientation ) lwd = d$size * .stroke/2 lwd[is.na(lwd)] = 0 dot_pointsize = convertUnit(unit(binwidth * dotsize, "native"), "points", axisFrom = x, axisTo = "y", typeFrom = "dimension", valueOnly = TRUE) dot_fontsize = max( dot_pointsize * font_size_ratio - lwd, 0.5 ) pointsGrob( dot_positions$x, dot_positions$y, pch = d$shape, gp = gpar( col = alpha(d$colour, d$alpha), fill = alpha(d$fill, d$alpha), fontsize = dot_fontsize, lwd = lwd, lty = d$linetype ) ) })) setChildren(grob_, children) } draw_slabs_dots = function(self, s_data, panel_params, coord, orientation, normalize, fill_type, na.rm, dotsize, stackratio, binwidth, layout, ... 
) { define_orientation_variables(orientation) s_data = ggplot2::remove_missing(s_data, na.rm, c(x, y, "justification", "scale"), name = "geom_dotsinterval", finite = TRUE) s_data = ggplot2::remove_missing(s_data, na.rm, "side", name = "geom_dotsinterval", finite = FALSE) if (nrow(s_data) == 0) return(list()) s_data$thickness = 1 s_data = self$override_slab_aesthetics(rescale_slab_thickness( s_data, orientation, normalize, height, y, ymin, ymax )) s_data[[y]] = case_when_side(s_data$side, orientation, topright = s_data[[ymin]], bottomleft = s_data[[ymax]], both = (s_data[[ymax]] + s_data[[ymin]])/2 ) if (!coord$is_linear()) { stop0("geom_dotsinterval does not work properly with non-linear coordinates.") } if (inherits(coord, "CoordFlip")) { orientation = switch(orientation, y = , horizontal = "x", x = , vertical = "y" ) define_orientation_variables(orientation) } s_data = coord$transform(s_data, panel_params) if (!isTRUE(is.na(binwidth)) && !is.unit(binwidth)) { binwidth = binwidth / (max(panel_params[[x.range]]) - min(panel_params[[x.range]])) } slab_grobs = list(dots_grob( s_data, x, y, dotsize = dotsize, stackratio = stackratio, binwidth = binwidth, layout = layout, orientation = orientation )) } NULL GeomDotsinterval = ggproto("GeomDotsinterval", GeomSlabinterval, default_aes = defaults(aes( slab_shape = NULL ), GeomSlabinterval$default_aes), default_key_aes = defaults(aes( slab_shape = 21, slab_size = 0.75, slab_colour = "gray65" ), GeomSlabinterval$default_key_aes), override_slab_aesthetics = function(self, s_data) { s_data = ggproto_parent(GeomSlabinterval, self)$override_slab_aesthetics(s_data) s_data$shape = s_data[["slab_shape"]] s_data }, default_params = defaults(list( normalize = "none", binwidth = NA, dotsize = 1.07, stackratio = 1, layout = "bin" ), GeomSlabinterval$default_params), hidden_params = union(c( "normalize", "fill_type" ), GeomSlabinterval$hidden_params), setup_data = function(self, data, params) { data = ggproto_parent(GeomSlabinterval, self)$setup_data(data, params) data$thickness = 1 data }, draw_slabs = function(self, ...) 
draw_slabs_dots(self, ...), draw_key_slab = function(self, data, key_data, params, size) { if ( params$show_slab && any(!is.na(data[,c( "fill","alpha","slab_fill","slab_colour","slab_size", "slab_linetype","slab_alpha","slab_shape" )])) ) { s_key_data = self$override_slab_aesthetics(key_data) s_key_data$stroke = s_key_data$size s_key_data$size = 2 draw_key_point(s_key_data, params, size) } } ) geom_dotsinterval = make_geom(GeomDotsinterval) GeomDots = ggproto("GeomDots", GeomDotsinterval, default_key_aes = defaults(aes( shape = 21, size = 0.75, colour = "gray65" ), GeomSlabinterval$default_key_aes), override_slab_aesthetics = function(self, s_data) { s_data$colour = s_data[["slab_colour"]] %||% s_data[["colour"]] s_data$colour = apply_colour_ramp(s_data[["colour"]], s_data[["colour_ramp"]]) s_data$fill = s_data[["slab_fill"]] %||% s_data[["fill"]] s_data$fill = apply_colour_ramp(s_data[["fill"]], s_data[["fill_ramp"]]) s_data$alpha = s_data[["slab_alpha"]] %||% s_data[["alpha"]] s_data$size = s_data[["slab_size"]] %||% s_data[["size"]] s_data$shape = s_data[["slab_shape"]] %||% s_data[["shape"]] s_data }, default_params = defaults(list( show_point = FALSE, show_interval = FALSE ), GeomDotsinterval$default_params), hidden_params = union(c( "show_slab", "show_point", "show_interval", "interval_size_domain", "interval_size_range", "fatten_point" ), GeomDotsinterval$hidden_params), draw_key_slab = function(self, data, key_data, params, size) { s_key_data = self$override_slab_aesthetics(key_data) s_key_data$stroke = s_key_data$size s_key_data$size = 2 draw_key_point(s_key_data, params, size) } ) GeomDots$default_key_aes$slab_colour = NULL GeomDots$default_key_aes$slab_size = NULL geom_dots = make_geom(GeomDots)
ri.d <- function (d,n1,n2,rep.n1=NA, rep.n2=NA,prob.level=.95) { prob_level <- prob.level if (is.na(rep.n1)) { rep.n1=n1 } if (is.na(rep.n2)) { rep.n2=n2 } original_d <-d original_N1 <- n1 original_N2 <- n2 replication_N1 <- rep.n1 replication_N2 <- rep.n2 original_CI <- ci.smd(smd=original_d,n.1=original_N1,n.2=original_N2,conf.level=prob_level) l1 <- original_CI$Lower.Conf.Limit.smd u1 <- original_CI$Upper.Conf.Limit.smd mod_CI <- ci.smd(smd=original_d,n.1=replication_N1,n.2=replication_N2,conf.level=prob_level) l2 <- mod_CI$Lower.Conf.Limit.smd u2 <- mod_CI$Upper.Conf.Limit.smd replication_interval_values <- mas_interval(original_r=original_d,l1=l1,u1=u1,l2=l2,u2=u2) LL <- replication_interval_values$LL UL <- replication_interval_values$UL replication_interval_metrics <- list() replication_interval_metrics$original_d <- original_d replication_interval_metrics$original_N1 <- original_N1 replication_interval_metrics$original_N2 <- original_N2 replication_interval_metrics$replication_N1 <- replication_N1 replication_interval_metrics$replication_N2 <- replication_N2 replication_interval_metrics$lower_replication_interval <- LL replication_interval_metrics$upper_replication_interval <- UL replication_interval_metrics$lower_confidence_interval <- l1 replication_interval_metrics$upper_confidence_interval <- u1 replication_interval_metrics$prob_level <- prob_level percent_level <- as.integer(round(prob_level*100)) method_text <- get_method_text_d(original_d,LL,UL,replication_N1,replication_N2,percent_level,"d-value") replication_interval_metrics$method_text <- method_text$txt_combined replication_interval_metrics$ri_text <- method_text$txt_ri class(replication_interval_metrics) <- "d_replication_interval" return(replication_interval_metrics) } print.d_replication_interval <- function(x,...) 
{
  conf_per <- round(x$prob_level * 100)
  cat(sprintf("\nOriginal study: d = %1.2f, N1 = %d, N2 = %d, %d%% CI[%1.2f, %1.2f]", x$original_d, x$original_N1, x$original_N2, conf_per, x$lower_confidence_interval, x$upper_confidence_interval))
  cat(sprintf("\nReplication study: N1 = %d, N2 = %d", x$replication_N1, x$replication_N2))
  cat(sprintf("\nReplication interval: %d%% RI[%1.2f, %1.2f].\n\n", conf_per, x$lower_replication_interval, x$upper_replication_interval))
  cat("\nInterpretation:\n")
  cat(x$method_text)
}
ri.d.demo <- function(n1=50, n2=50, rep.n1=NA, rep.n2=NA, pop.d=.50, number.trials=10000, prob.level=.95, bias.correction=FALSE) {
  bias_correction <- bias.correction
  number_trials <- number.trials
  original_N1 <- n1
  original_N2 <- n2
  prob_level <- prob.level
  # Match ri.d: the NA default (rather than missing()) signals "same as original"
  if (is.na(rep.n1)) {rep.n1 <- n1}
  if (is.na(rep.n2)) {rep.n2 <- n2}
  replication_N1 <- rep.n1
  replication_N2 <- rep.n2
  pop_d <- pop.d
  output <- pbapply::pbreplicate(number_trials, get_orig_rep_d(original_N1, original_N2, replication_N1, replication_N2, pop_d, prob_level, bias_correction))
  n1 <- output[,1,]
  n2 <- output[,2,]
  d <- output[,3,]
  ci.LL <- output[,4,]
  ci.UL <- output[,5,]
  rep.n1 <- output[,6,]
  rep.n2 <- output[,7,]
  rep.d <- output[,8,]
  ri.LL <- output[,9,]
  ri.UL <- output[,10,]
  rep.d.in.ci <- as.logical(output[,11,])
  rep.d.in.ri <- as.logical(output[,12,])
  output_df <- data.frame(n1, n2, d, ci.LL, ci.UL, rep.n1, rep.n2, ri.LL, ri.UL, rep.d, rep.d.in.ci, rep.d.in.ri)
  in_replication_interval_count <- sum(rep.d.in.ri)
  in_confidence_interval_count <- sum(rep.d.in.ci)
  percent_in_ri <- (in_replication_interval_count/number_trials)*100
  percent_in_ci <- (in_confidence_interval_count/number_trials)*100
  replication_demo_output <- list()
  replication_demo_output$percent_in_ri <- percent_in_ri
  replication_demo_output$percent_in_ci <- percent_in_ci
  replication_demo_output$in_replication_interval_count <- in_replication_interval_count
  replication_demo_output$in_confidence_interval_count <- in_confidence_interval_count
  replication_demo_output$results_each_trial <- output_df
  replication_demo_output$pop_d <- pop_d
  replication_demo_output$original_N1 <- original_N1
  replication_demo_output$original_N2 <- original_N2
  replication_demo_output$replication_N1 <- replication_N1
  replication_demo_output$replication_N2 <- replication_N2
  replication_demo_output$prob_level <- prob_level
  class(replication_demo_output) <- "replication_demo_d"
  return(replication_demo_output)
}
print.replication_demo_d <- function(x,...)
{ num_trials <- dim(x$results_each_trial)[1] cat(sprintf("\nPopulation d-value: %1.2f\n",x$pop_d)) cat(sprintf("\nOriginal cell sizes: %d %d\nReplication cell sizes: %d %d",x$original_N1,x$original_N2,x$replication_N1,x$replication_N2)) percent_level <- round(x$prob_level*100) cat(sprintf("\n\n%d%% Replication interval capture percentage: %2.1f%% (%d of %d trials)",percent_level,x$percent_in_ri,x$in_replication_interval_count,num_trials)) cat(sprintf("\n%d%% Confidence interval capture percentage: %2.1f%% (%d of %d trials)",percent_level,x$percent_in_ci,x$in_confidence_interval_count,num_trials)) table_out <- x$results_each_trial cat("\n\nIllustrative Trials:\n\n") print(table_out[1:5,],row.names=FALSE,digits = 2) cat("\n") cat("Note: n1 = original cell 1 size, n2 = original cell 2 size, d = original d-value,") cat("\nci.LL = lower-limit confidence interval, ci.UL = upper-limit confidence interval,") cat("\nrep.n1 = replication cell 1 size, rep.n2 = replication cell 2 size,") cat("\nri.LL = lower-limit replication interval, ri.UL = upper-limit replication interval,") cat("\nrep.d = replication d-value.\n") } get_orig_rep_d <- function(original_N1,original_N2,replication_N1,replication_N2,pop_d,prob_level,bias_correction) { original_cell1 <- rnorm(original_N1) + pop_d original_cell2 <- rnorm(original_N2) original_d <- get_d_value(original_cell1,original_cell2,bias_correction) replication_interval_metrics <- ri.d(d=original_d,n1=original_N1,n2=original_N2,rep.n1=replication_N1,rep.n2=replication_N2,prob.level=prob_level) confidence_interval <- c(replication_interval_metrics$lower_confidence_interval,replication_interval_metrics$upper_confidence_interval) replication_interval <- c(replication_interval_metrics$lower_replication_interval,replication_interval_metrics$upper_replication_interval) replication_cell1 <- rnorm(replication_N1) + pop_d replication_cell2 <- rnorm(replication_N2) replication_d <- get_d_value(replication_cell1,replication_cell2,bias_correction) cur_result_in_replication_interval <- FALSE is.in.ci <- is_value_in_interval(replication_d, confidence_interval) is.in.ri <- is_value_in_interval(replication_d, replication_interval) ci_LL <- confidence_interval[1] ci_UL <- confidence_interval[2] ri_LL <- replication_interval[1] ri_UL <- replication_interval[2] output <- c(original_N1,original_N2,original_d,ci_LL,ci_UL,replication_N1,replication_N2,replication_d,ri_LL,ri_UL,is.in.ci,is.in.ri) output <- t(output) return(output) }
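# Example usage (an illustrative sketch; assumes MBESS::ci.smd and the internal
# helpers mas_interval() and get_method_text_d() used above are available):
# ri.d(d = 0.65, n1 = 40, n2 = 40, rep.n1 = 80, rep.n2 = 80)
# ri.d.demo(n1 = 50, n2 = 50, pop.d = 0.50, number.trials = 1000)
# Larger replication samples narrow the replication interval, because the
# interval reflects sampling error in both the original and replication study.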
fillGroupMembersWithSexRatio <- function(candidates, groupMembers, grpNum, kin, ped, minAge, numGp, sexRatio) { potentialSires <- getPotentialSires(candidates, minAge, ped) availableMales <- makeAvailable(potentialSires, numGp) availableFemales <- makeAvailable(setdiff(candidates, potentialSires), numGp) while (TRUE) { if (isEmpty(grpNum)) { break } i <- sample(grpNum, 1)[[1]] ratio <- calculateSexRatio(groupMembers[[i]], ped) if (is.na(ratio)) ratio <- 0 if (ratio < sexRatio) { id <- sample(availableFemales[[i]], 1) availableFemales <- removeSelectedAnimalFromAvailableAnimals(availableFemales, id, numGp) } else { if (abs(sexRatio - calculateSexRatio(groupMembers[[i]], ped, additionalMales = 1)) < abs(sexRatio - calculateSexRatio(groupMembers[[i]], ped, additionalFemales = 1))) { id <- sample(availableMales[[i]], 1) availableMales <- removeSelectedAnimalFromAvailableAnimals(availableMales, id, numGp) } else { id <- sample(availableFemales[[i]], 1) availableFemales <- removeSelectedAnimalFromAvailableAnimals(availableFemales, id, numGp) } } groupMembers[[i]] <- c(groupMembers[[i]], id) availableMales[[i]] <- setdiff(availableMales[[i]], kin[[id]]) availableFemales[[i]] <- setdiff(availableFemales[[i]], kin[[id]]) grpNum <- removeGroupIfNoAvailableAnimals(grpNum, availableMales) grpNum <- removeGroupIfNoAvailableAnimals(grpNum, availableFemales) } groupMembers }
rawEnrichmentAnalysis <- function(expr, signatures, genes, parallel.sz = 4, parallel.type = 'SOCK') {
  # Keep only genes shared between the expression matrix and the signature gene list
  shared.genes <- intersect(rownames(expr), genes)
  print(paste("Num. of genes:", length(shared.genes)))
  expr <- expr[shared.genes, ]
  if (dim(expr)[1] < 5000) {
    print(paste("ERROR: not enough genes"))
    return(-1)
  }
  # ssGSEA on gene ranks
  expr <- apply(expr, 2, rank)
  if (packageVersion("GSVA") >= "1.36.0") {
    scores <- GSVA::gsva(expr, signatures, method = "ssgsea", ssgsea.norm = FALSE, parallel.sz = parallel.sz)
  } else {
    scores <- GSVA::gsva(expr, signatures, method = "ssgsea", ssgsea.norm = FALSE, parallel.sz = parallel.sz, parallel.type = parallel.type)
  }
  scores <- scores - apply(scores, 1, min)
  # Average the per-cell-type signatures (three per cell type)
  cell_types <- unlist(strsplit(rownames(scores), "%"))
  cell_types <- cell_types[seq(1, length(cell_types), 3)]
  agg <- aggregate(scores ~ cell_types, FUN = mean)
  rownames(agg) <- agg[, 1]
  scores <- agg[, -1]
  scores
}
transformScores <- function(scores, fit.vals, scale = TRUE) {
  rows <- rownames(scores)[rownames(scores) %in% rownames(fit.vals)]
  tscores <- scores[rows, ]
  minX <- apply(tscores, 1, min)
  A <- rownames(tscores)
  tscores <- (as.matrix(tscores) - minX)/5000
  tscores[tscores < 0] <- 0
  if (scale == FALSE) {
    fit.vals[A, 3] <- 1
  }
  tscores <- (tscores^fit.vals[A, 2])/(fit.vals[A, 3]*2)
  return(tscores)
}
spillOver <- function(transformedScores, K, alpha = 0.5) {
  K <- K * alpha
  diag(K) <- 1
  rows <- rownames(transformedScores)[rownames(transformedScores) %in% rownames(K)]
  # Non-negative least squares against the spillover compensation matrix
  scores <- apply(transformedScores[rows, ], 2, function(x) pracma::lsqlincon(K[rows, rows], x, lb = 0))
  scores[scores < 0] <- 0
  rownames(scores) <- rows
  return(scores)
}
microenvironmentScores <- function(adjustedScores) {
  ImmuneScore = apply(adjustedScores[c('B-cells','CD4+ T-cells','CD8+ T-cells','DC','Eosinophils','Macrophages','Monocytes','Mast cells','Neutrophils','NK cells'),],2,sum)/1.5
  StromaScore = apply(adjustedScores[c('Adipocytes','Endothelial cells','Fibroblasts'),],2,sum)/2
  MicroenvironmentScore = ImmuneScore+StromaScore
  rbind(adjustedScores, ImmuneScore, StromaScore, MicroenvironmentScore)
}
xCellSignifcanceBetaDist = function(scores, beta_params = NULL, rnaseq = TRUE) {
  if (is.null(beta_params)) {
    if (rnaseq) {
      beta_params = xCell.data$spill$beta_params
    } else {
      beta_params = xCell.data$spill.array$beta_params
    }
  }
  scores = scores[rownames(scores) %in% colnames(xCell.data$spill$beta_params[[1]]), ]
  pvals = matrix(0, nrow(scores), ncol(scores))
  rownames(pvals) = rownames(scores)
  eps = 1e-3
  for (i in 1:nrow(scores)) {
    ct = rownames(scores)[i]
    beta_dist = c()
    for (bp in beta_params) {
      # Index the beta parameters by cell-type name, not by row position
      if (sum(bp[, ct] == 0)) {
        bd = matrix(eps, 1, 100000)
      } else {
        bd = stats::rbeta(100000, bp[1, ct], bp[2, ct])
        bd = ((1+eps)*(bp[3, ct]))*bd
      }
      beta_dist = c(beta_dist, bd)
    }
    pvals[i, ] = 1 - mapply(scores[i, ], FUN = function(x) mean(x > beta_dist))
  }
  pvals
}
xCellSignifcanceRandomMatrix = function(scores, expr, spill, alpha = 0.5, nperm = 250) {
  shuff_expr = mapply(seq(1:nperm), FUN = function(x) sample(nrow(expr), nrow(expr)))
  rownames(shuff_expr) = sample(rownames(expr))
  shuff_xcell = xCellAnalysis(shuff_expr, spill = spill, alpha = alpha)
  shuff_xcell = shuff_xcell[rownames(scores), ]
  pvals = matrix(0, nrow(scores), ncol(scores))
  beta_dist = matrix(0, nrow(scores), 100000)
  eps = 1e-3
  for (i in 1:nrow(scores)) {
    x = shuff_xcell[i, ]
    if (stats::sd(x) < eps) {
      beta_dist[i, ] = rep(eps, 100000)
    } else {
      x = x + eps
      beta_params = MASS::fitdistr(x/((1+2*eps)*(max(x)))+eps, "beta", list(shape1 = 1, shape2 = 1), lower = eps)
      beta_dist[i, ] = stats::rbeta(100000, beta_params$estimate[1], beta_params$estimate[2])
      beta_dist[i, ] = ((1+2*eps)*(max(x)))*beta_dist[i, ]
    }
    pvals[i, ] = 1 - unlist(lapply(scores[i, ], FUN = function(x) mean(x > beta_dist[i, ])))
  }
  rownames(pvals) = rownames(scores)
  colnames(pvals) = colnames(scores)
  rownames(shuff_xcell) = rownames(scores)
  rownames(beta_dist) = rownames(scores)
  list(pvals = pvals, shuff_xcell = shuff_xcell, shuff_expr = shuff_expr, beta_dist = beta_dist)
}
xCellAnalysis <- function(expr, signatures = NULL, genes = NULL, spill = NULL, rnaseq = TRUE, scale = TRUE, alpha = 0.5, parallel.sz = 4, parallel.type = 'SOCK', cell.types.use = NULL) {
  if (is.null(signatures)) signatures = xCell.data$signatures
  if (is.null(genes)) genes = xCell.data$genes
  if (is.null(spill)) {
    if (rnaseq == TRUE) {
      spill = xCell.data$spill
    } else {
      spill = xCell.data$spill.array
    }
  }
  if (!is.null(cell.types.use)) {
    A = intersect(cell.types.use, rownames(spill$K))
    if (length(A) < length(cell.types.use)) {
      return('ERROR - not all cell types listed are available')
    }
  }
  # Pass the caller's parallel.type through instead of hard-coding 'SOCK'
  scores <- rawEnrichmentAnalysis(expr, signatures, genes, parallel.sz = parallel.sz, parallel.type = parallel.type)
  scores.transformed <- transformScores(scores, spill$fv, scale)
  if (is.null(cell.types.use)) {
    scores.adjusted <- spillOver(scores.transformed, spill$K, alpha)
    scores.adjusted = microenvironmentScores(scores.adjusted)
  } else {
    scores.adjusted <- spillOver(scores.transformed[cell.types.use, ], spill$K, alpha)
  }
  return(scores.adjusted)
}
CoreAlg <- function(X, y){
  # Fit nu-SVR at three values of nu and keep the fit with the lowest RMSE
  svn_itor <- 3
  res <- function(i){
    if(i == 1){nus <- 0.25}
    if(i == 2){nus <- 0.5}
    if(i == 3){nus <- 0.75}
    model <- e1071::svm(X, y, type = "nu-regression", kernel = "linear", nu = nus, scale = FALSE)
    model
  }
  if(Sys.info()['sysname'] == 'Windows')
    out <- parallel::mclapply(1:svn_itor, res, mc.cores = 1)
  else
    out <- parallel::mclapply(1:svn_itor, res, mc.cores = svn_itor)
  nusvm <- rep(0, svn_itor)
  corrv <- rep(0, svn_itor)
  t <- 1
  while(t <= svn_itor) {
    mySupportVectors <- out[[t]]$SV
    myCoefficients <- out[[t]]$coefs
    weights = t(myCoefficients) %*% mySupportVectors
    weights[which(weights < 0)] <- 0
    w <- weights/sum(weights)
    u <- sweep(X, MARGIN = 2, w, '*')
    k <- apply(u, 1, sum)
    nusvm[t] <- sqrt((mean((k - y)^2)))
    corrv[t] <- cor(k, y)
    t <- t + 1
  }
  rmses <- nusvm
  mn <- which.min(rmses)
  model <- out[[mn]]
  q <- t(model$coefs) %*% model$SV
  q[which(q < 0)] <- 0
  w <- (q/sum(q))
  mix_rmse <- rmses[mn]
  mix_r <- corrv[mn]
  newList <- list("w" = w, "mix_rmse" = mix_rmse, "mix_r" = mix_r)
}
doPerm <- function(perm, X, Y){
  # Build a null distribution of correlations from randomly drawn mixtures
  itor <- 1
  Ylist <- as.list(data.matrix(Y))
  dist <- matrix()
  while(itor <= perm){
    yr <- as.numeric(Ylist[sample(length(Ylist), dim(X)[1])])
    yr <- (yr - mean(yr)) / sd(yr)
    result <- CoreAlg(X, yr)
    mix_r <- result$mix_r
    if(itor == 1) {dist <- mix_r}
    else {dist <- rbind(dist, mix_r)}
    itor <- itor + 1
  }
  newList <- list("dist" = dist)
}
CIBERSORT <- function(sig_matrix, mixture_file, perm = 0, QN = TRUE){
  X <- read.table(sig_matrix, header = TRUE, sep = "\t", row.names = 1, check.names = FALSE)
  Y <- mixture_file
  X <- data.matrix(X)
  Y <- data.matrix(Y)
  X <- X[order(rownames(X)), , drop = FALSE]
  Y <- Y[order(rownames(Y)), , drop = FALSE]
  P <- perm
  # Heuristic: a maximum below 50 suggests log2 space, so transform back
  if(max(Y) < 50) {Y <- 2^Y}
  if(QN == TRUE){
    tmpc <- colnames(Y)
    tmpr <- rownames(Y)
    Y <- preprocessCore::normalize.quantiles(Y)
    colnames(Y) <- tmpc
    rownames(Y) <- tmpr
  }
  Xgns <- row.names(X)
  Ygns <- row.names(Y)
  YintX <- Ygns %in% Xgns
  Y <- Y[YintX, , drop = FALSE]
  XintY <- Xgns %in% row.names(Y)
  X <- X[XintY, , drop = FALSE]
  X <- (X - mean(X)) / sd(as.vector(X))
  Y_norm <- apply(Y, 2, function(mc) (mc - mean(mc)) / sd(mc))
  if(P > 0) {nulldist <- sort(doPerm(P, X, Y)$dist)}
  header <- c('Mixture', colnames(X), "P-value", "Correlation", "RMSE")
  output <- matrix()
  itor <- 1
  mix <- dim(Y)[2]
  pval <- 9999
  while(itor <= mix){
    y <- Y[, itor]
    y <- (y - mean(y)) / sd(y)
    result <- CoreAlg(X, y)
    w <- result$w
    mix_r <- result$mix_r
    mix_rmse <- result$mix_rmse
    if(P > 0) {pval <- 1 - (which.min(abs(nulldist - mix_r)) / length(nulldist))}
    out <- c(colnames(Y)[itor], w, pval, mix_r, mix_rmse)
    if(itor == 1) {output <- out}
    else {output <- rbind(output, out)}
    itor <- itor + 1
  }
  obj <- rbind(header, output)
  obj <- obj[, -1, drop = FALSE]
  obj <- obj[-1, , drop = FALSE]
  obj <- matrix(as.numeric(unlist(obj)), nrow = nrow(obj))
  rownames(obj) <- colnames(Y)
  colnames(obj) <- c(colnames(X), "P-value", "Correlation", "RMSE")
  list(proportions = obj, mix = Y_norm, signatures = X)
}
exp2cell <- function(exp, method = "xCell", perm = 100, QN = TRUE, kcdf = c("Gaussian", "Poisson", "none")) {
  kcdf <- match.arg(kcdf)
  if (method == "xCell") {
    cellmatrix <- xCellAnalysis(exp)
    cellmatrix <- cellmatrix[1:64, ]
  } else if (method == "ssGSEA") {
    cellmatrix <- GSVA::gsva(as.matrix(exp), immunelist, method = 'ssgsea', kcdf = kcdf, abs.ranking = TRUE)
  } else if (method == "CIBERSORT") {
    lm22path <- system.file('extdata', 'LM22.txt', package = 'SMDIC')
    cellmatrix_pre <- CIBERSORT(lm22path, exp, perm = perm, QN = QN)
    cellmatrix_T <- cellmatrix_pre$proportions
    cellmatrix_T <- cellmatrix_T[, 1:22]
    cellmatrix <- t(cellmatrix_T)
  } else {
    stop("method must be one of xCell, ssGSEA, or CIBERSORT")
  }
  return(cellmatrix)
}
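# Example usage (a sketch; 'exprMatrix' is a hypothetical genes-by-samples
# expression matrix with gene symbols as row names; xCell needs most genes
# present (at least 5000 shared), and CIBERSORT additionally requires e1071 and
# preprocessCore plus the LM22.txt signature file shipped with the package):
# cellmatrix <- exp2cell(exprMatrix, method = "xCell")
# cellmatrix <- exp2cell(exprMatrix, method = "CIBERSORT", perm = 100, QN = TRUE)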
parseQuery <- function(dataset, site, forcingDataset = NULL, forcingCondition = NULL,
                       species = NULL, variables = NULL, period = NULL, aggregated = NULL,
                       FUN = mean, automaticPanels = FALSE, quality = NULL,
                       decreasing = TRUE, collapse = TRUE){
  conn <- try(makeConnection(), silent = TRUE)
  if (inherits(conn, 'try-error')){
    stop("Invalid database connection. Please use setDB() to connect to a valid DB", call. = FALSE)
  }else{
    RSQLite::dbDisconnect(conn)
  }
  message("Parsing query")
  # Strip characters that cannot appear in table names. "+" must be matched
  # literally: as a regular expression it is invalid and gsub() would error.
  dataset <- gsub(" ", "", dataset, fixed = TRUE)
  dataset <- gsub("-", "", dataset, fixed = TRUE)
  dataset <- gsub("+", "", dataset, fixed = TRUE)
  if(!is.null(forcingDataset)){
    forcingDataset <- gsub(" ", "", forcingDataset, fixed = TRUE)
    forcingDataset <- gsub("-", "", forcingDataset, fixed = TRUE)
    forcingDataset <- gsub("+", "", forcingDataset, fixed = TRUE)
    if(!is.null(forcingCondition)){
      forcingCondition <- gsub(" ", "", forcingCondition, fixed = TRUE)
      forcingCondition <- gsub("-", "", forcingCondition, fixed = TRUE)
      forcingCondition <- gsub("+", "", forcingCondition, fixed = TRUE)
      dataset <- paste(dataset, forcingDataset, forcingCondition, sep = "_")
    }else{
      dataset <- paste(dataset, forcingDataset, sep = "_")
    }
  }else if(!is.null(forcingCondition)){
    forcingCondition <- gsub(" ", "", forcingCondition, fixed = TRUE)
    forcingCondition <- gsub("-", "", forcingCondition, fixed = TRUE)
    forcingCondition <- gsub("+", "", forcingCondition, fixed = TRUE)
    dataset <- paste(dataset, forcingCondition, sep = "_")
  }
  if(dataset == "TREE" || dataset == "STAND"){
    if(!is.null(species)){
      species <- gsub("-", "", species, fixed = TRUE)
      species <- gsub("+", "", species, fixed = TRUE)
      species <- getTreeSpecies(species)
      dataset <- paste(dataset, species, sep = "_")
    }
  }
  message("Checking dataset")
  tmp <- list(
    dataset = dataset, site = site, variables = variables, period = period,
    aggregated = aggregated, FUN = FUN, automaticPanels = automaticPanels,
    quality = quality, decreasing = decreasing, collapse = collapse,
    item = NULL, variablesChecked = NULL, query = NULL, data = NULL,
    dropVariables = TRUE
  )
  if(is.null(tmp[["variables"]])) tmp[["dropVariables"]] <- FALSE
  dataset <- getDatasets(tmp[["dataset"]])
  if (!dataset) stop("Invalid dataset. Please use browseData() to see the available datasets", call. = FALSE)
  if (tmp[["dataset"]] == "VERSION" || grepl("METADATA", tmp[["dataset"]]) ||
      tmp[["dataset"]] == "POLICY" || tmp[["dataset"]] == "SOURCE"){
    stop(paste("Use browseData for accessing", tmp[["dataset"]]), call. = FALSE)
  }
  message("Checking site")
  if(is.null(tmp[["site"]])){
    if (tmp[["dataset"]] == "SITES" || tmp[["dataset"]] == "SITEDESCRIPTION"){
      tmp[["item"]] <- paste("SELECT * FROM", tmp[["dataset"]], sep = " ")
    }else{
      stop("Please provide a site name", call. = FALSE)
    }
  }else if(!is.null(tmp[["site"]])){
    tmp[["site"]] <- getsites(tmp[["site"]])
    if (!tmp[["site"]]) stop("Invalid site. Please use browseData to see the available sites", call. = FALSE)
    message("Checking variables")
    tmp[["variablesChecked"]] <- parseVariables(tmp[["dataset"]], tmp[["variables"]])
    message("Checking the availability")
    available <- checkAvailable(dataset = tmp[["dataset"]], site = tmp[["site"]])
    if (!available){
      if (tmp$dataset == "CO2_ISIMIP"){
        if(!is.null(site)){
          site <- getsites(site)
          if (!site){stop("Invalid site. Please use browseData to see the available sites", call. = FALSE)}
        }
        tmp[["item"]] <- paste("SELECT * FROM", tmp[["dataset"]], sep = " ")
        tmp[["site"]] <- 99
      }else if (!tmp[["dataset"]] == "SITES"){
        stop("The dataset is not available for the site. Please use browseData() to check data availability", call. = FALSE)
      }
    }
    tmp[["item"]] <- paste("SELECT * FROM", paste(tmp[["dataset"]], tmp[["site"]], sep = "_"), sep = " ")
  }
  return(tmp)
}
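# Example usage (a sketch; the dataset and site names are hypothetical and must
# match entries reported by browseData() for the database set via setDB()):
# q <- parseQuery(dataset = "CLIMATE", site = "solling")
# q$item   # the assembled "SELECT * FROM <table>" statement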
github_api_org_repos = function(owner) { arg_is_chr_scalar(owner) ghclass_api_v3_req( endpoint = "GET /orgs/:owner/repos", owner = owner ) } org_repos = function(org, filter = NULL, exclude = FALSE, full_repo = TRUE) { arg_is_chr_scalar(org) arg_is_chr_scalar(filter, allow_null = TRUE) res = purrr::safely( function() { type = user_type(org) if (is.na(type)) { cli_stop("Organization {.val {org}} does not exist on GitHub.") } else if (type == "Organization") { github_api_org_repos(org) } else if (type == "User") { cli_stop("{.val {org}} is a user not an organization. Use {.fun user_repos} instead.") } else { cli_stop("{.val {org}} has unknown type {.val {type}}.") } } )() status_msg( res, fail = "Failed to retrieve repos for org {.val {org}}." ) if (failed(res) | empty_result(res)) return(invisible(NULL)) if (full_repo) { res = purrr::map_chr(result(res), "full_name") } else { res = purrr::map_chr(result(res), "name") } filter_results(res, filter, exclude) }
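# Example usage (a sketch; requires a GitHub token configured for ghclass, and
# the organization name here is hypothetical):
# org_repos("my-classroom-org")                                  # "owner/repo" names
# org_repos("my-classroom-org", filter = "^hw1-", full_repo = FALSE)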
`aa1a2.a0.b` <- function(a, a1, a2, tol = .Machine$double.eps^.5)
  ifelse(!(le(0, a1) && le(a1, a) && le(a, 1) && le(0, a2) && le(a2, 1)), NA,
  ifelse(eq(a, a1) || eq(a2, 1), a,
  ifelse(eq(a, 1) || eq(a2, 0), NA,
  ifelse(ge(a2.c.b(a2), a), a,
         {
           ctmp <- a0a1a2.a.b(1, a1, a2)
           ifelse(lt(ctmp, a), NA,
           ifelse(eq(ctmp, a), 1,
                  uniroot(function(x) a0a1a2.a.b(x, a1, a2) - a,
                          lower = a1, upper = 1, tol = tol)$root))
         }))))
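# Example usage (a sketch; relies on the comparison helpers le(), eq(), lt(),
# ge() and the companion functions a0a1a2.a.b() and a2.c.b() defined elsewhere
# in this source; the numeric inputs are illustrative only):
# aa1a2.a0.b(a = 0.6, a1 = 0.2, a2 = 0.8)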
add_html_tags <- function(x,
                          context = NULL,
                          codeClass = rock::opts$get(codeClass),
                          codeValueClass = rock::opts$get(codeValueClass),
                          idClass = rock::opts$get(idClass),
                          sectionClass = rock::opts$get(sectionClass),
                          uidClass = rock::opts$get(uidClass),
                          contextClass = rock::opts$get(contextClass),
                          utteranceClass = rock::opts$get(utteranceClass)) {
  codeRegexes <- rock::opts$get(codeRegexes);
  codeValueRegexes <- rock::opts$get(codeValueRegexes);
  idRegexes <- rock::opts$get(idRegexes);
  sectionRegexes <- rock::opts$get(sectionRegexes);
  uidRegex <- rock::opts$get(uidRegex);
  res <- x;
  ### Escape angle brackets in the fragments, and apply the same escaping to
  ### the regexes so that they still match the escaped text.
  res <- gsub("<", "&lt;", res, fixed=TRUE);
  res <- gsub(">", "&gt;", res, fixed=TRUE);
  ### Wrap matches of each code regex in a span carrying the code class.
  codeRegexes <- gsub("<", "&lt;", codeRegexes, fixed=TRUE);
  codeRegexes <- gsub(">", "&gt;", codeRegexes, fixed=TRUE);
  for (currentCodeRegexName in names(codeRegexes)) {
    currentCodeRegex <- codeRegexes[currentCodeRegexName];
    if (any(grepl(currentCodeRegex, res))) {
      spanStart <- paste0('<span class="', codeClass, ' ', currentCodeRegexName, '">');
      res <- gsub(paste0("(", currentCodeRegex, ")"),
                  paste0(spanStart, '\\1</span>'),
                  res);
    }
  }
  ### Wrap matches of each code value regex.
  codeValueRegexes <- gsub("<", "&lt;", codeValueRegexes, fixed=TRUE);
  codeValueRegexes <- gsub(">", "&gt;", codeValueRegexes, fixed=TRUE);
  for (currentCodeValueRegexName in names(codeValueRegexes)) {
    currentCodeValueRegex <- codeValueRegexes[currentCodeValueRegexName];
    if (any(grepl(currentCodeValueRegex, res))) {
      spanStart <- paste0('<span class="', codeValueClass, ' ', currentCodeValueRegexName, '">');
      res <- gsub(paste0("(", currentCodeValueRegex, ")"),
                  paste0(spanStart, '\\1</span>'),
                  res);
    }
  }
  ### Wrap matches of each section break regex.
  sectionRegexes <- gsub("<", "&lt;", sectionRegexes, fixed=TRUE);
  sectionRegexes <- gsub(">", "&gt;", sectionRegexes, fixed=TRUE);
  for (currentBreakRegexName in names(sectionRegexes)) {
    currentBreakRegex <- sectionRegexes[currentBreakRegexName];
    if (any(grepl(currentBreakRegex, res))) {
      spanStart <- paste0('<span class="', sectionClass, ' ', currentBreakRegexName, '">');
      res <- gsub(paste0("(", currentBreakRegex, ")"),
                  paste0(spanStart, '\\1</span>'),
                  res);
    }
  }
  ### Wrap matches of each identifier regex.
  idRegexes <- gsub("<", "&lt;", idRegexes, fixed=TRUE);
  idRegexes <- gsub(">", "&gt;", idRegexes, fixed=TRUE);
  for (currentIdRegexName in names(idRegexes)) {
    currentIdRegex <- idRegexes[currentIdRegexName];
    if (any(grepl(currentIdRegex, res))) {
      res <- gsub(paste0("(", currentIdRegex, ")"),
                  paste0('<span class="', idClass, ' ', currentIdRegexName, '">\\1</span>'),
                  res);
    }
  }
  ### Wrap utterance identifiers, then the context lines, and finally each
  ### utterance as a whole.
  res <- gsub(paste0("(", uidRegex, ")"),
              paste0('<span class="', uidClass, '">\\1</span>'),
              res);
  if (!is.null(context)) {
    res[context] <- paste0('<span class="', contextClass, '">', res[context], '</span>');
  }
  res <- paste0('<div class="', utteranceClass, '">', res, '</div>\n');
  return(res);
}
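### Example usage (a sketch; with rock's default code regexes, inline codes
### such as "[[topic]]" are wrapped in <span> tags and every utterance in a
### <div>):
### add_html_tags(c("Interviewer asks about work. [[work]]",
###                 "Participant answers at length. [[work]]"));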
ScanCBSSimPlot <- function(cases, controls, CBSObj, trueTau, SpikeMat, filename, mainTitle, CIObj=NULL, length.out=10000, localWindow=0.5*10^5, localSeparatePlot=TRUE, smoothF=0.025, xlabScale=10^6, width=12, height=18) {
  p = length(cases)/(length(cases)+length(controls))
  maxCase = max(cases)
  maxControl = max(controls)
  maxVal = max(c(maxCase, maxControl))
  cpts = matrix(CBSObj$statHat[,c(1,2,5)], nrow=nrow(CBSObj$statHat))
  tauHatFull = CBSObj$tauHat
  tauHat = tauHatFull[c(-1, -length(tauHatFull))]
  relCN = CBSObj$relCN
  relCN[relCN <= 0] = 1
  ylims = c(min(c(0, cpts[,3])), max(c(0, cpts[,3])))
  if(!is.null(CIObj)) {
    CIBounds = CIObj$CIRes[3:4,]
    CIL = as.numeric(CIObj$CIRes[5,])
    CIU = as.numeric(CIObj$CIRes[6,])
    relCNCIL = CIL/(1-CIL+10^(-5))/(p/(1-p))
    relCNCIU = CIU/(1-CIU+10^(-5))/(p/(1-p))
  }
  grid.fix = seq(1, maxVal, length.out=length.out)
  gridSize = grid.fix[2]-grid.fix[1]
  casesCountInGrid = getCountsInWindow(cases, 0, maxVal, gridSize, sorted=FALSE)
  casesCountInGridSmooth = lowess(x=grid.fix, y=casesCountInGrid, smoothF)
  casesCountInGridSmooth$y[casesCountInGridSmooth$y<0] = 0
  controlCountInGrid = getCountsInWindow(controls, 0, maxVal, gridSize, sorted=FALSE)
  controlCountInGridSmooth = lowess(x=grid.fix, y=controlCountInGrid, smoothF)
  controlCountInGridSmooth$y[controlCountInGridSmooth$y<0] = 0
  PInGrid = casesCountInGrid/(casesCountInGrid+controlCountInGrid)
  PInGrid[is.nan(PInGrid)] = 0
  PInGridSmooth = lowess(x=grid.fix, y=PInGrid, smoothF)
  relCNInGrid = PInGrid/(1-PInGrid)/(p/(1-p))
  relCNInGrid[is.nan(relCNInGrid) | !is.finite(relCNInGrid) | relCNInGrid <= 0] = 1
  relCNInGrid = log(relCNInGrid, base=2)
  relCNlims = c(min(min(log(relCN, base=2)), min(relCNInGrid)), max(max(log(relCN, base=2)), max(relCNInGrid)))
  tauHatInGrid = grid.fix[tauHat %/% gridSize]/xlabScale
  trueTauInGrid = grid.fix[trueTau %/% gridSize]/xlabScale
  gridYLims = c(min(log(casesCountInGrid+1) - log(controlCountInGrid+1)), log(max(casesCountInGrid, controlCountInGrid)))
  # Clamp the change-point positions to valid indices of grid.fix
  plotTauHatInd = c(min(min(cases),min(controls)), tauHat, maxVal) %/% gridSize
  plotTauHatInd = sapply(plotTauHatInd, function(x) {max(x, 1)})
  plotTauHatInd = sapply(plotTauHatInd, function(x) {min(x, length(grid.fix))})
  plotTauHat = grid.fix[plotTauHatInd]/xlabScale
  pdf(paste(filename, ".pdf", sep=""), width=width, height=height)
  par(mfrow=c(3,1))
  plot(x=grid.fix/xlabScale, y=rep(0, length(grid.fix)), type="n", ylim=ylims, main=mainTitle, ylab="Statistic", xlab=paste("Base Pairs", xlabScale))
  for(i in 1:nrow(cpts)) {
    plotX = c(grid.fix[max(floor(cpts[i,1]/gridSize), 1)]/xlabScale, grid.fix[ceiling(cpts[i,2]/gridSize)]/xlabScale)
    lines(x=plotX, y=rep(cpts[i,3],2), lwd=3)
  }
  for(i in 1:nrow(SpikeMat)) {
    plotX = c(grid.fix[max(floor(SpikeMat[i,3]/gridSize), 1)]/xlabScale, grid.fix[ceiling(SpikeMat[i,4]/gridSize)]/xlabScale)
    lines(x=plotX, y=rep(SpikeMat[i,5],2), lwd=2, col=2)
  }
  abline(v=tauHatInGrid, lty=3, col=4)
  abline(v=trueTauInGrid, lty=3, col=2)
  matplot(x=grid.fix/xlabScale, y=log(cbind(casesCountInGridSmooth$y, controlCountInGridSmooth$y)+1), type="l", lty=c(1,1), col=c(2,1), main="Log Read Intensity", ylab="Read Intensity", xlab=paste("Base Pairs", xlabScale), ylim=gridYLims)
  points(x=grid.fix/xlabScale, y=log(casesCountInGrid+1) - log(controlCountInGrid+1), pch=".", col=1)
  abline(v=tauHatInGrid, lty=3, col=4)
  abline(v=trueTauInGrid, lty=3, col=2)
  legend("topright", c("case","control", "case-control"), pch=".", lty=c(1,1,0), col=c(2,1,1))
  plot(x=grid.fix/xlabScale, y=relCNInGrid, type="p", pch=20, ylim=relCNlims, main="Log Relative Copy Number", ylab="Log2 Relative CN", xlab=paste("Base Pairs", xlabScale))
  lines(x=plotTauHat, y=log(c(relCN, relCN[length(relCN)]), base=2), type="s", col="red")
  abline(v=tauHatInGrid, lty=3, col=4)
  abline(v=trueTauInGrid, lty=3, col=2)
  dev.off()
  nTauHat = length(tauHat)
  if(localSeparatePlot == FALSE) {
    nPlotCol = as.integer(sqrt(nTauHat/(height/width)))
    nPlotRow = ceiling(nTauHat/nPlotCol)
    pdf(paste(filename, "_localDetails.pdf", sep=""), width=width*2, height=height*2)
    par(mfrow=c(nPlotRow, nPlotCol))
  }
  for(i in 1:nTauHat) {
    if(localSeparatePlot) {
      pdf(paste(filename, "_local_", i, "_", tauHat[i], ".pdf", sep=""), width=width, height=height/2)
    }
    lBound = max(0, tauHat[i]-localWindow)
    rBound = min(maxVal, tauHat[i]+localWindow)
    localCas = cases[cases >= lBound & cases < rBound]
    localCon = controls[controls >= lBound & controls < rBound]
    grid.fix = seq(lBound, rBound, length.out=length.out/100)
    gridSize = grid.fix[2]-grid.fix[1]
    grid.mpt = grid.fix + gridSize/2
    CasCountInGrid = getCountsInWindow(localCas, lBound, rBound, gridSize, sorted=FALSE)
    ConCountInGrid = getCountsInWindow(localCon, lBound, rBound, gridSize, sorted=FALSE)
    pInGrid = CasCountInGrid/(CasCountInGrid+ConCountInGrid)
    pInGrid[is.nan(pInGrid)] = 0.0
    combLocalCasCon = CombineCaseControlC(localCas, localCon)
    plotReadRangeInd = combLocalCasCon$combL >= lBound & combLocalCasCon$combL <= rBound
    plotReadX = combLocalCasCon$combL[plotReadRangeInd]
    plotReadY = combLocalCasCon$combZ[plotReadRangeInd] > 0
    plotPX = cbind(tauHatFull[-length(tauHatFull)], tauHatFull[-1])
    pSegment = relCN*p/(1-p)/(1+relCN*p/(1-p))
    plotPY = cbind(pSegment, pSegment)
    if(!is.null(CIObj)) {
      localCIBounds = (CIBounds[1,] <= rBound) & (CIBounds[2,] >= lBound)
      localYLims = c(min(CIL[localCIBounds]), max(CIU[localCIBounds])) * c(0.8, 1.2)
      if(is.nan(localYLims[1]) || !is.finite(localYLims[1])) localYLims[1] = 0
      if(is.nan(localYLims[2]) || !is.finite(localYLims[2])) localYLims[2] = 1
    } else {
      localYLims = c(0,1)
    }
    plot(x=1, y=1, type="n", xlim=c(lBound, rBound), xaxt="n", ylim=localYLims, main=paste("Reads and Inference around", tauHat[i]), xlab="Base Pair Locations", ylab="p(case read)", cex.main=0.75, cex.lab=0.75, cex.axis=0.75)
    axis(side=1, at=plotReadX[!plotReadY], labels=FALSE, tcl=0.3)
    axis(side=3, at=plotReadX[plotReadY], labels=FALSE, tcl=0.3)
    axis(side=1, xaxp=c(lBound, rBound, 10), tcl=-0.5, cex.axis=0.75)
    if(is.null(CIObj)) {
      if(length(grid.mpt) != length(pInGrid)) {
        grid.mpt = grid.mpt[1:max(length(grid.mpt), length(pInGrid))]
        pInGrid = pInGrid[1:max(length(grid.mpt), length(pInGrid))]
      }
      points(x=grid.mpt, y=pInGrid, pch=20, col=3)
    } else {
      for(j in 1:ncol(CIBounds)) {
        # colour assumed (the original string literal did not survive); grey
        # marks the confidence-band endpoints
        lines(x=CIBounds[,j], y=rep(CIL[j],2), col="gray60")
        lines(x=CIBounds[,j], y=rep(CIU[j],2), col="gray60")
      }
    }
    for(j in 1:nrow(plotPY)) {
      lines(x=plotPX[j,], y=plotPY[j,], lwd=3)
    }
    # colours assumed, matching the tauHat (blue) / trueTau (red) convention
    # used in the genome-wide panels above
    abline(v=tauHat, lty=3, lwd=2, col=4)
    abline(v=trueTau, lty=2, lwd=2, col=2)
    if(localSeparatePlot) {
      dev.off()
    }
  }
  if(localSeparatePlot == FALSE) dev.off()
}
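# Example usage (a sketch; 'cases' and 'controls' are read-position vectors,
# 'cbsFit' a segmentation result with the statHat/tauHat/relCN components used
# above, and 'spikes' the simulated-signal matrix; all are hypothetical objects
# in the shapes this function expects):
# ScanCBSSimPlot(cases, controls, CBSObj = cbsFit, trueTau = c(2.5e6, 7.1e6),
#                SpikeMat = spikes, filename = "simPlot",
#                mainTitle = "ScanCBS simulation")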