diri.contour <- function(a, n = 100, x = NULL, cont.line = FALSE) { nam <- c("X1", "X2", "X3") x1 <- seq(0.001, 0.999, length = n) sqrt3 <- sqrt(3) x2 <- seq(0.001, sqrt3/2 - 1e-03, length = n) mat <- matrix(nrow = n, ncol = n) be <- prod( gamma(a)) / gamma(sum(a) ) for ( i in 1:c(n/2) ) { for (j in 1:n) { if ( x2[j] < sqrt3 * x1[i] ) { w3 <- 2 * x2[j] / sqrt3 w2 <- x1[i] - x2[j]/sqrt3 w1 <- 1 - w2 - w3 w <- c(w1, w2, w3) can <- prod( w^(a - 1) ) / be if (abs(can) < Inf) mat[i, j] <- can } } } for (i in c(n/2 + 1):n) { for (j in 1:n) { if ( x2[j] < sqrt3 - sqrt3 * x1[i] ) { w3 <- 2 * x2[j] / sqrt3 w2 <- x1[i] - x2[j]/sqrt3 w1 <- 1 - w2 - w3 w <- c(w1, w2, w3) can <- prod( w^(a - 1) ) / be if (abs(can) < Inf) mat[i, j] <- can } } } b1 <- c(0.5, 0, 1, 0.5) b2 <- c(sqrt3/2, 0, 0, sqrt3/2) b <- cbind(b1, b2) b_x1 <- seq(from = 0, to = 1, length.out = 11) b_y1 <- rep(0, times = 11) b_x2 <- seq(from = 0.5, to = 0, length.out = 11) b_y2 <- seq(from = sqrt3/2, to = 0, length.out = 11) b_x4 <- seq(from = 1, to = 0.5, length.out = 11) b_y4 <- seq(from = 0, to = sqrt3/2, length.out = 11) par(fg = NA) filled.contour(x1, x2, mat, nlevels = 200, color.palette = colorRampPalette( c( "blue", "cyan", "yellow", "red") ), plot.axes = { text(b_x1, b_y1, c("","0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", ""), adj = c(0.5, 1.5), col = "black", cex = 1); text(b_x2, b_y2, c("","0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", ""), adj = c(1.25, -0.15), col = "black", cex = 1); text(b_x4, b_y4, c("","0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", ""), adj = c(-0.25, -0.15), col = "black", cex = 1); points(b[, 1], b[, 2], type = "l", lwd = 4.5, col = "black"); if ( !is.null(x) ) { proj <- matrix(c(0, 1, 0.5, 0, 0, sqrt3/2), ncol = 2) xa <- x %*% proj points(xa[, 1], xa[, 2], col = "black") nam2 <- colnames(x) if ( !is.null(nam2) ) nam2 <- nam }; text( b[1, 1], b[1, 2] + 0.07, nam[3], cex = 1, col = "black", font = 2 ); text( b[2:3, 1], b[2:3, 2] - 0.07, nam[1:2], cex = 1, col = "black", font = 2 ); if ( cont.line ) { contour(x1, x2, mat, pt = "s", col="black", nlevels = 10, labcex = 0.8, lwd = 1.5, add = TRUE) } }, key.axes = {axis(4, col = "black")}, xlab = "", ylab = "", xlim = c(-0.1, 1.1), ylim = c(-0.1, 1.1) ) }
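# Usage sketch for diri.contour(): plot the Dirichlet(3, 5, 7) density on the
# 2-simplex and overlay two compositions. The alpha vector and the points are
# invented for illustration; rows of x must sum to 1.
a <- c(3, 5, 7)
comp <- matrix(c(0.2, 0.3, 0.5,
                 0.1, 0.6, 0.3), ncol = 3, byrow = TRUE)
diri.contour(a = a, n = 100, x = comp, cont.line = TRUE)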
NULL NULL archive <- function(...) { .Deprecated('gl_archive', package = 'gitlabr', old = 'archive') gl_archive(...) } assign_issue <- function(...) { .Deprecated('gl_assign_issue', package = 'gitlabr', old = 'assign_issue') gl_assign_issue(...) } close_issue <- function(...) { .Deprecated('gl_close_issue', package = 'gitlabr', old = 'close_issue') gl_close_issue(...) } comment_commit <- function(...) { .Deprecated('gl_comment_commit', package = 'gitlabr', old = 'comment_commit') gl_comment_commit(...) } comment_issue <- function(...) { .Deprecated('gl_comment_issue', package = 'gitlabr', old = 'comment_issue') gl_comment_issue(...) } create_branch <- function(...) { .Deprecated('gl_create_branch', package = 'gitlabr', old = 'create_branch') gl_create_branch(...) } create_merge_request <- function(...) { .Deprecated('gl_create_merge_request', package = 'gitlabr', old = 'create_merge_request') gl_create_merge_request(...) } delete_branch <- function(...) { .Deprecated('gl_delete_branch', package = 'gitlabr', old = 'delete_branch') gl_delete_branch(...) } edit_commit_comment <- function(...) { .Deprecated('gl_edit_commit_comment', package = 'gitlabr', old = 'edit_commit_comment') gl_edit_commit_comment(...) } edit_issue <- function(...) { .Deprecated('gl_edit_issue', package = 'gitlabr', old = 'edit_issue') gl_edit_issue(...) } edit_issue_comment <- function(...) { .Deprecated('gl_edit_issue_comment', package = 'gitlabr', old = 'edit_issue_comment') gl_edit_issue_comment(...) } file_exists <- function(...) { .Deprecated('gl_file_exists', package = 'gitlabr', old = 'file_exists') gl_file_exists(...) } get_comments <- function(...) { .Deprecated('gl_get_comments', package = 'gitlabr', old = 'get_comments') gl_get_comments(...) } get_commit_comments <- function(...) { .Deprecated('gl_get_commit_comments', package = 'gitlabr', old = 'get_commit_comments') gl_get_commit_comments(...) } get_commits <- function(...) { .Deprecated('gl_get_commits', package = 'gitlabr', old = 'get_commits') gl_get_commits(...) } get_diff <- function(...) { .Deprecated('gl_get_diff', package = 'gitlabr', old = 'get_diff') gl_get_diff(...) } get_file <- function(...) { .Deprecated('gl_get_file', package = 'gitlabr', old = 'get_file') gl_get_file(...) } get_issue <- function(...) { .Deprecated('gl_get_issue', package = 'gitlabr', old = 'get_issue') gl_get_issue(...) } get_issue_comments <- function(...) { .Deprecated('gl_get_issue_comments', package = 'gitlabr', old = 'get_issue_comments') gl_get_issue_comments(...) } get_issues <- function(...) { .Deprecated('gl_list_issues', package = 'gitlabr', old = 'get_issues') gl_list_issues(...) } get_project_id <- function(...) { .Deprecated('gl_get_project_id', package = 'gitlabr', old = 'get_project_id') gl_get_project_id(...) } gitlab_connection <- function(...) { .Deprecated('gl_connection', package = 'gitlabr', old = 'gitlab_connection') gl_connection(...) } list_branches <- function(...) { .Deprecated('gl_list_branches', package = 'gitlabr', old = 'list_branches') gl_list_branches(...) } list_files <- function(...) { .Deprecated('gl_list_files', package = 'gitlabr', old = 'list_files') gl_list_files(...) } list_projects <- function(...) { .Deprecated('gl_list_projects', package = 'gitlabr', old = 'list_projects') gl_list_projects(...) } new_issue <- function(...) { .Deprecated('gl_new_issue', package = 'gitlabr', old = 'new_issue') gl_new_issue(...) } project_connection <- function(...) 
{ .Deprecated('gl_project_connection', package = 'gitlabr', old = 'project_connection') gl_project_connection(...) } proj_req <- function(...) { .Deprecated('gl_proj_req', package = 'gitlabr', old = 'proj_req') gl_proj_req(...) } push_file <- function(...) { .Deprecated('gl_push_file', package = 'gitlabr', old = 'push_file') gl_push_file(...) } reopen_issue <- function(...) { .Deprecated('gl_reopen_issue', package = 'gitlabr', old = 'reopen_issue') gl_reopen_issue(...) } repository <- function(...) { .Deprecated('gl_repository', package = 'gitlabr', old = 'repository') gl_repository(...) } to_issue_id <- function(...) { .Deprecated('gl_to_issue_id', package = 'gitlabr', old = 'to_issue_id') gl_to_issue_id(...) } unassign_issue <- function(...) { .Deprecated('gl_unassign_issue', package = 'gitlabr', old = 'unassign_issue') gl_unassign_issue(...) }
library(kader) context("Functions related to the rank transformation J") test_that("J1 (with its cc-default) is 0 at 1/2", { expect_equal(J1(u = 1/2), 0) }) test_that("J2 (with its cc-default) is 0 at 1/2", { expect_equal(J2(u = 1/2), 0) }) test_that("cuberoot(x)^3 is equal to x for various values of x", { expect_equal(cuberoot(x = 0)^3, 0) expect_equal(cuberoot(x = 1)^3, 1) expect_equal(cuberoot(x = -1)^3, -1) }) test_that("J_admissible(u) = sqrt(3)*(2u - 1) for cc = sqrt(3)", { uu <- seq(0, 1, length = 51) expect_equal(J_admissible(u = uu, cc = mean(sqrt(c(5/3, 3)))), J1(u = uu, cc = mean(sqrt(c(5/3, 3))))) expect_equal(J_admissible(u = uu, cc = sqrt(3)), sqrt(3)*(2*uu - 1)) expect_equal(J_admissible(u = uu, cc = mean(sqrt(c(3, 5)))), J2(u = uu, cc = mean(sqrt(c(3, 5))))) }) test_that("p_c for various values of c", { expect_identical(pc(cc = 0), 0) expect_identical(pc(cc = 1), -0.2) expect_identical(pc(cc = -1), -0.2) expect_equal(pc(cc = 1/3), -7/195) expect_equal(pc(cc = 4/3), 16/165) expect_true(is.infinite(pc(cc = sqrt(3)))) }) test_that("q_c(u) for various values of c", { uu <- seq(0, 1, length = 51) expect_true(all(qc(u = uu, cc = 0) == 0)) expect_equal(qc(u = uu, cc = 1), (1 - 2*uu)/5) expect_identical(qc(u = 0, cc = 1), 0.2) expect_identical(qc(u = 0.5, cc = 1), 0) expect_identical(qc(u = 1, cc = 1), -0.2) expect_identical(qc(u = uu, cc = sqrt(3)), Inf*(1 - 2*uu)) })
mutate_by_time <- function(.data, .date_var, .by = "day", ..., .type = c("floor", "ceiling", "round")) { if (rlang::quo_is_missing(rlang::enquo(.date_var))) { message(".date_var is missing. Using: ", tk_get_timeseries_variables(.data)[1]) } UseMethod("mutate_by_time") } mutate_by_time.default <- function(.data, .date_var, .by = "day", ..., .type = c("floor", "ceiling", "round")) { stop("Object is not of class `data.frame`.", call. = FALSE) } mutate_by_time.data.frame <- function(.data, .date_var, .by = "day", ..., .type = c("floor", "ceiling", "round")) { data_groups_expr <- rlang::syms(dplyr::group_vars(.data)) date_var_expr <- rlang::enquo(.date_var) if (rlang::quo_is_missing(date_var_expr)) { date_var_text <- tk_get_timeseries_variables(.data)[1] date_var_expr <- rlang::sym(date_var_text) } date_var_text <- rlang::quo_name(date_var_expr) if (!date_var_text %in% names(.data)) { rlang::abort(stringr::str_glue("Attempting to use .date_var = {date_var_text}. Column does not exist in .data. Please specify a date or date-time column.")) } fun_type <- tolower(.type[[1]]) if (fun_type == "floor") { .f <- lubridate::floor_date } else if (fun_type == "ceiling") { .f <- lubridate::ceiling_date } else { .f <- lubridate::round_date } ret_tbl <- .data %>% dplyr::mutate(.date_var_collapsed := .f(!! date_var_expr, unit = .by)) %>% dplyr::group_by_at(.vars = dplyr::vars(!!! data_groups_expr, .date_var_collapsed)) %>% dplyr::arrange(!! date_var_expr, .by_group = TRUE) %>% dplyr::mutate(...) %>% dplyr::ungroup() %>% dplyr::select(-.date_var_collapsed) %>% dplyr::group_by_at(.vars = dplyr::vars(!!! data_groups_expr)) return(ret_tbl) }
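# Usage sketch for mutate_by_time() (assumes dplyr, rlang and lubridate are
# available, as the method above requires): compute a weekly mean while
# keeping the daily rows. The toy tibble is invented.
library(dplyr)
df <- tibble::tibble(date = as.Date("2024-01-01") + 0:13, value = 1:14)
df %>% mutate_by_time(date, .by = "week", wk_mean = mean(value))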
jagsTwoBaselinesFullc <- function (muCb1 = NULL, sigmaCb1 = NULL, muNb1 = NULL, sigmaNb1 = NULL, muCb2 = NULL, sigmaCb2 = NULL, muNb2 = NULL, sigmaNb2 = NULL, alpha = NULL, sigmaCc = NULL, TP = NULL, sigmaNc = NULL, muDeltaN = NULL, sigmaDeltaN = NULL, muDeltaC = NULL, sigmaDeltaC = NULL, lambda = NULL) { modelString <- " model { for (i in 1:length(dCb1)){ dCb1[i] ~ dnorm(muCb1, tauCb1) } for (i in 1:length(dNb1)){ dNb1[i] ~ dnorm(muNb1, tauNb1) } for (i in 1:length(dNb2)){ dNb2[i] ~ dnorm(muNb2, tauNb2) } for (i in 1:length(dCb2)){ dCb2[i] ~ dnorm(muCb2, tauCb2) } for (j in 1:length(deltaN)){ deltaN[j] ~ dnorm(muDeltaN, tauDeltaN) } for (j in 1:length(deltaC)){ deltaC[j] ~ dnorm(muDeltaC, tauDeltaC) } for (i in 1:length(dCc)) { dCc[i] ~ dnorm(muCb2 - (muDeltaC * TP) - (alpha * (muCb2 - muCb1)), tauCc) } for (i in 1:length(dNc)){ dNc[i] ~ dnorm(muDeltaN * (TP - lambda) + muNb1*alpha + muNb2 * (1 - alpha), tauNc) }" if (is.null(muCb1)) { newString <- "muCb1 ~ dnorm(0, 0.0001)" } else { newString <- paste("muCb1 ~", toString(muCb1)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaCb1)) { newString <- "tauCb1 <- pow(sigmaCb1, -2) sigmaCb1 ~ dunif(0, 100)" } else { newString <- "tauCb1 <- pow(sigmaCb1, -2)" newString2 <- paste("sigmaCb1 ~", toString(sigmaCb1)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(muNb1)) { newString <- "muNb1 ~ dnorm(0, 0.0001)" } else { newString <- paste("muNb1 ~", toString(muNb1)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaNb1)) { newString <- "tauNb1 <- pow(sigmaNb1, -2) sigmaNb1 ~ dunif(0, 100)" } else { newString <- "tauNb1 <- pow(sigmaNb1, -2)" newString2 <- paste("sigmaNb1 ~", toString(sigmaNb1)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(muCb2)) { newString <- "muCb2 ~ dnorm(0, 0.0001)" } else { newString <- paste("muCb2 ~", toString(muCb2)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaCb2)) { newString <- "tauCb2 <- pow(sigmaCb2, -2) sigmaCb2 ~ dunif(0, 100)" } else { newString <- "tauCb2 <- pow(sigmaCb2, -2)" newString2 <- paste("sigmaCb2 ~", toString(sigmaCb2)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(muNb2)) { newString <- "muNb2 ~ dnorm(0, 0.0001)" } else { newString <- paste("muNb2 ~", toString(muNb2)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaNb2)) { newString <- "tauNb2 <- pow(sigmaNb2, -2) sigmaNb2 ~ dunif(0, 100)" } else { newString <- "tauNb2 <- pow(sigmaNb2, -2)" newString2 <- paste("sigmaNb2 ~", toString(sigmaNb2)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(alpha)) { newString <- "alpha ~ dbeta(1,1)" } else { newString <- paste("alpha ~", toString(alpha)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaCc)) { newString <- "tauCc <- pow(sigmaCc, -2) sigmaCc ~ dunif(0, 100)" } else { newString <- "tauCc <- pow(sigmaCc, -2)" newString2 <- paste("sigmaCc ~", toString(sigmaCc)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(TP)) { newString <- "TP ~ dunif(lambda, 10)" } else { newString <- paste("TP ~", toString(TP)) } modelString <- paste (modelString, newString, sep = "\n") if
(is.null(sigmaNc)) { newString <- "tauNc <- pow(sigmaNc, -2) sigmaNc ~ dunif(0, 100)" } else { newString <- "tauNc <- pow(sigmaNc, -2)" newString2 <- paste("sigmaNc ~", toString(sigmaNc)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(muDeltaN)) { newString <- "muDeltaN ~ dnorm(0, 0.0001)" } else { newString <- paste("muDeltaN ~", toString(muDeltaN)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaDeltaN)) { newString <- "tauDeltaN <- pow(sigmaDeltaN, -2) sigmaDeltaN ~ dunif(0, 100)" } else { newString <- "tauDeltaN <- pow(sigmaDeltaN, -2)" newString2 <- paste("sigmaDeltaN ~", toString(sigmaDeltaN)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(muDeltaC)) { newString <- "muDeltaC ~ dnorm(0, 0.0001)" } else { newString <- paste("muDeltaC ~", toString(muDeltaC)) } modelString <- paste (modelString, newString, sep = "\n") if (is.null(sigmaDeltaC)) { newString <- "tauDeltaC <- pow(sigmaDeltaC, -2) sigmaDeltaC ~ dunif(0, 100)" } else { newString <- "tauDeltaC <- pow(sigmaDeltaC, -2)" newString2 <- paste("sigmaDeltaC ~", toString(sigmaDeltaC)) newString <- paste(newString, newString2, sep = "\n") } modelString <- paste (modelString, newString, sep = "\n") if (is.null(lambda)) { newString <- "lambda <- 2" } else { newString <- paste("lambda <- ", toString(lambda)) } modelString <- paste (modelString, newString, sep = "\n") newString <- "}" modelString <- paste (modelString, newString, sep = "\n") return(modelString) }
write_output_file_mk.test <- function(outfile, force_v4, vars, vars_data, var_name, grid_vars, grid_vars_data, S.value, Z.value, standard_name, cmsaf_info, calendar, var_atts, global_att) { nc_out <- nc_create(outfile, vars, force_v4 = force_v4) ncvar_put(nc_out, vars[[1]], vars_data$result$target.S) ncvar_put(nc_out, vars[[2]], vars_data$time_bounds) ncvar_put(nc_out, vars[[3]], vars_data$result$target.Z) for (i in seq_along(grid_vars)) { nc_out <- ncvar_add(nc_out, grid_vars[[i]]) ncvar_put(nc_out, grid_vars[[i]], grid_vars_data[[i]]) } ncatt_put(nc_out, S.value$name, ATTR_NAMES$STANDARD_NAME, S.value$standard_name, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, S.value$name, ATTR_NAMES$LONG_NAME, S.value$long_name, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, S.value$name, "description", S.value$info, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, Z.value$name, ATTR_NAMES$STANDARD_NAME, Z.value$standard_name, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, Z.value$name, ATTR_NAMES$LONG_NAME, Z.value$long_name, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, Z.value$name, "description", Z.value$info, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, TIME_NAMES$DEFAULT, ATTR_NAMES$STANDARD_NAME, TIME_NAMES$DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, TIME_NAMES$DEFAULT, ATTR_NAMES$CALENDAR, calendar, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, TIME_NAMES$DEFAULT, ATTR_NAMES$BOUNDS, TIME_BOUNDS_NAMES$DEFAULT, prec = PRECISIONS_ATT$TEXT) if (LON_NAMES$DEFAULT %in% c(names(nc_out$dim), names(nc_out$var))) { ncatt_put(nc_out, LON_NAMES$DEFAULT, ATTR_NAMES$STANDARD_NAME, LON_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, LON_NAMES$DEFAULT, ATTR_NAMES$LONG_NAME, LON_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) if (LON_NAMES$DEFAULT %in% names(nc_out$dim)) { ncatt_put(nc_out, LON_NAMES$DEFAULT, ATTR_NAMES$AXIS, AXIS$X, prec = PRECISIONS_ATT$TEXT) } } if (LAT_NAMES$DEFAULT %in% c(names(nc_out$dim), names(nc_out$var))) { ncatt_put(nc_out, LAT_NAMES$DEFAULT, ATTR_NAMES$STANDARD_NAME, LAT_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, LAT_NAMES$DEFAULT, ATTR_NAMES$LONG_NAME, LAT_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) if (LAT_NAMES$DEFAULT %in% names(nc_out$dim)) { ncatt_put(nc_out, LAT_NAMES$DEFAULT, ATTR_NAMES$AXIS, AXIS$Y, prec = PRECISIONS_ATT$TEXT) } } if (X_NAMES$DEFAULT %in% names(nc_out$dim)) { ncatt_put(nc_out, X_NAMES$DEFAULT, ATTR_NAMES$STANDARD_NAME, X_NAMES$DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, X_NAMES$DEFAULT, ATTR_NAMES$LONG_NAME, X_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, X_NAMES$DEFAULT, ATTR_NAMES$AXIS, AXIS$X, prec = PRECISIONS_ATT$TEXT) } if (Y_NAMES$DEFAULT %in% names(nc_out$dim)) { ncatt_put(nc_out, Y_NAMES$DEFAULT, ATTR_NAMES$STANDARD_NAME, Y_NAMES$DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, Y_NAMES$DEFAULT, ATTR_NAMES$LONG_NAME, Y_NAMES$LONG_DEFAULT, prec = PRECISIONS_ATT$TEXT) ncatt_put(nc_out, Y_NAMES$DEFAULT, ATTR_NAMES$AXIS, AXIS$Y, prec = PRECISIONS_ATT$TEXT) } ncatt_put(nc_out, 0, ATTR_NAMES$INFO, INFO_STRING, prec = PRECISIONS_ATT$TEXT) for (i in seq_along(global_att)) { ncatt_put(nc_out, 0, names(global_att)[i], as.character(global_att[i][[1]]), prec = PRECISIONS_ATT$TEXT) } nc_close(nc_out) }
"clev_pts"
isNonZeroNumberOrNanOrInfVectorOrNull <- function(argument, default = NULL, stopIfNot = FALSE, n = NA, message = NULL, argumentName = NULL) { checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = NA, zeroAllowed = FALSE, negativeAllowed = TRUE, positiveAllowed = TRUE, nonIntegerAllowed = TRUE, naAllowed = FALSE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName) }
fts.spectral.density = function(X, Y=X, freq =(-1000:1000/1000)*pi,q = ceiling((dim(X$coefs)[2])^{0.33}), weights = "Bartlett"){ fdom = spectral.density(X=t(X$coefs),Y=t(Y$coefs),freq=freq,q=q,weights=weights) fts.freqdom(fdom,basisX=X$basis,basisY=Y$basis) }
list2matrix <- function(x, diag=FALSE) { if (!is.list(x)) stop("\"x\" has to be a list.") if (!identical(0, var(sapply(x, function(x){dim(x)[[1]]})))) stop("Dimensions of matrices in \"x\" have to be the same in order to stack them together.") if (is.null(dimnames(x[[1]]))) { oldNames <- paste("x", 1:dim(x[[1]])[[1]], sep = "") } else { oldNames <- dimnames(x[[1]])[[1]] } if (diag) { psNames <- vech(outer(oldNames, oldNames, paste, sep = "_")) out <- lapply(x, vech) } else { psNames <- vechs(outer(oldNames, oldNames, paste, sep = "_")) out <- lapply(x, vechs) } out <- do.call(rbind, out) dimnames(out) <- list(names(x), psNames) out }
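# Usage sketch for list2matrix(): stack two 2x2 correlation matrices into a
# one-row-per-study matrix. vech()/vechs() (half-vectorization with/without
# the diagonal) are assumed to come from the same package, as in metaSEM/OpenMx.
m1 <- matrix(c(1, 0.3, 0.3, 1), 2, 2, dimnames = list(c("x", "y"), c("x", "y")))
m2 <- matrix(c(1, 0.5, 0.5, 1), 2, 2, dimnames = list(c("x", "y"), c("x", "y")))
list2matrix(list(study1 = m1, study2 = m2))               # off-diagonals only
list2matrix(list(study1 = m1, study2 = m2), diag = TRUE)  # include the diagonal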
EstCRMitem <- function(data, max.item, min.item, max.EMCycle=500, converge=.01, type="Shojima",BFGS=TRUE) { n=ncol(data) N=nrow(data) if(is.data.frame(data)==FALSE) stop("The input response data is not a data frame. Please use as.data.frame() and convert your response data to a data frame object before the analysis") if(length(max.item)!=length(min.item)) stop("The length of max.item vector is not equal to the length of min.item vector. Please check your inputs") if(dim(data)[2]!=length(max.item)) stop("The number of columns in the data is not equal to the length of max.item vector") if(dim(data)[2]!=length(min.item)) stop("The number of columns in the data is not equal to the length of min.item vector") for(i in 1:n) { if(max(na.omit(data[,i]))> max.item[i]) stop("The column ",i," has values higher than the maximum available score in the user specified max.item vector. Please check and clean your data.") } for(i in 1:n) { if(min(na.omit(data[,i]))< min.item[i]) stop("The column ",i," has values smaller than the minimum available score in the user specified min.item vector. Please check and clean your data.") } if(is.numeric(data[,i])!=TRUE) stop("The column vectors are not numeric. Please check your data") desc <- as.data.frame(matrix(nrow=n,ncol=4)) colnames(desc) <- c("Mean","SD","Min","Max") for(i in 1:n) { desc[i,1]=mean(data[,i],na.rm=TRUE) desc[i,2]=sd(data[,i],na.rm=TRUE) desc[i,3]=min(data[,i],na.rm=TRUE) desc[i,4]=max(data[,i],na.rm=TRUE) } for(i in 1:n){ data[,i]= data[,i]-min.item[i] } max.item <- max.item-min.item min.item <- rep(0,n) for(i in 1:n) { if(length(which(data[,i]==max.item[i]))!=0) { data[which(data[,i]==max.item[i]),i]=max.item[i]-.01 } if(length(which(data[,i]==0))!=0) { data[which(data[,i]==0),i]=.01 } } data.original <- data for(i in 1:n){data[,i]= log(data[,i]/(max.item[i]-data[,i]))} desc$Mean.of.z <- NA desc$SD.of.z <- NA for(i in 1:n) { desc[i,5]=mean(data[,i],na.rm=TRUE) desc[i,6]=sd(data[,i],na.rm=TRUE) } rownames(desc) <- colnames(data.original) if(type=="Shojima") { loglikelihood <- function(data,ipar,mu,sigma) { first.term <- N*sum(log(ipar[,1])+log(ipar[,3])) sec.term <- sum(rowSums(t(matrix(ipar[,1]^2,nrow=n,ncol=N))* (((t(matrix(ipar[,2],nrow=n,ncol=N))+(data*t(matrix(ipar[,3],nrow=n,ncol=N)))-matrix(rep(mu,n),nrow=N,ncol=n))^2)+ sigma),na.rm=TRUE),na.rm=TRUE)/2 first.term-sec.term - ((N*n/2)*log(2*pi)) } estEM <- function(data,ipar) { sigma = 1/(sum(ipar[,1]^2)+1) mu <-sigma*rowSums(t(matrix(ipar[,1]^2,ncol=N,nrow=n))*(t(matrix(ipar[,3],ncol=N,nrow=n))*data+t(matrix(ipar[,2],ncol=N,nrow=n))),na.rm=TRUE) mumean <- mean(mu,na.rm=TRUE) muvar <- var(mu,na.rm=TRUE) zijmeanlist <- as.vector(colMeans(data,na.rm=TRUE)) zijvarlist <- as.vector(diag(cov(data,use="pairwise.complete.obs"))) zijmucovlist <- as.vector(cov(data,mu,use="pairwise.complete.obs")) gamma <- (muvar+sigma)/zijmucovlist beta <- mumean-gamma*zijmeanlist alpha <- 1/sqrt(gamma^2*zijvarlist-gamma*zijmucovlist) ipar <- cbind(alpha,beta,gamma) colnames(ipar) <- c("a","b","alpha") rownames(ipar) <- colnames(data) list(ipar,loglikelihood(data,ipar,mu,sigma),mu,sigma) } l <- c() ipars <- vector("list",max.EMCycle) mus <- vector("list",max.EMCycle) sig <- c() d=1 iter <- 1 ipars[[iter]] <- t(matrix(nrow=3,ncol=n,c(1,0,1))) ipars[[iter]][,2] <- -colMeans(data,na.rm=TRUE) mus[[iter]] <- rep(0,N) sig[iter] <- 1 l[iter]=loglikelihood(data,ipars[[iter]],mus[[iter]],sig[iter]) while(abs(d)>converge && iter < max.EMCycle){ est1 <- estEM(data,ipars[[iter]]) ipars[[iter+1]] <- est1[[1]] l[iter+1] <- 
est1[[2]] d <- l[iter+1]-l[iter] mus[[iter+1]] <- est1[[3]] sig[iter+1] <- est1[[4]] iter <- iter+1 } itempar <- vector("list",length(l)) for(i in 1:length(l)) itempar[[i]]=ipars[[i]] for(i in 1:length(l)) itempar[[i]][,3]=1/ipars[[i]][,3] maximums <- vector("list",length(l)) start <- t(matrix(nrow=3,ncol=n,c(1,0,1))) start[,2] <- -colMeans(data,na.rm=TRUE) maximums[[1]]<- cbind(max(abs(itempar[[1]]-start)[,1]),max(abs(itempar[[1]]-start)[,2]),max(abs(itempar[[1]]-start)[,3])) for(i in 1:(length(l)-1)){ maximums[[i+1]]=cbind(max(abs(itempar[[i+1]]-itempar[[i]])[,1]), max(abs(itempar[[i+1]]-itempar[[i]])[,2]), max(abs(itempar[[i+1]]-itempar[[i]])[,3])) } name <- c() name[1] <- paste0("EMCycle",1," Starting Parameters") for(i in 2:length(l)) name[i]=paste("EMCycle",i," Largest Parameter Changes=", round(maximums[[i]],3)[1]," ",round(maximums[[i]],3)[2]," ",round(maximums[[i]],3)[3], sep="") names(itempar) <- name dif <- abs(l[length(l)]-l[length(l)-1]) ipar.est <- itempar[[iter]] se.matrix <- matrix(nrow=n,ncol=3) for(j in 1:n) { a = ipar.est[j,1] b = ipar.est[j,2] alp = ipar.est[j,3] s11 <- (N/a^2)+sum((data[,j]/alp+b-mus[[iter]])^2+sig[iter]) s12 <- 2*a + sum(data[,j]/alp+b-mus[[iter]]) s13 <- 2*a + sum(data[,j]*(data[,j]/alp+b-mus[[iter]])) s22 <- N*a^2 s23 <- a^2*sum(data[,j]) s33 <- N*alp + a^2*sum(data[,j]^2) Hess <- matrix(c(s11,s12,s13,s12,s22,s23,s13,s23,s33),3,3,byrow=FALSE) se.matrix[j,] = sqrt(diag(solve(Hess))) } } if(type=="Wang&Zeng") { loglikelihood2 <- function(ipar,data,mu,sigma) { ipar <- matrix(ipar,nrow=ncol(data),ncol=3) first.term <- N*sum(log(ipar[,1])+log(ipar[,3])) sec.term <- sum(rowSums(t(matrix(ipar[,1]^2,nrow=n,ncol=N))* (((t(matrix(ipar[,2],nrow=n,ncol=N))+(data*t(matrix(ipar[,3],nrow=n,ncol=N)))-matrix(rep(mu,n),nrow=N,ncol=n))^2)+sigma), na.rm=TRUE), na.rm=TRUE)/2 first.term-sec.term - ((N*n/2)*log(2*pi)) } grad <- function(ipar,data,mu,sigma) { ipar <- matrix(ipar,nrow=ncol(data),ncol=3) l0 = loglikelihood2(ipar,data,mu,sigma) g <- c() s=.00001 for(i in 1:length(ipar)) { hold = ipar[i] h = s*hold ipar[i]=hold+h lj = loglikelihood2(ipar,data,mu,sigma) g[i]=(lj-l0)/h ipar[i]=hold } return(g) } hess <- function(ipar,data,mu,sigma){ ipar <- matrix(ipar,nrow=ncol(data),ncol=3) s=.00001 g0 = grad(ipar,data,mu,sigma) fh = matrix(nrow=length(ipar),ncol=length(ipar)) for(i in 1:length(ipar)){ hold = ipar[i] h = s*hold ipar[i]=hold+h gj=grad(ipar,data,mu,sigma) fh[,i]=(gj-g0)/h ipar[i]=hold } hh = (fh + t(fh))/2 diag(hh)= diag(fh) return(hh) } ipars <- vector("list",max.EMCycle) gradient <- vector("list",max.EMCycle) H <- vector("list",max.EMCycle) IH <- vector("list",max.EMCycle) l <- c() maxg <- c() ipar <- t(matrix(nrow=3,ncol=n,c(1,0,1))) ipar[,2] <- -colMeans(data,na.rm=TRUE) ipars[[1]] <- as.vector(ipar) sigma1=1 mus = rep(0,N) iter=1 d=1 l[iter] <- loglikelihood2(ipars[[iter]],data,mu=mus,sigma=sigma1) gradient[[iter]] <- grad(ipars[[iter]],data,mu=mus,sigma=sigma1) H[[iter]] <- hess(ipars[[iter]],data,mu=mus,sigma=sigma1) IH[[iter]] <- solve(H[[iter]]) maxg[iter] <- abs(max(gradient[[iter]])) while(iter < max.EMCycle & abs(d)>converge) { pos = seq(1,3*n,by=n) sigma1 = 1/(sum(ipars[[iter]][1:(pos[2]-1)]^2)+1) mus <-sigma1*rowSums(t(matrix(ipars[[iter]][1:(pos[2]-1)]^2,ncol=N,nrow=n))*((data*t(matrix(ipars[[iter]][(pos[3]):(3*n)],ncol=N,nrow=n)))+t(matrix(ipars[[iter]][(pos[2]):(pos[3]-1)],ncol=N,nrow=n))),na.rm=TRUE) iter <- iter+1 step.size <- 1/(1:100) step=1 ipars[[iter]]= ipars[[iter-1]]-(solve(H[[iter-1]])%*%as.matrix(gradient[[iter-1]]*step.size[step])) a 
<- ipars[[iter]][1:(length(ipars[[iter]])/3)] b <- ipars[[iter]][((length(ipars[[iter]])/3)+1):(2*(length(ipars[[iter]])/3))] alpha <- ipars[[iter]][(2*(length(ipars[[iter]])/3)+1):length(ipars[[iter]])] while((sum(a<0)!=0 | sum(alpha<0)!=0) & step <100) { step=step+1 ipars[[iter]]= ipars[[iter-1]]-(solve(H[[iter-1]])%*%as.matrix(gradient[[iter-1]]*step.size[step])) a <- ipars[[iter]][1:(length(ipars[[iter]])/3)] b <- ipars[[iter]][((length(ipars[[iter]])/3)+1):(2*(length(ipars[[iter]])/3))] alpha <- ipars[[iter]][(2*(length(ipars[[iter]])/3)+1):length(ipars[[iter]])] } l[iter] <- loglikelihood2(ipars[[iter]],data,mu=mus,sigma=sigma1) d <- l[iter]-l[iter-1] gradient[[iter]] <- grad(ipars[[iter]],data,mu=mus,sigma=sigma1) H[[iter]] <- hess(ipars[[iter]],data,mu=mus,sigma=sigma1) maxg[iter] <- max(abs(gradient[[iter]])) gradiff = gradient[[iter]] - gradient[[iter-1]] pardiff = ipars[[iter]]- ipars[[iter-1]] if(BFGS==FALSE) { H[[iter]] <- hess(ipars[[iter]],data,mu=mus,sigma=sigma1) } else { Hstep = ((H[[iter-1]]%*%pardiff%*%t(pardiff)%*%H[[iter-1]])/as.numeric((t(pardiff)%*%H[[iter-1]]%*%pardiff))) - ((gradiff%*%t(gradiff))/as.numeric(t(pardiff)%*%gradiff)) H[[iter]]= H[[iter-1]] - Hstep } } itempar <- vector("list",length(l)) for(i in 1:length(l)) itempar[[i]]=matrix(ipars[[i]],nrow=ncol(data),ncol=3) for(i in 1:length(l)) itempar[[i]][,3]=1/itempar[[i]][,3] maximums <- vector("list",length(l)) start <- t(matrix(nrow=3,ncol=n,c(1,0,1))) start[,2] <- -colMeans(data,na.rm=TRUE) maximums[[1]]<- cbind(max(abs(itempar[[1]]-start)[,1]),max(abs(itempar[[1]]-start)[,2]),max(abs(itempar[[1]]-start)[,3])) for(i in 1:(length(l)-1)){ maximums[[i+1]]=cbind(max(abs(itempar[[i+1]]-itempar[[i]])[,1]), max(abs(itempar[[i+1]]-itempar[[i]])[,2]), max(abs(itempar[[i+1]]-itempar[[i]])[,3])) } name <- c() name[1] <- paste0("EMCycle",1," Starting Parameters") for(i in 2:length(l)) name[i]=paste("EMCycle",i," Largest Parameter Changes=", round(maximums[[i]],3)[1]," ",round(maximums[[i]],3)[2]," ",round(maximums[[i]],3)[3], sep="") names(itempar) <- name dif <- abs(l[length(l)]-l[length(l)-1]) ipar.est <- itempar[[iter]] hessian <- hess(ipars[[iter]],data,mu=mus,sigma=sigma1) se.matrix <- matrix(sqrt(diag(solve(-hessian))),nrow=n,ncol=3) } out <- list(data=data.original, descriptive=desc, param=itempar[[iter]], iterations=itempar, std.err = se.matrix, LL = l[length(l)], dif=dif) class(out) <- "CRM" return(out) }
validateControlParams <- function(varsel, family, id, control.params) { message ("Validating control.params...") if (family == "gaussian"){ stopifnot(control.params$a.sigsq > 0, control.params$b.sigsq > 0) } if (varsel == TRUE) { stopifnot(control.params$a.p0 > 0, control.params$b.p0 > 0, control.params$r.jump1 > 0, control.params$r.jump2 > 0, control.params$r.muprop > 0) } else { stopifnot(control.params$r.jump > 0) } if (!is.null(id)) { stopifnot(length(control.params$mu.lambda) == 2, length(control.params$sigma.lambda) == 2, length(control.params$lambda.jump) == 2) } for (i in 1:length(control.params$mu.lambda)) { stopifnot(control.params$mu.lambda > 0) } for (i in 1:length(control.params$sigma.lambda)) { stopifnot(control.params$sigma.lambda > 0) } for (i in 1:length(control.params$lambda.jump)) { stopifnot(control.params$lambda.jump > 0) } rprior=control.params$r.prior stopifnot(rprior == "gamma" | rprior == "unif" | rprior == "invunif") if (control.params$r.prior == "gamma") { stopifnot(control.params$mu.r > 0, control.params$sigma.r > 0) } else { stopifnot(control.params$r.a >= 0, control.params$r.b > control.params$r.a) } }
test_that("CompadreDB works correctly", { db1 <- db2 <- db3 <- db4 <- as_cdb(CompadreLegacy) expect_true(validCompadreDB(db1)) expect_output(print(db1)) db2@data <- db2@data[,5:10] expect_false(isTRUE(validCompadreDB(db2))) db3@data$mat <- 1L expect_false(isTRUE(validCompadreDB(db3))) db4@data$mat <- CompadreLegacy$mat expect_false(isTRUE(validCompadreDB(db4))) expect_equal(Compadre$SpeciesAuthor, Compadre@data$SpeciesAuthor) db1 <- Compadre db1$ones <- 1L expect_true(ncol(db1@data) == ncol(Compadre@data) + 1) expect_true(all(db1$ones == 1L)) expect_error(Compadre$mat <- 1L) expect_equal(Compadre[["SpeciesAuthor"]], Compadre@data$SpeciesAuthor) db1 <- Compadre db1[["ones"]] <- 1L expect_true(ncol(db1@data) == ncol(Compadre@data) + 1) expect_true(all(db1$ones == 1L)) expect_error(Compadre[["mat"]] <- 1L) expect_s3_class(CompadreData(Compadre), "data.frame") expect_type(VersionData(Compadre), "list") expect_length(Version(Compadre), 1) expect_length(DateCreated(Compadre), 1) expect_length(DateCreated(Compadre), 1) })
context("cloud_metrics") LASfile <- system.file("extdata", "example.laz", package = "rlas") las <- example ctg <- suppressMessages(readLAScatalog(c(LASfile, LASfile))) opt_progress(ctg) <- FALSE test_that("lasmetrics works with LAS", { x = cloud_metrics(las, mean(Z)) expect_equal(x, 975.9, tol = 0.1) x = cloud_metrics(las, ~mean(Z)) expect_equal(x, 975.9, tol = 0.1) x = cloud_metrics(las, ~list(mean(Z), max(Z))) expect_is(x, "list") expect_equal(length(x), 2L) expect_equal(x[[1]], 975.9, tol = 0.1) expect_equal(x[[2]], 978.3, tol = 0.1) }) test_that("cloud_metrics works with catalog_apply", { x = catalog_apply(ctg, cloud_metrics, func = ~mean(Z)) expect_is(x, "list") expect_equal(length(x), 2L) expect_equal(x[[1]], 975.9, tol = 0.1) expect_equal(x[[2]], 975.9, tol = 0.1) x = catalog_apply(ctg, cloud_metrics, func = ~list(mean(Z), max(Z))) expect_is(x, "list") expect_equal(length(x), 2L) expect_is(x[[1]], "list") expect_is(x[[2]], "list") })
ext_simes = function(x, cor_r){ eff.snpcount.fun <- function(ldmat) { ldmat <- as.matrix(ldmat) snpcount.local <- dim(ldmat)[1] if (snpcount.local <= 1) return(1) ev <- eigen(ldmat, only.values = TRUE)$values if (sum(ev < 0) != 0) { ev <- ev[ev > 0] ev <- ev/sum(ev) * snpcount.local } ev <- ev[ev > 1] snpcount.local - sum(ev - 1) } eff.snpcount.global <- eff.snpcount.fun(cor_r) n_values <- length(x) candid <- sapply(1:n_values, function(i){ (eff.snpcount.global * x[i])/eff.snpcount.fun(cor_r[1:i,1:i]) }) p_ext_simes <- min(candid) p_ext_simes }
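# Usage sketch for ext_simes(): combine correlated p-values, GATES/extended-
# Simes style. Assumption: x is sorted ascending and the rows/columns of cor_r
# are ordered to match; the p-values and correlation matrix below are invented.
p <- c(0.01, 0.04, 0.20)
r <- matrix(0.4, 3, 3)
diag(r) <- 1
ext_simes(p, r)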
gggeo_scale_old <- function(gg, dat = "periods", fill = NULL, color = "black", alpha = 1, height = .05, gap = 0, pos = "bottom", lab = TRUE, rot = 0, abbrv = TRUE, skip = c("Quaternary", "Holocene", "Late Pleistocene"), size = 5, neg = FALSE) { if(is(dat, "data.frame")){ }else{ dat <- getScaleData(dat) } if(neg){ dat$max_age <- -1 * (dat$max_age) dat$min_age <- -1 * (dat$min_age) } dat$mid_age <- (dat$max_age + dat$min_age)/2 if(!is.null(fill)){ dat$color <- rep(fill, length.out = nrow(dat)) }else if(!("color" %in% colnames(dat))){ dat$color <- rep(c("grey60","grey80"), length.out = nrow(dat)) } lims <- ggplot_build(gg)$layout$panel_params[[1]] if(abbrv & "abbr" %in% colnames(dat)){ dat$label <- dat$abbr }else{ dat$label <- dat$name } dat$label[dat$name %in% skip] <- "" gg <- gg + new_scale_fill() if(pos %in% c("bottom", "top", "b", "t")){ y.range <- max(lims$y.range) - min(lims$y.range) if(pos %in% c("top","t")){ ymax <- max(lims$y.range) - gap * y.range ymin <- max(lims$y.range) - (height + gap) * y.range }else{ ymin <- min(lims$y.range) + gap * y.range ymax <- min(lims$y.range) + (height + gap) * y.range } gg <- gg + geom_rect(data = dat, aes(xmin = min_age, xmax = max_age, fill = color), ymin = ymin, ymax = ymax, color = color, alpha = alpha, show.legend = FALSE, inherit.aes = FALSE) + scale_fill_manual(values = setNames(dat$color, dat$color)) if(lab){ gg <- gg + geom_text(data = dat, aes(x = mid_age, label = label), y = (ymin+ymax)/2, vjust = "middle", hjust = "middle", size = size, angle = rot, inherit.aes = FALSE) } }else if(pos %in% c("left", "right","l","r")){ x.range <- max(lims$x.range) - min(lims$x.range) if(pos %in% c("right","r")){ xmax <- max(lims$x.range) - gap * x.range xmin <- max(lims$x.range) - (height + gap) * x.range }else{ xmin <- min(lims$x.range) + gap * x.range xmax <- min(lims$x.range) + (height + gap) * x.range } gg <- gg + geom_rect(data = dat, aes(ymin = min_age, ymax = max_age, fill = color), xmin = xmin, xmax = xmax, color = color, alpha = alpha, show.legend = FALSE, inherit.aes = FALSE) + scale_fill_manual(values = setNames(dat$color, dat$color)) if(lab){ gg <- gg + geom_text(data = dat, aes(y = mid_age, label = label), x = (xmin+xmax)/2, vjust = "middle", hjust = "middle", size = size, angle = rot, inherit.aes = FALSE) } } gg }
NULL edecob <- function(data, smoother = "mov_med", resample_method = "all", min_change_dur = 84, conf_band_lvl = 0.95, bt_tot_rep = 100, time_unit = "day", ...) { data_raw <- data if (!("col_names" %in% names(match.call()))) { col_names <- colnames(data) colnames(data) <- c("source", "time_point", "value", "detec_lower", "detec_upper") } else { col_names <- list(...)$col_names } data <- data.frame("source" = unlist(data$source), "time_point" = unlist(data$time_point), "value" = unlist(data$value), "detec_lower" = unlist(data$detec_lower), "detec_upper" = unlist(data$detec_upper)) stopifnot( "Data not a data frame" = is.data.frame(data), "Data empty" = nrow(data) > 0, "Time points not numeric" = is.numeric(data[,2]), "Measurements not numeric" = is.numeric(data[,3]), "Upper bound of detection interval not numeric" = is.numeric(data[,5]), "Lower bound of detection interval not numeric" = is.numeric(data[,4]), "Upper bound of detection interval not all equal for at least one source" = { all(do.call(c, lapply(unique(data$source), function(x){ return(length(unique(data$detec_upper[data$source == x])) == 1) })))}, "Lower bound of detection interval not all equal for at least one source" = { all(do.call(c, lapply(unique(data$source), function(x){ return(length(unique(data$detec_lower[data$source == x])) == 1) })))}, "Upper bound of detection interval contains NA values" = sum(is.na(data[,5])) == 0, "Lower bound of detection interval contains NA values" = sum(is.na(data[,4])) == 0, "Lower bound of detection interval is larger than upper bound of detection interval for at least one source" = sum(data[,4] > data[,5]) == 0, "Lower bound of detection interval is equal to upper bound of detection interval for at least one source" = sum(data[,4] == data[,5]) == 0 ) if ("med_win" %in% names(list(...))) { stopifnot("Window of the moving median does not contain two numbers" = (length(list(...)$med_win) == 2), "Lower bound of the window for the moving median is not smaller than the upper bound" = (list(...)$med_win[1] <= list(...)$med_win[2])) } if (sum(is.na(data$value)) > 1) { warning("Removing rows where value is NA", immediate. = TRUE) data <- data[!is.na(data$value), ] } if (sum(is.na(data$time_point)) > 1) { warning("Removing rows where time point is NA", immediate. = TRUE) data <- data[!is.na(data$time_point), ] } no_width_given <- FALSE if ((smoother == "mov_med" || smoother == "mov_mean") && !("med_win" %in% names(list(...)))) { warning("Parameter med_win not given after choosing the moving median as smoother. Defaulting to c(-42,42)", immediate. = TRUE) med_win <- c(-42, 42) no_width_given <- TRUE } else if (smoother == "mov_med") { med_win <- list(...)$med_win } if (length(unique(data$source)) > 1) { patients_event_data <- lapply(split(data, factor(data$source)), edecob, smoother, resample_method, min_change_dur,conf_band_lvl, bt_tot_rep, time_unit, "col_names" = col_names, ...) 
patients_event_data$event_info <- as.data.frame(do.call(rbind, lapply(patients_event_data, function(x) { return(list(x$event$source, x$event$event_detected, x$event$event_onset, x$event$event_duration, x$event$event_stop)) }))) colnames(patients_event_data$event_info) <- c("source", "event_detected", "event_onset", "event_duration", "event_stop") patients_event_data$event_info$source <- unlist(patients_event_data$event_info$source) patients_event_data$event_info$event_detected <- unlist(patients_event_data$event_info$event_detected) patients_event_data$event_info$event_onset <- unlist(patients_event_data$event_info$event_onset) patients_event_data$event_info$event_duration <- unlist(patients_event_data$event_info$event_duration) patients_event_data$event_info$event_stop <- unlist(patients_event_data$event_info$event_stop) return(patients_event_data) } data <- data[order(data$time_point), ] if (smoother == "mov_med") { if ("min_pts_in_win" %in% names(match.call())) { smoother_pts <- mov_med(data, med_win, list(...)$min_pts_in_win) } else { smoother_pts <- mov_med(data, med_win) } } else if (smoother == "mov_mean") { if ("min_pts_in_win" %in% names(match.call())) { smoother_pts <- mov_mean(data, med_win, list(...)$min_pts_in_win) } else { smoother_pts <- mov_mean(data, med_win) } } else { warning("Smoother not recognized. Defaulting to moving median.") if (!("med_win" %in% names(match.call()))) { warning("Parameter med_win not given after choosing the moving median as smoother. Defaulting to c(-42,42).") med_win <- c(-42,42) no_width_given <- TRUE } smoother_pts <- mov_med(data, med_win) } smoother_resid <- smoother_resid(data, smoother_pts) if (no_width_given) { bt_smoother <- bt_smoother(data, smoother, resample_method, smoother_pts, smoother_resid, bt_tot_rep, "med_win" = med_win, ...) } else { bt_smoother <- bt_smoother(data, smoother, resample_method, smoother_pts, smoother_resid, bt_tot_rep, ...) } conf_band <- conf_band(bt_smoother, smoother_pts, bt_tot_rep, conf_band_lvl) event <- detect_event(conf_band, data$detec_lower[1], data$detec_upper[1], min_change_dur) colnames(data_raw) <- colnames(data) <- c("source", "time_point", "value", "detec_lower", "detec_upper") data_raw$event <- event$event_detected data_raw$event_onset <- event$event_onset data_raw$event_duration <- event$event_duration data_raw$event_stop <- event$event_stop output <- list( "source" = data$source[1], "event" = event, "conf_band" = conf_band, "smoother_pts" = smoother_pts, "data" = data_raw, "smoother" = smoother, "resample_method" = resample_method, "detec_lower" = data$detec_lower[1], "detec_upper" = data$detec_upper[1], "min_change_dur" = min_change_dur, "conf_band_lvl" = conf_band_lvl, "bt_tot_rep" = bt_tot_rep, "call" = match.call(), "col_names" = col_names, "time_unit" = time_unit ) class(output) <- "edecob" return(output) }
photon_ratio <- function(w.length, s.irrad, w.band.num = NULL, w.band.denom = NULL, unit.in = "energy", check.spectrum = TRUE, use.cached.mult = FALSE, use.hinges = getOption("photobiology.use.hinges", default = NULL) ) { return(waveband_ratio(w.length, s.irrad, w.band.num, w.band.denom, unit.out.num = "photon", unit.out.denom = "photon", unit.in = unit.in, check.spectrum = check.spectrum, use.cached.mult = use.cached.mult, use.hinges = use.hinges)) }
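# Usage sketch for photon_ratio() with invented spectral data; waveband() is
# the photobiology constructor for a wavelength range.
library(photobiology)
wl <- 300:700                          # wavelength, nm
irr <- dnorm(wl, mean = 480, sd = 80)  # toy energy irradiance
photon_ratio(wl, irr, waveband(c(400, 500)), waveband(c(600, 700)))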
KerasLayer <- R6Class("KerasLayer", public = list( build = function(input_shape) { }, call = function(inputs, mask = NULL) { stop("Keras custom layers must implement the call function") }, compute_output_shape = function(input_shape) { input_shape }, add_loss = function(losses, inputs = NULL) { args <- list() args$losses <- losses args$inputs <- inputs do.call(private$wrapper$add_loss, args) }, add_weight = function(name, shape, dtype = NULL, initializer = NULL, regularizer = NULL, trainable = TRUE, constraint = NULL) { args <- list() args$name <- name args$shape <- shape args$dtype <- dtype args$initializer <- initializer args$regularizer <- regularizer args$trainable <- trainable args$constraint <- constraint do.call(private$wrapper$add_weight, args) }, .set_wrapper = function(wrapper) { private$wrapper <- wrapper }, python_layer = function() { private$wrapper } ), active = list( input = function(value) { if (missing(value)) return(private$wrapper$input) else private$wrapper$input <- value }, output = function(value) { if (missing(value)) return(private$wrapper$output) else private$wrapper$output <- value } ), private = list( wrapper = NULL ) ) compat_custom_KerasLayer_handler <- function(layer_class, args) { common_arg_names <- c("input_shape", "batch_input_shape", "batch_size", "dtype", "name", "trainable", "weights") py_wrapper_args <- args[common_arg_names] py_wrapper_args[sapply(py_wrapper_args, is.null)] <- NULL for (arg in names(py_wrapper_args)) args[[arg]] <- NULL r6_layer <- do.call(layer_class$new, args) python_path <- system.file("python", package = "keras") tools <- import_from_path("kerastools", path = python_path) py_wrapper_args$r_build <- r6_layer$build py_wrapper_args$r_call <- reticulate::py_func(r6_layer$call) py_wrapper_args$r_compute_output_shape <- r6_layer$compute_output_shape layer <- do.call(tools$layer$RLayer, py_wrapper_args) r6_layer$.set_wrapper(layer) list(layer, args) } py_formals <- function(py_obj) { inspect <- reticulate::import("inspect") sig <- if (inspect$isclass(py_obj)) { inspect$signature(py_obj$`__init__`) } else inspect$signature(py_obj) args <- pairlist() it <- sig$parameters$items()$`__iter__`() repeat { x <- reticulate::iter_next(it) if (is.null(x)) break name <- x[[1]] param <- x[[2]] if (param$kind == inspect$Parameter$VAR_KEYWORD || param$kind == inspect$Parameter$VAR_POSITIONAL) { args[["..."]] <- quote(expr = ) next } default <- param$default if (inherits(default, "python.builtin.object")) { if (default != inspect$Parameter$empty) warning(glue::glue( "Failed to convert default arg {param} for {name} in {py_obj_expr}" )) args[name] <- list(quote(expr = )) next } args[name] <- list(default) } args } create_layer_wrapper <- function(LayerClass, modifiers=NULL, convert=TRUE) { LayerClass_in <- LayerClass force(modifiers) wrapper <- function(object) { args <- capture_args(match.call(), modifiers, ignore = "object") create_layer(LayerClass, object, args) } formals(wrapper) <- local({ if (inherits(LayerClass, "python.builtin.type")) { f <- py_formals(LayerClass) } else { m <- LayerClass$public_methods init <- m$initialize %||% m$`__init__` f <- formals(init) } f$self <- NULL c(formals(wrapper), f) }) if (!inherits(LayerClass, "python.builtin.type")) delayedAssign("LayerClass", r_to_py(LayerClass_in, convert)) class(wrapper) <- c("keras_layer_wrapper", "function") attr(wrapper, "Layer") <- LayerClass_in wrapper } r_to_py.keras_layer_wrapper <- function(fn, convert = FALSE) { layer <- attr(fn, "Layer", TRUE) if (!inherits(layer, 
"python.builtin.type")) layer <- r_to_py(layer, convert) layer }
CPS1 <- select(CPS85, - workforce.years) head(CPS1, 2)
find_nondominated_points <- function(Y) { idxDominators <- vector(mode = "list", nrow(Y)) idxDominatees <- vector(mode = "list", nrow(Y)) for (i in 1:(nrow(Y) - 1)) { for (j in i:nrow(Y)) { if (all(Y[i, ] <= Y[j, ]) && any(Y[i, ] != Y[j, ])) { idxDominators[[j]] = c(idxDominators[[j]], i) idxDominatees[[i]] = c(idxDominatees[[i]], j) } else if (all(Y[j, ] <= Y[i, ]) && any(Y[j, ] != Y[i, ])) { idxDominators[[i]] = c(idxDominators[[i]], j) idxDominatees[[j]] = c(idxDominatees[[j]], i) } } } return(unlist(lapply(idxDominators,length)) == 0) }
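# Usage sketch for find_nondominated_points(): minimization in every column;
# the result is a logical vector, TRUE for points no other point dominates.
set.seed(1)
Y <- matrix(runif(20), ncol = 2)
nd <- find_nondominated_points(Y)
Y[nd, ]  # the Pareto front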
player_outlook <- function(leagueId = ffl_id(), limit = 50) { if (is.null(leagueId)) leagueId <- "42654852" if (is.null(limit)) limit <- "" all_get <- httr::RETRY( verb = "GET", url = paste0( "https://fantasy.espn.com/apis/v3/games/ffl/seasons/2021/segments/0/leagues/", leagueId ), query = list(view = "kona_player_info"), httr::accept_json(), httr::add_headers( `X-Fantasy-Filter` = jsonlite::toJSON( x = list( players = list( limit = limit, sortPercOwned = list( sortAsc = FALSE, sortPriority = 1 ) ) ), auto_unbox = TRUE ) ) ) pl <- jsonlite::fromJSON(httr::content(all_get, as = "text")) pl <- pl$players y <- max(pl$player$stats[[1]]$seasonId) w <- length(pl$player$outlooks$outlooksByWeek) x <- pl$player$outlooks$outlooksByWeek x$`0` <- pl$player$seasonOutlook x <- x[c(length(x), seq(length(x) - 1))] outlooks <- as.vector(t(as.data.frame(x))) out <- tibble::tibble( seasonId = y, scoringPeriodId = rep(seq(0, w), length(outlooks) / (w + 1)), id = rep(pl$player$id, each = w + 1), firstName = rep(pl$player$firstName, each = w + 1), lastName = rep(pl$player$lastName, each = w + 1), outlook = outlooks ) out[!is.na(out$outlook), ] }
deltaT<-function(year,month) { year<-year+(month-0.5)/12 if(year<1986) deltaT<-45.45+1.067*(year-1975)-(year-1975)^2/260-(year-1975)^3/718 else if(year>=1986&year<2005) deltaT<-63.86+0.3345*(year-2000)-0.060374*(year-2000)^2+ 0.0017275*(year-2000)^3+0.000651814*(year-2000)^4+0.00002373599*(year-2000)^5 else if(year>=2005&year<2050) deltaT<-62.92+0.32217*(year-2000)+0.005589*(year-2000)^2 else stop("deltaT: no approximation implemented for years >= 2050") deltaT }
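# Usage sketch for deltaT(): returns the polynomial approximation of Delta T
# (Terrestrial Time minus Universal Time, in seconds) for a year and month;
# note the branches above only cover years before 2050.
deltaT(2024, 6)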
NMixChainComp.NMixMCMC <- function(x, relabel = TRUE, param = c("w", "mu", "var", "sd", "cor", "Sigma", "Q", "Li")) { if (x$prior$priorK != "fixed") stop("Not implemented when x$prior$priorK is not fixed.") param <- match.arg(param) if (relabel) order <- x$order else order <- matrix(rep(1:x$K[1], nrow(x$w)), ncol=x$K[1], byrow=TRUE) if (param == "w"){ if (relabel){ if (x$nx_w == 1){ w <- matrix(nrow = nrow(x$w), ncol = x$K[1]) for (k in 1:x$K[1]) w[,k] <- x$w[cbind(1:nrow(x$w), x$order[,k])] colnames(w) <- colnames(x$w) return(w) }else{ w <- matrix(nrow = nrow(x$w), ncol = x$K[1] * x$nx_w) for (ixw in 1:x$nx_w){ wixw <- x$w[, (ixw-1)*x$K[1] + (1:x$K[1])] for (k in 1:x$K[1]) w[, (ixw-1)*x$K[1] + k] <- wixw[cbind(1:nrow(wixw), x$order[,k])] rm(list = "wixw") } colnames(w) <- colnames(x$w) return(w) } }else{ return(x$w) } }else{ if (param == "mu"){ if (relabel){ order <- x$order }else{ order <- matrix(rep(1:x$K[1], nrow(x$mu)), ncol = x$K[1], byrow = TRUE) } mu <- matrix(nrow = nrow(x$mu), ncol = x$dim * x$K[1]) for (k in 1:x$K[1]){ for (j in 1:x$dim){ i <- (k-1)*x$dim + j mu[,i] <- x$scale$shift[j] + x$scale$scale[j] * x$mu[cbind(1:nrow(x$mu), (order[,k] - 1)*x$dim + j)] } } colnames(mu) <- paste("mu", rep(1:x$K[1], each = x$dim), ".", rep(1:x$dim, x$K[1]), sep = "") return(mu) }else{ names(x$scale$scale) <- paste("s", 1:x$dim, sep = "") scaleRepL <- x$scale$scale if (x$dim > 1){ for (j in 1:(x$dim - 1)) scaleRepL <- c(scaleRepL, x$scale$scale[-(1:j)]) } scaleRepR <- rep(x$scale$scale, x$dim:1) scaleRepL <- rep(scaleRepL, x$K[1]) scaleRepR <- rep(scaleRepR, x$K[1]) if (param %in% c("var", "sd", "cor", "Sigma")){ Samp <- matrix(rep(scaleRepL, nrow(x$Sigma)), nrow = nrow(x$Sigma), byrow = TRUE) * x$Sigma * matrix(rep(scaleRepR, nrow(x$Sigma)), nrow = nrow(x$Sigma), byrow = TRUE) }else{ switch (param, "Q" = {Samp <- matrix(rep(1 / scaleRepL, nrow(x$Q)), nrow = nrow(x$Q), byrow = TRUE) * x$Q * matrix(rep(1 / scaleRepR, nrow(x$Q)), nrow = nrow(x$Q), byrow = TRUE)}, "Li" = {Samp <- matrix(rep(1 / scaleRepL, nrow(x$Li)), nrow = nrow(x$Li), byrow = TRUE) * x$Li} ) } LTp <- (x$dim * (x$dim + 1)) / 2 if (relabel){ SampRel <- matrix(nrow = nrow(Samp), ncol = ncol(Samp)) colnames(SampRel) <- colnames(Samp) for (k in 1:x$K[1]){ for (j in 1:LTp){ i <- (k - 1) * LTp + j SampRel[, i] <- Samp[cbind(1:nrow(Samp), (x$order[,k] - 1) * LTp + j)] } } Samp <- SampRel rm(list = "SampRel") } Idiag <- matrix(0, nrow = x$dim, ncol = x$dim) Idiag[lower.tri(Idiag, diag = TRUE)] <- 1:LTp jdiag <- diag(Idiag) jdiagAll <- numeric() for (k in 1:x$K[1]) jdiagAll <- c(jdiagAll, (k - 1)*LTp + jdiag) if (param %in% c("Sigma", "Q", "Li")){ return(Samp) }else{ switch (param, "var" = { Samp <- Samp[, jdiagAll] colnames(Samp) <- paste("var", rep(1:x$K[1], each = x$dim), ".", rep(1:x$dim, x$K[1]), sep = "") return(Samp) }, "sd" = { Samp <- sqrt(Samp[, jdiagAll]) colnames(Samp) <- paste("sd", rep(1:x$K[1], each = x$dim), ".", rep(1:x$dim, x$K[1]), sep = "") return(Samp) }, "cor" = { if (x$dim == 1) return(numeric(0)) ncor <- (x$dim - 1) * x$dim / 2 cSamp <- matrix(nrow = nrow(Samp), ncol = ncor * x$K[1]) cName <- character(ncor * x$K[1]) cc <- 0 for (k in 1:x$K[1]){ for (j in 1:(x$dim - 1)){ for (i in (j+1):x$dim){ cc <- cc + 1 cName[cc] <- paste("cor", k, ".", i, ".", j, sep = "") cSamp[, cc] <- Samp[, paste("Sigma", k, ".", i, ".", j, sep = "")] / sqrt(Samp[, paste("Sigma", k, ".", i, ".", i, sep = "")] * Samp[, paste("Sigma", k, ".", j, ".", j, sep = "")]) } } } colnames(cSamp) <- cName return(cSamp) } ) } } } }
read_velocities <- function(file="Velocities.txt", ... ) { out <- read_particles(file=file,...) names(out) <- c("velocities", "gofs", "best.velocity", "best.gof") return(out) }
test_that("lint all files in a directory", { the_dir <- file.path("dummy_packages", "package", "vignettes") files <- list.files(the_dir) lints <- lint_dir(the_dir, parse_settings = FALSE) linted_files <- unique(names(lints)) expect_s3_class(lints, "lints") expect_setequal(linted_files, files) }) test_that("lint all relevant directories in a package", { the_pkg <- file.path("dummy_packages", "package") files <- setdiff( list.files(the_pkg, recursive = TRUE), c("package.Rproj", "DESCRIPTION", "NAMESPACE") ) read_settings(NULL) lints <- lint_package(the_pkg, parse_settings = FALSE) linted_files <- unique(names(lints)) linted_files <- gsub("\\", "/", linted_files, fixed = TRUE) expect_s3_class(lints, "lints") expect_setequal(linted_files, files) linters <- list(assignment_linter(), object_name_linter()) read_settings(NULL) lints <- lint_package(the_pkg, linters = linters, parse_settings = FALSE) linted_files <- unique(names(lints)) linted_files <- gsub("\\", "/", linted_files, fixed = TRUE) expect_s3_class(lints, "lints") expect_setequal(linted_files, files) }) test_that("respects directory exclusions", { the_dir <- tempfile() dir.create(the_dir, recursive = TRUE) on.exit(unlink(the_dir, recursive = TRUE)) the_excluded_dir <- file.path(the_dir, "exclude-me") dir.create(the_excluded_dir) file.copy("default_linter_testcode.R", the_dir) file.copy("default_linter_testcode.R", the_excluded_dir) file.copy("default_linter_testcode.R", file.path(the_excluded_dir, "bad2.R")) lints <- lint_dir(the_dir, exclusions = "exclude-me") linted_files <- unique(names(lints)) expect_length(linted_files, 1L) expect_identical(linted_files, "default_linter_testcode.R") lints_norm <- lint_dir(the_dir, exclusions = "exclude-me", relative_path = FALSE) linted_files <- unique(names(lints_norm)) expect_length(linted_files, 1L) expect_identical(linted_files, normalizePath(file.path(the_dir, "default_linter_testcode.R"))) }) test_that("respect directory exclusions from settings", { the_dir <- tempfile() dir.create(the_dir, recursive = TRUE) on.exit(unlink(the_dir, recursive = TRUE)) the_excluded_dir <- file.path(the_dir, "exclude-me") dir.create(the_excluded_dir) file.copy("default_linter_testcode.R", the_dir) file.copy("default_linter_testcode.R", the_excluded_dir) file.copy("default_linter_testcode.R", file.path(the_excluded_dir, "bad2.R")) cat("exclusions:\n 'exclude-me'\n", file = file.path(the_dir, ".lintr")) lints <- lint_dir(the_dir) linted_files <- unique(names(lints)) expect_length(linted_files, 1L) })
languageserver_add_to_rprofile <- function( rlsLib = getOption("langserver_library"), rprofilePath = locate_rprofile(), confirmBeforeChanging = TRUE, code = append_code(rlsLib = rlsLib) ) { sysDepAvailable <- system_dep_available() if (!sysDepAvailable) stop(attr(sysDepAvailable, "msg")) filePath <- make_rprofile_path(rprofilePath) continue <- if (isTRUE(confirmBeforeChanging)) { try(askYesNo( paste( "This will append the following code: \n", paste(code, collapse = "\n"), paste0("\n", "to: ", filePath, "\n"), "Do you agree?", sep = "\n" ), default = FALSE )) } else { TRUE } if (!isTRUE(continue)) { message(confirm_message()) return(FALSE) } write(code, file = filePath, append = TRUE) }
levins.overlap <- function(df, q = 1.65){ Taxa <- df[,1] Counts <- df[,-1] Counts <- cbind.data.frame(Taxa, Counts) melt.Counts <- melt(Counts, id.vars = "Taxa") cast.Counts <- dcast(melt.Counts, Taxa ~ Taxa, fun.aggregate = sum) tmpdf <- cast.Counts[,-1] tmpdf <- cbind.data.frame(colnames(tmpdf), as.numeric(colSums(tmpdf))) colnames(tmpdf) <- c("Taxa", "Sum") tmp5 <- tmpdf[order(tmpdf$Sum, decreasing = T),] Ranks <- c(1:nrow(tmpdf)) tmp6 <- cbind.data.frame(tmp5, Ranks) plot(log(Sum) ~ Ranks, data = tmp6, pch = 16, cex = 1.5, type = "b", ylim = c(0, max(log(tmpdf$Sum))+1), ylab = "Log Abundance", xlab = "Taxon Rank", cex.lab = 1.6, las = 1) legend("topright", pch = c(16, 1, 1), col = c("black", "red", "blue"), legend = c("Data", "Model", "LOQ"), bty = "n", cex = 1.5) SR <- function(S, a, R){ S*exp(-a^2*R^2) } a = sqrt((log(max(tmpdf$Sum))/min(tmpdf$Sum))/max(Ranks)^2) res <- SR(log(max(tmpdf$Sum)), a, Ranks) points(res, col = "red", type = "b", cex = 1.5) maxLOQ <- rep(q*sd(res), length(Ranks)) LOQlim <- q*sd(res) points(maxLOQ, col = "blue", type = "b") rownames(Counts) <- Taxa Counts <- Counts[,-1] cleanCounts <- c(1:ncol(Counts)) checkTaxa <- vector() for(y in 1:nrow(Counts)){ query <- log(rowSums(Counts[y,])) curr.row <- Counts[y,] curr.nom <- as.character(Taxa[y]) if(query < LOQlim){ LOQnom <- paste(curr.nom, "*", sep = "") checkTaxa <- rbind(checkTaxa, LOQnom) }else{ checkTaxa <- rbind(checkTaxa, curr.nom) } } rownames(Counts) <- checkTaxa cleanCounts <- Counts[-1,] Counts.ra <- (sweep(cleanCounts, 1,rowSums(cleanCounts), '/')) Z <- data.frame("LO1,2" = rep(NA, nrow(Counts.ra)^2), "LO2,1" = rep(NA, nrow(Counts.ra)^2), "value" = rep(NA, nrow(Counts.ra)^2)) tmp.res <- vector() tmp.nom1 <- vector() tmp.nom2 <- vector() for(o in 1:1){ for(i in 1:nrow(Counts.ra)){ for(j in 1:nrow(Counts.ra)){ sp1 <- Counts.ra[i,] sp2 <- Counts.ra[j,] noms <- rownames(Counts.ra) f1 <- function(x,y) x*y W <- mapply(f1, sp1, sp2) K <- apply(as.matrix(sp1), 1, function(j) j^2) WK <- cbind(sum(W),sum(K)) res1 <- min(WK)/max(WK) tmp.nom1 <- rbind(tmp.nom1, noms[i]) tmp.nom2 <- rbind(tmp.nom2, noms[j]) tmp.res <- rbind(tmp.res, res1) } } Z$LO1.2 <- tmp.nom1 Z$LO2.1 <- tmp.nom2 Z$value <- tmp.res cast.df <- dcast(Z, LO1.2 ~ LO2.1, value.var = "value") return(cast.df) } }
read_aco <- function(path, use_names=TRUE, .verbose=FALSE) { if (is_url(path)) { tf <- tempfile() httr::stop_for_status(httr::GET(path, httr::write_disk(tf))) path <- tf on.exit(unlink(tf), add = TRUE) } path <- normalizePath(path.expand(path)) aco <- readBin(path, "raw", file.info(path)$size, endian="big") version <- unpack("v", aco[2:1])[[1]] n_colors <- unpack("v", aco[4:3])[[1]] if (.verbose) { message("ACO Version: ", version) message("# of colors: ", n_colors) } pal <- NULL if (version == 1) { pal <- decode_aco_v1(aco, n_colors) } else { pal <- decode_aco_v2(aco, n_colors) } if (!use_names) { pal <- unname(pal) } gsub(" ", "0", pal) }
knitr::opts_chunk$set( collapse = TRUE, comment = "#>", warning = FALSE, message = FALSE ) library(bestridge) data("trim32", package = "bestridge") y <- trim32[, 1] x <- as.matrix(trim32[, -1]) lm.bsrr <- bsrr(x, y) data("duke") y <- duke$y x <- as.matrix(duke[, -1]) logi.bsrr <- bsrr(x, y, family = "binomial", method = "sequential") plot(logi.bsrr) data(patient.data) x <- patient.data$x y <- patient.data$time status <- patient.data$status cox.bsrr <- bsrr(x, cbind(y, status), family = "cox") summary(cox.bsrr)
setClass( Class = "Tr.match", contains = "ADEg.Tr", ) setMethod( f = "initialize", signature = "Tr.match", definition = function(.Object, data = list(dfxyz = NULL, labels = NULL, frame = 0, storeData = TRUE), ...) { .Object <- callNextMethod(.Object, data = data, ...) .Object@data$labels <- data$labels return(.Object) }) setMethod( f = "prepare", signature = "Tr.match", definition = function(object) { name_obj <- deparse(substitute(object)) if(object@data$storeData) { df <- object@data$dfxyz } else { df <- eval(object@data$dfxyz, envir = sys.frame(object@data$frame)) } oldparamadeg <- adegpar() on.exit(adegpar(oldparamadeg)) adegtot <- adegpar([email protected]) [email protected] <- adegtot callNextMethod() df <- sweep(df, 1, rowSums(df), "/") n <- NROW(df) / 2 df1 <- df[1:n,] df2 <- df[(1 + n):(2 * n), ] object@stats$coords2d1 <- .coordtotriangleM(df1, mini3 = [email protected]$min3d, maxi3 = [email protected]$max3d)[, 2:3] object@stats$coords2d2 <- .coordtotriangleM(df2, mini3 = [email protected]$min3d, maxi3 = [email protected]$max3d)[, 2:3] [email protected]$plabels$optim <- FALSE assign(name_obj, object, envir = parent.frame()) }) setMethod( f = "panel", signature = "Tr.match", definition = function(object, x, y) { if(object@data$storeData) { labels <- object@data$labels df <- object@data$dfxyz } else { labels <- eval(object@data$labels, envir = sys.frame(object@data$frame)) df <- eval(object@data$dfxyz, envir = sys.frame(object@data$frame)) } if(NROW(df) %% 2) stop("error in panel method : unable to split the two datasets") if(any([email protected]$ppoints$cex > 0)) do.call("panel.points", c(list(x = object@stats$coords2d1[, 1], y = object@stats$coords2d1[, 2]), [email protected]$ppoints)) panel.arrows(x0 = object@stats$coords2d1[, 1], y0 = object@stats$coords2d1[, 2] , y1 = object@stats$coords2d2[, 2], x1 = object@stats$coords2d2[, 1], angle = [email protected]$parrows$angle, length = [email protected]$parrows$length, ends = [email protected]$parrows$end, lwd = [email protected]$plines$lwd, col = [email protected]$plines$col, lty = [email protected]$plines$lty) if(any([email protected]$plabels$cex > 0)) { xlab <- (object@stats$coords2d1[, 1] + object@stats$coords2d2[, 1]) / 2 ylab <- (object@stats$coords2d1[, 2] + object@stats$coords2d2[, 2]) / 2 adeg.panel.label(xlab, ylab, labels = labels, [email protected]$plabels) } }) triangle.match <- function(dfxyz1, dfxyz2, labels = row.names(as.data.frame(dfxyz1)), min3d = NULL, max3d = NULL, adjust = TRUE, showposition = TRUE, facets = NULL, plot = TRUE, storeData = TRUE, add = FALSE, pos = -1, ...) { thecall <- .expand.call(match.call()) data1 <- try(as.data.frame(eval(thecall$dfxyz1, envir = sys.frame(sys.nframe() + pos))), silent = TRUE) data2 <- try(as.data.frame(eval(thecall$dfxyz2, envir = sys.frame(sys.nframe() + pos))), silent = TRUE) if(class(data1) == "try-error" || class(data2) == "try-error" || is.null(thecall$dfxyz1) || is.null(thecall$dfxyz2)) stop("non convenient selection for dfxyz1 or dfxyz2 (can not be converted to dataframe)") sortparameters <- sortparamADEg(...) if(!is.null(facets)) { object <- multi.facets.Tr(thecall, samelimits = sortparameters$g.args$samelimits) } else { if(length(sortparameters$rest)) warning(c("Unused parameters: ", paste(unique(names(sortparameters$rest)), " ", sep = "")), call. 
= FALSE) g.args <- c(sortparameters$g.args, list(adjust = adjust, min3d = min3d, max3d = max3d)) if(storeData) tmp_data <- list(dfxyz = rbind(dfxyz1, dfxyz2), labels = labels, frame = sys.nframe() + pos, storeData = storeData) else tmp_data <- list(dfxyz = call("rbind", thecall$dfxyz1, thecall$dfxyz2), labels = thecall$labels, frame = sys.nframe() + pos, storeData = storeData) object <- new(Class = "Tr.match", data = tmp_data, adeg.par = sortparameters$adepar, trellis.par = sortparameters$trellis, g.args = g.args, Call = match.call()) prepare(object) setlatticecall(object) if(showposition & add) { print("cannot show position and add") showposition <- FALSE } if(showposition) object <- new(Class = "ADEgS", ADEglist = list("triangle" = object, "positions" = .showpos(object)), positions = rbind(c(0, 0, 1, 1), c(0, 0.7, 0.3, 1)), add = matrix(0, ncol = 2, nrow = 2), Call = match.call()) if(add) object <- add.ADEg(object) } if(!add & plot) print(object) invisible(object) }
NOT_CRAN <- identical(tolower(Sys.getenv("NOT_CRAN")), "true")
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  purl = NOT_CRAN,
  eval = NOT_CRAN
)
library(magi)
tvec <- seq(0, 20, by = 0.5)
V <- c(-1.16, -0.18, 1.57, 1.99, 1.95, 1.85, 1.49, 1.58, 1.47, 0.96, 0.75, 0.22, -1.34, -1.72, -2.11, -1.56, -1.51, -1.29, -1.22, -0.36, 1.78, 2.36, 1.78, 1.8, 1.76, 1.4, 1.02, 1.28, 1.21, 0.04, -1.35, -2.1, -1.9, -1.49, -1.55, -1.35, -0.98, -0.34, 1.9, 1.99, 1.84)
R <- c(0.94, 1.22, 0.89, 0.13, 0.4, 0.04, -0.21, -0.65, -0.31, -0.65, -0.72, -1.26, -0.56, -0.44, -0.63, 0.21, 1.07, 0.57, 0.85, 1.04, 0.92, 0.47, 0.27, 0.16, -0.41, -0.6, -0.58, -0.54, -0.59, -1.15, -1.23, -0.37, -0.06, 0.16, 0.43, 0.73, 0.7, 1.37, 1.1, 0.85, 0.23)
fnmodelODE <- function(theta, x, t) {
  V <- x[, 1]
  R <- x[, 2]
  result <- array(0, c(nrow(x), ncol(x)))
  result[, 1] = theta[3] * (V - V^3 / 3.0 + R)
  result[, 2] = -1.0 / theta[3] * (V - theta[1] + theta[2] * R)
  result
}
fnmodelDx <- function(theta, x, t) {
  resultDx <- array(0, c(nrow(x), ncol(x), ncol(x)))
  V = x[, 1]
  resultDx[, 1, 1] = theta[3] * (1 - V^2)
  resultDx[, 2, 1] = theta[3]
  resultDx[, 1, 2] = (-1.0 / theta[3])
  resultDx[, 2, 2] = (-1.0 * theta[2] / theta[3])
  resultDx
}
fnmodelDtheta <- function(theta, x, t) {
  resultDtheta <- array(0, c(nrow(x), length(theta), ncol(x)))
  V = x[, 1]
  R = x[, 2]
  resultDtheta[, 3, 1] = V - V^3 / 3.0 + R
  resultDtheta[, 1, 2] = 1.0 / theta[3]
  resultDtheta[, 2, 2] = -R / theta[3]
  resultDtheta[, 3, 2] = 1.0 / (theta[3]^2) * (V - theta[1] + theta[2] * R)
  resultDtheta
}
testDynamicalModel(fnmodelODE, fnmodelDx, fnmodelDtheta, "FN equations", cbind(V, R), c(.5, .6, 2), tvec)
fnmodel <- list(
  fOde = fnmodelODE,
  fOdeDx = fnmodelDx,
  fOdeDtheta = fnmodelDtheta,
  thetaLowerBound = c(0, 0, 0),
  thetaUpperBound = c(Inf, Inf, Inf)
)
yobs <- data.frame(time = tvec, V = V, R = R)
yinput <- setDiscretization(yobs, level = 1)
result <- MagiSolver(yinput, fnmodel, control = list(niterHmc = 2000, nstepsHmc = 100))
oldpar <- par(mfrow = c(2, 2), mar = c(5, 2, 1, 1))
theta.names <- c("a", "b", "c")
for (i in 1:3) {
  plot(result$theta[, i], main = theta.names[i], type = "l", ylab = "")
}
plot(result$lp, main = "log-post", type = "l", ylab = "")
theta.est <- apply(result$theta, 2, function(x) c(mean(x), quantile(x, 0.025), quantile(x, 0.975)))
colnames(theta.est) <- theta.names
rownames(theta.est) <- c("Mean", "2.5%", "97.5%")
signif(theta.est, 3)
par(mfrow = c(1, 2), mar = c(4, 2, 1, 1))
compnames <- c("V", "R")
ylim_lower <- c(-3, -2)
ylim_upper <- c(3, 2)
times <- yinput[, 1]
xLB <- apply(result$xsampled, c(2, 3), function(x) quantile(x, 0.025))
xMean <- apply(result$xsampled, c(2, 3), mean)
xUB <- apply(result$xsampled, c(2, 3), function(x) quantile(x, 0.975))
for (i in 1:2) {
  plot(times, xMean[, i], type = "n", xlab = "time", ylab = "", ylim = c(ylim_lower[i], ylim_upper[i]))
  mtext(compnames[i])
  polygon(c(times, rev(times)), c(xUB[, i], rev(xLB[, i])), col = "skyblue", border = NA)
  points(times, yinput[, i + 1], col = "grey50")
  lines(times, xMean[, i], lwd = 1)
}
yinput2 <- setDiscretization(yobs, level = 2)
result2 <- MagiSolver(yinput2, fnmodel, control = list(niterHmc = 2000, nstepsHmc = 100))
theta.est <- apply(result2$theta, 2, function(x) c(mean(x), quantile(x, 0.025), quantile(x, 0.975)))
colnames(theta.est) <- theta.names
rownames(theta.est) <- c("Mean", "2.5%", "97.5%")
signif(theta.est, 3)
hes1modelODE <- function(theta, x, t) {
  P = x[, 1]
  M = x[, 2]
  H = x[, 3]
  PMHdt = array(0, c(nrow(x), ncol(x)))
  PMHdt[, 1] = -theta[1] * P * H + theta[2] * M - theta[3] * P
  PMHdt[, 2] = -theta[4] * M + theta[5] / (1 + P^2)
  PMHdt[, 3] = -theta[1] * P * H + theta[6] / (1 + P^2) - theta[7] * H
  PMHdt
}
param.true <- list(
theta = c(0.022, 0.3, 0.031, 0.028, 0.5, 20, 0.3), x0 = c(1.439, 2.037, 17.904), sigma = c(0.15, 0.15, NA) ) modelODE <- function(t, state, parameters) { list(as.vector(hes1modelODE(parameters, t(state), t))) } x <- deSolve::ode(y = param.true$x0, times = seq(0, 60*4, by = 0.01), func = modelODE, parms = param.true$theta) set.seed(12321) y <- as.data.frame(x[ x[, "time"] %in% seq(0, 240, by = 7.5),]) names(y) <- c("time", "P", "M", "H") y$P <- y$P * exp(rnorm(nrow(y), sd=param.true$sigma[1])) y$M <- y$M * exp(rnorm(nrow(y), sd=param.true$sigma[2])) y$H <- NaN y$P[y$time %in% seq(7.5,240,by=15)] <- NaN y$M[y$time %in% seq(0,240,by=15)] <- NaN matplot(x[, "time"], x[, -1], type="l", lty=1, xlab="Time (min)", ylab="Level") matplot(y$time, y[,-1], type="p", col=1:(ncol(y)-1), pch=20, add = TRUE) legend("topright", c("P", "M", "H"), lty=1, col=c("black", "red", "green")) y[,2:4] <- log(y[,2:4]) hes1logmodelODE <- function (theta, x, t) { eP = exp(x[, 1]) eM = exp(x[, 2]) eH = exp(x[, 3]) PMHdt <- array(0, c(nrow(x), ncol(x))) PMHdt[, 1] = -theta[1] * eH + theta[2] * eM/eP - theta[3] PMHdt[, 2] = -theta[4] + theta[5]/(1 + eP^2)/eM PMHdt[, 3] = -theta[1] * eP + theta[6]/(1 + eP^2)/eH - theta[7] PMHdt } hes1logmodelDx <- function (theta, x, t) { P = x[, 1] M = x[, 2] H = x[, 3] Dx <- array(0, c(nrow(x), ncol(x), ncol(x))) dP = -(1 + exp(2 * P))^(-2) * exp(2 * P) * 2 Dx[, 1, 1] = -theta[2] * exp(M - P) Dx[, 2, 1] = theta[2] * exp(M - P) Dx[, 3, 1] = -theta[1] * exp(H) Dx[, 1, 2] = theta[5] * exp(-M) * dP Dx[, 2, 2] = -theta[5] * exp(-M)/(1 + exp(2 * P)) Dx[, 1, 3] = -theta[1] * exp(P) + theta[6] * exp(-H) * dP Dx[, 3, 3] = -theta[6] * exp(-H)/(1 + exp(2 * P)) Dx } hes1logmodelDtheta <- function (theta, x, t) { P = x[, 1] M = x[, 2] H = x[, 3] Dtheta <- array(0, c(nrow(x), length(theta), ncol(x))) Dtheta[, 1, 1] = -exp(H) Dtheta[, 2, 1] = exp(M - P) Dtheta[, 3, 1] = -1 Dtheta[, 4, 2] = -1 Dtheta[, 5, 2] = exp(-M)/(1 + exp(2 * P)) Dtheta[, 1, 3] = -exp(P) Dtheta[, 6, 3] = exp(-H)/(1 + exp(2 * P)) Dtheta[, 7, 3] = -1 Dtheta } hes1logmodel <- list( fOde = hes1logmodelODE, fOdeDx = hes1logmodelDx, fOdeDtheta = hes1logmodelDtheta, thetaLowerBound = rep(0,7), thetaUpperBound = rep(Inf,7) ) resultHes1 <- MagiSolver(y, hes1logmodel, control=list(sigma = c(0.15,0.15,NA), useFixedSigma = TRUE)) par(mfrow=c(2,4), mar=c(5,2,1,1)) theta.names <- c("a", "b", "c", "d", "e", "f", "g") for (i in 1:7) { plot(resultHes1$theta[,i], main=theta.names[i], type="l", ylab="") } plot(resultHes1$lp, main="log-posterior", type="l", ylab="") theta.est <- apply(resultHes1$theta, 2, function(x) c(mean(x), quantile(x, 0.025), quantile(x, 0.975))) colnames(theta.est) <- theta.names rownames(theta.est) <- c("Post.Mean", "2.5%", "97.5%") signif(theta.est, 3) ylim_lower <- c(1.5, 0.5, 0) ylim_upper <- c(10.0, 3.5, 21) layout(rbind(c(1,2,3), c(4,4,4)), heights = c(5,1)) compnames <- c("P", "M", "H") compobs <- c("17 observations", "16 observations", "unobserved") times <- y[,1] xLB <- exp(apply(resultHes1$xsampled, c(2,3), function(x) quantile(x, 0.025))) xMean <- exp(apply(resultHes1$xsampled, c(2,3), mean)) xUB <- exp(apply(resultHes1$xsampled, c(2,3), function(x) quantile(x, 0.975))) for (i in 1:3) { plot(times, xMean[,i], type="n", xlab="time", ylab=compnames[i], ylim=c(ylim_lower[i], ylim_upper[i])) mtext(paste0(compnames[i], " (", compobs[i], ")"), cex=1) polygon(c(times, rev(times)), c(xUB[,i], rev(xLB[,i])), col = "skyblue", border = NA) lines(x[,1], x[,1+i], col="red", lwd=2) lines(times, xMean[,i], col="forestgreen", lwd=2) } 
par(mar=rep(0,4)) plot(1,type='n', xaxt='n', yaxt='n', xlab=NA, ylab=NA, frame.plot = FALSE) legend("center", c("truth", "inferred trajectory", "95% interval"), lty=c(1,1,0), lwd=c(2,2,0), col = c("red", "forestgreen", NA), fill=c(0, 0,"skyblue"), text.width=c(0, 0.4, 0.05), bty = "n", border=c(0, 0, "skyblue"), pch=c(NA, NA, 15), horiz=TRUE) theta.names <- c("lambda", "rho", "delta", "N", "c") hivtdmodelODE <- function(theta,x,tvec) { TU <- x[,1] TI <- x[,2] V <- x[,3] lambda <- theta[1] rho <- theta[2] delta <- theta[3] N <- theta[4] c <- theta[5] eta <- 9e-5 * (1 - 0.9 * cos(pi * tvec / 1000)) result <- array(0, c(nrow(x),ncol(x))) result[,1] = lambda - rho * TU - eta * TU * V result[,2] = eta * TU * V - delta * TI result[,3] = N * delta * TI - c * V result } hivtdmodelDx <- function(theta,x,tvec) { resultDx <- array(0, c(nrow(x), ncol(x), ncol(x))) TU <- x[,1] TI <- x[,2] V <- x[,3] lambda <- theta[1] rho <- theta[2] delta <- theta[3] N <- theta[4] c <- theta[5] eta <- 9e-5 * (1 - 0.9 * cos(pi * tvec / 1000)) resultDx[,1,1] = -rho - eta * V resultDx[,2,1] = 0 resultDx[,3,1] = -eta * TU resultDx[,1,2] = eta * V resultDx[,2,2] = -delta resultDx[,3,2] = eta * TU resultDx[,1,3] = 0 resultDx[,2,3] = N * delta resultDx[,3,3] = -c resultDx } hivtdmodelDtheta <- function(theta,x,tvec) { resultDtheta <- array(0, c(nrow(x), length(theta), ncol(x))) TU <- x[,1] TI <- x[,2] V <- x[,3] lambda <- theta[1] rho <- theta[2] delta <- theta[3] N <- theta[4] c <- theta[5] eta <- 9e-5 * (1 - 0.9 * cos(pi * tvec / 1000)) resultDtheta[,1,1] = 1 resultDtheta[,2,1] = -TU resultDtheta[,3,1] = 0 resultDtheta[,4,1] = 0 resultDtheta[,5,1] = 0 resultDtheta[,1,2] = 0 resultDtheta[,2,2] = 0 resultDtheta[,3,2] = -TI resultDtheta[,4,2] = 0 resultDtheta[,5,2] = 0 resultDtheta[,1,3] = 0 resultDtheta[,2,3] = 0 resultDtheta[,3,3] = N * TI resultDtheta[,4,3] = delta * TI resultDtheta[,5,3] = -V resultDtheta } param.true <- list( theta = c(36, 0.108, 0.5, 1000, 3), x0 = c(600, 30, 1e5), sigma= c(sqrt(10), sqrt(10), 10) ) times <- seq(0, 20, 0.1) modelODE <- function(t, state, parameters) { list(as.vector(hivtdmodelODE(parameters, t(state), t))) } xtrue <- deSolve::ode(y = param.true$x0, times = times, func = modelODE, parms = param.true$theta) xsim <- xtrue set.seed(12321) for(j in 1:(ncol(xsim)-1)){ xsim[,1+j] <- xsim[,1+j]+rnorm(nrow(xsim), sd=param.true$sigma[j]) } matplot(xsim[,"time"], xsim[,-1], type="p", col=1:(ncol(xsim)-1), pch=20, log = 'y', ylab="Concentration", xlab="time") legend("topright", c("TU", "TI", "V"), pch=20, col=c("black", "red", "green")) hivtdmodel <- list( fOde=hivtdmodelODE, fOdeDx=hivtdmodelDx, fOdeDtheta=hivtdmodelDtheta, thetaLowerBound=c(0,0,0,0,0), thetaUpperBound=c(Inf,Inf,Inf,Inf,Inf) ) y <- setDiscretization(data.frame(xsim), 0) testDynamicalModel(hivtdmodelODE, hivtdmodelDx, hivtdmodelDtheta, "HIV time-dependent system", y[,2:4], param.true$theta, y[,"time"]) phiExogenous <- matrix(0, nrow=2, ncol=ncol(y)-1) sigmaInit <- rep(0, ncol(y)-1) for (j in 1:(ncol(y)-1)){ hyperparam <- gpsmoothing(y[,j+1], y[,"time"]) phiExogenous[,j] <- hyperparam$phi sigmaInit[j] <- hyperparam$sigma } phiExogenous sigmaInit phiExogenous[,3] <- c(5e7, 1) sigmaInit[3] <- 1 HIVresult <- MagiSolver(y, hivtdmodel, control = list(phi=phiExogenous, sigma=sigmaInit, niterHmc=10000)) theta.est <- apply(HIVresult$theta, 2, function(x) c(mean(x), quantile(x, 0.025), quantile(x, 0.975))) colnames(theta.est) <- theta.names rownames(theta.est) <- c("Mean", "2.5%", "97.5%") signif(theta.est, 3) par(mfrow=c(1,3), 
mar=c(4,3,1.5,1)) compnames <- c("TU", "TI", "V") ylim_lower <- c(100, 0, 0) ylim_upper <- c(750, 175, 1e5) xMean <- apply(HIVresult$xsampled, c(2,3), mean) for (i in 1:3) { plot(times, xMean[,i], type="n", xlab="time", ylab="", ylim=c(ylim_lower[i], ylim_upper[i])) mtext(compnames[i]) points(times, xsim[,i+1], col = "grey50") lines(times, xMean[,i], col="forestgreen", lwd=4) lines(times, xtrue[,i+1], col="red", lwd=1.5) } par(oldpar)
loglin <- function(table, margin, start = rep(1, length(table)), fit = FALSE, eps = 0.1, iter = 20L, param = FALSE, print = TRUE) { rfit <- fit dtab <- dim(table) nvar <- length(dtab) ncon <- length(margin) conf <- matrix(0L, nrow = nvar, ncol = ncon) nmar <- 0 varnames <- names(dimnames(table)) for (k in seq_along(margin)) { tmp <- margin[[k]] if (is.character(tmp)) { tmp <- match(tmp, varnames) margin[[k]] <- tmp } if (!is.numeric(tmp) || any(is.na(tmp) | tmp <= 0)) stop("'margin' must contain names or numbers corresponding to 'table'") conf[seq_along(tmp), k] <- tmp nmar <- nmar + prod(dtab[tmp]) } ntab <- length(table) if (length(start) != ntab ) stop("'start' and 'table' must be same length") z <- .Call(C_LogLin, dtab, conf, table, start, nmar, eps, iter) if (print) cat(z$nlast, "iterations: deviation", z$dev[z$nlast], "\n") fit <- z$fit attributes(fit) <- attributes(table) observed <- as.vector(table[start > 0]) expected <- as.vector(fit[start > 0]) pearson <- sum((observed - expected)^2 / expected) observed <- as.vector(table[table * fit > 0]) expected <- as.vector(fit[table * fit > 0]) lrt <- 2 * sum(observed * log(observed / expected)) subsets <- function(x) { y <- list(vector(mode(x), length = 0)) for (i in seq_along(x)) { y <- c(y, lapply(y, c, x[i])) } y[-1L] } df <- rep.int(0, 2^nvar) for (k in seq_along(margin)) { terms <- subsets(margin[[k]]) for (j in seq_along(terms)) df[sum(2 ^ (terms[[j]] - 1))] <- prod(dtab[terms[[j]]] - 1) } if (!is.null(varnames) && all(nzchar(varnames))) { for (k in seq_along(margin)) margin[[k]] <- varnames[margin[[k]]] } else { varnames <- as.character(1 : ntab) } y <- list(lrt = lrt, pearson = pearson, df = ntab - sum(df) - 1, margin = margin) if (rfit) y$fit <- fit if (param) { fit <- log(fit) terms <- seq_along(df)[df > 0] parlen <- length(terms) + 1 parval <- list(parlen) parnam <- character(parlen) parval[[1L]] <- mean(fit) parnam[1L] <- "(Intercept)" fit <- fit - parval[[1L]] dyadic <- NULL while(any(terms > 0)) { dyadic <- cbind(dyadic, terms %% 2) terms <- terms %/% 2 } dyadic <- dyadic[order(rowSums(dyadic)), , drop = FALSE] for (i in 2 : parlen) { vars <- which(dyadic[i - 1, ] > 0) parval[[i]] <- apply(fit, vars, mean) parnam[i] <- paste(varnames[vars], collapse = ".") fit <- sweep(fit, vars, parval[[i]], check.margin=FALSE) } names(parval) <- parnam y$param <- parval } return(y) }
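## Usage sketch: the function above is base R's stats::loglin(); the pasted
## copy needs the compiled C_LogLin routine from the stats namespace, so call
## the installed version. This fits the no-three-way-interaction model to the
## HairEyeColor table:
fit <- stats::loglin(HairEyeColor, list(c(1, 2), c(1, 3), c(2, 3)))
fit$lrt  # likelihood-ratio statistic, on fit$df degrees of freedom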
bef.portal.vizualize.keywords <- bef.vizualize.keywords <- function(color_pal = brewer.pal(8, "Dark2"), min_freq = 1) {
  keyword_table <- data.frame(bef.portal.get.keywords(), stringsAsFactors = FALSE)
  wordcloud(keyword_table$name, keyword_table$count,
            scale = c(8, .2), min.freq = min_freq, max.words = Inf,
            random.order = FALSE, rot.per = .15, colors = color_pal)
}
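## Usage sketch for the keyword cloud above (commented out: it needs access to
## the BEF portal API, plus the RColorBrewer and wordcloud packages that the
## function calls unqualified):
# library(RColorBrewer); library(wordcloud)
# bef.portal.vizualize.keywords(color_pal = brewer.pal(8, "Set2"), min_freq = 2)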
library(ggplot2) p <- ggplot(ToothGrowth, aes(x = as.factor(dose), y = len)) + geom_point(na.rm = TRUE) + theme(axis.line = element_line(colour = "black")) grab_axis <- function(plot, side = "b") { gt <- ggplotGrob(plot) grb <- gt$grobs[grep(paste0("axis-", side), gt$layout$name)][[1]] grb <- grb$children[vapply(grb$children, inherits, logical(1), "gtable")][[1]] return(grb) } g1 <- p + scale_y_continuous(guide = "axis") g2 <- p + scale_y_continuous(guide = "prism_offset") expect_silent(ggplotGrob(g1)) expect_silent(ggplotGrob(g2)) control <- grab_axis(g1, side = "l") test <- grab_axis(g2, side = "l") expect_equal(length(test$grobs[[2]]$y), length(control$grobs[[2]]$y)) g1 <- p + scale_x_discrete(guide = "axis") g2 <- p + scale_x_discrete(guide = "prism_offset") expect_silent(ggplotGrob(g1)) expect_silent(ggplotGrob(g2)) control <- grab_axis(g1, side = "b") test <- grab_axis(g2, side = "b") expect_equal(length(test$grobs[[1]]$x), length(control$grobs[[1]]$x)) g1 <- p + scale_y_continuous(guide = "axis") + coord_flip() g2 <- p + scale_y_continuous(guide = "prism_offset") + coord_flip() expect_silent(ggplotGrob(g1)) expect_silent(ggplotGrob(g2)) control <- grab_axis(g1, side = "b") test <- grab_axis(g2, side = "b") expect_equal(length(test$grobs[[1]]$x), length(control$grobs[[1]]$x))
E4.Diagnostics.tempplot<-function(participant_list,rdslocation.binnedtemp,rdslocation.buttonpress,plotlocation.temp,Plot_E4s=TRUE,TempType="C"){ ts_time<-TEMP_C<-TEMP_F<-E4_serial<-NULL if(participant_list[1]=="helper"){participant_list<-get("participant_list",envir=E4tools.env)} if(rdslocation.binnedtemp=="helper"){rdslocation.binnedtemp<-get("rdslocation.binnedtemp",envir=E4tools.env)} if(rdslocation.buttonpress=="helper"){rdslocation.buttonpress<-get("rdslocation.buttonpress",envir=E4tools.env)} if(plotlocation.temp=="helper"){plotlocation.temp<-get("plotlocation.temp",envir=E4tools.env)} Buttons<-readRDS(paste(rdslocation.buttonpress,"button_presses.rds",sep="")) for(NUMB in participant_list){ if(file.exists(paste(rdslocation.binnedtemp,NUMB,"_binnedEDA.rds",sep=""))==FALSE){ message(paste("No data for ",NUMB,". Was EDA part 1 or EDA binning not run or did it fail for this participant? Going on to next participant.",sep="")) next } PlotData<-readRDS(paste(rdslocation.binnedtemp,NUMB,"_binnedEDA.rds",sep="")) if(nrow(PlotData)>10){ message(paste("Starting participant",NUMB)) if(TempType=="F" & length(PlotData$TEMP_F)==0){PlotData$TEMP_F<-(PlotData$TEMP_C*(9/5))+32} if(PlotData$ts[1]>10000000000){PlotData$ts_date<-anytime::anydate(PlotData$ts/1000)} if(PlotData$ts[1]<10000000000){PlotData$ts_date<-anytime::anydate(PlotData$ts)} PlotData$ts_time<-data.table::as.ITime(PlotData$ts/1000) XX<-as.POSIXlt(PlotData$ts/1000,origin="1970-01-01") PlotData$ts_time<-as.POSIXct(as.character(paste("2019-01-01 ",chron::times(format(XX, "%H:%M:%S"))," EST",sep=""))) Plot_Buttons<-Buttons[Buttons$ID==NUMB,] if(nrow(Plot_Buttons)>0){ if(Plot_Buttons$ts[1]>10000000000){Plot_Buttons$Press_Time<-anytime::anytime(Plot_Buttons$ts/1000)} if(Plot_Buttons$ts[1]<10000000000){Plot_Buttons$Press_Time<-anytime::anytime(Plot_Buttons$ts)} Plot_Buttons$ts_time<-as.POSIXct(as.character(paste("2019-01-01 ",chron::times(format(Plot_Buttons$Press_Time, "%H:%M:%S"))," EST",sep=""))) if(Plot_Buttons$ts[1]>10000000000){Plot_Buttons$ts_date<-anytime::anydate(Plot_Buttons$ts/1000)} if(Plot_Buttons$ts[1]<10000000000){Plot_Buttons$ts_date<-anytime::anydate(Plot_Buttons$ts)} } if(nrow(Plot_Buttons)==0){message(paste("NOTE: No button press data for participant number ",NUMB,". 
The plot will show temperature data only for this participant."))} BinSize<-round((PlotData$ts[5]-PlotData$ts[4])/(60*1000),0) if(TempType=="C"){ PlotOut<-ggplot2::ggplot()+ ggplot2::geom_path(ggplot2::aes(x=ts_time,y=TEMP_C,group=1),data=PlotData)+ ggplot2::facet_wrap(~ts_date)+ ggplot2::scale_x_time(labels = scales::time_format("%H:%M",tz = "America/New_York"),breaks=seq(as.POSIXct("2019-01-01 00:00:00 EST"),as.POSIXct("2019-01-01 24:00:00 EST"),"6 hours"))+ ggplot2::labs(x="Time of Day",y="Binned Temperature (Degrees C)",title=paste("All data for participant ID ",NUMB,sep=""),subtitle=(paste("(",BinSize," minute bins)",sep=""))) } if(TempType=="F"){ PlotOut<-ggplot2::ggplot()+ ggplot2::geom_path(ggplot2::aes(x=ts_time,y=TEMP_F,group=1),data=PlotData)+ ggplot2::facet_wrap(~ts_date)+ ggplot2::scale_x_time(labels = scales::time_format("%H:%M",tz = "America/New_York"),breaks=seq(as.POSIXct("2019-01-01 00:00:00 EST"),as.POSIXct("2019-01-01 24:00:00 EST"),"6 hours"))+ ggplot2::labs(x="Time of Day",y="Binned Temperature (Degrees F)",title=paste("All data for participant ID ",NUMB,sep=""),subtitle=(paste("(",BinSize," minute bins)",sep=""))) } if(nrow(Plot_Buttons)>0){PlotOut<-PlotOut+ggplot2::geom_vline(ggplot2::aes(xintercept=ts_time),data=Plot_Buttons)} if(Plot_E4s==TRUE){PlotOut<-PlotOut+ggplot2::geom_line(ggplot2::aes(x=ts_time,y=0.1,group=E4_serial,linetype=E4_serial),data=PlotData)} if(!dir.exists(plotlocation.temp)==TRUE){dir.create(plotlocation.temp,recursive=TRUE)} ggplot2::ggsave(filename=paste(plotlocation.temp,"TEMPplot_",NUMB,".pdf",sep=""),plot=PlotOut,width=11,height=8.5,units="in") } if(nrow(PlotData)<10){message(paste("No temperature data for ",NUMB,", going to next participant.",sep=""))} } }
print_moran <- function(model, caption = NULL, verbose = TRUE){ if(inherits(model, "rf") | inherits(model, "rf_repeat") | inherits(model, "rf_spatial")){ x <- model$residuals$autocorrelation$per.distance } x <- x[, c("distance.threshold", "moran.i", "p.value", "interpretation")] if(!("model" %in% colnames(x))){ colnames(x) <- c("Distance", "Moran's I", "P value", "Interpretation") x.hux <- huxtable::hux(x) %>% huxtable::set_bold( row = 1, col = huxtable::everywhere, value = TRUE ) %>% huxtable::set_all_borders(TRUE) huxtable::number_format(x.hux)[2:nrow(x.hux), 2:3] <- 3 huxtable::number_format(x.hux)[2:nrow(x.hux), 1] <- 1 } if("model" %in% colnames(x) & !("repetition" %in% colnames(x))){ colnames(x) <- c("Distance", "Moran's I", "P value", "Interpretation", "Model") x <- x[, c("Model", "Distance", "Moran's I", "P value", "Interpretation")] x.hux <- huxtable::hux(x) %>% huxtable::set_bold( row = 1, col = huxtable::everywhere, value = TRUE ) %>% huxtable::set_bold( col = 1, row = huxtable::everywhere, value = TRUE ) %>% huxtable::set_all_borders(TRUE) huxtable::number_format(x.hux)[2:nrow(x.hux), 3:4] <- 3 huxtable::number_format(x.hux)[2:nrow(x.hux), 2] <- 1 } if(!is.null(caption)){ huxtable::caption(x.hux) <- caption } huxtable::print_screen(x.hux, colnames = FALSE) }
slu_date_to_sqlite <- function(date_r){ as.integer(round((date_r - as.Date("1970-1-1"))))}
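## Example for the converter above: it returns an SQLite-style day count since
## 1970-01-01.
slu_date_to_sqlite(as.Date("2020-01-15"))  # 18276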
library(tidymodels) library(nycflights13) library(doMC) library(rlang) library(xgboost) library(vctrs) num_resamples <- 5 num_grid <- 10 num_cores <- 2 preproc <- "light preprocessing" par_method <- "resamples" set.seed(123) flight_data <- flights %>% mutate( arr_delay = ifelse(arr_delay >= 30, "late", "on_time"), arr_delay = factor(arr_delay), date = as.Date(time_hour) ) %>% inner_join(weather, by = c("origin", "time_hour")) %>% select(dep_time, flight, origin, dest, air_time, distance, carrier, date, arr_delay, time_hour) %>% na.omit() %>% mutate_if(is.character, as.factor) %>% sample_n(4000) flights_rec <- recipe(arr_delay ~ ., data = flight_data) %>% update_role(flight, time_hour, new_role = "ID") %>% step_date(date, features = c("dow", "month")) %>% step_holiday(date, holidays = timeDate::listHolidays("US")) %>% step_rm(date) %>% step_dummy(all_nominal_predictors()) %>% step_zv(all_predictors()) preproc_data <- flights_rec %>% prep() %>% juice(all_predictors(), all_outcomes()) xgboost_spec <- boost_tree(trees = tune(), min_n = tune(), tree_depth = tune(), learn_rate = tune(), loss_reduction = tune(), sample_size = tune()) %>% set_mode("classification") %>% set_engine("xgboost") if (preproc != "no preprocessing") { xgboost_workflow <- workflow() %>% add_recipe(flights_rec) %>% add_model(xgboost_spec) set.seed(33) bt <- bootstraps(flight_data, times = num_resamples) } else { xgboost_workflow <- workflow() %>% add_variables(arr_delay, predictors = c(everything())) %>% add_model(xgboost_spec) set.seed(33) bt <- bootstraps(preproc_data, times = num_resamples) } set.seed(22) xgboost_grid <- xgboost_workflow %>% parameters() %>% update(trees = trees(c(100, 2000))) %>% grid_max_entropy(size = num_grid) if (num_cores > 1) { registerDoMC(cores=num_cores) } roc_res <- metric_set(roc_auc) ctrl <- control_grid(parallel_over = par_method) grid_time <- system.time({ set.seed(99) xgboost_workflow %>% tune_grid(bt, grid = xgboost_grid, metrics = roc_res, control = ctrl) }) times <- tibble::tibble( elapsed = grid_time[3], num_resamples = num_resamples, num_grid = num_grid, num_cores = num_cores, preproc = preproc, par_method = par_method ) save(times, file = paste0("xgb_", num_cores, format(Sys.time(), "_%Y_%m_%d_%H_%M_%S.RData"))) sessioninfo::session_info() if (!interactive()) { q("no") }
hypersampleplan.CL <- function(n, x, N) {
  # Build the (n+1) x (n+1) matrix M by a three-term recurrence, one row per c = 0..n
  M <- vector(length = n + 1)
  c <- 0
  M[1] <- 1
  M[2] <- 1 - 2 * c / n
  for (i in 1:length(M)) {
    M[i + 2] <- (((2 * i + 1) * (n - 2 * c) * M[i + 1]) - (i * (n + i + 1) * M[i])) / ((i + 1) * (n - i))
    length(M) <- n + 1
  }
  m <- vector(length = n + 1)
  for (c in 1:n) {
    m[1] <- 1
    m[2] <- 1 - 2 * c / n
    for (i in 1:length(m)) {
      m[i + 2] <- (((2 * i + 1) * (n - 2 * c) * m[i + 1]) - (i * (n + i + 1) * m[i])) / ((i + 1) * (n - i))
      length(m) <- n + 1
    }
    M <- rbind(M, m)
  }
  # Column-wise cumulative head/tail sums, normalized by the column sums of squares
  SA <- M
  SB <- M
  for (i in 1:(n + 1)) {
    SA[, i] <- cumsum(SA[, i])
    SB[, i] <- (sum(SB[, i])) - (cumsum(SB[, i]))
  }
  for (i in 1:(n + 1)) {
    SA[, i] <- SA[, i] / (sum(M[, i]^2))
    SB[, i] <- SB[, i] / (sum(M[, i]^2))
  }
  SA <- t(SA)
  SB <- t(SB)
  # Same recurrence over c = 0..N for the lot size N
  V <- vector(length = n + 1)
  c <- 0
  V[1] <- 1
  V[2] <- 1 - 2 * c / N
  for (i in 1:(n + 1)) {
    V[i + 2] <- (((2 * i + 1) * (N - 2 * c) * V[i + 1]) - (i * (N + i + 1) * V[i])) / ((i + 1) * (N - i))
    length(V) <- n + 1
  }
  v <- vector(length = n + 1)
  for (c in 1:N) {
    v[1] <- 1
    v[2] <- 1 - 2 * c / N
    for (i in 1:(n + 1)) {
      v[i + 2] <- (((2 * i + 1) * (N - 2 * c) * v[i + 1]) - (i * (N + i + 1) * v[i])) / ((i + 1) * (N - i))
      length(v) <- n + 1
    }
    V <- rbind(V, v)
  }
  Y <- matrix(data = 1, nrow = N + 1, ncol = n + 1)
  X.upper <- V %*% SA
  Z.upper <- Y - X.upper
  Z.upper[Z.upper <= 0.000001] <- 0
  X.lower <- V %*% SB
  Z.lower <- Y - X.lower
  Z.lower <- Y - Z.lower
  Z.lower[Z.lower <= 0.000001] <- 0
  # Keep rows whose coverage falls in the hard-coded bands: 95-98% (upper), 2-5% (lower)
  i <- 1
  j <- 1
  z.upper <- as.vector(c((i - 1), (j - 1), Z.upper[i, j]))
  upper <- vector(length = 3)
  for (i in 1:(N + 1)) {
    if (Z.upper[i, x + 1] <= 0.9800 & Z.upper[i, x + 1] >= 0.9500) {
      upper[1] <- i - 1
      upper[2] <- x
      upper[3] <- Z.upper[i, x + 1]
      z.upper <- rbind(z.upper, upper)
    }
  }
  z.upper <- z.upper[-1, ]
  nrow.upper <- nrow(z.upper)
  rownames(z.upper) <- c(paste("CL.Upper.", 1:(nrow.upper), sep = ""))
  i <- 1
  j <- 1
  z.lower <- as.vector(c((i - 1), j, Z.lower[i, j]))
  lower <- vector(length = 3)
  for (i in 1:(N + 1)) {
    if (Z.lower[i, x] <= 0.050 & Z.lower[i, x] >= 0.020) {
      lower[1] <- i - 1
      lower[2] <- x
      lower[3] <- Z.lower[i, x]
      z.lower <- rbind(z.lower, lower)
    }
  }
  z.lower <- z.lower[-1, ]
  nrow.lower <- nrow(z.lower)
  rownames(z.lower) <- c(paste("CL.Lower.", 1:(nrow.lower), sep = ""))
  hyper.CL <- rbind(z.lower, z.upper)
  colnames(hyper.CL) <- c("n", "k", "Conf.Limits")
  return(hyper.CL)
}
coef.CountsEPPM <- function(object, prtpar = c("full", "mean", "scale.factor"), ...) { if (missing(prtpar)) { prtpar <- c("full") } if ((prtpar!="full") & (prtpar!="mean") & (prtpar!="scale.factor")) { cat("\n","unknown prtpar option","\n") coefficients <- NULL } else { if (prtpar=="full") { if (object$model.name=="Poisson") { coefficients <- object$coefficients$mean.est } else { coefficients <- c(object$coefficients$mean.est, object$coefficients$scalef.est) } } else { npar.mean <- ncol(object$covariates.matrix.mean) if (prtpar=="mean") { coefficients <- object$coefficients$mean.est } if (prtpar=="scale.factor") { if (is.null(object$coefficients$scalef.est)==TRUE) { coefficients <- NULL } else { coefficients <- object$coefficients$scalef.est } } } } return(coefficients) }
object_remove <- function() {
  ui <- miniUI::miniPage(
    miniUI::gadgetTitleBar("Object Remover"),
    miniUI::miniContentPanel(
      shiny::radioButtons("pattern", "Remove Objects",
                          c("Starting With" = "starting with",
                            "Ending With" = "ending with",
                            "According to Regex Pattern" = "according to the regex pattern")),
      shiny::textInput("txt", "Text Pattern", ""),
      shiny::checkboxGroupInput("checkGroup", label = shiny::strong("Object Types"),
                                choices = list("Data Frame" = 1L, "Function" = 2L, "Other" = 3L),
                                selected = 1L:3L),
      shiny::strong("Objects to be removed"),
      shiny::verbatimTextOutput("objects")
    )
  )
  server <- function(input, output, session) {
    reactivePattern <- shiny::reactive({
      if (input$pattern == "starting with") {
        paste0("^\\Q", input$txt, "\\E")
      } else if (input$pattern == "ending with") {
        paste0("\\Q", input$txt, "\\E$")
      } else {
        input$txt
      }
    })
    reactiveObj <- shiny::reactive({
      obj_txt <- ls(pattern = reactivePattern(), envir = globalenv())
      obj <- lapply(.GlobalEnv, class)[obj_txt]
      if (!1L %in% input$checkGroup) {
        obj <- obj[!grepl("data\\.frame", obj)]
      }
      if (!2L %in% input$checkGroup) {
        obj <- obj[!grepl("function", obj)]
      }
      if (!3L %in% input$checkGroup) {
        obj <- obj[grepl("function|data\\.frame", obj)]
      }
      names(obj)
    })
    output$objects <- shiny::renderPrint({
      reactiveObj()
    })
    shiny::observeEvent(input$done, {
      msg <- paste0("Removing Objects:\n(",
                    paste(c("Data Frame", "Function", "Other")[as.integer(input$checkGroup)], collapse = ", "),
                    ") ", input$pattern, " '", input$txt, "'", "\nAre you sure?")
      rm_confirm <- rstudioapi::showQuestion(title = "objectremover", message = msg)
      if (rm_confirm) {
        rm(list = reactiveObj(), envir = globalenv())
        shiny::stopApp(message(
          "Removed objects (",
          paste(c("Data Frame", "Function", "Other")[as.integer(input$checkGroup)], collapse = ", "),
          ") ", input$pattern, " '", input$txt, "'")
        )
      }
    })
    shiny::observeEvent(input$cancel, {
      shiny::stopApp(message("No objects removed"))
    })
  }
  shiny::runGadget(ui, server)
}
hutils::weighted_ntile
biDimColors <- function(start, mid, end, length, show=TRUE) { startToMid <- grDevices::colorRampPalette(c(start, mid))(ceiling(length/2)); midToEnd <- grDevices::colorRampPalette(c(mid, end))(ceiling(length/2)); res <- c(startToMid, utils::tail(midToEnd, -1)); if (show) { graphics::plot(rep(1,length), col=res, pch=15, cex=3, axes=FALSE, xlab="", ylab=""); } return(res); }
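## Usage sketch for biDimColors() above: a 9-step blue-white-red ramp,
## returned invisibly without plotting.
biDimColors("blue", "white", "red", length = 9, show = FALSE)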
"sim_rc"
ssize.Fvary<-function(X,beta,L=NULL,dn,a,b,fdr=0.05,power=0.8,pi0=0.95,maxN=20,cex.title=1.15,cex.legend=1){ XTX<-t(X)%*%X B<-beta if (length(L)==0){L<-diag(length(B))} k<-length(L[1,]) N<-maxN getAvgFcdf_varySigma<-function(c){ intF<-function(rho){ ncF<-pf(q=c,df1=k,df2=df,ncp=rho*lambda)*(1/(gamma(a)*(1/b)^a)*rho^(a-1)*exp(-rho*b)) return(ncF) } val.int<-integrate(f=intF,lower=0,upper=Inf,abs.tol=1e-10)$value return(val.int) } FVarfun<-function(c){ ratio<-fdr*(1-p)/((1-fdr)*p) dif<-abs((1-pf(q=c,df1=k,df2=df))/(1-getAvgFcdf_varySigma(c))-ratio) return(dif) } crit<-NULL pwr2<-NULL ssize<-matrix(0,nrow=length(pi0),ncol=3) colnames(ssize)<-c("pi0","ssize","power") up.start<-100 for(i in 1:length(pi0)){ p<-pi0[i]; pwr.new<-0 up<-up.start for(n in 2:N){ E<-t(L)%*%solve(t(X)%*%X)%*%L/n lambda<-t(t(L)%*%B)%*%solve(E)%*%(t(L)%*%B) df<-dn(n) ci<-optimize(f=FVarfun,interval=c(0,up))$min up<-ci if((abs(ci-up.start)>=1)){pwr.new<-1-getAvgFcdf_varySigma(ci);crit.new<-ci} if((abs(ci-up.start)<1)&(pwr.new!=1)){pwr.new<-0;crit.new<-NA} crit<-c(crit,crit.new) pwr2<-c(pwr2,pwr.new) if(pwr2[(i-1)*(N-1)+n-1]>=power & ssize[i,1]==0){ ssize[i,]<-c(p,n,pwr2[(i-1)*(N-1)+n-1]) } } } ssize[,1]<-pi0 if(sum(ssize==0)>0){warning("Desired power not achieved for at least one pi0")} ssize[ssize==0]<-NA pwrMatrix<-matrix(c(2:N,pwr2),ncol=length(pi0)+1,byrow=FALSE) for(i in 1:length(pi0)){ if(i==1){ plot(2:N,pwrMatrix[,i+1],col=i,xlim=c(0,N),ylim=c(0,1),xlab="",ylab="",pch=16) lines(2:N,pwrMatrix[,i+1],col=i,lty=i) } if(i!=1){ points(2:N,pwrMatrix[,i+1],col=i,pch=16) lines(2:N,pwrMatrix[,i+1],col=i,lty=i) } } abline(h=power,lty=2,lwd=2) abline(v=0:N,h=0.1*(0:10),col="gray",lty=3) title(xlab="Sample size (n)", ylab="Power") mtext(bquote("Average power vs. sample size with specified design matrix,"), cex=cex.title,padj=-2.35) mtext(bquote(paste("fdr=",.(round(fdr,4)),", and ",sigma[g]^2,"~IG(",.(round(a,4)),",",.(round(b,4)),")")), cex=cex.title,padj=-0.1) legend(x=N,y=0,xjust=1,yjust=0,col=1:i,pch=c(16,16,16),lty=1:length(pi0), legend=as.character(pi0),bg="white",title=expression(pi[0]),cex=cex.legend) pwrMatrix<-round(pwrMatrix,7) colnames(pwrMatrix)<-c("n",as.character(pi0)) critMatrix<-matrix(c(2:N,crit),ncol=length(pi0)+1,byrow=FALSE) colnames(critMatrix)<-c("n",as.character(pi0)) ret<-NULL ret$ssize<-ssize ret$power<-pwrMatrix ret$crit.vals<-critMatrix return(ret) }
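## Usage sketch for ssize.Fvary() above (commented out; the design is a
## hypothetical two-group layout, dn() gives the error df for a sample of
## size n, and gene-level variances are assumed Inverse-Gamma(a, b)):
# X <- cbind(rep(1, 2), c(0, 1))  # intercept + group indicator
# ssize.Fvary(X = X, beta = c(0, 1), dn = function(n) 2 * n - 2,
#             a = 3, b = 1, fdr = 0.05, power = 0.8, pi0 = c(0.9, 0.95))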
"boa.menu.file" <- function() { mtitle <- "\nFILE MENU\n=========" choices <- c("Back", "-----------------------+", "Import Data >> |", "Save Session |", "Load Session |", "Exit BOA |", "-----------------------+") idx <- 1 while(idx > 0) { idx <- menu(choices, title = mtitle) switch(idx, "1" = idx <- -1, "2" = NULL, "3" = idx <- boa.menu.import(), "4" = { saveas <- "y" cat("\nEnter name of object to which to save the session", "data [none]\n") value <- scan(what = "", n = 1, strip.white = TRUE) if(length(value)) { if(!boa.save(value)) { cat("Object already exists. Overwrite (y/n) [n]?\n") saveas <- scan(what = "", n = 1, strip.white = TRUE) } if(length(saveas) > 0 && saveas == "y") { boa.save(value, replace = TRUE) cat("+++ Data successfully saved +++\n") } } }, "5" = { cat("\nEnter name of object to load [none]\n") value <- scan(what = "", n = 1, strip.white = TRUE) if(length(value) && boa.load(value)) cat("+++ Data successfully loaded +++\n") }, "6" = { cat("\nDo you really want to EXIT (y/n) [n]?\n") value <- scan(what = "", n = 1, strip.white = TRUE) if(length(value) > 0 && value == "y") idx <- -99 }, "7" = NULL ) } return(abs(idx)) }
context("racusum_beta_arl_int") test_that("Different integration algorithms, detecting deterioration", { skip_on_cran() skip_if(SKIP == TRUE, "skip this test now") h <- 4.5 N <- 70 RQ <- 1 g0 <- -3.6798 g1 <- 0.0768*71 shape1 <- 1 shape2 <- 3 tol <- 10^-6 expect_equal(racusum_beta_arl_int(h=h, N=N, RA=2, RQ=RQ, g0=g0, g1=g1, shape1=shape1, shape2=shape2, pw=TRUE), 4485.203, tolerance=tol) expect_equal(racusum_beta_arl_int(h=h, N=N, RA=2, RQ=RQ, g0=g0, g1=g1, shape1=shape1, shape2=shape2, pw=FALSE), 4561.862, tolerance=tol) expect_equal(racusum_beta_arl_int(h=h, N=N, RA=1/2, RQ=RQ, g0=g0, g1=g1, shape1=shape1, shape2=shape2, pw=TRUE), 5731.772, tolerance=tol) expect_equal(racusum_beta_arl_int(h=h, N=N, RA=1/2, RQ=RQ, g0=g0, g1=g1, shape1=shape1, shape2=shape2, pw=FALSE), 5728.431, tolerance=tol) })
if (requireNamespace("quanteda", quietly = TRUE)) { test_that("can tidy a quanteda dictionary", { lst <- list( terror = c("terrorism", "terrorists", "threat"), economy = c("jobs", "business", "grow", "work") ) d <- quanteda::dictionary(lst) td <- tidy(d) expect_s3_class(td, "tbl_df") expect_type(td$category, "character") expect_type(td$word, "character") expect_equal(nrow(td), 7) expect_equal(sort(unique(td$category)), c("economy", "terror")) expect_equal( sort(unique(td$word)), sort(unique(c(lst[[1]], lst[[2]]))) ) }) }
kpmfe.fun.default <- function(Vec,h, type_data=c("discrete","continuous"), ker=c("bino","triang","dirDU"), x=NULL, a=1,c=2,...) { V=data.frame(table(Vec),row.names=NULL) N=V$Freq if(is.null(x)){ if(ker=="dirDU"){x=0:(max(Vec))} else {x=0:(max(Vec)+2)} } t1=rep(0,length(x)) t2=rep(0,length(x)) n <- length(x) f0=c(N/sum(N),rep(0,length(x)-length(N))) m=matrix(0,n,length(Vec)) for(i in 1:n){ m[i,]= kef(x[i],Vec,h,type_data,ker,a,c) } res<-apply(m,1,mean) result<-res/sum(res) E0=sum((result-f0)^2) for (i in 1:n){ t1[i]=paste(x[i],";",f0[i]) t2[i]=paste(x[i],";",result[i]) } structure(list(data=Vec,n=length(Vec),eval.points= x,h=h, kernel=ker,C_n=sum(res),ISE_0 = E0,f_0=t1,f_n=t2,f0=f0,est.fn=result),class="kpmfe.fun") }
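## Usage sketch for the discrete kernel pmf estimator above (commented out;
## it assumes the package's kef() kernel helper is on the search path):
# set.seed(1)
# counts <- rpois(200, lambda = 3)
# est <- kpmfe.fun.default(counts, h = 0.2, type_data = "discrete", ker = "bino")
# est$ISE_0  # integrated squared error against the empirical pmf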
tam_rsessinfo <- function() { si <- Sys.info() si2 <- utils::sessionInfo() paste0( si2$R.version$version.string, " ", si2$R.version$system , " | nodename=", si["nodename"], " | login=", si["login"] ) }
NULL lasso_cv_glmnet_bin_min<-function(X,Y){ requireNamespace("glmnet") resultat<-glmnet::cv.glmnet(X,Y,family="binomial", type.measure = "class",nfolds=10) indice<-resultat$lambda.min resultat<-glmnet::glmnet(X,Y,family="binomial") as.vector(predict(resultat,type="coefficients",s=indice)) } lasso_cv_glmnet_bin_1se<-function(X,Y){ requireNamespace("glmnet") resultat<-glmnet::cv.glmnet(X,Y,family="binomial", type.measure = "class",nfolds=10) indice<-resultat$lambda.1se resultat<-glmnet(X,Y,family="binomial") as.vector(predict(resultat,type="coefficients",s=indice)) } lasso_glmnet_bin_AICc<-function(X,Y){ requireNamespace("glmnet") glmnet.fit <- glmnet::glmnet(X,Y,family="binomial",standardize=F) subSample = 1:min(ncol(X),100) if(is.factor(Y)){Ynum=unclass(Y)-1} else {Ynum=Y} AICc.gn.median <- AICc_glmnetB(X,Ynum,glmnet.fit,alpha=1,subSample, reducer='median') resultat<-vector("numeric",ncol(X)) resultat[AICc.gn.median$bestSet]<-AICc.gn.median$model$beta return(resultat) } lasso_glmnet_bin_BIC<-function(X,Y){ glmnet.fit <- glmnet::glmnet(X,Y,family="binomial",standardize=F) subSample = 1:min(ncol(X),100) if(is.factor(Y)){Ynum=unclass(Y)-1} else {Ynum=Y} BIC.gn.median <- BIC_glmnetB(X,Ynum,glmnet.fit,alpha=1,subSample, reducer='median') resultat<-vector("numeric",ncol(X)) resultat[BIC.gn.median$bestSet]<-BIC.gn.median$model$beta return(resultat) } lasso_cv_lars_min<-function(X,Y){ resultat<-lars::cv.lars(X,Y,plot.it=FALSE,index=seq(0,1,0.005),K=5) indice<-resultat$index[which(resultat$cv==min(resultat$cv))[1]] resultat<-lars::lars(X,Y) lars::predict.lars(resultat,type="coef",mode="fraction",s=indice)$coefficients } lasso_cv_lars_1se<-function(X,Y){ resultat<-lars::cv.lars(X,Y,plot.it=FALSE,index=seq(0,1,0.005),K=5) indice<-resultat$index[which(resultat$cv+resultat$cv.error==min(resultat$cv+resultat$cv.error))[1]] resultat<-lars::lars(X,Y) predict(resultat,type="coef",mode="fraction",s=indice)$coefficients } lasso_cv_glmnet_min<-function(X,Y){ requireNamespace("glmnet") resultat<-glmnet::cv.glmnet(X,Y,nfolds=10) coefvec<-try(as.vector(coef(resultat,s="lambda.min")[-1])) if(!is.vector(coefvec)){coefvec<-rep(0,ncol(X))} return(coefvec) } lasso_cv_glmnet_min_weighted<-function(X,Y,priors){ requireNamespace("glmnet") if(is.null(priors)) priors<-rep(1,ncol(X)) resultat<-glmnet::cv.glmnet(X,Y,nfolds=10,penalty.factor=priors) coefvec<-try(as.vector(coef(resultat,s="lambda.min")[-1])) if(!is.vector(coefvec)){coefvec<-rep(0,ncol(X))} return(coefvec) } lasso_cv_glmnet_1se<-function(X,Y){ requireNamespace("glmnet") resultat<-glmnet::cv.glmnet(X,Y,nfolds=10) coefvec<-try(as.vector(coef(resultat,s="lambda.1se")[-1])) if(!is.vector(coefvec)){coefvec<-rep(0,ncol(X))} return(coefvec) } lasso_cv_glmnet_1se_weighted<-function(X,Y,priors){ requireNamespace("glmnet") if(is.null(priors)) priors<-rep(1,ncol(X)) resultat<-glmnet::cv.glmnet(X,Y,nfolds=10,penalty.factor=priors) coefvec<-try(as.vector(coef(resultat,s="lambda.1se")[-1])) if(!is.vector(coefvec)){coefvec<-rep(0,ncol(X))} return(coefvec) } lasso_msgps_Cp<-function(X,Y,penalty="enet"){ fit <- msgps(X,Y,penalty=penalty) round(coef(fit)[-1,1],6) } lasso_msgps_AICc<-function(X,Y,penalty="enet"){ fit <- msgps(X,Y,penalty=penalty) round(coef(fit)[-1,2],6) } lasso_msgps_GCV<-function(X,Y,penalty="enet"){ fit <- msgps(X,Y,lambda=0,penalty=penalty) round(coef(fit)[-1,3],6) } lasso_msgps_BIC<-function(X,Y,penalty="enet"){ fit <- msgps(X,Y,lambda=0,penalty=penalty) round(coef(fit)[-1,4],6) } enetf_msgps_Cp<-function(X,Y,penalty="enet",alpha=0.5){ fit <- 
msgps(X,Y,penalty=penalty,alpha=alpha) round(coef(fit)[-1,1],6) } enetf_msgps_AICc<-function(X,Y,penalty="enet",alpha=0.5){ fit <- msgps(X,Y,penalty=penalty,alpha=alpha) round(coef(fit)[-1,2],6) } enetf_msgps_GCV<-function(X,Y,penalty="enet",alpha=0.5){ fit <- msgps(X,Y,lambda=0,penalty=penalty,alpha=alpha) round(coef(fit)[-1,3],6) } enetf_msgps_BIC<-function(X,Y,penalty="enet",alpha=0.5){ fit <- msgps(X,Y,lambda=0,penalty=penalty,alpha=alpha) round(coef(fit)[-1,4],6) } lasso_cascade<-function(M,Y,K,eps=10^-5,cv.fun ){ model<-try(cv.lars1(t(M),(Y),intercept=FALSE,K=K,plot.it=FALSE,eps=eps,cv.fun=cv.fun )) n<-try(model$index[which(model$cv %in% min(model$cv))]) model<-try(lars::lars(t(M),(Y),intercept=FALSE,eps=eps)) repu<-try(lars::coef.lars(model,s=n,mode="fraction")) if(!is.vector(repu)){repu<-rep(0,dim(M)[1])} return(repu) } cv.lars1 <- function (x, y, K = 10, index, trace = FALSE, plot.it = TRUE, se = TRUE, type = c("lasso", "lar", "forward.stagewise", "stepwise"), mode = c("fraction", "step"), cv.fun , ...) { type = match.arg(type) if (missing(mode)) { mode = switch(type, lasso = "fraction", lar = "step", forward.stagewise = "fraction", stepwise = "step") } else mode = match.arg(mode) all.folds <- cv.fun(length(y), K) if (missing(index)) { index = seq(from = 0, to = 1, length = 100) if (mode == "step") { fit = lars::lars(x, y, type = type, ...) nsteps = nrow(fit$beta) maxfold = max(sapply(all.folds, length)) nsteps = min(nsteps, length(y) - maxfold) index = seq(nsteps) } } residmat <- matrix(0, length(index), K) for (i in seq(K)) { omit <- all.folds[[i]] fit <- lars::lars(x[-omit, , drop = FALSE], y[-omit], trace = trace, type = type, ...) fit <- lars::predict.lars(fit, x[omit, , drop = FALSE], mode = mode, s = index)$fit if (length(omit) == 1) fit <- matrix(fit, nrow = 1) residmat[, i] <- apply((y[omit] - fit)^2, 2, mean) if (trace) cat("\n CV Fold", i, "\n\n") } cv <- apply(residmat, 1, mean) cv.error <- sqrt(apply(residmat, 1, var)/K) object <- list(index = index, cv = cv, cv.error = cv.error, mode = mode) if (plot.it) lars::plotCVLars(object, se = se) invisible(object) } NULL lasso_msgps_all <- function (X, Y, penalty = "enet") { fit <- msgps::msgps(X, Y, penalty = penalty) round(coef(fit)[-1, ], 6) } enet_msgps_all <- function (X, Y, penalty = "enet", alpha = 0.5) { fit <- msgps::msgps(X, Y, penalty = penalty, alpha = alpha) round(coef(fit)[-1, ], 6) } alasso_msgps_all = function(X, Y, penalty = "alasso") { fit <- msgps::msgps(X, Y, penalty = "alasso") round(coef(fit)[-1, ], 6) } alasso_enet_msgps_all = function(X, Y, penalty = "alasso", alpha = 0.5) { fit <- msgps::msgps(X, Y, penalty = "alasso", alpha = alpha) round(coef(fit)[-1, ], 6) } lasso_cv_glmnet_all_5f <- function (X, Y) { requireNamespace("glmnet") resultat <- glmnet::cv.glmnet(X, Y, nfolds = 5) coefvec.1se <- try(as.vector(coef(resultat, s = "lambda.1se")[-1])) if (!is.vector(coefvec.1se)) { coefvec.1se <- rep(0, ncol(X)) } coefvec.min <- try(as.vector(coef(resultat, s = "lambda.min")[-1])) if (!is.vector(coefvec.min)) { coefvec.min <- rep(0, ncol(X)) } return(cbind(lambda.min = coefvec.min, lambda.1se = coefvec.1se)) } spls_spls_all <- function(X, Y, K.seq = c(1:5), eta.seq = (1:9) / 10, fold.val = 5) { cv <- spls::cv.spls( X, Y, K = K.seq, eta = eta.seq, fold = fold.val, plot.it = FALSE ) f <- spls::spls(X, Y, eta = cv$eta.opt, K = cv$K.opt) ci.f <- spls::ci.spls(f) cf <- spls::correct.spls(ci.f, plot.it = FALSE) tempres = cbind(raw_coefs = spls::coef.spls(f), bootstrap_corrected_coefs = cf) colnames(tempres) <- c( 
paste("raw_coefs", "K.opt", cv$K.opt, "eta.opt", cv$eta.opt, sep = "_"), paste( "bootstrap_corrected_coefs", "K.opt", cv$K.opt, "eta.opt", cv$eta.opt, sep = "_" ) ) return(tempres) } varbvs_linear_all <- function (X, Y, include.threshold.list = (1:19) / 20) { fit <- varbvs::varbvs( X, NULL, Y, family = "gaussian", logodds = seq(-3.5, -1, 0.1), sa = 1, verbose = FALSE ) selecvar <- function(fit, include.threshold.val) { res <- coef(fit)[, "averaged"] res[!( rownames(coef(fit)) %in% varbvs::variable.names.varbvs(fit, include.threshold = include.threshold.val) )] <- 0 if (length(grep("(Intercept)", names(res))) > 0) { res <- res[-grep("(Intercept)", names(res))] } if (length(grep("Z", names(res))) > 0) { res <- res[-grep("Z", names(res))] } res <- data.frame(res) colnames(res) <- paste("coef", "varbvs", include.threshold.val, sep = "_") return(res) } tempres = NULL for (thres in include.threshold.list) { tempres <- abind::abind(tempres, selecvar(fit, thres)) } return(tempres) } lasso_cv_glmnet_bin_all <- function (X, Y) { requireNamespace("glmnet") resultat <- glmnet::cv.glmnet(X, Y, family = "binomial", type.measure = "class", nfolds = 5) coefvec.1se <- try(as.vector(coef(resultat, s = "lambda.1se")[-1])) if (!is.vector(coefvec.1se)) { coefvec.1se <- rep(0, ncol(X)) } coefvec.min <- try(as.vector(coef(resultat, s = "lambda.min")[-1])) if (!is.vector(coefvec.min)) { coefvec.min <- rep(0, ncol(X)) } return(cbind(lambda.min = coefvec.min, lambda.1se = coefvec.1se)) } lasso_glmnet_bin_all <- function (X, Y) { requireNamespace("glmnet") glmnet.fit <- glmnet::glmnet(X, Y, family = "binomial", standardize = F) subSample = 1:min(ncol(X), 100) if (is.factor(Y)) { Ynum = unclass(Y) - 1 } else { Ynum = Y } AICc.gn.median <- SelectBoost::AICc_glmnetB(X, Ynum, glmnet.fit, alpha = 1, subSample, reducer = "median") resultat.AICc <- vector("numeric", ncol(X)) resultat.AICc[AICc.gn.median$bestSet] <- AICc.gn.median$model$beta BIC.gn.median <- SelectBoost::BIC_glmnetB(X, Ynum, glmnet.fit, alpha = 1, subSample, reducer = "median") resultat.BIC <- vector("numeric", ncol(X)) resultat.BIC[BIC.gn.median$bestSet] <- BIC.gn.median$model$beta return(cbind(AICc = resultat.AICc, BIC = resultat.BIC)) } splsda_spls_all <- function(X, Y, K.seq = c(1:10), eta.seq = (1:9) / 10) { cv <- spls::cv.splsda( X, Y, fold = 5, K = K.seq, eta = eta.seq, plot.it = FALSE, n.core = 1 ) f <- spls::splsda(X, Y, eta = cv$eta.opt, K = cv$K.opt) tempres = cbind(raw_coefs = spls::coef.splsda(f)) colnames(tempres) <- c(paste("raw_coefs", "K.opt", cv$K.opt, "eta.opt", cv$eta.opt, sep = "_")) return(tempres) } sgpls_spls_all <- function(X, Y, K.seq = c(1:10), eta.seq = (1:9) / 10) { cv <- spls::cv.sgpls( X, Y, fold = 5, K = K.seq, eta = eta.seq, plot.it = FALSE, n.core = 1 ) f <- spls::sgpls(X, Y, eta = cv$eta.opt, K = cv$K.opt) tempres = cbind(raw_coefs = spls::coef.sgpls(f))[-1, , drop = FALSE] colnames(tempres) <- c(paste("raw_coefs", "K.opt", cv$K.opt, "eta.opt", cv$eta.opt, sep = "_")) return(tempres) } varbvs_binomial_all <- function (X, Y, include.threshold.list = (1:19) / 20) { fit <- varbvs::varbvs( X, NULL, Y, family = "binomial", logodds = seq(-3.5, -1, 0.1), sa = 1, verbose = FALSE ) selecvar <- function(fit, include.threshold.val) { res <- coef(fit)[, "averaged"] res[!( rownames(coef(fit)) %in% varbvs::variable.names.varbvs(fit, include.threshold = include.threshold.val) )] <- 0 if (length(grep("(Intercept)", names(res))) > 0) { res <- res[-grep("(Intercept)", names(res))] } if (length(grep("Z", names(res))) > 0) { res <- 
res[-grep("Z", names(res))] } res <- data.frame(res) colnames(res) <- paste("coef", "varbvs", include.threshold.val, sep = "_") return(res) } tempres = NULL for (thres in include.threshold.list) { tempres <- abind::abind(tempres, selecvar(fit, thres)) } return(tempres) }
webpick<-function(){ shiny::runApp(system.file('webpick', package='enviPick')) }
context("sp_elev") library(dplyr) test_that("sp_elev() is silent if `sp` and `elev` share plot dimensions", { species_from_luquillo <- fgeo.x::stem5 elevation_from_luquillo <- fgeo.x::elevation expect_silent( sp_elev(species_from_luquillo, elevation_from_luquillo) ) }) test_that("sp_elev() warns if `sp` and `elev` have different plot dimensions", { species_from_luquillo <- fgeo.x::stem5 elevation_from_luquillo <- fgeo.tool::fgeo_elevation(fgeo.x::elevation) shrink_gx <- round(max(elevation_from_luquillo$gx, na.rm = TRUE) / 2) smaller_elev <- elevation_from_luquillo %>% filter(gx < shrink_gx) expect_warning( sp_elev(species_from_luquillo, smaller_elev), "`sp` and `elev`.*different dimensions" ) })
fm_checkdesign<-function(occasions=NULL,design=NULL,type="F"){ nocc<-occasions Design<-matrix(0,nrow=c(nocc-1),2) pardef<-strsplit(design,"*",fixed=TRUE) yes1<-0;yes2<-0;seqs<-NULL;subcells<-NULL if(type %in% c("F","M")){ Design[,1]<-1:c(nocc-1) pat<-as.character(nocc) for(t in 1:length(pardef)){ tempo<-pardef[[t]] if(length(tempo)==1) { eval(parse(text=paste("seqs<-",tempo[1],sep=""))) if(any(seqs==pat)) yes1<-1 if(yes1==1) stop(paste("The occasions for F or M range from 1 to",nocc-1,sep=" ")) } if(length(tempo)>1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) eval(parse(text=paste("subcells<-",tempo[2],sep=""))) if(any(seqs==pat)) yes1<-1 if(any(subcells==pat)) yes2<-1 if(yes1==1 || yes2==1) stop(paste("The occasions for F or M range from 1 to",nocc-1,sep=" ")) } } pat<-1 yes1<-0 for(t in 1:length(pardef)){ tempo<-pardef[[t]] if(length(tempo)==1) { eval(parse(text=paste("seqs<-",tempo[1],sep=""))) if(any(seqs==1)) yes1<-1 } if(length(tempo)>1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) eval(parse(text=paste("subcells<-",tempo[2],sep=""))) if(any(seqs==pat)) yes1<-1 if(any(subcells==pat)) yes1<-1 } } if(yes1==0) stop(paste("Missing occasion 1 for F or M")) } if(type %in% c("P")){ yes1<-0 Design[,1]<-2:c(nocc) pat<-1 for(t in 1:length(pardef)){ tempo<-pardef[[t]] if(length(tempo)==1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) if(any(seqs==1)) yes1<-1 if(yes1==1) stop(paste("The occasions for P range from 2 to",nocc,sep=" ")) } if(length(tempo)>1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) eval(parse(text=paste("subcells<-",tempo[2],sep=""))) if(any(seqs==pat)) yes1<-1 if(any(subcells==pat)) yes1<-1 if(yes1==1) stop(paste("The occasions for P range from 2 to",nocc,sep=" ")) } } } for(t in 1:length(pardef)){ tempo<-pardef[[t]] if(length(tempo)==1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) index<-which(Design[,1] %in% c(seqs)) first<-c(max(Design[,2])+1) remain<-length(seqs)-1 if(remain>0) dtem<-c(first,c(first+c(1:c(length(seqs)-1)))) else dtem<-first Design[index,2]<-dtem } if(length(tempo)>1){ eval(parse(text=paste("seqs<-",tempo[1],sep=""))) index<-which(Design[,1] %in% c(seqs)) first<-c(max(Design[,2])+1) remain<-length(seqs)-1 if(remain>0) dtem<-c(first,c(first+c(1:c(length(seqs)-1)))) else dtem<-first Design[index,2]<-dtem eval(parse(text=paste("subcells<-",tempo[2],sep=""))) index<-which(Design[,1] %in% c(subcells)) if(length(subcells)>length(dtem)) dtem<-c(dtem,rep(dtem[length(dtem)],c(length(subcells)-length(dtem)))) if(length(subcells)<length(dtem)) dtem<-dtem[1:length(subcells)] Design[index,2]<-dtem } } if(Design[1,2]>0){ for(t in 1:length(Design[,1])){ if(Design[t,2]==0) Design[t,2]<-Design[t-1,2] } } Design<-as.data.frame(Design) names(Design)<-c("Occasions","Parameter") return(Design) }
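## Usage sketch for fm_checkdesign() above, inferred from its parser: each
## design element is a range of occasions, optionally followed by "*" and
## further occasions that reuse the same parameters. With 5 occasions and
## type "F":
# fm_checkdesign(occasions = 5, design = "1:4", type = "F")    # fully time-varying
# fm_checkdesign(occasions = 5, design = "1*2:4", type = "F")  # one shared parameter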
p <- seq(0, 1, length = 1000) plot(p, p * (1 - p), frame = FALSE, type = "l", lwd = 3)
targets::tar_test("tar_knit_raw() works", { skip_pandoc() lines <- c( "---", "title: report", "output_format: html_document", "---", "", "```{r}", "targets::tar_read(data)", "```" ) writeLines(lines, "report.Rmd") targets::tar_script({ library(tarchetypes) list( tar_target(data, data.frame(x = seq_len(26L), y = letters)), tar_knit_raw("report", "report.Rmd", quiet = TRUE) ) }) suppressMessages(targets::tar_make(callr_function = NULL)) expect_equal(sort(targets::tar_progress()$name), sort(c("data", "report"))) out <- targets::tar_read(report) expect_equal(basename(out), c("report.md", "report.Rmd")) suppressMessages(targets::tar_make(callr_function = NULL)) progress <- targets::tar_progress() progress <- progress[progress$progress != "skipped", ] expect_equal(nrow(progress), 0L) targets::tar_script({ library(tarchetypes) list( tar_target(data, data.frame(x = rev(seq_len(26L)), y = letters)), tar_knit_raw("report", "report.Rmd") ) }) suppressMessages(targets::tar_make(callr_function = NULL)) progress <- targets::tar_progress() progress <- progress[progress$progress != "skipped", ] expect_equal(sort(progress$name), sort(c("data", "report"))) }) targets::tar_test("tar_knit_raw() warns about tar_read_raw()", { skip_pandoc() lines <- c( "---", "title: report", "output_format: html_document", "---", "", "```{r}", "targets::tar_read_raw('data')", "```" ) writeLines(lines, "report.Rmd") targets::tar_script({ library(tarchetypes) list( tar_target(data, data.frame(x = seq_len(26L), y = letters)), tar_knit_raw("report", "report.Rmd", quiet = TRUE) ) }) expect_warning( suppressMessages(targets::tar_make(callr_function = NULL)), class = "tar_condition_validate" ) }) targets::tar_test("tar_knit_raw() warns about tar_load_raw()", { skip_pandoc() lines <- c( "---", "title: report", "output_format: html_document", "---", "", "```{r}", "envir <- new.env(parent = emptyenv())", "targets::tar_load_raw('data', envir = envir)", "```" ) writeLines(lines, "report.Rmd") targets::tar_script({ library(tarchetypes) list( tar_target(data, data.frame(x = seq_len(26L), y = letters)), tar_knit_raw("report", "report.Rmd", quiet = TRUE) ) }) expect_warning( suppressMessages(targets::tar_make(callr_function = NULL)), class = "tar_condition_validate" ) }) targets::tar_test("tar_knit_raw(nested) runs from the project root", { skip_pandoc() lines <- c( "---", "title: report", "output_format: html_document", "---", "", "```{r}", "file.create(\"here\")", "```" ) dir.create("out") writeLines(lines, file.path("out", "report.Rmd")) targets::tar_script({ library(tarchetypes) list( tar_knit_raw("report", file.path("out", "report.Rmd")) ) }) expect_false(file.exists("here")) expect_false(file.exists(file.path("out", "here"))) suppressMessages(targets::tar_make(callr_function = NULL)) expect_true(file.exists("here")) expect_false(file.exists(file.path("out", "here"))) })
pamr.geneplot <- function(fit, data, threshold) { par(pch = 1, col = 1) geneid <- data$geneid if(is.null(geneid)) { geneid <- as.character(1:nrow(data$x)) } if(is.null(fit$newy)) { y <- factor(data$y[fit$sample.subset]) } else { y <- factor(fit$newy[fit$sample.subset]) } x <- data$x[fit$gene.subset, fit$sample.subset] geneid <- geneid[fit$gene.subset] nc <- length(unique(y)) aa <- pamr.predict(fit, x, threshold = threshold, type = "nonzero") cen <- pamr.predict(fit, x, threshold = threshold, type = "cen") d <- (cen - fit$centroid.overall)[aa, ]/fit$sd[aa] oo <- order( - apply(abs(d), 1, max)) aa <- aa[oo] ngenes <- length(aa) o <- order(y) xx <- x[aa, o] geneid <- geneid[aa] nc <- length(unique(y)) nn <- c(0, cumsum(table(y))) nrow <- trunc(sqrt(ngenes)) + 1 ncol <- trunc(sqrt(ngenes)) + 1 if(nrow * (ncol - 1) >= ngenes) { ncol <- ncol - 1 } par(mfrow = c(nrow, ncol)) for(i in 1:ngenes) { plot(1:ncol(xx), xx[i, ], type = "n", xlab = "sample", ylab = "expression", axes = FALSE) box() axis(2) for(j in 1:nc) { j1 <- nn[j] + 1 j2 <- nn[j] + table(y)[j] points(j1:j2, xx[i, j1:j2], col = j + 1) } title(main = as.character(geneid[i])) for(j in 1:(nc - 1)) { abline(v = cumsum(table(y))[j] + 0.5, lty = 2) } if(i == 1) { h <- c(0, table(y)) for(j in 2:(nc + 1)) { text(sum(h[1:(j - 1)]) + 0.5 * h[j], max(xx[i, ]), label = levels(y)[j - 1], col = j) } } } par(mfrow = c(1, 1)) }
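## Usage sketch for pamr.geneplot() above (commented out; the data list is
## hypothetical but follows the format pamr expects: expression matrix x,
## class labels y, and gene identifiers):
# mydata <- list(x = expr.matrix, y = class.labels, geneid = gene.ids)
# fit <- pamr.train(mydata)
# pamr.geneplot(fit, mydata, threshold = 4)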
check_las_validity = function(header, data) { is_defined_offsets(header, "stop") is_defined_scalefactors(header, "stop") is_defined_filesourceid(header, "stop") is_defined_version(header, "stop") is_defined_globalencoding(header, "stop") is_defined_date(header, "stop") is_defined_pointformat(header, "stop") is_defined_coordinates(data, "stop") is_valid_offsets(header, "stop") is_valid_scalefactors(header, "stop") is_valid_globalencoding(header, "stop") is_valid_date(header, "stop") is_valid_pointformat(header, "stop") is_valid_extrabytes(header, "stop") is_valid_filesourceid(header, "stop") is_valid_XYZ(data, "stop") is_valid_Intensity(data, "stop") is_valid_ReturnNumber(data, header, "stop") is_valid_NumberOfReturns(data, header, "stop") is_valid_ScanDirectionFlag(data, "stop") is_valid_EdgeOfFlightline(data, "stop") is_valid_Classification(data, header, "stop") is_valid_ScannerChannel(data, "stop") is_valid_SyntheticFlag(data, "stop") is_valid_KeypointFlag(data, "stop") is_valid_WithheldFlag(data, "stop") is_valid_OverlapFlag(data, "stop") is_valid_ScanAngleRank(data, "stop") is_valid_ScanAngle(data, "stop") is_valid_UserData(data, "stop") is_valid_gpstime(data, "stop") is_valid_PointSourceID(data, "stop") is_valid_RGB(data, "stop") is_valid_NIR(data, "stop") is_NIR_in_valid_format(header, data, "warning") is_gpstime_in_valid_format(header, data, "warning") is_RGB_in_valid_format(header, data, "warning") is_ScanAngle_in_valid_format(header, data, "warning") is_ScannerChannel_in_valid_format(header, data, "warning") is_extrabytes_in_accordance_with_data(header, data, "stop") return(invisible()) } check_las_compliance = function(header, data) { is_defined_offsets(header, "stop") is_defined_scalefactors(header, "stop") is_defined_filesourceid(header, "stop") is_defined_version(header, "stop") is_defined_globalencoding(header, "stop") is_defined_date(header, "stop") is_defined_coordinates(data, "stop") is_compliant_ReturnNumber(data, "warning") is_compliant_NumberOfReturns(data, "warning") is_compliant_RGB(data, "warning") is_compliant_ScanAngle(data, "warning") is_compliant_ScanAngleRank(data, "warning") is_compliant_ReturnNumber_vs_NumberOfReturns(data, "warning") is_XY_larger_than_bbox(header, data, "warning") is_valid_scalefactors(header, behavior = "warning") is_number_of_points_in_accordance_with_header(header, data, "warning") is_number_of_points_by_return_in_accordance_with_header(header, data, "warning") is_XY_smaller_than_bbox(header, data, "warning") is_Z_in_bbox(header, data, "warning") is_RGB_in_valid_format(header, data, "warning") is_NIR_in_valid_format(header, data, "warning") is_gpstime_in_valid_format(header, data, "warning") is_ScanAngle_in_valid_format(header, data, "warning") return(invisible()) } check_output_file = function(file) { islas = tools::file_ext(file) %in% c("las", "laz") if (length(file) > 1) stop("Write only one file at a time.") if (!islas) stop("File not supported. Extension should be 'las' or 'laz'") } check_file = function(file) { valid = file.exists(file) islas = tools::file_ext(file) %in% c("las", "laz", "LAS", "LAZ") file = normalizePath(file) if (!all(valid)) stop("File not found.") if (!all(islas)) stop("File not supported.") } check_filter = function(filter) { if (!is.character(filter) || length(filter) > 1) stop("Incorrect argument 'filter'. A string is expected.") }
[ { "title": "ITS-90 temperature scale", "href": "http://dankelley.github.io/r/2015/05/10/ITS90-temperature-scale.html" }, { "title": "googleVis 0.2.15 is released: Improved geo and bubble charts", "href": "http://www.magesblog.com/2012/03/googlevis-0215-is-released-improved-geo.html" }, { "title": "Scheduling R Tasks with Crontabs to Conserve Memory", "href": "https://nerdsrule.co/2013/09/03/scheduling-r-tasks-with-crontab-to-conserve-memory/" }, { "title": "Using the RcppArmadillo-based Implementation of R’s sample()", "href": "http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation/" }, { "title": "Working with Sessionized Data 1: Evaluating Hazard Models", "href": "http://www.win-vector.com/blog/2015/07/working-with-sessionized-data-1-evaluating-hazard-models/" }, { "title": "Make your ggplots shareable, collaborative, and with D3", "href": "http://ropensci.org/blog/2014/04/17/plotly/" }, { "title": "Relative error distributions, without the heavy tail theatrics", "href": "http://www.win-vector.com/blog/2016/09/relative-error-distributions-without-the-heavy-tail-theatrics/" }, { "title": "useR! 2010 – Local R User Group Panel", "href": "https://web.archive.org/web/http://www.vcasmo.com/video/drewconway/9025" }, { "title": "Weekend Reading: F-Squared", "href": "https://systematicinvestor.wordpress.com/2013/12/07/weekend-reading-f-squared/" }, { "title": "Extracting Information From Objects Using Names()", "href": "http://rforpublichealth.blogspot.com/2013/03/extracting-information-from-objects.html" }, { "title": "Project Euler in R: Problem 24", "href": "http://www.eggwall.com/2012/01/project-euler-in-r-problem-24.html" }, { "title": "RStudio at the Open Data Science Conference", "href": "https://blog.rstudio.org/2016/03/28/rstudio-at-the-open-data-science-conference/" }, { "title": "Plotting differentially methylated bases on an ideogram", "href": "http://zvfak.blogspot.com/2012/06/plotting-differentially-methylated.html" }, { "title": "3 Reasons to Learn Caret", "href": "https://www.datacamp.com/community/blog/3-reasons-to-learn-caret" }, { "title": "Linear Regression with R : step by step implementation part-2", "href": "http://pingax.com/linear-regression-with-r-step-by-step-implementation-part-2/?utm_source=rss&utm_medium=rss&utm_campaign=linear-regression-with-r-step-by-step-implementation-part-2" }, { "title": "Use GBIF and googleVis to Make Maps with Species Occurrence Data", "href": "http://digitheadslabnotebook.blogspot.com/2012/09/computing-kook-density-in-r.html" }, { "title": "Books and lessons about ggplot2", "href": "https://martinsbioblogg.wordpress.com/2014/02/20/books-and-lessons-about-ggplot2/" }, { "title": "R User Group Sponsorship: Applications open for 2013", "href": "http://blog.revolutionanalytics.com/2012/10/r-user-group-sponsorship-applications-open-for-2013.html" }, { "title": "GPS Basemaps in R Using get_map", "href": "http://blog.mollietaylor.com/2013/02/gps-basemaps-in-r-using-getmap.html" }, { "title": "Yet Another Baseball Defense Statistic", "href": "http://andland.github.io/blog/2014/04/22/yet-another-baseball-defensive-statistic/" }, { "title": "Data Mining in R online course taught by Luis Torgo at statistics.com", "href": "http://www.statistics.com/about-us/" }, { "title": "Cramer’s Stock Pick Recommendations Analyzed (Part II)", "href": "http://www.r-chart.com/2010/10/cramers-stock-pick-recommendations_18.html" }, { "title": "Play Sliding Puzzles on R", "href": 
"http://weitaiyun.blogspot.com/2009/03/play-sliding-puzzles-on-r_22.html" }, { "title": "RStudio 0.92.44 Release: Try It! You’ll Be Surprised!", "href": "http://jeroldhaas.blogspot.com/2011/03/rstudio-09244-release-try-it-youll-be.html" }, { "title": "Update to Graphing Non-Proportional Hazards in R", "href": "http://christophergandrud.blogspot.com/2012/12/update-to-graphing-non-proportional.html" }, { "title": "More, Please!", "href": "http://www.imachordata.com/more-please/" }, { "title": "Truly random [again]", "href": "https://xianblog.wordpress.com/2010/12/10/truly-random-again/" }, { "title": "Geomorph update 2.1.5 Now Available!", "href": "http://ww1.geomorph.net/2015/05/geomorph-update-215-now-available.html" }, { "title": "In case you missed it: January 2014 roundup", "href": "http://blog.revolutionanalytics.com/2014/02/in-case-you-missed-it-january-2014-roundup.html" }, { "title": "UseR! 2011 in Warwick", "href": "https://xianblog.wordpress.com/2011/02/20/user-2011-in-warwick/" }, { "title": "ggtree supports phylip tree format", "href": "http://guangchuangyu.github.io/2016/01/ggtree-supports-phylip-tree-format/" }, { "title": "Statistical Graphics – Edward Tufte", "href": "http://www.wekaleamstudios.co.uk/posts/statistical-graphics-edward-tufte/" }, { "title": "Scaling data.table using index", "href": "https://jangorecki.github.io/blog/2015-11-23/data.table-index.html" }, { "title": "Incomplete Data by Design: Bringing Machine Learning to Marketing Research", "href": "http://joelcadwell.blogspot.com/2013/05/incomplete-data-by-design-bringing.html" }, { "title": "How big data and statistical modeling are changing video games", "href": "http://blog.revolutionanalytics.com/2013/06/how-big-data-and-statistical-modeling-are-changing-video-games.html" }, { "title": "RMySQL Looking For A New Maintainer", "href": "http://jeffreyhorner.tumblr.com/post/36067674305/rmysql-looking-for-a-new-maintainer" }, { "title": "R for Data Mining", "href": "http://blog.revolutionanalytics.com/2011/06/r-data-mining.html" }, { "title": "Digitizing plots", "href": "http://dankelley.github.io//r/2014/03/12/digitizing-plots.html" }, { "title": "Add a frame to a map", "href": "http://menugget.blogspot.com/2012/04/add-frame-to-map.html" }, { "title": "Financial Data Accessible from R – part III", "href": "http://www.thertrader.com/2013/11/08/financial-data-accessible-from-r-part-iii/" }, { "title": "R for Dummies – a quick review of reviews", "href": "http://blog.fellstat.com/?p=240" }, { "title": "Weighting and prediction in sample surveys", "href": "http://andrewgelman.com/2011/07/01/weighting_and_p/" }, { "title": "F1 Doing the Data Visualisation Competition Thing With Tata?", "href": "https://blog.ouseful.info/2014/07/02/f1-doing-the-data-visualisation-competition-thing-with-tata/" }, { "title": "Chutes & ladders: How long is this going to take?", "href": "https://kbroman.wordpress.com/2013/05/17/chutes-ladders-how-long-is-this-going-to-take/" }, { "title": "Data Science in HR", "href": "http://blog.revolutionanalytics.com/2015/05/data-science-in-hr.html" }, { "title": "JAGS 2.0", "href": "https://web.archive.org/web/http://jackman.stanford.edu/blog/?p=1611" }, { "title": "Martyn Plummer’s Secret JAGS Blog", "href": "http://andrewgelman.com/2011/12/07/martyn-plummers-secret-jags-blog/" }, { "title": "Big’MC seminar", "href": "https://xianblog.wordpress.com/2010/01/30/bigmc-seminar/" }, { "title": "Flag space: a scatter plot of raster images", "href": 
"http://is-r.tumblr.com/post/33700919594/flag-space-a-scatter-plot-of-raster-images" }, { "title": "Welcome to FOSS Trading", "href": "https://feedproxy.google.com/~r/FossTrading/~3/cZ4m9PCQDDY/welcome-to-foss-trading.html" } ]
tween_components <- function(.data, ease, nframes, time, id = NULL, range = NULL, enter = NULL, exit = NULL, enter_length = 0, exit_length = 0) { time <- enquo(time) time <- eval_tidy(time, .data) id <- enquo(id) id <- if (quo_is_null(id)) rep(1, nrow(.data)) else eval_tidy(id, .data) if (is.null(enter_length)) enter_length <- 0 if (is.null(exit_length)) exit_length <- 0 .data <- .complete_components(.data, time, id, enter, exit, enter_length, exit_length) .tween_individuals(.data, ease, nframes, range) } .tween_individuals <- function(.data, ease, nframes, range) { if (nframes == 0) return(.data[integer(), , drop = FALSE]) if (nrow(.data) == 0) return(.data) if (length(ease) == 1) ease <- rep(ease, ncol(.data) - 3) if (length(ease) == ncol(.data) - 3) { ease <- c(ease, 'linear', 'linear', 'linear') } else { stop('Ease must be either a single string or one for each column', call. = FALSE) } stopifnot(length(nframes) == 1 && is.numeric(nframes) && nframes %% 1 == 0) timerange <- if (is.null(range)) range(.data$.time) else range if (diff(timerange) == 0) stop('range must have a length', call. = FALSE) framelength <- diff(timerange) / (nframes - 1) .data <- .data[order(.data$.id, .data$.time), , drop = FALSE] frame <- round((.data$.time - min(timerange[1])) / framelength) + 1 .data$.time <- NULL colClasses <- col_classes(.data) tweendata <- lapply(seq_along(.data), function(i) { d <- .data[[i]] e <- rep(ease[i], length(d)) switch( colClasses[i], numeric = interpolate_numeric_element(d, .data$.id, frame, e), logical = interpolate_logical_element(d, .data$.id, frame, e), factor = interpolate_factor_element(d, .data$.id, frame, e), character = interpolate_character_element(d, .data$.id, frame, e), colour = interpolate_colour_element(d, .data$.id, frame, e), date = interpolate_date_element(d, .data$.id, frame, e), datetime = interpolate_datetime_element(d, .data$.id, frame, e), constant = interpolate_constant_element(d, .data$.id, frame, e), numlist = interpolate_numlist_element(d, .data$.id, frame, e), list = interpolate_list_element(d, .data$.id, frame, e), phase = get_phase_element(d, .data$.id, frame, e) ) }) tweenInfo <- tweendata[[1]][, c('group', 'frame')] tweendata <- lapply(tweendata, `[[`, i = 'data') tweendata <- structure(tweendata, names = names(.data), row.names = seq_along(tweendata[[1]]), class = 'data.frame') tweendata$.frame <- tweenInfo$frame tweendata$.id <- tweenInfo$group tweendata <- tweendata[tweendata$.frame >= 1 & tweendata$.frame <= nframes, , drop = FALSE] attr(tweendata, 'framelength') <- framelength tweendata[order(tweendata$.frame, tweendata$.id), , drop = FALSE] } .complete_components <- function(data, time, id, enter, exit, enter_length, exit_length) { if (length(id) != nrow(data) || length(time) != nrow(data)) { stop('id and time must have the same length as the number of rows in data', call. = FALSE) } data$.id <- id data$.phase <- rep('raw', nrow(data)) data$.time <- time if (any(!is.null(enter), !is.null(exit))) { time_ord <- order(time) if (!is.null(enter)) { enter_data <- enter(data[time_ord[!duplicated(id[time_ord])], , drop = FALSE]) enter_data$.phase <- 'enter' enter_data$.time <- enter_data$.time - enter_length } else { enter_data <- data[0, , drop = FALSE] } if (!is.null(exit)) { exit_data <- exit(data[time_ord[!duplicated(id[time_ord], fromLast = TRUE)], , drop = FALSE]) exit_data$.phase <- 'exit' exit_data$.time <- exit_data$.time + exit_length } else { exit_data <- data[0, , drop = FALSE] } data <- rbind(enter_data, data, exit_data) } data }
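## Usage sketch (illustrative; the data frame and column names are made up).
## Each id is tweened along its own timeline; `time` and `id` are unquoted
## column references, as the enquo()/eval_tidy() pattern above implies.
if (FALSE) {
  df <- data.frame(x = c(0, 10, 5, 8), time = c(1, 4, 2, 5), id = c(1, 1, 2, 2))
  tween_components(df, ease = "linear", nframes = 20, time = time, id = id)
}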
library("testthat") expect_equal_ignore_spaces <- function(string1, string2) { string1 <- gsub("([;()'+-/|*\n])", " \\1 ", string1) string2 <- gsub("([;()'+-/|*\n])", " \\1 ", string2) string1 <- gsub(" +", " ", string1) string2 <- gsub(" +", " ", string2) expect_equal(string1, string2) } expect_match_ignore_spaces <- function(string1, regexp) { string1 <- gsub(" +", " ", string1) expect_match(string1, regexp) } test_that("translate sql server -> spark round", { sql <- translate("SELECT round(3.14, 1)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ROUND(CAST(3.14 AS float),1)") }) test_that("translate sql server -> spark select random row using hash", { sql <- translate("SELECT column FROM (SELECT column, ROW_NUMBER() OVER (ORDER BY HASHBYTES('MD5',CAST(person_id AS varchar))) tmp WHERE rn <= 1", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT column FROM (SELECT column, ROW_NUMBER() OVER (ORDER BY MD5(CAST(person_id AS STRING))) tmp WHERE rn <= 1") }) test_that("translate sql server -> spark SELECT CONVERT(VARBINARY, @a, 1)", { sql <- translate("SELECT ROW_NUMBER() OVER CONVERT(VARBINARY, val, 1) rn WHERE rn <= 1", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ROW_NUMBER() OVER CONVERT(VARBINARY, val, 1) rn WHERE rn <= 1") }) test_that("translate sql server -> spark convert date", { sql <- translate("SELECT convert(date, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT TO_DATE('2019-01-01', 'yyyyMMdd')") }) test_that("translate sql server -> spark dateadd", { sql <- translate("SELECT dateadd(second, 1, '2019-01-01 00:00:00')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01 00:00:00' + INTERVAL 1 second)") sql <- translate("SELECT dateadd(minute, 1, '2019-01-01 00:00:00')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01 00:00:00' + INTERVAL 1 minute)") sql <- translate("SELECT dateadd(hour, 1, '2019-01-01 00:00:00')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01 00:00:00' + INTERVAL 1 hour)") sql <- translate("SELECT dateadd(d, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT date_add('2019-01-01', 1)") sql <- translate("SELECT dateadd(dd, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT date_add('2019-01-01', 1)") sql <- translate("SELECT dateadd(day, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT date_add('2019-01-01', 1)") sql <- translate("SELECT dateadd(m, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 month)") sql <- translate("SELECT dateadd(mm, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 month)") sql <- translate("SELECT dateadd(month, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 month)") sql <- translate("SELECT dateadd(yy, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 year)") sql <- translate("SELECT dateadd(yyyy, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 year)") sql <- translate("SELECT dateadd(year, 1, '2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT ('2019-01-01' + INTERVAL 1 year)") }) test_that("translate sql server -> spark 
datediff", { sql <- translate("SELECT datediff(d, '2019-01-01', '2019-01-02')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT datediff('2019-01-02', '2019-01-01')") sql <- translate("SELECT datediff(dd, '2019-01-01', '2019-01-02')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT datediff('2019-01-02', '2019-01-01')") sql <- translate("SELECT datediff(day, '2019-01-01', '2019-01-02')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT datediff('2019-01-02', '2019-01-01')") }) test_that("translate sql server -> spark convert date", { sql <- translate("select convert(varchar,'2019-01-01',112)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select TO_CHAR('2019-01-01', 'yyyyMMdd')") }) test_that("translate sql server -> spark GETDATE()", { sql <- translate("select GETDATE()", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select CURRENT_DATE") }) test_that("translate sql server -> spark concat", { sql <- translate("select 'oh' + 'dsi'", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select 'oh' || 'dsi'") }) test_that("translate sql server -> spark cast varchar and concat", { sql <- translate("select cast('test' as varchar(10)) + 'ing'", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select cast('test' as STRING) || 'ing'") }) test_that("translate sql server -> spark date from parts", { sql <- translate("select datefromparts('2019','01','01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select to_date(cast('2019' as string) || '-' || cast('01' as string) || '-' || cast('01' as string))") }) test_that("translate sql server -> spark datetime from parts", { sql <- translate("select datetimefromparts('2019', '01', '01', '12', '15', '30', '01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select to_timestamp(cast('2019' as string) || '-' || cast('01' as string) || '-' || cast('01' as string) || ' ' || cast('12' as string) || ':' || cast('15' as string) || ':' || cast('30' as string) || '.' 
|| cast('01' as string))") }) test_that("translate sql server -> spark eomonth", { sql <- translate("select eomonth('2019-01-01')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select last_day('2019-01-01')") }) test_that("translate sql server -> spark stdev", { sql <- translate("select STDEV(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select STDDEV(x)") }) test_that("translate sql server -> spark var", { sql <- translate("select VAR(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select VARIANCE(x)") }) test_that("translate sql server -> spark len", { sql <- translate("select LEN(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select LENGTH(x)") }) test_that("translate sql server -> spark charindex", { sql <- translate("select CHARINDEX('test', 'e')", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select INSTR('e', 'test')") }) test_that("translate sql server -> spark log", { sql <- translate("select LOG(x,y)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select LOG(y,x)") sql <- translate("select LOG(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select LN(x)") sql <- translate("select LOG10(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select LOG(10,x)") }) test_that("translate sql server -> spark isnull", { sql <- translate("select ISNULL(x,y)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select COALESCE(x,y)") }) test_that("translate sql server -> spark isnumeric", { sql <- translate("select ISNUMERIC(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select CASE WHEN CAST ( x AS DOUBLE ) IS NOT NULL THEN 1 ELSE 0 END") }) test_that("translate sql server -> spark count_big", { sql <- translate("select COUNT_BIG(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select COUNT(x)") }) test_that("translate sql server -> spark square", { sql <- translate("select SQUARE(x)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select ((x)*(x))") }) test_that("translate sql server -> spark NEWID", { sql <- translate("select NEWID()", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select UUID()") }) test_that("translate sql server -> spark if object_id", { sql <- translate("IF OBJECT_ID('some_table', 'U') IS NULL CREATE TABLE some_table (id int);", targetDialect = "spark") expect_equal_ignore_spaces(sql, "CREATE TABLE IF NOT EXISTS some_table \r\nUSING DELTA\r\nAS\r\nSELECT \tCAST(NULL AS int) AS id WHERE 1 = 0;") sql <- translate("IF OBJECT_ID('some_table', 'U') IS NOT NULL DROP TABLE some_table;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "DROP TABLE IF EXISTS some_table;") }) test_that("translate sql server -> spark dbo", { sql <- translate("select * from cdm.dbo.test", targetDialect = "spark") expect_equal_ignore_spaces(sql, "select * from cdm.test") }) test_that("translate sql server -> spark table admin", { sql <- translate("CREATE CLUSTERED INDEX index_name ON some_table (variable);", targetDialect = "spark") expect_equal_ignore_spaces(sql, "") sql <- translate("CREATE UNIQUE CLUSTERED INDEX index_name ON some_table (variable);", targetDialect = "spark") expect_equal_ignore_spaces(sql, "") sql <- translate("PRIMARY KEY NONCLUSTERED", targetDialect = "spark") expect_equal_ignore_spaces(sql, "") sql <- translate("UPDATE STATISTICS test;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "") }) test_that("translate sql server -> spark datetime", { sql <- translate("DATETIME", targetDialect = 
"spark") expect_equal_ignore_spaces(sql, "TIMESTAMP") sql <- translate("DATETIME2", targetDialect = "spark") expect_equal_ignore_spaces(sql, "TIMESTAMP") }) test_that("translate sql server -> spark varchar", { sql <- translate("VARCHAR(MAX)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "STRING") sql <- translate("VARCHAR", targetDialect = "spark") expect_equal_ignore_spaces(sql, "STRING") sql <- translate("VARCHAR(100)", targetDialect = "spark") expect_equal_ignore_spaces(sql, "STRING") }) test_that("translate sql server -> spark cte ctas", { sql <- translate(sql = "WITH a AS (select b) SELECT c INTO d FROM e;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "CREATE TABLE d \n USING DELTA \n AS \n WITH a AS (select b) SELECT\nc \nFROM\ne;") }) test_that("translate sql server -> spark ctas", { sql <- translate(sql = "SELECT a INTO b FROM c;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "CREATE TABLE b \n USING DELTA \n AS\nSELECT\na \nFROM\nc;") }) test_that("translate sql server -> spark ctas with distribute_on_key", { sql <- translate(sql = "--HINT DISTRIBUTE_ON_KEY(key) SELECT a INTO b FROM c;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "--HINT DISTRIBUTE_ON_KEY(key) \nCREATE TABLE b \nUSING DELTA\nAS\nSELECT\na \nFROM\nc;\nOPTIMIZE b ZORDER BY key;") }) test_that("translate sql server -> spark cross join", { sql <- translate(sql = "SELECT a from (select b) x, (select c) y;", targetDialect = "spark") expect_equal_ignore_spaces(sql, "SELECT a FROM (select b) x cross join (select c) y;") })
context("DCF") test_that("we can read different types of DCFs", { expected <- list(A = "1", B = "2") actual <- renv_dcf_read(text = "A: 1\nB: 2") expect_equal(actual, expected) actual <- renv_dcf_read(text = "A: 1\n\nB: 2\n") expect_equal(actual, expected) }) test_that("we allow for unindented continuations", { actual <- renv_dcf_read(text = "A: This field\nisn't indented.\nB: 42") expected <- list(A = "This field\nisn't indented.", B = "42") expect_equal(actual, expected) }) test_that("we can read a latin-1 DESCRIPTION file", { contents <- heredoc({' Encoding: latin1 Dessert: crème brûlée '}) latin1 <- iconv(enc2utf8(contents), from = "UTF-8", to = "latin1") file <- tempfile("DESCRIPTION-") writeLines(latin1, con = file, useBytes = TRUE) dcf <- renv_dcf_read(file) expect_equal(dcf$Dessert, "crème brûlée") }) test_that("we can read a custom encoded DESCRIPTION file", { skip_if(!"CP936" %in% iconvlist()) nihao <- enc2utf8("\u4f60\u597d") contents <- heredoc({' Encoding: CP936 Greeting: \u4f60\u597d '}) bytes <- iconv( x = enc2utf8(contents), from = "UTF-8", to = "CP936", toRaw = TRUE ) file <- tempfile("DESCRIPTION-") writeBin(bytes[[1L]], con = file) dcf <- renv_dcf_read(file) expect_equal(dcf$Greeting, nihao) }) test_that("we can read mis-encoded DESCRIPTION files", { contents <- heredoc(' Encoding: UTF-8 Dessert: crème brûlée ') latin1 <- iconv(enc2utf8(contents), from = "UTF-8", to = "latin1") file <- tempfile("DESCRIPTION-") writeLines(latin1, con = file, useBytes = TRUE) dcf <- renv_dcf_read(file) expect_equal(dcf$Dessert, "crème brûlée") }) test_that("we can read and write a dcf file", { contents <- heredoc(' Title: The title. Description: The Description field is quite long. It needs to wrap across multiple lines. ') descfile <- renv_scope_tempfile("renv-description-", fileext = "") writeLines(contents, con = descfile) old <- renv_dcf_read(descfile) renv_dcf_write(old, file = descfile) new <- read.dcf(descfile, all = TRUE) expect_equal( gsub("[[:space:]]+", " ", old$Field), gsub("[[:space:]]+", " ", new$Field) ) })
x2pCO2 <- function(S = 35, T = 25, Patm = 1, xCO2 = 400) {
  # Water vapour pressure (atm) at the given temperature and salinity
  pH2O <- vapress(T = T, S = S, form = "d2007")
  # CO2 partial pressure: mole fraction times the dry-air pressure
  pCO2 <- (Patm - pH2O) * xCO2
  return(pCO2)
}
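## Usage sketch (illustrative; not from the original source). Converts a CO2
## mole fraction of 400 ppm to a partial pressure at the defaults; the result
## is slightly below 400 because the water vapour pressure is subtracted from
## the total pressure. Assumes vapress() is available from the same package.
if (FALSE) {
  x2pCO2(S = 35, T = 25, Patm = 1, xCO2 = 400)
}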
check_beast2_path <- function(beast2_path) { beautier::check_file_exists(beast2_path, "beast2_path") invisible(beast2_path) }
ciu <- function(model, formula=NULL, data=NULL, in.min.max.limits=NULL, abs.min.max=NULL, input.names=NULL, output.names=NULL, predict.function=NULL, vocabulary=NULL) { ciu <- ciu.new(model, formula, data, in.min.max.limits, abs.min.max, input.names, output.names, predict.function, vocabulary) this <- ciu$as.ciu() this$CIU <- ciu class(this) <- c("ciu", class(this)) return(this) } ciu.to.CIU <- function(ciu) { CIU <- ciu.new(ciu$model, ciu$formula, ciu$data, ciu$in.min.max.limits, ciu$abs.min.max, ciu$input.names, ciu$output.names, ciu$predict.function, ciu$vocabulary) return(CIU) } ciu.explain <- function(ciu, instance, ind.inputs.to.explain, in.min.max.limits=NULL, n.samples=100, target.concept=NULL, target.ciu=NULL) { if ( inherits(ciu, "ciu") ) ciu <- ciu.to.CIU(ciu) ciu$explain(instance, ind.inputs.to.explain, in.min.max.limits, n.samples, target.concept, target.ciu) } ciu.plot <- function(ciu, instance, ind.input, ind.output, in.min.max.limits=NULL, n.points=40, main=NULL, xlab=NULL, ylab=NULL, ylim=NULL, ...) { if ( inherits(ciu, "ciu") ) ciu <- ciu.to.CIU(ciu) ciu$plot.ciu (instance, ind.input, ind.output, in.min.max.limits, n.points, main, xlab, ylab, ylim, ...) } ciu.plot.3D <- function(ciu, instance, ind.inputs, ind.output, in.min.max.limits=NULL, n.points=40, main=NULL, xlab=NULL, ylab=NULL, zlab=NULL, zlim=NULL, ...) { if ( inherits(ciu, "ciu") ) ciu <- ciu.to.CIU(ciu) ciu$plot.ciu.3D(instance, ind.inputs, ind.output, in.min.max.limits, n.points, main, xlab, ylab, zlab, zlim, ...) } ciu.barplot <- function(ciu, instance, ind.inputs=NULL, ind.output=1, in.min.max.limits=NULL, n.samples=100, neutral.CU=0.5, show.input.values=TRUE, concepts.to.explain=NULL, target.concept=NULL, target.ciu=NULL, ciu.meta = NULL, color.ramp.below.neutral=NULL, color.ramp.above.neutral=NULL, use.influence=FALSE, influence.minmax = c(-1,1), sort=NULL, decreasing=FALSE, main= NULL, xlab=NULL, xlim=NULL, ...) { if ( inherits(ciu, "ciu") ) ciu <- ciu.to.CIU(ciu) ciu$barplot.ciu(instance, ind.inputs, ind.output, in.min.max.limits, n.samples, neutral.CU, show.input.values, concepts.to.explain, target.concept, target.ciu, ciu.meta, color.ramp.below.neutral, color.ramp.above.neutral, use.influence, influence.minmax, sort, decreasing, main, xlab, xlim, ...) } ciu.pie <- function(ciu, instance, ind.inputs=NULL, ind.output=1, in.min.max.limits=NULL, n.samples=100, neutral.CU=0.5, show.input.values=TRUE, concepts.to.explain=NULL, target.concept=NULL, target.ciu=NULL, ciu.meta = NULL, color.ramp.below.neutral=NULL, color.ramp.above.neutral=NULL, sort=NULL, decreasing=FALSE, main= NULL, ...) { if ( inherits(ciu, "ciu") ) ciu <- ciu.to.CIU(ciu) ciu$pie.ciu(instance, ind.inputs, ind.output, in.min.max.limits, n.samples, neutral.CU, show.input.values, concepts.to.explain, target.concept, target.ciu, ciu.meta, color.ramp.below.neutral, color.ramp.above.neutral, sort, decreasing, main, ...) }
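## Usage sketch (illustrative; the model and data are placeholders, not from
## the original source). Any regression model accepted by ciu.new() should
## work the same way through these wrapper functions.
if (FALSE) {
  m <- lm(mpg ~ ., data = mtcars)
  c1 <- ciu(m, mpg ~ ., data = mtcars)
  ciu.explain(c1, mtcars[1, ], ind.inputs.to.explain = 1)
  ciu.barplot(c1, mtcars[1, ])
}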
util_create_sim_XML <- function(nl, seed, siminputrow, xmlfile) { simdata_run <- getsim(nl, "siminput")[siminputrow, ] if (!is.na(getexp(nl, "idrunnum"))) { runnum <- tibble::tibble(paste0("\"", getexp(nl, "expname"), "_", seed, "_", siminputrow, "\"")) names(runnum) <- getexp(nl, "idrunnum") simdata_run <- cbind(simdata_run, runnum) } nlXML <- XML::newXMLDoc() experiments <- XML::newXMLNode("experiments", doc = nlXML) experiment <- XML::newXMLNode("experiment", attrs = c( name = getexp(nl, "expname"), repetitions = getexp(nl, "repetition"), runMetricsEveryStep = getexp(nl, "tickmetrics") ), parent = experiments ) idsetup <- paste(getexp(nl, "idsetup"), sep = "\n", collapse = "\n") idgo <- paste(getexp(nl, "idgo"), sep = "\n", collapse = "\n") XML::addChildren(experiment, XML::newXMLNode("setup", idsetup, parent = experiment)) XML::addChildren(experiment, XML::newXMLNode("go", idgo, parent = experiment)) if (!is.na(getexp(nl, "idfinal"))) { idfinal <- paste(getexp(nl, "idfinal"), sep = "\n", collapse = "\n") XML::addChildren(experiment, XML::newXMLNode("final", idfinal, parent = experiment )) } runtime <- getexp(nl, "runtime") if (is.na(runtime)) { runtime <- 0 } XML::addChildren(experiment, XML::newXMLNode("timeLimit", attrs = c(steps = runtime), parent = experiment )) if (!is.na(getexp(nl, "stopcond"))) { stopcond <- paste(getexp(nl, "stopcond"), sep = "\n", collapse = "\n") XML::addChildren(experiment, XML::newXMLNode("exitCondition", stopcond, parent = experiment )) } metrics <- getexp(nl, "metrics") if (length(getexp(nl, "metrics.turtles")) > 0) { turtles.reporter <- purrr::map_chr(seq_along(nl@experiment@metrics.turtles), function(x) { x.breed <- names(nl@experiment@metrics.turtles)[[x]] x.metrics <- nl@experiment@metrics.turtles[[x]] if (!"breed" %in% x.metrics) { x.metrics <- c("breed", x.metrics) } turtles.reporter <- paste0("but-first but-last (word [remove \" \" (word ", paste(x.metrics, collapse = paste0("\",\"")), ")] of ", x.breed, ")") return(turtles.reporter) }) metrics <- c(metrics, turtles.reporter) } if (all(!is.na(getexp(nl, "metrics.patches")))) { patches.reporter <- paste0("but-first but-last (word [remove \" \" (word ", paste(getexp(nl, "metrics.patches"), collapse = paste0("\",\"")), ")] of patches)") metrics <- c(metrics, patches.reporter) } if (length(getexp(nl, "metrics.links")) > 0) { links.reporter <- purrr::map_chr(seq_along(nl@experiment@metrics.links), function(x) { x.breed <- names(nl@experiment@metrics.links)[[x]] x.metrics <- nl@experiment@metrics.links[[x]] if (!"breed" %in% x.metrics) { x.metrics <- c("breed", x.metrics) } links.reporter <- paste0("but-first but-last (word [remove \" \" (word ", paste(x.metrics, collapse = paste0("\",\"")), ")] of ", x.breed, ")") return(links.reporter) }) metrics <- c(metrics, links.reporter) } for (i in metrics) { XML::addChildren(experiment, XML::newXMLNode("metric", i, parent = experiment )) } for (i in seq_along(simdata_run)) { XML::addChildren(experiment, XML::newXMLNode("enumeratedValueSet", attrs = c( variable = names( simdata_run[i] ) ), XML::newXMLNode("value", attrs = c( value = simdata_run[[i]] ) ) )) } if (getexp(nl, "repetition") == 1) { XML::addChildren(experiment, XML::newXMLNode("enumeratedValueSet", attrs = c( variable = "random-seed" ), XML::newXMLNode("value", attrs = c(value = seed) ) )) } prefix <- "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE experiments SYSTEM \"behaviorspace.dtd\">" cat(XML::saveXML(nlXML, prefix = prefix), file = xmlfile) } util_call_nl <- function(nl, xmlfile, outfile, batchfile) { os <- util_get_os() if (os %in% 
c("win", "unix")) { NLcall <- paste0("\"", batchfile, "\"", " --model ", "\"", getnl(nl, "modelpath"), "\"", " --setup-file ", "\"", xmlfile, "\"", " --experiment ", getexp(nl, "expname"), " --table ", "\"", outfile, "\"", " --threads ", 1) } if (os == "mac") { NLcall <- paste0("sh \"", batchfile, "\"", " --model ", "\"", getnl(nl, "modelpath"), "\"", " --setup-file ", "\"", xmlfile, "\"", " --experiment ", getexp(nl, "expname"), " --table ", "\"", outfile, "\"", " --threads ", 1) } system(NLcall, wait = TRUE) } util_cleanup <- function(nl, cleanup.csv = TRUE, cleanup.xml = TRUE, cleanup.bat = TRUE, cleanup.files) { if (isTRUE(cleanup.csv)) { file.remove(cleanup.files$csv[[1]]) } if (isTRUE(cleanup.xml)) { file.remove(cleanup.files$xml[[1]]) } if (isTRUE(cleanup.bat)) { file.remove(cleanup.files$bat[[1]]) } } util_gather_results <- function(nl, outfile, seed, siminputrow) { if (!file.exists(outfile)) { stop(paste0("Temporary simulation output file not found: ", outfile, ".\n", "Either the simulation did not run or crashed, check the debugging section in ??run_nl_all() for help.")) } NLtable <- readr::read_csv(outfile, skip = 6, col_types = readr::cols()) if (purrr::is_empty(NLtable)) { stop("Output file is empty - simulation aborted due to a runtime error! Make sure that parameter value definitions of the experiment are valid and the model code is running properly!") } NLtable$siminputrow <- siminputrow if (getexp(nl, "tickmetrics") == "false") { NLtable <- NLtable %>% dplyr::filter(`[step]` == max(`[step]`)) } else { if (all(!is.na(getexp(nl, "evalticks")))) { NLtable <- NLtable %>% dplyr::filter(`[step]` %in% getexp(nl, "evalticks")) noeval <- getexp(nl, "evalticks")[!which(getexp(nl, "evalticks") %in% NLtable$`[step]`)] if (length(noeval) > 0) { message(paste0("No model results reported for siminputrow ", siminputrow, " on ticks ", noeval)) } } } if (nrow(NLtable) == 0) { NArow <- tibble::tibble(`[run number]` = NA) NArow <- cbind(NArow, getsim(nl, "siminput")[siminputrow, ]) NArow <- cbind(NArow, tibble::tibble(`random-seed` = seed)) NArow <- cbind(NArow, tibble::tibble(`[step]` = NA)) NAmetrics <- t(tibble::tibble(rep(NA, length(getexp(nl, "metrics"))))) colnames(NAmetrics) <- getexp(nl, "metrics") rownames(NAmetrics) <- NULL NArow <- cbind(NArow, NAmetrics) NArow$siminputrow <- siminputrow NLtable <- NArow } if (length(nl@[email protected]) > 0) { for(x in seq_along(nl@[email protected])) { x.breed <- names(nl@[email protected])[[x]] x.metrics <- nl@[email protected][[x]] if (!"breed" %in% x.metrics) { x.metrics <- c("breed", x.metrics) } col.name <- paste0("metrics.", x.breed) turtles.reporter <- paste0("but-first but-last (word [remove \" \" (word ", paste(x.metrics, collapse = paste0("\",\"")), ")] of ", x.breed, ")") names(NLtable)[names(NLtable) == turtles.reporter] <- col.name NLtable[, grepl(col.name, names(NLtable))] <- list(.util_clean_metrics_turtles(NLtable, nl, col.name, x.metrics)) } } if (all(!is.na(getexp(nl, "metrics.patches")))) { NLtable <- NLtable %>% dplyr::rename(metrics.patches = paste0("but-first but-last (word [remove \" \" (word ", paste(getexp(nl, "metrics.patches"), collapse = paste0("\",\"")), ")] of patches)")) NLtable$metrics.patches <- .util_clean_metrics_patches(NLtable, nl) } if (length(nl@[email protected]) > 0) { for(x in seq_along(nl@[email protected])) { x.breed <- names(nl@[email protected])[[x]] x.metrics <- nl@[email protected][[x]] if (!"breed" %in% x.metrics) { x.metrics <- c("breed", x.metrics) } col.name <- paste0("metrics.", x.breed) 
links.reporter <- paste0("but-first but-last (word [remove \" \" (word ", paste(x.metrics, collapse = paste0("\",\"")), ")] of ", x.breed, ")") names(NLtable)[names(NLtable) == links.reporter] <- col.name NLtable[, grepl(col.name, names(NLtable))] <- list(.util_clean_metrics_links(NLtable, nl, col.name, x.metrics)) } } return(NLtable) } .util_clean_metrics_patches <- function(NLtable, nl) { patches_string <- NLtable[, grepl(c("metrics.patches"), names(NLtable))] %>% dplyr::mutate_all(function(x) gsub('[\"]', '',x)) patches_string <- stringr::str_split(patches_string$metrics.patches, " ") patches_string <- purrr::map(patches_string, function(x) { patches_owns <- tibble::as_tibble(x = x) patches_owns <- tidyr::separate(patches_owns, value, getexp(nl, "metrics.patches"), sep=",") patches_owns <- dplyr::mutate_all(patches_owns, function(x) { suppressWarnings(if(!all(is.na(x))) {ifelse(is.na(as.numeric(as.character(x))), as.character(x), as.numeric(as.character(x)))}) }) patches_owns$agent <- "patches" patches_owns$breed <- NA_character_ return(patches_owns) }) return(patches_string) } .util_clean_metrics_turtles <- function(NLtable, nl, col.name, metrics) { turtles_string <- NLtable[, grepl(col.name, names(NLtable))] %>% dplyr::mutate_all(function(x) gsub('[\"]', '',x)) turtles_string <- stringr::str_split(dplyr::pull(turtles_string, col.name), " ") turtles_string <- purrr::map(turtles_string, function(x) { turtles_owns <- tibble::as_tibble(x = x) turtles_owns <- tidyr::separate(turtles_owns, value, metrics, sep=",") turtles_owns <- dplyr::mutate_all(turtles_owns, function(x) { suppressWarnings(if(!all(is.na(x))) {ifelse(is.na(as.numeric(as.character(x))), as.character(x), as.numeric(as.character(x)))}) }) turtles_owns$agent <- "turtles" return(turtles_owns) }) return(turtles_string) } .util_clean_metrics_links <- function(NLtable, nl, col.name, metrics) { links_string <- NLtable[, grepl(col.name, names(NLtable))] %>% dplyr::mutate_all(function(x) gsub('[\"]', '',x)) links_string <- stringr::str_split(dplyr::pull(links_string, col.name), " ") links_string <- purrr::map(links_string, function(x) { links_owns <- tibble::as_tibble(x = x) links_owns <- tidyr::separate(links_owns, value, metrics, sep=",") links_owns <- dplyr::mutate_all(links_owns, function(x) { suppressWarnings(if(!all(is.na(x))) {ifelse(is.na(as.numeric(as.character(x))), as.character(x), as.numeric(as.character(x)))}) }) links_owns$agent <- "links" return(links_owns) }) return(links_string) } util_read_write_batch <- function(nl) { os <- util_get_os() batchpath_temp <- NULL if (os == "win") { if (getnl(nl, "nlversion") == "5.3.1") { block1 <- c( "@echo off", "setlocal ENABLEDELAYEDEXPANSION", "set BASE_DIR=%~dp0", "if defined JAVA_HOME (", " set \"JAVA=%JAVA_HOME%\\bin\\java.exe\"", ") ELSE (", " ECHO JAVA_HOME not defined, using java on PATH.", " ECHO If you encounter errors, set JAVA_HOME or update your PATH to include java.exe.", " set \"JAVA=java.exe\"", ")" ) extensionspath <- file.path(getnl(nl, "nlpath"), "app/extensions") jvmoptsline <- paste0("SET \"JVM_OPTS=-Xmx", getnl(nl, "jvmmem"), "m -XX:+UseParallelGC -Dfile.encoding=UTF-8 ", "-Dnetlogo.extensions.dir=^\"", extensionspath, "^\"\"") block2 <- c( "set ARGS=", "FOR %%a IN (%*) DO (", " SET \"ARG=%%a\"", " IF \"!ARG!\" == \"--3D\" (", " SET \"JVM_OPTS=!JVM_OPTS! -Dorg.nlogo.is3d=true\"", " ) ELSE (", " IF \"!ARG:~0,2!\" == \"-D\" (", " SET \"JVM_OPTS=!JVM_OPTS! !ARG!\"", " ) ELSE (", " SET \"ARGS=!ARGS! 
!ARG!\"", " )", " )", ")" ) jarpath <- file.path(getnl(nl, "nlpath"), "app/NetLogo.jar") jarpathline <- paste0("SET \"ABSOLUTE_CLASSPATH=", jarpath, "\"") block3 <- c("\"%JAVA%\" %JVM_OPTS% -classpath \"%ABSOLUTE_CLASSPATH%\" org.nlogo.headless.Main %ARGS%") allblocks <- c(block1, jvmoptsline, block2, jarpathline, block3) batchpath_temp <- tempfile(pattern = "netlogo-headless", fileext = ".bat") writeLines(allblocks, batchpath_temp) } else { batchpath <- file.path(getnl(nl, "nlpath"), "netlogo-headless.bat") extensionspath <- file.path(getnl(nl, "nlpath"), "app/extensions") jarpath <- file.path(getnl(nl, "nlpath"), paste0("app/netlogo-", getnl(nl, "nlversion"), ".jar")) jvmoptsline <- paste0("SET \"JVM_OPTS=-Xmx", getnl(nl, "jvmmem"), "m -XX:+UseParallelGC -Dfile.encoding=UTF-8 ", "-Dnetlogo.extensions.dir=^\"", extensionspath, "^\"\"") jarpathline <- paste0("SET \"ABSOLUTE_CLASSPATH=", jarpath, "\"") batch <- readr::read_lines(batchpath) pos_jvmopts <- which(grepl("SET \"JVM_OPTS=-Xmx", batch)) pos_jarpath <- which(grepl("SET \"ABSOLUTE_CLASSPATH=", batch)) batch[pos_jvmopts] <- jvmoptsline batch[pos_jarpath] <- jarpathline batchpath_temp <- tempfile(pattern = "netlogo-headless", fileext = ".bat") readr::write_lines(batch, path = batchpath_temp) } } if (os %in% c("unix", "mac")) { if (getnl(nl, "nlversion") == "5.3.1") { block1 <- c(" basedirline <- paste0("cd \"", getnl(nl, "nlpath"), "app/\"") jvmoptsline <- paste0("java -Xmx", getnl(nl, "jvmmem"), "m -Dfile.encoding=UTF-8 -classpath NetLogo.jar ", "org.nlogo.headless.Main \"$@\"") allblocks <- c(block1, basedirline, jvmoptsline) batchpath_temp <- tempfile(pattern = "netlogo-headless", fileext = ".sh") writeLines(allblocks, batchpath_temp) system(paste0("chmod +x ", batchpath_temp), wait = TRUE) } else { batchpath <- file.path(getnl(nl, "nlpath"), "netlogo-headless.sh") batchpath_temp <- tempfile(pattern = "netlogo-headless", fileext = ".sh") system(paste0("cp \"", batchpath, "\" \"", batchpath_temp, "\""), wait = TRUE) basedirline <- paste0("BASE_DIR=\"", getnl(nl, "nlpath"), "\"") jvmoptsline <- paste0("JVM_OPTS=(-Xmx", getnl(nl, "jvmmem"), "m -Dfile.encoding=UTF-8)") system(paste0("sed -i -r 's!^BASE_DIR=.*!", basedirline, "!'", " \"", batchpath_temp, "\"")) system(paste0("sed -i -r 's!^JVM_OPTS=.*!", jvmoptsline, "!'", " \"", batchpath_temp, "\"")) system(paste0("chmod +x ", batchpath_temp), wait = TRUE) } } return(batchpath_temp) }
Kernel_Ridge_MM <- function( Y_train, X_train=as.vector(rep(1,length(Y_train))), Z_train=diag(1,length(Y_train)), Matrix_covariates_train, method="RKHS", kernel="Gaussian", rate_decay_kernel=0.1, degree_poly=2, scale_poly=1, offset_poly=1, degree_anova=3, init_sigma2K=2, init_sigma2E=3, convergence_precision=1e-8, nb_iter=1000, display="FALSE" ) { if ( identical( method, "RKHS" ) ) { if ( identical( kernel, "Gaussian" ) ) { p=dim(Matrix_covariates_train)[2] rbf=rbfdot(sigma = (1/p)*rate_decay_kernel) K_train=kernelMatrix(rbf, Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train%*%Beta_hat_train) }else{ Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "method"=method, "kernel"=kernel, "rate_decay_kernel"=rate_decay_kernel ) ) } else if ( identical( kernel, "Laplacian" ) ){ p=dim(Matrix_covariates_train)[2] rbf=laplacedot(sigma = (1/p)*rate_decay_kernel) K_train=kernelMatrix(rbf, Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train%*%Beta_hat_train) }else{ Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "method"=method, "kernel"=kernel, "rate_decay_kernel"=rate_decay_kernel ) ) } else if ( identical( kernel, "Polynomial" ) ){ rbf=polydot(degree = degree_poly, scale = scale_poly, offset = offset_poly) K_train=kernelMatrix(rbf, Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - 
X_train%*%Beta_hat_train) }else{ Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "method"=method, "kernel"=kernel, "degree_poly"=degree_poly, "scale_poly"=scale_poly, "offset_poly"=offset_poly ) ) } else if ( identical( kernel, "ANOVA" ) ){ p=dim(Matrix_covariates_train)[2] rbf=anovadot(sigma = (1/p)*rate_decay_kernel, degree = degree_anova) K_train=kernelMatrix(rbf, Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train%*%Beta_hat_train) }else{ Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "method"=method, "kernel"=kernel, "rate_decay_kernel"=rate_decay_kernel, "degree_anova"=degree_anova ) ) } } else if ( identical( method, "RR-BLUP" ) ){ Matrix_covariates_train=scale(Matrix_covariates_train, center=TRUE, scale=FALSE) K_train=Matrix_covariates_train%*%t(Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha=t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train%*%Beta_hat_train) Gamma_hat=t(Matrix_covariates_train)%*%Vect_alpha }else{ Vect_alpha=t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) Gamma_hat=t(Matrix_covariates_train)%*%Vect_alpha } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "Gamma_hat"=Gamma_hat, "method"=method ) ) } else if ( identical( method, "GBLUP" ) ){ Matrix_covariates_train=scale(Matrix_covariates_train, center=TRUE, scale=FALSE) K_train=Matrix_covariates_train%*%t(Matrix_covariates_train) K_train_inv=ginv(K_train) n_train=length(Y_train) MM_components_solved=EM_REML_MM( K_train_inv, Y_train, X_train, Z_train, init_sigma2K, init_sigma2E, convergence_precision, nb_iter, display ) Beta_hat_train = as.vector(MM_components_solved$Beta_hat) Sigma2K_hat_train = as.vector(MM_components_solved$Sigma2K_hat) Sigma2E_hat_train = as.vector(MM_components_solved$Sigma2E_hat) lambda=(Sigma2E_hat_train/Sigma2K_hat_train) 
Var_Y_train_div_sig2_alpha = Z_train%*%K_train%*%t(Z_train) + lambda*diag(1, n_train) if ( length(Beta_hat_train) > 1 ) { Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train%*%Beta_hat_train) }else{ Vect_alpha = t(Z_train)%*%ginv(Var_Y_train_div_sig2_alpha)%*%(Y_train - X_train*Beta_hat_train) } return( list( "Matrix_covariates_train"=Matrix_covariates_train, "Beta_hat"=Beta_hat_train, "Sigma2K_hat"=Sigma2K_hat_train, "Sigma2E_hat"=Sigma2E_hat_train, "Vect_alpha"=Vect_alpha, "method"=method ) ) } }
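## Usage sketch (illustrative; simulated data, not from the original source).
## Fits a Gaussian-kernel RKHS mixed model; Matrix_covariates_train is an
## n x p covariate/marker matrix and Y_train the n phenotypes. Assumes
## EM_REML_MM() and kernlab's kernel constructors are available.
if (FALSE) {
  n <- 50; p <- 200
  M <- matrix(rnorm(n * p), n, p)
  y <- M[, 1] * 0.5 + rnorm(n)
  fit <- Kernel_Ridge_MM(Y_train = y, Matrix_covariates_train = M,
                         method = "RKHS", kernel = "Gaussian",
                         rate_decay_kernel = 0.1)
  c(fit$Sigma2K_hat, fit$Sigma2E_hat)  # estimated variance components
}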
"congruent"
http_was_redirected <- function(response){ status <- vapply( X = response$all_headers, FUN = `[[`, FUN.VALUE = integer(1), "status" ) any(status >= 300 & status < 400) }
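## Usage sketch (illustrative; `fake_response` mimics the shape this helper
## relies on: a response whose all_headers list has one `status` integer per
## hop, so a 301 -> 200 chain yields TRUE).
if (FALSE) {
  fake_response <- list(all_headers = list(
    list(status = 301L),
    list(status = 200L)
  ))
  http_was_redirected(fake_response)  # TRUE: the first hop returned a 3xx
}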
create.Mmat <- function(q){ tvec<-(c(0,(q/2),q))/q x.bounds.es <-c(1:(q-1))/q tvec.es <-tvec Mmat.q1<- cds::ispline(x.bounds.es, tvec = tvec.es,intercept=TRUE) x.bounds.data <- (c(1:q) - 0.5)/q tvec.data<-tvec Mmat.q <- cds::ispline(x.bounds.data, tvec = tvec.data,intercept=TRUE) list(Mmat.q1=Mmat.q1,Mmat.q=Mmat.q) }
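## Usage sketch (illustrative). For a q-point scale this returns monotone
## I-spline bases evaluated at the q - 1 interior boundaries (Mmat.q1) and
## at the q bin midpoints (Mmat.q); requires the cds package.
if (FALSE) {
  Mmats <- create.Mmat(q = 7)
  str(Mmats)
}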
context("classify_ground") las = clip_rectangle(topography, 273450, 5274350, 273550, 5274450) ctg = topography_ctg opt_chunk_size(ctg) <- 300 ctg@chunk_options$alignment = c(50, 200) opt_chunk_buffer(ctg) <- 0 opt_progress(ctg) <- FALSE ws = seq(3,21, 5) th = seq(0.1, 2, length.out = length(ws)) mypmf = pmf(ws, th) mycsf = csf(TRUE, 1, 1, time_step = 1) test_that("classify_ground pmf works with LAS", { las <- classify_ground(las, mypmf) n = names(las@data) expect_true("Classification" %in% n) expect_equal(sort(unique(las@data$Classification)), c(1L, 2L)) expect_equal(sum(las@data$Classification == 2L), 1933L) expect_error(classify_ground(ctg, mypmf), "buffer") }) test_that("classify_ground pmf works with LAScatalog", { opt_chunk_buffer(ctg) <- 30 expect_error(classify_ground(ctg, mypmf), "output file") opt_output_files(ctg) <- paste0(tmpDir(), "file_{XLEFT}_{YBOTTOM}") ctg2 = classify_ground(ctg, mypmf) las2 = readLAS(ctg2) expect_equal(sum(las2@data$Classification == 2L), 19472L) }) test_that("classify_ground csf works with LAS", { las <- classify_ground(las, mycsf) n = names(las@data) expect_true("Classification" %in% n) expect_equal(sort(unique(las@data$Classification)), c(1L, 2L)) expect_equal(sum(las@data$Classification == 2L), 2605L) }) test_that("classify_ground csf works with LAScatalog", { skip_on_cran() opt_output_files(ctg) <- paste0(tmpDir(), "file_{XLEFT}_{YBOTTOM}_ground") opt_chunk_buffer(ctg) <- 30 ctg2 = classify_ground(ctg, mycsf) las2 = readLAS(ctg2) expect_equal(sum(las2@data$Classification == 2L), 26715L-450L) }) test_that("classify_ground csf works with last_returns = FALSE", { las <- lidR:::generate_las(500) las <- classify_ground(las, csf(), last_returns = FALSE) n = names(las@data) expect_true("Classification" %in% n) expect_equal(unique(las@data$Classification), c(1L, 2L)) expect_equal(sum(las@data$Classification == 2L), 105L) }) test_that("classify_ground works with last_returns = TRUE but attribute not properly populated", { las <- lidR:::generate_las(500) las@data$ReturnNumber <- 0 las@data$Classification <- NULL las <- suppressWarnings(classify_ground(las, csf(), last_returns = TRUE)) n = names(las@data) expect_true("Classification" %in% n) expect_equal(unique(las@data$Classification), c(1L, 2L)) expect_equal(sum(las@data$Classification == 2L), 105L) }) test_that("makeZhangParam works", { expect_error(util_makeZhangParam(), NA) }) test_that("classify_ground does not erase former classification (but new ground points)", { las <- topography las <- filter_poi(las, X < mean(X), Y < mean(Y)) las$Classification[las$Classification == LASGROUND] <- LASUNCLASSIFIED las <- classify_ground(las, mypmf) expect_equal(names(table(las$Classification)), c("1", "2", "9")) })
get_version <- function(base_url, api_key) {
  if (missing(base_url)) {
    stop("Please add a valid URL")
  } else if (missing(api_key)) {
    stop("Please add a valid API token")
  }
  # Normalise the base URL and build the endpoint path
  base_url <- sub("/$", "", base_url)
  gitea_url <- file.path(base_url, "api/v1", "version")
  authorization <- paste("token", api_key)
  r <- tryCatch(
    GET(
      gitea_url,
      add_headers(Authorization = authorization),
      accept_json()
    ),
    error = function(cond) {
      "Failure"
    }
  )
  # inherits() is safer than class(r) != "response" for multi-class objects
  if (!inherits(r, "response")) {
    stop(paste0("Error querying the url: ", gitea_url))
  }
  stop_for_status(r)
  content_version <- fromJSON(content(r, "text"))
  content_version <- as.data.frame(content_version)
  return(content_version)
}
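## Usage sketch (illustrative; the URL and token are placeholders, not real
## credentials). Returns the Gitea server version as a one-row data frame.
if (FALSE) {
  v <- get_version(
    base_url = "https://gitea.example.com",
    api_key = "<personal-access-token>"
  )
  v$version
}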
cox.stuart.test = function(x) {
  method = "Cox-Stuart test for trend analysis"
  leng = length(x)
  # Drop the middle observation when the series has an odd length
  if (leng %% 2 == 1) {
    delete = (length(x) + 1) / 2
    x = x[-delete]
  }
  # Pair each observation in the first half with its counterpart in the second
  half = length(x) / 2
  x1 = x[1:half]
  x2 = x[(half + 1):(length(x))]
  difference = x1 - x2
  signs = sign(difference)
  signcorr = signs[signs != 0]  # ties carry no information
  pos = signs[signs > 0]
  neg = signs[signs < 0]
  # Binomial (sign) test on the direction of the paired differences
  if (length(pos) < length(neg)) {
    prop = pbinom(length(pos), length(signcorr), 0.5)
    names(prop) = "Increasing trend, p-value"
  } else {
    prop = pbinom(length(neg), length(signcorr), 0.5)
    names(prop) = "Decreasing trend, p-value"
  }
  rval <- list(method = method, statistic = prop)
  class(rval) = "htest"
  return(rval)
}
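## Usage sketch (illustrative; not from the original source). A monotonically
## increasing series yields mostly negative first-half minus second-half
## differences, so the test reports a small "Increasing trend" p-value.
if (FALSE) {
  set.seed(1)
  trending <- seq_len(40) + rnorm(40)
  cox.stuart.test(trending)
}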
NULL roll_mean <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_mean_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_meanr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_mean_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_meanl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_mean_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_median <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_median_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_medianr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_median_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_medianl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_median_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_min <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_min_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), 
as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_minr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_min_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_minl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_min_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_max <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_max_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_maxr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_max_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_maxl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_max_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_prod <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_prod_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_prodr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_prod_impl( x, as.integer(n), 
as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_prodl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_prod_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_sum <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_sum_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_sumr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_sum_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_suml <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_sum_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_sd <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_sd_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_sdr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_sd_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_sdl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- 
FALSE } result <- roll_sd_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_var <- function(x, n = 1L, weights = NULL, by = 1L, fill = numeric(0), partial = FALSE, align = c("center", "left", "right"), normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_var_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_varr <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "right", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_var_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result } roll_varl <- function(x, n = 1L, weights = NULL, by = 1L, fill = NA, partial = FALSE, align = "left", normalize = TRUE, na.rm = FALSE) { if (!identical(partial, FALSE)) { warning("'partial' argument is currently unimplemented; using 'partial = FALSE'") partial <- FALSE } result <- roll_var_impl( x, as.integer(n), as.numeric(weights), as.integer(by), as.numeric(fill), as.logical(partial), as.character(match.arg(align)), as.logical(normalize), as.logical(na.rm) ) colnames(result) <- colnames(x) result }
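# Illustrative usage, assuming these wrappers front the compiled RcppRoll
# backend (roll_*_impl is C++ and is not defined in this file): a centered
# 3-point mean and a right-aligned 2-point sum.
if (requireNamespace("RcppRoll", quietly = TRUE)) {
  x <- c(1, 2, 3, 4, 5)
  print(RcppRoll::roll_mean(x, n = 3))  # 2 3 4
  print(RcppRoll::roll_sumr(x, n = 2))  # NA 3 5 7 9
}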
".is.protein" <- function(pdb, byres=TRUE) { if(byres) { return(.is.protein1(pdb)) } else { sel1 <- .is.protein1(pdb) sel2 <- .is.protein2(pdb) if(!(identical(sel1, sel2))) { sel <- cbind(sel1, sel2) sums <- apply(sel, 1, sum) inds <- which(sums==1) unq <- paste(unique(pdb$atom$resid[inds]), collapse=",") warning(paste("possible protein residue(s) with non-standard residue name(s) \n (", unq, ")")) } return(sel1) } } ".is.protein1" <- function(pdb) { aa <- bio3d::aa.table$aa3 return(pdb$atom$resid %in% aa) } ".is.protein2" <- function(pdb) { resid <- paste(pdb$atom$chain, pdb$atom$insert, pdb$atom$resno, sep="-") at.ca <- resid[ pdb$atom$elety == "CA"] at.o <- resid[ pdb$atom$elety == "O" ] at.c <- resid[ pdb$atom$elety == "C" ] at.n <- resid[ pdb$atom$elety == "N" ] common <- intersect(intersect(intersect(at.ca, at.o), at.n), at.c) return(resid %in% common) } ".is.nucleic" <- function(pdb) { nuc.aa <- c("A", "U", "G", "C", "T", "I", "DA", "DU", "DG", "DC", "DT", "DI") return(pdb$atom$resid %in% nuc.aa) } ".is.water" <- function(pdb) { hoh <- c("H2O", "OH2", "HOH", "HHO", "OHH", "SOL", "WAT", "TIP", "TIP2", "TIP3", "TIP4") return(pdb$atom$resid %in% hoh) } ".is.hydrogen" <- function(pdb) { return(substr( gsub("^[123]", "",pdb$atom$elety) , 1, 1) %in% "H") } .match.type <- function(pdb, t) { if(!is.character(t)) stop("'type' must be a character vector") pdb$atom$type %in% t } .match.eleno <- function(pdb, eleno) { if(!is.numeric(eleno)) stop("'eleno' must be a numeric vector") pdb$atom$eleno %in% eleno } .match.elety <- function(pdb, elety) { if(!is.character(elety)) stop("'elety' must be a character vector") pdb$atom$elety %in% elety } .match.resid <- function(pdb, resid) { if(!is.character(resid)) stop("'resid' must be a character vector") pdb$atom$resid %in% resid } .match.chain <- function(pdb, chain) { if(!is.character(chain)) stop("'chain' must be a character vector") pdb$atom$chain %in% chain } .match.resno <- function(pdb, resno) { if(!is.numeric(resno)) stop("'resno' must be a numeric vector") pdb$atom$resno %in% resno } .match.insert <- function(pdb, insert) { if(!all(is.na(insert))) { if(!is.character(insert[ !is.na(insert) ])) stop("'insert' must be a character vector") } if(any(insert=="", na.rm=TRUE)) insert[ insert == "" ] = NA if(any(pdb$atom$insert == "", na.rm=TRUE)) pdb$atom$insert[ pdb$atom$insert == "" ] = NA pdb$atom$insert %in% insert } .match.segid <- function(pdb, segid) { if(!all(is.na(segid))) { if(!is.character(segid[ !is.na(segid) ])) stop("'segid' must be a character vector") } if(any(segid=="", na.rm=TRUE)) segid[ segid == "" ] = NA if(any(pdb$atom$segid == "", na.rm=TRUE)) pdb$atom$segid[ pdb$atom$segid == "" ] = NA pdb$atom$segid %in% segid } atom.select.pdb <- function(pdb, string = NULL, type = NULL, eleno = NULL, elety = NULL, resid = NULL, chain = NULL, resno = NULL, insert = NULL, segid = NULL, operator = "AND", inverse = FALSE, value = FALSE, verbose=FALSE, ...) { if(!is.pdb(pdb)) stop("'pdb' must be an object of class 'pdb'") op.tbl <- c(rep("AND",3), rep("OR",4)) operator <- op.tbl[match(operator, c("AND","and","&","OR","or","|","+"))] if(!operator %in% c("AND", "OR")) stop("Allowed values for 'operator' are 'AND' or 'OR'") if(!is.null(string)) { str.allowed <- c("all", "protein", "notprotein", "nucleic", "notnucleic", "water", "notwater", "calpha", "cbeta", "backbone", "back", "side", "sidechain", "ligand", "h", "noh") if(!(string %in% str.allowed)) stop("Unknown 'string' keyword. 
See documentation for allowed values") } if(verbose) cat("\n") .verboseout <- function(M, type) { cat(" .. ", sprintf("%08s", length(which(M))), " atom(s) from '", type, "' selection \n", sep="") } .combinelv <- function(L, M, operator) { if(operator=="AND") M <- L & M if(operator=="OR") M <- L | M return(M) } cl <- match.call() if(operator=="AND") M <- rep(TRUE, nrow(pdb$atom)) if(operator=="OR") M <- rep(FALSE, nrow(pdb$atom)) if(!is.null(string)) { M <- switch(string, all = M <- rep(TRUE, nrow(pdb$atom)), protein = .is.protein(pdb), notprotein = !.is.protein(pdb), nucleic = .is.nucleic(pdb), notnucleic = !.is.nucleic(pdb), water = .is.water(pdb), notwater = !.is.water(pdb), calpha = .is.protein(pdb) & .match.elety(pdb, "CA"), cbeta = .is.protein(pdb) & .match.elety(pdb, c("CA", "N", "C", "O", "CB")), backbone = .is.protein(pdb) & .match.elety(pdb, c("CA", "N", "C", "O")), back = .is.protein(pdb) & .match.elety(pdb, c("CA", "N", "C", "O")), sidechain = .is.protein(pdb) & !.match.elety(pdb, c("CA", "N", "C", "O")), side = .is.protein(pdb) & !.match.elety(pdb, c("CA", "N", "C", "O")), ligand = !.is.protein(pdb) & !.is.nucleic(pdb) & !.is.water(pdb), h = .is.hydrogen(pdb), noh = !.is.hydrogen(pdb), NA ) if(verbose) { .verboseout(M, 'string') } } if(!is.null(type)) { L <- .match.type(pdb, type) if(verbose) .verboseout(L, 'type') M <- .combinelv(L, M, operator) } if(!is.null(eleno)) { L <- .match.eleno(pdb, eleno) if(verbose) .verboseout(L, 'eleno') M <- .combinelv(L, M, operator) } if(!is.null(elety)) { L <- .match.elety(pdb, elety) if(verbose) .verboseout(L, 'elety') M <- .combinelv(L, M, operator) } if(!is.null(resid)) { L <- .match.resid(pdb, resid) if(verbose) .verboseout(L, 'resid') M <- .combinelv(L, M, operator) } if(!is.null(chain)) { L <- .match.chain(pdb, chain) if(verbose) .verboseout(L, 'chain') M <- .combinelv(L, M, operator) } if(!is.null(resno)) { L <- .match.resno(pdb, resno) if(verbose) .verboseout(L, 'resno') M <- .combinelv(L, M, operator) } if(!is.null(insert)) { L <- .match.insert(pdb, insert) if(verbose) .verboseout(L, 'insert') M <- .combinelv(L, M, operator) } if(!is.null(segid)) { L <- .match.segid(pdb, segid) if(verbose) .verboseout(L, 'segid') M <- .combinelv(L, M, operator) } if(verbose) cat(" ..", sprintf("%08s", length(which(M))), "atom(s) in final combined selection \n") if(inverse) { if(verbose) { cat(" ..", sprintf("%08s", length(which(!M))), "atom(s) in inversed selection \n") } sele <- as.select(which(!M)) } else sele <- as.select(which(M)) sele$call <- cl if(verbose) cat("\n") if(value) return(trim.pdb(pdb, sele)) else return(sele) }
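# Illustrative usage, assuming the bio3d package whose atom.select() generic
# dispatches to atom.select.pdb(); read.pdb("1hel") downloads a structure from
# the RCSB, so this sketch needs network access.
if (requireNamespace("bio3d", quietly = TRUE)) {
  pdb <- bio3d::read.pdb("1hel")
  ca <- bio3d::atom.select(pdb, string = "calpha", verbose = TRUE)
  print(head(pdb$atom[ca$atom, c("resid", "resno", "elety")]))
}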
monitor_combine <- function(monitorList) { if ( length(monitorList) == 1 ) { return(monitorList[[1]]) } allMonitorIDs <- unlist( lapply(monitorList, function(x) { return(x$meta$monitorID) }) ) duplicateIDs <- allMonitorIDs[which(duplicated(allMonitorIDs))] if ( length(duplicateIDs) > 0 ) { if ( length(monitorList) > 2 ) { stop("Joining of duplicate monitors requires that monitorList have only two ws_monitor objects.") } warning('Joining data with shared monitorIDs') monitorIDs1 <- setdiff(monitorList[[1]]$meta$monitorID, duplicateIDs) monitorIDs2 <- setdiff(monitorList[[2]]$meta$monitorID, duplicateIDs) mon1 <- monitor_subset(monitorList[[1]], monitorIDs=monitorIDs1) mon2 <- monitor_subset(monitorList[[2]], monitorIDs=monitorIDs2) joined_dups <- monitor_join(monitorList[[1]], monitorList[[2]], duplicateIDs) monitorList <- list(mon1, mon2, joined_dups) } metaList <- lapply(monitorList, function(x) { return(x$meta) }) dataList <- lapply(monitorList, function(x) { return(x$data) }) meta <- dplyr::bind_rows(metaList) meta <- as.data.frame(meta, stringsAsFactors=FALSE) rownames(meta) <- meta$monitorID data <- dataList[[1]] for (i in 2:length(dataList)) { data <- dplyr::full_join(data, dataList[[i]], by="datetime") } data <- as.data.frame(data, stringsAsFactors=FALSE) data <- dplyr::arrange(data, data$datetime) ws_monitor <- list(meta=meta, data=data) ws_monitor <- structure(ws_monitor, class = c("ws_monitor", "list")) return(ws_monitor) }
library(ClassDiscovery)
suppressWarnings(RNGversion("3.5.3"))
set.seed(316912)
d1 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow = 100, ncol = 10, byrow = FALSE)
d2 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow = 100, ncol = 10, byrow = FALSE)
d3 <- matrix(rnorm(100*10, rnorm(100, 0.5)), nrow = 100, ncol = 10, byrow = FALSE)
dd <- cbind(d1, d2, d3)
kind <- factor(rep(c('red', 'green', 'blue'), each = 10))
spc <- SamplePCA(dd, splitter = kind)
plot(spc, col = levels(kind))
x1 <- predict(spc, matrix(apply(d1, 1, mean), ncol = 1))
points(x1[1], x1[2], col = 'red', cex = 2)
x2 <- predict(spc, matrix(apply(d2, 1, mean), ncol = 1))
points(x2[1], x2[2], col = 'green', cex = 2)
x3 <- predict(spc, matrix(apply(d3, 1, mean), ncol = 1))
points(x3[1], x3[2], col = 'blue', cex = 2)
screeplot(spc)
rm(d1, d2, d3, dd, kind, spc, x1, x2, x3)
seq_disambiguate_IUPAC <- function(x) { check_dna_rna_aa(x) if(is_dna(x)) dic_ambig <- dic_dna()$ambiguity if(is_rna(x)) dic_ambig <- dic_rna()$ambiguity if(is_aa(x)) dic_ambig <- dic_aa()$ambiguity res <- stringr::str_split(x, "") res <- lapply(res, function(x){ if(is.na(x[1])) { return(NA) } out <- lapply(x, function(y){ dic_ambig[[y]] }) out <- base::expand.grid(out, stringsAsFactors = FALSE) out <- apply(out, 1, paste, collapse = "") return(out) }) res <- lapply(res, coerce_seq_as_input, input = x, keep_names = FALSE) names(res) <- names(x) return(res) } seq_spellout <- function(x, short = FALSE, collapse = " - "){ check_dna_rna_aa(x) if(is_dna(x)) dic <- dic_dna()$description if(is_rna(x)) dic <- dic_rna()$description if(is_aa(x) & !short) dic <- dic_aa()$description if(is_aa(x) & short) dic <- dic_aa()$short_description out <- stringr::str_split(x, "") out <- lapply(out, function(x) dic[x]) if(is.character(collapse)){ vapply(out, stringr::str_c, vector("character", 1), collapse = collapse) } else { lapply(out, `names<-`, NULL) } }
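# Illustrative usage, assuming this is the bioseq interface and its dna()
# constructor: "AYG" contains the ambiguity code Y (C or T), so it expands to
# the two unambiguous sequences ACG and ATG.
if (requireNamespace("bioseq", quietly = TRUE)) {
  print(bioseq::seq_disambiguate_IUPAC(bioseq::dna("AYG")))
}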
library(eChem) cv2 = simulateCV(n = 2, scan.rate = 0.1, t.units = 1000, x.units = 100) str(cv2) cv2_sample = sampleCV(cv2, data.reduction = 10) str(cv2_sample) plotPotential(cv2, main_title = "Potential Profile for a Simulated Cyclic Voltammogram") plotDiffusion(cv2, t = 8) plotGrid(cv2) plotDiffGrid(cv2, species = c(TRUE, FALSE, FALSE), scale.factor = 0.25) plotCV(filenames = list(cv2, cv2_sample), legend_text = c("full data", "reduced data"), line_colors = c("black", "blue"), line_types = c(1, 1)) annotateCV(cv2)
ul.reml.f <- function(samp.data, formula, samp.agg.X.pop, y.name, X.names, ...) {
  # REML fit of the unit-level model with heteroskedastic errors (var ~ k.ij^2)
  fit <- nlme::lme(formula, data = samp.data, random = ~1 | domain.id,
                   weights = nlme::varFixed(~k.ij^2))
  var <- fit$apVar
  par <- attr(var, "Pars")
  vc <- exp(par)^2
  sig.sq.v <- vc[1]
  sig.sq.e <- vc[2]
  a.i.dot <- unique(samp.data[c("domain.id", "a.i.dot", "n.i")])
  a.i.dot$alpha.i <- sig.sq.e + a.i.dot$a.i.dot * sig.sq.v
  # elements of the REML information matrix for (sig.sq.v, sig.sq.e)
  I.vv <- 0.5 * sum(a.i.dot$a.i.dot^2 * a.i.dot$alpha.i^-2)
  I.ee <- 0.5 * sum((a.i.dot$n.i - 1) * (1 / sig.sq.e^2) + a.i.dot$alpha.i^-2)
  I.ve <- 0.5 * sum(a.i.dot$a.i.dot * a.i.dot$alpha.i^-2)
  I.mat <- matrix(nrow = 2, ncol = 2)
  I.mat[1, 1] <- I.vv
  I.mat[2, 2] <- I.ee
  I.mat[2, 1] <- I.mat[1, 2] <- I.ve
  I.mat.inv <- solve(I.mat)
  V.bar.vv <- I.mat.inv[1, 1]
  V.bar.ee <- I.mat.inv[2, 2]
  V.bar.ve <- I.mat.inv[1, 2]
  samp.agg.X.pop$gamma.i <- sig.sq.v / (sig.sq.v + sig.sq.e / samp.agg.X.pop$a.i.dot)
  list(sig.sq.e = as.vector(sig.sq.e), V.bar.ee = V.bar.ee, sqrt.V.bar.ee = sqrt(V.bar.ee),
       sig.sq.v = as.vector(sig.sq.v), V.bar.vv = V.bar.vv, sqrt.V.bar.vv = sqrt(V.bar.vv),
       V.bar.ve = V.bar.ve,
       beta.hat = nlme::fixed.effects(fit), cov.beta.hat = vcov(fit),
       sd.beta.hat = coef(summary(fit))[, "Std.Error"],
       lme.fit = fit)
}
generateCRvalues <- function(pCR, settings, Npop) {
  # Draw crossover values: assign each of the Npop*updateInterval slots one of
  # the nCR discrete CR levels, with multinomial counts proportional to pCR.
  RandomVec <- c(0, cumsum(as.numeric(rmultinom(1, size = Npop * settings$updateInterval, prob = pCR))))
  cand <- sample(Npop * settings$updateInterval)
  CR <- rep(NA, Npop * settings$updateInterval)
  for (i in 1:settings$nCR) {
    Start <- RandomVec[i] + 1
    End <- RandomVec[i + 1]
    candx <- cand[Start:End]
    CR[candx] <- i / settings$nCR
  }
  CR <- matrix(CR, Npop, settings$updateInterval)
  return(CR)
}
AdaptpCR <- function(CR, delta, lCR, settings, Npop) {
  # Adapt the crossover probabilities from the jumping distances 'delta';
  # fall back to a uniform pCR when no chain has moved yet (otherwise pCR
  # would be undefined on return).
  pCR <- rep(1 / settings$nCR, settings$nCR)
  if (any(delta > 0)) {
    CR <- c(CR)
    lCROld <- lCR
    lCR <- rep(NA, settings$nCR)
    for (k in 1:settings$nCR) {
      CR_counter <- length(which(CR == k / settings$nCR))
      lCR[k] <- lCROld[k] + CR_counter
    }
    pCR <- Npop * (delta / lCR) / sum(delta)
    pCR[which(is.nan(pCR))] <- 1 / settings$nCR
    pCR <- pCR / sum(pCR)
  }
  return(list(pCR = pCR, lCR = lCR))
}
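# Illustrative usage (the settings shape is assumed from the fields accessed
# above): three discrete CR levels, five chains, a ten-step update interval.
demo.settings <- list(nCR = 3, updateInterval = 10)
demo.pCR <- rep(1 / demo.settings$nCR, demo.settings$nCR)
demo.CR <- generateCRvalues(demo.pCR, demo.settings, Npop = 5)
dim(demo.CR)    # 5 x 10 matrix with entries in {1/3, 2/3, 1}
table(demo.CR)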
bigram <- function(DataFrame) {
  if (!is.data.frame(DataFrame)) {
    stop("The input for this function is a data frame.")
  }
  text <- dplyr::quo(text)
  word <- dplyr::quo(word)
  bigram <- dplyr::quo(bigram)
  word1 <- dplyr::quo(word1)
  word2 <- dplyr::quo(word2)
  # regex for URLs and assorted twitter markup
  wu <- "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&amp;|&lt;|&gt;|RT|https"
  TD_Bigram <- DataFrame %>%
    dplyr::mutate(
      text = stringr::str_replace_all(string = text, pattern = "RT", replacement = ""),
      text = stringr::str_replace_all(string = text, pattern = "&amp", replacement = ""),
      text = stringr::str_replace_all(string = text, pattern = wu, replacement = ""),
      text = stringr::str_replace_all(string = text, pattern = "#", replacement = ""),
      text = stringr::str_replace_all(string = text, pattern = "[:punct:]", replacement = ""),
      text = stringr::str_replace_all(string = text, pattern = "[^[:alnum:]///' ]", replacement = "")) %>%
    tidytext::unnest_tokens(output = bigram, input = text, token = "ngrams", n = 2) %>%
    tidyr::separate(bigram, c("word1", "word2"), sep = " ") %>%
    # drop stop words and purely numeric tokens
    dplyr::filter(!word1 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word1, "^[0-9]+$")) %>%
    dplyr::filter(!word2 %in% tidytext::stop_words$word,
                  !stringr::str_detect(word2, "^[0-9]+$")) %>%
    dplyr::count(word1, word2, sort = TRUE)
  return(TD_Bigram)
}
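# Illustrative usage on two hypothetical tweets (not from the original
# source); needs dplyr, stringr, tidyr and tidytext installed.
if (requireNamespace("tidytext", quietly = TRUE)) {
  library(dplyr)  # provides the %>% pipe used inside bigram()
  tweets <- data.frame(
    text = c("RT data science is great fun https://t.co/abc123",
             "data science beats manual spreadsheets every time"),
    stringsAsFactors = FALSE)
  print(bigram(tweets))  # counts bigrams such as "data science"
}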
if (!isGeneric("convSC2Rad") ) { setGeneric("convSC2Rad", function(x, ...) standardGeneric("convSC2Rad")) } NULL setMethod("convSC2Rad", signature(x = "Satellite"), function(x, szen_correction = "TRUE", subset = FALSE){ band_codes <- getSatBCDECalib(x, calib = "SC") for(bcde in band_codes){ if(!is.na(getSatRADM(x, bcde))){ sensor_rad <- convSC2Rad(x = getSatDataLayer(x, bcde), mult = getSatRADM(x, bcde), add = getSatRADA(x, bcde)) layer_bcde <- paste0(bcde, "_RAD") meta_param <- getSatMetaBCDETemplate(x, bcde) meta_param$BCDE <- layer_bcde meta_param$CALIB <- "RAD" info <- sys.calls()[[1]] info <- paste0("Add layer from ", info[1], "(", toString(info[2:length(info)]), ")") x <- addSatDataLayer(x, bcde = layer_bcde, data = sensor_rad, meta_param = meta_param, info = info, in_bcde = bcde) } } if(subset == TRUE){ x <- subset(x, cid = "RAD") x@meta$LNBR <- rep(1:nrow(x@meta)) } return(x) }) setMethod("convSC2Rad", signature(x = "RasterStack"), function(x, mult, add, szen){ for(l in seq(nlayers(x))){ x[[l]] <- convSC2Rad(x[[l]], mult, add, szen) } return(x) }) setMethod("convSC2Rad", signature(x = "RasterLayer"), function(x, mult, add, szen){ if(!missing(szen)){ x <- (mult * x + add) / cos(szen * pi / 180.0) } else { x <- mult * x + add } return(x) })
fweibull <- function(P, SX, PX, X = 50) {
  # Weibull vulnerability curve re-parameterised by the pressure PX at which
  # X% of conductivity is lost and the slope SX at that point.
  X <- X[1]
  V <- (X - 100) * log(1 - X / 100)
  p <- (P / PX)^((PX * SX) / V)
  relk <- (1 - X / 100)^p
  return(relk)
}
fsigmoidal <- function(P, PX, a, X = 50) {
  X <- X[1]
  P <- -P
  PX <- -PX
  # choose b so that relative conductivity equals 1 - X/100 at P = PX
  # (for the default X = 50 this reduces to b = PX)
  b <- PX - (1 / a) * log(100 / X - 1)
  1 - 1 / (1 + exp(a * (P - b)))
}
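# Illustrative usage with hypothetical parameters (slope SX = 30 %/MPa,
# P50 = 2 MPa): trace the Weibull vulnerability curve defined above.
P.seq <- seq(0.1, 5, by = 0.1)
plot(P.seq, fweibull(P.seq, SX = 30, PX = 2), type = "l",
     xlab = "Pressure (MPa)", ylab = "Relative conductivity")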
context("Length of the list that is returned by the lss() function.") test_that("test if lss() returns a list of length 4", { expect_length(lss(), 7) })
get_1d_peaks <- function(.total_pair_cov, .subset, .num_of_peaks = 3, .adjust = 10, .peak_frame = data.frame(), .depth = 1){ if (sum(.subset) < 2){ return(data.frame(cov = NA, height = NA)) } if (nrow(.peak_frame) < .num_of_peaks & .depth < 11){ d <- density(.total_pair_cov[.subset], adjust = .adjust) selected_points <- which(diff(sign(diff(d$y))) == -2) + 1 .peak_frame <- data.frame(cov = d$x[selected_points], height = d$y[selected_points] * sum(.subset)) .peak_frame <- get_1d_peaks(.total_pair_cov, .subset, .num_of_peaks, .adjust - 1, .peak_frame, .depth + 1) } .peak_frame }
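# Illustrative usage (simulated bimodal coverage, an assumption): recover the
# two modes near 25x and 50x.
set.seed(1)
cov.sim <- c(rnorm(500, mean = 25, sd = 3), rnorm(500, mean = 50, sd = 5))
get_1d_peaks(cov.sim, .subset = rep(TRUE, length(cov.sim)), .num_of_peaks = 2)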
context("testing costing resource use") test_that("testing costing resource use", { costs_file <- system.file("extdata", "costs_resource_use.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) part_data <- ind_part_data[1, ] res <- costing_resource_use(part_data, "hospital_admission_1", list("length_1", "length_2"), list("nhs_1", "nhs_2"), "day", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", NULL, NULL) expect_equal(res$totcost_hospital_admission_1, 20, tolerance = 1e-3) res <- costing_resource_use( ind_part_data[2, ], "hospital_admission_1", list("length_1", "length_2"), list("nhs_1", "nhs_2"), "day", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", NULL, NULL) expect_equal(res$totcost_hospital_admission_1, 80, tolerance = 1e-3) expect_error(costing_resource_use( ind_part_data[2, ], "hospital_admission_1", list("length_1", "length_2"), list("nhs_1"), "day", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", NULL, NULL)) datafile <- system.file("extdata", "resource_use_hc_2_codes.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) res <- costing_resource_use( ind_part_data[1, ], "hospital_admission_1", list("length_1", "length_2"), list("nhs_1", "nhs_2"), "day", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", list(c("yes", "no"), c(1, 2)), list(c("yes", "no"), c(1, 2)) ) expect_equal(res$totcost_hospital_admission_1, 20, tolerance = 1e-3) res <- costing_resource_use( ind_part_data[1, ], "daycare", list("number"), list("nhs_visit"), "visit", unit_cost_data, "Day care admission", "UnitCost", "UnitUsed", NULL, list(c("yes", "no"), c(1, 2)) ) expect_equal(res$totcost_daycare, 45, tolerance = 1e-3) expect_error(costing_resource_use( ind_part_data[1, ], "daycare", list("number"), list("visit"), "visit", unit_cost_data, "Day care admission", "UnitCost", "UnitUsed", NULL, list(c("yes", "no"), c(1, 2)) )) res <- costing_resource_use( ind_part_data[1, ], "other_contact", list("number_of_contacts"), NULL, "visit", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL ) expect_equal(res$totcost_other_contact, 39.23, tolerance = 1e-3) res <- costing_resource_use( ind_part_data[2, ], "other_contact", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL ) expect_equal(res$totcost_other_contact, 126, tolerance = 1e-3) expect_error(costing_resource_use( NULL, "other_contact", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) expect_error(costing_resource_use( ind_part_data[2, ], NULL, list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) expect_error(costing_resource_use( ind_part_data[2, ], "other_contact", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, NULL, "UnitUsed", NULL, NULL )) datafile <- system.file("extdata", "resource_use_hc_2_noname.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) expect_error(costing_resource_use( ind_part_data[2, ], "other_contact", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) costs_file <- system.file("extdata", "costs_resource_use_nounitcostcol.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2.csv", package = "packDAMipd") 
ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) expect_error(costing_resource_use( ind_part_data[2, ], "hospital_admission_1", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) costs_file <- system.file("extdata", "costs_resource_use_nonameres.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) expect_error(costing_resource_use( ind_part_data[2, ], "hospital_admission_1", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) costs_file <- system.file("extdata", "costs_resource_use_nohos.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) expect_error(costing_resource_use( ind_part_data[2, ], "hospital_admission_1", list("number_of_contacts"), NULL, "hour", unit_cost_data, NULL, "UnitCost", "UnitUsed", NULL, NULL )) costs_file <- system.file("extdata", "costs_resource_use.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2_unitlength.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) res <- costing_resource_use( ind_part_data, "hospital_admission_1", list("length_1", "length_2"), list("nhs_1", "nhs_2"), "unit_length", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", NULL, NULL ) expect_equal(res$totcost_hospital_admission_1, c(20, 80, NA, NA, NA)) costs_file <- system.file("extdata", "costs_resource_use_unitexpressed_notright.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_2_unitlength.csv", package = "packDAMipd") ind_part_data <- load_trial_data(datafile) unit_cost_data <- load_trial_data(costs_file) expect_error(costing_resource_use( ind_part_data, "hospital_admission_1", list("length_1", "length_2"), list("nhs_1", "nhs_2"), "unit_length", unit_cost_data, "Inpatient hospital admissions", "UnitCost", "UnitUsed", NULL, NULL )) }) context("testing extracting unit cost matching hrg code") test_that("testing extracting unit cost matching hrg code", { ref_cost_data_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019.csv", package = "packDAMipd") result <- get_cost_ip_dc_hrg("AA22C", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost") expect_equal(result, 5053, tol = 1e-1) expect_error(get_cost_ip_dc_hrg(NULL, ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_hrg("AA22C", NULL, "Currency_Code", "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_hrg("AA22C", ref_cost_data_file, NULL, "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_hrg("AA22C", ref_cost_data_file, "Currency_Code", NULL, "EL")) result <- get_cost_ip_dc_hrg("AA22C", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", NULL) expect_equal(result, 5053, tol = 1e-1) expect_error(get_cost_ip_dc_hrg("AA", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "EL")) }) context("testing extracting unit cost matching description") test_that("testing extracting unit cost matching description", { ref_cost_data_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019.csv", package = "packDAMipd") result 
<- get_cost_ip_dc_description("Cerebrovascular Accident", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "EL") expect_equal(result, 3530, tol = 1e-1) expect_error(get_cost_ip_dc_description(NULL, ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_description("Cerebrovascular Accident", NULL, "Currency_Description", "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_description("Cerebrovascular Accident", ref_cost_data_file, NULL, "National_Average_Unit_Cost", "EL")) expect_error(get_cost_ip_dc_description("Cerebrovascular Accident", ref_cost_data_file, "Currency_Description", NULL, "EL")) result <- get_cost_ip_dc_description("Cerebrovascular Accident", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", NULL) expect_equal(result, 3530, tol = 1e-1) expect_error(get_cost_ip_dc_description("hello", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "EL")) }) context("testing costing inpatient admission") test_that("testing costing inpatient admission", { costs_file <- system.file("extdata", "patient_adm_cost_EL.xlsx", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) unit_cost_data <- packDAMipd::load_trial_data(costs_file) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 5053, tol = 1e-2) expect_error(costing_inpatient_daycase_admission(NULL, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = NULL, description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = NULL, elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 5053, tol = 1e-2) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", NULL, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, 
number_use_ip_admi = "number_use", elective_col = NULL, unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 5053, tol = 1e-2) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = NULL, cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = NULL)) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = NULL, number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "ad")) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = NULL, description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 3530, tol = 1e-2) datafile <- system.file("extdata", "resource_use_hc_ip_nocol.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) datafile <- system.file("extdata", "resource_use_hc_ip_nonumuse.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) datafile <- system.file("extdata", "resource_use_hc_ip_nocols.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) costs_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_error.xlsx", package = "packDAMipd") datafile <- 
system.file("extdata", "resource_use_hc_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) unit_cost_data <- packDAMipd::load_trial_data(costs_file, sheet = "EL") expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) costs_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_hc_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) unit_cost_data <- packDAMipd::load_trial_data(costs_file, sheet = "EL") expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = NULL, description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) datafile <- system.file("extdata", "resource_use_hc_ip_numuse_wrong.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) datafile <- system.file("extdata", "resource_use_hc_ip_numuse_missing.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 3530, tol = 1e-2) result <- costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = "Description", number_use_ip_admi = "number_use", elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission") expect_equal(result$totcost_ip_admission[1], 5053, tol = 1e-2) datafile <- system.file("extdata", "resource_use_hc_ip_el_missing.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, 
hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = NULL, elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = "Description", number_use_ip_admi = NULL, elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) datafile <- system.file("extdata", "resource_use_hc_ip_noelcol.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = NULL, descrip_ip_admi = "Description", number_use_ip_admi = NULL, elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) expect_error(costing_inpatient_daycase_admission(ind_part_data, hrg_code_ip_admi = "HRGcode", descrip_ip_admi = "Description", number_use_ip_admi = NULL, elective_col = "EL", unit_cost_data, hrg_code_col = "Currency_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "admission")) }) context("testing extracting unit cost for A&E matching code") test_that("testing extracting unit cost for A&E matching code", { ref_cost_data_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_AandE.csv", package = "packDAMipd") re <- get_cost_AandE_code("VB02Z", "T01A", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "Service_Code") expect_equal(re, 452, tol = 1e-2) ref_cost_data_file <- system.file("extdata", "NHS_costs_2019_AandE.xlsx", package = "packDAMipd") re <- get_cost_AandE_code("VB02Z", "T01A", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "Service_Code", sheet = "AE") expect_equal(re, 452, tol = 1e-2) expect_error(get_cost_AandE_code("VB02Z", "T00A", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "Service_Code", sheet = "AE")) expect_error(get_cost_AandE_code(NULL, "T01A", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_code("VB02Z", NA, ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_code("VB02Z", "T01A", NULL, "Currency_Code", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_code("VB02Z", "T01A", ref_cost_data_file, NA, "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_code("VB02Z", "T01A", ref_cost_data_file, "Currency_Code", NA, "Service_Code")) expect_error(get_cost_AandE_code("VB02Z", "T01A", ref_cost_data_file, "Currency_Code", "National_Average_Unit_Cost", NULL)) }) context("testing extracting unit cost matching description") test_that("testing extracting unit cost matching description", { ref_cost_data_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_AandE.csv", package = "packDAMipd") re <- get_cost_AandE_description("Emergency Medicine", "T01A", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "Service_Code") expect_equal(re, 265.33, tol = 1e-2) ref_cost_data_file <- system.file("extdata", "NHS_costs_2019_AandE.xlsx", package = "packDAMipd") re <- 
get_cost_AandE_description("Emergency Medicine", "T01A", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "Service_Code", sheet = "AE") expect_equal(re, 265.33, tol = 1e-2) expect_error(get_cost_AandE_description("Emergency Medicine", "T00A", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "Service_Code", sheet = "AE")) expect_error(get_cost_AandE_description(NULL, "T01A", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_description("Emergency Medicine", NULL, ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_description("Emergency Medicine", "T01A", NULL, "Currency_Description", "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_description("Emergency Medicine", "T01A", ref_cost_data_file, NULL, "National_Average_Unit_Cost", "Service_Code")) expect_error(get_cost_AandE_description("Emergency Medicine", "T01A", ref_cost_data_file, "Currency_Description", NULL, "Service_Code")) expect_error(get_cost_AandE_description("Emergency Medicine", "T01A", ref_cost_data_file, "Currency_Description", "National_Average_Unit_Cost", NULL)) }) context("testing extracting unit cost matching description") test_that("testing extracting unit cost matching description", { costs_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_AandE.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_ae_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) unit_cost_data <- packDAMipd::load_trial_data(costs_file) result <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(result$totcost_AE_Admission[1], 672, tol = 1e-2) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = NULL, type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "desc", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = NULL, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in 
= "attendance")) result <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = NULL, type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(result$totcost_AE_Admission[1], 672, tol = 1e-2) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = NULL, unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = NULL, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = NULL, description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = NULL, type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = NULL, cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "ss")) result <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "desc", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(result$totcost_AE_Admission[1], 754, tol = 1e-2) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "dd", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "dd", descrip_ae = NULL, number_use_ae = NULL, type_admit_ae = "type_admit", unit_cost_data = 
unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) datafile <- system.file("extdata", "resource_use_ae_ip_notypecol.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) datafile <- system.file("extdata", "resource_use_ae_ip_nonumusecol.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) costs_file <- system.file("extdata", "NHS_costs_2019_AandE_nocode.csv", package = "packDAMipd") datafile <- system.file("extdata", "resource_use_ae_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) unit_cost_data <- packDAMipd::load_trial_data(costs_file) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) costs_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_AandE.csv", package = "packDAMipd") unit_cost_data <- packDAMipd::load_trial_data(costs_file) datafile <- system.file("extdata", "resource_use_ae_ip_lengthtypecodediff.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "desc", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) datafile <- system.file("extdata", "resource_use_ae_ip_extranumuse.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) expect_error(costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, 
descrip_ae = "desc", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance")) datafile <- system.file("extdata", "resource_use_ae_ip_lessnumuse.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) res <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = NULL, unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(res$totcost_AE_Admission[1], 671, tol = 1e-2) res <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "desc", number_use_ae = "number_use", type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(res$totcost_AE_Admission[1], 754, tol = 1e-2) datafile <- system.file("extdata", "resource_use_ae_ip.csv", package = "packDAMipd") ind_part_data <- packDAMipd::load_trial_data(datafile) costs_file <- system.file("extdata", "National_schedule_of_NHS_costs_2019_AandE.csv", package = "packDAMipd") unit_cost_data <- packDAMipd::load_trial_data(costs_file) res <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = "code", descrip_ae = NULL, number_use_ae = NULL, type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(res$totcost_AE_Admission[1], 671, tol = 1e-2) res <- costing_AandE_admission(ind_part_data = ind_part_data, code_ae = NULL, descrip_ae = "desc", number_use_ae = NULL, type_admit_ae = "type_admit", unit_cost_data = unit_cost_data, code_col = "Currency_Code", type_admit_col = "Service_Code", description_col = "Currency_Description", unit_cost_col = "National_Average_Unit_Cost", cost_calculated_in = "attendance") expect_equal(res$totcost_AE_Admission[1], 754, tol = 1e-2) })
DSM_C <- function(train, test, iterations=100, percentage=10, alpha_0=0.1, seed=-1){ alg <- RKEEL::R6_DSM_C$new() alg$setParameters(train, test, iterations, percentage, alpha_0, seed) return (alg) } R6_DSM_C <- R6::R6Class("R6_DSM_C", inherit = ClassificationAlgorithm, public = list( iterations = 100, percentage = 10, alpha_0 = 0.1, seed = -1, setParameters = function(train, test, iterations=100, percentage=10, alpha_0=0.1, seed=-1){ super$setParameters(train, test) stopText <- "" if((hasMissingValues(train)) || (hasMissingValues(test))){ stopText <- paste0(stopText, "Dataset has missing values and the algorithm does not accept it.\n") } if(stopText != ""){ stop(stopText) } self$iterations <- iterations self$percentage <- percentage self$alpha_0 <- alpha_0 if(seed == -1) { self$seed <- sample(1:1000000, 1) } else { self$seed <- seed } } ), private = list( jarName = "IG-DSM.jar", algorithmName = "DSM-C", algorithmString = "DSM", getParametersText = function(){ text <- "" text <- paste0(text, "seed = ", self$seed, "\n") text <- paste0(text, "number_of_iterations = ", self$iterations, "\n") text <- paste0(text, "percentage_respect_training_size = ", self$percentage, "\n") text <- paste0(text, "alpha_0 = ", self$alpha_0, "\n") return(text) } ) )
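# Usage sketch (not from the source): assumes a working RKEEL/Java setup,
# KEEL-style data frames with the class label in the last column, and the
# run() method inherited from ClassificationAlgorithm; the iris split is
# purely illustrative.
idx <- sample(nrow(iris), 100)
alg <- DSM_C(train = iris[idx, ], test = iris[-idx, ], iterations = 50, seed = 7)
alg$run()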
tornado.glm <- function(model, type="PercentChange", alpha=0.10, dict=NA, ...) { extraArguments <- list(...) ret <- .create_plot_data(model = model, modeldata = model$data, type = type, alpha = alpha, dict = dict) return(structure(list(data = list(plotdat = ret$plotdat, pmeans = ret$pmeans, factordat = ret$factor_plotdat), type = "glm", family = stats::family(model)$family), class = "tornado_plot")) }
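# Usage sketch (not from the source): assumes the package exports a tornado()
# generic that dispatches to tornado.glm(), and that the internal
# .create_plot_data() helper is available; mtcars is purely illustrative.
fit <- glm(mpg ~ cyl + wt + hp, data = mtcars, family = gaussian())
tp <- tornado(fit, type = "PercentChange", alpha = 0.10)
str(tp$data$plotdat)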
library(dplyr)

wages_spread <- wages %>% features(ln_wages, feat_spread)

test_that("feat_spread returns the right names", {
  expect_equal(names(wages_spread), c("id", "var", "sd", "mad", "iqr"))
})

test_that("feat_spread returns the right dimensions", {
  expect_equal(dim(wages_spread), c(888, 5))
})

test_that("feat_spread returns all ids", {
  expect_equal(n_distinct(wages_spread$id), 888)
})
.minCAGE <- function(..., centroids, yFinest, ySource, finestOnSource, sourceAreas, finestAreas, gL, gU, localCluster) { if (!is.null(x = localCluster)) { message("obtaining kmeans in parallel") result <- parallel::parSapply(cl = localCluster, X = 1L:ncol(x = yFinest), FUN = .allClusterKmeans, centroids = centroids, gL = gL, gU = gU, yFinest = yFinest, ySource = ySource, finestOnSource = finestOnSource, sourceAreas = sourceAreas, finestAreas = finestAreas) tst <- which(result <= {min(result)+1e-8}, arr.ind = TRUE) whichGibbs <- tst[1L,2L] whichG <- tst[1L,1L] minCAGE <- result[whichG, whichGibbs] } else { minCAGE <- Inf whichGibbs <- 0L whichG <- 0L message("kmeans for Gibbs sample:") for (i in 1L:ncol(x = yFinest)) { message(i, " ", appendLF = FALSE) if (i %% 10L == 0L) message("") result <- .allClusterKmeans(x = i, centroids = centroids, gL = gL, gU = gU, yFinest = yFinest, ySource = ySource, finestOnSource = finestOnSource, sourceAreas = sourceAreas, finestAreas = finestAreas) if (min(result) < minCAGE) { whichGibbs <- i whichG <- which.min(x = result) minCAGE <- result[whichG] } } message("") } if (is.infinite(x = minCAGE)) stop("infinite result in CAGE/DCAGE", call. = FALSE) Jmat <- cbind(centroids, yFinest[,whichGibbs]) IDX <- tryCatch(expr = stats::kmeans(x = Jmat, centers = {gL:gU}[whichG], iter.max = 10000L), error = function(e) { stop("unable to obtain clustering\n", e$message, call. = FALSE) }) cageForIndex <- .cage(yFinest = yFinest, ySource = ySource, finestOnSource = finestOnSource, idxit = IDX$cluster, sourceAreas = sourceAreas, finestAreas = finestAreas) return( list("minCAGE" = mean(x = cageForIndex), "CAGETrack" = cageForIndex, "cluster" = IDX) ) } .allClusterKmeans <- function(x, ..., yFinest, ySource, finestOnSource, sourceAreas, finestAreas, centroids, gL, gU) { Jmat <- cbind(centroids, yFinest[,x]) IDX <- tryCatch(expr = stats::kmeans(x = Jmat, centers = gU, iter.max = 10000L), error = function(e) { stop("unable to obtain clustering\n", e$message, call. = FALSE) }) cageForIndex <- .cage(yFinest = yFinest, ySource = ySource, finestOnSource = finestOnSource, sourceAreas = sourceAreas, finestAreas = finestAreas, idxit = IDX$cluster) cage <- mean(x = cageForIndex) i <- gU - 1L while (i >= gL) { IDX <- tryCatch(expr = stats::kmeans(x = Jmat, centers = IDX$centers[1L:i,1L:3L,drop=FALSE], iter.max = 10000L), error = function(e) { msg <- paste0("unable to obtain clustering\n", e$message) stop(msg) }) cageForIndex <- .cage(yFinest = yFinest, ySource = ySource, finestOnSource = finestOnSource, sourceAreas = sourceAreas, finestAreas = finestAreas, idxit = IDX$cluster) cage <- c(mean(x = cageForIndex), cage) i <- i - 1L } return( cage ) }
cat("Installing remotes\n") if (!requireNamespace("remotes", quietly = TRUE)) install.packages("remotes") cat("Installing outdated dependencies\n") remotes::install_deps(dependencies = TRUE)
nribin <- function (event=NULL, mdl.std=NULL, mdl.new=NULL, z.std=NULL, z.new=NULL, p.std=NULL, p.new=NULL, updown='category', cut=NULL, link='logit', niter=1000, alpha=0.05, msg=TRUE) { flag.mdl = !is.null(mdl.std) && !is.null(mdl.new) flag.prd = !is.null(z.std) && !is.null(z.new) flag.rsk = !is.null(p.std) && !is.null(p.new) if (flag.mdl) { if (is.null(event)) event = as.numeric(mdl.std$y) if (is.null(mdl.std$x) || is.null(mdl.new$x)) stop("\n\nmodel object does not contain predictors. pls set x=TRUE for model calculation.\n\n") z.std = mdl.std$x[,-1] z.new = mdl.new$x[,-1] link = mdl.std$family[[2]] mdl.std = glm(event ~ ., family=binomial(link), data=as.data.frame(cbind(event, z.std))) mdl.new = glm(event ~ ., family=binomial(link), data=as.data.frame(cbind(event, z.new))) } else if (flag.prd) { mdl.std = glm(event ~ ., family=binomial(link), data=as.data.frame(cbind(event, z.std))) mdl.new = glm(event ~ ., family=binomial(link), data=as.data.frame(cbind(event, z.new))) message("\nSTANDARD prediction model:") print(summary(mdl.std)$coef) message("\nNEW prediction model:") print(summary(mdl.new)$coef) } else if (!flag.mdl && !flag.prd && !flag.rsk) { stop("\n\neither one of 'event, z.std, z.new', 'event, p.std, p.new', and 'mdl.std, mdl.new' should be specified.\n\n") } if (is.null(cut)) stop("\n\n'cut' is empty") objs = list(mdl.std, mdl.new, z.std, z.new, p.std, p.new) wk = get.uppdwn.bin(event, objs, flag.mdl, flag.prd, flag.rsk, updown, cut, link, msg=msg) upp = wk[[1]] dwn = wk[[2]] ret = list(mdl.std=mdl.std, mdl.new=mdl.new, p.std=wk[[3]], p.new=wk[[4]], up=upp, down=dwn, rtab=wk[[5]], rtab.case=wk[[6]], rtab.ctrl=wk[[7]]) message("\nNRI estimation:") est = nribin.count.main(event, upp, dwn) message("Point estimates:") result = data.frame(est) names(result) = 'Estimate' row.names(result) = c('NRI','NRI+','NRI-','Pr(Up|Case)','Pr(Down|Case)','Pr(Down|Ctrl)','Pr(Up|Ctrl)') print(result) if (niter > 0) { message("\nNow in bootstrap..") ci = rep(NA, 14) N = length(event) samp = matrix(NA, niter, 7) colnames(samp) = c('NRI','NRI+','NRI-','Pr(Up|Case)','Pr(Down|Case)','Pr(Down|Ctrl)','Pr(Up|Ctrl)') for (b in 1:niter) { f = as.integer(runif(N, 0, N)) + 1 objs = list(mdl.std, mdl.new, z.std[f,], z.new[f,], p.std[f], p.new[f]) wk = get.uppdwn.bin(event[f], objs, flag.mdl, flag.prd, flag.rsk, updown, cut, link, msg=FALSE) upp = wk[[1]] dwn = wk[[2]] samp[b,] = nribin.count.main(event[f], upp, dwn) } ret = c(ret, list(bootstrapsample=samp)) ci = as.numeric(apply(samp, 2, quantile, c(alpha/2, 1-alpha/2), na.rm=TRUE, type=2)) se = as.numeric(apply(samp, 2, sd)) message("\nPoint & Interval estimates:") result = as.data.frame(cbind(est, se, matrix(ci, ncol=2, byrow=TRUE))) names(result) = c('Estimate', 'Std.Error', 'Lower', 'Upper') row.names(result) = c('NRI','NRI+','NRI-','Pr(Up|Case)','Pr(Down|Case)','Pr(Down|Ctrl)','Pr(Up|Ctrl)') print(result) } invisible(c(list(nri=result), ret)) }
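# Usage sketch (simulated data, not from the source): category-based NRI for
# a new model that adds z3 to the standard predictors; relies on the
# package-internal get.uppdwn.bin() and nribin.count.main() helpers.
set.seed(1)
n <- 300
z1 <- rnorm(n); z2 <- rnorm(n); z3 <- rnorm(n)
ev <- rbinom(n, 1, plogis(-1 + z1 + 0.8 * z3))
res <- nribin(event = ev, z.std = cbind(z1, z2), z.new = cbind(z1, z2, z3),
              cut = 0.3, niter = 200)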
ci.boot <- function(x, method = "all", sigma.t = NULL, conf = 0.95){
  indices <- c("all", "norm", "basic", "perc", "BCa", "student")
  method <- match.arg(method, indices)
  mes <- NULL
  B <- x$dist
  se <- x$res[4]
  est <- x$res[1]
  alpha <- 1 - conf
  R <- length(B)
  uZ <- round((1 - (alpha/2)) * R, 0)
  lZ <- round((alpha/2) * R, 0)
  sB <- sort(B)
  if(method == "all" | method == "norm"){
    z <- qnorm(1 - (alpha/2))
    nci <- c(est - z * se, est + z * se)
  } else nci <- c(NA, NA)
  if(method == "all" | method == "basic"){
    bci <- c(2 * est - sB[uZ], 2 * est - sB[lZ])
  } else bci <- c(NA, NA)
  if(method == "all" | method == "perc"){
    pci <- c(sB[lZ], sB[uZ])
  } else pci <- c(NA, NA)
  if(method == "all" | method == "BCa"){
    pv <- pseudo.v(x$data, statistic = x$statistic)
    jk <- mean(pv[, 1])
    a <- sum((jk - pv[, 1])^3)/(6 * ((sum((jk - pv[, 1])^2))^1.5))
    p <- length(sB[sB > est])/R
    z0 <- qnorm(1 - p)
    zU <- (z0 - qnorm(alpha/2))/(1 - a * (z0 - qnorm(alpha/2))) + z0
    zL <- (z0 + qnorm(alpha/2))/(1 - a * (z0 + qnorm(alpha/2))) + z0
    pL <- pnorm(zL)
    pU <- pnorm(zU)
    uZ <- round(pL * R, 0)
    lZ <- round(pU * R, 0)
    # keep both adjusted tail indices inside 1:R
    uZ <- ifelse(uZ == 0, 1, uZ)
    lZ <- ifelse(lZ == 0, 1, lZ)
    bcci <- c(sB[uZ], sB[lZ])
  } else bcci <- c(NA, NA)
  if(method == "all" | method == "student"){
    if(is.null(sigma.t)){
      mes <- "Bootstrap SEs req'd for studentized intervals"
      sci <- c(NA, NA)
    } else {
      # bootstrap-t interval: use the empirical quantiles of the studentized
      # replicates; qt() is not appropriate here (it needs degrees of freedom)
      tstar <- sort((B - est)/sigma.t)
      uZt <- round((1 - (alpha/2)) * R, 0)
      lZt <- round((alpha/2) * R, 0)
      sci <- c(est - tstar[uZt] * se, est - tstar[lZt] * se)
    }
  } else sci <- c(NA, NA)
  head <- paste(conf * 100, "%", " Bootstrap confidence interval(s)", sep = "")
  ends <- c(paste(as.character(c((1 - conf)/2, 1 - ((1 - conf)/2)) * 100), "%", sep = ""))
  res <- matrix(nrow = 5, data = rbind(nci, bci, pci, bcci, sci),
                dimnames = list(c("Normal", "Basic", "Percentile", "BCa", "Studentized"), ends))
  out <- list(head = head, res = res, mes = mes,
              a = ifelse(method == "BCa" | method == "all", a, 0))
  class(out) <- "ciboot"
  out
}
print.ciboot <- function(x, digits = max(3, getOption("digits")), ...) {
  cat("\n")
  cat(x$head, "\n\n")
  print(x$res, digits = digits, justify = "center")
  cat("\n")
  if(!is.null(x$mes)) cat(x$mes, "\n")
  invisible(x)
}
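# Usage sketch (hand-built input, not from the source): ci.boot() reads $dist
# (the bootstrap replicates) and $res (estimate in [1], SE in [4]); $data and
# $statistic are only needed for BCa intervals, via pseudo.v() (defined
# elsewhere). Here, a bootstrapped mean with a percentile interval.
set.seed(1)
dat <- rnorm(50, mean = 10)
reps <- replicate(2000, mean(sample(dat, replace = TRUE)))
boot_obj <- list(dist = reps, res = c(mean(dat), NA, NA, sd(reps)),
                 data = dat, statistic = mean)
ci.boot(boot_obj, method = "perc")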
suppressMessages(library(LatticeKrig)) options(echo = FALSE) test.for.zero.flag <- 1 set.seed(122) lambda <- 0.1 x <- matrix(runif(20 * 2), 20, 2) nObs <- nrow(x) LKinfo <- LKrigSetup(x, NC = 4, nlevel = 1, alpha = 1, lambda = lambda, a.wght = 5, NC.buffer = 1) W <- LKrig.basis(x, LKinfo) W<- as.matrix(W) x0<- matrix(runif( 5* 2), 5, 2) W0 <- LKrig.basis(x0, LKinfo) W0<- as.matrix(W0) T.matrix <- do.call(LKinfo$fixedFunction, c(list(x = x, distance.type = LKinfo$distance.type), LKinfo$fixedFunctionArgs)) T0 <- do.call(LKinfo$fixedFunction, c(list(x = x0, distance.type = LKinfo$distance.type), LKinfo$fixedFunctionArgs)) A <- matrix(runif(nObs^2), nObs, nObs) X <- as.spam(A %*% W) U <- A %*% T.matrix c.true <- runif(LKinfo$latticeInfo$m) y <- X %*% c.true obj <- LKrig(x, y, LKinfo = LKinfo, X = X, U = U) rho<- obj$rho.MLE Q <- LKrig.precision(LKinfo) Sigma <- solve(Q) Mlambda <- solve(as.matrix(X %*% Sigma %*% t(X) + lambda * diag(1, nObs))) PMatrix <- solve(t(U) %*% Mlambda %*% U) %*% t(U) %*% Mlambda d.check <- PMatrix %*% y test.for.zero(d.check, obj$d.coef, tag = "checking d coef with inverse X") findCoef <- solve(t(X) %*% X + lambda * Q) %*% t(X) %*% (diag(1, nObs) - U %*% PMatrix) c.check <- findCoef %*% y test.for.zero(c.check, obj$c.coef, tag = "checking c coef with inverse X") AMatrix<- W0 %*% findCoef + T0 %*% PMatrix hold1<- AMatrix%*%y hold2<- predict( obj, xnew=x0) test.for.zero(hold1, hold2, tag = "checking A matrix coef with inverse X") rho<- obj$rho.MLE sigma<- obj$sigma.MLE Q <- LKrig.precision(LKinfo) S <- rho*solve(Q) temp0<- W0%*% (S)%*%t(W0) temp1<- W0%*% (S)%*% t( AMatrix%*%X) temp2<- AMatrix%*%( X%*%S%*%t(X) + diag( sigma^2 ,nObs) )%*%t(AMatrix) covTest<- temp0 - temp1 - t(temp1) + temp2 test1.se <- predictSE(obj, xnew = x0) test.for.zero( sqrt(diag( covTest)), test1.se, tag="SE inverse from first formulas" )
convert_factors <- function(x) {
  # sum_() is assumed to be the package's NA-safe sum helper, defined elsewhere
  fcts <- vap_lgl(x, is.factor)
  if (sum_(fcts) == 0L) {
    return(x)
  }
  x[fcts] <- lapply(x[fcts], as.character)
  x
}
vap_lgl <- function(.x, .f) {
  vapply(.x, rlang::as_closure(.f), FUN.VALUE = logical(1), USE.NAMES = FALSE)
}
vap_dbl <- function(.x, .f) {
  vapply(.x, rlang::as_closure(.f), FUN.VALUE = numeric(1), USE.NAMES = FALSE)
}
vap_int <- function(.x, .f) {
  vapply(.x, rlang::as_closure(.f), FUN.VALUE = integer(1), USE.NAMES = FALSE)
}
vap_fct <- function(.x, .f) {
  vapply(.x, rlang::as_closure(.f), FUN.VALUE = factor(1), USE.NAMES = FALSE)
}
vap_chr <- function(.x, .f) {
  vapply(.x, rlang::as_closure(.f), FUN.VALUE = character(1), USE.NAMES = FALSE)
}
tw_or_usr <- function(x) {
  num_tweets <- sum(c("text", "source", "mentions_screen_name",
    "retweet_count", "favorite_count", "hashtags") %in% names(x), na.rm = TRUE)
  num_users <- sum(c("name", "followers_count", "favourites_count",
    "friends_count", "account_created_at", "description") %in% names(x), na.rm = TRUE)
  if (num_tweets > 3 && num_users > 3) {
    return("both")
  }
  if (num_tweets > num_users) {
    return("tweets")
  }
  if (num_users > num_tweets) {
    return("users")
  }
  # equal counts at or below the threshold fall through and return NULL
  invisible(NULL)
}
hourofweekday <- function(x) {
  # weekday index (Mon = 1) plus the fraction of that day elapsed
  hms <- round(timeofday(x), 0)
  wd <- format(x, "%a")
  wd <- as.integer(factor(
    wd, levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"))) * 24
  (wd + hms) / 24
}
timeofday <- function(x) {
  # fractional hour of day from a "YYYY-MM-DD HH:MM:SS" timestamp;
  # seconds contribute 1/3600 of an hour each
  h <- as.numeric(substr(x, 12, 13))
  m <- as.numeric(substr(x, 15, 16)) / 60
  s <- as.numeric(substr(x, 18, 19)) / 3600
  h + m + s
}
count_mean <- function(x) {
  if (length(x) == 0) return(0)
  if ((length(x) == 1L && is.na(x)) || all(is.na(x))) return(0)
  x <- table(x)
  x <- as.integer(x) - 1L
  mean(x, na.rm = TRUE)
}
nchar_ <- function(x) {
  ifelse(is.na(x), 0, nchar(x))
}
ndigit_ <- function(x) {
  ifelse(is.na(x), 0, nchar(gsub("\\D", "", x)))
}
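# Quick checks (illustrative): assumes "YYYY-MM-DD HH:MM:SS" timestamps and an
# English locale for the "%a" weekday abbreviations used by hourofweekday().
timeofday("2020-05-01 13:30:00")                  # 13.5
hourofweekday(as.POSIXct("2020-05-01 13:30:00"))  # Friday, early afternoon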
rlw <- function (dataMatrix, dv, ivlist){
  ivlist <- unlist(ivlist)
  # keep only the requested predictors that are actually columns of dataMatrix
  ilist <- match(ivlist, colnames(dataMatrix))
  ilist <- na.omit(ilist)
  ilist <- colnames(dataMatrix)[ilist]
  dataMatrix <- na.omit(dataMatrix[, c(dv, ilist)])
  k <- length(ilist)
  ds <- matrix(ncol = k + 1, nrow = nrow(dataMatrix))
  ds[, 1] <- dataMatrix[, dv]
  for (i in 1:k){
    ds[, i + 1] <- dataMatrix[, ilist[i]]
  }
  colnames(ds) <- c(dv, ilist)
  rxx <- cor(ds[, 2:(k + 1)])           # predictor intercorrelations
  rxy <- cor(ds[, 2:(k + 1)], ds[, 1])  # predictor-criterion correlations
  evm <- eigen(rxx)
  ev <- evm$values
  evec <- evm$vectors
  d <- diag(ev)
  delta <- sqrt(d)
  lambda <- evec %*% delta %*% t(evec)  # rxx^(1/2): orthogonal counterparts
  lambdasq <- lambda^2
  beta <- solve(lambda) %*% rxy         # regress the criterion on the orthogonal set
  rawrl <- lambdasq %*% beta^2          # raw relative weights
  return(rawrl)
}
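# Usage sketch (illustrative data, not from the source): raw relative weights
# for three correlated predictors of mpg; rows with missing values are
# dropped and predictors not found in dataMatrix are ignored.
rlw(mtcars, dv = "mpg", ivlist = c("wt", "hp", "disp"))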
prepanel.rootogram <- function(x, y = table(x),
                               dfun = NULL,
                               transformation = sqrt,
                               hang = TRUE,
                               probability = TRUE,
                               ...)
{
    stopifnot(is.function(dfun))
    if (probability) y <- y / sum(y)
    yy <- transformation(y)
    xx <- sort(unique(x))
    dotArgs <- list(...)
    dfunArgs <- names(formals(dfun))
    ## pass on only those arguments that dfun actually accepts
    if (!("..." %in% dfunArgs))
        dotArgs <- dotArgs[names(dotArgs) %in% dfunArgs[-1]]
    dd <- transformation(do.call(dfun, c(list(xx), dotArgs)))
    list(xlim = range(xx),
         ylim = if (hang) range(dd, dd - yy, 0) else range(dd, yy, 0),
         dx = diff(xx),
         dy = diff(dd))
}
panel.rootogram <- function(x, y = table(x),
                            dfun = NULL,
                            col = plot.line$col,
                            lty = plot.line$lty,
                            lwd = plot.line$lwd,
                            alpha = plot.line$alpha,
                            transformation = sqrt,
                            hang = TRUE,
                            probability = TRUE,
                            type = "l",
                            pch = 16,
                            ...)
{
    plot.line <- trellis.par.get("plot.line")
    ref.line <- trellis.par.get("reference.line")
    stopifnot(is.function(dfun))
    if (probability) y <- y / sum(y)
    yy <- transformation(y)
    xx <- sort(unique(x))
    dotArgs <- list(...)
    dfunArgs <- names(formals(dfun))
    ## pass on only those arguments that dfun actually accepts
    if (!("..." %in% dfunArgs))
        dotArgs <- dotArgs[names(dotArgs) %in% dfunArgs[-1]]
    dd <- transformation(do.call(dfun, c(list(xx), dotArgs)))
    panel.abline(h = 0,
                 col = ref.line$col,
                 lty = ref.line$lty,
                 lwd = ref.line$lwd,
                 alpha = ref.line$alpha)
    panel.segments(xx, if (hang) dd else 0,
                   xx, if (hang) (dd - yy) else yy,
                   col = col, lty = lty, lwd = lwd,
                   alpha = alpha, ...)
    if ("l" %in% type) panel.lines(xx, dd)
    if ("p" %in% type) panel.points(xx, dd, pch = pch)
}
rootogram <- function(x, ...) UseMethod("rootogram")
rootogram.formula <- function(x, data = parent.frame(),
                              ylab = expression(sqrt(P(X == x))),
                              prepanel = prepanel.rootogram,
                              panel = panel.rootogram,
                              ...,
                              probability = TRUE)
{
    if (!probability && missing(ylab)) ylab <- NULL
    if (length(x) == 2)
        foo <- densityplot(x, data,
                           prepanel = prepanel, panel = panel,
                           ylab = ylab, ...,
                           probability = probability)
    else
        foo <- xyplot(x, data,
                      prepanel = prepanel, panel = panel,
                      ylab = ylab, ...,
                      probability = probability)
    foo$call <- sys.call(sys.parent())
    foo$call[[1]] <- quote(rootogram)
    foo
}
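# Usage sketch (illustrative, not from the source): hanging rootogram of
# Poisson counts against the fitted pmf. lattice must be attached; passing
# dfun as a closure means no extra arguments need to reach it through `...`.
library(lattice)
set.seed(1)
counts <- rpois(500, lambda = 4)
rootogram(~ counts, dfun = function(q) dpois(q, lambda = 4))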
link_records <- function(attribute, blocking_attribute = NULL, cmp_func = diyar::exact_match,
                         attr_threshold = 1, probabilistic = TRUE,
                         m_probability = .95, u_probability = NULL,
                         score_threshold = 1, repeats_allowed = FALSE,
                         permutations_allowed = FALSE, data_source = NULL,
                         ignore_same_source = TRUE, display = "none"){
  tm_a <- Sys.time()
  err <- err_links_wf_probablistic_0(attribute = attribute,
                                     blocking_attribute = blocking_attribute,
                                     cmp_func = cmp_func,
                                     attr_threshold = attr_threshold,
                                     probabilistic = probabilistic,
                                     m_probability = m_probability,
                                     score_threshold = score_threshold,
                                     id_1 = NULL, id_2 = NULL,
                                     u_probability = u_probability)
  if(!isFALSE(err)) stop(err, call. = FALSE)
  if(!display %in% c("none")){
    rp_data <- di_report(tm_a, "Data validation", current_tot = length(attrs(attribute)[[1]]))
    report <- list(rp_data)
    if(display %in% c("stats_with_report", "stats")){
      cat(paste0(rp_data[[1]], ": ", fmt(rp_data[[2]], "difftime"), "\n"))
    }
  }
  tm_ia <- Sys.time()
  if(inherits(attribute, c("list", "data.frame"))){
    attribute <- attrs(.obj = attribute)
  }else if(inherits(attribute, "matrix")){
    attribute <- attrs(.obj = as.data.frame(attribute))
  }else if(inherits(attribute, "d_attribute")){
  }else{
    attribute <- attrs(attribute)
  }
  if(is.null(names(attribute))){
    names(attribute) <- paste0("var_", seq_len(length(attribute)))
  }
  attr_nm <- names(attribute)
  rd_n <- length(attribute[[1]])
  lgk <- unlist(lapply(attribute, function(x){
    if(is.number_line(x)){
      length(unique(x)) == 1
    }else{
      length(x[!duplicated(x)]) == 1
    }
  }), use.names = FALSE)
  if(any(lgk)){
    warning(paste0("Attributes with identical values in every record are ignored:\n",
                   paste0("i - `", attr_nm[lgk], "` was ignored!", collapse = "\n")),
            call. = FALSE)
  }
  if(all(lgk)){
    stop("Linkage stopped because all attributes were ignored.", call. = FALSE)
  }
  if(!is.null(blocking_attribute)){
    if(all(is.na(blocking_attribute))){
      stop("Linkage stopped because all records have a missing (`NA`) `strata`.", call. = FALSE)
    }
  }
  attribute <- attribute[!lgk]
  attr_nm <- names(attribute)
  probs_repo <- prep_prob_link_args(attribute = attribute,
                                    m_probability = m_probability,
                                    u_probability = u_probability)
  thresh_repo <- prep_cmps_thresh(attr_nm = attr_nm,
                                  cmp_func = cmp_func,
                                  attr_threshold = attr_threshold,
                                  score_threshold = score_threshold)
  probs_repo$m_probability$x <- lapply(probs_repo$m_probability$x, mk_lazy_opt)
  probs_repo$u_probability$x <- lapply(probs_repo$u_probability$x, mk_lazy_opt)
  if(!is.null(blocking_attribute)){
    blocking_attribute <- as.vector(blocking_attribute)
    strata <- match(blocking_attribute, blocking_attribute[!duplicated(blocking_attribute)])
    strata[is.na(blocking_attribute)] <- ((seq_len(rd_n) + max(strata))[is.na(blocking_attribute)])
  }else{
    strata <- blocking_attribute
  }
  if(isTRUE(ignore_same_source)){
    r_pairs <- make_pairs_wf_source(seq_len(rd_n), strata = strata,
                                    repeats_allowed = repeats_allowed,
                                    permutations_allowed = permutations_allowed,
                                    data_source = data_source)
  }else{
    r_pairs <- make_pairs(seq_len(rd_n), strata = strata,
                          repeats_allowed = repeats_allowed,
                          permutations_allowed = permutations_allowed)
  }
  if(length(r_pairs$x_pos) == 0){
    pid_weights <- data.frame(sn_x = integer(0), sn_y = integer(0))
    if(!is.null(data_source)){
      pid_weights$source_x <- pid_weights$source_y <- integer(0)
    }
    wts <- lapply(c(attr_nm, "weight"), function(x) numeric(0))
    names(wts) <- paste0("cmp.", c(attr_nm, "weight"))
    pid_weights <- c(pid_weights, wts)
    if(isTRUE(probabilistic)){
      wts <- lapply(c(attr_nm, "weight"), function(x) numeric(0))
      names(wts) <- paste0("prb.", c(attr_nm, "weight"))
      pid_weights <- c(pid_weights, wts)
    }
    pid_weights$record.match <- logical(0)
    pid_weights <- as.data.frame(pid_weights)
    pids <- list(pid = as.pid(seq_len(rd_n)), pid_weights = pid_weights)
    rm(list = ls()[ls() != "pids"])
    return(pids)
  }
  x <- lapply(attribute, function(k) k[r_pairs$x_pos])
  y <- lapply(attribute, function(k) k[r_pairs$y_pos])
  rp_n <- length(x[[1]])
  if(!display %in% c("none")){
    rp_data <- di_report(tm_a, "Pairs created", current_tot = length(x[[1]]))
    report <- c(report, list(rp_data))
    if(display %in% c("stats_with_report", "stats")){
      cat(paste0(rp_data[[1]], ": ", fmt(rp_data[[2]], "difftime"), "\n"))
    }
  }
  tm_ia <- Sys.time()
  pid_weights <- prob_link(x = c(x,
                                 lapply(probs_repo$m_probability$x, function(k) k[r_pairs$x_pos]),
                                 lapply(probs_repo$u_probability$x, function(k) k[r_pairs$x_pos])),
                           y = c(y,
                                 lapply(probs_repo$m_probability$x, function(k) k[r_pairs$y_pos]),
                                 lapply(probs_repo$u_probability$x, function(k) k[r_pairs$y_pos])),
                           attr_threshold = thresh_repo$attr_threshold,
                           score_threshold = thresh_repo$score_threshold,
                           return_weights = TRUE,
                           cmp_func = thresh_repo$cmp_func,
                           probabilistic = probabilistic)
  if(!display %in% c("none")){
    rp_data <- di_report(tm_a, "Weights calculated", current_tot = length(x[[1]]))
    report <- c(report, list(rp_data))
    if(display %in% c("stats_with_report", "stats")){
      cat(paste0(rp_data[[1]], ": ", fmt(rp_data[[2]], "difftime"), "\n"))
    }
  }
  if(!is.null(data_source)){
    pid_weights <- cbind(data.frame(r_pairs$x_pos, r_pairs$y_pos,
                                    data_source[r_pairs$x_pos], data_source[r_pairs$y_pos],
                                    stringsAsFactors = FALSE),
                         pid_weights)
    colnames(pid_weights)[1:4] <- c("sn_x", "sn_y", "source_x", "source_y")
  }else{
    pid_weights <- cbind(data.frame(r_pairs$x_pos, r_pairs$y_pos, stringsAsFactors = FALSE),
                         pid_weights)
    colnames(pid_weights)[1:2] <- c("sn_x", "sn_y")
  }
  pids <- pid_weights[c("sn_x", "sn_y", "record.match")]
  pids <- pids[pids$record.match,]
  pids <- make_ids(pids$sn_x, pids$sn_y, rd_n)
  tots <- rle(sort(pids$group_id))
  pids <- methods::new("pid",
                       .Data = pids$group_id,
                       sn = pids$sn,
                       pid_cri = as.integer(pids$linked),
                       link_id = pids$link_id,
                       pid_total = tots$lengths[match(pids$group_id, tots$values)],
                       iteration = rep(1L, length(pids$sn)))
  if(!is.null(data_source)){
    # flag the data sources spanned by each record group
    rst <- check_links(pids@.Data, data_source, list(l = "ANY"))
    pids@pid_dataset <- encode(rst$ds)
  }
  if(!display %in% c("none")){
    rp_data <- di_report(tm_a, "`pid` created", current_tot = length(x[[1]]),
                         current_tagged = nrow(pid_weights[pid_weights$record.match,]))
    report <- c(report, list(rp_data))
    if(display %in% c("stats_with_report", "stats")){
      cat(paste0(rp_data[[1]], ": ", fmt(rp_data[[2]], "difftime"), "\n"))
    }
  }
  pids <- list(pid = pids, pid_weights = pid_weights)
  if(display %in% c("none_with_report", "progress_with_report", "stats_with_report")){
    pids$report <- as.list(do.call("rbind", lapply(report, as.data.frame)))
    class(pids$report) <- "d_report"
  }
  rm(list = ls()[ls() != "pids"])
  return(pids)
}
links_wf_probabilistic <- function(attribute, blocking_attribute = NULL,
                                   cmp_func = diyar::exact_match,
                                   attr_threshold = 1, probabilistic = TRUE,
                                   m_probability = .95, u_probability = NULL,
                                   score_threshold = 1, id_1 = NULL, id_2 = NULL, ...){
  err <- err_links_wf_probablistic_0(attribute = attribute,
                                     blocking_attribute = blocking_attribute,
                                     cmp_func = cmp_func,
                                     attr_threshold = attr_threshold,
                                     probabilistic = probabilistic,
                                     m_probability = m_probability,
                                     u_probability = u_probability,
                                     score_threshold = score_threshold,
                                     id_1 = id_1, id_2 = id_2)
  if(!isFALSE(err)) stop(err, call. = FALSE)
  if(inherits(attribute, c("list", "data.frame"))){
    attribute <- attrs(.obj = attribute)
  }else if(inherits(attribute, "matrix")){
    attribute <- attrs(.obj = as.data.frame(attribute))
  }else if(inherits(attribute, "d_attribute")){
  }else{
    attribute <- attrs(attribute)
  }
  if(is.null(names(attribute))){
    names(attribute) <- paste0("var_", seq_len(length(attribute)))
  }
  attr_nm <- names(attribute)
  rd_n <- length(attribute[[1]])
  lgk <- unlist(lapply(attribute, function(x){
    if(is.number_line(x)){
      length(unique(x)) == 1
    }else{
      length(x[!duplicated(x)]) == 1
    }
  }), use.names = FALSE)
  if(any(lgk)){
    warning(paste0("Attributes with identical values in every record are ignored:\n",
                   paste0("i - `", attr_nm[lgk], "` was ignored!", collapse = "\n")),
            call. = FALSE)
  }
  if(all(lgk)){
    stop("Linkage stopped because all attributes were ignored.", call. = FALSE)
  }
  attribute <- attribute[!lgk]
  attr_nm <- names(attribute)
  probs_repo <- prep_prob_link_args(attribute = attribute,
                                    m_probability = m_probability,
                                    u_probability = u_probability)
  thresh_repo <- prep_cmps_thresh(attr_nm = attr_nm,
                                  cmp_func = cmp_func,
                                  attr_threshold = attr_threshold,
                                  score_threshold = score_threshold)
  probs_repo$m_probability$x <- lapply(probs_repo$m_probability$x,
                                       function(x) if(length(x) == 1) rep(x, rd_n) else x)
  probs_repo$u_probability$x <- lapply(probs_repo$u_probability$x,
                                       function(x) if(length(x) == 1) rep(x, rd_n) else x)
  probs_repo$m_probability$x <- lapply(probs_repo$m_probability$x, mk_lazy_opt)
  probs_repo$u_probability$x <- lapply(probs_repo$u_probability$x, mk_lazy_opt)
  prob_link_wf <- function(x, y){
    prob_link(x, y,
              attr_threshold = thresh_repo$attr_threshold,
              score_threshold = thresh_repo$score_threshold,
              return_weights = FALSE,
              probabilistic = probabilistic,
              cmp_func = thresh_repo$cmp_func)
  }
  same_rec_func <- function(x, y){
    attr_n <- length(x)
    lgk <- sapply(seq_len(attr_n), function(i){
      lgk <- x[[i]] == y[[i]] | (is.na(x[[i]]) & is.na(y[[i]]))
      lgk[is.na(lgk)] <- FALSE
      lgk
    })
    if(is.null(nrow(lgk))){
      sum(lgk) == attr_n
    }else{
      rowSums(lgk) == attr_n
    }
  }
  if(!is.null(id_1) & !is.null(id_2)){
    pids <- NULL
    x <- c(attribute, probs_repo$m_probability, probs_repo$u_probability)
    y <- lapply(x, function(k) k[id_2])
    x <- lapply(x, function(k) k[id_1])
    thresh_lgk <- integer()
  }else{
    pids <- links(criteria = "place_holder",
                  strata = blocking_attribute,
                  sub_criteria = list("cr1" = sub_criteria(
                    attrs(.obj = c(attribute, probs_repo$m_probability$x, probs_repo$u_probability$x)),
                    match_funcs = prob_link_wf,
                    equal_funcs = same_rec_func)),
                  ...)
    x <- c(attribute, probs_repo$m_probability$x, probs_repo$u_probability$x)
    y <- lapply(x, function(k) k[match(pids@link_id, pids@sn)])
    id_1 <- pids@sn
    id_2 <- pids@link_id
    thresh_lgk <- which(pids@pid_cri %in% -1:0)
  }
  pid_weights <- prob_link(x, y,
                           attr_threshold = thresh_repo$attr_threshold,
                           score_threshold = thresh_repo$score_threshold,
                           return_weights = TRUE,
                           cmp_func = thresh_repo$cmp_func,
                           probabilistic = probabilistic)
  pid_weights[thresh_lgk,] <- NA
  pid_weights <- cbind(data.frame(id_1, id_2, stringsAsFactors = FALSE), pid_weights)
  colnames(pid_weights)[1:2] <- c("sn_x", "sn_y")
  pids <- list(pid = pids, pid_weights = pid_weights)
  rm(list = ls()[ls() != "pids"])
  return(pids)
}
prob_score_range <- function(attribute, m_probability = .95, u_probability = NULL){
  if(inherits(attribute, c("list", "data.frame"))){
    attribute <- attrs(.obj = attribute)
  }else if(inherits(attribute, "matrix")){
    attribute <- attrs(.obj = as.data.frame(attribute))
  }else if(inherits(attribute, "d_attribute")){
  }else{
    attribute <- attrs(attribute)
  }
  if(is.null(names(attribute))){
    names(attribute) <- paste0("var_", seq_len(length(attribute)))
  }
  if(is.null(u_probability)){
    u_probability <- lapply(attribute, function(x){
      x_cd <- match(x, x[!duplicated(x)])
      x_cd[is.na(x)] <- NA_real_
      r <- rle(x_cd[order(x_cd)])
      n <- r$lengths[match(x_cd, r$values)]
      p <- n/length(x_cd)
      p[is.na(x_cd)] <- 0
      p
    })
  }
  lgk <- unlist(lapply(attribute, function(x) length(x[!duplicated(x)]) == 1), use.names = FALSE)
  if(any(lgk)){
    warning(paste0("Attributes with identical values in every record are ignored:\n",
                   paste0("i - `", names(attribute)[lgk], "` was ignored!", collapse = "\n")),
            call. = FALSE)
    attribute <- attribute[!lgk]
    u_probability <- u_probability[!lgk]
  }
  if(!is.list(m_probability)){
    m_probability <- list(m_probability)
  }
  if(length(m_probability) != 1 & any(lgk)){
    m_probability <- m_probability[!lgk]
  }
  if(length(m_probability) == 1 & length(attribute) > 1){
    m_probability <- rep(m_probability, length(attribute))
  }
  max_thresh <- sapply(seq_len(length(u_probability)), function(i){
    curr_uprob <- u_probability[[i]]
    curr_uprob[curr_uprob == 0] <- 1
    curr_mprob <- m_probability[[i]]
    log2((curr_mprob ^ 2) / (curr_uprob ^ 2))
  })
  if(is.null(nrow(max_thresh))){
    max_thresh <- max(sum(max_thresh))
  }else{
    max_thresh <- max(rowSums(max_thresh))
  }
  min_thresh <- sapply(seq_len(length(u_probability)), function(i){
    curr_uprob <- u_probability[[i]]
    curr_mprob <- m_probability[[i]]
    log2((1 - (curr_mprob ^ 2))/(1 - (curr_uprob ^ 2)))
  })
  if(is.null(nrow(min_thresh))){
    min_thresh <- min(sum(min_thresh))
  }else{
    min_thresh <- min(rowSums(min_thresh))
  }
  list(minimum_score = min_thresh,
       mid_score = (min_thresh + max_thresh)/2,
       maximum_score = max_thresh)
}
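# Usage sketch (toy data, not from the source): record linkage on two
# attributes with the default exact_match comparator; prob_score_range()
# suggests plausible bounds for score_threshold.
toy <- data.frame(fname = c("jo", "jo", "ann", "ann", "ann"),
                  byear = c(1990, 1990, 1985, 1986, 1985))
prob_score_range(toy)
out <- link_records(attribute = toy, score_threshold = 1)
out$pid
head(out$pid_weights)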