test_that("test_nodes works", {
expect_equal(find_nodes(list("name"="Steve Jobs")), "SELECT body FROM nodes WHERE json_extract(body, '$.name') = \"Steve Jobs\"")
expect_equal(find_nodes(list("name"="Steve Jobs"), search_fn = "search_like"), "SELECT body FROM nodes WHERE json_extract(body, '$.name') LIKE \"Steve Jobs\"")
expect_equal(find_nodes(list("name"="Steve"), where_fn = "search_starts_with"), "SELECT body FROM nodes WHERE json_extract(body, '$.name') = \"%Steve\"")
expect_equal(find_nodes(list("name"="Steve"), where_fn = "search_contains"), "SELECT body FROM nodes WHERE json_extract(body, '$.name') = \"%Steve%\"")
}) |
pylint <- function(modules){
  find_pylint <- Sys.which("pylint") != ""
  if(!find_pylint){
    stop(
      "This package requires 'pylint'. ",
      "Either it is not installed or it cannot be found."
    )
  }
  HTMLreport <- pylintReport(modules)
  x <- list(
    html = URLencode(HTMLreport)
  )
  htmlwidgets::createWidget(
    name = "pylintR",
    x,
    width = NULL,
    height = NULL,
    package = "pylintR",
    elementId = NULL
  )
}
pylintROutput <- function(outputId, width = "100%", height = "400px"){
  htmlwidgets::shinyWidgetOutput(outputId, "pylintR", width, height, package = "pylintR")
}
renderPylintR <- function(expr, env = parent.frame(), quoted = FALSE){
  if(!quoted){ expr <- substitute(expr) }
  htmlwidgets::shinyRenderWidget(expr, pylintROutput, env, quoted = TRUE)
}
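# Usage sketch (hypothetical Shiny app and script name; assumes 'pylint' is on
# the PATH and the widget's JavaScript assets ship with the pylintR package):
library(shiny)
ui <- fluidPage(pylintROutput("lint_report"))
server <- function(input, output) {
  output$lint_report <- renderPylintR(pylint(modules = "my_script.py"))
}
# shinyApp(ui, server)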
# M-spline design matrix: rescale each column of the B-spline basis so that
# the basis functions integrate to one (Ramsay, 1988).
msplinedesign <- function(x, k, m = 4) {
  bspline <- splines::splineDesign(k, x, ord = m, outer.ok = FALSE)
  for (i in 1:(length(k) - m)) {
    bspline[, i] <- bspline[, i] * m / (k[i + m] - k[i])
  }
  return(bspline)
}
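# Example with a hypothetical knot sequence: an order-4 (cubic) basis with one
# interior knot; each column integrates to one, unlike raw B-splines.
knots <- c(rep(0, 4), 0.5, rep(1, 4))
x_grid <- seq(0, 1, by = 0.01)
basis <- msplinedesign(x_grid, k = knots, m = 4)
matplot(x_grid, basis, type = "l", lty = 1, ylab = "M-spline basis")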
test_that("circacompare() fits a good model to generated data", {
set.seed(40)
tau_in <- 15
phi1_in <- 12
df <- make_data(phi1=(phi1_in/24)*(2*pi), noise_sd = 2)
out <- circacompare(x = df, col_time = "time", col_group = "group", col_outcome = "measure")
df$time <- df$time/24*tau_in
out_tau_adjusted <- circacompare(x = df, col_time = "time", col_group = "group", col_outcome = "measure",
period=NA,
control = list(main_params=c("k", "alpha", "tau", "phi"),
grouped_params=c("k", "alpha", "phi")))
both_groups_rhythmic <- as.logical(out$summary[1, 'value']<0.05 & out$summary[2, 'value']<0.05)
phase_shift_estimated_within_2hours <- abs(abs(out$summary[13,'value']) - phi1_in) < 2
expect_true(both_groups_rhythmic)
expect_true(phase_shift_estimated_within_2hours)
fit_tau <- extract_model_coefs(out_tau_adjusted$fit)['tau', ]
tau_est <- fit_tau['estimate']
tau_ll <- tau_est - 1.96*fit_tau['std_error']
tau_ul <- tau_est + 1.96*fit_tau['std_error']
expect_true(tau_in < tau_ul & tau_in > tau_ll)
df <- make_data(k1=0, alpha1=10, phi1=0, seed=42, hours=96, noise_sd=2)
df$time <- df$time/24*tau_in
alpha_decay1_in <- 0.05
df$measure[df$group=="g2"] <- df$measure[df$group=="g2"]*exp(-alpha_decay1_in*df$time[df$group=="g2"])
out_alpha_decay <-
circacompare(x=df, "time", "group", "measure", period=NA,
control=list(
main_params=c("k", "alpha", "phi", "tau"),
decay_params=c("alpha"),
grouped_params=c("alpha", "alpha_decay")
))
fit_alpha_decay1 <- extract_model_coefs(out_alpha_decay$fit)['alpha_decay1', ]
alpha_decay1_est <- fit_alpha_decay1['estimate']
alpha_decay1_ll <- alpha_decay1_est - 1.96*fit_alpha_decay1['std_error']
alpha_decay1_ul <- alpha_decay1_est + 1.96*fit_alpha_decay1['std_error']
expect_true(alpha_decay1_in < alpha_decay1_ul & alpha_decay1_in > alpha_decay1_ll)
})
plsda <- function (x, ...)
UseMethod("plsda")
predict.plsda <- function(object, newdata = NULL, ncomp = NULL, type = "class", ...){
requireNamespaceQuietStop('pls')
if(is.null(ncomp))
if(!is.null(object$ncomp)) ncomp <- object$ncomp else stop("specify ncomp")
if(!is.null(newdata)) {
if(!is.matrix(newdata)) newdata <- as.matrix(newdata)
}
class(object) <- "mvr"
tmpPred <- predict(object, newdata = newdata)[,,ncomp,drop = FALSE]
if(type == "raw") return(tmpPred)
if(is.null(object$probModel)) {
switch(type,
class = {
if(length(dim(tmpPred)) < 3) {
out <- object$obsLevels[apply(tmpPred, 1, which.max)]
out <- factor(out, levels = object$obsLevels)
} else {
tmpOut <- matrix("", nrow = dim(tmpPred)[1], ncol = dim(tmpPred)[3])
for(i in 1:dim(tmpPred)[3]) {
tmpOut[,i] <- object$obsLevels[apply(tmpPred[,,i,drop=FALSE], 1, which.max)]
}
out <- as.data.frame(tmpOut, stringsAsFactors = TRUE)
out <- as.data.frame(
lapply(out, function(x, y) factor(x, levels = y),
y = object$obsLevels),
stringsAsFactors = TRUE
)
names(out) <- paste("ncomp", ncomp, sep = "")
rownames(out) <- rownames(newdata)
if(length(ncomp) == 1) out <- out[,1]
}
},
prob = {
if(length(dim(tmpPred)) < 3) {
out <- t(apply(tmpPred, 1, function(data) exp(data)/sum(exp(data))))
} else {
out <- tmpPred * NA
for(i in 1:dim(tmpPred)[3]) {
out[,,i] <- t(apply(tmpPred[,,i,drop=FALSE], 1, function(data) exp(data)/sum(exp(data))))
}
}
})
} else {
requireNamespaceQuietStop("klaR")
tmp <- vector(mode = "list", length = length(ncomp))
for(i in seq(along = ncomp)) {
tmp[[i]] <- predict(object$probModel[[ ncomp[i] ]],
as.data.frame(tmpPred[,-length(object$obsLevels),i], stringsAsFactors = TRUE))
}
if(type == "class") {
out <- t(do.call("rbind",
lapply(tmp, function(x) as.character(x$class))))
rownames(out) <- names(tmp[[1]]$class)
colnames(out) <- paste("ncomp", ncomp, sep = "")
out <- as.data.frame(out, stringsAsFactors = TRUE)
out <- as.data.frame(
lapply(out, function(x, y) factor(x, levels = y),
y = object$obsLevels),
stringsAsFactors = TRUE
)
if(length(ncomp) == 1) out <- out[,1]
} else {
out <- array(dim = c(dim(tmp[[1]]$posterior), length(ncomp)),
dimnames = list(
rownames(tmp[[1]]$posterior),
colnames(tmp[[1]]$posterior),
paste("ncomp", ncomp, sep = "")))
for(i in seq(along = ncomp)) out[,,i] <- tmp[[i]]$posterior
}
}
out
}
plsda.default <- function(x, y, ncomp = 2, probMethod = "softmax", prior = NULL, ...) {
requireNamespaceQuietStop('pls')
funcCall <- match.call(expand.dots = TRUE)
if(!is.matrix(x)) x <- as.matrix(x)
if(length(ncomp) > 1) {
ncomp <- max(ncomp)
warning(paste(
"A single value of ncomp must be specified.",
"max(ncomp) was used.",
"Predictions can be obtained for values <= ncomp"))
}
if(probMethod == "softmax") {
if(!is.null(prior)) warning("Priors are ignored unless probMethod = \"Bayes\"")
}
if(is.factor(y)) {
obsLevels <- levels(y)
oldY <- y
y <- class2ind(y)
} else {
if(is.matrix(y)) {
test <- apply(y, 1, sum)
if(any(test != 1)) stop("the rows of y must be 0/1 and sum to 1")
obsLevels <- colnames(y)
if(is.null(obsLevels)) stop("the y matrix must have column names")
oldY <- obsLevels[apply(y, 1, which.max)]
} else stop("y must be a matrix or a factor")
}
tmpData <- data.frame(n = paste("row", 1:nrow(y), sep = ""))
tmpData$y <- y
tmpData$x <- x
out <- pls::plsr(y ~ x, data = tmpData, ncomp = ncomp, ...)
out$obsLevels <- obsLevels
out$probMethod <- probMethod
if(probMethod == "Bayes") {
requireNamespaceQuietStop('klaR')
makeModels <- function(x, y, pri) {
probModel <- klaR::NaiveBayes(x, y, prior = pri, usekernel = TRUE)
probModel$train <- predict(probModel)$posterior
probModel$x <- NULL
probModel
}
cls <- class(out)
class(out) <- "mvr"
train <- predict(out, as.matrix(tmpData$x), ncomp = 1:ncomp)
train <- train[, -length(obsLevels),, drop = FALSE]
out$probModel <- apply(train, 3, makeModels, y = oldY, pri = prior)
} else out$probModel <- NULL
class(out) <- c("plsda", class(out))
out
}
print.plsda <- function (x, ...) {
switch(x$method,
kernelpls = {
regr = "Partial least squares"
alg = "kernel"
}, simpls = {
regr = "Partial least squares"
alg = "simpls"
}, oscorespls = {
regr = "Partial least squares"
alg = "orthogonal scores"
}, svdpc = {
regr = "Principal component"
alg = "singular value decomposition"
}, stop("Unknown fit method."))
cat(regr, "classification, fitted with the", alg, "algorithm.")
if (!is.null(x$validation))
cat("\nCross-validated using", length(x$validation$segments),
attr(x$validation$segments, "type"), "segments.")
switch(x$probMethod,
softmax = cat("\nThe softmax function was used to compute class probabilities.\n"),
Bayes = cat("\nBayes rule was used to compute class probabilities.\n"))
invisible(x)
}
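# Usage sketch (this is caret's plsda; class2ind() and
# requireNamespaceQuietStop() are caret internals, and the 'pls' package must
# be installed):
fit <- plsda(iris[, 1:4], iris$Species, ncomp = 3)
predict(fit, iris[1:5, 1:4])                  # factor of class predictions
predict(fit, iris[1:5, 1:4], type = "prob")   # softmax class probabilities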
reward <- function(x, belief = NULL, epoch = 1) {
.solved_POMDP(x)
if (is.null(belief))
belief <- x$model$start
belief <- .translate_belief(belief, x)
e <- .get_pg_index(x, epoch)
alpha <- x$solution$alpha[[e]]
pg <- x$solution$pg[[e]]
vs <- .rew(belief, alpha)
list(
belief = belief,
reward = vs$reward,
pg_node = vs$pg_node,
action = factor(pg$action[vs$pg_node], levels = x$model$actions)
)
}
# For each belief (row), compute the best expected reward over all alpha
# vectors and the index of the maximising policy-graph node.
.rew <- function(belief, alpha) {
if (!is.matrix(belief))
belief <- rbind(belief)
r <- apply(
belief,
MARGIN = 1,
FUN = function(b) {
rewards <- alpha %*% b
c(max(rewards), which.max(rewards))
}
)
r <- as.data.frame(t(r))
colnames(r) <- c("reward", "pg_node")
r
}
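# Usage sketch with the pomdp package these helpers belong to (assumes its
# bundled "Tiger" model):
library(pomdp)
data("Tiger")
sol <- solve_POMDP(model = Tiger)
reward(sol)                         # reward at the model's start belief
reward(sol, belief = c(0.5, 0.5))   # reward for a uniform belief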
# Internal dplyr helpers that resolve the `by` argument of a join.
# `%||%`, abort() and inform() come from rlang; glue() from the glue package.
common_by <- function(by = NULL, x, y) UseMethod("common_by", by)
common_by.character <- function(by, x, y) {
by <- common_by_from_vector(by)
common_by.list(by, x, y)
}
common_by_from_vector <- function(by) {
by <- by[!duplicated(by)]
by_x <- names(by) %||% by
by_y <- unname(by)
by_x[by_x == ""] <- by_y[by_x == ""]
list(x = by_x, y = by_y)
}
common_by.list <- function(by, x, y) {
x_vars <- tbl_vars(x)
if (!all(by$x %in% x_vars)) {
msg <- glue("`by` can't contain join column {missing} which is missing from LHS.",
missing = fmt_obj(setdiff(by$x, x_vars))
)
abort(msg)
}
y_vars <- tbl_vars(y)
if (!all(by$y %in% y_vars)) {
msg <- glue("`by` can't contain join column {missing} which is missing from RHS.",
missing = fmt_obj(setdiff(by$y, y_vars))
)
abort(msg)
}
by
}
common_by.NULL <- function(by, x, y) {
by <- intersect(tbl_vars(x), tbl_vars(y))
by <- by[!is.na(by)]
if (length(by) == 0) {
msg <- glue("`by` required, because the data sources have no common variables.")
abort(msg)
}
inform(auto_by_msg(by))
list(
x = by,
y = by
)
}
auto_by_msg <- function(by) {
by_quoted <- encodeString(by, quote = '"')
if (length(by_quoted) == 1L) {
by_code <- by_quoted
} else {
by_code <- paste0("c(", paste(by_quoted, collapse = ", "), ")")
}
paste0("Joining, by = ", by_code)
}
common_by.default <- function(by, x, y) {
msg <- glue("`by` must be a (named) character vector, list, or NULL for natural joins (not recommended in production code), not {friendly_type_of(by)}.")
abort(msg)
}
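# Example of how a `by` vector is resolved (requires rlang for `%||%`): a
# named element maps an LHS column to a differently named RHS column, while
# unnamed elements use the same name on both sides.
common_by_from_vector(c(dept = "dept_id", "year"))
#> $x
#> [1] "dept" "year"
#>
#> $y
#> [1] "dept_id" "year"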
bref.variable0 <- function(x, digits = max(3, getOption("digits") - 3)) {
  if (is.numeric(x)) {
    # numeric variable: report the mean
    ttt <- signif(mean(na.omit(x)), digits = digits)
    names(ttt) <- "M"
  } else if (is.factor(x)) {
    # factor: report the modal category as a percentage
    MAX <- rev(sort(table(na.omit(x))))[1]
    ttt <- round(100 * MAX / sum(table(na.omit(x))))
    names(ttt) <- paste(names(MAX), " (%)", sep = "")
  } else {
    ttt <- NA
  }
  ttt
}
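# Example: a numeric column is summarised by its mean, a factor by its modal
# category as a percentage.
bref.variable0(c(1.2, 3.4, NA, 5.6))      # M: 3.4
bref.variable0(factor(c("a", "a", "b")))  # a (%): 67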
unload_user_installed_pkgs <- function(
  exceptions = NULL,
  force = FALSE,
  keep_kim = TRUE
) {
  # visible bindings for the non-standard evaluation in subset()
  Priority <- Package <- NULL
  pkg_df <- as.data.frame(utils::installed.packages())
  default_pkg <- subset(
    pkg_df, Priority %in% c("base", "recommended"))[["Package"]]
  loaded_user_installed_pkgs <- setdiff(loadedNamespaces(), default_pkg)
  if (keep_kim == TRUE) {
    exceptions <- c(exceptions, "kim")
  }
  pkgs_to_unload <- setdiff(loaded_user_installed_pkgs, exceptions)
  if (length(pkgs_to_unload) > 0) {
    # detach() (not unloadNamespace()) accepts character.only and force;
    # unload = TRUE also unloads the namespace after detaching
    status <- tryCatch({
      invisible(lapply(
        paste0("package:", pkgs_to_unload), detach,
        character.only = TRUE, force = force, unload = TRUE))
      "success"
    },
    warning = function(c) {
      message(conditionMessage(c))
      return("warning")
    },
    error = function(c) {
      message(conditionMessage(c))
      return("error")
    })
    if (!status %in% c("warning", "error")) {
      message("The following package(s) were detached: ",
              paste0(pkgs_to_unload, collapse = ", "))
    }
  } else {
    message("No packages were detached.")
  }
}
get_attr_list <- function(dat, item = NULL) {
if (is.null(item)) {
out <- dat[["attr"]]
} else {
missing_item <- setdiff(item, names(dat[["attr"]]))
if (length(missing_item) > 0) {
stop("There is no attributes called `",
paste(missing_item, collapse = ", "),
"` in the attributes list of the Master list object (dat)")
}
out <- dat[["attr"]][item]
}
return(out)
}
get_attr <- function(dat, item, posit_ids = NULL, override.null.error = FALSE) {
if (!item %in% names(dat[["attr"]])) {
if (override.null.error) {
out <- NULL
} else {
stop("There is no attribute called `", item,
"` in the attributes list of the Master list object (dat)")
}
} else {
if (is.null(posit_ids)) {
out <- dat[["attr"]][[item]]
} else {
if (is.logical(posit_ids)) {
if (length(posit_ids) != length(dat[["attr"]][[item]])) {
stop("(logical) `posit_ids` has to have a length equal to the ",
"number of nodes in the network")
}
} else if (is.numeric(posit_ids)) {
if (length(posit_ids) > 0 &&
any(posit_ids > length(dat[["attr"]][[item]]))) {
stop("Some (numeric) `posit_ids` are larger than the number of ",
"nodes in the network")
}
} else {
stop("`posit_ids` must be logical, numeric, or NULL")
}
out <- dat[["attr"]][[item]][posit_ids]
}
}
return(out)
}
add_attr <- function(dat, item) {
if (item %in% names(dat[["attr"]])) {
stop("Cannot create the attribute '", item, "': exists already")
}
dat[["attr"]][[item]] <- rep(NA, length(dat[["attr"]][["active"]]))
return(dat)
}
set_attr <- function(dat, item, value, posit_ids = NULL,
override.length.check = FALSE) {
if (!item %in% names(dat[["attr"]])) {
dat <- add_attr(dat, item)
}
if (is.null(posit_ids)) {
if (!override.length.check &&
length(value) != length(dat[["attr"]][["active"]])) {
stop(
"When trying to edit the ", `item`, " nodal attribute: ",
"The size of the `value` vector is not equal to the number of nodes in",
" the network. \n",
"Expected: ", length(dat[["attr"]][["active"]]), "\n",
"Given: ", length(value)
)
}
dat[["attr"]][[item]] <- value
} else {
if (is.logical(posit_ids)) {
if (length(posit_ids) != length(dat[["attr"]][[item]])) {
stop("(logical) `posit_ids` has to have a length equal to the number ",
"of nodes in the network")
}
} else if (is.numeric(posit_ids)) {
if (length(posit_ids) == 0) {
return(dat)
} else if (any(posit_ids > length(dat[["attr"]][[item]]))) {
stop("Some (numeric) `posit_ids` are larger than the number of nodes ",
" in the network")
}
} else {
stop("`posit_ids` must be logical, numeric, or NULL")
}
if (!override.length.check &&
length(value) != 1 &&
length(value) != length(dat[["attr"]][["active"]][posit_ids])) {
stop(
"When trying to edit the `", item, "` nodal attribute: ",
"The size of the `value` vector is not equal to the number of nodes ",
"selected by the `posit_ids` vector nor of length 1. \n",
"Expected: ", length(dat[["attr"]][["active"]][posit_ids]), " or 1 \n",
"Given: ", length(value)
)
}
dat[["attr"]][[item]][posit_ids] <- value
}
return(dat)
}
append_attr <- function(dat, item, value, n.new) {
if (!is.numeric(n.new) || n.new < 0) {
stop("`n_new` must be numeric and greater than or equal to zero.")
}
if (length(value) == 1) {
new_vals <- rep(value, n.new)
} else if (length(value) == n.new) {
new_vals <- value
} else {
stop("`value` must be of length one or `n.new`.")
}
old_vals <- get_attr(dat, item, override.null.error = TRUE)
dat <- set_attr(dat, item, c(old_vals, new_vals),
override.length.check = TRUE)
return(dat)
}
get_epi_list <- function(dat, item = NULL) {
if (is.null(item)) {
out <- dat[["epi"]]
} else {
missing_item <- setdiff(item, names(dat[["epi"]]))
if (length(missing_item) > 0) {
stop("There is no epi output called `",
paste(missing_item, collapse = ", "),
"` in the epi output list of the Master list object (dat)")
}
out <- dat[["epi"]][item]
}
return(out)
}
get_epi <- function(dat, item, at = NULL, override.null.error = FALSE) {
if (!item %in% names(dat[["epi"]])) {
if (override.null.error) {
out <- NULL
} else {
stop("There is no epi out called `", item,
"` in the epi out list of the Master list object (dat)")
}
} else {
if (is.null(at)) {
out <- dat[["epi"]][[item]]
} else {
if (is.logical(at)) {
if (length(at) != dat[["control"]][["nsteps"]]) {
stop("(logical) `at` has to have a length equal to the number of
steps planned for for the simulation (control[['nsteps']])")
}
} else if (is.numeric(at)) {
if (any(at > dat[["control"]][["nsteps"]])) {
stop("Some (numeric) `at` are larger than the number of
steps planned for for the simulation (control[['nsteps']])")
}
} else {
stop("`at` must be logical, numeric, or NULL")
}
out <- dat[["epi"]][[item]][at]
}
}
return(out)
}
add_epi <- function(dat, item) {
if (item %in% names(dat[["epi"]])) {
stop("Cannot create the epi output, ", item, ": exists already")
}
dat[["epi"]][[item]] <- rep(NA, dat[["control"]][["nsteps"]])
return(dat)
}
set_epi <- function(dat, item, at, value) {
if (length(at) != 1 || !is.numeric(at)) {
stop("`at` must be numeric and of length one")
}
if (!item %in% names(dat[["epi"]])) {
dat <- add_epi(dat, item)
}
if (at > length(dat[["epi"]][[item]])) {
dat[["epi"]][[item]] <- c(
dat[["epi"]][[item]],
rep(NA, dat[["control"]][["nsteps"]] - length(dat[["epi"]][[item]]))
)
}
dat[["epi"]][[item]][at] <- value
return(dat)
}
get_param_list <- function(dat, item = NULL) {
if (is.null(item)) {
out <- dat[["param"]]
} else {
missing_item <- setdiff(item, names(dat[["param"]]))
if (length(missing_item) > 0) {
stop("There is no parameters called `",
paste(missing_item, collapse = ", "),
"` in the parameter list of the Master list object (dat)")
}
out <- dat[["param"]][item]
}
return(out)
}
get_param <- function(dat, item, override.null.error = FALSE) {
if (!item %in% names(dat[["param"]])) {
if (override.null.error) {
out <- NULL
} else {
stop("There is no parameter called `", item,
"` in the parameter list of the Master list object (dat)")
}
} else {
out <- dat[["param"]][[item]]
}
return(out)
}
add_param <- function(dat, item) {
if (item %in% names(dat[["param"]])) {
stop("Cannot create the parameter, ", item, ": exists already")
}
dat[["param"]][[item]] <- NA
return(dat)
}
set_param <- function(dat, item, value) {
if (!item %in% names(dat[["param"]])) {
dat <- add_param(dat, item)
}
dat[["param"]][[item]] <- value
return(dat)
}
get_control_list <- function(dat, item = NULL) {
if (is.null(item)) {
out <- dat[["control"]]
} else {
missing_item <- setdiff(item, names(dat[["control"]]))
if (length(missing_item) > 0) {
stop("There is no control value called `",
paste(missing_item, collapse = ", "),
"` in the control list of the Master list object (dat)")
}
out <- dat[["control"]][item]
}
return(out)
}
get_control <- function(dat, item, override.null.error = FALSE) {
if (!item %in% names(dat[["control"]])) {
if (override.null.error) {
out <- NULL
} else {
stop("There is no control value called `", item,
"` in the control list of the Master list object (dat)")
}
} else {
out <- dat[["control"]][[item]]
}
return(out)
}
add_control <- function(dat, item) {
if (item %in% names(dat[["control"]])) {
stop("Cannot create the control value, ", item,
": exists already")
}
dat[["control"]][[item]] <- NA
return(dat)
}
set_control <- function(dat, item, value) {
if (!item %in% names(dat[["control"]])) {
dat <- add_control(dat, item)
}
dat[["control"]][[item]] <- value
return(dat)
}
get_init_list <- function(dat, item = NULL) {
if (is.null(item)) {
out <- dat[["init"]]
} else {
missing_item <- setdiff(item, names(dat[["init"]]))
if (length(missing_item) > 0) {
stop("There is no init value called `",
paste(missing_item, collapse = ", "),
"` in the init list of the Master list object (dat)")
}
out <- dat[["init"]][item]
}
return(out)
}
get_init <- function(dat, item, override.null.error = FALSE) {
if (!item %in% names(dat[["init"]])) {
if (override.null.error) {
out <- NULL
} else {
stop("There is no init value called `", item,
"` in the init list of the Master list object (dat)")
}
} else {
out <- dat[["init"]][[item]]
}
return(out)
}
add_init <- function(dat, item) {
if (item %in% names(dat[["init"]])) {
stop("Cannot create the init value, ", item,
": exists already")
}
dat[["init"]][[item]] <- NA
return(dat)
}
set_init <- function(dat, item, value) {
if (!item %in% names(dat[["init"]])) {
dat <- add_init(dat, item)
}
dat[["init"]][[item]] <- value
return(dat)
}
append_core_attr <- function(dat, at, n.new) {
dat <- append_attr(dat, "active", 1, n.new)
dat <- append_attr(dat, "entrTime", at, n.new)
dat <- append_attr(dat, "exitTime", NA, n.new)
dat <- update_unique_ids(dat, n.new)
return(dat)
}
update_unique_ids <- function(dat, n.new) {
last_unique_id <- if (is.null(dat[["_last_unique_id"]])) 0L else dat[["_last_unique_id"]]
next_unique_ids <- seq_len(n.new) + last_unique_id
dat[["_last_unique_id"]] <- last_unique_id + as.integer(n.new)
dat <- append_attr(dat, "unique_id", next_unique_ids, n.new)
return(dat)
}
check_attr_lengths <- function(dat) {
attr_lengths <- vapply(dat[["attr"]], length, numeric(1))
expected_length <- attr_lengths["active"]
wrong_lengths <- which(attr_lengths != expected_length)
if (length(wrong_lengths) > 0) {
msg <- c(
"Some attribute are not of the correct length \n",
"Expected length: ", expected_length, "\n",
"Wrong length attributes: \n"
)
for (i in seq_along(wrong_lengths)) {
msg <- c(msg, "`", names(wrong_lengths)[i], "`: ", wrong_lengths[i], "\n")
}
stop(msg)
}
return(invisible(TRUE))
}
get_unique_ids <- function(dat, posit_ids = NULL) {
if (is.null(posit_ids)) {
return(get_attr(dat, "unique_id"))
}
unique_ids <- get_attr(dat, "unique_id", posit_ids = posit_ids)
return(unique_ids)
}
get_posit_ids <- function(dat, unique_ids = NULL) {
if (is.null(unique_ids)) {
return(seq_along(get_attr(dat, "active")))
}
posit_ids <- base::match(unique_ids, get_attr(dat, "unique_id"))
if (any(is.na(posit_ids))) {
warning(
"While converting `unique_ids` to `posit_ids`, some `unique_ids`",
" correspond to deactivated nodes and NA's where produced"
)
}
return(posit_ids)
}
is_active_unique_ids <- function(dat, unique_ids) {
suppressWarnings({
posit_ids <- get_posit_ids(dat, unique_ids)
})
return(is_active_posit_ids(dat, posit_ids))
}
is_active_posit_ids <- function(dat, posit_ids) {
active <- get_attr(dat, "active")
return(active[posit_ids] %in% 1)
}
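# Usage sketch on a minimal, hypothetical Master list object (real EpiModel
# `dat` objects carry many more components):
dat <- list(attr = list(), epi = list(), param = list(),
            control = list(nsteps = 10), init = list())
dat <- append_core_attr(dat, at = 1, n.new = 3)  # 3 active nodes at step 1
dat <- set_attr(dat, "status", c("s", "i", "s"))
get_attr(dat, "status", posit_ids = 2)           # "i"
get_unique_ids(dat)                              # 1 2 3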
transformspreadbyindividual <- function(sa) {
  # Average each spread measure (all columns except the first, the start
  # vertex) within start vertex, then bind the per-vertex means into a matrix.
  means_by_vertex <- by(sa, sa$startvertex, function(x) {
    apply(x[, -1, drop = FALSE], 2, mean)
  })
  sapply(means_by_vertex, function(x) x)
}
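# Example with hypothetical spread data: two start vertices, two measures.
sa <- data.frame(startvertex = c(1, 1, 2, 2),
                 infected = c(3, 5, 2, 4),
                 time = c(1.0, 2.0, 1.5, 2.5))
transformspreadbyindividual(sa)
#>            1   2
#> infected 4.0 3.0
#> time     1.5 2.0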
if (.Platform$OS.type == "windows") {
PATH = paste0(getwd(), path.expand("\\VOCAB_token_stats.txt"))
PATH_folder = paste0(getwd(), path.expand("\\VOCAB_token_stats\\"))
PATH_parser = paste0(getwd(), path.expand("\\TOKEN_ngram_PARSER.txt"))
}
if (.Platform$OS.type == "unix") {
PATH = paste0(getwd(), path.expand("/VOCAB_token_stats.txt"))
PATH_folder = paste0(getwd(), path.expand("/VOCAB_token_stats/"))
PATH_parser = paste0(getwd(), path.expand("/TOKEN_ngram_PARSER.txt"))
}
voc = read.table(PATH, quote = "\"", comment.char = "")
voc_vec = as.vector(voc[, 1])
context('token statistics')
cnt_tsts <- 1  # running test-id counter (initialised in the package's test helpers)
while (TRUE) {
testthat::test_that("in case that the x_vec parameter is not NULL or a valid path to a file it returns an error", {
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = list(), path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_") )
})
testthat::test_that("in case that the path_2folder parameter is not NULL or a valid path to a folder it returns an error", {
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = NULL, path_2folder = list(), path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_") )
})
testthat::test_that("in case that the path_2folder parameter is a character string but does not end in slash it returns an error", {
if (.Platform$OS.type == "windows") {
PATH_folder1 = paste0(getwd(), path.expand("\\VOCAB_token_stats"))
}
if (.Platform$OS.type == "unix") {
PATH_folder1 = paste0(getwd(), path.expand("/VOCAB_token_stats"))
}
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = NULL, path_2folder = PATH_folder1, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_") )
})
testthat::test_that("in case that the path_2file parameter is not NULL or a valid path to a folder it returns an error", {
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = list(), file_delimiter = '\n', n_gram_delimiter = "_") )
})
testthat::test_that("in case that the file_delimiter parameter is not a character string it returns an error", {
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = NULL, file_delimiter = NULL, n_gram_delimiter = "_") )
})
testthat::test_that("in case that the n_gram_delimiter parameter is not a character string it returns an error", {
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = NULL) )
})
testthat::test_that("it returns a word vector from a single file", {
init = token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = PATH, file_delimiter = '\n', n_gram_delimiter = "_")
vec = init$path_2vector()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( length(vec) == 353 && inherits(vec, c('vector', 'character')) )
})
testthat::test_that("it returns a word vector from a folder of files", {
init = token_stats$new(x_vec = NULL, path_2folder = PATH_folder, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec = init$path_2vector()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( length(vec) == 353 * 2 && inherits(vec, c('vector', 'character')) )
})
testthat::test_that("it gives an error if the subset value is not numeric", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_frequency('invalid') )
})
testthat::test_that("it returns the frequency distribution (all data) using a character vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency() )
})
testthat::test_that("it returns the frequency distribution (a subset of the data) using a character vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency(1:10) )
})
testthat::test_that("it returns the frequency distribution (all data) using a path to a folder", {
init = token_stats$new(x_vec = NULL, path_2folder = PATH_folder, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency() )
})
testthat::test_that("it returns the frequency distribution (a subset of the data) using a path to a folder", {
init = token_stats$new(x_vec = voc_vec, path_2folder = PATH_folder, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency(1:10) )
})
testthat::test_that("it returns the frequency distribution (all data) using a path to a file", {
init = token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = PATH, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency() )
})
testthat::test_that("it returns the frequency distribution (a subset of the data) using a path to a file", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = PATH, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$freq_distribution()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_output( init$print_frequency(1:10) )
})
testthat::test_that("it gives an error if the number parameter is not a numeric value", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_count_character(number = NULL) )
})
testthat::test_that("it gives an error if the number parameter is a vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_count_character(number = 1:3) )
})
testthat::test_that("the count_character() method returns the unique numbers for the characters of the vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( inherits(vec_tmp, c('vector', 'numeric')) )
})
testthat::test_that("the print_count_character() method returns the correct words for a specific value (for a vector of words)", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
val = 3
res = init$print_count_character(val)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( unique(sapply(res, nchar)) == val )
})
testthat::test_that("the print_count_character() method returns the correct words for a specific value (for a folder of files)", {
init = token_stats$new(x_vec = NULL, path_2folder = PATH_folder, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
val = 3
res = init$print_count_character(val)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( unique(sapply(res, nchar)) == val )
})
testthat::test_that("the print_count_character() method returns the correct words for a specific value (for a file)", {
init = token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = PATH, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$count_character()
val = 3
res = init$print_count_character(val)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( unique(sapply(res, nchar)) == val )
})
testthat::test_that("it gives an error if the word parameter of the print_collocations method is not a character string", {
tok = tokenize_transform_text(PATH, to_lower = T, split_string = T, min_n_gram = 3, max_n_gram = 3, n_gram_delimiter = "_")
init = token_stats$new(x_vec = tok$token, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$collocation_words()
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_collocations(word = NULL) )
})
testthat::test_that("it returns a named vector with the collocations for a specific word in case that the input is a vector", {
tok = tokenize_transform_text(PATH, to_lower = T, split_string = T, min_n_gram = 3, max_n_gram = 3, n_gram_delimiter = "_")
init = token_stats$new(x_vec = tok$token, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$collocation_words()
res = init$print_collocations(word = "ancient")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( inherits(res, c('numeric', 'vector')) )
})
testthat::test_that("it returns a named vector with the collocations for a specific word in case that the input is a path to a file", {
init = token_stats$new(x_vec = NULL, path_2folder = NULL, path_2file = PATH_parser, file_delimiter = '\n', n_gram_delimiter = "_")
vec_tmp = init$collocation_words()
res = init$print_collocations(word = "ancient")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( inherits(res, c('numeric', 'vector')) )
})
testthat::test_that("it gives an error if the dice_n_gram parameter is not numeric", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = NULL, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the dice_n_gram parameter is a vector", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 1:5, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the dice_n_gram parameter is less than 1", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 0, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the method is not one of dice, levenshtein, cosine", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "invalid", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the split_separator is not a character string", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = NULL, dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the dice_thresh is greater than 1.0", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = " ", dice_thresh = 1.1, upper = TRUE, diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the upper parameter is not logical", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = 'TRUE', diagonal = TRUE, threads = 1) )
})
testthat::test_that("it gives an error if the diagonal parameter is not logical", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = 'TRUE', threads = 1) )
})
testthat::test_that("it gives an error if the threads parameter is less than 1", {
init = token_stats$new(x_vec = voc_vec[1:30], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 0) )
})
testthat::test_that("it returns a matrix for the dice method", {
cols = 30
init = token_stats$new(x_vec = voc_vec[1:cols], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
res = init$string_dissimilarity_matrix(dice_n_gram = 2, method = "dice", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( is.matrix(res) && ncol(res) == cols )
})
testthat::test_that("it returns a matrix for the levenshtein method", {
cols = 30
init = token_stats$new(x_vec = voc_vec[1:cols], path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
res = init$string_dissimilarity_matrix(dice_n_gram = 2, method = "levenshtein", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( is.matrix(res) && ncol(res) == cols )
})
testthat::test_that("it returns a matrix for the cosine method", {
VEC = c('the first sentence', 'the second sentence', 'the third sentence', 'the fourth sentence')
init = token_stats$new(x_vec = VEC, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
res = init$string_dissimilarity_matrix(dice_n_gram = 2, method = "cosine", split_separator = " ", dice_thresh = 0.3, upper = TRUE, diagonal = TRUE, threads = 1)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( is.matrix(res) && ncol(res) == length(VEC) )
})
testthat::test_that("it gives an error if the n_grams parameter is not a numeric value", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$look_up_table(n_grams = NULL) )
})
testthat::test_that("it gives an error if the n_grams parameter is a vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$look_up_table(n_grams = 1:3) )
})
testthat::test_that("it gives an error if the n_grams parameter is less than 1", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$look_up_table(n_grams = 0) )
})
testthat::test_that("it returns a character n-gram vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
is_vec = init$look_up_table(n_grams = 3)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( inherits(is_vec, c('character', 'vector')) )
})
testthat::test_that("it gives an error if the n_grams parameter is not a numeric value", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
lktbl = init$look_up_table(n_grams = 4)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_words_lookup_tbl(n_gram = NULL) )
})
testthat::test_that("it gives an error if the n_grams parameter is not a character vector", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
lktbl = init$look_up_table(n_grams = 4)
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_error( init$print_words_lookup_tbl(n_gram = c("ts'_", "tune")) )
})
testthat::test_that("it returns a vector of n-grams", {
init = token_stats$new(x_vec = voc_vec, path_2folder = NULL, path_2file = NULL, file_delimiter = '\n', n_gram_delimiter = "_")
lktbl = init$look_up_table(n_grams = 4)
res = init$print_words_lookup_tbl(n_gram = "_abo")
cat("test-token_statistics.R : test id", cnt_tsts, "\n")
cnt_tsts <<- cnt_tsts + 1
testthat::expect_true( inherits(res, c('character', 'vector')) )
})
break
}
imData <- function(data, trt=NULL, surv=NULL, outcome=NULL, endfml=NULL,
y0 = NULL, cov = NULL,
duration = 9999, bounds = NULL, trt.label =NULL,
unitTime="days", err.terminate = TRUE, ...) {
lst.var <- list(trt = trt,
surv = surv,
outcome = outcome,
y0 = y0,
trt.label = trt.label,
cov = cov,
endfml = endfml,
duration = duration,
bounds = bounds,
unitTime = unitTime);
lst.var$parsed.endfml <- get.parsed.endfml(endfml);
err.msg <- chk.pars(data, lst.var);
if (is.null(err.msg)) {
if (is.null(lst.var$trt.label)) {
trt.label <- sort(unique(data[,lst.var$trt]));
trt.label <- sapply(trt.label, function(x) {
if (0 == as.numeric(x)) {
return("Control");
} else if (1 == as.numeric(x)) {
return("Intervention");
} else
return(x);
})
lst.var$trt.label <- trt.label;
}
rst <- list(data = data,
lst.var = lst.var);
class(rst) <- get.const("IDEM.CLASS");
} else {
if (err.terminate) {
print(err.msg);
stop("Please check the error messages above.", call. = FALSE);
}
rst <- err.msg;
}
invisible(rst)
}
print.IDEMERROR <- function(x, html = FALSE, ...) {
msg.head <- "Model specification is invalid. Please check the following:";
if (html) {
rst <- paste(msg.head, "<ul><li>",
paste(x, collapse = "</li><li>"),
"</li></ul>");
} else {
rst <- paste(c(msg.head, x), collapse = "\n ");
rst <- paste(rst, "\n");
cat(rst);
}
invisible(rst);
}
print.IDEMDATA <- function(x, ...) {
data <- x$data;
lst.var <- x$lst.var;
fcat <- function(...) {
cat(" ", ..., "\n");
}
cat("There are", nrow(data), "observations of",
ncol(data), "variables in the data. \n");
cat("Detailed specifications are as follows: \n");
fcat("Treatment:", lst.var$trt);
fcat("Survival time:", lst.var$surv);
fcat("Study duration:", lst.var$duration);
fcat("Outcomes (ordered chronically):", lst.var$outcome);
fcat("Endpoint (in R formula):", lst.var$endfml);
if (!is.null(lst.var$y0))
fcat("Baseline outcome:", lst.var$y0);
if (!is.null(lst.var$trt.label))
fcat("Treatment labels:", lst.var$trt.label);
if (!is.null(lst.var$cov))
fcat("Covariates:", lst.var$cov);
if (!is.null(lst.var$bounds))
fcat("Biological boundary of the outcomes:", lst.var$bound);
cat("\n\nSee summary information for missingness frequencies.", "\n")
}
summary.IDEMDATA <- function(object,
opt = c("misstable", "missid", "trt"),
endponly=FALSE,
...) {
opt <- match.arg(opt);
switch(opt,
misstable = get.mis.table(object$data, object$lst.var),
missid = get.needimp(object$data, object$lst.var, endponly=endponly),
trt = sort(unique(object$data[,object$lst.var$trt])))
}
plot.IDEMDATA <- function(x, opt = c("survivor", "missing", "KM"),
cols = c("black", "blue"),
fname=NULL, ...) {
opt <- match.arg(opt);
switch(opt,
survivor = plot.survivor(x$data, x$lst.var, fname=fname, ...),
missing = plot.mispattern(x$data, x$lst.var, fname=fname,
cols=cols, ...),
KM = plot.surv(x$data, x$lst.var, fname=fname,
cols=cols, ...))
}
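# Usage sketch following the idem package this code comes from (assumes its
# bundled `abc` data set and internal checkers such as chk.pars()):
rst <- imData(abc, trt = "TRT", surv = "SURV", outcome = c("Y1", "Y2"),
              endfml = "Y2", cov = c("AGE"), duration = 365,
              bounds = c(0, 100))
print(rst)
summary(rst, opt = "misstable")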
library(colorednoise)
library(purrr)
library(data.table)
context("Autocorrelation of colored noise")
test_that("colored noise can produce blue noise", {
test_blue <- rerun(.n = 1000, colored_noise(timesteps = 100, mean = 0.5,
sd = 0.2, phi = -0.5)) %>% map_dbl(autocorrelation) %>%
mean()
expect_true(-0.55 < test_blue && test_blue < -0.45)
})
test_that("colored noise can produce red noise", {
test_red <- rerun(.n = 1000, colored_noise(timesteps = 100, mean = 0.5,
sd = 0.2, phi = 0.5)) %>% map_dbl(autocorrelation) %>% mean()
expect_true(0.55 > test_red && test_red > 0.45)
})
test_that("cor2cov output is correct", {
corr <- matrix(c(1, 0.53, 0.73, 0.53, 1, 0.44, 0.73, 0.44, 1), nrow = 3)
sigmas <- c(2, 0.3, 1.2)
covar <- cor2cov(sigmas, corr)
expect_true(all.equal(cov2cor(covar), corr))
})
test_that("colored_multi_rnorm can produce red noise", {
set.seed(989)
corr <- matrix(c(1, 0.53, 0.73, 0.53, 1, 0.44, 0.73, 0.44, 1), nrow = 3)
test <- colored_multi_rnorm(100, c(0, 3, 5), c(1, 0.5, 1), c(0.5, 0.5, 0.5), corr) %>%
as.data.table() %>%
.[, .(V1_autocorr = autocorrelation(V1), V2_autocorr = autocorrelation(V2), V3_autocorr = autocorrelation(V3))] %>%
as.numeric()
expect_true(all(test > 0.5))
})
test_that("colored_multi_rnorm can produce blue noise", {
set.seed(19045)
corr <- matrix(c(1, 0.53, 0.73, 0.53, 1, 0.44, 0.73, 0.44, 1), nrow = 3)
test <- colored_multi_rnorm(100, c(0, 3, 5), c(1, 0.5, 1), c(-0.5, -0.5, -0.5), corr) %>%
as.data.table() %>%
.[, .(V1_autocorr = autocorrelation(V1), V2_autocorr = autocorrelation(V2), V3_autocorr = autocorrelation(V3))] %>%
as.numeric()
expect_true(all(test < -0.4))
})
sim_daily <- function(N, sd=2.5, change_sd=0.05, week_sd=NA, month_sd=NA, year_sd=NA,
                      week_change_sd=NA, month_change_sd=NA, year_change_sd=NA,
                      innovations_sd=1, sa_sd=NA,
                      model=list(order=c(3,1,1), ma=0.5, ar=c(0.2, -0.4, 0.1)),
                      beta_1=0.9, beta_tau=0, start=c(2020,1), multiplicative=TRUE,
                      extra_smooth=FALSE, calendar=list(which="Easter", from=-2, to=2),
                      outlier=NULL, timewarping=TRUE, as_index=FALSE) {
if (is.na(week_sd)) {week_sd <- sd}
if (is.na(month_sd)) {month_sd <- sd}
if (is.na(year_sd)) {year_sd <- sd}
if (is.na(week_change_sd)) {week_change_sd <- change_sd/365*7}
if (is.na(month_change_sd)) {month_change_sd <- change_sd/365*31}
if (is.na(year_change_sd)) {year_change_sd <- change_sd}
series <- stats::ts(stats::arima.sim(n=365*N-1, model=model, sd=innovations_sd), start=start, frequency=365)
if (!is.na(sa_sd)) {
series <- series * sa_sd / sd(series, na.rm=FALSE)
}
if (multiplicative && min(series, na.rm=TRUE) < 0) series <- series - min(series, na.rm=TRUE) + 100
if (as_index && N >= 2) series <- series / mean(series[366:730], na.rm=TRUE) * 100
sfac365 <- sim_sfac(length(series), freq=365, sd=year_sd, change_sd=year_change_sd, beta_1=beta_1, beta_tau=beta_tau, ar=0.99, ma=0.99, start=start, burnin=3, multiplicative=multiplicative, extra_smooth=extra_smooth)
series <- dsa::ts2xts(series)
times <- seq.Date(from=stats::start(series), to=stats::end(series), by="days")
blank <- xts::xts(rep(NA,length(times)), times)
series <- zoo::na.approx(xts::merge.xts(series,blank)$series)
seas_adj <- series
sfa <- dsa::ts2xts(sfac365)
sfac365 <- zoo::na.approx(xts::merge.xts(sfa, blank)$sfa)
if(year_sd > 0){
if (multiplicative) {
sfac365 <- (sfac365-1) / stats::sd(sfac365) * year_sd/100
sfac365 <- 1+sfac365
series <- series * as.numeric(sfac365) } else {
sfac365 <- sfac365 / stats::sd(sfac365) * year_sd
series <- series + as.numeric(sfac365)
}}
sfac7 <- sim_sfac(length(series), freq=7, sd=week_sd, change_sd=week_change_sd, beta_1=beta_1, beta_tau=beta_tau, start=start, multiplicative=multiplicative, extra_smooth=extra_smooth)
if (multiplicative) {
series <- series * as.numeric(sfac7) } else {
series <- series + as.numeric(sfac7)
}
series31 <- dsa:::.fill31(series,"spline")
sfac31 <- sim_sfac(length(series31), freq=31, sd=month_sd, change_sd=month_change_sd, beta_1=beta_1, beta_tau=beta_tau, ar=0.9, ma=0.99, start=start, burnin=1, multiplicative=multiplicative, extra_smooth=extra_smooth)
if (timewarping) {
sfac31 <- .stretch_re(sfac31)
} else {
sfac31 <- dsa:::.drop31(stats::ts(sfac31, start=start, frequency=372), 1, 365)}
if(month_sd > 0){
if (multiplicative) {
sfac31 <- (sfac31-1) / stats::sd(sfac31) * month_sd/100
sfac31 <- 1+sfac31
series <- series * as.numeric(sfac31) } else {
sfac31 <- sfac31 / stats::sd(sfac31) * month_sd
series <- series + as.numeric(sfac31)
}}
if (is.null(calendar)) {cfac <- series * 0 + as.numeric(multiplicative)} else {
calendar[["n"]]=length(series)
calendar[["multiplicative"]] <- multiplicative
calendar[["start"]] <- as.Date(paste(start, collapse="-"), "%Y-%j")
calendar[["freq"]] <- 365.25
cfac <- suppressMessages(do.call(sim_calendar, calendar))}
if (multiplicative) {series <- series*cfac} else {series <- series + cfac}
if (is.null(outlier)) {outlier_effect <- series * 0 + as.numeric(multiplicative)} else {
outlier[["n"]]=length(series)
outlier[["multiplicative"]] <- multiplicative
outlier[["start"]] <- as.Date(paste(start, collapse="-"), "%Y-%j")
outlier_effect <- apply(do.call(sim_outlier, outlier), 1, ifelse(multiplicative,prod, sum))
}
if (multiplicative) {series <- series*outlier_effect; seas_adj <- seas_adj * outlier_effect} else {series <- series + outlier_effect; seas_adj <- seas_adj + outlier_effect}
out <- xts::merge.xts(series, seas_adj, xts::xts(sfac7, zoo::index(series)), xts::xts(sfac31, zoo::index(series)), sfac365, cfac, outlier_effect)
colnames(out) <- c("original", "seas_adj", "sfac7", "sfac31", "sfac365", "cfac", "outlier")
return(out)
}
.stretch_re <- function(seas_component) {
start <- stats::start(seas_component)
Mat <- matrix(c(seas_component, rep(NA, 31-length(seas_component)%%31)), nrow=31)
colnames(Mat) <- as.character(seq.Date(as.Date(paste0(start[1], "-", start[2], "-01")), by="months", length.out=ncol(Mat)))
.transformX <- function(x, to=30) {
z <- matrix(NA, nrow=to, ncol=length(x))
z[1,1] <- x[1]
z[to,] <- x
zz <- zoo::na.spline(as.vector(z))
y <- c(matrix(zz, nrow=31)[31,], rep(NA, 31-to))
return(y)
}
.is.leapyear <- function(Year) {
Year <- as.numeric(Year)
(Year %% 4 == 0 && Year %% 100 != 0) || Year %% 400 == 0
}
.not.last <- function(X) {
out <- rep(1, ncol(X))
out[ncol(X)] <- 0
return(as.logical(out))
}
month30 <- c("04", "06", "09", "11")
month28 <- c("02")
Mat[,format(as.Date(colnames(Mat)), "%m") %in% month30 & .not.last(Mat)] <- apply(Mat[,format(as.Date(colnames(Mat)), "%m") %in% month30 & .not.last(Mat)], 2, .transformX, 30)
Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & !sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat) )] <- apply(Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & !sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))], 2, .transformX, 28)
if (sum((format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat) )) > 1) {
Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))] <- apply(Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))], 2, .transformX, 29)} else {
if (sum((format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))) == 1) {
Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))] <- .transformX(Mat[,(format(as.Date(colnames(Mat)), "%m") %in% month28 & sapply(format(as.Date(colnames(Mat)), "%Y"), .is.leapyear) & .not.last(Mat))], 29)
}}
raw <- stats::na.omit(as.vector(Mat))
out <- xts::xts(raw, seq.Date(as.Date(paste0(start[1], "-", start[2], "-01")), by="days", length.out=length(raw)))
return(out)
}
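# Usage sketch (sim_sfac(), sim_calendar(), sim_outlier() and the dsa package
# are assumed available, as in the package this simulator comes from):
x <- sim_daily(N = 4, sd = 5, multiplicative = TRUE)
plot(x$original)
lines(x$seas_adj, col = "red")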
setGeneric("piecewiseLinearApproximation",
function(object, ...) standardGeneric("piecewiseLinearApproximation"));
setMethod(
f="piecewiseLinearApproximation",
signature(object="FuzzyNumber"),
definition=function(
object, method=c("NearestEuclidean", "SupportCorePreserving", "Naive"),
knot.n=1, knot.alpha=seq(0, 1, length.out=knot.n+2)[-c(1,knot.n+2)],
...,
verbose=FALSE
)
{
method <- match.arg(method)
if (missing(knot.n) && !missing(knot.alpha))
knot.n <- length(knot.alpha)
stopifnot(is.numeric(knot.n), length(knot.n) == 1, knot.n >= 0)
stopifnot(is.numeric(knot.alpha), length(knot.alpha) == knot.n)
stopifnot(is.finite(knot.alpha), knot.alpha >= 0, knot.alpha <= 1)
if (knot.n == 0)
stop('please use the trapezoidalApproximation() function')
if (is.na(object@lower(0)) || is.na(object@upper(0)))
stop("cannot approximate fuzzy numbers with no alpha bound generators")
if (method == "Naive")
return(piecewiseLinearApproximation_Naive(object, knot.n, knot.alpha))
else if (method == "SupportCorePreserving")
{
return(piecewiseLinearApproximation_ApproximateSupportCorePreserving(object, knot.n, knot.alpha, ...))
}
else if (method == "NearestEuclidean")
{
if (knot.n > 200)
warning('`knot.n` is large - consider using method="Naive".')
return(piecewiseLinearApproximation_ApproximateNearestEuclideanN(object, knot.n, knot.alpha, verbose, ...))
}
}
)
piecewiseLinearApproximation_ApproximateSupportCorePreserving <- function(A, knot.n, knot.alpha, ...)
{
if (knot.n != 1) stop("this approximation method currently may only be used only for knot.n == 1")
xsol <- function(A, alpha, ...) {
f1 <- function(beta) 3*beta*(A@a1+(A@a2-A@a1)*A@lower(beta)-A@a1*(alpha-beta)/alpha)/alpha
f2 <- function(beta) 3*(1-beta)*(A@a1+(A@a2-A@a1)*A@lower(beta)-A@a2*(beta-alpha)/(1-alpha))/(1-alpha)
if (alpha <= 0) xm <- A@a1 else if (alpha >= 1) xm <- A@a2 else
xm <- integrate(f1, 0, alpha, ...)$val +
integrate(f2, alpha, 1, ...)$val
if (xm < A@a1) A@a1
else if (xm > A@a2) A@a2
else xm
}
ysol <- function(A, alpha, ...) {
f1 <- function(b) 3*b*(A@a3+(A@a4-A@a3)*A@upper(b)-A@a4*(alpha-b)/alpha)/alpha
f2 <- function(b) 3*(1-b)*(A@a3+(A@a4-A@a3)*A@upper(b)-A@a3*(b-alpha)/(1-alpha))/(1-alpha)
if (alpha <= 0) ym <- A@a4 else if (alpha >= 1) ym <- A@a3 else
ym <- integrate(f1, 0, alpha, ...)$val +
integrate(f2, alpha, 1, ...)$val
if (ym < A@a3) A@a3
else if (ym > A@a4) A@a4
else ym
}
PiecewiseLinearFuzzyNumber(A@a1, A@a2, A@a3, A@a4, knot.alpha=knot.alpha,
knot.left=xsol(A, knot.alpha, ...), knot.right=ysol(A, knot.alpha, ...))
}
piecewiseLinearApproximation_Naive <- function(object, knot.n, knot.alpha)
{
a <- alphacut(object, knot.alpha)
if (knot.n > 1)
{
knot.left <- a[,1]
knot.right <- rev(a[,2])
} else
{
knot.left <- a[1]
knot.right <- a[2]
}
PiecewiseLinearFuzzyNumber(object@a1, object@a2, object@a3, object@a4,
knot.n=knot.n, knot.alpha=knot.alpha, knot.left=knot.left, knot.right=knot.right)
}
piecewiseLinearApproximation_ApproximateNearestEuclideanN <- function(object, knot.n, knot.alpha, verbose, ...)
{
alpha <- c(0, knot.alpha, 1)
stopifnot(!anyDuplicated(alpha), !is.unsorted(alpha))
Phi <- matrix(NA_real_, nrow=2*knot.n+4, ncol=2*knot.n+4)
Phi[1,1] <- 2
for (j in 2:(knot.n+2)) {
Phi[j,1] <- Phi[1,j] <- 2-(alpha[j]+alpha[j-1])/2
for (i in 2:j) {
if (i == j) {
Phi[i,j] <- 2-(2*alpha[j]+alpha[j-1])/3
} else {
Phi[i,j] <- Phi[j,i] <- Phi[i-1,j]
}
}
}
Phi[1:(knot.n+3),knot.n+3] <- Phi[knot.n+3,1:(knot.n+3)] <- 1
for (j in (knot.n+4):(2*knot.n+4)) {
Phi[j,1] <- Phi[1,j] <- (alpha[2*knot.n+5-j]+alpha[2*knot.n+6-j])/2
for (i in 2:j) {
if (i == j) {
Phi[i,j] <- (2*alpha[2*knot.n+5-j]+alpha[2*knot.n+6-j])/3
} else {
Phi[i,j] <- Phi[j,i] <- Phi[j,i-1]
}
}
}
w <- numeric(2*knot.n+3)
for (i in 1:(2*knot.n+3)) {
if (i < knot.n+2) {
w[i] <- tryCatch(
integrateAlpha(object, "lower", alpha[i], alpha[i+1], ...),
error=function(e) {
aph <- seq(alpha[i], alpha[i+1], length.out=5)
(alpha[i+1]-alpha[i])*
sum((object@a1+(object@a2-object@a1)*object@lower(aph))*c(7,32,12,32,7)/90)
})
}
else if (i == knot.n+2)
w[i] <- 0
else {
w[i] <- tryCatch(
integrateAlpha(object, "upper", alpha[2*knot.n-i+4], alpha[2*knot.n-i+5], ...),
error=function(e) {
aph <- seq(alpha[2*knot.n-i+4], alpha[2*knot.n-i+5], length.out=5)
(alpha[2*knot.n-i+5]-alpha[2*knot.n-i+4])*
sum((object@a3+(object@a4-object@a3)*object@upper(aph))*c(7,32,12,32,7)/90)
})
}
}
wp <- numeric(2*knot.n+4)
wp[1] <- 0
for (i in 1:(2*knot.n+3)) {
if (i < knot.n+2) {
wp[i+1] <- tryCatch(
integrateAlpha(object, "lower", alpha[i], alpha[i+1], weight=identity, ...),
error=function(e) {
aph <- seq(alpha[i], alpha[i+1], length.out=5)
(alpha[i+1]-alpha[i])*
sum((object@a1+(object@a2-object@a1)*object@lower(aph))*aph*c(7,32,12,32,7)/90)
})
wp[i+1] <- (wp[i+1]-alpha[i]*w[i])/(alpha[i+1]-alpha[i])
}
else if (i == knot.n+2)
wp[i+1] <- 0
else {
wp[i+1] <- tryCatch(
integrateAlpha(object, "upper", alpha[2*knot.n-i+4], alpha[2*knot.n-i+5], weight=identity, ...),
error=function(e) {
aph <- seq(alpha[2*knot.n-i+4], alpha[2*knot.n-i+5], length.out=5)
(alpha[2*knot.n-i+5]-alpha[2*knot.n-i+4])*
sum((object@a3+(object@a4-object@a3)*object@upper(aph))*aph*c(7,32,12,32,7)/90)
})
wp[i+1] <- (alpha[2*knot.n-i+5]*w[i]-wp[i+1])/(alpha[2*knot.n-i+5]-alpha[2*knot.n-i+4])
}
}
b <- numeric(2*knot.n+4)
b[1] <- sum(w)
for (i in 2:(2*knot.n+4))
b[i] <- b[i-1] - wp[i-1] - w[i-1] + wp[i]
if (verbose) {
cat(sprintf("b=(%s)\n", paste(sprintf("%8g", b), collapse=", ")))
}
   EPS <- .Machine$double.eps^0.5
EPS_RELATIVE <- EPS*(object@a4-object@a1)
iter <- 1L
K <- rep(FALSE, length(b))
repeat {
d <- solve(Phi, b)
m <- which.min(d[-1])+1
if (verbose)
{
cat(sprintf("Pass %g: K={%5s}, x=(%s)\n",
iter, paste(as.numeric(which(K)),collapse=""),
paste(sprintf("%8g", d), collapse=", ")))
}
if (d[m] >= -EPS_RELATIVE) break
      if (K[m]) stop("not converged; this is a bug, please contact the package maintainer")
Phi[,m] <- 0
Phi[m,m] <- -1
K[m] <- TRUE
iter <- iter+1L
}
d[K] <- 0
d[d<0 & c(FALSE, rep(TRUE, length(d)-1))] <- 0
res <- cumsum(d)
return(PiecewiseLinearFuzzyNumber(res[1], res[knot.n+2], res[knot.n+3], res[2*knot.n+4],
knot.n=knot.n,
knot.alpha=knot.alpha,
knot.left=res[2:(knot.n+1)],
knot.right=res[(knot.n+4):(2*knot.n+3)]))
} |
pcaplot3d <- function (x,...) UseMethod("pcaplot3d")
pcaplot3d.symproc <- function(x,pcshow=c(1,2,3),mag=3,color=4,lwd=1,sym=TRUE,legend=TRUE,type=c("spheres","points"),...)
{
refshape <- x$mshape
if (sym) {
PCs <- x$PCsym
Scores <- x$PCscore_sym
} else {
PCs <- x$PCasym
Scores <- x$PCscore_asym
}
.pcaplot3d(refshape, PCs, Scores, pcshow=pcshow, mag=mag,color=color, lwd=lwd,legend=legend,type=type)
}
pcaplot3d.nosymproc <- function(x,pcshow=c(1,2,3),mag=3,color=4,lwd=1,legend=TRUE,type=c("spheres","points"),...)
{
refshape <- x$mshape
PCs <- x$PCs
Scores <- x$PCscores
.pcaplot3d(refshape, PCs, Scores, pcshow=pcshow, mag=mag,color=color, lwd=lwd, legend=legend, type=type)
}
.pcaplot3d <- function(refshape,PCs, Scores, pcshow=c(1,2,3), mag=3,color=4,lwd=1,legend=TRUE,type=c("spheres","points"),...) {
A <- refshape
k <- dim(A)[1]
m <- dim(A)[2]
if (is.vector(PCs))
PCs <- matrix(PCs,length(PCs),1)
type <- match.arg(type,c("spheres","points"))
npc <- dim(PCs)[2]
lpc <- length(pcshow)
rainb <- rainbow(lpc)
sds <- 0
if (length(mag)==1)
mag <- c(rep(mag,lpc))
for (i in 1:npc)
sds[i] <- sd(Scores[,i])
sz <- cSize(refshape)/sqrt(k)*(1/80)
outarr <- array(NA, dim=c(dim(refshape),length(pcshow)))
for (i in 1:length(pcshow)) {
pc <- refshape+matrix(PCs[,pcshow[i]]*mag[i]*sds[pcshow[i]],k,3)
outarr[,,i] <- pc
linemesh <- list()
linemesh$vb <- t(cbind(rbind(refshape,pc),1))
linemesh$it <- t(cbind(1:k,1:k,(1:k)+k))
class(linemesh) <- "mesh3d"
wire3d(linemesh,lwd=lwd,lit=F,col=rainb[i])
}
if (legend) {
plot(0,0, xlab="", ylab="", axes =F, cex=0,xlim=c(-1,1), ylim=c(-1,1))
legend(-1,1, pch=20, cex=2, col=rainb, legend=paste0("PC",pcshow))
}
if (type == "spheres")
spheres3d(refshape, col = color,radius=sz)
else
points3d(refshape,col=color)
invisible(outarr)
} |
tar_path <- function(
name = NULL,
default = NA_character_,
create_dir = FALSE,
store = targets::tar_config_get("store")
) {
name <- tar_deparse_language(substitute(name))
tar_assert_chr(name %|||% character(0))
tar_assert_chr(default)
tar_assert_lgl(create_dir)
tar_assert_scalar(create_dir)
out <- if_any(
is.null(name),
tar_path_running(default, path_store = store),
path_objects(path_store = store, name = name)
)
if (create_dir) {
dir_create(dirname(out))
}
out
}
tar_path_running <- function(default, path_store) {
if_any(
tar_runtime$exists_target(),
store_tar_path(
tar_runtime$get_target()$store,
tar_runtime$get_target(),
tar_runtime$get_store()
),
as.character(default)
)
} |
test_that("Mock square root function", {
mock_response_success <- mock_response(
input = as_json(list(x = 4)),
expected_response_body = 2,
config = basic_lambda_config(handler = "sqrt")
)
expect_true(mock_response_success)
})
test_that("Mock custom function", {
mock_response_success <- mock_response(
input = as_json(list(number = 5)),
expected_response_body = as_json(list(parity = "odd")),
config = basic_lambda_config(handler = "parity")
)
expect_true(mock_response_success)
})
test_that("Custom deserialisers are used in event handling", {
custom_deserialiser <- function(event_content) {
list(number = 0)
}
config <- basic_lambda_config(
handler = "parity",
deserialiser = custom_deserialiser
)
mock_response_success <- mock_response(
input = as_json(list(number = 5)),
expected_response_body = as_json(list(parity = "even")),
config = config
)
expect_true(mock_response_success)
})
test_that("Custom serialisers are used in event handling", {
custom_serialiser <- function(result) {
"my heart is a fish"
}
config <- basic_lambda_config(
handler = "parity",
serialiser = custom_serialiser
)
mock_response_success <- mock_response(
input = as_json(list(number = 5)),
expected_response_body = "my heart is a fish",
config = config
)
expect_true(mock_response_success)
})
test_that("Mock function with no inputs", {
mock_response_success <- mock_response(
input = as_json(list()),
expected_response_body = as_json(
list(animal = "dog", breed = "corgi")
),
config = basic_lambda_config(handler = "no_arguments")
)
expect_true(mock_response_success)
})
test_that("handlers that accept a context argument receive it", {
assert_context_exists <- function(context) {
if (missing(context)) {
stop("context not received")
}
list(animal = "dog", breed = "corgi")
}
mock_response_success <- mock_response(
input = as_json(list()),
expected_response_body = as_json(
list(animal = "dog", breed = "corgi")
),
config = basic_lambda_config(handler = "assert_context_exists")
)
expect_true(mock_response_success)
})
test_that("errors are sent to invocation error endpoint", {
mock_invocation_error_success <- mock_invocation_error(
input = as_json(list(x = 3)),
expected_error_body = as_json(
list(
errorMessage = "unused argument (x = 3)",
errorType = "simpleError",
stackTrace = list()
)
),
config = basic_lambda_config(handler = "no_arguments")
)
expect_true(mock_invocation_error_success)
}) |
tril <- function(M){
   # Lower triangle of a square matrix: entries above the diagonal are set to 0.
   nr <- nrow(M)
   nc <- ncol(M)
   if (nr != nc) stop("non-square matrix")
   if (nr > 1)
      for (i in 1:(nr - 1)) M[i, (i + 1):nr] <- 0
   M
}
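# A minimal usage sketch for tril(), with example values assumed (not from the
# original source): entries above the diagonal of a square matrix become zero.
m_demo <- matrix(1:9, nrow = 3)
tril(m_demo)  # column-major 1:9 with the upper triangle zeroed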
context("Quoting")
test_that("quoting captures current environment", {
x <- .(a, b, c)
expect_that(attr(x, "env"), is_identical_to(environment()))
x <- as.quoted(c("a", "b", "c"))
expect_that(attr(x, "env"), is_identical_to(environment()))
})
test_that("can't pass bogus environment for evaluation", {
expect_that(eval.quoted(.(x), envir = -1), throws_error("must be"))
})
test_that("evaluation takes place in correct environment", {
a <- 2
x <- local({
a <- 1
.(a)
})
expect_that(eval.quoted(x)$a, equals(1))
df <- data.frame(x = 1:10)
x <- local({
a <- 1
.(x * a)
})
expect_that(eval.quoted(x, df)[[1]], equals(1:10))
})
test_that("failsafe evaluation", {
b <- 2
x <- local({
a <- 1
.(a)
})
expect_that(eval.quoted(x, try = TRUE)$b, equals(NULL))
})
test_that("names work for long expressions", {
q <- .(foo = barjasdfgjadhfgjsdhfgusdhfgusheguisdhguioahsrofasdgsdfgsdfg +
dfgafgasdfgsdfgsdfgsdfgsdfgsdfgsdfg)
expect_that(names(q), equals("foo"))
})
test_that("printing works", {
expect_that(print(as.quoted(NULL)), testthat::prints_text("list()"))
expect_that(print(as.quoted("id")), testthat::prints_text("id"))
expect_that(print(as.quoted("3")), testthat::prints_text("3"))
expect_that(print(as.quoted(c("a", "b"))), testthat::prints_text("List of 2"))
expect_that(print(as.quoted(~a+b)), testthat::prints_text("List of 2"))
expect_that(print(as.quoted(~a)), testthat::prints_text("List of 1"))
expect_that(print(as.quoted(as.name("a"))), testthat::prints_text("List of 1"))
})
test_that("concatenation", {
expect_equal(c(.(a), .(b)), .(a, b))
expect_equal(c(.(a), .(b, c)), .(a, b, c))
expect_equal(c(.(a), .(b), .(c)), .(a, b, c))
})
test_that("extraction", {
expect_equal(.(a, b)[1], .(a))
expect_equal(.(a, b, c)[-1], .(b, c))
}) |
model_parameters.censReg <- model_parameters.default
standard_error.censReg <- standard_error.default
p_value.censReg <- p_value.default |
c.timeDate <-
function(..., recursive = FALSE)
{
z = list(...)
data <- unlist(lapply(z, function(td) c(as.numeric(td, unit = "secs"))))
timeDate(data, zone = "GMT", FinCenter = z[[1]]@FinCenter)
} |
mcalibrateControlLimit <- function(targetARL = NULL, targetMRL = NULL,
n, m, nv, theta = NULL, Ftheta = NULL,
dists=c("Normal", "Normal"), mu=c(0,0), sigma=NULL, dists.par = matrix(c(0,1,1,0,1,1), ncol=2), correlation=0,
chart="T2", chart.par=c(10), replicates = 50000,
isParallel = FALSE, maxIter = 20, progress = TRUE,
alignment="unadjusted", constant=NULL, absolute=FALSE) {
  if (is.null(targetARL) && is.null(targetMRL)) {
    stop("Target ARL or target MRL missing")
  } else if (!is.null(targetARL) && !is.null(targetMRL)) {
    stop("Two targets defined, delete one")
  }
p <- 0.1
if (is.null(targetARL)) {
ARL0 <- (targetMRL * 1.5) / 10
} else {
ARL0 <- targetARL
}
switch(chart,
T2 = {
name.par <- "h"
index.par <- 1
}
)
x <- rep(NA, maxIter)
y <- x
i <- 1
x[i] <- chart.par[index.par]
while (i < maxIter) {
chart.par[index.par] <- x[i]
result <- SNSchart::mgetARL(n = n, m = m, nv = nv,
theta = theta, Ftheta = Ftheta,
dists = dists, mu = mu, sigma = sigma, dists.par = dists.par,
correlation=correlation, chart = chart, chart.par = chart.par,
replicates = replicates, isParallel = isParallel, calibrate = TRUE, arl0 = targetARL,
alignment=alignment, constant=constant,absolute=absolute)
target <- NULL
if (!is.null(targetARL)) {
y[i] <- result$ARL
target <- targetARL
name <- "ARL"
} else {
y[i] <- result$MRL
target <- targetMRL
name <- "MRL"
}
print(target)
if (abs(y[i] - target) <= 0.05 * target) {
if (progress) message("Convergence found with", name.par, "=", x[i], "--", name, "=", y[i], "\n", sep = " ")
output <- list(
objective.function = y[i],
par.value = x[i],
found = TRUE
)
return(output)
} else {
      f1 <- 0
      f2 <- 0
      if (i > 2) {
        # bracketing is checked on the simulated objective values, not on the
        # chart parameter itself
        f1 <- y[i] - target
        f2 <- y[i - 1] - target
      }
if (f1 * f2 < 0) {
x0 <- x[i - 1]
x1 <- x[i]
y0 <- y[i - 1]
y1 <- y[i]
m <- (y1 - y0) / (x1 - x0)
b <- y0 - m * x0
x2 <- (target - b) / m
x[i + 1] <- x2
} else {
if (y[i] <= target) {
x[i + 1] <- x[i] * (1 + p)
} else {
x[i + 1] <- x[i] * (1 - p)
}
if (progress) message("obtained=", y[i], " target=", target, " Change h=", x[i], " to h=", x[i + 1], "\n", sep = "")
}
}
i <- i + 1
}
posMin <- which.min(abs(target - y))
if (progress) message("Best ", name.par, " found ", x[posMin], "--", name, "=", y[posMin], "\n", sep = " ")
output <- list(
objective.function = y[posMin],
par.value = x[posMin],
found = FALSE
)
return(output)
} |
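# Hedged standalone sketch of the interpolation step used above (names and
# values assumed): once two trial limits x0 < x1 yield simulated objective
# values y0, y1 that straddle the target, the next candidate solves the secant
# line y = m*x + b for y = target.
secant_step <- function(x0, x1, y0, y1, target) {
  m <- (y1 - y0) / (x1 - x0)  # slope between the two trial points
  b <- y0 - m * x0            # intercept of the secant line
  (target - b) / m            # control limit at which the line hits the target
}
secant_step(9, 11, 250, 450, 370)  # 10.2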
break.string <- function(string, sep = " ")
{
  # Split 'string' on every occurrence of 'sep' (which may be multi-character).
  sep.len <- nchar(sep)
  sep.loc <- NULL
  for (i in 1:nchar(string))
    if (substring(string, i, (i + sep.len - 1)) == sep) sep.loc <- c(sep.loc, i)
  numwords <- length(sep.loc) + 1
  out <- character(numwords)
  if (numwords == 1)
    out[1] <- string
  else
  {
    out[1] <- substring(string, 1, (sep.loc[1] - 1))
    if (numwords > 2)
      for (i in 2:(numwords - 1))
        out[i] <- substring(string, (sep.loc[i - 1] + sep.len), (sep.loc[i] - 1))
    out[numwords] <- substring(string, (sep.loc[numwords - 1] + sep.len),
                               nchar(string))
  }
  return(out)
}
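# A minimal usage sketch (inputs assumed): split on the default single-space
# separator or on a multi-character one.
break.string("alpha beta gamma")     # c("alpha", "beta", "gamma")
break.string("a::b::c", sep = "::")  # c("a", "b", "c")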
context("Data Have Descriptions")
getdescript <- function(package){
db <- tools::Rd_db(package)
descript <- lapply(db,function(x) {
tags <- tools:::RdTags(x)
if("\\description" %in% tags){
out <- paste(unlist(x[which(tags=="\\description")]),collapse="")
}
else
out <- NULL
invisible(out)
})
gsub("\n","",unlist(descript))
}
descripts <- getdescript("stevedata")
expect_equal(length(data(package = "stevedata")$results[, "Item"]), length(descripts)) |
myplotcompare.trtsel <-
function(x, bootstraps = 500, alpha = .05,
ci = "horizontal", marker.names = c("Marker 1", "Marker 2"),
fixeddeltas.y1 = NULL, fixeddeltas.y2,
xlab = NULL,
ylab = NULL,
xlim = NULL,
ylim = NULL,
           main = NULL, offset, conf.bands)
{
ts1 <- x$x
ts2 <- x$x2
if(substr(ci, 1, 4) =="hori") {
fix.ind = 2
out.ind = 1
}else if(substr(ci, 1, 4) =="vert"){
fix.ind = 1
out.ind = 2
}
fittedrisk.t0.y1 <- ts1$derived.data$fittedrisk.t0
fittedrisk.t1.y1 <- ts1$derived.data$fittedrisk.t1
delta.y1 <- ts1$derived.data$trt.effect
link <- ts1$model.fit$family
fittedrisk.t0.y2 <- ts2$derived.data$fittedrisk.t0
fittedrisk.t1.y2 <- ts2$derived.data$fittedrisk.t1
delta.y2 <- ts2$derived.data$trt.effect
rho <- ts1$model.fit$cohort.attributes
study.design <- ts1$model.fit$study.design
trt <- ts1$derived.data[[ts1$treatment.name]]
if( link$family == "time-to-event"){
event.name1 = ts1$formula[[2]]
event.name2 = ts2$formula[[2]]
}else{
event.name1 = as.character(ts1$formula[[2]])
event.name2 = as.character(ts2$formula[[2]])
}
boot.sample <- ts1$functions$boot.sample
get.F <- ts1$functions$get.F
if( length(fixeddeltas.y1) > 0 & bootstraps >0 ) {
if(link$family == "risks_provided"){
provided_risk <- cbind(fittedrisk.t0.y1,
fittedrisk.t1.y1,
fittedrisk.t0.y2,
fittedrisk.t1.y2)
}else{
provided_risk = NULL
}
boot.dat <- replicate( bootstraps, one.boot.plot.compare(data1 = ts1$derived.data, data2 = ts2$derived.data,
formulas = list(ts1$formula, ts2$formula),
event.names = c(event.name1, event.name2),
treatment.names = c(ts1$treatment.name, ts2$treatment.name),
ci = ci,
fixeddeltas.y1 = fixeddeltas.y1, fixeddeltas.y2 = fixeddeltas.y2,
rho = rho, study.design = study.design, obp.boot.sample = boot.sample, obp.get.F = get.F, fix.ind, out.ind, link = link,
provided_risk = provided_risk,
prediction.times = c(x$x$prediction.time, x$x2$prediction.time)))
if(length(fixeddeltas.y1)==1){
bounds.delta.y1<- quantile(boot.dat[1,,], probs = c(alpha/2, 1-alpha/2), na.rm = TRUE)
bounds.delta.y2<- quantile(boot.dat[2,,], probs = c(alpha/2, 1-alpha/2), na.rm = TRUE)
bounds.delta.y1 = t(t(bounds.delta.y1))
bounds.delta.y2 = t(t(bounds.delta.y2))
}else{
bounds.delta.y1<- apply(boot.dat[1,,], 1, function(x, ...){quantile(unlist(x), ...)}, probs = c(alpha/2, 1-alpha/2), na.rm = TRUE)
bounds.delta.y2<- apply(boot.dat[2,,], 1, function(x, ...){quantile(unlist(x), ...)}, probs = c(alpha/2, 1-alpha/2), na.rm = TRUE)
}
}else{
bounds.delta.y1 <- NULL
bounds.delta.y2 <- NULL
}
if(is.null(ylim)){
if(substr(ci, 1,1) %in% c("v", "V")){
min.delta <- min(c(delta.y1, delta.y2, bounds.delta.y1, bounds.delta.y2), na.rm=TRUE)
max.delta <- max(c(delta.y1, delta.y2, bounds.delta.y1, bounds.delta.y2), na.rm=TRUE)
cen <- mean(c(min.delta, max.delta), na.rm=TRUE)
}else{
min.delta <- min(c(delta.y1, delta.y2), na.rm=TRUE)
max.delta <- max(c(delta.y1, delta.y2), na.rm=TRUE)
cen <- mean(c(min.delta, max.delta), na.rm=TRUE)
}
ran <- max.delta - min.delta
ran <- ran*1.1
ylim <- c(cen-ran/2, cen+ran/2)
}
ts1.curves <- trteffectPLOTcompare_gg(x1=ts1, x2 = ts2, ci = ci, ci.bounds = rbind(bounds.delta.y1, bounds.delta.y2), get.F = get.F, fixed.values = rbind(fixeddeltas.y1, fixeddeltas.y2+offset), conf.bands = conf.bands, rho=rho, xlab=xlab, ylab = ylab, xlim=xlim, ylim = ylim, main = main)
p <- ts1.curves[[1]]
p <- p + scale_linetype_manual(name = "", breaks = c("1","2", "3", "4"), values = c(1, 2, 3, 4), labels = c(marker.names, "Mean", "Zero"))
p <- p + scale_size_manual(name = "", breaks = c("1", "2", "3", "4"), values = c(1, 1, .5, .5), labels = c(marker.names, "Mean", "Zero"))
print(p)
if(bootstraps >0 & length(fixeddeltas.y1) > 0 ){
conf.ints.y1 <- as.data.frame(cbind(fixeddeltas.y1, t(bounds.delta.y1)))
names(conf.ints.y1) <- c("fixed", "lower", "upper")
conf.ints.y2 <- as.data.frame(cbind(fixeddeltas.y2, t(bounds.delta.y2)))
names(conf.ints.y2) <- c("fixed", "lower", "upper")
result <- list("plot" = p,
"x" = list( "conf.intervals" = conf.ints.y1),
"x2" = list( "conf.intervals" = conf.ints.y2))
}else{
result <- list("plot" = p)
}
invisible(result)
}
myplotcompare.trtsel_disc <-
function(x, bootstraps = 500, alpha = .05,
ci = "horizontal", marker.names = c("Marker 1", "Marker 2"),
xlab = NULL,
ylab = NULL,
xlim = NULL,
ylim = NULL,
           main = NULL, offset, conf.bands, annotate.plot)
{
quantile <- NULL
ts1 <- x$x
ts2 <- x$x2
fittedrisk.t0.y1 <- ts1$derived.data$fittedrisk.t0
fittedrisk.t1.y1 <- ts1$derived.data$fittedrisk.t1
marker1 = ts1$derived.data[[ts1$model.fit$marker.names]]
delta.y1 <- ts1$derived.data$trt.effect
link <- ts1$model.fit$link
fittedrisk.t0.y2 <- ts2$derived.data$fittedrisk.t0
fittedrisk.t1.y2 <- ts2$derived.data$fittedrisk.t1
marker2 <- ts2$derived.data[[ts2$model.fit$marker.names]]
delta.y2 <- ts2$derived.data$trt.effect
rho <- ts1$model.fit$cohort.attributes
study.design <- ts1$model.fit$study.design
trt <- ts1$derived.data$trt
if( ts1$model.fit$family$family == "time-to-event"){
warning("plotting comparisons of two discrete markers with a time-to-event outcome is not implemented yet.")
return(NULL)
}else{
event <- ts1$derived.data[[as.character(ts1$formula[[2]])]]
boot.sample <- ts1$functions$boot.sample
one.boot.plot_disc <-
      function(event, trt, marker1, marker2, rho, obp.boot.sample){
myboot.sample <- obp.boot.sample( event, trt, rho)
rho.b <- myboot.sample[1:7]
ind <- myboot.sample[-c(1:7)]
event.b <- event[ind]
trt.b <- trt[ind]
marker1.b <- marker1[ind]
marker2.b <- marker2[ind]
mval1 <- sort(unique(marker1))
mval2 <- sort(unique(marker2))
c( trteff.mkr10 =mean(event.b[trt.b==0 & marker1.b ==mval1[1]]) - mean(event.b[trt.b==1 & marker1.b ==mval1[1]]),
trteff.mkr11 =mean(event.b[trt.b==0 & marker1.b ==mval1[2]]) - mean(event.b[trt.b==1 & marker1.b ==mval1[2]]),
trteff.mkr20 =mean(event.b[trt.b==0 & marker2.b ==mval2[1]]) - mean(event.b[trt.b==1 & marker2.b ==mval2[1]]),
trteff.mkr21 =mean(event.b[trt.b==0 & marker2.b ==mval2[2]]) - mean(event.b[trt.b==1 & marker2.b ==mval2[2]])
)
}
if(conf.bands){
boot.data <- replicate(bootstraps, one.boot.plot_disc( event, trt, marker1, marker2, rho,obp.boot.sample = boot.sample))
mval1 <- sort(unique(marker1))
mval2 <- sort(unique(marker2))
row.names(boot.data) = c(paste("trteffect.1mkr", mval1[1], sep = ""),
paste("trteffect.1mkr", mval1[2], sep = ""),
paste("trteffect.2mkr", mval2[1], sep = ""),
paste("trteffect.2mkr", mval2[2], sep = ""))
if(substr(ci, 1,1) =="h") { warning("Horizontal CI bands are not allowed for treatment effect plots with a discrete marker. Vertical bands will be computed"); ci <- "vertical";}
myconf.ints <- apply(boot.data, 1, quantile, probs = c(alpha/2, 1-alpha/2))
ci = "vertical"
}else{
myconf.ints = NULL
}
ts1.curves <- trteffectPLOTcompare_gg_disc(x1=ts1,
x2 = ts2,
ci.bounds = myconf.ints,
conf.bands = conf.bands,
offset = offset,
xlab=xlab, ylab = ylab,
xlim=xlim, ylim = ylim,
main = main,
marker.names = marker.names,
annotate.plot = annotate.plot)
p <- ts1.curves[[1]]
print(p)
result <- list("plot" = p,
"ci.bounds" = ts1.curves[[2]])
}
invisible(result)
} |
plothypos<-function(lat, lon, z, proj=list(), mag=NULL, cex=.4, pch =21, PMAT=NULL, alpha=NULL )
{
if(missing(proj)) {
proj = setPROJ(type = 2, LAT0=median(lat),LON0=median(lon) )
}
kcol = c(0, 35, 70, 150, 300, 500, 800)
r = c(255, 255, 66, 58, 255, 174)
g = c(108, 163, 255, 75, 52, 0)
b = c(86, 47, 82, 255, 253, 3)
thecols= rgb(r/255, g/255, b/255)
if( !is.null(alpha) )
{
thecols= adjustcolor(thecols, alpha.f=alpha)
}
if(!is.null(mag))
{
cex = getmagsize(mag, minsize=1, slope=1, minmag=4, maxmag=9, style=1)
}
ncols = length(thecols)
h = which(z>kcol[length(kcol)])
if(length(h)>0)
{
pointsGEOmapXY(lat[h], lon[h], PROJ=proj, col='black' , bg=thecols[ncols] , pch=21, cex=cex, PMAT=PMAT)
}
for(i in (length(kcol)):2 )
{
h = which(z>kcol[i-1] & z<=kcol[i])
pointsGEOmapXY(lat[h], lon[h], PROJ=proj, col='black' , bg=thecols[i-1] , pch=21, cex=cex, PMAT=PMAT)
}
} |
MVA.plot <- function(x,type=c("scores","loadings","correlations","biplot","pairs",
"trajectories"),...) {
type <- match.arg(type)
x <- MVA.ident(x)
f <- switch(type,scores=MVA.scoreplot,loadings=MVA.loadplot,correlations=MVA.corplot,
biplot=MVA.biplot,pairs=MVA.pairplot,trajectories=MVA.trajplot)
f(x,...)
} |
print.pin <- function (x,...)
{
  print(as.character(x),...)
  cat("Personal identity number(s)\n")
}
`[.pin` <- create_s3_method("[")
rep.pin <- create_s3_method("rep")
`[<-.pin` <- function(x, ..., value){
value <- as.pin(value)
NextMethod()
}
c.pin <- function(..., recursive = FALSE){
args <- list(...)
if (!length(args)) return(as.pin(character()))
as.pin(unlist(lapply(args, as.character)))
} |
barnard.rubin <- function(m, b, t, dfcom = Inf) {
lambda <- (1 + 1 / m) * b / t
lambda[lambda < 1e-04] <- 1e-04
dfold <- (m - 1) / lambda^2
dfobs <- (dfcom + 1) / (dfcom + 3) * dfcom * (1 - lambda)
ifelse(is.infinite(dfcom), dfold, dfold * dfobs / (dfold + dfobs))
} |
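# A minimal usage sketch (values assumed): Barnard-Rubin adjusted degrees of
# freedom for m = 5 imputations, between-imputation variance b and total
# variance t, with infinite and finite complete-data degrees of freedom.
barnard.rubin(m = 5, b = 0.2, t = 1.0)             # dfcom = Inf: old formula
barnard.rubin(m = 5, b = 0.2, t = 1.0, dfcom = 30) # small-sample adjustment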
CE.Normal.Init.Mean <-
function(data, init.locs, eps=0.01, rho=0.05, M=200, h=5, a=0.8, b=0.8, distyp = 1, penalty = "mBIC", var.init = 100000, parallel=FALSE){
if(is.data.frame(data) == "FALSE"| is.null(dim(data)[2])) {
print("Error in data : dataframe only")
} else if(dim(data)[2] != 1) {
print("Error in data : single column dataframe only")
} else if(missing(init.locs)){
print("Error: Initial locations are not provided!!!")
} else {
if(distyp == 1 & penalty == "mBIC"){
Melite <- M * rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.sim4beta.Init.mBIC", "betarand", "fun.alpha", "fun.beta", "mBIC", "betaIntEst"), envir=environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "init.locs", "var.init"), envir=environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.sim4beta.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
}
loci.mBIC <- sim[[1]]$loci
logLL <- llhood.MeanNormal(loci.mBIC, data, v=var(data[ ,1]), h)
return(list("No.BPs" = length(loci.mBIC) - 2, "BP.Loc" = loci.mBIC[2:(length(loci.mBIC) - 1)], "mBIC value" = sim[[1]]$mBIC, "ll" = logLL))
} else if(distyp == 2 & penalty == "mBIC"){
Melite <- M*rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.simnormal.Init.mBIC", "normrand", "mBIC"), envir = environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "b", "init.locs", "var.init"), envir = environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.simnormal.Init.mBIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
}
loci.mBIC <- sim[[1]]$loci
logLL <- llhood.MeanNormal(loci.mBIC, data, v=var(data[ ,1]), h)
return(list("No.BPs" = length(loci.mBIC) - 2, "BP.Loc" = loci.mBIC[2:(length(loci.mBIC) - 1)], "mBIC value" = sim[[1]]$mBIC, "ll" = logLL))
} else if(distyp == 1 & penalty == "BIC"){
Melite <- M * rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.sim4beta.Init.Mean.BIC", "betarand", "fun.alpha", "fun.beta", "llhood.MeanNormal", "loglik.MeanNormal", "BIC.MeanNormal", "betaIntEst"), envir=environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "init.locs", "var.init"), envir=environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.sim4beta.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
}
loci.BIC <- sim[[1]]$loci
return(list("No.BPs" = length(loci.BIC) - 2, "BP.Loc" = loci.BIC[2:(length(loci.BIC) - 1)], "BIC value" = sim[[1]]$BIC.Val, "ll" = sim[[1]]$LogLike))
} else if(distyp == 2 & penalty == "BIC"){
Melite <- M * rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.simnormal.Init.Mean.BIC", "normrand", "llhood.MeanNormal", "loglik.MeanNormal", "BIC.MeanNormal"), envir=environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "b", "init.locs", "var.init"), envir=environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.simnormal.Init.Mean.BIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
}
loci.BIC <- sim[[1]]$loci
return(list("No.BPs" = length(loci.BIC) - 2, "BP.Loc" = loci.BIC[2:(length(loci.BIC) - 1)], "BIC value" = sim[[1]]$BIC.Val, "ll" = sim[[1]]$LogLike))
} else if(distyp == 1 & penalty == "AIC"){
Melite <- M * rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.sim4beta.Init.Mean.AIC", "betarand", "fun.alpha", "fun.beta", "llhood.MeanNormal", "loglik.MeanNormal", "AIC.MeanNormal", "betaIntEst"), envir=environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "init.locs", "var.init"), envir=environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.sim4beta.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.sim4beta.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, var.init)
}
loci.AIC <- sim[[1]]$loci
return(list("No.BPs" = length(loci.AIC) - 2, "BP.Loc" = loci.AIC[2:(length(loci.AIC) - 1)], "AIC value" = sim[[1]]$AIC.Val, "ll" = sim[[1]]$LogLike))
} else if(distyp == 2 & penalty == "AIC"){
Melite <- M * rho
L <- length(data[, 1])
L0 <- 1
k <- length(init.locs)
if(parallel == TRUE & .Platform$OS.type == "windows"){
cl <- makeCluster(parallel::detectCores(), type="SOCK")
clusterExport(cl, c("ce.simnormal.Init.Mean.AIC", "normrand", "llhood.MeanNormal", "loglik.MeanNormal", "BIC.MeanNormal"), envir=environment())
clusterExport(cl, c("data", "rho", "M", "h", "eps", "Melite", "L", "L0", "a", "b", "init.locs", "var.init"), envir=environment())
registerDoParallel(cl)
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
stopCluster(cl)
} else if (parallel == TRUE & .Platform$OS.type == "unix"){
registerDoParallel(parallel::detectCores())
sim <- foreach(k = k, .errorhandling = c('pass')) %dopar% ce.simnormal.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
} else {
sim <- foreach(k = k, .errorhandling = c('pass')) %do% ce.simnormal.Init.Mean.AIC(k, init.locs, data, h, L0, L, M, Melite, eps, a, b, var.init)
}
loci.AIC <- sim[[1]]$loci
return(list("No.BPs" = length(loci.AIC) - 2, "BP.Loc" = loci.AIC[2:(length(loci.AIC) - 1)], "AIC value" = sim[[1]]$AIC.Val, "ll" = sim[[1]]$LogLike))
}
}
} |
gcf <- function(x, y, xmean = 1, ymean = 1, c = 0){
N <- length(x)
G <- convolve(x, y) / (N * xmean * ymean)
G <- G + c
return(G)
} |
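# A minimal usage sketch (signal assumed): normalised circular correlation of a
# positive-valued signal with itself; convolve() defaults to type = "circular"
# with conj = TRUE, i.e. a cross-correlation.
set.seed(1)
x_demo <- rpois(64, lambda = 5)
head(gcf(x_demo, x_demo, xmean = mean(x_demo), ymean = mean(x_demo)))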
hydrophobicity_plot<-function(pdb_df, window=3, weight=100, model="exponential"){
KD_values<-list("ALA"= 1.800,"ARG"= -4.500,"ASN"= -3.500,"ASP"= -3.500, "CYS"= 2.500, "GLN"= -3.500, "GLU"= -3.500,"GLY"= -0.400,"HIS"= -3.200,"ILE"= 4.500, "LEU"= 3.800,"LYS"= -3.900,"MET"= 1.900, "PHE"= 2.800, "PRO"= -1.600,"SER"= -0.800,"THR"= -0.700,"TRP"= -0.900,"TYR"= -1.300, "VAL"= 4.200)
if(((window<3) || (window>21))||(window%%2==0)) {stop("Please select a window size between values 3 and 21 where the value is odd")}
if((weight<0) || (weight>100)) {stop("Please select weight values as a percentage, e.g. 50 for 50%")}
if(!(model %in%c("exponential","linear"))) {stop("You can only select linear or exponential models, e.g., 'exponential' ")}
linear_fit<-function(value, size){
k=(100-value)/((size-1)/2)
b= value-k
values<-c()
for(i in 1:((size-1)/2)){
values<-c(values,k*i+b)
}
values<-c(values,100, rev(values))
return(values)
}
exp_fit<-function(value, size){
b=(100/value)^(2/(size-1))
a= value/b
values<-c()
for(i in 1:((size-1)/2)){
values<-c(values,a*b^i)
}
values<-c(values,100, rev(values))
return(values)
}
MINMAX_normalization_func<-function(array){
return ((array-min(array))/(max(array)-min(array)))
}
df<-pdb_df[,c("df_resno","df_res" , "Type")]
rownames(df)<-1:nrow(df)
df$"Type"<-as.factor(df$"Type")
if(model=="exponential"){values<-exp_fit(weight, window) }
if(model=="linear"){ values<-linear_fit(weight, window) }
values<-values/100
scores<-c()
weight_dist<-c()
half_window<-(window-1)/2
  for(i in (half_window + 1):(nrow(df) - half_window)){
scores_temp<-c()
score<-0
for(j in (i-half_window):(i+half_window)){
aa<-df[j,"df_res"]
scores_temp<-c(scores_temp,KD_values[[aa]])
}
for(n in 1:length(values)){
score<-score+scores_temp[n]*values[n] }
scores<-c(scores,score)
}
scores_temp<-c()
for(i in 1:half_window){
aa<-df[i,"df_res"]
scores_temp<-c(scores_temp,KD_values[[aa]])
}
scores<-c(scores_temp,scores)
scores_temp<-c()
for(i in (nrow(df)-half_window+1):nrow(df)){
aa<-df[i,"df_res"]
scores_temp<-c(scores_temp,KD_values[[aa]])
}
scores<-c(scores,scores_temp)
normalised_scores<-MINMAX_normalization_func(scores)
df$"Score"<-normalised_scores
df_resno_val<-df$"df_resno"
Score_val<-df$"Score"
Type_val<-df$"Type"
  ggplot2::ggplot() +
    ggplot2::geom_line(data = df,
                       ggplot2::aes(df_resno_val, Score_val, group = 1, color = Type_val)) +
    ggplot2::scale_x_continuous(breaks = seq(min(df$"df_resno"), max(df$"df_resno"), 25),
                                limits = c(min(df$"df_resno"), max(df$"df_resno"))) +
    ggplot2::ggtitle(label = "Kyte-Doolittle hydrophobicity plot") +
    ggplot2::xlab(label = "Residue number") +
    ggplot2::ylab(label = "Normalised score")
} |
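# A minimal usage sketch with a toy input assumed to mimic the expected pdb_df
# (columns df_resno, df_res and Type); requires ggplot2.
toy_pdb <- data.frame(df_resno = 1:9,
                      df_res   = c("MET","LYS","LEU","VAL","ILE",
                                   "PHE","GLY","ALA","TRP"),
                      Type     = "chain A",
                      stringsAsFactors = FALSE)
hydrophobicity_plot(toy_pdb, window = 3, weight = 100, model = "linear")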
skip_if_not_installed("xgboost")
test_that("autotest", {
learner = mlr3::lrn("regr.xgboost", nrounds = 5L)
expect_learner(learner)
result = run_autotest(learner)
expect_true(result, info = result$error)
})
test_that("hotstart", {
task = tsk("boston_housing")
task$select(task$feature_names[task$feature_names %nin% c("chas", "town")])
learner_1 = lrn("regr.xgboost", nrounds = 5L)
learner_1$train(task)
expect_equal(learner_1$state$param_vals$nrounds, 5L)
expect_equal(learner_1$model$niter, 5L)
hot = HotstartStack$new(learner_1)
learner_2 = lrn("regr.xgboost", nrounds = 10L)
learner_2$hotstart_stack = hot
expect_equal(hot$start_cost(learner_2, task$hash), 5L)
learner_2$train(task)
expect_equal(learner_2$model$niter, 10L)
expect_equal(learner_2$param_set$values$nrounds, 10L)
expect_equal(learner_2$state$param_vals$nrounds, 10L)
learner_3 = lrn("regr.xgboost", nrounds = 2L)
learner_3$hotstart_stack = hot
expect_equal(hot$start_cost(learner_3, task$hash), NA_real_)
learner_3$train(task)
expect_equal(learner_3$model$niter, 2L)
expect_equal(learner_3$param_set$values$nrounds, 2L)
expect_equal(learner_3$state$param_vals$nrounds, 2L)
learner_4 = lrn("regr.xgboost", nrounds = 5L)
learner_4$hotstart_stack = hot
expect_equal(hot$start_cost(learner_4, task$hash), -1L)
learner_4$train(task)
expect_equal(learner_4$model$niter, 5L)
expect_equal(learner_4$param_set$values$nrounds, 5L)
expect_equal(learner_4$state$param_vals$nrounds, 5L)
}) |
12 / sqrt(36)
sapply(S, sd) / sqrt(36)
sd( ~ boot.mean | sample, data = Boots) |
expected <- eval(parse(text="FALSE"));
test(id=0, code={
argv <- eval(parse(text="list(FALSE)"));
.Internal(isNamespaceEnv(argv[[1]]));
}, o=expected); |
context("Normal example")
wide.lm <- lm(mpg ~ am + wt + qsec + disp + hp, data=mtcars)
inds0 <- c(1, rep(0, length(coef(wide.lm))-1))
combs <- all_inds(wide.lm, inds0)
cmeans <- colMeans(model.frame(wide.lm)[,c("wt","qsec","disp","hp")])
X <- c(intercept=1, am=0, cmeans)
ficall <- fic(wide.lm, inds=combs, inds0=inds0, focus="mean_normal", X=X)
if (interactive()) ggplot_fic(ficall)
summary(ficall)
ficall[ficall$rmse <1,]
wide.lm <- lm(mpg ~ cyl + disp + hp + drat + wt + qsec + vs + am + gear + carb, data=mtcars)
mod1.lm <- lm(mpg ~ cyl + disp + hp + drat + wt, data=mtcars)
inds0 <- c(1,0,0,0,0, 0,0,0,0,0,0)
inds1 <- c(1,1,1,1,1, 0,0,0,0,0,0)
X <- model.matrix(wide.lm)[1:5,]
ficall <- fic(wide.lm, inds=inds1, inds0=inds0, focus="mean_normal", X=X)
par <- coef(wide.lm)
J <- solve(vcov(wide.lm))
ana <- fic_multi(par=par, J=J, inds=inds1, inds0=inds0, n=nrow(mtcars), focus="mean_normal", X=X, parsub=c(coef(mod1.lm), 0, 0, 0, 0, 0))
num <- fic_multi(par=par, J=J, inds=inds1, inds0=inds0, n=nrow(mtcars), focus="mean_normal", X=X, parsub=c(coef(mod1.lm), 0, 0, 0, 0, 0))
test_that("Results of higher level and lower level functions match", {
expect_equivalent(ana[,"FIC",1], ficall$FIC)
})
test_that("Analytic focus derivatives match numeric: normal", {
expect_equivalent(ana, num)
})
combs <- all_inds(wide.lm, inds0)
X <- model.matrix(wide.lm)[1,]
ficres <- fic(wide.lm, combs, focus=mean_normal, X=X)
if (interactive()) ggplot_fic(ficres)
focus_med <- function(par,X,sigma){
qnorm(0.5, mean = as.numeric(X %*% par), sd=sigma)
}
focus_quantile <- function(par,X,sigma,focus_p=0.5){
qnorm(focus_p, mean = as.numeric(X %*% par), sd=sigma)
}
test_that("focus functions with sigma and/or extra arguments",{
ficres_med <- fic(wide.lm, combs[1:4,], focus=focus_med, X=X)
expect_equal(ficres_med$FIC[1:4], ficres$FIC[1:4])
ficres_q <- fic(wide.lm, combs[1:4,], focus=focus_quantile, X=X, focus_p=0.5)
expect_equal(ficres_med$FIC[1:4], ficres_q$FIC[1:4])
})
test_that("focus function with multiple quantiles",{
ficres_qmulti <- fic(wide.lm, combs[1:4,], focus=focus_quantile, X=X, focus_p=c(0.1, 0.5, 0.9))
expect_equal(ficres$FIC[1:4], ficres_qmulti[ficres_qmulti$vals=="0.5","FIC"])
})
test_that("focus function argument length mismatch",{
Xmat <- matrix(X, nrow=1)[c(1,1),]
expect_error(fic(wide.lm, combs[1:4,], focus=focus_quantile, X=Xmat, focus_p=c(0.1, 0.5, 0.9)),
"Number of focuses")
}) |
`radSV` <-
function( del, phiS, lam, ichi, phi)
{
DEG2RAD = pi/180
lam = DEG2RAD*lam
del = DEG2RAD*del
phiS = DEG2RAD*phiS
ichi = DEG2RAD*ichi
phi = DEG2RAD*phi
phidif = phi - phiS
A1 = sin(lam)*cos(2*del)*cos(2*ichi)*sin(phidif)
A2 = cos(lam)*cos(del)*cos(2*ichi)*cos(phidif)
A3 = 0.5*cos(lam)*sin(del)*sin(2*ichi)*sin(2*phidif)
A4 = 0.5*sin(lam)*sin(2*del)*sin(2*ichi)*(1+(sin(phidif)*sin(phidif)))
FSV = A1 -A2 +A3 -A4
return(FSV)
} |
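# A minimal usage sketch (angles in degrees, values assumed): SV-wave radiation
# coefficient for a vertical strike-slip fault (dip 90, rake 0) observed at
# takeoff angle 45 and azimuth 30 degrees from strike.
radSV(del = 90, phiS = 0, lam = 0, ichi = 45, phi = 30)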
splitChr <- function(tex, chr, sex = FALSE, outdir = "."){
pos2 <- list()
for(eachchr in chr){
t <- unlist(lapply(lapply(tex, function(x){grep(strsplit(x,split = ":")[[1]][1],
pattern = paste("chr",eachchr,sep = ""))}),
function(x){ifelse(x == 0, FALSE, TRUE)}))
pos2[eachchr] <- length(t)
}
fil <- list()
for(eachchr in chr){
txtpath <- file.path(outdir,paste("chr",eachchr,".txt",sep = ""))
if(eachchr == 1){
fil[[1]] <- unlist(tex[1:pos2[[eachchr]]])
write(fil[[eachchr]],file = txtpath)
}
else if(eachchr == 10){
fil[[10]] <- unlist(tex[1:pos2[[eachchr]]])
write(fil[[eachchr]],file = txtpath)
}
else{
fil[[eachchr]] <- unlist(tex[(pos2[[eachchr-1]]+1):((pos2[[eachchr-1]]+1) + pos2[[eachchr]]-1)])
pos2[[eachchr]]<- pos2[[eachchr-1]]+1 + pos2[[eachchr]]-1
write(fil[[eachchr]],file = txtpath)
}
}
if(sex == TRUE){
txtpath <- file.path(outdir,paste("sexchrom",".txt",sep = ""))
tX <- unlist(lapply(lapply(tex, function(x){grep(strsplit(x,split = ":")[[1]][1],
pattern = "chr[XY]$",fixed = F)}),
function(x){ifelse(x == 0, FALSE, TRUE)}))
posx <- length(tX)
fil2 <- list()
Xnum <- tail(chr,1)+1
fil[[Xnum]] <- unlist(tail(tex,posx))
write(fil[[Xnum]],file = txtpath)
}
} |
NULL
expFileOutput <- function (file = tk_choose.files(default = "",
caption = "Select file(s) to open & hold down Ctrl to choose more than 1 file",
multi = TRUE, filters = matrix(c("Text file", ".exp", "Text file", ".EXP"),
  2, 2, byrow = TRUE)), output = c("csv", "xlsx", "both"), overwrite = TRUE) {
  output <- match.arg(output)
station <- NULL
assert_that(testFileExists(file), msg = "You did not choose a file. Please select a file again.")
assert_that((file.info(file)$size != 0), msg = "Your file is empty. Please try again with a different file.")
if (length(file) == 1) {
if (!nchar(file)) {
stop("You did not choose a file. Please try again with a different file.")
} else {
confirm <- gconfirm(toolkit = guiToolkit("tcltk"), msg =
paste0("Do you want to select", " ", file, "?"), title = "Confirm",
icon = "question")
if (confirm == FALSE) {
stop("Please try again with a different file.")
} else {
if (file.info(file)$size == 0) {
stop("Your file is empty. Please try again with a different file.")
} else {
rddatatmp <- read.table(file, sep = "\t", stringsAsFactors = FALSE,
header = FALSE, skip = 1, nrows = 20)
rownames(rddatatmp) <- as.character(rddatatmp[, 1])
rddatatmp <- as.data.frame(t(rddatatmp), stringsAsFactors = FALSE)
rddatatmp <- setDT(rddatatmp[-1, ])
change_class1 <- names(rddatatmp[, c(7:15, 18:20), with = FALSE])
for (col in change_class1) set(rddatatmp, j = col,
value = as.numeric(rddatatmp[[col]]))
change_class2 <- names(rddatatmp[, c(4:5, 16:17), with = FALSE])
for (col in change_class2) set(rddatatmp, j = col,
value = as.integer(rddatatmp[[col]]))
rddatatmp2 <- fread(file, nrows = 1, skip = 1, header = FALSE)
getstationid <- stri_trim_both(rddatatmp2$V2, pattern = "\\P{Wspace}")
rddatatmp3 <- fread(file, stringsAsFactors = FALSE, skip = 21,
colClasses = "character")
rddatatmp3[, station := getstationid]
setcolorder(rddatatmp3, c(ncol(rddatatmp3), 1:(ncol(rddatatmp3)-1L)))
change_class1 <- names(rddatatmp3[, 3:ncol(rddatatmp3), with = FALSE])
for (col in change_class1) set(rddatatmp3, j = col,
value = as.numeric(rddatatmp3[[col]]))
if (output == "csv") {
filesave1 <- tclvalue(tkgetSaveFile(title = paste0("Save goodness-of-fit and trend results", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(rddatatmp, filesave1, row.names = FALSE)
filesave2 <- tclvalue(tkgetSaveFile(title = paste0("Save exceedance probability values", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(rddatatmp3, filesave2, row.names = FALSE)
} else if (output == "xlsx") {
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", rddatatmp)
setColWidths(wb, sheet = 1, cols = 1:ncol(rddatatmp), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", rddatatmp3)
setColWidths(wb, sheet = 2, cols = 1:ncol(rddatatmp3), widths = "auto")
filesave3 <- tclvalue(tkgetSaveFile(title = paste0("Save", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave3, overwrite = overwrite)
} else if (output == "both") {
filesave3 <- tclvalue(tkgetSaveFile(title = paste0("Save goodness-of-fit and trend results", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(rddatatmp, filesave3, row.names = FALSE)
filesave4 <- tclvalue(tkgetSaveFile(title = paste0("Save exceedance probability values", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(rddatatmp3, filesave4, row.names = FALSE)
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", rddatatmp)
setColWidths(wb, sheet = 1, cols = 1:ncol(rddatatmp), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", rddatatmp3)
setColWidths(wb, sheet = 2, cols = 1:ncol(rddatatmp3), widths = "auto")
filesave5 <- tclvalue(tkgetSaveFile(title = paste0("Save", " ", stri_trans_toupper(basename(file_path_sans_ext(file))), " ", "file as"), filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave5, overwrite = overwrite)
}
}
}
}
} else {
    rddatatmp <- vector("list", length(file))
    rddatatmp2 <- vector("list", length(file))
    rddatatmp3 <- vector("list", length(file))
    getstationid <- vector("list", length(file))
    for (i in 1:length(file)) {
assert_that(testFileExists(file[i]), msg = "You did not choose a file. Please select a file again.")
assert_that((file.info(file[i])$size != 0), msg = "Your file is empty. Please try again with a different file.")
if (!nchar(file[i])) {
stop("You did not choose a file. Please try again with a different file.")
} else {
confirm <- gconfirm(toolkit = guiToolkit("tcltk"), msg = paste0("Do you want to select", " ", file[i], "?"), title = "Confirm", icon = "question")
if (confirm == FALSE) {
stop("Please try again with a different file.")
} else {
if (file.info(file[i])$size == 0) {
stop("Your file is empty. Please try again with a different file.")
} else {
rddatatmp <- vector("list", length(file))
rddatatmp2 <- vector("list", length(file))
rddatatmp3 <- vector("list", length(file))
getstationid <- vector("list", length(file))
            rddatatmp[[i]] <- read.table(file[i], sep = "\t", stringsAsFactors = FALSE, header = FALSE, skip = 1, nrows = 20)
rownames(rddatatmp[[i]]) <- as.character(rddatatmp[[i]][[1]])
rddatatmp[[i]] <- as.data.frame(t(rddatatmp[[i]]), stringsAsFactors = FALSE)
rddatatmp[[i]] <- setDT(rddatatmp[[i]])
rddatatmp[[i]] <- rddatatmp[[i]][-1, ]
dataas <- rbindlist(rddatatmp)
change_class1 <- names(dataas[, c(7:15, 18:20), with = FALSE])
for (col in change_class1) set(dataas, j = col, value = as.numeric(dataas[[col]]))
change_class2 <- names(dataas[, c(4:5, 16:17), with = FALSE])
for (col in change_class2) set(dataas, j = col, value = as.integer(dataas[[col]]))
            rddatatmp2[[i]] <- fread(file[i], nrows = 1, skip = 1, header = FALSE)
getstationid[[i]] <- stri_trim_both(rddatatmp2[[i]][[2]], pattern = "\\P{Wspace}")
            rddatatmp3[[i]] <- fread(file[i], stringsAsFactors = FALSE, skip = 21, colClasses = "character")
rddatatmp3[[i]][, station := getstationid[[i]]]
setcolorder(rddatatmp3[[i]], c(ncol(rddatatmp3[[i]]), 1:(ncol(rddatatmp3[[i]])-1L)))
dataas2 <- rbindlist(rddatatmp3)
change_class1 <- names(dataas2[, 3:ncol(dataas2), with = FALSE])
for (col in change_class1) set(dataas2, j = col, value = as.numeric(dataas2[[col]]))
if (output == "csv") {
filesave1 <- tclvalue(tkgetSaveFile(title = paste0("Save goodness-of-fit and trend results", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(dataas, filesave1, row.names = FALSE)
filesave2 <- tclvalue(tkgetSaveFile(title = paste0("Save exceedance probability values", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(dataas2, filesave2, row.names = FALSE)
} else if (output == "xlsx") {
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", dataas)
setColWidths(wb, sheet = 1, cols = 1:ncol(dataas), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", dataas2)
setColWidths(wb, sheet = 2, cols = 1:ncol(dataas2), widths = "auto")
filesave3 <- tclvalue(tkgetSaveFile(title = paste0("Save", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave3, overwrite = overwrite)
} else if (output == "both") {
filesave3 <- tclvalue(tkgetSaveFile(title = paste0("Save goodness-of-fit and trend results", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(dataas, filesave3, row.names = FALSE)
filesave4 <- tclvalue(tkgetSaveFile(title = paste0("Save exceedance probability values", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{Text file} .csv}"))
write.csv(dataas2, filesave4, row.names = FALSE)
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", dataas)
setColWidths(wb, sheet = 1, cols = 1:ncol(dataas), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", dataas2)
setColWidths(wb, sheet = 2, cols = 1:ncol(dataas2), widths = "auto")
filesave5 <- tclvalue(tkgetSaveFile(title = paste0("Save", " ", stri_trans_toupper(basename(file_path_sans_ext(file[i]))), " ", "file as"), filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave5, overwrite = overwrite)
}
}
}
}
}
}
}
expFileOutputBATCH <- function (path = tk_choose.dir(caption = "Select the directory with the .exp files"), output = c("csv", "xlsx", "both"), overwrite = TRUE) {
  station <- NULL
  output <- match.arg(output)
confirm <- gconfirm(toolkit = guiToolkit("tcltk"), msg = paste0("Do you want to select", " ", path, " as the directory with the .exp files?"), title = "Confirm", icon = "question")
if (confirm == FALSE) {
stop("Please try again with a different directory.")
} else {
file <- list.files(path, pattern = "exp|EXP$", full.names = TRUE)
file <- file[stri_detect_fixed(file, ".exp", case_insensitive = TRUE)]
for (i in 1:length(file)) {
assert_that(testFileExists(file[i]), msg = "You did not choose a file. Please select a file again.")
assert_that((file.info(file[i])$size != 0), msg = "Your file is empty. Please try again with a different file.")
if (!nchar(file[i])) {
stop("You did not choose a file. Please try again with a different file.")
} else {
if (file.info(file[i])$size == 0) {
stop("Your file is empty. Please try again with a different file.")
} else {
}
}
}
fun1 <- function(file) {
rddatatmp <- read.table(file, sep = "\t", stringsAsFactors = FALSE, header = FALSE, skip = 1, nrows = 20)
rownames(rddatatmp) <- as.character(rddatatmp[[1]])
rddatatmp <- as.data.frame(t(rddatatmp), stringsAsFactors = FALSE)
rddatatmp <- setDT(rddatatmp)
rddatatmp <- rddatatmp[-1, ]
}
dataas <- rbindlist(lapply(file, fun1))
fun2 <- function(file) {
rddatatmp2 <- fread(file, nrows = 1, skip = 1, header = FALSE)
getstationid <- stri_trim_both(rddatatmp2[[2]], pattern = "\\P{Wspace}")
rddatatmp3 <- fread(file, stringsAsFactors = FALSE, skip = 21, colClasses = "character")
rddatatmp3[, station := getstationid]
setcolorder(rddatatmp3, c(ncol(rddatatmp3), 1:(ncol(rddatatmp3)-1L)))
}
dataas2 <- rbindlist(lapply(file, fun2))
change_class1 <- names(dataas[, c(7:15, 18:20), with = FALSE])
for (col in change_class1) set(dataas, j = col, value = as.numeric(dataas[[col]]))
change_class2 <- names(dataas[, c(4:5, 16:17), with = FALSE])
for (col in change_class2) set(dataas, j = col, value = as.integer(dataas[[col]]))
change_class1 <- names(dataas2[, 3:ncol(dataas2), with = FALSE])
for (col in change_class1) set(dataas2, j = col, value = as.numeric(dataas2[[col]]))
if (output == "csv") {
filesave1 <- tclvalue(tkgetSaveFile(title = "Save goodness-of-fit and trend results batch file as", filetypes = "{{Text file} .csv}"))
write.csv(dataas, filesave1, row.names = FALSE)
filesave2 <- tclvalue(tkgetSaveFile(title = "Save exceedance probability values batch file as", filetypes = "{{Text file} .csv}"))
write.csv(dataas2, filesave2, row.names = FALSE)
} else if (output == "xlsx") {
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", dataas)
setColWidths(wb, sheet = 1, cols = 1:ncol(dataas), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", dataas2)
setColWidths(wb, sheet = 2, cols = 1:ncol(dataas2), widths = "auto")
filesave3 <- tclvalue(tkgetSaveFile(title = "Save batch file as", filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave3, overwrite = overwrite)
} else if (output == "both") {
filesave3 <- tclvalue(tkgetSaveFile(title = "Save goodness-of-fit and trend results file as", filetypes = "{{Text file} .csv}"))
write.csv(dataas, filesave3, row.names = FALSE)
filesave4 <- tclvalue(tkgetSaveFile(title = "Save exceedance probability values file as", filetypes = "{{Text file} .csv}"))
write.csv(dataas2, filesave4, row.names = FALSE)
wb <- createWorkbook()
addWorksheet(wb, "Goodness-of-Fit_Trend Results")
writeData(wb, "Goodness-of-Fit_Trend Results", dataas)
setColWidths(wb, sheet = 1, cols = 1:ncol(dataas), widths = "auto")
addWorksheet(wb, "Exceedance Probability Values")
writeData(wb, "Exceedance Probability Values", dataas2)
setColWidths(wb, sheet = 2, cols = 1:ncol(dataas2), widths = "auto")
filesave5 <- tclvalue(tkgetSaveFile(title = "Save batch file as", filetypes = "{{MS Excel file} .xlsx}"))
saveWorkbook(wb, filesave5, overwrite = overwrite)
}
}
} |
ypsummary <- function(...) UseMethod("ypsummary") |
trimcommand <- function(...){
tc <- list(
file = character(0)
, title = character(0)
, ntimes = integer(0)
, ncovars = integer(0)
, labels = character(0)
, missing = integer(0)
, weight = FALSE
, comment = character(0)
, weighting = FALSE
, serialcor = FALSE
, overdisp = FALSE
, basetime = integer(0)
, model = integer(0)
, covariates = integer(0)
, changepoints = integer(0)
, stepwise = FALSE
, autodelete = TRUE
, outputfiles = character(0)
, overallchangepoints = integer(0)
, impcovout = FALSE
, covin = FALSE
)
class(tc) <- c("trimcommand","list")
L <- list(...)
for ( nm in names(L) ){
if (! nm %in% names(tc) ) stop(sprintf("'%s' is not a valid TRIM keyword",nm))
if (nm == "file") L[[nm]] <- convert_path(L[[nm]])
if (length(L[[nm]])>0) tc[[nm]] <- as_rtrim(L[[nm]], tc[[nm]])
}
tc
}
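# A minimal usage sketch (keyword values assumed; the file name is hypothetical
# and nothing is read from disk): unknown keywords are rejected, known ones are
# coerced to the types of the template above.
tc_demo <- trimcommand(file = "skylark.dat", ntimes = 8, model = 2,
                       serialcor = "on")
print(tc_demo)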
trimbatch <- function(...){
tc <- list(trimcommand(...))
class(tc) <- c("trimbatch", "list")
tc
}
as_rtrim <- function(value, template){
if ( inherits(template, "logical") ){
if ( tolower(value) %in% c("present","on") ) TRUE else FALSE
} else {
as(value,class(template))
}
}
read_tcf <- function(file, encoding=getOption("encoding"),simplify=TRUE){
con <- file(description = file, encoding=encoding)
on.exit(close(con))
tcf <- paste(readLines(con), collapse="\n")
tcf <- gsub("\nIMPCOVOUT\n","\nIMPCOVOUT on\n", tcf)
tcf <- gsub("\nCOVIN\n","\nCOVIN on\n", tcf)
check_tcf(tcf)
tcflist <- trimws(strsplit(tcf,"(\\n|^)RUN")[[1]])
L <- vector(mode="list",length=length(tcflist))
L[[1]] <- tc_from_char(tcflist[[1]])
for ( i in 1+seq_along(L[-1]) ){
L[[i]] <- tc_from_char(tcflist[[i]], default = L[[i-1]])
}
class(L) <- c("trimbatch","list")
if (length(L) == 1 && simplify ) L[[1]] else L
}
summary.trimbatch <- function(object,...){
y <- object[[1]]
cat(sprintf("trimbatch: %s\n",pr(y$title, len=Inf)))
cat(sprintf("file: %s (%s means missing)\n"
, pr(y$file, len=50), pr(y$missing)))
cat(sprintf("Weights %s, %s covariates labeled %s\n",pr(y$weight), pr(y$ncovars)
,paste0("",paste(y$labels,collapse=", "))))
cat("\nModel parameter overview:\n")
oneliner(object)
}
convert_path <- function(x){
if (isTRUE(grepl("\\\\",x)) ){
y <- gsub("\\\\","/",x)
y
} else {
x
}
}
print.trimcommand <- function(x,...){
cat("Object of class trimcommand:\n")
for ( nm in names(x) ){
cat(sprintf("%12s: %s\n",nm,paste0("",paste(x[[nm]]),collapse=", ")) )
}
}
key_regex <- function(trimkey){
re <- paste0("(\\n|^)", trimkey,".+?")
re <- if (trimkey == "LABELS"){
paste0(re,"(\\n|^)END")
} else {
paste0(re,"(\\n|$)")
}
re
}
extract_keyval <- function(trimkey, x){
trimkey <- toupper(trimkey)
re <- key_regex(trimkey)
m <- regexpr(re,x,ignore.case=TRUE)
s <- regmatches(x,m)
re <- paste0("((\\n|^)",trimkey,")|((\\n|^)END)")
s <- trimws(gsub(re,"",s,ignore.case = TRUE))
if (trimkey != "COMMENT" && length(s)>0 && nchar(s)>0) {
s <- unlist(strsplit(s, split="([[:blank:]]|\n)+"))
}
s
}
tc_from_char <- function(x, default = trimcommand()){
L <- lapply(names(default), extract_keyval, x)
L <- setNames(L, names(default))
for ( i in seq_along(L))
if (length(L[[i]])==0) L[[i]] <- default[[i]]
do.call(trimcommand, L)
}
setNames <- function (object = nm, nm) {
names(object) <- nm
object
}
shortstr<- function(x,len=12){
if ( identical(x, character(0)) || nchar(x) <= len ) return(x)
st <- substr(x,1,4)
n <- nchar(x)
en <- substr(x,n-5,n)
paste0(st,"..",en)
}
pr <- function(s, ...){
a <- if ( length(s) == 0 ) "<none>" else paste(as.character(s),collapse=", ")
shortstr(a, ...)
}
oneliner <- function(x){
cat(sprintf("%10s %9s %9s %8s %8s %6s %6s %12s %8s %8s\n"
,"comment","weighting","serialcor","overdisp"
,"basetime","model","covars","changepoints","stepwise","outfiles"))
for ( i in seq_along(x)){
tc <- x[[i]]
cat(sprintf("%10s %9s %9s %8s %8s %6s %6s %12s %8s %8s\n"
, pr(tc$comment)
, pr(tc$weighting)
, pr(tc$serialcor)
, pr(tc$overdisp)
, pr(tc$basetime)
, pr(tc$model)
, pr(tc$covariates)
, pr(tc$changepoints)
, pr(tc$stepwise)
, pr(tc$outputfiles)
))
}
}
check_tcf <- function(x){
keywords <- c(names(trimcommand()),"end","run")
s <- trimws(strsplit(x,"\\n")[[1]])
i_labels <- grep("^labels",s,ignore.case=TRUE)
i_end <- grep("^end",s,ignore.case=TRUE)
if (length(i_labels)>0 && length(i_end) > 0){
if (i_labels > i_end){
stop(sprintf("Detected LABELS keyword (
,i_labels,i_end))
}
if (i_end - i_labels > 1) s <- s[-seq(i_labels+1, i_end-1)]
}
s <- s[nchar(s)>0]
mm <- regexpr("^.+?([[:blank:]]|$)",s)
keys_in_file <- trimws(regmatches(s,mm))
invalid_keys <- keys_in_file[!toupper(keys_in_file) %in% toupper(keywords)]
if(length(invalid_keys) > 0){
warning(sprintf("Ingnoring lines with the following invalid keywords: %s."
, paste0("'",invalid_keys,"'",collapse=", ")), call.=FALSE)
}
} |
csummary.dmm <-
function(object,traitset="all",componentset="all",bytrait=T,gls=F,digits=3, ...)
{
if(!is.null(object$specific)) {
retobj <- csummary.specific(object,traitset,componentset,bytrait,gls,digits,...)
class(retobj) <- "csumspecific.dmm"
return(retobj)
}
else {
if(traitset[1] == "all"){
traitpairs <- dimnames(object$variance.components)[[2]]
traits <- traitpairstotraits(traitpairs)
}
else {
traits <- traitset
traitpairs <- permpaste(traits)
}
l <- length(traits)
alltraitpairs <- dimnames(object$variance.components)[[2]]
if(componentset[1] == "all") {
components <- dimnames(object$variance.components)[[1]]
}
else {
components <- componentset
}
if(bytrait) {
ctables <- vector("list",l*l)
count <- 0
for(i in traits) {
for(j in traits) {
traitpair <- paste(i,":",j,sep="",collapse=NULL)
ij <- match(traitpair,alltraitpairs)
count <- count + 1
ci95lo <- object$variance.components[components,ij] - 1.96 * object$variance.components.se[components,ij]
ci95hi <- object$variance.components[components,ij] + 1.96 * object$variance.components.se[components,ij]
ctable <- data.frame(Traitpair=alltraitpairs[ij],
Estimate=object$variance.components[components,ij],
StdErr=object$variance.components.se[components,ij],
CI95lo=ci95lo,CI95hi=ci95hi,row.names=components)
ctables[[count]] <- ctable
}
}
}
else {
ctables <- vector("list",length(components))
count <- 0
for(i in components) {
count <- count + 1
ci95lo <- object$variance.components[i,traitpairs] - 1.96 * object$variance.components.se[i,traitpairs]
ci95hi <- object$variance.components[i,traitpairs] + 1.96 * object$variance.components.se[i,traitpairs]
ctable <- data.frame(Component=i,
Estimate=object$variance.components[i,traitpairs],
StdErr=object$variance.components.se[i,traitpairs],
CI95lo=ci95lo,CI95hi=ci95hi,row.names=traitpairs)
ctables[[count]] <- ctable
}
}
retobj <- list(ctables=ctables,traits=traits, components=components, bytrait=bytrait, gls=gls, digits=digits)
if(gls) {
if(bytrait) {
gctables <- vector("list",l*l)
count <- 0
for(i in traits) {
for(j in traits) {
traitpair <- paste(i,":",j,sep="",collapse=NULL)
ij <- match(traitpair,alltraitpairs)
count <- count + 1
ci95lo <- object$gls$variance.components[components,ij] - 1.96 * object$gls$variance.components.se[components,ij]
ci95hi <- object$gls$variance.components[components,ij] + 1.96 * object$gls$variance.components.se[components,ij]
ctable <- data.frame(Traitpair=alltraitpairs[ij],
Estimate=object$gls$variance.components[components,ij],
StdErr=object$gls$variance.components.se[components,ij],
CI95lo=ci95lo,CI95hi=ci95hi,row.names=components)
gctables[[count]] <- ctable
}
}
}
else {
gctables <- vector("list",length(components))
count <- 0
for(i in components) {
count <- count + 1
ci95lo <- object$gls$variance.components[i,traitpairs] - 1.96 * object$gls$variance.components.se[i,traitpairs]
ci95hi <- object$gls$variance.components[i,traitpairs] + 1.96 * object$gls$variance.components.se[i,traitpairs]
ctable <- data.frame(Component=i,
Estimate=object$gls$variance.components[i,traitpairs],
StdErr=object$gls$variance.components.se[i,traitpairs],
CI95lo=ci95lo,CI95hi=ci95hi,row.names=traitpairs)
gctables[[count]] <- ctable
}
}
retobj <- list(ctables=ctables,gctables=gctables,traits=traits, components=components, bytrait=bytrait, gls=gls, digits=digits)
}
retobj$call <- match.call()
class(retobj) <- "csummary.dmm"
return(retobj)
}
} |
TOPSISVector <- function(decision,
weights,
cb
)
{
if(! is.matrix(decision))
stop("'decision' must be a matrix with the values of the alternatives")
if(missing(weights))
stop("a vector containing n weigths, adding up to 1, should be provided")
if(sum(weights) != 1)
stop("The sum of 'weights' is not equal to 1")
if(! is.character(cb))
stop("'cb' must be a character vector with the type of the criteria")
if(! all(cb == "max" | cb == "min"))
stop("'cb' should contain only 'max' or 'min'")
if(length(weights) != ncol(decision))
stop("length of 'weights' does not match the number of the criteria")
if(length(cb) != ncol(decision))
stop("length of 'cb' does not match the number of the criteria")
d = sqrt(colSums(decision^2))
NW <- matrix(nrow = nrow(decision), ncol = ncol(decision))
for(j in 1:ncol(decision)){
NW[,j] <- (decision[,j] / d[j]) * weights[j]
}
posI <- as.integer(cb == "max") * apply(NW, 2, max) +
as.integer(cb == "min") * apply(NW, 2, min)
negI <- as.integer(cb == "min") * apply(NW, 2, max) +
as.integer(cb == "max") * apply(NW, 2, min)
distance =function(x,y){
sqrt(sum((x - y) ^ 2))
}
posDis <- apply(NW, 1, distance, posI)
negDis <- apply(NW, 1, distance, negI)
R <- negDis/(negDis+posDis)
return(data.frame(Alternatives = 1:nrow(decision), R = R, Ranking = rank(-R, ties.method= "first")))
} |
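# A minimal usage sketch of TOPSISVector() (illustrative numbers, not from
# any source): three alternatives scored on two benefit criteria and one
# cost criterion, with weights summing exactly to 1.
d <- matrix(c(7, 9, 5,
              6, 8, 9,
              4, 2, 3), nrow = 3)
TOPSISVector(d, weights = c(0.5, 0.25, 0.25), cb = c("max", "max", "min"))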
test_host <- Sys.getenv("DRILL_TEST_HOST", "localhost")
options(sergeant.bigint.warnonce = FALSE)
if (at_home()) {
dc <- drill_connection(test_host)
expect_true(drill_active(dc))
suppressMessages(
drill_query(dc, "SELECT * FROM cp.`employee.json` limit 10", .progress = FALSE)
) -> test_rest
expect_true(inherits(test_rest, "data.frame"))
expect_true(inherits(drill_version(dc), "character"))
expect_true(inherits(drill_metrics(dc), "list"))
expect_true(inherits(drill_options(dc), "tbl"))
dp <- drill_profiles(dc)
expect_true(inherits(dp, "list"))
expect_true(
inherits(
drill_profile(dc, dp$finishedQueries[1]$queryId[1]),
"list"
)
)
suppressMessages(
expect_true(
drill_cancel(dc, dp$finishedQueries[1]$queryId[1])
)
)
suppressMessages(
suppressWarnings(
expect_true(
inherits(
drill_show_files(dc, schema_spec = "dfs"),
"tbl"
)
)
)
)
expect_true(inherits(drill_show_schemas(dc), "tbl"))
expect_true(inherits(drill_storage(dc), "tbl"))
expect_true(inherits(drill_stats(dc), "list"))
expect_true(inherits(drill_status(dc), "html"))
expect_true(inherits(drill_threads(dc), "html"))
expect_true(inherits(drill_use(dc, "cp"), "tbl"))
expect_true(
inherits(
drill_set(
dc,
exec.errors.verbose = TRUE,
store.format = "parquet",
web.logs.max_lines = 20000
),
"tbl"
)
)
} |
"BNsample" |
labkey.transform.readRunPropertiesFile <- function(runInfoPath)
{
lines = readLines(runInfoPath);
rows <- c()
rowCount = 1
i = 1
while (i <= length(lines))
{
parts <- strsplit(lines[i], split="\t")[[1]];
if (length(parts) == 0)
{
prev <- rowCount-1
if (i > 1 && length(rows[prev]) < 3)
{
res <- concatenateMultiLine(lines, i-1, i)
rows[prev] <- res$value
i = i + res$skipped
}
}
else
{
rows[rowCount] <- lines[i]
i = i + 1
}
rowCount = length(rows) + 1
}
properties = data.frame(NA, NA, NA, NA);
colnames(properties) = c("name", "val1", "val2", "val3");
for (i in 1:length(rows))
{
parts = strsplit(rows[i], split="\t")[[1]];
if (length(parts) < 4)
{
for (j in 1:4)
{
if (is.na(parts[j]))
{
parts[j] = NA;
}
}
}
properties[i,] = parts;
}
return (properties)
}
concatenateMultiLine <- function(lines, idx, start)
{
ret <- list(0, lines[idx])
names(ret) <- c("skipped", "value")
count = 0
for (i in start:length(lines))
{
parts <- strsplit(lines[i], split="\t")[[1]];
if (length(parts) == 0)
{
lines[idx] <- paste(lines[idx], "\n")
}
else
{
ret$skipped <- count+1
ret$value <- paste(lines[idx], lines[i])
break
}
count = count + 1
}
return (ret)
}
labkey.transform.getRunPropertyValue <- function(runProps, propName)
{
value = NA;
if (any(runProps$name == propName))
{
value = runProps$val1[runProps$name == propName];
if (nchar(value) == 0)
{
value = NA;
}
}
return (value)
} |
is.multi_tpfit <-
function(object) {
if(!is(object, "multi_tpfit")) return(FALSE)
if(!prod(c("coordsnames", "coefficients", "prop", "tolerance") %in% names(object))) return(FALSE)
if(length(names(object)) != 5 - is.null(object$rotation)) return(FALSE)
if(!is.list(object$coefficients)) return(FALSE)
if(!prod(sapply(object$coefficients, is.list))) return(FALSE)
if(!prod(sapply(object$coefficients, names) == "coefficients")) return(FALSE)
if(!prod(sapply(object$coefficients, function(xx) is.matrix(xx$coefficients)))) return(FALSE)
if(!is.numeric(object$prop)) return(FALSE)
if(!is.numeric(object$tolerance)) return(FALSE)
if(!is.null(object$rotation)) {
if(!is.numeric(object$rotation)) return(FALSE)
if(length(object$rotation) != length(object$coefficients) - 1) return(FALSE)
}
return(TRUE)
} |
library(fUnitRoots)
library(MaxMC)  # assumed provider of mmc() used below
set.seed(123)
y <- filter(rnorm(25), c(-1.5, 0.5), method = "recursive")
lower <- -1
upper <- 1
dgp <- function(y, v) {
ran.y <- filter(rnorm(length(y)), c(1-v,v), method = "recursive")
}
statistic <- function(y){
out <- suppressWarnings(adfTest(y, lags = 2, type = "nc"))
return(out@test$statistic)
}
est <- mmc(y, statistic = statistic , dgp = dgp, lower = lower,
upper = upper, N = 99, type = "leq", method = "GenSA",
control = list(max.time = 2))
print(est) |
a <- maxample("animal")
p <- maxample("pop")
test_that("dimSums works", {
ref1 <- new("magpie",
.Data = structure(c(0, 0, 1, 1, 6, 4, 7, 5, 6, 4, 7, 5, 0, 0, 0, 0),
.Dim = c(2L, 2L, 4L),
.Dimnames = list(x.y.country.cell = c("5p75.53p25.NLD.14084", "6p25.53p25.NLD.14113"),
year.month.day = c("y2000.april.20", "y2000.may.20"),
type.color = c("animal.black", "animal.white",
"animal.red", "animal.brown"))))
expect_identical(dimSums(a[1:2, 1:2, ], dim = "species"), ref1)
expect_identical(dimSums(a[1:2, 1:2, ], dim = 3.2), ref1)
ref2 <- new("magpie",
.Data = structure(c(9397, 7653, 3475, 3345, 1900),
.Dim = c(1L, 1L, 5L),
.Dimnames = list(d1 = NULL, d2 = NULL,
type.species.color = c("animal.rabbit.black", "animal.rabbit.white",
"animal.bird.black", "animal.bird.red",
"animal.dog.brown"))))
expect_identical(dimSums(a, dim = 1:2), ref2)
expect_error(dimSums(1), "Input is not a MAgPIE object")
expect_error(dimSums(a, dim = 4), "Invalid dimension")
ax <- a[1, 1:2, 1:4]
getItems(ax, dim = 3, raw = TRUE) <- c("animal.rabbit.black", "animal.rabbit.white",
"animal.bird.black", "animal.bird.white")
ref4 <- new("magpie",
.Data = structure(c(12, 15), .Dim = c(1L, 2L, 1L),
.Dimnames = list(x.y.country.cell = "5p75.53p25.NLD.14084",
year.month.day = c("y2000.april.20", "y2000.may.20"),
type = "animal")))
expect_identical(dimSums(ax, dim = c(3.2, 3.3)), ref4)
ref5 <- new("magpie",
.Data = structure(c(0, 13, 1, 13), .Dim = c(1L, 1L, 4L),
.Dimnames = list(d1 = NULL, d2 = NULL,
type.species.color = c("animal.rabbit.black", "animal.rabbit.white",
"animal.bird.black", "animal.bird.white"))))
expect_identical(dimSums(ax, dim = 1:2), ref5)
ref3 <- new("magpie", .Data = structure(25770, .Dim = c(1L, 1L, 1L),
.Dimnames = list(d1 = NULL, d2 = NULL, d3 = NULL)))
expect_identical(dimSums(a, dim = 1:3), ref3)
expect_true(all(dimSums(p, dim = 1) - colSums(p) == 0))
expect_true(all(dimSums(p, dim = 2:3) - rowSums(p) == 0))
expect_true(all(dimSums(p, dim = 1) / dim(p)[1] - colMeans(p) == 0))
expect_true(all(magpply(p, mean, dim = 2:3) - rowMeans(p) == 0))
p0 <- p[, , -1:-2]
expect_null(dimSums(p0, dim = 3))
}) |
context("NMscanInput")
fix.time <- function(x){
meta.x <- attr(x,"NMdata")
meta.x$details$time.NMscanData <- NULL
meta.x$details$file.lst <- NULL
meta.x$details$file.mod <- NULL
meta.x$details$file.input <- NULL
meta.x$details$mtime.input <- NULL
meta.x$details$mtime.lst <- NULL
meta.x$details$mtime.mod <- NULL
meta.x$datafile$path <- NULL
meta.x$datafile$path.rds <- NULL
meta.x$tables$file <- NULL
meta.x$tables$file.mtime <- NULL
setattr(x,"NMdata",meta.x)
}
NMdataConf(reset=TRUE)
test_that("basic",{
fileRef <- "testReference/NMscanInput_1.rds"
file.lst <- "testData/nonmem/xgxr004.lst"
res <-
NMscanInput(file=file.lst,applyFilters = T,as.fun="data.table")
fix.time(res)
expect_equal_to_reference(res,fileRef,version=2)
})
test_that("input has NMdata meta data",{
fileRef <- "testReference/NMscanInput_2.rds"
file.lst <- "testData/nonmem/xgxr011.lst"
res <- NMscanInput(file=file.lst,applyFilters = T,as.fun="data.table")
fix.time(res)
nm1 <- NMinfo(res)
expect_equal_to_reference(nm1,fileRef,version=2)
})
test_that("single = filter",{
file.lst <- "testData/nonmem/xgxr009.lst"
res <- NMscanInput(file=file.lst,applyFilters = T,as.fun="data.table")
expect_equal(res[,unique(DOSE)],10)
})
test_that("Duplicate columns in input data",{
fileRef <- "testReference/NMscanInput3.rds"
file.lst <- "testData/nonmem/xgxr015.lst"
inpdat <- expect_warning(NMscanInput(file=file.lst))
})
test_that("single-char ignore",{
NMdataConf(reset=T)
fileRef <- "testReference/NMscanInput4.rds"
file.lst <- "testData/nonmem/estim_debug.lst"
res <- NMscanInput(file=file.lst,applyFilters=T,file.mod=function(x)fnExtension(x,".ctl"))
expect_equal(nrow(res),98)
fix.time(res)
expect_equal_to_reference(res,fileRef,version=2)
})
test_that(".mod with mix of space and , in $INPUT",{
fileRef <- "testReference/NMscanInput5.rds"
file.lst <- "testData/nonmem/min036.mod"
inpdat <- NMscanInput(file=file.lst)
expect_equal_to_reference(colnames(inpdat),fileRef,version=2)
})
test_that("Erroneously basing a filter on translated column names",{
expect_error(
NMscanInput("testData/nonmem/min036mod.mod",applyFilters=TRUE)
)
})
test_that("Including meta data",{
NMdataConf(reset=T)
fileRef <- "testReference/NMscanInput6.rds"
file.lst <- "testData/nonmem/xgxr004.lst"
res <-
NMscanInput(file=file.lst,applyFilters = T,details=T, as.fun="data.table")
fix.time(res)
expect_equal_to_reference(res,fileRef,version=2)
})
test_that("CYCLE=DROP",{
fileRef <- "testReference/NMscanInput_7.rds"
file.lst <- system.file("examples/nonmem/xgxr002.lst",package="NMdata")
res <-
NMscanInput(file=file.lst,applyFilters = T,as.fun="data.table")
fix.time(res)
nm1 <- NMinfo(res)
expect_equal_to_reference(nm1,fileRef,version=2)
}) |
fun.cumsum <-
function(matrix)
{
size <- dim(matrix)
Cumsum <- c()
for(i in 1:size[2]){
Cumsum <- c(Cumsum,cumsum(matrix[1:size[1],i]))
}
return(Cumsum)
} |
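# Note: fun.cumsum() concatenates the column-wise cumulative sums into one
# vector; for a numeric matrix m it is equivalent to
# as.vector(apply(m, 2, cumsum)). The parameter name 'matrix' shadows
# base::matrix inside the function body.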
library(kitagawa)
Rc <- 0.0508
Lc <- 146.9
Rs <- 3*Rc
Ls <- 9.14
Volw <- sensing_volume(Rc, Lc, Rs, Ls)
Frqs <- 10**seq.int(from=-4,to=0,by=0.1)
Rsp <- well_response(omega=Frqs, T.=1e-6, S.=1e-5, Vw.=Volw, Rs.=Rs, Ku.=40e9, B.=0.2, freq.units="Hz")
kitplot(Rsp) |
gaussianRankCorr <- function(x, vec = FALSE) {
n <- nrow(x)
p <- ncol(x)
stopifnot(p >= 2)
r <- apply(x, FUN = rank, MARGIN = 2, ties.method = "average")
rqnorm <- qnorm(r / (n + 1))
den <- sum((qnorm(1 : n / (n + 1))) ^ 2)
res <- unlist(sapply(1:(p-1), FUN = function(i) c(rqnorm[, i] %*% rqnorm[, (i+1):p]))) / den
if (!vec) {
res <- p2P(res)
}
return (res)
}
cor2cov <- function(corr, std) {
outer(std, std) * corr
} |
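# A minimal usage sketch (simulated data). With vec = TRUE only base R is
# used; with vec = FALSE the vector is reshaped into a matrix via p2P(),
# which is assumed to come from the 'copula' package. cor2cov() rescales a
# correlation matrix into a covariance matrix given the marginal standard
# deviations.
set.seed(1)
x <- matrix(rnorm(200), nrow = 50, ncol = 4)
gaussianRankCorr(x, vec = TRUE)     # pairwise correlations, upper triangle
cor2cov(diag(4), std = rep(2, 4))   # identity correlation, sd = 2 margins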
ogalt3<-function (formula, k, d, aa, data = NULL, na.action,
...)
{
k <- as.matrix(k)
d <- as.matrix(d)
k1 <- k[1L]
d1 <- d[1L]
if (length(aa) == 1L)
A <- as.matrix(aa)
else A <- diag(aa)
altes3 <- function(formula, k1, d1, aa, data = NULL, na.action,
...) {
cal <- match.call(expand.dots = FALSE)
mat <- match(c("formula", "data", "na.action"), names(cal))
cal <- cal[c(1L, mat)]
cal[[1L]] <- as.name("model.frame")
cal <- eval(cal)
y <- model.response(cal)
md <- attr(cal, "terms")
x <- model.matrix(md, cal, contrasts)
s <- t(x) %*% x
xin<-solve(s)
I <- diag(NCOL(x))
bb <- solve(s) %*% t(x) %*% y
bk <- A %*% (solve(s + I) + d1 * solve(s + I) %*% solve(s +
k1 * I)) %*% t(x) %*% y
bkve<-as.vector(bk)
j<-0
sumsq<-0
for (j in 1:NROW(bkve))
{
sumsq=(bkve[j])^2+sumsq
}
cval<-sumsq
ev <- (t(y) %*% y - t(bb) %*% t(x) %*% y)/(NROW(x) -
NCOL(x))
ev <- diag(ev)
rval<-(1/cval)*bk%*%t(bk)
ahat<-cval*rval%*%solve(ev*xin+cval*rval)
ogalt3<-ahat%*%bb
colnames(ogalt3) <- c("Estimate")
dbd <-ev*(ahat%*%xin%*%t(ahat))
Standard_error <- sqrt(diag(abs(dbd)))
dbt <- t(ogalt3)
dbd <-ev*(ahat%*%xin%*%t(ahat))
sdbd_inv <- (sqrt(diag(abs(dbd))))^-1
sdbd_inv_mat <- diag(sdbd_inv)
if (NCOL(dbt) == 1L)
tbd <- dbt * sdbd_inv
else tbd <- dbt %*% sdbd_inv_mat
hggh <- t(tbd)
tst <- t(2L * pt(-abs(tbd), df = NROW(x) - NCOL(x)))
colnames(tst) <- c("p_value")
colnames(hggh) <- c("t_statistic")
mse1 <-cval^2*ev*tr(ev*rval*solve(ev*xin+cval*rval)%*%xin%*%solve(ev*xin+cval*rval)%*%rval)+ev^2*t(bk)%*%solve(ev*I+cval*rval%*%s)%*%solve(ev*I+cval*rval%*%s)%*%bk
mse1<-as.vector(mse1)
mse1 <- round(mse1, digits = 4L)
names(mse1) <- c("MSE")
ans1 <- cbind(ogalt3, Standard_error, hggh, tst)
rownames(ans1) <- rownames(bb)
ans <- round(ans1, digits = 4L)
anw <- list(`*****Ordinary Generalized Type (3) Adjusted Liu Estimator*****` = ans,
`*****Mean Square Error value*****` = mse1)
return(anw)
}
npt <- altes3(formula, k1, d1, aa, data, na.action)
plotalt3 <- function(formula, k, d, aa, data = NULL, na.action,
...) {
j <- 0
i <- 0
arr <- 0
for (j in 1:nrow(k)) {
for (i in 1:nrow(d)) {
altem3 <- function(formula, k, d, aa, data, na.action,
...) {
cal <- match.call(expand.dots = FALSE)
mat <- match(c("formula", "data", "na.action"),
names(cal))
cal <- cal[c(1L, mat)]
cal[[1L]] <- as.name("model.frame")
cal <- eval(cal)
y <- model.response(cal)
md <- attr(cal, "terms")
x <- model.matrix(md, cal, contrasts)
s <- t(x) %*% x
xin<-solve(s)
I <- diag(NCOL(x))
bb <- solve(s) %*% t(x) %*% y
bk <- A %*% (solve(s + I) + d * solve(s + I) %*% solve(s +k * I)) %*% t(x) %*% y
bkve<-as.vector(bk)
j<-0
sumsq<-0
for (j in 1:NROW(bkve))
{
sumsq=(bkve[j])^2+sumsq
}
cval<-sumsq
ev <- (t(y) %*% y - t(bb) %*% t(x) %*% y)/(NROW(x) -
NCOL(x))
ev <- diag(ev)
rval<-(1/cval)*bk%*%t(bk)
ahat<-cval*rval%*%solve(ev*xin+cval*rval)
dbd <-ev*(ahat%*%xin%*%t(ahat))
mse1 <-cval^2*ev*tr(ev*rval*solve(ev*xin+cval*rval)%*%xin%*%solve(ev*xin+cval*rval)%*%rval)+ev^2*t(bk)%*%solve(ev*I+cval*rval%*%s)%*%solve(ev*I+cval*rval%*%s)%*%bk
mse1<-as.vector(mse1)
return(mse1)
}
arr[i * j] <- altem3(formula, k[j], d[i], aa,
data, na.action)
falte3 = file("alt3.data", "a+")
cat(k[j], d[i], arr[i * j], "\n", file = falte3,
append = TRUE)
close(falte3)
}
}
mat <- read.table("alt3.data")
unlink("alt3.data")
rmat <- matrix(mat[, 3L], nrow = NROW(d), dimnames = list(c(paste0("d=",
d)), c(paste0("k=", k))))
return(rmat)
}
pl3 <- plotalt3(formula, k, d, aa, data, na.action)
if (nrow(k) > 1L | nrow(d) > 1L)
val <- pl3
else val <- npt
val
} |
getMedia <- function(labbcat.url, id, track.suffix = "", mime.type = "audio/wav") {
parameters <- list(id=id, trackSuffix=track.suffix, mimeType=mime.type)
resp <- store.get(labbcat.url, "getMedia", parameters)
if (is.null(resp)) return()
resp.content <- httr::content(resp, as="text", encoding="UTF-8")
if (httr::status_code(resp) != 200) {
print(paste("ERROR: ", httr::http_status(resp)$message))
print(resp.content)
return()
}
resp.json <- jsonlite::fromJSON(resp.content)
for (error in resp.json$errors) print(error)
return(resp.json$model)
} |
gammaHRF <- function(TR, paras=NULL, len.seconds=32, onset.seconds=0) {
if (is.null(paras)) paras <- c(6,16,1,1,6)
dt <- TR/16
u <- 0:(len.seconds/dt) - onset.seconds/dt
hrf <- dgamma(u, paras[1]/paras[3], dt/paras[3]) - dgamma(u, paras[2]/paras[4], dt/paras[4])/paras[5]
hrf <- hrf[(0:(len.seconds/TR))*16+1]
hrf <- hrf/sum(hrf)
return(hrf)
} |
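# A minimal usage sketch: the canonical double-gamma HRF sampled at TR = 2 s
# over the default 32 s window; the returned kernel is normalized to sum to 1.
h <- gammaHRF(TR = 2)
length(h)  # 17 samples: one per TR from 0 to 32 s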
.HLfit_body_augZXy <- function(processed, ranFix=list()) {
trace <- processed$verbose["TRACE"]
ranFix <- .canonizeRanPars(ranPars=ranFix,corr_info=NULL, checkComplete = FALSE, rC_transf=.spaMM.data$options$rC_transf)
nobs <- length(processed$y)
nrand <- length(processed$ZAlist)
cum_n_u_h <- processed$cum_n_u_h
n_u_h <- cum_n_u_h[nrand+1L]
sparse_precision <- processed$is_spprec
ranCoefs.Fix <- .getPar(ranFix,"ranCoefs")
ranCoefs_blob <- .process_ranCoefs(processed, ranCoefs.Fix,use_tri_CORREL=.spaMM.data$options$use_tri_for_augZXy)
LMatrices <- processed$AUGI0_ZX$envir$LMatrices
if (any(ranCoefs_blob$is_set)) {
LMatrices[ranCoefs_blob$is_set] <- ranCoefs_blob$LMatrices[ranCoefs_blob$is_set]
attr(LMatrices,"is_given_by")[ranCoefs_blob$is_set] <- "ranCoefs"
}
if (processed$is_spprec) {
.init_AUGI0_ZX_envir_spprec_info(processed)
.wrap_precisionFactorize_ranCoefs(processed,LMatrices)
}
if (processed$is_spprec) {
ZAL <- NULL
} else if ( any((attr(LMatrices,"is_given_by") !="")) ) {
ZAL <- .compute_ZAL(XMatrix=LMatrices, ZAlist=processed$ZAlist, as_matrix=.eval_as_mat_arg(processed))
} else {
ZAL <- processed$AUGI0_ZX$ZAfix
}
lambda.Fix <- ranFix$lambda
if (any(lambda.Fix==0, na.rm=TRUE)) stop("lambda cannot be fixed to 0.")
off <- processed$off
init.lambda <- .calc_initial_init_lambda(lambda.Fix, nrand, processed, ranCoefs_blob,
init.HLfit=NULL, fixed=ranFix)
lambda_est <- .HLfit_finalize_init_lambda(models=processed$models, init.lambda, processed, ZAL=ZAL, cum_n_u_h,
vec_n_u_h=diff(cum_n_u_h), n_u_h, ranCoefs_blob)
if (identical(processed$return_only,"p_vAPHLs")) {
whichAPHLs <- "p_v"
} else if (identical(processed$return_only,"p_bvAPHLs")) {
whichAPHLs <- "p_bv"
} else whichAPHLs <- c("p_v","p_bv")
w.ranef <- 1/lambda_est
GLMweights <- processed$prior.weights
phi_est <- ranFix$phi
if (is.null(phi_est)) phi_est <- processed$phi.Fix
if (is.null(phi_est)) {
w.resid <- GLMweights
} else w.resid <- structure(GLMweights/phi_est, unique=TRUE, is_unit=FALSE)
H_global_scale <- .calc_H_global_scale(w.resid)
if (processed$is_spprec) {
if (trace) cat(".")
sXaug <- do.call(processed$AUGI0_ZX$envir$method,
list(AUGI0_ZX=processed$AUGI0_ZX, corrPars=ranFix$corrPars,w.ranef=w.ranef,
cum_n_u_h=cum_n_u_h,w.resid=w.resid))
} else {
ZAL_scaling <- 1/sqrt(w.ranef*H_global_scale)
weight_X <- .calc_weight_X(w.resid, H_global_scale)
if (inherits(ZAL,"sparseMatrix")) {
ZW <- .Dvec_times_Matrix(weight_X,.Matrix_times_Dvec(ZAL,ZAL_scaling))
XW <- .Dvec_times_m_Matrix(weight_X,processed$AUGI0_ZX$X.pv)
sXaug <- list(ZW=ZW,XW=XW,I=processed$AUGI0_ZX$I)
attr(sXaug,"w.ranef") <- w.ranef
attr(sXaug,"n_u_h") <- ncol(ZW)
attr(sXaug,"pforpv") <- ncol(XW)
attr(sXaug,"weight_X") <- weight_X
attr(sXaug,"H_global_scale") <- H_global_scale
class(sXaug) <- c(class(sXaug),"sXaug_blocks")
} else {
Xscal <- .make_Xscal(ZAL=ZAL, ZAL_scaling = ZAL_scaling, AUGI0_ZX=processed$AUGI0_ZX)
if (inherits(Xscal,"sparseMatrix")) {
mMatrix_method <- .spaMM.data$options$Matrix_method
attr(Xscal,"AUGI0_ZX") <- processed$AUGI0_ZX
} else {
mMatrix_method <- .spaMM.data$options$matrix_method
}
if (trace) cat(".")
sXaug <- do.call(mMatrix_method,
list(Xaug=Xscal, weight_X=weight_X, w.ranef=w.ranef, H_global_scale=H_global_scale))
}
}
augZXy_resu <- .calc_APHLs_by_augZXy_or_sXaug(sXaug=sXaug, phi_est=phi_est,
processed=processed, which=whichAPHLs,
update_info=list(allow= all(processed$AUGI0_ZX$envir$updateable)))
res <- list(APHLs=augZXy_resu)
return(res)
} |
context("Analytical solutions for coupled models")
warn_option <- options(warn=-1)
test_that("The analytical solutions for SFO-SFO are correct", {
SFO_SFO_nosink <- mkinmod(
parent = mkinsub("SFO", to = "m1", sink = FALSE),
m1 = mkinsub("SFO"),
use_of_ff = "min", quiet = TRUE)
f_sfo_sfo_nosink <- mkinfit(SFO_SFO_nosink, FOCUS_D, quiet = TRUE)
f_sfo_sfo_nosink_deSolve <- mkinfit(SFO_SFO_nosink, FOCUS_D,
solution_type = "deSolve", quiet = TRUE)
expect_equal(
parms(f_sfo_sfo_nosink),
parms(f_sfo_sfo_nosink_deSolve)
)
SFO_SFO.ff_nosink <- mkinmod(
parent = mkinsub("SFO", to = "m1", sink = FALSE),
m1 = mkinsub("SFO"),
use_of_ff = "max", quiet = TRUE)
f_sfo_sfo_nosink <- mkinfit(SFO_SFO.ff_nosink, FOCUS_D, quiet = TRUE)
f_sfo_sfo_nosink_deSolve <- mkinfit(SFO_SFO.ff_nosink, FOCUS_D,
solution_type = "deSolve", quiet = TRUE)
expect_equal(
parms(f_sfo_sfo_nosink),
parms(f_sfo_sfo_nosink_deSolve)
)
f_sfo_sfo_analytical <- mkinfit(SFO_SFO, FOCUS_D,
solution_type = "analytical", quiet = TRUE)
expect_equal(
parms(f_sfo_sfo_analytical),
parms(f_sfo_sfo_desolve)
)
f_sfo_sfo.ff_desolve <- mkinfit(SFO_SFO.ff, FOCUS_D,
solution_type = "deSolve", quiet = TRUE)
expect_equal(
parms(f_sfo_sfo.ff),
parms(f_sfo_sfo.ff_desolve),
tolerance = 5e-6
)
})
test_that("The analytical solution for DFOP-SFO are correct", {
f_dfop_sfo_analytical <- mkinfit(DFOP_SFO, FOCUS_D,
solution_type = "analytical", quiet = TRUE)
f_dfop_sfo_desolve <- mkinfit(DFOP_SFO, FOCUS_D,
solution_type = "deSolve", quiet = TRUE)
expect_equal(
parms(f_dfop_sfo_analytical),
parms(f_dfop_sfo_desolve),
tolerance = 5e-6
)
})
options(warn = warn_option$warn) |
dL <- expand.grid(c(TRUE,FALSE),c(TRUE,FALSE))
x = data.frame(x=dL[,1])
y = data.frame(x=dL[,2])
expect_equal(c(0,1,1,NaN), gower_dist(x = x,y = y))
bands <- c("Grand Magus","Skull Fist")
dF <- expand.grid(bands,bands)
expect_equal(gower_dist(data.frame(x=dF[,1]),data.frame(x=dF[,2])),c(0,1,1,0))
dN <- data.frame(x = as.numeric(1:4),y=as.numeric(c(1,1,2,3)))
expect_equal(gower_dist(data.frame(x=dN[,1]),data.frame(x=dN[,2])),c(0,1/3,1/3,1/3))
dC <- data.frame(x=letters[1:3],y=letters[3:1],stringsAsFactors=FALSE)
expect_equal(gower_dist(
data.frame(x=dC[,1],stringsAsFactors=FALSE)
, data.frame(x=dC[,2],stringsAsFactors=FALSE)),c(1,0,1))
bands <- c("Grand Magus","Skull Fist")
dL <- expand.grid(c(TRUE,FALSE),c(TRUE,FALSE))
dN <- data.frame(x = as.numeric(1:4),y=as.numeric(c(1,1,2,3)))
dF <- expand.grid(bands,bands)
dM1 <- data.frame(x=dL[,1],y=dF[,1],z=dN[,1])
dM2 <- data.frame(x=dL[,2],y=dF[,2],z=dN[,2])
expect_equal(gower_dist(x=dM1,y=dM2), c(0,7/9,7/9,1/6))
expect_equal(gower_dist(dM1,dM2),gower_dist(dM2,dM1))
dM1[array(c(2,3,4,1,2,3),dim=c(3,2))] <- NA
expect_equal(gower_dist(dM1,dM2), c(0,3/4,3/4,0))
expect_equivalent(gower_dist(women, women[1]),rep(0,nrow(women)))
dat1 <- iris[1:6,]
dat2 <- iris[1:6,]
names(dat2) <- tolower(names(dat2))
expect_equal(gower_dist(dat1, dat2, ignore_case=TRUE),rep(0,6))
expect_equal(length(gower_dist(x=iris[1,],y=iris)), nrow(iris))
expect_equal(length(gower_dist(x=iris,y=iris[1,])), nrow(iris))
expect_equal(length(gower_dist(x=iris[1:3,],y=iris)), nrow(iris))
expect_equal(length(gower_dist(x=iris,y=iris[1:3,])), nrow(iris))
expect_error(gower_topn(women, women, weights=-(1:4)))
expect_error(gower_dist(women, women, weights=c(NA,1:3)))
d1 <- women[1,]
d2 <- women[2,]
w <- c(1,2)
r <- sapply(women, function(x) abs(diff(range(x))))
d12 <- (w[1]*abs(d1[1,1]-d2[1,1])/r[1] + w[2]*abs(d1[1,2]-d2[1,2])/r[2])/sum(w)
wom2 <- women
wom2[1:2,] <- wom2[2:1,]
expect_equivalent(gower_dist(women,wom2, weights=w)[1], d12)
expect_warning(gower_dist(
x = data.frame(x=c(1.2,1.2,1.2))
, y = data.frame(x=c(1.2,1.2,1.2))
))
expect_warning(gower_dist(
x = data.frame(x=c(1.2,1.2,1.2))
, y = data.frame(x=c(1.2,1.2,1.3))
, eps=0.2
))
expect_warning(gower_dist(data.frame(x=rep(1,100)), data.frame(x=1,100)))
expect_error(gower_dist(
data.frame(a = letters[1:3], stringsAsFactors = TRUE),
data.frame(a = letters[2:4], stringsAsFactors = TRUE)
))
expect_error(gower_dist(
data.frame(a = letters[1:3], stringsAsFactors = FALSE),
data.frame(a = letters[2:4], stringsAsFactors = TRUE)
))
suppressMessages(out <- gower_dist(data.frame(x=1:3), data.frame(y=1:3) ))
expect_true(identical(out,numeric(0)))
suppressMessages(out <- gower_topn(data.frame(x=1:3),data.frame(y=1:3)) )
expect_true( identical(out$distance, matrix(0)[0,0]) )
expect_true( identical(out$index, matrix(0L)[0,0]) )
d1 <- iris[1:3,]
d2 <- iris[1:7,]
L <- gower_topn(d1,d2,n=4)
expect_equal(length(L),2)
expect_equal(dim(L[[1]]),c(4,3))
expect_equal(dim(L[[1]]),dim(L[[2]]))
expect_equal(L[[1]][1,],1:3)
expect_equal(L[[2]][1,],rep(0,3))
L <- gower_topn(d1,d2,n=8)
expect_equal(L[[1]][8,],rep(0,3))
expect_equal(L[[2]][8,],rep(Inf,3))
dat1 <- data.frame(
x = as.factor(sample(letters,2000,replace=TRUE))
,y = sample(LETTERS,2000,replace=TRUE)
,z = as.integer(1:2000)
,w = sample(c(TRUE,FALSE),2000,replace=TRUE)
, stringsAsFactors=FALSE
)
i <- sample(2000)
dat2 <- dat1[i,]
gower_dist(dat1,dat2)
expect_warning(gower_dist(iris[1:3,],iris[1:2,]))
expect_warning(gower_dist(iris[1:2,],iris[1:3,]))
dat <- data.frame(x = c(NA,2,4,5), y = c(6,7,NA,10))
L <- gower_topn(dat[c(1,3),],dat[c(2,4),],n=1)
expect_equivalent(as.vector(L$index),c(1,2)) |
library(Matrix)
context("Flexible Sparse darray")
rblocks <- sample(1:8, 1)
cblocks <- sample(1:8, 1)
rsize <- sample(1:10, rblocks)
csize <- sample(1:10, cblocks)
da<-darray(npartitions=c(rblocks,cblocks), sparse=TRUE)
test_that("Creation and Fetch works", {
expect_equal(nrow(da), 0, info="check darray nrow")
expect_equal(ncol(da), 0, info="check darray col")
expect_equal(is.invalid(da), TRUE, info="check validity of flex sparse darray after declaration")
})
foreach(i, 1:npartitions(da), initArrays<-function(y=splits(da,i), index=i-1, rs = rsize, cs= csize, cb=cblocks ) {
nrow=rs[floor(index/cb)+1]
ncol=cs[(index%%cb)+1]
y<-sparseMatrix(i=1,
j=1,
x=index+1,
dims=c(nrow,ncol))
update(y)
})
rindex<-c(1,(cumsum(rsize)+1)[1:length(rsize)-1])
rindex<-rep(rindex, each=cblocks)
cindex<-c(1,(cumsum(csize)+1)[1:length(csize)-1])
cindex<-rep(cindex, rblocks)
mat<- sparseMatrix(i=rindex,
j=cindex,
x=1:length(rindex),
dims=c(sum(rsize),sum(csize)))
test_that("Creation and Fetch works", {
expect_equal(is.invalid(da), FALSE, info="check validity of flex sparse darray after data write")
expect_equal(nrow(da), nrow(mat), info="check nrow of flex sparse darray")
expect_equal(ncol(da), ncol(mat), info="check ncol of flex sparse darray")
expect_equal(getpartition(da), mat, info="check flex sparse darray contents")
expect_equal(dim(da), dim(mat), info="check dimension of flex sparse darray")
})
context("Flexible Sparse darray operations")
test_that("Operatons: max, min, head, tail works", {
expect_equal(max(da), max(mat), info="check max of flex sparse darray")
expect_equal(min(da), min(mat), info="check min of flex sparse darray")
expect_equal(sum(da), sum(mat), info="check sum of flex sparse darray")
expect_equal(mean(da), mean(mat), info="check mean of flex sparse darray")
expect_equal(colSums(da), colSums(mat), info="check colSums of flex sparse darray")
expect_equal(rowSums(da), rowSums(mat), info="check rowSums of flex sparse darray")
expect_equal(colMeans(da), colMeans(mat), info="check colMeans of flex sparse darray")
expect_equal(rowMeans(da), rowMeans(mat), info="check rowMeans of flex sparse darray")
expect_equal(head(da), head(mat), info="check head operator on flex sparse darray")
expect_equal(as.numeric(tail(da)), as.numeric(tail(mat)), info="check tail operator on flex sparse darray")
})
db<-clone(da)
test_that("Clone works", {
expect_true(da==db, info="clone has to return a same value darray")
expect_equal(getpartition(da+da), (mat+mat), info="check + operator on flex sparse darray")
}) |
f.quad.plateau <- function(d, start = list(a = 1, b = 1, c = 1),
plus_minus = 1e2, n.start=1000, msg=FALSE) {
if ("matrix" %in% class(d)) {d <- data.frame(d)}
names(d) = c("x", "y")
mq = lm(y ~ x + I(x^2), data=d)
c3 = coef(mq)[[1]]
c4 = coef(mq)[[2]]
c5 = coef(mq)[[3]]
a = start[[1]]
b = start[[2]]
c = start[[3]]
a11 = ifelse(a == 1, c3, a)
b11 = ifelse(b == 1, c4, b)
c11 = ifelse(c == 1, c5, c)
m <- nls.multstart::nls_multstart(y ~ (a + b * x + c * I(x^2)) * (x <= -0.5 *
b/c) + (a + I(-b^2/(4 * c))) * (x > -0.5 * b/c),
start_lower=list(a = a11 - plus_minus, b=b11 - plus_minus, c=c11 - plus_minus),
start_upper=list(a = a11 + plus_minus, b=b11 + plus_minus, c=c11 + plus_minus),
iter=n.start, data = d, supp_errors ="Y")
if(!is.null(m)) {
c = coef(m)
a = c[1]
b = c[2]
c = c[3]
pmm = (a + I(-b^2/(4 * c)))
pcc = -0.5 * b/c
nls.summary = c(a, b, c, summary(m)[11][[1]][10], summary(m)[11][[1]][11],
summary(m)[11][[1]][12], AIC(m), BIC(m),
pmm, pcc)
nls.summary = as.data.frame(nls.summary)
rownames(nls.summary) = c("coefficient a", "coefficient b", "coefficient c",
"p-value t.test for a", "p-value t.test for b", "p-value t.test for c",
"AIC", "BIC",
"maximum or minimum value for y", "critical point in x")
res <- list(nls.summary=nls.summary, nls.model=m)
} else{
res <- NULL
if (msg) cat("Not converged! may try larger n.start!\n")
}
res
} |
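# A minimal usage sketch (made-up data; requires the 'nls.multstart' package
# used above). Kept as a comment because the multistart fit is stochastic:
# d <- data.frame(x = 0:10,
#                 y = c(1, 3.2, 5, 6.4, 7.4, 8, 8.1, 8.2, 8.1, 8.2, 8.2))
# f.quad.plateau(d)$nls.summary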
expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L), .Label = c(\"Golden.rain\", \"Marvellous\", \"Victory\"), class = \"factor\", contrasts = \"contr.treatment\"))"));
do.call(`dim`, argv);
}, o=expected); |
library("ggplot2")
p <- ggplot(mtcars) +
geom_point(aes(x = wt, y = mpg, colour = factor(gear))) +
facet_wrap(~am)
p + theme_igray() |
"bovids" |
yppeSurv <- function(time, z, par, rho, tau, n_int){
q <- length(z)
psi <- par[1:q]
phi <- par[(q+1):(2*q)]
gamma <- par[(2*q+1):(2*q+n_int)]/tau
theta_S <- exp( as.numeric(z%*%psi) )
theta_L <- exp( as.numeric(z%*%phi) )
Ht0 <- Hpexp(time, rho, gamma)
St0 <- exp(-Ht0)
Ft0 <- 1-St0
St <- exp( -theta_L*(log(theta_L*St0 + theta_S*Ft0)-(as.numeric(z%*%phi) - Ht0) ) )
class(St) <- "survfit.yppe"
return(St)
}
yppeSurv2 <- function(time, z, x, par, rho, tau, n_int){
q <- length(z)
p <- length(x)
psi <- par[1:q]
phi <- par[(q+1):(2*q)]
beta <- par[(2*q+1):(2*q+p)]
gamma <- par[(2*q+p+1):(2*q+p+n_int)]/tau
theta_S <- exp( as.numeric(z%*%psi) )
theta_L <- exp( as.numeric(z%*%phi) )
theta_C <- exp( as.numeric(x%*%beta) )
Ht0 <- Hpexp(time, rho, gamma)
St0 <- exp(-Ht0)
Ft0 <- 1-St0
St <- exp( -theta_L*theta_C*(log(theta_L*St0 + theta_S*Ft0)-(as.numeric(z%*%phi) - Ht0) ) )
class(St) <- "survfit.yppe"
return(St)
}
survfit <- function(object, ...) UseMethod("survfit")
survfit.yppe <- function(object, newdata, ...){
mf <- object$mf
labels <- names(mf)[-1]
time <- sort( stats::model.response(mf)[,1])
status <- sort( stats::model.response(mf)[,2])
data <- data.frame(cbind(time, status, mf[,-1]))
names(data) <- c("time", "status", names(mf)[-1])
rho <- object$rho
n_int <- object$n_int
tau <- object$tau
labels <- match.arg(names(newdata), labels, several.ok = TRUE)
formula <- object$formula
Z <- as.matrix(stats::model.matrix(formula, data = newdata, rhs = 1)[,-1])
X <- suppressWarnings(try( as.matrix(stats::model.matrix(formula, data = newdata, rhs = 2)[,-1]), TRUE))
St <- list()
if(object$approach=="mle"){
par <- object$fit$par
if(object$p==0){
for(i in 1:nrow(newdata)){
St[[i]] <- yppeSurv(time, Z[i,], par, rho, tau, n_int)
}
}else{
for(i in 1:nrow(newdata)){
St[[i]] <- yppeSurv2(time, Z[i,], X[i,], par, rho, tau, n_int)
}
}
}else{
samp <- rstan::extract(object$fit)
if(object$p==0){
par <- cbind(samp$psi, samp$phi, samp$gamma)
for(i in 1:nrow(newdata)){
aux <- apply(par, 1, yppeSurv, time=time, z=Z[i,], rho=rho, tau=tau, n_int=n_int)
St[[i]] <- apply(aux, 1, mean)
}
}else{
par <- cbind(samp$psi, samp$phi, samp$beta, samp$gamma)
for(i in 1:nrow(newdata)){
aux <- apply(par, 1, yppeSurv2, time=time, z=Z[i,], x=X[i,], rho=rho, tau=tau, n_int=n_int)
St[[i]] <- apply(aux, 1, mean)
}
}
}
class(St) <- "survfit.yppe"
return(St)
}
diffSurv <- function(time, z1, z2, par, rho, tau, n_int){
St1 <- yppeSurv(time=time, z=z1, par=par, rho=rho, tau=tau, n_int=n_int)
St2 <- yppeSurv(time=time, z=z2, par=par, rho=rho, tau=tau, n_int=n_int)
return(St1-St2)
}
diffSurv2 <- function(time, z1, z2, x, par, rho, tau, n_int){
St1 <- yppeSurv2(time=time, z=z1, x=x, par=par, rho=rho, tau=tau, n_int=n_int)
St2 <- yppeSurv2(time=time, z=z2, x=x, par=par, rho=rho, tau=tau, n_int=n_int)
return(St1-St2)
}
yppeCrossSurv <- function(z1, z2, par, rho, tau0, tau, n_int){
I <- c(tau0, 1.5*tau)
t <- try(stats::uniroot(diffSurv, interval=I, z1=z1, z2=z2, par=par,
rho=rho, tau=tau, n_int=n_int)$root, TRUE)
if(inherits(t, "try-error")){
return(NA)
}else{
return(t)
}
}
yppeCrossSurv2 <- function(z1, z2, x, par, rho, tau0, tau, n_int){
I <- c(tau0, 1.5*tau)
t <- try(stats::uniroot(diffSurv2, interval=I, z1=z1, z2=z2, x=x, par=par,
rho=rho, tau=tau, n_int=n_int)$root, TRUE)
if(inherits(t, "try-error")){
return(NA)
}else{
return(t)
}
}
crossTime <- function(object, ...) UseMethod("crossTime")
crossTime.yppe <- function(object, newdata1, newdata2,
conf.level=0.95, nboot=4000, ...){
q <-object$q
p <-object$p
mf <- object$mf
labels <- names(mf)[-1]
time <- stats::model.response(mf)[,1]
status <- stats::model.response(mf)[,2]
o <- order(time)
data <- data.frame(cbind(time, status, mf[,-1]))[o,]
names(data) <- c("time", "status", labels)
tau0 <- min(time)
rho <- object$rho
n_int <- object$n_int
tau <- object$tau
labels <- match.arg(names(newdata1), names(newdata2), several.ok=TRUE)
labels <- match.arg(names(mf)[-1], names(newdata1), several.ok=TRUE)
z1 <- matrix(stats::model.matrix(object$formula, data = newdata1, rhs = 1)[,-1], ncol=q)
z2 <- matrix(stats::model.matrix(object$formula, data = newdata2, rhs = 1)[,-1], ncol=q)
if(p>0){
x <- matrix(stats::model.matrix(object$formula, data = newdata2, rhs = 2)[,-1], ncol=p)
}
I <- c(tau0, 1.5*tau)
alpha <- 1 - conf.level
prob <- c(alpha/2, 1-alpha/2)
if(object$approach=="mle"){
t <- c()
par <- object$fit$par
for(i in 1:nrow(newdata1)){
if(p==0){
t[i] <- yppeCrossSurv(z1=z1[i,], z2=z2[i,], par=par, rho=rho, tau0=tau0, tau=tau, n_int=n_int)
}else{
t[i] <- yppeCrossSurv2(z1=z1[i,], z2=z2[i,], x=x[i,], par=par, rho=rho, tau0=tau0, tau=tau, n_int=n_int)
}
}
par <- with(object, yppeBoot(formula=formula, data=data, n_int=n_int,
rho=rho, tau=tau, nboot=nboot, prob=prob))
ci <- matrix(nrow=nrow(newdata1), ncol=2)
if(object$p==0){
for(i in 1:nrow(newdata1)){
aux <- apply(par, 1, yppeCrossSurv, z1=z1[i,], z2=z2[i,], rho=rho, tau0=tau0, tau=tau, n_int=n_int)
ci[i,] <- stats::quantile(aux, probs=prob, na.rm=TRUE)
}
}else{
for(i in 1:nrow(newdata1)){
aux <- apply(par, 1, yppeCrossSurv2, z1=z1[i,], z2=z2[i,], x=x[i,], rho=rho, tau0=tau0, tau=tau, n_int=n_int)
ci[i,] <- stats::quantile(aux, probs=prob, na.rm=TRUE)
}
}
t <- data.frame(cbind(t, ci))
names(t) <- c("Est.", paste(100*prob, "%", sep=""))
}else{
t <- matrix(nrow=nrow(newdata1), ncol=3)
samp <- rstan::extract(object$fit)
if(object$p==0){
par <- cbind(samp$psi, samp$phi, samp$gamma)
for(i in 1:nrow(newdata1)){
aux <- apply(par, 1, yppeCrossSurv, z1=z1[i,], z2=z2[i,], rho=rho, tau0=tau0, tau=tau, n_int=n_int)
ci <- stats::quantile(aux, probs=prob, na.rm=TRUE)
t[i,] <- c(mean(aux, na.rm=TRUE), ci)
}
}else{
par <- cbind(samp$psi, samp$phi, samp$beta, samp$gamma)
for(i in 1:nrow(newdata1)){
aux <- apply(par, 1, yppeCrossSurv2, z1=z1[i,], z2=z2[i,], x=x[i,], rho=rho, tau0=tau0, tau=tau, n_int=n_int)
ci <- stats::quantile(aux, probs=prob, na.rm=TRUE)
t[i,] <- c(mean(aux, na.rm=TRUE), ci)
}
}
t <- as.data.frame(t)
names(t) <- c("Est.", names(ci) )
}
return(t)
} |
cbimax<- function(logicalmatrix,minr=2,minc=2,number=100,er=0)
.C("bimax",
as.integer(logicalmatrix),
as.integer(nrow(logicalmatrix)),
as.integer(ncol(logicalmatrix)),
as.integer(minr),
as.integer(minc),
as.integer(matrix(0,nrow=nrow(logicalmatrix),ncol=number)),
as.integer(matrix(0,nrow=number,ncol=ncol(logicalmatrix))),
as.integer(vector(mode="integer",length=nrow(logicalmatrix)+ncol(logicalmatrix))),
as.integer(number),
as.integer(er))
bimaxbiclust<- function(logicalmatrix,...){
MYCALL<-match.call()
flush.console()
ausgabe<-cbimax(logicalmatrix,...)
ausgabe[[6]] <- as.logical(ausgabe[[6]])
ausgabe[[7]] <- as.logical(ausgabe[[7]])
RowxNumber<-matrix(ausgabe[[6]],nrow=nrow(logicalmatrix),ncol=ausgabe[[9]])
NumberxCol<-matrix(ausgabe[[7]],nrow=ausgabe[[9]],ncol=ncol(logicalmatrix))
anzahl<-colSums(RowxNumber)
anzahl2<-rowSums(NumberxCol)
anzahl_ges<-anzahl+anzahl2
anzahl_id<-anzahl_ges>0
Number<-sum(anzahl_id)
if(Number==1){
RowxNumber <-matrix(RowxNumber[,anzahl_id],ncol=1)
NumberxCol <-matrix(NumberxCol[anzahl_id,],nrow=1)
}
if(Number>1)
{
RowxNumber<- RowxNumber[,anzahl_id]
NumberxCol <-NumberxCol[anzahl_id,]
}
return(BiclustResult(as.list(MYCALL),RowxNumber,NumberxCol,Number,list(0)))
}
repbimaxbiclust<- function(logicalmatrix,minr=2,minc=2,number=30,maxc=12)
{
RowxNumber<-matrix(FALSE,nrow=nrow(logicalmatrix),ncol=number)
NumberxCol<-matrix(FALSE,nrow=number,ncol=ncol(logicalmatrix))
daten<-logicalmatrix
datenrows<-rep(TRUE,nrow(logicalmatrix))
forstop <- TRUE
for(j in 1:number)
{
res1<-0
res2<-1
i<-maxc
k<-0
while(res1==0 & i>0)
{
i<-i-1
res_bimax <- bimaxbiclust(daten[datenrows,], minr=minr, minc=i, number=100)
res1<-res_bimax@Number
}
while(res2>0 & i>minc)
{
resbic<-res_bimax
res_bimax <- bimaxbiclust(daten[datenrows,], minr=minr+k, minc=i, number=100)
k<-k+1
res2<-res_bimax@Number
}
if(i>minc)
{
forstop <- FALSE
ind<-which.max(colSums(resbic@RowxNumber))
RowxNumber[datenrows,j]<-resbic@RowxNumber[,ind]
NumberxCol[j,]<-resbic@NumberxCol[ind,]
datenrows[datenrows][resbic@RowxNumber[,ind]]<-FALSE
}
else
{
break
}
}
if(i>minc)
{
RowxNumber <-matrix(RowxNumber[,1:j],ncol=j)
NumberxCol <-matrix(NumberxCol[1:j,],nrow=j)
bimaxbic<-BiclustResult(resbic@Parameters,RowxNumber,NumberxCol,j,list())
}
else
{
if(forstop)
{
bimaxbic<-BiclustResult(res_bimax@Parameters,matrix(NA,1,1),matrix(NA,1,1),0,list())
}
else
{
RowxNumber <-matrix(RowxNumber[,1:(j-1)],ncol=(j-1))
NumberxCol <-matrix(NumberxCol[1:(j-1),],nrow=(j-1))
bimaxbic<-BiclustResult(resbic@Parameters,RowxNumber,NumberxCol,(j-1),list())
}
}
return(bimaxbic)
}
maxbimaxbiclust <- function(logicalmatrix,minr=2,minc=2,number=5, backfit=2, n2=30)
{
RowxNumber<-matrix(FALSE,nrow=nrow(logicalmatrix),ncol=number)
NumberxCol<-matrix(FALSE,nrow=number,ncol=ncol(logicalmatrix))
daten<-logicalmatrix
datenrows<-rep(TRUE,nrow(logicalmatrix))
forstop <- TRUE
sums <- rowSums(logicalmatrix)
for(j in 1:number)
{
print(j)
i <- minc-1
k <- 0
k2 <- FALSE
size <- 0
RUN <- TRUE
FOUND <- FALSE
i_found <- minc
while(RUN)
{
i<-i+1
scountb <- sums >= i
if(sum(scountb)<max(floor(size/i),minr))
{
res1 =0
}
else
{
res_bimax <- bimaxbiclust(daten[(datenrows&scountb),], minr=max(floor(size/i),minr), minc=i, number=n2)
res1<-res_bimax@Number
}
if(res1==0)
{
k <- k + 1
}
else
{
FOUND <- TRUE
forstop <- FALSE
cS <- colSums(res_bimax@RowxNumber)
rS <- rowSums(res_bimax@NumberxCol)
sizeb <- max(si <- cS * rS)
if(sizeb > size)
{
k <- 0
k2 <- FALSE
ind <- which.max(si)[1]
resbic <- res_bimax
size <- sizeb
scount <- scountb
i_found <- i
}
else
{
k <- k + 1
}
}
if(k>=backfit)
{
if(k2)
{
RUN <- FALSE
}
else
{
k2 <- TRUE
i <- max(i_found - k - 1, minc - 1)
k <- 0
}
}
}
if(FOUND)
{
RowxNumber[(datenrows&scount),j] <- resbic@RowxNumber[,ind]
NumberxCol[j,] <- resbic@NumberxCol[ind,]
datenrows[(datenrows&scount)][resbic@RowxNumber[,ind]] <- FALSE
}
else
{
break
}
}
if(FOUND)
{
RowxNumber <-matrix(RowxNumber[,1:j],ncol=j)
NumberxCol <-matrix(NumberxCol[1:j,],nrow=j)
bimaxbic<-BiclustResult(resbic@Parameters,RowxNumber,NumberxCol,j,list())
}
else
{
if(forstop)
{
bimaxbic<-BiclustResult(res_bimax@Parameters,matrix(NA,1,1),matrix(NA,1,1),0,list())
}
else
{
RowxNumber <-matrix(RowxNumber[,1:(j-1)],ncol=(j-1))
NumberxCol <-matrix(NumberxCol[1:(j-1),],nrow=(j-1))
bimaxbic<-BiclustResult(resbic@Parameters,RowxNumber,NumberxCol,(j-1),list())
}
}
return(bimaxbic)
} |
power.t.test(delta = 0.5, power = 0.8) |
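# With the remaining defaults (two-sample, two-sided, sd = 1, sig.level = 0.05),
# this solves for the per-group sample size needed to detect a standardized
# difference of 0.5 with 80% power (approximately n = 64 per group).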
eGFR <- function(...) {
UseMethod("eGFR")
}
eGFR_internal <- function(
SCr,
SCysC,
Age,
height,
BUN,
male,
black,
pediatric) {
if (!is.na(SCr) & is.na(SCysC) & !is.na(Age) & !is.na(male) & !is.na(black) & !pediatric) {
eGFR_adult_SCr(SCr, Age, male, black)
} else if (is.na(SCr) & !is.na(SCysC) & !is.na(Age) & !is.na(male) & !pediatric) {
eGFR_adult_SCysC(SCysC, Age, male)
} else if (!is.na(SCr) & !is.na(SCysC) & !is.na(Age) & !is.na(male) & !is.na(black) & !pediatric) {
eGFR_adult_SCr_SCysC(SCr, SCysC, Age, male, black)
} else if (!is.na(SCr) & !is.na(height) & is.na(BUN) & pediatric) {
eGFR_child_SCr(SCr, height)
} else if (!is.na(SCr) & !is.na(height) & !is.na(BUN) & pediatric) {
eGFR_child_SCr_BUN(SCr, height, BUN)
} else if (!is.na(SCysC) & pediatric) {
eGFR_child_SCysC(SCysC)
} else {
warning("Could not find an appropriate eGFR() formula to use")
NA_real_
}
}
eGFR.data.frame <- function(.data,
SCr = NULL,
SCysC = NULL,
Age = NULL,
height = NULL,
BUN = NULL,
male = NULL,
black = NULL,
pediatric = NULL,
...) {
ellipsis::check_dots_used()
if (!is.null(SCr)) SCr <- .data[[rlang::as_name(rlang::enquo(SCr))]]
if (!is.null(SCysC)) SCysC <- .data[[rlang::as_name(rlang::enquo(SCysC))]]
if (!is.null(Age)) Age <- .data[[rlang::as_name(rlang::enquo(Age))]]
if (!is.null(height)) height <- .data[[rlang::as_name(rlang::enquo(height))]]
if (!is.null(BUN)) BUN <- .data[[rlang::as_name(rlang::enquo(BUN))]]
if (!is.null(male)) male <- .data[[rlang::as_name(rlang::enquo(male))]]
if (!is.null(black)) black <- .data[[rlang::as_name(rlang::enquo(black))]]
if (!is.null(pediatric)) pediatric <- .data[[rlang::as_name(rlang::enquo(pediatric))]]
if (!is.null(SCr)) {
eGFR(
SCr = SCr, SCysC = SCysC,
Age = Age, height = height, BUN = BUN,
male = male, black = black, pediatric = pediatric
)
} else if (!is.null(SCysC)) {
eGFR(
SCysC = SCysC,
Age = Age, height = height, BUN = BUN,
male = male, black = black, pediatric = pediatric
)
}
}
eGFR.units <- function(
SCr = NULL,
SCysC = NULL,
Age = NULL,
height = NULL,
BUN = NULL,
male = NULL,
black = NULL,
pediatric = NULL,
...) {
ellipsis::check_dots_used()
if (!is.null(SCr)) SCr <- as_metric(SCr = SCr, value_only = T)
if (!is.null(SCysC)) SCysC <- as_metric(SCysC = SCysC, value_only = T)
if (!is.null(Age)) Age <- as_metric(Age = Age, value_only = T)
if (!is.null(height)) height <- as_metric(height = height, value_only = T)
if (!is.null(BUN)) BUN <- as_metric(BUN = BUN, value_only = T)
if (!is.null(SCr)) {
eGFR_calc <- eGFR(
SCr = SCr, SCysC = SCysC,
Age = Age, height = height, BUN = BUN,
male = male, black = black, pediatric = pediatric
)
} else if (!is.null(SCysC)) {
eGFR_calc <- eGFR(
SCysC = SCysC,
Age = Age, height = height, BUN = BUN,
male = male, black = black, pediatric = pediatric
)
}
units::set_units(eGFR_calc, "mL/min/1.73m2")
}
eGFR.numeric <- function(
SCr = NULL,
SCysC = NULL,
Age = NULL,
height = NULL,
BUN = NULL,
male = NULL,
black = NULL,
pediatric = NULL,
...) {
ellipsis::check_dots_used()
cols <- c(SCr = NA, SCysC = NA, Age = NA, height = NA, BUN = NA, male = NA, black = NA, pediatric = NA)
df <- cbind(SCr, SCysC, Age, height, BUN, male, black, pediatric) %>%
tibble::as_tibble()
df <- tibble::add_column(df, !!!cols[!names(cols) %in% names(df)]) %>%
dplyr::mutate(dplyr::across(c(male, black, pediatric), as.logical))
if (is.null(Age) & is.null(pediatric)) {
warning("Either Age or pediatric should be provided. Assuming pediatric patients as Age must be provided for adults.")
df <- dplyr::mutate(df, pediatric = TRUE)
} else if (!is.null(Age) & is.null(pediatric)) {
df <- dplyr::mutate(df, pediatric = Age < 18)
} else if (is.null(Age) & !is.null(pediatric)) {
df <- dplyr::mutate(df, pediatric = !!pediatric)
} else {
check_ped_ok <- all.equal(df$Age < 18, df$pediatric)
if (is.character(check_ped_ok)) {
stop(paste("Inconsistencies found between pediatric and age colums:", check_ped_ok))
}
df <- dplyr::mutate(df, pediatric = !!pediatric)
}
df %>%
dplyr::rowwise() %>%
dplyr::mutate(eGFR = eGFR_internal(SCr, SCysC, Age, height, BUN, male, black, pediatric)) %>%
dplyr::pull(eGFR)
}
eGFR_adult_SCr <- function(...) {
UseMethod("eGFR_adult_SCr")
}
eGFR_adult_SCr.data.frame <- function(.data, SCr, Age, male, black, ...) {
ellipsis::check_dots_used()
eGFR_adult_SCr(
.data[[rlang::as_name(rlang::enquo(SCr))]],
.data[[rlang::as_name(rlang::enquo(Age))]],
.data[[rlang::as_name(rlang::enquo(male))]],
.data[[rlang::as_name(rlang::enquo(black))]]
)
}
eGFR_adult_SCr.units <- function(SCr, Age, male, black, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_adult_SCr(
as_metric(SCr = SCr, value_only = T),
as_metric(Age = Age, value_only = T),
male,
black
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_adult_SCr.numeric <- function(SCr, Age, male, black, ...) {
ellipsis::check_dots_used()
male <- as.logical(male)
black <- as.logical(black)
kappa <- dplyr::if_else(!male, 0.7, 0.9)
alpha <- dplyr::if_else(!male, -0.329, -0.411)
141 * pmin(SCr / kappa, 1)^alpha * pmax(SCr / kappa, 1)^-1.209 * 0.993^Age *
dplyr::if_else(male, 1, 1.018) *
dplyr::if_else(black, 1.159, 1)
}
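# A minimal usage sketch of the plain-numeric method above (the 2009 CKD-EPI
# creatinine equation; SCr in mg/dL), giving roughly 81 mL/min/1.73m2:
eGFR_adult_SCr(SCr = 1.0, Age = 60, male = TRUE, black = FALSE)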
eGFR_adult_SCysC <- function(...) {
UseMethod("eGFR_adult_SCysC")
}
eGFR_adult_SCysC.data.frame <- function(.data, SCysC, Age, male, ...) {
ellipsis::check_dots_used()
eGFR_adult_SCysC(
.data[[rlang::as_name(rlang::enquo(SCysC))]],
.data[[rlang::as_name(rlang::enquo(Age))]],
.data[[rlang::as_name(rlang::enquo(male))]]
)
}
eGFR_adult_SCysC.units <- function(SCysC, Age, male, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_adult_SCysC(
as_metric(SCysC = SCysC, value_only = T),
as_metric(Age = Age, value_only = T),
male
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_adult_SCysC.numeric <- function(SCysC, Age, male, ...) {
ellipsis::check_dots_used()
male <- as.logical(male)
133 * pmin(SCysC / 0.8, 1)^-0.499 * pmax(SCysC / 0.8, 1)^-1.328 * 0.996^Age *
dplyr::if_else(male, 1, 0.932)
}
eGFR_adult_SCr_SCysC <- function(...) {
UseMethod("eGFR_adult_SCr_SCysC")
}
eGFR_adult_SCr_SCysC.data.frame <- function(.data, SCr, SCysC, Age, male, black, ...) {
ellipsis::check_dots_used()
eGFR_adult_SCr_SCysC(
.data[[rlang::as_name(rlang::enquo(SCr))]],
.data[[rlang::as_name(rlang::enquo(SCysC))]],
.data[[rlang::as_name(rlang::enquo(Age))]],
.data[[rlang::as_name(rlang::enquo(male))]],
.data[[rlang::as_name(rlang::enquo(black))]]
)
}
eGFR_adult_SCr_SCysC.units <- function(SCr, SCysC, Age, male, black, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_adult_SCr_SCysC(
as_metric(SCr = SCr, value_only = T),
as_metric(SCysC = SCysC, value_only = T),
as_metric(Age = Age, value_only = T),
male,
black
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_adult_SCr_SCysC.numeric <- function(SCr, SCysC, Age, male, black, ...) {
ellipsis::check_dots_used()
male <- as.logical(male)
black <- as.logical(black)
kappa <- dplyr::if_else(!male, 0.7, 0.9)
alpha <- dplyr::if_else(!male, -0.248, -0.207)
eGFR <- 135 * pmin(SCr / kappa, 1)^alpha * pmax(SCr / kappa, 1)^-0.601 *
pmin(SCysC / 0.8, 1)^-0.375 * pmax(SCysC / 0.8, 1)^-0.711 *
0.995^Age *
dplyr::if_else(male, 1, 0.969) *
dplyr::if_else(black, 1.08, 1)
}
eGFR_child_SCr <- function(...) {
UseMethod("eGFR_child_SCr")
}
eGFR_child_SCr.data.frame <- function(.data, SCr, height, ...) {
ellipsis::check_dots_used()
eGFR_child_SCr(
.data[[rlang::as_name(rlang::enquo(SCr))]],
.data[[rlang::as_name(rlang::enquo(height))]]
)
}
eGFR_child_SCr.units <- function(SCr, height, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_child_SCr(
as_metric(SCr = SCr, value_only = T),
as_metric(height = height, value_only = T)
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_child_SCr.numeric <- function(SCr, height, ...) {
ellipsis::check_dots_used()
41.3 * (height / SCr)
}
eGFR_child_SCr_BUN <- function(...) {
UseMethod("eGFR_child_SCr_BUN")
}
eGFR_child_SCr_BUN.data.frame <- function(.data, SCr, height, BUN, ...) {
ellipsis::check_dots_used()
eGFR_child_SCr_BUN(
.data[[rlang::as_name(rlang::enquo(SCr))]],
.data[[rlang::as_name(rlang::enquo(height))]],
.data[[rlang::as_name(rlang::enquo(BUN))]]
)
}
eGFR_child_SCr_BUN.units <- function(SCr, height, BUN, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_child_SCr_BUN(
as_metric(SCr = SCr, value_only = T),
as_metric(height = height, value_only = T),
as_metric(BUN = BUN, value_only = T)
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_child_SCr_BUN.numeric <- function(SCr, height, BUN, ...) {
ellipsis::check_dots_used()
40.7 * (height / SCr)^0.64 * (30 / BUN)^0.202
}
eGFR_child_SCysC <- function(...) {
UseMethod("eGFR_child_SCysC")
}
eGFR_child_SCysC.data.frame <- function(.data, SCysC, ...) {
ellipsis::check_dots_used()
eGFR_child_SCysC(
.data[[rlang::as_name(rlang::enquo(SCysC))]]
)
}
eGFR_child_SCysC.units <- function(SCysC, ...) {
ellipsis::check_dots_used()
eGFR <- eGFR_child_SCysC(
as_metric(SCysC = SCysC, value_only = T)
)
units::set_units(eGFR, "mL/min/1.73m2")
}
eGFR_child_SCysC.numeric <- function(SCysC, ...) {
ellipsis::check_dots_used()
70.69 * (SCysC)^-0.931
}
GFR_stages <- factor(c("G1", "G2", "G3a", "G3b", "G4", "G5"), ordered = TRUE)
GFR_staging <- function(...) {
UseMethod("GFR_staging")
}
GFR_staging.data.frame <- function(.data, GFR, ...) {
GFR_staging(
.data[[rlang::as_name(rlang::enquo(GFR))]]
)
}
GFR_staging.units <- function(GFR, ...) {
if (grepl("1.73m2-1", units::deparse_unit(GFR))) {
GFR <- GFR * units::set_units(1, "1.73m2")
}
GFR_staging(
as_metric(GFR = GFR, value_only = T)
)
}
GFR_staging.numeric <- function(GFR, ...) {
dplyr::case_when(
GFR >= 90 ~ GFR_stages[1],
GFR >= 60 ~ GFR_stages[2],
GFR >= 45 ~ GFR_stages[3],
GFR >= 30 ~ GFR_stages[4],
GFR >= 15 ~ GFR_stages[5],
GFR >= 0 ~ GFR_stages[6],
TRUE ~ GFR_stages[NA_integer_]
)
}
Albuminuria_stages <- factor(
c("A1", "A2", "A3", "No Albuminuria"),
levels = c("No Albuminuria", "A1", "A2", "A3"),
ordered = TRUE
)
Albuminuria_staging_AER <- function(...) {
UseMethod("Albuminuria_staging_AER")
}
Albuminuria_staging_AER.data.frame <- function(.data, AER, ...) {
Albuminuria_staging_AER(
.data[[rlang::as_name(rlang::enquo(AER))]]
)
}
Albuminuria_staging_AER.units <- function(AER, ...) {
Albuminuria_staging_AER(
as_metric(AER = AER, value_only = T)
)
}
Albuminuria_staging_AER.numeric <- function(AER, ...) {
dplyr::case_when(
AER > 300 ~ Albuminuria_stages[3],
AER > 30 ~ Albuminuria_stages[2],
AER > 0 ~ Albuminuria_stages[1],
TRUE ~ dplyr::last(Albuminuria_stages)
)
}
Albuminuria_staging_ACR <- function(...) {
UseMethod("Albuminuria_staging_ACR")
}
Albuminuria_staging_ACR.data.frame <- function(.data, ACR, ...) {
Albuminuria_staging_ACR(
.data[[rlang::as_name(rlang::enquo(ACR))]]
)
}
Albuminuria_staging_ACR.units <- function(ACR, ...) {
Albuminuria_staging_ACR(
as_metric(ACR = ACR, value_only = T)
)
}
Albuminuria_staging_ACR.numeric <- function(ACR, ...) {
dplyr::case_when(
ACR > 30 ~ Albuminuria_stages[3],
ACR > 3 ~ Albuminuria_stages[2],
ACR > 0 ~ Albuminuria_stages[1],
TRUE ~ dplyr::last(Albuminuria_stages)
)
} |
.zj_abf = function(Zj, int.Sigma, int.nrep, int.ERR, int.r) {
stopifnot(inherits(Zj, "numeric"))
stopifnot(inherits(int.ERR, "matrix"))
stopifnot(inherits(int.r, "numeric"))
exp.zm = Zj %*% int.Sigma
mexp.zm = matrix(exp.zm, int.nrep, length(Zj), byrow = TRUE)
zstar = mexp.zm + int.ERR
0.5 * t(log(1 - int.r) + (int.r * t(zstar^2)))
}
.zj_pp = function(Zj, int.Sigma, int.nrep, int.ERR, int.r) {
stopifnot(inherits(Zj, "numeric"))
stopifnot(inherits(int.ERR, "matrix"))
stopifnot(inherits(int.r, "numeric"))
exp.zm = Zj %*% int.Sigma
mexp.zm = matrix(exp.zm, int.nrep, length(Zj), byrow = TRUE)
zstar = mexp.zm + int.ERR
bf = 0.5 * t(log(1 - int.r) + (int.r * t(zstar^2)))
denom = logsum_matrix(bf)
exp(bf - denom)
}
NULL
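# Row-wise log-sum-exp helper used by .zj_pp(): subtracting the row maximum
# before exponentiating guards against overflow. rowMaxs() is not base R; it
# is assumed to be supplied by a package such as 'matrixStats'.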
logsum_matrix=function(x) {
my.max=rowMaxs(x)
my.max + log(rowSums(exp(x - my.max)))
} |
UDT <- function (case_a, case_b, controls_a, controls_b,
sd_a = NULL, sd_b = NULL,
sample_size = NULL, r_ab = NULL,
alternative = c("two.sided", "greater", "less"),
conf_int = TRUE, conf_level = 0.95,
conf_int_spec = 0.01,
na.rm = FALSE) {
if (length(case_a) > 1 | length(case_b) > 1) stop("Case scores should be single value")
if (length(controls_a) > 1 & length(controls_b) > 1) {
if (length(controls_a) != length(controls_b)) stop("Sample sizes must be equal")
}
if (length(controls_a) > 1 & length(controls_b) > 1 & is.null(sample_size) == FALSE) message("Value on sample_size will be ignored")
if (length(controls_a) > 1 & is.null(sd_a) == FALSE) message("Value on sd_a will be ignored")
if (length(controls_b) > 1 & is.null(sd_b) == FALSE) message("Value on sd_b will be ignored")
if (length(controls_a) == 1 & is.null(sd_a) == TRUE) stop("Please give sd and n on task A if controls_a is to be treated as mean")
if (length(controls_b) == 1 & is.null(sd_b) == TRUE) stop("Please give sd and n on task B if controls_b is to be treated as mean")
if (conf_int == TRUE & (conf_level < 0 | conf_level >= 1)) stop("Confidence level must be between 0 and 1")
if(is.na(case_a) == TRUE | is.na(case_b) == TRUE) stop("One or both case scores is NA")
if (na.rm == TRUE) {
if (sum(is.na(controls_a)) > 0 & sum(is.na(controls_b)) == 0 ) {
controls_b <- controls_b[!is.na(controls_a)]
controls_a <- controls_a[!is.na(controls_a)]
warning("Removal of NAs on controls_a resulted in removal of non-NAs on controls_b")
}
if (sum(is.na(controls_b)) > 0 & sum(is.na(controls_a)) == 0 ) {
controls_a <- controls_a[!is.na(controls_b)]
controls_b <- controls_b[!is.na(controls_b)]
warning("Removal of NAs on controls_b resulted in removal of non-NAs on controls_a")
}
if (sum(is.na(controls_b)) > 0 & sum(is.na(controls_a)) > 0 ) {
if (identical(!is.na(controls_a), !is.na(controls_b)) == TRUE) {
controls_a <- controls_a[!is.na(controls_a)]
controls_b <- controls_b[!is.na(controls_b)]
} else {
con_a <- controls_a[!is.na(controls_a) & !is.na(controls_b)]
con_b <- controls_b[!is.na(controls_a) & !is.na(controls_b)]
controls_a <- con_a
controls_b <- con_b
warning("Removal of NAs on one control sample resulted in removal of non-NAs on the other")
}
}
}
if (sum(is.na(controls_a)) > 0 | sum(is.na(controls_b)) > 0) stop("Controls contains NA, set na.rm = TRUE to proceed")
if (length(controls_a) > 1 & length(controls_b) > 1) {
if (length(controls_a) != length(controls_b)) stop("Sample sizes must be equal")
}
alternative <- match.arg(alternative)
con_m_a <- mean(controls_a)
con_m_b <- mean(controls_b)
con_sd_a <- stats::sd(controls_a)
if (length(controls_a) == 1 & is.null(sd_a) == FALSE) con_sd_a <- sd_a
con_sd_b <- stats::sd(controls_b)
if (length(controls_b) == 1 & is.null(sd_b) == FALSE) con_sd_b <- sd_b
n <- length(controls_a)
if (length(controls_a) == 1 | length(controls_b) == 1) {
if (is.null(sample_size) == TRUE) stop("Please set sample size")
n <- sample_size
if (length(controls_a) > 1 & n != length(controls_a)) stop("Sample sizes must be equal")
if (length(controls_b) > 1 & n != length(controls_b)) stop("Sample sizes must be equal")
}
if (is.null(r_ab) == TRUE & length(controls_a) == 1) stop("Please set correlation between tasks")
if (is.null(r_ab) == TRUE & length(controls_b) == 1) stop("Please set correlation between tasks")
if (is.null(r_ab) == FALSE){
if (r_ab < -1 | r_ab > 1) stop("Correlation must be between -1 and 1")
}
r <- r_ab
if (length(controls_a) > 1 & length(controls_b) > 1) r <- stats::cor(controls_a, controls_b)
df <- n - 1
def_a <- (case_a - con_m_a)
def_b <- (case_b - con_m_b)
dif <- (def_a - def_b)
std.er <- sqrt(
(con_sd_a^2 + con_sd_b^2 - 2*con_sd_a*con_sd_b*r) * ((n + 1) / n)
)
tstat <- dif/std.er
names(tstat) <- "t"
if (alternative == "two.sided") {
pval <- 2 * stats::pt(abs(tstat), df = df, lower.tail = FALSE)
} else if (alternative == "greater") {
pval <- stats::pt(tstat, df = df, lower.tail = FALSE)
} else {
pval <- stats::pt(tstat, df = df, lower.tail = TRUE)
}
z_a <- (case_a - con_m_a)/con_sd_a
z_b <- (case_b - con_m_b)/con_sd_b
zdcc <- (z_a - z_b)/sqrt(2-2*r)
estimate <- c(def_a, def_b, zdcc, ifelse(alternative == "two.sided", (pval/2*100), pval*100))
if (conf_int == T) {
alph <- 1 - conf_level
stop_ci_lo <- FALSE
ncp_lo <- zdcc*sqrt(n)
perc_lo <- 1 - (alph/2)
while (stop_ci_lo == FALSE) {
ncp_lo <- ncp_lo - conf_int_spec
suppressWarnings(
quant <- stats::qt(perc_lo, df = df, ncp = ncp_lo)
)
if (quant <= zdcc*sqrt(n)) {
stop_ci_lo <- TRUE
}
}
stop_ci_up <- FALSE
ncp_up <- zdcc*sqrt(n)
perc_up <- (alph/2)
while (stop_ci_up == FALSE) {
ncp_up <- ncp_up + conf_int_spec
suppressWarnings(
quant <- stats::qt(perc_up, df = df, ncp = ncp_up)
)
if (quant >= zdcc*sqrt(n)) {
stop_ci_up <- TRUE
}
}
ci_lo_zdcc <- ncp_lo/sqrt(n)
ci_up_zdcc <- ncp_up/sqrt(n)
cint_zdcc <- c(ci_lo_zdcc, ci_up_zdcc)
zdcc.name <- paste0("Std. task discrepancy (Z-DCC), ",
100*conf_level, "% CI [",
format(round(cint_zdcc[1], 2), nsmall = 2),", ",
format(round(cint_zdcc[2], 2), nsmall = 2),"]")
if (alternative == "less") {
ci_lo_p <- stats::pnorm(ci_lo_zdcc)*100
ci_up_p <- stats::pnorm(ci_up_zdcc)*100
cint_p <- c(ci_lo_p, ci_up_p)
p.name <- paste0("Proportion below case (%), ",
100*conf_level, "% CI [",
format(round(cint_p[1], 2), nsmall = 2),", ",
format(round(cint_p[2], 2), nsmall = 2),"]")
} else if (alternative == "greater") {
ci_lo_p <- (1 - stats::pnorm(ci_lo_zdcc))*100
ci_up_p <- (1 - stats::pnorm(ci_up_zdcc))*100
cint_p <- c(ci_up_p, ci_lo_p)
p.name <- paste0("Proportion above case (%), ",
100*conf_level, "% CI [",
format(round(cint_p[1], 2), nsmall = 2),", ",
format(round(cint_p[2], 2), nsmall = 2),"]")
} else {
if (tstat < 0) {
ci_lo_p <- stats::pnorm(ci_lo_zdcc)*100
ci_up_p <- stats::pnorm(ci_up_zdcc)*100
cint_p <- c(ci_lo_p, ci_up_p)
p.name <- paste0("Proportion below case (%), ",
100*conf_level, "% CI [",
format(round(cint_p[1], 2), nsmall = 2),", ",
format(round(cint_p[2], 2), nsmall = 2),"]")
} else {
ci_lo_p <- (1 - stats::pnorm(ci_lo_zdcc))*100
ci_up_p <- (1 - stats::pnorm(ci_up_zdcc))*100
cint_p <- c(ci_up_p, ci_lo_p)
p.name <- paste0("Proportion above case (%), ",
100*conf_level, "% CI [",
format(round(cint_p[1], 2), nsmall = 2),", ",
format(round(cint_p[2], 2), nsmall = 2),"]")
}
}
names(cint_zdcc) <- c("Lower Z-DCC CI", "Upper Z-DCC CI")
names(cint_p) <- c("Lower p CI", "Upper p CI")
typ.int <- 100*conf_level
names(typ.int) <- "Confidence (%)"
interval <- c(typ.int, cint_zdcc, cint_p)
names(estimate) <- c("Std. case score, task A (Z-CC)",
"Std. case score, task B (Z-CC)",
zdcc.name,
p.name)
} else {
interval <- NULL
if (alternative == "two.sided") {
p.name <- paste("Proportion", ifelse(tstat < 0, "below", "above"), "case (%)")
} else if (alternative == "greater") {
p.name <- "Proportion above case (%)"
} else {
p.name <- "Proportion below case (%)"
}
names(estimate) <- c("Std. case score, task A (Z-CC)",
"Std. case score, task B (Z-CC)",
"Std. discrepancy (Z-DCC)",
p.name)
}
names(df) <- "df"
null.value <- 0
names(null.value) <- "difference between tasks"
dname <- paste0("Case A: ", format(round(case_a, 2), nsmall = 2), ", ",
"B: ", format(round(case_b, 2), nsmall = 2), ", ",
"Ctrl. A (m, sd): (", format(round(con_m_a, 2), nsmall = 2), ", ",format(round(con_sd_a, 2), nsmall = 2), "), ",
"B: (", format(round(con_m_b, 2), nsmall = 2), ", ",format(round(con_sd_b, 2), nsmall = 2), ")")
names(con_m_a) <- "Mean A"
names(con_m_b) <- "Mean B"
names(con_sd_a) <- "SD A"
names(con_sd_b) <- "SD B"
names(n) <- "Sample size"
control.desc <- c(con_m_a, con_m_b, con_sd_a, con_sd_b, n)
output <- list(statistic = tstat,
parameter = df,
p.value = pval,
estimate = estimate,
interval = interval,
control.desc = control.desc,
null.value = null.value,
alternative = alternative,
               method = "Unstandardised Difference Test",
data.name = dname)
class(output) <- "htest"
output
} |
context("pt_tables")
test_that(
"invalid inputs produce an error",
{
expect_error({
pt_tables()
})
expect_error({
pt_tables(iris)
})
}
) |
library(ggvis)
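# Demo data generator: an n x n grid over [-r*pi, r*pi] with the damped radial
# wave z = cos(r^2) * exp(-r/6); (x, y) and (x2, y2) are opposite corners of
# each grid cell, consumed by layer_rects() below.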
pp <- function (n, r = 4) {
width <- 2 * r * pi / (n - 1)
mid <- seq(-r * pi, r * pi, len = n)
df <- expand.grid(x = mid, y = mid)
df$r <- sqrt(df$x^2 + df$y^2)
df$z <- cos(df$r^2)*exp(-df$r/6)
df$y2 <- df$y + width
df$x2 <- df$x + width
df$x <- df$x - width
df$y <- df$y - width
df
}
pp(100) %>% ggvis(~x, ~y, x2 = ~x2, y2 = ~y2, fill = ~ z, stroke := NA) %>%
layer_rects() |
knitr::opts_chunk$set(echo = TRUE, eval = TRUE)
library(dataMaid)
Sys.setenv(TZ="Europe/Copenhagen")
Sys.getenv("TZ")
allClasses()
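# A user-defined dataMaid summary function: count the zero entries of a
# variable, wrap the result in a summaryResult, then register it with
# summaryFunction() below.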
countZeros <- function(v, ...) {
val <- length(which(v == 0))
summaryResult(list(feature = "No. zeros", result = val, value = val))
}
countZeros(c(rep(0, 5), 1:100))
countZeros(c(rep(0, 5), letters))
countZeros <- summaryFunction(countZeros,
description = "Count number of zeros",
classes = c("character", "factor", "integer",
"labelled", "numeric"))
meanSummary <- function(v, maxDecimals = 2) {
UseMethod("meanSummary")
}
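# Calling the generic before any methods exist fails with
# "no applicable method":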
meanSummary(1)
meanSummaryHelper <- function(v, maxDecimals) {
v <- na.omit(v)
val <- mean(v)
res <- round(val, maxDecimals)
summaryResult(list(feature = "Mean", result = res, value = val))
}
meanSummary.logical <- function(v, maxDecimals = 2) {
meanSummaryHelper(v, maxDecimals)
}
meanSummary.numeric <- function(v, maxDecimals = 2) {
meanSummaryHelper(v, maxDecimals)
}
meanSummary.integer <- function(v, maxDecimals = 2) {
meanSummaryHelper(v, maxDecimals)
}
meanSummary(rnorm(100))
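# There is no character method, so this call errors: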
meanSummary(letters)
meanSummary <- summaryFunction(meanSummary,
description = "Compute arithmetic mean")
allSummaryFunctions()
x <- sample(c("a", "b"), size = 100, replace = TRUE,
prob = c(0.3, 0.7))
mosaicplot(table(x))
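# A user-defined dataMaid visual function. By convention it builds an
# unevaluated call and either evaluates it (doEval = TRUE) or returns the
# deparsed code for embedding in the generated report.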
mosaicVisual <- function(v, vnam, doEval) {
thisCall <- call("mosaicplot", table(v), main = vnam, xlab = "")
if (doEval) {
return(eval(thisCall))
} else return(deparse(thisCall))
}
mosaicVisual(x, "Variable x", doEval = FALSE)
mosaicVisual <- visualFunction(mosaicVisual,
description = "Mosaic plots using graphics",
classes = setdiff(allClasses(),
c("numeric",
"integer",
"Date")))
library(ggplot2)
prettierHistHelper <- function(v, vnam) {
p <- ggplot(data.frame(v = v), aes(x = v)) +
geom_histogram(col = "white", bins = 20) +
xlab(vnam)
p
}
prettierHistHelper(rnorm(100), "Standard normal variable")
prettierHist <- function(v, vnam, doEval = TRUE) {
thisCall <- call("prettierHistHelper", v = v, vnam = vnam)
if (doEval) {
return(eval(thisCall))
} else return(deparse(thisCall))
}
prettierHist <- visualFunction(prettierHist,
description = "ggplot2 style histogram with contours",
classes = c("numeric", "integer", "logical", "Date"))
allVisualFunctions()
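# A user-defined dataMaid check: flag variables whose values all share one
# common length of at least 8 characters, which suggests ID codes.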
isID <- function(v, nMax = NULL, ...) {
out <- list(problem = FALSE, message = "")
if (class(v) %in% setdiff(allClasses(), c("logical", "Date"))) {
v <- as.character(v)
lengths <- c(nchar(v))
if (all(lengths >= 8) & length(unique(lengths)) == 1) {
out$problem <- TRUE
out$message <- "Warning: This variable seems to contain ID codes."
}
}
out
}
idVar <- c("1234-1233", "9221-0289",
"9831-1201", "6722-1243")
isID(idVar)
isID(rnorm(10))
identifyColons <- function(v, nMax = Inf, ... ) {
v <- unique(na.omit(v))
problemMessage <- "Note that the following values include colons:"
problem <- FALSE
problemValues <- NULL
problemValues <- v[sapply(gregexpr(".:.", v),
function(x) all(x != -1))]
if (length(problemValues) > 0) {
problem <- TRUE
}
problemStatus <- list(problem = problem,
problemValues = problemValues)
outMessage <- messageGenerator(problemStatus, problemMessage, nMax)
checkResult(list(problem = problem,
message = outMessage,
problemValues = problemValues))
}
identifyColons <- checkFunction(identifyColons,
description = "Identify non-trailing colons",
classes = c("character", "factor", "labelled"))
iaVar <- factor(c("a", "b", "a", "c")):factor(c(1, 2, 3, 4))
identifyColons(iaVar)
identifyColons(letters)
data(artData)
head(artData, 5)
makeDataReport(artData,
preChecks = c("isKey", "isSingular", "isSupported", "isID"),
summaries = setSummaries(
character = defaultCharacterSummaries(add = "countZeros"),
factor = defaultFactorSummaries(add = "countZeros"),
labelled = defaultLabelledSummaries(add = "countZeros"),
numeric = defaultNumericSummaries(add = c("countZeros", "meanSummary")),
integer = defaultIntegerSummaries(add = c("countZeros", "meanSummary")),
logical = defaultLogicalSummaries(add = c("meanSummary"))
),
visuals = setVisuals(
factor = "mosaicVisual",
numeric = "prettierHist",
integer = "prettierHist",
Date = "prettierHist"
),
checks = setChecks(
character = defaultCharacterChecks(add = "identifyColons"),
factor = defaultFactorChecks(add = "identifyColons"),
labelled = defaultLabelledChecks(add = "identifyColons")
),
replace = TRUE,
output = "html",
open = FALSE
)
htmltools::includeHTML("dataMaid_artData.html") |
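# MultiSourceCopyNumberNormalization (aroma framework, R.oo-style class):
# normalizes total copy-number estimates for samples measured on several
# platforms/sources toward each other by fitting a principal curve through
# smoothed signals and backtransforming each source.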
setConstructorS3("MultiSourceCopyNumberNormalization", function(dsList=NULL, fitUgp=NULL, subsetToFit=NULL, targetDimension=1, align=c("byChromosome", "none"), tags="*", ...) {
if (!is.null(dsList)) {
.requirePkg("aroma.light", quietly=TRUE);
if (is.list(dsList)) {
K <- length(dsList);
className <- "AromaUnitTotalCnBinarySet";
for (kk in seq_len(K)) {
ds <- dsList[[kk]];
ds <- Arguments$getInstanceOf(ds, className, .name="dsList");
}
if (length(dsList) < 2L) {
throw("Argument 'dsList' must contain more than one ",
className, ": ", K);
}
} else {
throw("Argument 'dsList' is not a list: ", class(dsList)[1L]);
}
fitUgp <- Arguments$getInstanceOf(fitUgp, "AromaUgpFile");
if (is.null(subsetToFit)) {
} else if (is.character(subsetToFit)) {
throw("Yet not implemented: Argument 'subsetToFit' is of type character.");
} else {
subsetToFit <- Arguments$getIndices(subsetToFit, max=nbrOfUnits(fitUgp));
}
align <- match.arg(align);
targetDimension <- Arguments$getIndex(targetDimension, max=K);
}
args <- list(...);
if (length(args) > 0L) {
argsStr <- paste(names(args), collapse=", ");
throw("Unknown arguments: ", argsStr);
}
extend(Object(), c("MultiSourceCopyNumberNormalization", uses("ParametersInterface")),
.tags = tags,
.dsList = dsList,
.fitUgp = fitUgp,
.subsetToFit = subsetToFit,
.align = align,
.targetDimension = targetDimension,
"cached:.dsSmoothList" = NULL
)
})
setMethodS3("as.character", "MultiSourceCopyNumberNormalization", function(x, ...) {
this <- x;
s <- sprintf("%s:", class(this)[1L]);
tags <- getTags(this, collapse=", ");
s <- c(s, sprintf("Tags: %s", tags));
dsList <- getInputDataSets(this);
s <- c(s, sprintf("Data sets (%d):", length(dsList)));
for (kk in seq_along(dsList)) {
ds <- dsList[[kk]];
s <- c(s, as.character(ds));
}
names <- getAllNames(this);
n <- length(names);
s <- c(s, sprintf("Number of common array names: %d", n));
s <- c(s, sprintf("Names: %s [%d]", hpaste(names), n));
s <- c(s, sprintf("Parameters: %s", getParametersAsString(this)));
GenericSummary(s);
}, protected=TRUE)
setMethodS3("getInputDataSets", "MultiSourceCopyNumberNormalization", function(this, ...) {
this$.dsList;
})
setMethodS3("nbrOfDataSets", "MultiSourceCopyNumberNormalization", function(this, ...) {
length(getInputDataSets(this));
});
setMethodS3("getAsteriskTags", "MultiSourceCopyNumberNormalization", function(this, ...) {
tags <- "mscn";
align <- this$.align;
if (align != "none") {
tags <- c(tags, align);
}
tags <- paste(tags, collapse=",");
tags;
})
setMethodS3("getTags", "MultiSourceCopyNumberNormalization", function(this, collapse=NULL, ...) {
tags <- this$.tags;
tags <- unlist(strsplit(tags, split=","));
tags[tags == "*"] <- getAsteriskTags(this);
tags <- unlist(strsplit(tags, split=","));
if (!is.null(collapse)) {
tags <- paste(tags, collapse=collapse);
}
tags;
})
setMethodS3("getOutputPaths", "MultiSourceCopyNumberNormalization", function(this, ...) {
dsList <- getInputDataSets(this);
tags <- getTags(this);
paths <- lapply(dsList, FUN=function(ds) {
path <- getPath(ds);
path <- getParent(path, 2L);
rootPath <- basename(path);
path <- getParent(path);
rootPath <- "cnData";
path <- Arguments$getWritablePath(rootPath);
fullname <- getFullName(ds);
fullname <- paste(c(fullname, tags), collapse=",");
chipType <- getChipType(ds);
file.path(path, fullname, chipType);
});
paths <- unlist(paths, use.names=FALSE);
paths;
}, protected=TRUE)
setMethodS3("getOutputDataSets", "MultiSourceCopyNumberNormalization", function(this, ..., force=FALSE, verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
verbose && enter(verbose, "Retrieving list of output data sets");
dsList <- getInputDataSets(this);
paths <- getOutputPaths(this);
dsOutList <- list();
for (kk in seq_along(dsList)) {
ds <- dsList[[kk]];
verbose && enter(verbose, sprintf("Data set %d ('%s') of %d",
kk, getFullName(ds), length(dsList)));
path <- paths[[kk]];
if (isDirectory(path)) {
verbose && enter(verbose, "Scanning directory for matching data files");
verbose && cat(verbose, "Path: ", path);
dsOut <- byPath(ds, path=path, ..., verbose=less(verbose, 10));
verbose && enter(verbose, "Keeping output data files matching input data files");
fullnames <- getFullNames(ds);
df <- getFile(ds, 1L);
translator <- getFullNameTranslator(df);
setFullNamesTranslator(dsOut, translator);
fullnamesOut <- getFullNames(dsOut);
idxs <- match(fullnames, fullnamesOut);
verbose && str(verbose, idxs);
if (anyMissing(idxs)) {
throw("Should not happen.");
}
verbose && cat(verbose, "Number of files dropped: ", length(dsOut) - length(idxs));
verbose && cat(verbose, "Number of files kept: ", length(idxs));
dsOut <- extract(dsOut, idxs);
verbose && exit(verbose);
verbose && exit(verbose);
} else {
dsOut <- NA;
}
dsOutList[[kk]] <- dsOut;
verbose && exit(verbose);
}
verbose && exit(verbose);
dsOutList;
})
setMethodS3("getFitAromaUgpFile", "MultiSourceCopyNumberNormalization", function(this, ...) {
this$.fitUgp;
}, protected=TRUE)
setMethodS3("getAllNames", "MultiSourceCopyNumberNormalization", function(this, ...) {
dsList <- getInputDataSets(this);
allNames <- lapply(dsList, getNames, ...);
allNames <- unlist(allNames, use.names=FALSE);
allNames <- unique(allNames);
allNames <- sort(allNames);
allNames;
})
setMethodS3("extractTupleOfDataFiles", "MultiSourceCopyNumberNormalization", function(this, dsList, name, ..., na.rm=FALSE, verbose=FALSE) {
if (is.list(dsList)) {
className <- "AromaUnitTotalCnBinarySet";
for (kk in seq_along(dsList)) {
ds <- dsList[[kk]];
ds <- Arguments$getInstanceOf(ds, className, .name="dsList");
}
if (length(dsList) < 2L) {
throw("Argument 'dsList' must contain more than one ", className,
": ", length(dsList));
}
} else {
throw("Argument 'dsList' is not a list: ", class(dsList)[1L]);
}
name <- Arguments$getCharacter(name);
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
verbose && enter(verbose, "Getting list tuple of data files for one sample");
verbose && cat(verbose, "Sample name: ", name);
dfList <- lapply(dsList, function(ds) {
idx <- indexOf(ds, name);
df <- NA;
if (!is.na(idx)) {
if (length(idx) > 1L) {
throw("Multiple occurances identified for this sample: ",
getName(ds), " => ", paste(idx, collapse=", "));
}
df <- getFile(ds, idx);
}
df;
});
if (na.rm) {
keep <- sapply(dfList, FUN=function(df) !identical(df, NA));
dfList <- dfList[keep];
}
verbose && cat(verbose, "Number of arrays: ", length(dfList));
verbose && exit(verbose);
dfList;
}, protected=TRUE)
setMethodS3("getSmoothedDataSets", "MultiSourceCopyNumberNormalization", function(this, ..., verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
dsSmoothList <- this$.dsSmoothList;
if (is.null(dsSmoothList)) {
verbose && enter(verbose, "Smoothing all data sets to the same set of loci");
dsList <- getInputDataSets(this);
verbose && cat(verbose, "Number of data sets: ", length(dsList));
targetUgp <- getFitAromaUgpFile(this);
verbose && print(verbose, targetUgp);
kernel <- "gaussian";
sd <- 50e3;
verbose && printf(verbose, "Kernel: %s\n", kernel);
verbose && printf(verbose, "Bandwidth (sd): %.2f\n", sd);
dsSmoothList <- list();
for (kk in seq_along(dsList)) {
ds <- dsList[[kk]];
verbose && enter(verbose, sprintf("Data set %d ('%s') of %d",
kk, getFullName(ds), length(dsList)));
sm <- TotalCnKernelSmoothing(ds, targetUgp=targetUgp,
kernel=kernel, bandwidth=sd);
verbose && print(verbose, sm);
dsSmoothList[[kk]] <- process(sm, verbose=less(verbose, 1));
verbose && exit(verbose);
}
names(dsSmoothList) <- names(dsList);
this$.dsSmoothList <- dsSmoothList;
verbose && exit(verbose);
}
dsSmoothList;
}, protected=TRUE)
setMethodS3("getSubsetToFit", "MultiSourceCopyNumberNormalization", function(this, ..., verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
units <- this$.subsetToFit;
if (is.null(units)) {
verbose && enter(verbose, "Identify subset of (smoothed) units for fitting the model");
ugp <- getFitAromaUgpFile(this);
verbose && print(verbose, ugp);
verbose && enter(verbose, "Querying UGP for units on chromosomes of interest");
chromosomes <- 1:22;
verbose && cat(verbose, "Chromosomes to fit: ",
seqToHumanReadable(chromosomes));
units <- sapply(chromosomes, FUN=function(cc) {
getUnitsOnChromosome(ugp, cc);
});
units <- unlist(units, use.names=FALSE);
units <- unique(units);
units <- sort(units);
verbose && str(verbose, units);
verbose && exit(verbose);
this$.subsetToFit <- units;
verbose && exit(verbose);
}
units;
}, protected=TRUE)
setMethodS3("getParameters", "MultiSourceCopyNumberNormalization", function(this, ...) {
params <- NextMethod("getParameters");
params$subsetToFit <- getSubsetToFit(this, ...);
params$fitUgp <- getFitAromaUgpFile(this, ...);
params$align <- this$.align;
params$targetDimension <- this$.targetDimension;
params$pcBandwidth <- this$.pcBandwidth;
params;
}, protected=TRUE)
setMethodS3("getPrincipalCurveEstimator", "MultiSourceCopyNumberNormalization", function(this, ...) {
.requirePkg("aroma.light", quietly=TRUE);
params <- getParameters(this);
df <- params$pcBandwidth;
if (is.null(df)) {
df <- 5;
}
df <- Arguments$getDouble(df);
smoother <- function(lambda, xj, ...) {
o <- order(lambda);
lambda <- lambda[o];
xj <- xj[o];
fit <- smooth.spline(lambda, xj, ..., df=df, keep.data=FALSE);
predict(fit, x=lambda)$y;
}
robustSmoother <- function(lambda, xj, ...) {
o <- order(lambda);
lambda <- lambda[o];
xj <- xj[o];
fit <- .robustSmoothSpline(lambda, xj, ..., df=df);
predict(fit, x=lambda)$y;
}
fcn <- function(Y, ...) {
.fitPrincipalCurve(Y, smoother=smoother, ...);
}
attr(fcn, "smoother") <- smoother;
attr(fcn, "df") <- df;
fcn;
}, protected=TRUE);
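# fitOne(): fit the multi-source model for a single sample. Signals smoothed
# to a common set of loci are stacked into a matrix Y (one column per source),
# a principal curve is fitted through Y and, when aligning by chromosome,
# per-chromosome shifts are estimated. Results are memoized via
# loadCache()/saveCache().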
setMethodS3("fitOne", "MultiSourceCopyNumberNormalization", function(this, dfList, ..., force=FALSE, .retData=FALSE, verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
force <- Arguments$getLogical(force);
verbose && enter(verbose, "Fitting one sample across multiple sources");
if (is.character(dfList)) {
verbose && enter(verbose, "Extracting list of input data files");
name <- dfList;
verbose && cat(verbose, "Sample name: ", name);
dsList <- getInputDataSets(this);
dfList <- extractTupleOfDataFiles(this, dsList=dsList, name=name,
verbose=less(verbose, 1));
verbose && print(verbose, dfList);
verbose && exit(verbose);
}
nbrOfArrays <- length(dfList);
verbose && cat(verbose, "Number of arrays: ", nbrOfArrays);
df <- dfList[[1]];
name <- getName(df);
verbose && cat(verbose, "Sample name: ", name);
dfList <- NULL;
params <- getParameters(this, verbose=less(verbose, 1));
verbose && str(verbose, params);
subsetToFit <- params$subsetToFit;
align <- params$align;
targetDimension <- params$targetDimension;
pcEstimator <- getPrincipalCurveEstimator(this);
dsSmooth <- getSmoothedDataSets(this, verbose=less(verbose, 1));
dfSList <- extractTupleOfDataFiles(this, dsList=dsSmooth, name=name,
verbose=less(verbose, 1));
dsSmooth <- NULL;
verbose && str(verbose, dfSList);
keep <- sapply(dfSList, FUN=function(df) !identical(df, NA));
keep <- which(keep);
dfSList <- dfSList[keep];
fullnames <- sapply(dfSList, getFullName);
fullnames <- unname(fullnames);
chipTypes <- sapply(dfSList, getChipType);
chipTypes <- unname(chipTypes);
checkSums <- sapply(dfSList, getChecksum);
checkSums <- unname(checkSums);
df <- params$pcBandwidth;
key <- list(method="fitOne", class="MultiSourceCopyNumberNormalization",
fullnames=fullnames, chipTypes=chipTypes, checkSums=checkSums,
subsetToFit=subsetToFit, align=align, df=df,
.retData=.retData, version="2010-01-14");
dirs <- c("aroma.cn", "MultiSourceCopyNumberNormalization");
if (!force) {
fit <- loadCache(key=key, dirs=dirs);
if (!is.null(fit)) {
verbose && cat(verbose, "Cached results found.");
verbose && exit(verbose);
return(fit);
}
}
verbose && enter(verbose, "Extracting data");
verbose && cat(verbose, "Subset of units used for fitting:");
verbose && str(verbose, subsetToFit);
Y <- lapply(dfSList, FUN=function(df) {
extractMatrix(df, rows=subsetToFit, column=1, drop=TRUE);
});
subsetToFit <- NULL;
Y <- as.data.frame(Y);
colnames(Y) <- NULL;
Y <- as.matrix(Y);
dimnames(Y) <- NULL;
dim <- dim(Y);
verbose && str(verbose, Y);
verbose && summary(verbose, Y);
verbose && exit(verbose);
verbose && enter(verbose, "Fitting across-source normalization function");
verbose && cat(verbose, "Estimator for principal curves:");
verbose && str(verbose, pcEstimator);
t <- system.time({
fit <- pcEstimator(Y);
});
verbose && cat(verbose, "Fitting time:");
verbose && print(verbose, t);
rho <- cor(fit$lambda, Y[,1], use="complete.obs");
flip <- (rho < 0);
if (flip) {
fit$lambda <- max(fit$lambda, na.rm=TRUE) - fit$lambda;
verbose && cat(verbose, "Direction of fitted curve ('lambda') was flipped such that it increases with the signal.");
}
verbose && printf(verbose, "Processing time: %.1f seconds\n",
as.double(t[3L]));
if (.retData) {
fit$Y <- Y;
}
Y <- NULL;
if (!identical(dim(fit$s), dim)) {
throw("Internal error: The fitted data has a different dimension that the input data: ",
paste(dim(fit$s), collapse="x"), " != ", paste(dim, collapse="x"));
}
verbose && str(verbose, fit);
verbose && exit(verbose);
targetChannel <- NULL;
if (!is.null(targetChannel)) {
}
if (is.element(align, c("byChromosome"))) {
verbose && enter(verbose, "Calculating shift for each chromosome");
verbose && cat(verbose, "align=", align);
ugpS <- getAromaUgpFile(dfSList[[1L]]);
chromosomes <- getChromosomes(ugpS);
verbose && cat(verbose, "Chromosomes: ", seqToHumanReadable(chromosomes));
verbose && enter(verbose, "Grouping units by chromosome");
values <- ugpS[,1L,drop=TRUE];
unitsS <- list();
for (chr in chromosomes) {
chrStr <- sprintf("Chr%02d", chr);
unitsS[[chrStr]] <- which(values == chr);
}
values <- NULL;
ns <- sapply(unitsS, FUN=length);
unitsS <- unitsS[ns > 5L];
verbose && str(verbose, unitsS);
verbose && exit(verbose);
verbose && enter(verbose, "Allocating matrix for smooth data");
dfS <- dfSList[[1L]];
naValue <- as.double(NA);
YSN <- matrix(naValue, nrow=nbrOfUnits(dfS), ncol=nbrOfArrays);
verbose && cat(verbose, "RAM: ", objectSize(YSN), " bytes");
verbose && exit(verbose);
verbose && enter(verbose, "Loading and backtransforming *smoothed* data");
for (kk in seq_len(nbrOfArrays)) {
dfS <- dfSList[[kk]];
verbose && enter(verbose, sprintf("Source
getFullName(dfS), nbrOfArrays));
verbose && enter(verbose, "Loading smoothed data");
yS <- extractMatrix(dfS, column=1, drop=TRUE);
verbose && str(verbose, yS);
verbose && exit(verbose);
verbose && enter(verbose, "Backtransforming smoothed data");
ySN <- .backtransformPrincipalCurve(yS, fit=fit, dimensions=kk,
targetDimension=targetDimension);
ySN <- ySN[,1L,drop=TRUE];
verbose && str(verbose, ySN);
yS <- NULL;
verbose && exit(verbose);
YSN[,kk] <- ySN;
ySN <- NULL;
verbose && exit(verbose);
}
verbose && summary(verbose, YSN);
verbose && str(verbose, YSN);
verbose && exit(verbose);
verbose && enter(verbose, "Calculating shifts chromosome by chromosome");
nbrOfChromosomes <- length(unitsS);
naValue <- as.double(NA);
mus <- matrix(naValue, nrow=nbrOfChromosomes, ncol=nbrOfArrays);
rownames(mus) <- names(unitsS);
dmus <- mus;
for (chr in seq_len(nbrOfChromosomes)) {
chrStr <- sprintf("Chr%02d", chr);
verbose && enter(verbose, sprintf("Chromosome
chr, nbrOfChromosomes));
unitsCC <- unitsS[[chrStr]];
verbose && enter(verbose, "Extracting backtransformed *smoothed* data");
yList <- list();
for (kk in seq_len(nbrOfArrays)) {
yList[[kk]] <- YSN[unitsCC,kk,drop=TRUE];
}
verbose && str(verbose, yList);
verbose && exit(verbose);
verbose && enter(verbose, "Estimating averages and shifts toward targetDimension");
verbose && cat(verbose, "Target dimension: ", targetDimension);
yNList <- .normalizeDifferencesToAverage(yList, baseline=targetDimension);
alignFit <- attr(yNList, "fit");
verbose && str(verbose, alignFit);
verbose && exit(verbose);
mus[chrStr,] <- alignFit$mus;
dmus[chrStr,] <- alignFit$deltas;
alignFit <- yList <- yNList <- NULL;
verbose && exit(verbose);
}
verbose && exit(verbose);
YSN <- NULL;
verbose && cat(verbose, "Overall averages:");
verbose && print(verbose, mus);
verbose && cat(verbose, "Overall shifts:");
verbose && print(verbose, dmus);
verbose && cat(verbose, "Target dimension: ", targetDimension);
verbose && exit(verbose);
fit$alignParams <- list(
dmus=dmus,
mus=mus
);
verbose && exit(verbose);
}
verbose && str(verbose, fit);
saveCache(key=key, dirs=dirs, fit);
fit;
}, protected=TRUE)
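# normalizeOne(): backtransform one sample's full-resolution signals toward
# the target dimension using the fitted curve, subtract the per-chromosome
# shifts when align == "byChromosome", and write each normalized file
# atomically through a *.tmp file that is renamed on success.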
setMethodS3("normalizeOne", "MultiSourceCopyNumberNormalization", function(this, dfList, fit, ..., force=FALSE, verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
force <- Arguments$getLogical(force);
verbose && enter(verbose, "Normalize one sample across multiple sources");
df <- dfList[[1]];
name <- getName(df);
verbose && cat(verbose, "Sample name: ", name);
params <- getParameters(this, verbose=less(verbose, 1));
verbose && str(verbose, params);
subsetToUpdate <- params$subsetToUpdate;
targetDimension <- params$targetDimension;
align <- params$align;
outputPaths <- getOutputPaths(this);
if (is.element(align, c("byChromosome"))) {
verbose && enter(verbose, "Estimate alignment parameters");
verbose && cat(verbose, "align=", align);
verbose && enter(verbose, "Extracting align-by-chromosome parameters");
alignParams <- fit$alignParams;
verbose && str(verbose, alignParams);
if (is.null(alignParams)) {
throw("Internal error: No shift estimates found.");
}
dmus <- alignParams$dmus;
verbose && print(verbose, dmus);
if (is.null(dmus)) {
throw("Internal error: No shift estimates found.");
}
verbose && exit(verbose);
verbose && exit(verbose);
}
verbose && enter(verbose, "Normalizing source by source (array by array)");
verbose && cat(verbose, "Units to be updated:");
verbose && str(verbose, subsetToUpdate);
nbrOfArrays <- length(dfList);
dfNList <- vector("list", length=nbrOfArrays);
for (kk in seq_len(nbrOfArrays)) {
df <- dfList[[kk]];
verbose && enter(verbose, sprintf("Source
getFullName(df), nbrOfArrays));
outputPath <- outputPaths[[kk]];
filename <- getFilename(df);
pathname <- Arguments$getWritablePathname(filename, path=outputPath, ...);
if (!force && isFile(pathname)) {
verbose && cat(verbose, "Already normalized.");
dfN <- newInstance(df, pathname);
} else {
verbose && enter(verbose, "Normalizing");
verbose && enter(verbose, "Reading data");
y <- extractMatrix(df, rows=subsetToUpdate, column=1, drop=TRUE);
verbose && str(verbose, y);
verbose && exit(verbose);
verbose && enter(verbose, "Backtransforming data");
yN <- .backtransformPrincipalCurve(y, fit=fit, dimensions=kk,
targetDimension=targetDimension);
yN <- yN[,1L,drop=TRUE];
verbose && str(verbose, yN);
verbose && exit(verbose);
if (is.element(align, c("byChromosome"))) {
verbose && enter(verbose, "Align genomic signals");
verbose && cat(verbose, "align=", align);
verbose && enter(verbose, "Aligning signals for each chromosome");
ugp <- getAromaUgpFile(df);
chromosomes <- getChromosomes(ugp);
verbose && cat(verbose, "Chromosomes: ", seqToHumanReadable(chromosomes));
verbose && enter(verbose, "Grouping units by chromosome");
values <- ugp[subsetToUpdate,1L,drop=TRUE];
stopifnot(length(values) == length(yN));
listOfUnits <- list();
for (chr in chromosomes) {
chrStr <- sprintf("Chr%02d", chr);
subset <- which(values == chr);
listOfUnits[[chrStr]] <- subset;
}
values <- NULL;
verbose && str(verbose, listOfUnits);
ns <- sapply(listOfUnits, FUN=length);
listOfUnits <- listOfUnits[ns > 5L];
verbose && str(verbose, listOfUnits);
verbose && exit(verbose);
idxs <- match(names(listOfUnits), rownames(dmus));
if (anyMissing(idxs)) {
verbose && cat(verbose, "Shift estimates are not available for some chromosomes, which are skipped:");
verbose && print(verbose, names(listOfUnits[!is.finite(idxs)]));
listOfUnits <- listOfUnits[is.finite(idxs)];
}
for (chrStr in names(listOfUnits)) {
subset <- listOfUnits[[chrStr]];
dmu <- dmus[chrStr,kk];
yN[subset] <- yN[subset] - dmu;
}
listOfUnits <- NULL;
verbose && str(verbose, yN);
verbose && exit(verbose);
verbose && exit(verbose);
}
verbose && enter(verbose, "Storing normalized data");
verbose && cat(verbose, "Output pathname: ", pathname);
verbose && enter(verbose, "Create output file");
pathnameT <- sprintf("%s.tmp", pathname);
verbose && cat(verbose, "Temporary pathname: ", pathnameT);
pathnameT <- Arguments$getWritablePathname(pathnameT, mustNotExist=TRUE);
file.copy(getPathname(df), pathnameT);
dfN <- newInstance(df, pathnameT);
verbose && print(verbose, dfN);
verbose && exit(verbose);
verbose && enter(verbose, "Writing data");
if (is.null(subsetToUpdate)) {
dfN[,1L] <- yN;
} else {
dfN[subsetToUpdate,1L] <- yN;
}
yN <- NULL;
verbose && exit(verbose);
verbose && enter(verbose, "Updating file footer");
footer <- readFooter(dfN);
srcFile <- df;
footer$srcFile <- list(
filename = getFilename(srcFile),
filesize = getFileSize(srcFile),
checksum = getChecksum(srcFile)
);
pkg <- aroma.cn;
footer$createdBy <- list(
class=class(this)[1],
package = getName(pkg),
version = getVersion(pkg)
);
footer$createdOn <- format(Sys.time(), "%Y%m%d %H:%M:%S", usetz=TRUE);
writeFooter(dfN, footer);
verbose && exit(verbose);
verbose && enter(verbose, "Renaming temporary filename");
file.rename(pathnameT, pathname);
if (isFile(pathnameT) || !isFile(pathname)) {
throw("Failed to rename temporary file: ",
pathnameT, " -> ", pathname);
}
pathnameT <- NULL;
verbose && exit(verbose);
dfN <- newInstance(df, pathname);
pathname <- NULL;
verbose && exit(verbose);
verbose && exit(verbose);
}
verbose && print(verbose, dfN);
dfNList[[kk]] <- dfN;
dfN <- NULL;
verbose && exit(verbose);
}
verbose && print(verbose, dfNList);
subsetToUpdate <- NULL;
verbose && exit(verbose);
invisible(dfNList);
}, protected=TRUE)
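# process(): main entry point. For every sample name found across the input
# data sets, fit (fitOne) and normalize (normalizeOne), skipping samples whose
# output files already exist unless force = TRUE.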
setMethodS3("process", "MultiSourceCopyNumberNormalization", function(this, ..., force=FALSE, verbose=FALSE) {
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
verbose && enter(verbose, "Multi-source normalize all samples");
allNames <- getAllNames(this);
nbrOfSamples <- length(allNames);
verbose && cat(verbose, "Number of unique samples in all sets: ",
nbrOfSamples);
verbose && str(verbose, allNames);
dsList <- getInputDataSets(this);
outputPaths <- getOutputPaths(this);
verbose && enter(verbose, "Processing each array");
for (kk in seq_len(nbrOfSamples)) {
name <- allNames[kk];
verbose && enter(verbose, sprintf("Sample
kk, name, nbrOfSamples));
verbose && enter(verbose, "Identifying source data files");
dfList <- extractTupleOfDataFiles(this, dsList=dsList, name=name,
verbose=less(verbose, 1));
verbose && print(verbose, dfList);
verbose && exit(verbose);
verbose && enter(verbose, "Check if all arrays are already normalized");
isDone <- TRUE;
for (jj in seq_along(dfList)) {
df <- dfList[[jj]];
outputPath <- outputPaths[[jj]];
filename <- getFilename(df);
pathname <- Arguments$getWritablePathname(filename, path=outputPath, ...);
isDone <- isDone && isFile(pathname);
if (!isDone)
break;
}
verbose && cat(verbose, "Is done: ", isDone);
verbose && exit(verbose);
if (!force && isDone) {
verbose && cat(verbose, "Normalized data files already exist");
} else {
verbose && enter(verbose, "Fitting model");
fit <- fitOne(this, dfList=dfList, ..., force=force,
verbose=less(verbose, 1))
verbose && str(verbose, fit);
verbose && exit(verbose);
verbose && enter(verbose, "Normalizing");
dfNList <- normalizeOne(this, dfList=dfList, fit=fit, ...,
force=force, verbose=less(verbose, 1));
fit <- NULL;
verbose && print(verbose, dfNList);
if (length(dfNList) != length(dfList)) {
throw("The number of normalized arrays does not match the number of source arrays: ", length(dfNList), " != ", length(dfList));
}
verbose && exit(verbose);
dfNList <- NULL;
}
dfList <- NULL;
verbose && exit(verbose);
}
verbose && exit(verbose);
dsList <- NULL;
outputDataSets <- getOutputDataSets(this, force=TRUE, verbose=less(verbose, 1));
verbose && exit(verbose);
invisible(outputDataSets);
}) |
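# Compact display of a call object: deparse it and keep at most three lines,
# appending "..." when the call is truncated. Illustrative use (hypothetical
# call):
#   printCall(quote(lm(y ~ x1 + x2 + x3, data = d, weights = w, subset = s)))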
printCall = function (call) {
d <- deparse(call)
if (length(d) <= 3) {
paste(d, sep = "\n", collapse = "\n")
} else {
d <- d[1:3]
d[3] <- paste0(d[3], "...")
paste(d, sep = "\n", collapse = "\n")
}
} |
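# Hobbs weed infestation problem: a classic ill-conditioned 3-parameter
# logistic-growth least-squares test case. hobbs.f is the sum of squared
# residuals, hobbs.res the residual vector, hobbs.jac the analytic Jacobian
# and hobbs.g the gradient 2 * t(J) %*% r.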
hobbs.f<- function(x){
if (abs(12*x[3]) > 500) {
fbad<-.Machine$double.xmax
return(fbad)
}
res<-hobbs.res(x)
f<-sum(res*res)
}
hobbs.res<-function(x){
if(length(x) != 3) stop("hobbs.res -- parameter vector n!=3")
y<-c(5.308, 7.24, 9.638, 12.866, 17.069, 23.192, 31.443,
38.558, 50.156, 62.948, 75.995, 91.972)
t<-1:12
if(abs(12*x[3])>50) {
res<-rep(Inf,12)
} else {
res<-x[1]/(1+x[2]*exp(-x[3]*t)) - y
  }
  res
}
hobbs.jac<-function(x){
jj<-matrix(0.0, 12, 3)
t<-1:12
yy<-exp(-x[3]*t)
zz<-1.0/(1+x[2]*yy)
jj[t,1] <- zz
jj[t,2] <- -x[1]*zz*zz*yy
jj[t,3] <- x[1]*zz*zz*yy*x[2]*t
return(jj)
}
hobbs.g<-function(x){
jj<-hobbs.jac(x)
res<-hobbs.res(x)
gg<-as.vector(2.*t(jj) %*% res)
return(gg)
}
require(optimr)
start <- c(300, 50, .3)
msg <- paste("opm attempt UNSCALED")
cat(msg,"\n")
mydou <- opm(start, hobbs.f, hobbs.g, method="ALL", control=list(trace=0))
summary(mydou, order=value)
msg <- paste("opm attempt SCALED")
cat(msg,"\n")
mydos <- opm(start, hobbs.f, hobbs.g, method="ALL", control=list(trace=0, parscale=c(100,10,.1)))
summary(mydos, order=value)
start <- c(1, 1, 1)
msg <- paste("Bad start: opm attempt UNSCALED")
cat(msg,"\n")
mydoux <- opm(start, hobbs.f, hobbs.g, method="ALL", control=list(trace=0))
summary(mydoux, order=value)
msg <- paste("Bad start: opm attempt SCALED")
cat(msg,"\n")
mydosx <- opm(start, hobbs.f, hobbs.g, method="ALL", control=list(trace=0, parscale=c(100,10,.1)))
summary(mydosx, order=value) |
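# testthat suite for file_stream objects: construction from counts, lists and
# character vectors; file extensions, relocation, lockers, output formats and
# the format-specific write_stream() methods (rds, fst, qs, feather).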
library(testthat)
context("test-stream")
test_that("create new stream", {
x <- new_stream(5)
expect_equal(length(x), 5)
expect_is(x, "file_stream")
expect_is(x, "list")
expect_is(x[[1]], "list")
expect_equal(x[[3]]$file, "3-5")
})
test_that("create new stream from list", {
a <- list(mtcars, mtcars, mtcars)
x <- new_stream(a)
expect_equal(length(x), 3)
expect_identical(x[[2]]$x, mtcars)
expect_is(x, "file_stream")
expect_is(x, "list")
expect_is(x[[1]], "list")
expect_equal(x[[1]]$file, "1-3")
})
test_that("create new stream from character", {
x <- new_stream(letters[1:5])
expect_equal(length(x), 5)
expect_identical(x[[4]]$x, "d")
expect_is(x, "file_stream")
expect_is(x, "list")
expect_is(x[[1]], "list")
expect_equal(x[[1]]$file, "1-5")
})
test_that("create new stream with ext", {
x <- new_stream(5, ext = ".bar")
expect_equal(x[[3]]$file, "3-5.bar")
})
test_that("add ext to stream ", {
x <- new_stream(11)
x <- ext_stream(x, ".feather")
expect_equal(x[[2]]$file, "02-11.feather")
})
test_that("relocate stream ", {
x <- new_stream(2)
x <- locate_stream(x, temp_ds("kyle"))
test <- basename(dirname(x[[1]]$file))
expect_equal(test, "kyle")
})
test_that("relocate and initialize stream", {
x <- new_stream(2)
dir <- file.path(tempdir(), "test-relocate-init")
if(dir.exists(dir)) unlink(dir, recursive = TRUE)
expect_false(dir.exists(dir))
x <- locate_stream(x, where = dir, initialize = TRUE)
expect_true(dir.exists(dir))
expect_true(file.exists(file.path(dir, mrgsim.parallel:::.locker_file_name)))
y <- new_stream(3)
dir <- file.path(tempdir(), "test-relocate-init-2")
dir.create(dir)
expect_error(
locate_stream(y, where = dir, initialize = TRUE),
regexp="the dataset directory exists, but doesn't appear"
)
unlink(dir, recursive = TRUE)
})
test_that("create new stream with locker", {
unlink(temp_ds("foo"), recursive = TRUE)
x <- new_stream(5, locker = temp_ds("foo"))
expect_is(x, "locker_stream")
expect_is(x, "file_stream")
expect_is(x, "list")
tst <- basename(dirname(x[[3]]$file))
expect_equal(tst, "foo")
})
test_that("create new stream with locker and format", {
x <- new_stream(5, locker = temp_ds("bar"), format = "fst")
expect_is(x, "locker_stream")
expect_is(x, "file_stream")
expect_is(x, "list")
expect_is(x[[1]], "stream_format_fst")
expect_is(x[[1]], "list")
})
test_that("add format to stream with locker", {
x <- new_stream(5, locker = temp_ds("bar"))
x <- format_stream(x, "rds")
expect_is(x, "locker_stream")
expect_is(x, "file_stream")
expect_is(x, "list")
expect_is(x[[1]], "stream_format_rds")
expect_is(x[[1]], "list")
})
test_that("add format to stream without locker", {
x <- new_stream(5)
expect_warning(
format_stream(x, "rds", warn = TRUE),
regexp = "format was set, but file name [1] has no directory specified.",
fixed = TRUE
)
})
test_that("writer function - rds", {
unlink(temp_ds("write/rds"), recursive = TRUE)
x <- new_stream(1, locker = temp_ds("write/rds"), format = "rds")
expect_true(write_stream(x[[1]], mtcars))
mt <- readRDS(x[[1]]$file)
expect_equal(mt, mtcars)
})
test_that("writer function - fst", {
skip_if_not_installed("fst")
expect_true(mrgsim.parallel:::fst_installed())
unlink(temp_ds("write/fst"), recursive = TRUE)
x <- new_stream(1, locker = temp_ds("write/fst"), format = "fst")
expect_true(write_stream(x[[1]], mtcars))
mt <- fst::read_fst(x[[1]]$file)
expect_equivalent(mt, mtcars)
expect_error(
write_stream(x[[1]], list(mtcars)),
regexp="must be a data.frame"
)
})
test_that("writer function - qs", {
skip_if_not_installed("qs")
expect_true(mrgsim.parallel:::qs_installed())
unlink(temp_ds("write/qs"), recursive = TRUE)
x <- new_stream(1, locker = temp_ds("write/qs"), format = "qs")
expect_true(write_stream(x[[1]], mtcars))
mt <- qs::qread(x[[1]]$file)
expect_equivalent(mt, mtcars)
})
test_that("writer function - feather", {
skip_if_not_installed("arrow")
expect_true(mrgsim.parallel:::arrow_installed())
unlink(temp_ds("write/arrow"), recursive = TRUE)
x <- new_stream(1, locker = temp_ds("write/arrow"), format = "feather")
expect_true(write_stream(x[[1]], mtcars))
mt <- arrow::read_feather(x[[1]]$file)
expect_equivalent(mt, mtcars)
expect_error(
write_stream(x[[1]], list(mtcars)),
regexp="must be a data.frame"
)
})
test_that("writer function - default", {
unlink(temp_ds("write/default"), recursive = TRUE)
x <- new_stream(1, locker = temp_ds("write/default"), ext = "rds")
expect_false(write_stream(x[[1]], mtcars))
}) |
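# arith_wrapper(): shared back end for element-wise NetCDF arithmetic
# (op = 1 add, 2 subtract, 3 multiply, 4 divide) on var1/var2 from
# infile1/infile2, processed one time step at a time. The four cases cover
# equal-length time series, two single steps, and a single step recycled
# against a series (in either order).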
arith_wrapper <- function(op, var1, var2, infile1, infile2, outfile, nc34,
overwrite, verbose, nc1 = NULL, nc2 = NULL) {
if (is.null(nc1)) check_variable(var1)
if (is.null(nc2)) check_variable(var2)
check_infile(infile1)
check_infile(infile2)
check_outfile(outfile)
outfile <- correct_filename(outfile)
check_overwrite(outfile, overwrite)
check_nc_version(nc34)
calc_time_start <- Sys.time()
file_data1 <- read_file(infile1, var1, nc = nc1)
file_data2 <- read_file(infile2, var2, nc = nc2)
if (!op == 4 && exists(file_data1$variable$prec) && exists(file_data2$variable$prec)) {
prec <- PRECISIONS_VAR
file_data1$variable$prec <- prec[max(c(which(file_data1$variable$prec == prec),
which(file_data2$variable$prec == prec)))]
} else {
file_data1$variable$prec <- "float"
}
if (file_data1$grid$is_regular != file_data2$grid$is_regular) {
stop("Please use infiles with the same projection ore use the remap operator")
}
if (!(length(file_data1$dimension_data$x) == length(file_data2$dimension_data$x) &&
length(file_data1$dimension_data$y) == length(file_data2$dimension_data$y))) {
stop("Dimensions of infiles do not match!")
}
result <- array(file_data1$variable$attributes$missing_value,
dim = c(length(file_data1$dimension_data$x),
length(file_data1$dimension_data$y),
1))
vars_data <- list(result = result)
if (length(file_data1$dimension_data$t) > 1 && length(file_data2$dimension_data$t) > 1 &&
length(file_data1$dimension_data$t) != length(file_data2$dimension_data$t)) {
stop("Uncompatible time lengths!")
}
time_len1 <- length(file_data1$dimension_data$t)
time_len2 <- length(file_data2$dimension_data$t)
if (time_len1 == time_len2 && time_len1 > 1) {
case <- 1
time <- file_data1$dimension_data$t
}
if (time_len1 == 1 && time_len2 == 1) {
case <- 2
time <- file_data1$dimension_data$t
}
if (time_len1 == 1 && time_len2 > 1) {
case <- 3
time <- file_data2$dimension_data$t
}
if (time_len1 > 1 && time_len2 == 1) {
case <- 4
time <- file_data1$dimension_data$t
}
time_len <- length(time)
nc_format <- get_nc_version(nc34)
cmsaf_info <- switch(op,
paste0("cmsafops::cmsaf.add for variable ", var1),
paste0("cmsafops::cmsaf.sub for variable ", var1),
paste0("cmsafops::cmsaf.mul for variable ", var1),
paste0("cmsafops::cmsaf.div for variable ", var1)
)
global_att_list <- names(file_data1$global_att)
global_att_list <- global_att_list[toupper(global_att_list) %in% toupper(GLOBAL_ATT_DEFAULT)]
global_attributes <- file_data1$global_att[global_att_list]
dims <- define_dims(file_data1$grid$is_regular,
file_data1$dimension_data$x,
file_data1$dimension_data$y,
0,
NB2,
file_data1$time_info$units,
with_time_bnds = FALSE
)
vars <- define_vars(file_data1$variable, dims, nc_format$compression, with_time_bnds = FALSE)
write_output_file(
outfile,
nc_format$force_v4,
vars,
vars_data,
file_data1$variable$name,
file_data1$grid$vars, file_data1$grid$vars_data,
cmsaf_info,
file_data1$time_info$calendar,
file_data1$variable$attributes,
global_attributes,
with_time_bnds = FALSE
)
if (!is.null(nc1)) nc_in1 <- nc1
else nc_in1 <- nc_open(infile1)
if (!is.null(nc2)) nc_in2 <- nc2
else nc_in2 <- nc_open(infile2)
nc_out <- nc_open(outfile, write = TRUE)
switch(case,
{
for (i in seq_len(time_len)) {
dum_dat1 <- ncvar_get(nc_in1, file_data1$variable$name,
start = c(1, 1, i), count = c(-1, -1, 1))
dum_dat2 <- ncvar_get(nc_in2, file_data2$variable$name,
start = c(1, 1, i), count = c(-1, -1, 1))
dum_data <- switch(
op,
dum_dat1 + dum_dat2,
dum_dat1 - dum_dat2,
dum_dat1 * dum_dat2,
{
dum_dat1[dum_dat1 == 0] <- NA
dum_dat2[dum_dat2 == 0] <- NA
dum_dat1 / dum_dat2
}
)
dum_data[is.na(dum_data)] <- file_data1$variable$attributes$missing_value
ncvar_put(nc_out, vars[[1]], dum_data, start = c(1, 1, i),
count = c(-1, -1, 1))
ncvar_put(nc_out, dims$t, time[i], start = i, count = 1)
}
},
{
dum_dat1 <- ncvar_get(nc_in1, file_data1$variable$name,
start = c(1, 1, 1), count = c(-1, -1, 1))
dum_dat2 <- ncvar_get(nc_in2, file_data2$variable$name,
start = c(1, 1, 1), count = c(-1, -1, 1))
dum_data <- switch(
op,
dum_dat1 + dum_dat2,
dum_dat1 - dum_dat2,
dum_dat1 * dum_dat2,
{
dum_dat1[dum_dat1 == 0] <- NA
dum_dat2[dum_dat2 == 0] <- NA
dum_dat1 / dum_dat2
}
)
dum_data[is.na(dum_data)] <- file_data1$variable$attributes$missing_value
ncvar_put(nc_out, vars[[1]], dum_data, start = c(1, 1, 1),
count = c(-1, -1, 1))
ncvar_put(nc_out, dims$t, time[1], start = 1, count = 1)
},
{
dum_dat1 <- ncvar_get(nc_in1, file_data1$variable$name,
start = c(1, 1, 1), count = c(-1, -1, 1))
for (i in seq_len(time_len)) {
dum_dat2 <- ncvar_get(nc_in2, file_data2$variable$name,
start = c(1, 1, i), count = c(-1, -1, 1))
dum_data <- switch(
op,
dum_dat1 + dum_dat2,
dum_dat1 - dum_dat2,
dum_dat1 * dum_dat2,
{
dum_dat1[dum_dat1 == 0] <- NA
dum_dat2[dum_dat2 == 0] <- NA
dum_dat1 / dum_dat2
}
)
dum_data[is.na(dum_data)] <- file_data1$variable$attributes$missing_value
ncvar_put(nc_out, vars[[1]], dum_data, start = c(1, 1, i),
count = c(-1, -1, 1))
ncvar_put(nc_out, dims$t, time[i], start = i, count = 1)
}
},
{
dum_dat2 <- ncvar_get(nc_in2, file_data2$variable$name,
start = c(1, 1, 1), count = c(-1, -1, 1))
for (i in seq_len(time_len)) {
dum_dat1 <- ncvar_get(nc_in1, file_data1$variable$name,
start = c(1, 1, i), count = c(-1, -1, 1))
dum_data <- switch(
op,
dum_dat1 + dum_dat2,
dum_dat1 - dum_dat2,
dum_dat1 * dum_dat2,
{
dum_dat1[dum_dat1 == 0] <- NA
dum_dat2[dum_dat2 == 0] <- NA
dum_dat1 / dum_dat2
}
)
dum_data[is.na(dum_data)] <- file_data1$variable$attributes$missing_value
ncvar_put(nc_out, vars[[1]], dum_data, start = c(1, 1, i), count = c(-1, -1, 1))
ncvar_put(nc_out, dims$t, time[i], start = i, count = 1)
}
}
)
if (is.null(nc1)) nc_close(nc_in1)
if (is.null(nc2)) nc_close(nc_in2)
nc_close(nc_out)
calc_time_end <- Sys.time()
if (verbose) message(get_processing_time_string(calc_time_start, calc_time_end))
} |
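# Shiny server logic: renderUI widgets for species codes, date ranges, cruise
# numbers and truncation distance; suspendWhenHidden = FALSE forces hidden
# outputs to stay current.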
output$das_sighting_code_1_uiOut_select <- renderUI({
sp.mammals <- cruzSpeciesMammals()
sp.codes.list <- as.list(sp.mammals$Code)
names(sp.codes.list) <- paste(sp.mammals$Code, sp.mammals$Abbr, sep = " - ")
selectInput("das_sighting_code_1", tags$h5("Select mammal species"),
choices = sp.codes.list, multiple = TRUE,
selected = NULL)
})
outputOptions(output, "das_sighting_code_1_uiOut_select", suspendWhenHidden = FALSE)
output$das_sighting_code_2_uiOut_select <- renderUI({
sp.turtles <- cruzSpeciesTurtles()
sp.codes.list <- as.list(sp.turtles$Code)
names(sp.codes.list) <- paste(sp.turtles$Code, sp.turtles$Name_Scientific, sep = " - ")
selectInput("das_sighting_code_2", tags$h5("Select turtle species"),
choices = sp.codes.list, multiple = TRUE, selected = NULL)
})
outputOptions(output, "das_sighting_code_2_uiOut_select", suspendWhenHidden = FALSE)
dateRange_min_max <- reactive({
x <- req(cruz.list$das.data)
input$das.file
min.date <- as.character(as.Date(min(x$Date, na.rm = T)) - 1)
max.date <- as.character(as.Date(max(x$Date, na.rm = T)) + 1)
c(min.date, max.date)
})
output$das_sight_dateRange_uiOut_date <- renderUI({
req(cruz.list$das.data)
dates <- dateRange_min_max()
dateRangeInput("das_sight_dateRange", label = tags$h5("Date range"),
start = dates[1], end = dates[2])
})
outputOptions(output, "das_sight_dateRange_uiOut_date", suspendWhenHidden = FALSE, priority = 3)
output$das_effort_dateRange_uiOut_date <- renderUI({
req(cruz.list$das.data)
dates <- dateRange_min_max()
dateRangeInput("das_effort_dateRange", label = tags$h5("Date range"),
start = dates[1], end = dates[2])
})
outputOptions(output, "das_effort_dateRange_uiOut_date", suspendWhenHidden = FALSE, priority = 3)
das_cruise_nums <- reactive({
x <- req(cruz.list$das.data)
input$das.file
unique(na.omit(x$Cruise))
})
output$das_sight_cruise_uiOut_select <- renderUI({
req(cruz.list$das.data)
cruises <- das_cruise_nums()
selectInput("das_sight_cruise", tags$h5("Cruise number(s)"),
choices = cruises, multiple = TRUE, selected = NULL)
})
outputOptions(output, "das_sight_cruise_uiOut_select", suspendWhenHidden = FALSE, priority = 3)
output$das_effort_cruise_uiOut_select <- renderUI({
req(cruz.list$das.data)
cruises <- das_cruise_nums()
selectInput("das_effort_cruise", tags$h5("Cruise number(s)"),
choices = cruises, multiple = TRUE, selected = NULL)
})
outputOptions(output, "das_effort_cruise_uiOut_select", suspendWhenHidden = FALSE, priority = 3)
output$das_sight_trunc_uiOut_numeric <- renderUI({
isolate(curr.value <- input$das_sight_trunc)
trunc.units <- input$das_sight_trunc_units
if(trunc.units == 1) widget.name <- "Truncation (km)"
if(trunc.units == 2) widget.name <- "Truncation (nmi)"
numericInput("das_sight_trunc", label = tags$h5(widget.name), value = curr.value)
})
outputOptions(output, "das_sight_trunc_uiOut_numeric", suspendWhenHidden = FALSE, priority = 3) |
"merror.pairs" <- function(df,labels=names(df))
{
pairs(df,xlim=range(df,na.rm=TRUE),ylim=range(df,na.rm=TRUE),
upper.panel=panel.merror,lower.panel=NULL,labels=labels)
} |
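# Sweave-generated appendix script ("anx4" figures, caschrono): simulate ARMA
# processes, compare empirical and theoretical ACF/PACF, and write every
# figure twice (pdf and postscript) before emitting the LaTeX
# \includegraphics line.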
owidth <- getOption("width")
options(width = 60, continue = "+ ", warn = -1)
.PngNo <- 0
nom.fich = "anx4-bitmap-"
require("caschrono")
require("timeSeries")
data("csdl")
aa <- returns(csdl, percentage = TRUE)
aab <- aa[complete.cases(aa) == TRUE,]
r.csdl <- zoo(aab, as.POSIXct(row.names(aab)))
r.danone <- r.csdl[, 3]
rdt2 <- (r.danone-mean(r.danone))^2
a0 <- Box.test.2(r.danone, nlag = c(3, 6, 9, 12),
type = "Ljung-Box", decim = 4)
a1 <- Box.test.2(r.danone[1:600], nlag = c(3, 6, 9, 12),
type = "Ljung-Box", decim = 4)
a2 <- Box.test.2(rdt2[1:600], nlag = c(3, 6, 9, 12),
type = "Ljung-Box", decim = 4)
a12 <- cbind(a0, a1[, 2], a2[, 2])
colnames(a12) <- c("Retard", "p-val. serie compl.",
"p-val. 600 obs.", "p-val. rdt carre")
require(xtable)
xtable(a12, caption = "Table a12 : test de blancheur du rendement
de Danone et de son carre.", label = "danoblanc",
digits = 4)
ARMAtoMA(-.7, 0, 10)
ARMAtoMA(c(rep(0, 11), -.7), 0, 25)
require(FitARMA)
ImpulseCoefficientsARMA(-.7, 0, lag.max = 10)
ImpulseCoefficientsARMA(c(rep(0, 11), -.7), 0, lag.max = 25)
set.seed(951)
ya <- arima.sim(n = 200, list(ma = c(-0.3, 0.6)), sd = sqrt(1.5))
.PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="")
pdf(file=paste(file,".pdf",sep=""), width = 6, height = 6, pointsize = 10, bg = "white")
set.seed(951)
ya <- arima.sim(n = 200, list(ma = c(-0.3, 0.6)), sd = sqrt(1.5))
titre <- "MA(2)"
plotacfthemp(ya, ma = c(-0.3, 0.6), lag.max = 20)
dev.null <- dev.off()
postscript(file=paste(file,".ps",sep=""), width = 6, height = 6, pointsize = 10, bg = "white",horizontal= FALSE,paper="special")
set.seed(951)
ya <- arima.sim(n = 200, list(ma = c(-0.3, 0.6)), sd = sqrt(1.5))
titre <- "MA(2)"
plotacfthemp(ya, ma = c(-0.3, 0.6), lag.max = 20)
dev.null <- dev.off()
cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="")
require("forecast")
(mod.ma2 = Arima(ya, order = c(0, 0,2), include.mean = FALSE))
set.seed(5419)
n2 <- 210
yb <- arima.sim(n = 200, list(ar = -0.8), sd = sqrt(1.5))
yb <- yb - 10
.PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="")
pdf(file=paste(file,".pdf",sep=""), width = 7, height = 7, pointsize = 12, bg = "white")
set.seed(5419)
n2 <- 210
yb <- arima.sim(n = 200, list(ar = -0.8), sd = sqrt(1.5))
yb <- yb - 10
plotacfthemp(yb, ar = -0.8, lag.max = 20)
dev.null <- dev.off()
postscript(file=paste(file,".ps",sep=""), width = 7, height = 7, pointsize = 12, bg = "white",horizontal= FALSE,paper="special")
set.seed(5419)
n2 <- 210
yb <- arima.sim(n = 200, list(ar = -0.8), sd = sqrt(1.5))
yb <- yb - 10
plotacfthemp(yb, ar = -0.8, lag.max = 20)
dev.null <- dev.off()
cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="")
(mod12 = Arima(yb, order = c(1, 0, 0)))
require(FitARMA)
set.seed(51)
nsim <- 100
nobs <- 200
nsim <- 50
nlag <- 20
y.mat <- matrix(0, nrow = nobs, ncol = nsim)
facp.mat <- matrix(0, nrow = nlag, ncol = nsim)
y.mat <- matrix(0, nrow = 200, ncol = nsim)
facp.mat <- matrix(0, nrow = nlag, ncol = nsim)
for(isim in 1:nsim){
y.mat[, isim] <- arima.sim(n = nobs,
list(ar = c(-0.7, 0.2)),
sd = sqrt(2))
facp.mat[, isim] = pacf(y.mat[,isim], 20, plot = FALSE)$acf
}
aa <- t(apply(facp.mat,1, "quantile", probs = c(0.25, .75)))
theo <- TacvfARMA(phi = c(-.7, .2), lag.max = 20)
pacf.theo <- PacfDL(theo/theo[1], LinearPredictor = TRUE)$Pacf
binf <- qnorm(.25)/nobs^.5
bsup <- qnorm(.75)/nobs^.5
aaa <- cbind(aa, pacf.theo, binf, bsup)
.PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="")
pdf(file=paste(file,".pdf",sep=""), width = 6, height = 6, pointsize = 10, bg = "white")
matplot(1:20, aaa, type = "l", ylab = "PACF",
xlab = "retard", col = "black")
legend("topright", paste("nombre de simulations : ",
as.character(nsim)))
dev.null <- dev.off()
postscript(file=paste(file,".ps",sep=""), width = 6, height = 6, pointsize = 10, bg = "white",horizontal= FALSE,paper="special")
matplot(1:20, aaa, type = "l", ylab = "PACF",
xlab = "retard", col = "black")
legend("topright", paste("nombre de simulations : ",
as.character(nsim)))
dev.null <- dev.off()
cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="")
set.seed(123)
y1 <- arima.sim(n = 100, list(ar = -.7),
sd = sqrt(4))
y2 <- arima.sim(n = 100, list(ar = c(rep(0, 11), -.7)),
sd = sqrt(4))
(mod2 <- Arima(y1, order = c(0, 0, 1), include.mean = FALSE))
aa <- Box.test.2(residuals(mod2), nlag = c(3, 6, 9, 12),
type = "Ljung-Box",
decim = 4, fitdf = 1)
colnames(aa) <- c("Retard", "p-val.")
t(aa)
theo <- TacvfARMA(theta = c(.3, -.6), lag.max = 20)
(acf.theo <- theo[-1]/theo[1])
(pacf.theo <- PacfDL(theo/theo[1], LinearPredictor = TRUE)$Pacf)
set.seed(12)
y <- arima.sim(n = 200, list(ma = c(-0.3, .6)), sd = sqrt(1.5))
(acf.emp <- acf(y, 20, plot = FALSE)$acf[-1])
(pacf.emp <- pacf(y, 20, plot = FALSE)$acf)
theo <- TacvfARMA(phi = -.8, lag.max = 20)
(acf.theo <- theo[-1]/theo[1])
(pacf.theo <- PacfDL(theo/theo[1], LinearPredictor = TRUE)$Pacf)
set.seed(23)
y <- arima.sim(n = 200, list(ar = -.8),
sd = sqrt(1.5)) - 10
(acf.emp <- acf(y, 20, plot = FALSE)$acf[-1])
(pacf.emp <- pacf(y, 20, plot = FALSE)$acf)
set.seed(4123)
yc <- arima.sim(n = 200, list(ar = -0.8, ma = c(-0.3, 0.6)),
sd = sqrt(1.5)) - 10
acf.th <- ARMAacf(ar = -0.8, ma = c(-0.3, 0.6),
lag.max = 20, pacf = FALSE)
pacf.th <- ARMAacf(ar = -0.8, ma = c(-0.3, 0.6),
lag.max = 20, pacf = TRUE)
.PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="")
pdf(file=paste(file,".pdf",sep=""), width = 7, height = 7, pointsize = 12, bg = "white")
plotacfthemp(yc, ar = -0.8, ma = c(-0.3, 0.6),
lag.max = 20)
dev.null <- dev.off()
postscript(file=paste(file,".ps",sep=""), width = 7, height = 7, pointsize = 12, bg = "white",horizontal= FALSE,paper="special")
plotacfthemp(yc, ar = -0.8, ma = c(-0.3, 0.6),
lag.max = 20)
dev.null <- dev.off()
cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="")
(mod12 = Arima(yc, order=c(1, 0, 2)))
set.seed(951)
ya <- arima.sim(n = 200, list(ma = c(-0.3, 0.6)), sd = sqrt(1.5))
set.seed(7392)
require("polynom")
autopol <- polynomial(c(1, 0.8)) * polynomial(c(1, 0, 0, 0, -0.7))
yd <- arima.sim(n = 200, list(ar = -autopol[-1],
ma = c(0, 0.6)), sd = sqrt(1.5))
yd <- yd + 4 |
tabPanel('2 Factor Box Plot', value = 'tab_box_plot_2',
fluidPage(
fluidRow(
column(12, align = 'left',
h4('Box Plot - II')
)
),
hr(),
fluidRow(
column(12,
tabsetPanel(type = 'tabs',
tabPanel('plotly',
fluidRow(
column(2,
selectInput('boxly2_select_x', 'Variable 1: ',
choices = "", selected = ""),
textInput(inputId = "boxly2_xlabel", label = "X Axes Label: ",
value = "label"),
textInput(inputId = "boxly2_title", label = "Title: ",
value = "title")
),
column(2,
selectInput('boxly2_select_y', 'Variable 2: ',
choices = "", selected = ""),
textInput(inputId = "boxly2_ylabel", label = "Y Axes Label: ",
value = "label")
),
column(8, align = 'center',
plotly::plotlyOutput('boxly2_plot_1', height = '600px')
)
)
),
tabPanel('rbokeh',
fluidRow(
column(2,
selectInput('bobox2_select_x', 'Variable 1: ',
choices = "", selected = ""),
numericInput(inputId = "bobox2_oshape", label = "Outlier Shape: ",
value = 1, min = 0, max = 25, step = 1),
numericInput(inputId = "bobox2_width", label = "Width: ",
value = 0.9, min = 0, max = 1, step = 0.1),
selectInput('bobox2_xgrid', 'X Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE"),
textInput(inputId = "bobox2_xlabel", label = "X Axes Label: ",
value = "label"),
textInput(inputId = "bobox2_title", label = "Title: ",
value = "title")
),
column(2,
selectInput('bobox2_select_y', 'Variable 2: ',
choices = "", selected = ""),
numericInput(inputId = "bobox2_osize", label = "Outlier Size: ",
value = 10, min = 0, step = 1),
numericInput(inputId = "bobox2_alpha", label = "Alpha: ",
value = 1, min = 0, max = 1, step = 0.1),
selectInput('bobox2_ygrid', 'Y Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE"),
textInput(inputId = "bobox2_ylabel", label = "Y Axes Label: ",
value = "label")
),
column(8, align = 'center',
rbokeh::rbokehOutput('bobox2_plot_1', height = '600px')
)
)
),
tabPanel('highcharts',
fluidRow(
column(2,
selectInput('hibox2_select_x', 'Variable 1: ',
choices = "", selected = ""),
textInput(inputId = "hibox2_xlabel", label = "X Axes Label: ",
value = "label"),
textInput(inputId = "hibox2_title", label = "Title: ",
value = "title")
),
column(2,
selectInput('hibox2_select_y', 'Variable 2: ',
choices = "", selected = ""),
textInput(inputId = "hibox2_ylabel", label = "Y Axes Label: ",
value = "label")
),
column(8, align = 'center',
highcharter::highchartOutput('hibox2_plot_1', height = '600px')
)
)
)
)
)
)
)
) |
expected <- eval(parse(text="-1.5314339531682e-113"));
test(id=0, code={
argv <- eval(parse(text="list(-1.5314339531682e-113)"));
do.call(`expm1`, argv);
}, o=expected); |
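# build_W(): assemble the design matrix for repeated-measurement Rasch-type
# models by column-binding the group/treatment effect design, the trend
# design and, for polytomous items, the category design.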
build_W <- function(X, nitems, mpoints, grp_n, groupvec, itmgrps)
{
  if(missing(grp_n)) grp_n <- table(groupvec)
  if(!is.numeric(grp_n)) stop("Please specify the number of subjects per group.")
  if(missing(nitems)) stop("Please specify the number of items.")
  if(any(grp_n == 0)) stop("There are groups with zero sample size.")
  if(missing(mpoints)) stop("Please specify the number of time points. If there are none, you might want to use PCM() or LPCM().")
  pplgrps <- length(grp_n)
  categos <- get_item_cats(X, nitems, grp_n)
  tr.des <- build_trdes(nitems, mpoints, pplgrps, categos)
  gr.des <- build_effdes(nitems, mpoints, pplgrps, categos, groupvec)
  if(length(unique(unlist(categos))) == 1 && sum(unique(unlist(categos))) == 1) return(cbind(gr.des, tr.des))
  ct.des <- build_catdes(nitems, mpoints, pplgrps, categos)
  des <- cbind(gr.des, tr.des, ct.des)
  des
}
table <- function( ...
, exclude = if (useNA == "no") c(NA, NaN)
, useNA = c("no","ifany", "always")
, dnn = list.names(...)
, deparse.level = 1
){
UseMethod("table")
}
table.default <- base::table
table.ff <- function( ...
, exclude = if (useNA == "no") c(NA, NaN)
, useNA = c("no","ifany", "always")
, dnn = list.names(...)
, deparse.level = 1
){
args <- list(...)
tab <- NULL
useNA <- match.arg(useNA)
dat <- do.call(ffdf, args)
colnames(dat) <- names(args)
  if(any(!vmode(dat) %in% c("byte", "short", "integer"))){
    stop("Only vmodes byte, short and integer are currently allowed - are you sure ... contains only factors or integers?")
  }
nonfactors <- sapply(colnames(dat), FUN=function(column, dat) !ff::is.factor(dat[[column]]), dat=dat)
nonfactors <- names(nonfactors)[nonfactors == TRUE]
if(length(nonfactors) > 0){
for(column in nonfactors){
dat[[column]] <- as.character.ff(dat[[column]])
}
}
for (i in chunk(dat)){
Log$chunk(i)
factors <- unname(as.list(dat[i,, drop=FALSE]))
factors$exclude <- exclude
factors$useNA <- useNA
factors$deparse.level <- deparse.level
ttab <- do.call(table,factors)
    tab <- if (is.null(tab)) ttab else tab + ttab
}
return(tab)
}
list.names <- function(...) {
l <- as.list(substitute(list(...)))[-1L]
nm <- names(l)
fixup <- if (is.null(nm))
seq_along(l)
else nm == ""
dep <- sapply(l[fixup], function(x) if (is.symbol(x)) as.character(x) else "")
if (is.null(nm))
dep
else {
nm[fixup] <- dep
nm
}
} |
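# Illustrative use (assumes the ff package; factors are tabulated chunk-wise
# so the data never has to fit in memory at once):
# x <- ff::as.ff(factor(sample(letters[1:3], 1e6, replace = TRUE)))
# table(grp = x)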
iter.crossval.combat <-
function (data, surv, censor,batchID,ngroup=10,plot.roc = 0,method = "none",gn.nb = 100){
if(!exists("batchID"))
stop("\rSet batchID", call.=FALSE)
niter = ifelse(ngroup == length(surv), 1,10)
res = NULL
file.name=deparse(substitute(data))
if (plot.roc)
init.plot(file.name)
data =data[!is.na(surv),]
censor= censor[!is.na(surv)]
surv= surv[!is.na(surv)]
cat ("Iteration\tAUC\tHR(CI)\t\tP-val\n")
for (i in 1:niter){
        new.lst = cross.val.combat(data, surv, censor,batchID,method = method, gn.nb, plot.roc, ngroup, i)
res = rbind (res, new.lst)
}
if(ngroup != length(surv)){
cat ("Avg AUC+/-SD\tHR(CI)\n")
if (plot.roc)
legend (0.55,0.1, legend = paste("AUC+/-SD =", sprintf("%.2f",as.numeric(mean(res[,1], na.rm = TRUE))), "+/-", sprintf("%.2f",sd (res[,1],na.rm = TRUE)), sep = " "), bty = "n")
cat (sprintf("%.2f",as.numeric(mean(res[,1], na.rm = TRUE))), "+/-", sprintf("%.2f",sd (res[,1],na.rm = TRUE)), "\t", gm(res[,2]), "(", sprintf("%.2f",ci.gm(res[,2])[1]), "-", sprintf("%.2f",ci.gm(res[,2])[2]), ")\n", sep = "")
}
} |
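# Illustrative call (hypothetical objects: an expression matrix `expr.mat`
# with samples in rows, survival times `surv.time`, censoring indicators
# `censor.ind` and a per-sample batch label `batch`):
# iter.crossval.combat(expr.mat, surv.time, censor.ind, batchID = batch,
#                      ngroup = 10, gn.nb = 100)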
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
fig.width = 7,
fig.align = 'center'
)
library(JointAI)
options(width = 100)
mod1 <- lm_imp(SBP ~ alc, data = NHANES, n.iter = 100, progress.bar = 'none')
a1 <- capture.output(print(summary(mod1)))
cat(paste0('[...]', '\n',
paste(a1[18:22], collapse = "\n")))
mod2 <- lm_imp(SBP ~ alc, data = NHANES, n.adapt = 10, n.iter = 100, progress.bar = 'none')
a2 <- capture.output(print(summary(mod2)))
cat(paste0('[...]', '\n',
paste(a2[19:23], collapse = "\n")))
mod3 <- lm_imp(SBP ~ alc, data = NHANES, n.iter = 500, thin = 10, progress.bar = 'none')
a3 <- capture.output(print(summary(mod3)))
cat(paste0('[...]', '\n',
paste(a3[19:23], collapse = "\n")))
init_list <- lapply(1:3, function(i) {
list(beta = rnorm(4),
tau_SBP = rgamma(1, 1, 1))
})
init_list
mod4a <- lm_imp(SBP ~ gender + age + WC, data = NHANES, progress.bar = 'none',
inits = init_list)
mod4a$mcmc_settings$inits
inits_fun <- function() {
list(beta = rnorm(4),
alpha = rnorm(3))
}
inits_fun()
mod4b <- lm_imp(SBP ~ gender + age + WC, data = NHANES, progress.bar = 'none',
inits = inits_fun)
mod4b$mcmc_settings$inits
mod4c <- lme_imp(bmi ~ time + HEIGHT_M + hc + SMOKE, random = ~ time | ID,
data = simLong, no_model = 'time', progress.bar = 'none')
str(coef(mod4c$model))
mod4c <- lme_imp(bmi ~ time + HEIGHT_M + hc + SMOKE, random = ~ time | ID,
data = simLong, no_model = 'time', progress.bar = 'none')
options(max.print = 1e5)
a4 <- capture.output(coef(mod4c$model))
a4mod <- capture.output(mod4c$jagsmodel)
mat <- mod4c$data_list$M_ID[1:8, ]
colnames(mat) <- gsub("SMOKEsmoked until pregnancy was known",
"SMOKEsmoked until[...]",
gsub("SMOKEcontinued smoking in pregnancy",
"SMOKEcontin[...]", colnames(mat)))
mat
head(coef(mod4c$model)$M_ID, 8)
cat(paste0('[...]\n',
paste0(a4mod[58:60], collapse = "\n"),
'\n\n[...]\n',
paste0(a4mod[69:72], collapse = "\n"),
'\n\n[...]'))
mod4c$data_list['RinvD_bmi_ID']
cat(paste0('[...]\n', paste(a4mod[25:31], collapse = '\n'), '\n[...]\n'))
coef(mod4c$model)$RinvD_bmi_ID |
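# Note on the two initialisation styles used above (standard JAGS behaviour):
# `inits` may be a list with one sub-list per chain (init_list), or a
# function that is called once per chain to draw fresh starting values
# (inits_fun).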
testGlmBi <- function(formula, link){
testModel <- function(Train.data, Validation.data){
    modT <- stats::glm(formula, family = stats::binomial(link = link),
                       data = Train.data)
    predT <- stats::predict(modT, newdata = Train.data,
                            type = "response", se.fit = TRUE)$fit
    predE <- stats::predict(modT, newdata = Validation.data,
                            type = "response", se.fit = TRUE)$fit
    res <- stats::resid(modT, type = "pearson")
Rsp <- as.character(formula)[2]
return(list(predT = predT, predE = predE, res = res, Rsp = Rsp))
}
return(testModel)
} |
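# Usage sketch: testGlmBi() returns a closure, so the formula and link are
# fixed once and the model can then be refit on each train/validation split.
# fit_logit <- testGlmBi(am ~ mpg, link = "logit")
# out <- fit_logit(Train.data = mtcars[1:20, ], Validation.data = mtcars[21:32, ])
# out$predE  # predicted probabilities on the validation split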
BWA <- function(level){
x <- NULL
if(level==1){
x1 <- github.cssegisanddata.covid19(country = "Botswana")
x2 <- ourworldindata.org(id = "BWA")
x <- full_join(x1, x2, by = "date")
}
return(x)
} |
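# At level 1 this merges country-level records from the JHU CSSE COVID-19
# repository with the Our World in Data feed, joined on date.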
linconb<-function(x,con=0,tr=.2,alpha=.05,nboot=599,pr=TRUE,SEED=TRUE){
if(is.data.frame(x))x=as.matrix(x)
con<-as.matrix(con)
if(is.matrix(x))x<-listm(x)
if(!is.list(x))stop("Data must be stored in a matrix or in list mode.")
J<-length(x)
for(j in 1:J){
xx<-x[[j]]
x[[j]]<-xx[!is.na(xx)]
}
Jm<-J-1
d<-(J^2-J)/2
if(sum(con^2)==0){
con<-matrix(0,J,d)
id<-0
for (j in 1:Jm){
jp<-j+1
for (k in jp:J){
id<-id+1
con[j,id]<-1
con[k,id]<- -1
}}}
if(nrow(con)!=length(x))stop("The number of groups does not match the number of contrast coefficients.")
bvec<-array(0,c(J,2,nboot))
if(SEED)set.seed(2)
nsam=matl(lapply(x,length))
for(j in 1:J){
paste("Working on group ",j)
xcen<-x[[j]]-mean(x[[j]],tr)
data<-matrix(sample(xcen,size=length(x[[j]])*nboot,replace=TRUE),nrow=nboot)
bvec[j,,]<-apply(data,1,trimparts,tr)
}
m1<-bvec[,1,]
m2<-bvec[,2,]
boot<-matrix(0,ncol(con),nboot)
for (d in 1:ncol(con)){
top<-apply(m1,2,trimpartt,con[,d])
consq<-con[,d]^2
bot<-apply(m2,2,trimpartt,consq)
boot[d,]<-abs(top)/sqrt(bot)
}
testb<-apply(boot,2,max)
ic<-floor((1-alpha)*nboot)
testb<-sort(testb)
psihat<-matrix(0,ncol(con),4)
test<-matrix(0,ncol(con),4)
dimnames(psihat)<-list(NULL,c("con.num","psihat","ci.lower","ci.upper"))
dimnames(test)<-list(NULL,c("con.num","test","se","p.value"))
for (d in 1:ncol(con)){
test[d,1]<-d
psihat[d,1]<-d
testit<-lincon1(x,con[,d],tr,pr=FALSE)
test[d,2]<-testit$test[1,2]
pval<-mean((abs(testit$test[1,2])<boot[d,]))
test[d,4]<-pval
psihat[d,3]<-testit$psihat[1,2]-testb[ic]*testit$test[1,4]
psihat[d,4]<-testit$psihat[1,2]+testb[ic]*testit$test[1,4]
psihat[d,2]<-testit$psihat[1,2]
test[d,3]<-testit$test[1,4]
}
list(n=nsam,psihat=psihat,test=test,crit=testb[ic],alpha = alpha, con=con)
} |
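# Illustrative use, assuming the usual WRS helpers (listm, trimparts,
# trimpartt, lincon1) are loaded:
# set.seed(1)
# dat <- matrix(rnorm(60), ncol = 3)   # three groups, 20 observations each
# linconb(dat, tr = 0.2, nboot = 599)  # all pairwise trimmed-mean contrasts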
testSWAT2005=function(){
runSWAT2005=function (hist_wx = NULL, elev = 100, rch = 3) {
Sys.setenv(GFORTRAN_STDIN_UNIT = -1)
Sys.setenv(GFORTRAN_STDOUT_UNIT = -1)
Sys.setenv(GFORTRAN_STDERR_UNIT = -1)
if (length(hist_wx) > 2) {
tmp_head = paste("Tmp\nLati Not Used\nLong Not Used\nElev ",
sprintf("%5.0f\n", elev), sep = "")
pcp_head = paste("Pcp\nLati Not Used\nLong Not Used\nElev ",
sprintf("%5.0f\n", elev), sep = "")
cat(tmp_head, sprintf("%s%005.1f%005.1f\n", format(hist_wx$DATE,
"%Y%j"), hist_wx$TMX, hist_wx$TMN), file = "tmp.tmp",
sep = "")
cat(pcp_head, sprintf("%s%005.1f\n", format(hist_wx$DATE,
"%Y%j"), hist_wx$PRECIP), file = "pcp.pcp", sep = "")
print("built new pcp.pcp and tmp.tmp files, make sure they are correct in file.cio")
}
libarch = if (nzchar(version$arch))
paste("libs", version$arch, sep = "/")
else "libs"
swatbin <- "rswat2005.exe"
system(shQuote(paste(path.package("SWATmodel"), libarch, swatbin, sep = "/")))
start_year = read.fortran(textConnection(readLines("file.cio")[9]),
"f20")
temp = readLines(file("output.rch"))
rchcolname = sub(" ", "", (substr(temp[9], 50, 61)))
flow = data.frame(as.numeric(as.character(substr(temp[10:length(temp)],
50, 61))))
colnames(flow) = rchcolname
reach = data.frame(as.numeric(as.character(substr(temp[10:length(temp)],
8, 10))))
rchcolname = sub(" ", "", (substr(temp[9], 8, 10)))
colnames(reach) = rchcolname
outdata = cbind(reach, flow)
temp2 = subset(outdata, outdata$RCH == rch)
temp2$mdate = as.Date(row(temp2)[, 1], origin = paste(start_year -
1, "-12-31", sep = ""))
return(temp2)
}
dirname="testswat"
swat_general <- NULL
rm(swat_general)
load(paste(path.package("SWATmodel"),"data/swat_general.rda", sep = "/"))
dir.create(dirname)
setwd(dirname)
for (file in names(swat_general)) {
print(file)
cat(unlist(swat_general[file]), file = file, sep = "\n")
}
test=runSWAT2005()
if(sum(test$FLOW_OUTcms) < 200){
  cat("Your computer is having SWAT array issues, please contact [email protected] as we would like to figure out why this is happening.\n")
} else {
  cat("Your computer CPU tests out OK for SWAT2005.\n")
}
file.remove(list.files("../testswat/"))
setwd("../")
file.remove("testswat/")
} |
gmd <- function(x, ...) {
qs <- dplyr::quos(...)
if (!sjmisc::is_empty(qs)) x <- suppressMessages(dplyr::select(x, !!!qs))
if (is.data.frame(x))
purrr::map_df(x, gmd_helper)
else
gmd_helper(x)
}
gmd_helper <- function(x) {
if (!is.numeric(x)) return(NA)
x <- stats::na.omit(x)
n <- length(x)
if (n < 2) return(NA)
w <- 4 * ((1:n) - (n - 1) / 2) / n / (n - 1)
sum(w * sort(x - mean(x)))
} |
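# gmd_helper() computes Gini's mean difference via a weighted sum of the
# sorted centred values. Example on built-in data:
# gmd(iris, Sepal.Length, Sepal.Width)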
sleep_thrift <- thriftr::t_load("oneway/sleep.thrift", module_name="sleep_thrift")
client <- thriftr::make_client(
sleep_thrift$Sleep,
"127.0.0.1",
6000)
client$sleep(1)
client$sleep(2)
client$sleep(3)
client$sleep(4) |
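# The thrift definition lives under "oneway/", so sleep() is presumably
# declared oneway: each call should return immediately rather than block
# for the requested number of seconds.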
UniformFrequency_D <- function(train, test, numIntervals=10, seed=-1){
alg <- RKEEL::R6_UniformFrequency_D$new()
alg$setParameters(train, test, numIntervals, seed)
return (alg)
}
R6_UniformFrequency_D <- R6::R6Class("R6_UniformFrequency_D",
inherit = PreprocessAlgorithm,
public = list(
numIntervals = 10,
seed = -1,
setParameters = function(train, test, numIntervals=10, seed=-1){
super$setParameters(train, test)
self$numIntervals <- numIntervals
if(seed == -1) {
self$seed <- sample(1:1000000, 1)
}
else {
self$seed <- seed
}
}
),
private = list(
jarName = "Disc-UniformFrequency.jar",
algorithmName = "UniformFrequency-D",
algorithmString = "Uniform Frequency Discretizer",
getParametersText = function(){
text <- ""
text <- paste0(text, "numIntervals = ", self$numIntervals, "\n")
text <- paste0(text, "seed = ", self$seed, "\n")
return(text)
}
)
) |
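# Usage sketch, following RKEEL's usual workflow (the dataset names and the
# run() method are assumptions here):
# train <- RKEEL::loadKeelDataset("iris_train")
# test  <- RKEEL::loadKeelDataset("iris_test")
# alg <- UniformFrequency_D(train, test, numIntervals = 10)
# alg$run()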
nntsDistribution <- function (theta, cpars = 1/sqrt(2*pi), M = 0)
{
theta<-theta%%(2*pi)
if (M == 0) {
y<-theta/(2*pi)
y<-as.matrix(y)
return(y)}
size <- length(cpars)
if (size != M+1)
stop("Length of cpars must be equal to M+1")
if (abs(sum(Mod(cpars)^2) - 1/(2 * pi)) > 1e-10)
stop("The sum of the squared moduli of the components of cpars must equal 1/(2*pi)")
y <- theta/(2 * pi)
for (k in 0:M){
for (m in 0:M){
if (k != m)
y <- y + cpars[k+1]*Conj(cpars[m+1])*(1i/(k-m))*(1 - exp(1i*(k-m)*theta))
}
}
y <- Re(y)
y<-as.matrix(y)
return(y)
} |
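# Quick check: with M = 0 the distribution is uniform on [0, 2*pi), so
# F(pi) = 0.5. With cpars = c(1/sqrt(2*pi), 0) the squared-modulus
# constraint holds and the second component vanishes, reducing to the
# same uniform case:
# nntsDistribution(pi, M = 0)                              # 0.5
# nntsDistribution(pi, cpars = c(1/sqrt(2*pi), 0), M = 1)  # 0.5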
remake <- function(remake_file="remake.yml", verbose=TRUE,
allow_cache=TRUE, load_sources=TRUE,
allow_missing_packages=FALSE) {
if (is.null(remake_file)) {
return(.R6_remake_interactive$new(verbose=verbose))
}
if (allow_missing_packages || !allow_cache) {
return(remake_new(remake_file, verbose,
load_sources, allow_missing_packages))
}
ret <- cache$fetch(remake_file)
if (is.null(ret)) {
ret <- remake_new(remake_file, verbose,
load_sources, allow_missing_packages)
cache$add(ret)
} else {
ret$verbose <- remake_verbose(verbose)
if (load_sources) {
ret <- .remake_initialize_sources(ret)
}
}
ret
}
remake_new <- function(remake_file="remake.yml", verbose=TRUE,
load_sources=TRUE, allow_missing_packages=FALSE,
config=NULL) {
obj <- list(file=remake_file, path=".",
verbose=remake_verbose(verbose),
allow_missing_packages=allow_missing_packages,
fmt=.remake_initialize_message_format(NULL),
store=NULL, targets=NULL, config=NULL,
default_target=NULL, hash=NULL)
remake_print_message(obj, "LOAD", "")
if (is.null(config) && !is.null(obj$file)) {
obj$config <- read_remake_file(obj$file)
} else {
obj$config <- config
}
obj$hash <- obj$config$hash
if (!is.null(obj$config) && !isFALSE(obj$config$active)) {
obj <- .remake_initialize_targets(obj)
}
if (is.null(obj$config)) {
packages <- sources <- character(0)
} else {
packages <- obj$config$packages
sources <- obj$config$sources
}
obj$store <- store$new(obj$path, packages, sources)
if (load_sources) {
obj <- .remake_initialize_sources(obj)
}
class(obj) <- "remake"
obj
}
read_remake_file <- function(filename, seen=character(0)) {
if (filename %in% seen) {
stop("Recursive include detected: ",
paste(c(seen, filename), collapse=" -> "))
}
if (dirname(filename) != "." && dirname(filename) != getwd()) {
stop("Logic around paths in out-of-directory remake files not decided")
}
dat <- yaml_read(filename)
warn_unknown(filename, dat,
c("packages", "sources", "include",
"plot_options", "knitr_options",
"file_extensions", "target_default", "targets"))
dat$hash <- hash_files(filename, named=TRUE)
if (length(seen) > 0L) {
dat$target_default <- NULL
}
dat$packages <- with_default(dat$packages, character(0))
dat$sources <- with_default(dat$sources, character(0))
if (!is.null(dat$plot_options)) {
assert_named_list(dat$plot_options)
for (i in names(dat$plot_options)) {
assert_named_list(dat$plot_options[[i]],
name=paste("plot_options: ", i))
}
}
if (!is.null(dat$knitr_options)) {
assert_named_list(dat$knitr_options)
for (i in names(dat$knitr_options)) {
assert_named_list(dat$knitr_options[[i]],
name=paste("knitr_options: ", i))
}
}
if (is.null(dat$file_extensions) || length(dat$file_extensions) == 0L) {
dat$file_extensions <- NULL
} else {
assert_character(dat$file_extensions)
dat$file_extensions <- union(file_extensions(),
sub("^\\.", "", dat$file_extensions))
}
if (!is.null(dat$include)) {
assert_character(dat$include)
if (any(dirname(dat$include) != ".")) {
stop("All included remakefiles must be in the current directory")
}
for (f in dat$include) {
dat_sub <- read_remake_file(f, c(seen, filename))
dat$packages <- unique(c(dat$packages, dat_sub$packages))
dat$sources <- unique(c(dat$sources, dat_sub$sources))
dat$hash <- c(dat$hash, dat_sub$hash)
dups <- intersect(names(dat_sub$plot_options), names(dat$plot_options))
if (length(dups) > 0L) {
stop(sprintf("%s contains duplicate plot_options %s",
f, paste(dups, collapse=", ")))
}
dat$plot_options <- c(dat$plot_options, dat_sub$plot_options)
dups <- intersect(names(dat_sub$knitr_options), names(dat$knitr_options))
if (length(dups) > 0L) {
stop(sprintf("%s contains duplicate knitr_options %s",
f, paste(dups, collapse=", ")))
}
dat$knitr_options <- c(dat$knitr_options, dat_sub$knitr_options)
if ("all" %in% names(dat_sub$targets)) {
warning(f, " contains target 'all', which I am removing")
dat_sub$targets$all <- NULL
}
dups <- intersect(names(dat_sub$targets), names(dat$targets))
if (length(dups) > 0L) {
warning(sprintf("%s contains duplicate targets %s",
f, paste(dups, collapse=", ")))
}
dat$targets <- c(dat$targets, dat_sub$targets)
}
}
if (length(seen) == 0L) {
extra <- list(plot_options=dat$plot_options,
knitr_options=dat$knitr_options)
dat$targets <- lnapply(dat$targets, make_target, extra=extra,
file_extensions=dat$file_extensions)
}
dat
}
remake_default_target <- function(obj, target_name=NULL) {
if (is.null(target_name)) {
if (is.null(obj$default_target)) {
stop(obj$file,
" does not define 'target_default' or have target 'all'")
}
obj$default_target
} else {
target_name
}
}
remake_print_message <- function(obj, status, target_name,
cmd=NULL, style="square") {
verbose <- obj$verbose
if (!verbose$print_progress ||
!verbose$print_noop && status %in% c("", "OK")) {
return()
} else if (!verbose$print_command) {
cmd <- NULL
}
status <- brackets(paint(sprintf("%5s", status),
status_colour(status)), style)
target_name <- abbreviate(target_name, obj$fmt$target_width)
if (is.null(cmd)) {
str <- sprintf("%s %s", status, target_name)
} else {
if (verbose$print_command_abbreviate) {
cmd <- abbreviate(cmd, obj$fmt$max_cmd_width)
}
str <- sprintf(obj$fmt$fmt, status, target_name, paint(cmd, "grey60"))
}
message(str)
}
remake_plan <- function(obj, target_name=NULL) {
target_name <- remake_default_target(obj, target_name)
graph <- remake_dependency_graph(obj)
dependencies(target_name, graph)
}
remake_dependency_graph <- function(obj) {
g <- lapply(obj$targets, function(t) t$depends_name)
topological_sort(g)
}
remake_update2 <- function(obj, target_name, check=NULL,
return_target=TRUE) {
target <- obj$targets[[target_name]]
current <- remake_is_current(obj, target_name)
if (!isTRUE(target$implicit)) {
status <- if (current) "OK" else target$status_string
cmd <- if (current) NULL else target_run_fake(target)
style <- "square"
remake_print_message(obj, status, target_name, cmd, style)
}
ret <- NULL
if (target$type == "fake") {
} else if (!current) {
.remake_initialize_packages(obj)
extra <- load_extra_packages(target$packages, obj$file)
if (target$type == "cleanup") {
for (t in target$targets_to_remove) {
remake_remove_target(obj, t)
}
target_run(target, obj$store, obj$verbose$quiet_target)
ret <- NULL
} else {
ret <- target_build(target, obj$store, obj$verbose$quiet_target)
}
unload_extra_packages(extra)
} else if (return_target) {
ret <- target_get(target, obj$store)
}
if (return_target) {
invisible(ret)
} else {
invisible(NULL)
}
}
remake_remove_target <- function(obj, target_name) {
assert_has_targets(target_name, obj)
target <- obj$targets[[target_name]]
store <- obj$store
if (target$type == "file") {
did_remove_obj <- store$files$del(target$name)
did_remove_db <- store$db$del(target$name)
did_remove <- did_remove_obj || did_remove_db
} else if (target$type == "object") {
did_remove_obj <- store$objects$del(target$name)
did_remove_db <- store$db$del(target$name)
did_remove <- did_remove_obj || did_remove_db
} else {
stop("Not something that can be deleted")
}
if (did_remove) {
status <- "DEL"
fn <- if (target$type == "object") "rm" else "file.remove"
cmd <- sprintf('%s("%s")', fn, target_name)
} else {
status <- ""
cmd <- NULL
}
remake_print_message(obj, status, target_name, cmd, "round")
}
remake_make <- function(obj, target_names=NULL, ...) {
target_names <- remake_default_target(obj, target_names)
for (t in target_names) {
remake_print_message(obj, "MAKE", t, style="angle")
}
remake_make1(obj, target_names, ...)
}
remake_make1 <- function(obj, target_names, check=NULL) {
last_target_name <- utils::tail(target_names, 1L)
plan <- remake_plan(obj, target_names)
remake_update1(obj, plan, last_target_name, check=check)
}
remake_update <- function(obj, target_names, check=NULL, ...) {
last_target_name <- utils::tail(target_names, 1L)
for (t in target_names) {
remake_print_message(obj, "UPDT", t, style="angle")
}
remake_update1(obj, target_names, last_target_name, check=check, ...)
}
remake_update1 <- function(obj, plan, last_target_name, check=NULL) {
ret <- lapply(plan, function(i) {
is_last <- i == last_target_name
remake_update2(obj, i, check=check, return_target=is_last)
})
invisible(ret[plan == last_target_name][[1L]])
}
remake_list_targets <- function(obj, type=NULL,
include_implicit_files=FALSE,
include_cleanup_targets=FALSE) {
filter_targets(obj$targets,
type,
include_implicit_files,
include_cleanup_targets)
}
remake_list_dependencies <- function(obj, target_names, type=NULL,
include_implicit_files=FALSE,
include_cleanup_targets=FALSE) {
if (!all(target_names %in% names(obj$targets))) {
stop("Unknown target: ",
paste(setdiff(target_names, names(obj$targets)), collapse=", "))
}
graph <- remake_dependency_graph(obj)
target_names <- dependencies(target_names, graph)
filter_targets(obj$targets[target_names],
type,
include_implicit_files,
include_cleanup_targets)
}
remake_is_current <- function(obj, target_names, check=NULL) {
assert_has_targets(target_names, obj)
vlapply(obj$targets[target_names], function(x)
target_is_current(x, obj$store, check), USE.NAMES=FALSE)
}
remake_who_refers_to <- function(obj, target_names) {
deps <- lapply(obj$targets, "[[", "depends_name")
mat <- vapply(target_names, function(e)
vlapply(deps, function(x) e %in% x), logical(length(deps)))
unname(apply(mat, 2, function(x) paste(names(deps)[x], collapse=", ")))
}
remake_dump_environment <- function(obj, envir) {
assert_environment(envir)
remake_print_message(obj, "DUMP", "")
packages <- obj$store$packages
if (obj$allow_missing_packages) {
packages <- intersect(packages, .packages(TRUE))
}
load_packages(packages)
for (f in obj$store$env$source_files) {
sys.source(f, envir, chdir=TRUE)
}
obj$store$objects$export(envir)
} |
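# Typical entry points for the machinery above, assuming a remake.yml that
# defines target_default or an "all" target:
# obj <- remake("remake.yml")   # load (and cache) the project definition
# remake_plan(obj)              # topologically sorted build plan
# remake_make(obj)              # build the default target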
print.goodnessfit.lba.ls.fe <- function(x,
digits=3L,
...){
cat("Residual sum of square:\n")
res <- c(x$RSS1,
x$RSS)
names(res) <- c("RSS baseline",
"RSS K budget")
print.default(res,
digits=digits,
...)
cat("\n")
} |
html_dependency_phosphor <- function() {
htmlDependency(
name = "phosphor-icons",
version = "1.3.2",
src = c(file = "assets"),
package = "phosphoricons",
stylesheet = "css/phosphor.min.css",
all_files = TRUE
)
}
ph_i <- function(name,
weight = c("light", "regular", "thin", "bold", "fill"),
size = c("lg", "xxs", "xs", "sm", "xl", "1x", "2x", "3x", "4x", "5x", "6x", "7x", "8x", "9x", "10x"),
color = NULL,
...) {
weight <- match.arg(weight)
size <- match.arg(
arg = as.character(size),
choices = c("lg", "xxs", "xs", "sm", "xl", "1x", "2x", "3x", "4x", "5x", "6x", "7x", "8x", "9x", "10x")
)
name <- check_icon(name)
if (!identical(weight, "regular"))
name <- paste(name, weight, sep = "-")
icon <- tags$i(
class = paste0("ph-", name, " ph-", size),
style = css(
color = color
),
...,
html_dependency_phosphor()
)
browsable(icon, value = interactive())
} |
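# Example: a bold, red, double-size heart icon (the name must pass
# check_icon()):
# ph_i("heart", weight = "bold", size = "2x", color = "red")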