fsubset <- function(.x, ...) UseMethod("fsubset")
sbt <- fsubset
fsubset.default <- function(.x, subset, ...) {
if(is.matrix(.x) && !inherits(.x, "matrix")) return(fsubset.matrix(.x, subset, ...))
if(!missing(...)) unused_arg_action(match.call(), ...)
if(is.logical(subset)) return(.Call(C_subsetVector, .x, which(subset), FALSE))
.Call(C_subsetVector, .x, subset, TRUE)
}
fsubset.matrix <- function(.x, subset, ..., drop = FALSE) {
if(missing(...)) return(.x[subset, , drop = drop])
nl <- `names<-`(as.vector(1L:ncol(.x), "list"), dimnames(.x)[[2L]])
vars <- eval(substitute(c(...)), nl, parent.frame())
if(missing(subset)) return(.x[, vars, drop = drop])
.x[subset, vars, drop = drop]
}
ss <- function(x, i, j) {
if(is.atomic(x)) {
if(is.matrix(x)) return(if(missing(j)) x[i, , drop = FALSE] else if(missing(i)) x[, j, drop = FALSE] else x[i, j, drop = FALSE])
return(x[i])
}
mj <- missing(j)
if(mj) j <- seq_along(unclass(x)) else if(is.integer(j)) {
if(missing(i)) return(.Call(C_subsetCols, x, j, TRUE))
if(any(j < 0L)) j <- seq_along(unclass(x))[j]
} else {
if(is.character(j)) {
j <- ckmatch(j, attr(x, "names"))
} else if(is.logical(j)) {
if(length(j) != length(unclass(x))) stop("If j is logical, it needs to be of length ncol(x)")
j <- which(j)
} else if(is.numeric(j)) {
j <- if(any(j < 0)) seq_along(unclass(x))[j] else as.integer(j)
} else stop("j needs to be supplied integer indices, character column names, or a suitable logical vector")
if(missing(i)) return(.Call(C_subsetCols, x, j, TRUE))
}
checkrows <- TRUE
if(!is.integer(i)) {
if(is.numeric(i)) i <- as.integer(i) else if(is.logical(i)) {
nr <- fnrow2(x)
if(length(i) != nr) stop("i needs to be integer or logical(nrow(x))")
i <- which(i)
if(length(i) == nr) if(mj) return(x) else return(.Call(C_subsetCols, x, j, TRUE))
checkrows <- FALSE
} else stop("i needs to be integer or logical(nrow(x))")
}
rn <- attr(x, "row.names")
if(is.numeric(rn) || is.null(rn) || rn[1L] == "1") return(.Call(C_subsetDT, x, i, j, checkrows))
return(`attr<-`(.Call(C_subsetDT, x, i, j, checkrows), "row.names", rn[i]))
}
fsubset.data.frame <- function(.x, subset, ...) {
r <- eval(substitute(subset), .x, parent.frame())
if(missing(...)) vars <- seq_along(unclass(.x)) else {
ix <- seq_along(unclass(.x))
nl <- `names<-`(as.vector(ix, "list"), attr(.x, "names"))
vars <- eval(substitute(c(...)), nl, parent.frame())
nam_vars <- names(vars)
if(is.integer(vars)) {
if(any(vars < 0L)) vars <- ix[vars]
} else {
if(is.character(vars)) vars <- ckmatch(vars, names(nl)) else if(is.numeric(vars)) {
vars <- if(any(vars < 0)) ix[vars] else as.integer(vars)
} else stop("... needs to be comma separated column names, or column indices")
}
if(length(nam_vars)) {
nonmiss <- nzchar(nam_vars)
attr(.x, "names")[vars[nonmiss]] <- nam_vars[nonmiss]
}
}
checkrows <- TRUE
if(is.logical(r)) {
nr <- fnrow2(.x)
if(length(r) != nr) stop("subset needs to be an expression evaluating to logical(nrow(.x)) or integer")
r <- which(r)
if(length(r) == nr) if(missing(...)) return(.x) else return(.Call(C_subsetCols, .x, vars, TRUE))
checkrows <- FALSE
} else if(is.numeric(r)) r <- as.integer(r) else
stop("subset needs to be an expression evaluating to logical(nrow(.x)) or integer")
rn <- attr(.x, "row.names")
if(is.numeric(rn) || is.null(rn) || rn[1L] == "1") return(.Call(C_subsetDT, .x, r, vars, checkrows))
return(`attr<-`(.Call(C_subsetDT, .x, r, vars, checkrows), "row.names", rn[r]))
}
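# Example usage (illustrative sketch, not part of the source; assumes the
# collapse package is attached so the compiled C_subset* routines exist):
# library(collapse)
# fsubset(mtcars, cyl == 4 & vs == 1, mpg, hp)  # row filter plus column selection
# ss(mtcars, 1:5, c("mpg", "hp"))               # programmatic row/column indexing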
ftransform_core <- function(X, value) {
ax <- attributes(X)
oldClass(X) <- NULL
nam <- names(value)
if(!length(nam) || fanyDuplicated(nam)) stop("All replacement expressions have to be uniquely named")
namX <- names(X)
if(!length(namX) || fanyDuplicated(namX)) stop("All columns of .data have to be uniquely named")
le <- vlengths(value, FALSE)
nr <- length(X[[1L]])
rl <- le == nr
inx <- match(nam, namX)
matched <- !is.na(inx)
if(all(rl)) {
if(any(matched)) X[inx[matched]] <- value[matched]
} else {
if(any(1L < le & !rl)) stop("Lengths of replacements must be equal to nrow(.data) or 1, or NULL to delete columns")
if(any(le1 <- le == 1L)) value[le1] <- lapply(value[le1], alloc, nr)
if(any(le0 <- le == 0L)) {
if(any(le0 & !matched)) stop(paste("Can only delete existing columns, unknown columns:", paste(nam[le0 & !matched], collapse = ", ")))
if(all(le0)) {
X[inx[le0]] <- NULL
return(`oldClass<-`(X, ax[["class"]]))
}
matched <- matched[!le0]
value <- value[!le0]
if(any(matched)) X[inx[!le0][matched]] <- value[matched]
X[inx[le0]] <- NULL
} else if(any(matched)) X[inx[matched]] <- value[matched]
}
if(all(matched)) return(`oldClass<-`(X, ax[["class"]]))
ax[["names"]] <- c(names(X), names(value)[!matched])
setAttributes(c(X, value[!matched]), ax)
}
ftransform <- function(.data, ...) {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
e <- eval(substitute(list(...)), .data, parent.frame())
if(is.null(names(e)) && length(e) == 1L && is.list(e[[1L]])) e <- unclass(e[[1L]])
return(condalc(ftransform_core(.data, e), inherits(.data, "data.table")))
}
tfm <- ftransform
`ftransform<-` <- function(.data, value) {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
if(!is.list(value)) stop("value needs to be a named list")
return(condalc(ftransform_core(.data, unclass(value)), inherits(.data, "data.table")))
}
`tfm<-` <- `ftransform<-`
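# Example usage (illustrative sketch, assuming collapse is attached):
# ftransform() adds or modifies columns; NULL deletes a column, and replacement
# lengths must equal nrow(.data) or 1.
# library(collapse)
# ftransform(mtcars, kpl = mpg * 0.4251, carb = NULL)
# m <- mtcars; tfm(m) <- list(wt_dm = m$wt - mean(m$wt))  # replacement-function form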
eval_exp <- function(nam, exp, pe) {
nl <- `names<-`(as.vector(seq_along(nam), "list"), nam)
eval(exp, nl, pe)
}
ftransformv <- function(.data, vars, FUN, ..., apply = TRUE) {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
if(!is.function(FUN)) stop("FUN needs to be a function")
clx <- oldClass(.data)
vs <- tryCatch(vars, error = function(e) NULL)
if(apply) {
oldClass(.data) <- NULL
if(is.null(vs)) vs <- eval_exp(names(.data), substitute(vars), parent.frame())
vars <- cols2int(vs, .data, names(.data), FALSE)
value <- `names<-`(.data[vars], NULL)
value <- if(missing(...)) lapply(value, FUN) else
eval(substitute(lapply(value, FUN, ...)), .data, parent.frame())
} else {
nam <- attr(.data, "names")
if(is.null(vs)) vs <- eval_exp(nam, substitute(vars), parent.frame())
vars <- cols2int(vs, .data, nam, FALSE)
value <- .Call(C_subsetCols, .data, vars, FALSE)
value <- if(missing(...)) unclass(FUN(value)) else
unclass(eval(substitute(FUN(value, ...)), .data, parent.frame()))
if(!identical(names(value), nam[vars]))
return(condalc(ftransform_core(.data, value), any(clx == "data.table")))
oldClass(.data) <- NULL
}
le <- vlengths(value, FALSE)
nr <- length(.data[[1L]])
if(allv(le, nr)) .data[vars] <- value else if(allv(le, 1L))
.data[vars] <- lapply(value, alloc, nr) else {
if(apply) names(value) <- names(.data)[vars]
.data <- ftransform_core(.data, value)
}
return(condalc(`oldClass<-`(.data, clx), any(clx == "data.table")))
}
tfmv <- ftransformv
settransform <- function(.data, ...)
assign(as.character(substitute(.data)), ftransform(.data, ...), envir = parent.frame())
settfm <- settransform
settransformv <- function(.data, ...)
assign(as.character(substitute(.data)), ftransformv(.data, ...), envir = parent.frame())
settfmv <- settransformv
fcompute_core <- function(.data, e, keep = NULL) {
ax <- attributes(.data)
nam <- ax[["names"]]
if(!length(nam) || fanyDuplicated(nam)) stop("All columns of .data have to be uniquely named")
if(length(keep)) {
keep <- cols2int(keep, .data, nam, FALSE)
if(any(m <- match(names(e), nam[keep], nomatch = 0L))) {
temp <- .subset(.data, keep)
pos <- m > 0L
temp[m[pos]] <- e[pos]
e <- c(temp, e[!pos])
} else e <- c(.subset(.data, keep), e)
}
if(inherits(.data, "sf") && !any(names(e) == attr(.data, "sf_column")))
e <- c(e, .subset(.data, attr(.data, "sf_column")))
ax[["names"]] <- names(e)
le <- vlengths(e, FALSE)
nr <- fnrow2(.data)
rl <- le == nr
if(all(rl)) return(condalcSA(e, ax, inherits(.data, "data.table")))
if(any(1L < le & !rl)) stop("Lengths of replacements must be equal to nrow(.data) or 1")
e[!rl] <- lapply(e[!rl], alloc, nr)
return(condalcSA(e, ax, inherits(.data, "data.table")))
}
fcompute <- function(.data, ..., keep = NULL) {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
e <- eval(substitute(list(...)), .data, parent.frame())
if(is.null(names(e)) && length(e) == 1L && is.list(e[[1L]])) e <- unclass(e[[1L]])
return(fcompute_core(.data, e, keep))
}
fcomputev <- function(.data, vars, FUN, ..., apply = TRUE, keep = NULL) {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
if(!is.function(FUN)) stop("FUN needs to be a function")
vs <- tryCatch(vars, error = function(e) NULL)
nam <- attr(.data, "names")
if(is.null(vs)) vs <- eval_exp(nam, substitute(vars), parent.frame())
vars <- cols2int(vs, .data, nam, FALSE)
if(apply) {
value <- `names<-`(.subset(.data, vars), NULL)
value <- if(missing(...)) lapply(value, FUN) else
eval(substitute(lapply(value, FUN, ...)), .data, parent.frame())
names(value) <- nam[vars]
} else {
value <- .Call(C_subsetCols, .data, vars, FALSE)
value <- if(missing(...)) unclass(FUN(value)) else
unclass(eval(substitute(FUN(value, ...)), .data, parent.frame()))
}
return(fcompute_core(.data, value, keep))
}
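# Example usage (illustrative sketch, assuming collapse is attached): unlike
# ftransform(), fcompute() returns only the computed columns plus any keep = columns.
# library(collapse)
# fcompute(mtcars, pwr_wt = hp / wt, keep = "cyl")
# fcomputev(mtcars, c(mpg, wt), log)  # apply log to the selected columns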
fFUN_mutate_add_groups <- function(z) {
if(!is.call(z)) return(z)
cz <- l1orlst(as.character(z[[1L]]))
if(any(cz == .FAST_FUN_MOPS)) {
z$g <- quote(.g_)
if(any(cz == .FAST_STAT_FUN_POLD) && is.null(z$TRA)) z$TRA <- 1L
}
if(length(z) > 2L || is.call(z[[2L]])) return(as.call(lapply(z, fFUN_mutate_add_groups)))
z
}
gsplit_single_apply <- function(x, g, ex, v, encl)
copyMostAttributes(unlist(lapply(gsplit(x, g), function(i) eval(ex, `names<-`(list(i), v), encl)), FALSE, FALSE), x)
gsplit_multi_apply <- function(x, g, ex, encl) {
sx <- seq_along(x)
unlist(lapply(gsplit(NULL, g),
function(i) eval(ex, .Call(C_subsetDT, x, i, sx, FALSE), encl)), FALSE, FALSE)
}
othFUN_compute <- function(x) {
if(length(x) == 2L)
return(substitute(unlist(lapply(.gsplit_(a, .g_), b), FALSE, FALSE),
list(a = x[[2L]], b = x[[1L]])))
lapply_call <- as.call(c(list(quote(lapply), substitute(.gsplit_(a, .g_), list(a = x[[2L]]))), as.list(x[-2L])))
substitute(unlist(a, FALSE, FALSE), list(a = lapply_call))
}
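# Illustration (derived from the rewriting logic above): a one-variable call
# with no extra arguments is turned into a grouped split-apply-combine, e.g.
# othFUN_compute(quote(cumsum(x)))
# #> unlist(lapply(.gsplit_(x, .g_), cumsum), FALSE, FALSE)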
keep_v <- function(d, v) copyMostAttributes(null_rm(.subset(d, unique.default(v))), d)
acr_get_cols <- function(.cols, d, nam, ce) {
if(is.null(.cols)) return(if(is.null(d[[".g_"]])) seq_along(nam) else seq_along(nam)[nam %!in% c(".g_", ".gsplit_", d[[".g_"]]$group.vars)])
nl <- `names<-`(as.vector(seq_along(nam), "list"), nam)
cols <- eval(.cols, nl, ce)
if(is.null(cols)) return(if(is.null(d[[".g_"]])) seq_along(nam) else seq_along(nam)[nam %!in% c(".g_", ".gsplit_", d[[".g_"]]$group.vars)])
return(cols2int(cols, d, nam))
}
acr_get_funs <- function(.fnsexp, .fns, ce) {
if(is.function(.fns)) {
namfun <- l1orlst(as.character(.fnsexp))
.fns <- `names<-`(list(.fns), namfun)
} else if(is.list(.fns)) {
namfun <- names(.fns)
if(is.call(.fnsexp) && (.fnsexp[[1L]] == quote(list) || .fnsexp[[1L]] == quote(c))) {
nf <- all.vars(.fnsexp, unique = FALSE)
if(length(nf) == length(.fns)) {
names(.fns) <- nf
if(is.null(namfun)) namfun <- nf
} else {
nf <- vapply(.fnsexp[-1L], function(x) l1orlst(all.vars(x)), character(1L), USE.NAMES = FALSE)
names(.fns) <- nf
if(is.null(namfun)) namfun <- as.character(seq_along(.fns))
}
} else if(is.null(namfun)) names(.fns) <- namfun <- as.character(seq_along(.fns))
} else if(is.character(.fns)) {
namfun <- names(.fns)
names(.fns) <- .fns
.fns <- lapply(.fns, get, mode = "function", envir = ce)
if(is.null(namfun)) namfun <- names(.fns)
} else stop(".fns must be a fucntion, list of functions or character vector of function names")
return(list(namfun = namfun, funs = .fns))
}
fungroup2 <- function(X, ocl) {
attr(X, "groups") <- NULL
oldClass(X) <- fsetdiff(ocl, c("GRP_df", "grouped_df"))
X
}
setup_across <- function(.cols, .fnsexp, .fns, .names, .apply, .transpose, .FFUN) {
pe <- parent.frame(n = 4L)
d <- unclass(pe$.data)
ce <- parent.frame(n = 5L)
nam <- names(d)
cols <- acr_get_cols(.cols, d, nam, ce)
funs <- acr_get_funs(.fnsexp, .fns, ce)
namfun <- funs$namfun
fun <- funs$funs
if(length(.names) && !is.logical(.names)) {
if(is.function(.names)) names <- .names(nam[cols], namfun)
else {
if(length(.names) != length(namfun) * length(cols)) stop("length(.names) must match length(.fns) * length(.cols)")
names <- .names
}
} else {
names <- if((is.null(.names) && length(namfun) == 1L) || (isFALSE(.names) && length(namfun) > 1L)) NULL else if(isFALSE(.names))
nam[cols] else if(isFALSE(.transpose))
as.vector(outer(nam[cols], namfun, paste, sep = "_")) else
as.vector(t(outer(nam[cols], namfun, paste, sep = "_")))
}
if(is.logical(.apply)) {
aplvec <- if(.apply) rep_len(TRUE, length(fun)) else rep_len(FALSE, length(fun))
} else {
.apply <- switch(.apply, auto = NA, stop(".apply must be 'auto', TRUE or FALSE"))
aplvec <- names(fun) %!in% .FFUN
}
.data_ <- if(all(aplvec)) d[cols] else .Call(C_subsetCols, if(is.null(d[[".g_"]])) `oldClass<-`(d, pe$cld) else fungroup2(d, pe$cld), cols, FALSE)
list(data = d,
.data_ = .data_,
funs = fun,
aplvec = aplvec,
ce = ce,
names = names)
}
across <- function(.cols = NULL, .fns, ..., .names = NULL, .apply = "auto", .transpose = "auto") {
stop("across() can only work inside fmutate() and fsummarise()")
}
do_across <- function(.cols = NULL, .fns, ..., .names = NULL, .apply = "auto", .transpose = "auto", .eval_funi, .summ = TRUE) {
setup <- setup_across(substitute(.cols), substitute(.fns), .fns, .names, .apply, .transpose, .FAST_FUN_MOPS)
seqf <- seq_along(setup$funs)
names <- setup$names
if(length(seqf) == 1L) {
res <- .eval_funi(seqf, setup[[1L]], setup[[2L]], setup[[3L]], setup[[4L]], setup[[5L]], ...)
} else {
r <- lapply(seqf, .eval_funi, setup[[1L]], setup[[2L]], setup[[3L]], setup[[4L]], setup[[5L]], ...)
if(isFALSE(.transpose) || (is.character(.transpose) && !all_eq(vlengths(r, FALSE)))) {
res <- unlist(r, FALSE, use.names = TRUE)
} else {
res <- unlist(t_list2(r), FALSE, FALSE)
if(is.null(names(res)) && is.null(names))
names(res) <- unlist(t_list2(lapply(r, names)), FALSE, FALSE)
}
}
if(.summ) return(if(is.null(names)) res else `names<-`(res, names))
return(`[<-`(setup$data, if(is.null(names)) names(res) else names, value = res))
}
mutate_funi_simple <- function(i, data, .data_, funs, aplvec, ce, ...) {
.FUN_ <- funs[[i]]
nami <- names(funs)[i]
if(aplvec[i]) {
value <- if(missing(...)) lapply(unattrib(.data_), .FUN_) else
do.call(lapply, c(list(unattrib(.data_), .FUN_), eval(substitute(list(...)), data, ce)), envir = ce)
names(value) <- names(.data_)
} else if(any(nami == .FAST_STAT_FUN_POLD)) {
if(missing(...)) return(unclass(.FUN_(.data_, TRA = 1L)))
fcal <- as.call(c(list(as.name(nami), quote(.data_)), as.list(substitute(list(...))[-1L])))
if(is.null(fcal$TRA)) fcal$TRA <- 1L
return(unclass(eval(fcal, c(list(.data_ = .data_), data), ce)))
} else {
value <- if(missing(...)) .FUN_(.data_) else
do.call(.FUN_, c(list(.data_), eval(substitute(list(...)), data, ce)), envir = ce)
oldClass(value) <- NULL
if(any(nami == .FAST_FUN_MOPS)) return(value)
}
lv <- vlengths(value, FALSE)
nr <- length(data[[1L]])
if(all(lv == nr)) return(value)
if(all(lv == 1L)) return(lapply(value, alloc, nr))
stop("Without groups, NROW(value) must either be 1 or nrow(.data)")
}
dots_apply_grouped <- function(d, g, f, dots) {
attributes(d) <- NULL
n <- length(d[[1L]])
if(any(ln <- vlengths(dots, FALSE) == n)) {
ln <- which(ln)
if(length(ln) > 1L) {
asl <- lapply(dots[ln], gsplit, g)
if(length(dots) > length(ln)) {
mord <- dots[-ln]
FUN <- function(x) do.call(mapply, c(list(f, gsplit(x, g), SIMPLIFY = FALSE, USE.NAMES = FALSE, MoreArgs = mord), asl))
} else
FUN <- function(x) do.call(mapply, c(list(f, gsplit(x, g), SIMPLIFY = FALSE, USE.NAMES = FALSE), asl))
} else {
nam <- names(dots)
as <- gsplit(dots[[ln]], g)
FUN <- quote(function(x) mapply(f, gsplit(x, g), SIMPLIFY = FALSE, USE.NAMES = FALSE))
FUN[[3L]][[if(length(nam) && nzchar(nam[ln])) nam[ln] else 6L]] <- quote(as)
if(length(dots) > 1L) {
mord <- dots[-ln]
FUN[[3L]]$MoreArgs <- quote(mord)
}
FUN <- eval(FUN)
}
return(lapply(d, function(y) copyMostAttributes(unlist(FUN(y), FALSE, FALSE), y)))
}
do.call(lapply, c(list(d, copysplaplfun, g, f), dots))
}
dots_apply_grouped_bulk <- function(d, g, f, dots) {
n <- fnrow2(d)
dsp <- rsplit.data.frame(d, g, simplify = FALSE, flatten = TRUE, use.names = FALSE)
if(is.null(dots)) return(lapply(dsp, f))
if(any(ln <- vlengths(dots, FALSE) == n)) {
ln <- which(ln)
if(length(ln) > 1L) {
asl <- lapply(dots[ln], gsplit, g)
return(do.call(mapply, c(list(f, dsp, SIMPLIFY = FALSE, USE.NAMES = FALSE,
MoreArgs = if(length(dots) > length(ln)) dots[-ln] else NULL), asl)))
} else {
nam <- names(dots)
as <- gsplit(dots[[ln]], g)
FUN <- quote(mapply(f, dsp, SIMPLIFY = FALSE, USE.NAMES = FALSE))
FUN[[if(length(nam) && nzchar(nam[ln])) nam[ln] else 6L]] <- quote(as)
if(length(dots) > 1L) {
mord <- dots[-ln]
FUN$MoreArgs <- quote(mord)
}
return(eval(FUN))
}
}
do.call(lapply, c(list(dsp, f), dots))
}
mutate_funi_grouped <- function(i, data, .data_, funs, aplvec, ce, ...) {
g <- data[[".g_"]]
.FUN_ <- funs[[i]]
nami <- names(funs)[i]
apli <- aplvec[i]
if(apli) {
value <- if(missing(...)) lapply(unattrib(.data_), copysplaplfun, g, .FUN_) else
dots_apply_grouped(.data_, g, .FUN_, eval(substitute(list(...)), data, ce))
} else if(any(nami == .FAST_STAT_FUN_POLD)) {
if(missing(...)) return(unclass(.FUN_(.data_, g = g, TRA = 1L)))
fcal <- as.call(c(list(as.name(nami), quote(.data_), g = quote(.g_)), as.list(substitute(list(...))[-1L])))
if(is.null(fcal$TRA)) fcal$TRA <- 1L
return(unclass(eval(fcal, c(list(.data_ = .data_), data), ce)))
} else if(any(nami == .FAST_FUN_MOPS)) {
if(any(nami == .OPERATOR_FUN)) {
value <- if(missing(...)) .FUN_(.data_, by = g) else
do.call(.FUN_, c(list(.data_, by = g), eval(substitute(list(...)), data, ce)), envir = ce)
} else {
value <- if(missing(...)) .FUN_(.data_, g = g) else
do.call(.FUN_, c(list(.data_, g = g), eval(substitute(list(...)), data, ce)), envir = ce)
}
oldClass(value) <- NULL
return(value)
} else {
value <- dots_apply_grouped_bulk(.data_, g, .FUN_, if(missing(...)) NULL else eval(substitute(list(...)), data, ce))
value <- .Call(C_rbindlist, unclass(value), FALSE, FALSE, NULL)
oldClass(value) <- NULL
}
lv <- vlengths(value, FALSE)
nr <- length(data[[1L]])
if(all(lv == nr)) {
if(!isTRUE(g$ordered[2L])) value <- lapply(value, greorder, g)
if(apli) names(value) <- names(.data_)
return(value)
}
if(!all(lv == g[[1L]])) stop("With groups, NROW(value) must either be ng or nrow(.data)")
if(apli) names(value) <- names(.data_)
return(.Call(C_subsetDT, value, g[[2L]], seq_along(value), FALSE))
}
do_grouped_expr <- function(ei, eiv, .data, g, pe) {
v <- all.vars(ei, unique = FALSE)
if(length(v) > 1L) {
namd <- names(.data)
if(length(wv <- na_rm(match(v, namd))) > 1L) return(gsplit_multi_apply(.data[wv], g, ei, pe))
return(gsplit_single_apply(.data[[wv]], g, ei, namd[wv], pe))
}
if(length(eiv) == 2L) return(copyMostAttributes(eval(othFUN_compute(ei), .data, pe), .data[[v]]))
gsplit_single_apply(.data[[v]], g, ei, v, pe)
}
fmutate <- function(.data, ..., .keep = "all") {
if(!is.list(.data)) stop(".data needs to be a list of equal length columns or a data.frame")
e <- substitute(list(...))
nam <- names(e)
nullnam <- is.null(nam)
pe <- parent.frame()
cld <- oldClass(.data)
oldClass(.data) <- NULL
nr <- length(.data[[1L]])
namdata <- names(.data)
if(is.null(namdata) || fanyDuplicated(namdata)) stop("All columns of .data have to be uniquely named")
if(!is.character(.keep)) .keep <- cols2char(.keep, .data, namdata)
gdfl <- any(cld == "grouped_df")
if(gdfl) {
g <- GRP.grouped_df(.data, return.groups = FALSE, call = FALSE)
.data[[".g_"]] <- g
.data[[".gsplit_"]] <- gsplit
for(i in seq_along(e)[-1L]) {
ei <- e[[i]]
if(nullnam || nam[i] == "") {
if(ei[[1L]] != quote(across) && ei[[1L]] != quote(acr)) stop("expressions need to be named or start with across(), or its shorthand acr().")
ei[[1L]] <- quote(do_across)
ei$.eval_funi <- quote(mutate_funi_grouped)
ei$.summ <- FALSE
.data <- eval(ei, list(do_across = do_across, mutate_funi_grouped = mutate_funi_grouped), pe)
} else {
if(is.null(ei)) {
.data[[nam[i]]] <- NULL
next
}
eiv <- all.names(ei)
if(any(eiv %in% .FAST_FUN_MOPS)) {
.data[[nam[i]]] <- eval(fFUN_mutate_add_groups(ei), .data, pe)
} else {
r <- do_grouped_expr(ei, eiv, .data, g, pe)
.data[[nam[i]]] <- if(length(r) == g[[1L]])
.Call(C_subsetVector, r, g[[2L]], FALSE) else
greorder(r, g)
}
}
}
.data[c(".g_", ".gsplit_")] <- NULL
} else {
for(i in seq_along(e)[-1L]) {
ei <- e[[i]]
if(nullnam || nam[i] == "") {
if(ei[[1L]] != quote(across) && ei[[1L]] != quote(acr)) stop("expressions need to be named or start with across(), or its shorthand acr().")
ei[[1L]] <- quote(do_across)
ei$.eval_funi <- quote(mutate_funi_simple)
ei$.summ <- FALSE
.data <- eval(ei, list(do_across = do_across, mutate_funi_simple = mutate_funi_simple), pe)
} else {
r <- eval(ei, .data, pe)
if(!is.null(r)) {
if(length(r) == 1L) r <- alloc(r, nr)
else if(length(r) != nr) stop("Lengths of replacements must be equal to nrow(.data) or 1")
}
.data[[nam[i]]] <- r
}
}
}
.data <- if(length(.keep) > 1L) keep_v(.data, c(.keep, nam[-1L])) else
switch(.keep,
all = .data,
used = keep_v(.data, c(namdata[namdata %in% c(if(gdfl) g$group.vars, unlist(lapply(e[-1L], all.vars), FALSE, FALSE), nam[-1L])], nam[-1L])),
unused = keep_v(.data, c(namdata[namdata %in% c(if(gdfl) g$group.vars, fsetdiff(namdata, unlist(lapply(e[-1L], all.vars), FALSE, FALSE)), nam[-1L])], nam[-1L])),
none = keep_v(.data, c(if(gdfl) g$group.vars, nam[-1L])),
keep_v(.data, c(.keep, nam[-1L])))
oldClass(.data) <- cld
return(condalc(.data, any(cld == "data.table")))
}
mtt <- fmutate
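# Example usage (illustrative sketch, assuming collapse is attached): fmutate()
# evaluates grouped expressions via gsplit()/greorder(), and across() dispatches
# to the do_across()/mutate_funi_*() machinery defined above.
# library(collapse)
# mtcars |> fgroup_by(cyl) |> fmutate(demeaned_mpg = mpg - fmean(mpg))
# mtcars |> fgroup_by(cyl) |> fmutate(across(c(hp, wt), fscale))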
active.extension <- function(model, up=130, down=30) {
up <- up/100
down <- down/100
model$sum.long <- up
model$sum.short <- down
model <- lower.bound(model, -down)
model <- upper.bound(model, up)
model$active.extension <- TRUE
return(model)
}
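# Usage sketch (hypothetical): a 130/30 active-extension portfolio, i.e. longs
# summing to 130% and shorts to 30% of capital. Assumes lower.bound() and
# upper.bound() are the bound-setting helpers from the same codebase.
# model <- active.extension(model, up = 130, down = 30)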
viscosity <- function(S = 35, t = 25, P = 1.013253) {
if (any(S < 0))
stop("Salinity should be >= 0")
1.7910 - t*(6.144e-02 - t*(1.4510e-03 - t*1.6826e-05)) -
1.5290e-04*P + 8.3885e-08*P*P + 2.4727e-03*S +
(6.0574e-06*P - 2.6760e-09*P*P)*t + (t*(4.8429e-05 -
t*(4.7172e-06 - t*7.5986e-08)))*S
}
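# Worked examples (approximate values, computed from the polynomial above;
# result is shear viscosity in centipoise):
# viscosity(S = 35, t = 25, P = 1.013253)  # seawater, roughly 0.97
# viscosity(S = 0, t = 20)                 # fresh water, roughly 1.01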
legitWPX <- function(twpx, quiet=TRUE)
{
if(is.null(twpx))
{
return(0)
}
tval = all(is.na(twpx$name))
if(tval)
{
if(!quiet) cat("ERROR WPX (no picks)", sep="\n")
return(0)
}
else
{
return(1)
}
}
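# Quick illustration (assumption-free): legitWPX() returns 1 for a pick set with
# at least one non-NA name, else 0.
# legitWPX(NULL)                     #> 0
# legitWPX(list(name = NA))          #> 0 ("ERROR WPX (no picks)" if quiet = FALSE)
# legitWPX(list(name = c("P", NA)))  #> 1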
byFasttimeY_____call <- quote(fasttime::fastPOSIXct(sprintf("%04d-01-01" , year(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeYQ____call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-01" , year(.dateTime), quarter(.dateTime) * 3L - 2L ), tz = .helpers[["timezone"]]))
byFasttimeYm____call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-01" , year(.dateTime), month(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeYmd___call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d" , year(.dateTime), month(.dateTime), mday(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeYmdH__call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:00:00" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeYmdHM_call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:%02d:00" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeYmdHMScall <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:%02d:%02d", year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime), second(.dateTime)), tz = .helpers[["timezone"]]))
byFasttime______call <- quote(fasttime::fastPOSIXct( rep("2199-01-01" , length(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttime_Q____call <- quote(fasttime::fastPOSIXct(sprintf("2199-%02d-01" , quarter(.dateTime) * 3L - 2L), tz = .helpers[["timezone"]]))
byFasttime_m____call <- quote(fasttime::fastPOSIXct(sprintf("2199-%02d-01" , month(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttime___H__call <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 %02d:00:00", hour(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttime____M_call <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 00:%02d:00", minute(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttime_____Scall <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 00:00:%02d", second(.dateTime) ), tz = .helpers[["timezone"]]))
byY_____call <- quote(as.POSIXct(sprintf("%04d-01-01" , year(.dateTime) ), tz = .helpers[["timezone"]]))
byYQ____call <- quote(as.POSIXct(sprintf("%04d-%02d-01", year(.dateTime), quarter(.dateTime) * 3L - 2L), tz = .helpers[["timezone"]]))
byYm____call <- quote(as.POSIXct(sprintf("%04d-%02d-01", year(.dateTime), month(.dateTime) ), tz = .helpers[["timezone"]]))
byYmd___call <- quote(as.POSIXct( trunc(.dateTime , units = "days" ), tz = .helpers[["timezone"]]))
byYmdH__call <- quote(as.POSIXct( trunc(.dateTime , units = "hours" ), tz = .helpers[["timezone"]]))
byYmdHM_call <- quote(as.POSIXct( trunc(.dateTime , units = "mins" ), tz = .helpers[["timezone"]]))
byYmdHMScall <- quote(as.POSIXct( trunc(.dateTime , units = "secs" ), tz = .helpers[["timezone"]]))
by______call <- quote(as.POSIXct( rep("2199-01-01" , length(.dateTime) ), tz = .helpers[["timezone"]]))
by_Q____call <- quote(as.POSIXct(sprintf("2199-%02d-01" , quarter(.dateTime) * 3L - 2L), tz = .helpers[["timezone"]]))
by_m____call <- quote(as.POSIXct(sprintf("2199-%02d-01" , month(.dateTime) ), tz = .helpers[["timezone"]]))
by___H__call <- quote(as.POSIXct(sprintf("2199-01-01 %02d:00:00", hour(.dateTime) ), tz = .helpers[["timezone"]]))
by____M_call <- quote(as.POSIXct(sprintf("2199-01-01 00:%02d:00", minute(.dateTime) ), tz = .helpers[["timezone"]]))
by_____Scall <- quote(as.POSIXct(sprintf("2199-01-01 00:00:%02d", second(.dateTime) ), tz = .helpers[["timezone"]]))
byFasttimeMultY_____call <- quote(fasttime::fastPOSIXct(sprintf("%04d-01-01" , year(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMultYm____call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-01" , year(.dateTime), (month(.dateTime) - 1L) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] + 1L), tz = .helpers[["timezone"]]))
byFasttimeMultYmdH__call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:00:00" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMultYmdHM_call <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:%02d:00" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMultYmdHMScall <- quote(fasttime::fastPOSIXct(sprintf("%04d-%02d-%02d %02d:%02d:%02d", year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime), second(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMult_m____call <- quote(fasttime::fastPOSIXct(sprintf("2199-%02d-01" , (month(.dateTime) - 1L) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] + 1L), tz = .helpers[["timezone"]]))
byFasttimeMult___H__call <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 %02d:00:00", hour(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMult____M_call <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 00:%02d:00", minute(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byFasttimeMult_____Scall <- quote(fasttime::fastPOSIXct(sprintf("2199-01-01 00:00:%02d", second(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byMultY_____call <- quote(as.POSIXct(sprintf("%04d-01-01" , year(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]] ))
byMultYm____call <- quote(as.POSIXct(sprintf("%04d-%02d-01" , year(.dateTime), (month(.dateTime) - 1L) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] + 1L), tz = .helpers[["timezone"]] ))
byMultYmdH__call <- quote(as.POSIXct(sprintf("%04d-%02d-%02d %02d:00:00" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]] ))
byMultYmdHM_call <- quote(as.POSIXct(sprintf("%04d-%02d-%02d %02d:%02d:00 %s" , year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]], format(.dateTime, "%z", tz = .helpers[["timezone"]])), tz = .helpers[["timezone"]], format = "%Y-%m-%d %H:%M:%S %z"))
byMultYmdHMScall <- quote(as.POSIXct(sprintf("%04d-%02d-%02d %02d:%02d:%02d %s", year(.dateTime), month(.dateTime), mday(.dateTime), hour(.dateTime), minute(.dateTime), second(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]], format(.dateTime, "%z", tz = .helpers[["timezone"]])), tz = .helpers[["timezone"]], format = "%Y-%m-%d %H:%M:%S %z"))
byMult_m____call <- quote(as.POSIXct(sprintf("2199-%02d-01" , (month(.dateTime) - 1L) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] + 1L), tz = .helpers[["timezone"]]))
byMult___H__call <- quote(as.POSIXct(sprintf("2199-01-01 %02d:00:00", hour(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byMult____M_call <- quote(as.POSIXct(sprintf("2199-01-01 00:%02d:00", minute(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
byMult_____Scall <- quote(as.POSIXct(sprintf("2199-01-01 00:00:%02d", second(.dateTime) %/% .helpers[["multiplier"]] * .helpers[["multiplier"]] ), tz = .helpers[["timezone"]]))
to.fakeUTCdateTime <- function(.dateTime, .helpers) {
assertNAstatusPeriodicityOK(
.helpers[["na.status"]],
.helpers[["periodicity"]],
level = "error"
)
from <- .dateTime[1L]
if (as.POSIXlt(from)$isdst) {
from <- from - 3600L
}
.dateTime <- seq(
as.POSIXct(as.character(from, tz = .helpers[["timezone"]]), tz = "UTC"),
by = .helpers[["periodicity"]],
along.with = .dateTime
)
if (grepl("^\\d+ (month|year)(s?)$", .helpers[["periodicity"]]) &&
mday(.dateTime[1L]) > 28L) {
.dateTime <- rollback(.dateTime, .helpers[["periodicity"]])
}
.dateTime
}
NULL
byFasttimeY_____ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttimeY_____call)
} else {
eval(byFasttimeMultY_____call)
}
}
byFasttimeYQ____ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
eval(byFasttimeYQ____call)
}
byFasttimeYm____ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttimeYm____call)
} else {
eval(byFasttimeMultYm____call)
}
}
byFasttimeYmd___ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
eval(byFasttimeYmd___call)
}
byFasttimeYmdH__ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttimeYmdH__call)
} else {
eval(byFasttimeMultYmdH__call)
}
}
byFasttimeYmdHM_ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttimeYmdHM_call)
} else {
eval(byFasttimeMultYmdHM_call)
}
}
byFasttimeYmdHMS <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttimeYmdHMScall)
} else {
eval(byFasttimeMultYmdHMScall)
}
}
byFasttime______ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
eval(byFasttime______call)
}
byFasttime_Q____ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
eval(byFasttime_Q____call)
}
byFasttime_m____ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttime_m____call)
} else {
eval(byFasttimeMult_m____call)
}
}
byFasttime___H__ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttime___H__call)
} else {
eval(byFasttimeMult___H__call)
}
}
byFasttime____M_ <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttime____M_call)
} else {
eval(byFasttimeMult____M_call)
}
}
byFasttime_____S <- function(.dateTime, .helpers) {
assertFasttimeOK(.dateTime, .helpers)
if (.helpers[["multiplier"]] == 1L) {
eval(byFasttime_____Scall)
} else {
eval(byFasttimeMult_____Scall)
}
}
byY_____ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
if (.helpers[["multiplier"]] == 1L) {
eval(byY_____call)
} else {
eval(byMultY_____call)
}
}
byYQ____ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
eval(byYQ____call)
}
byYm____ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
if (.helpers[["multiplier"]] == 1L) {
eval(byYm____call)
} else {
eval(byMultYm____call)
}
}
byYmd___ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
eval(byYmd___call)
}
byYmdH__ <- function(.dateTime, .helpers) {
if (.helpers[["multiplier"]] == 1L) {
eval(byYmdH__call)
} else if (!grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
stop(
'Time zone must be "UTC", an equivalent, or an Etc/GMT zone when this TALF is used with a multiplier greater than one.',
call. = FALSE
)
} else {
eval(byMultYmdH__call)
}
}
byYmdHM_ <- function(.dateTime, .helpers) {
if (.helpers[["multiplier"]] == 1L) {
eval(byYmdHM_call)
} else {
eval(byMultYmdHM_call)
}
}
byYmdHMS <- function(.dateTime, .helpers) {
if (.helpers[["multiplier"]] == 1L) {
eval(byYmdHMScall)
} else {
eval(byMultYmdHMScall)
}
}
by______ <- function(.dateTime, .helpers) {
eval(by______call)
}
by_Q____ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
eval(by_Q____call)
}
by_m____ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
if (.helpers[["multiplier"]] == 1L) {
eval(by_m____call)
} else {
eval(byMult_m____call)
}
}
by___H__ <- function(.dateTime, .helpers) {
if (.helpers[["ignoreDST"]] && !grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT(\\+|-)?0?$",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
.dateTime <- to.fakeUTCdateTime(.dateTime, .helpers)
}
if (.helpers[["multiplier"]] == 1L) {
eval(by___H__call)
} else if (!grepl(
"^(Etc/)?(UCT|UTC)$|^(Etc/)?GMT",
.helpers[["timezone"]],
ignore.case = TRUE
)) {
stop(
'Time zone must be "UTC", an equivalent, or an Etc/GMT zone when this TALF is used with a multiplier greater than one.',
call. = FALSE
)
} else {
eval(byMult___H__call)
}
}
by____M_ <- function(.dateTime, .helpers) {
if (.helpers[["multiplier"]] == 1L) {
eval(by____M_call)
} else {
eval(byMult____M_call)
}
}
by_____S <- function(.dateTime, .helpers) {
if (.helpers[["multiplier"]] == 1L) {
eval(by_____Scall)
} else {
eval(byMult_____Scall)
}
}
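# Usage sketch (hypothetical .helpers list; these TALFs normally receive it from
# the surrounding aggregation machinery): each function maps timestamps to the
# start of their aggregation interval.
# .helpers <- list(timezone = "UTC", multiplier = 1L, ignoreDST = FALSE,
#                  periodicity = "1 hour", na.status = "explicit")
# .dateTime <- as.POSIXct(c("2020-06-15 12:34:56", "2020-06-15 18:00:01"), tz = "UTC")
# byYmd___(.dateTime, .helpers)  #> both timestamps map to "2020-06-15"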
test_that("list_of columns can be unnested", {
df <- tidytable(x = 1:2, y = list_of(c(a = 1L), c(a = 1L, b = 2L)))
expect_named(unnest_wider.(df, y), c("x", "a", "b"))
df <- tidytable(x = 1:2, y = list_of(c(a = 1L), c(b = 1:2)))
expect_named(unnest_wider.(df, y), c("x", "a", "b1", "b2"))
})
test_that("names_sep creates unique names", {
df <- tidytable(
x = list("a", c("a", "b", "c"))
)
out <- unnest_wider.(df, x, names_sep = "_")
expect_named(out, c("x_1", "x_2", "x_3"))
df <- tidytable(
y = list(c(a = 1), c(b = 2, a = 1))
)
out <- unnest_wider.(df, y, names_sep = "_")
expect_named(out, c("y_a", "y_b"))
expect_equal(out$y_a, c(1, 1))
})
library("aroma.affymetrix")
verbose <- Arguments$getVerbose(-8, timestamp=TRUE)
dataSet <- "GSE9890"
chipType <- "HG-U133_Plus_2"
csR <- AffymetrixCelSet$byName(dataSet, chipType=chipType)
res <- doRMA(csR, drop=FALSE, verbose=verbose)
print(res)
ces <- res$ces
print(ces)
plm <- res$plm
pf <- getProbeAffinityFile(plm)
print(pf)
csN <- res$csN
csT <- csN[1]
print(csT)
listOfPriors <- list(probeAffinities=pf)
plmP <- RmaPlm(csT, tags="*,priors", listOfPriors=listOfPriors)
print(plmP)
priorList <- getListOfPriors(plmP)
print(priorList)
priors <- readPriorsByUnits(plmP, units=101:105)
str(priors)
fit(plmP, verbose=verbose)
cesP <- getChipEffectSet(plmP)
print(cesP)
theta <- extractTheta(ces[1], drop=TRUE)
thetaP <- extractTheta(cesP, drop=TRUE)
print(all.equal(thetaP, theta))
rho <- cor(thetaP, theta)
print(rho)
stopifnot(rho > 0.995)
toPNG(getFullName(cesP), tags=c(getNames(cesP), class(plmP)[1], "withAndWithoutPriors"), {
plot(theta, thetaP, xlab="original", ylab="priors", main="RMA chip effect estimates")
abline(a=0, b=1)
})
ggpattern_aes <- readr::read_delim(
"aes_name | ggplot_name | aes_type | scale_default
pattern | | discrete | c('stripe', 'crosshatch', 'circle')
pattern_type | | discrete | NULL
pattern_subtype | | discrete | NULL
pattern_angle | | continuous | c(0, 90)
pattern_density | | continuous | c(0, 0.5)
pattern_spacing | | continuous | c(0.01, 0.1)
pattern_xoffset | | continuous | c(0.01, 0.1)
pattern_yoffset | | continuous | c(0.01, 0.1)
pattern_alpha | alpha | continuous | c(0.1, 1)
pattern_linetype | linetype | discrete | NULL
pattern_size | size | continuous | NULL
pattern_shape | shape | continuous | NULL
pattern_colour | colour | discrete | NULL
pattern_fill | fill | discrete | NULL
pattern_fill2 | fill | discrete | NULL
pattern_aspect_ratio | | continuous | c(0.5, 2)
pattern_key_scale_factor | | continuous | c(0.5, 2)
pattern_filename | | discrete | NULL
pattern_filter | | discrete | c('lanczos', 'box', 'spline', 'cubic')
pattern_gravity | | discrete | c('center', 'north', 'south', 'east', 'west', 'northeast', 'northwest', 'southeast', 'southwest')
pattern_scale | | continuous | c(0.5, 2)
pattern_orientation | | discrete | c('horizontal', 'vertical', 'radial')
pattern_phase | | continuous | NULL
pattern_frequency | | continuous | NULL
pattern_grid | | discrete | c('square', 'hex')
pattern_res | | continuous | NULL
pattern_rot | | continuous | c(0, 360)
", trim_ws = TRUE, delim = "|") |
solve_QP <- function( x, control ) {
x <- ROI:::as.no_V_bounds_OP( x )
L <- if( slam::is.simple_triplet_matrix(terms(objective(x))$L) ){
stopifnot( dim(terms(objective(x))$L)[1] == 1L )
as.numeric( as.matrix(terms(objective(x))$L) )
} else {
terms(objective(x))$L
}
if( !length(L) )
L <- double(length(objective(x)))
stopifnot( length(L) == length(objective(x)) )
out <- .quadprog_solve_QP(Q = terms(objective(x))$Q,
L = L,
mat = constraints(x)$L,
dir = constraints(x)$dir,
rhs = constraints(x)$rhs,
max = x$maximum,
control = control)
ROI_plugin_canonicalize_solution( solution = out$sol,
optimum = objective(x)(out$sol),
status = out$ierr,
solver = ROI_plugin_get_solver_name(getPackageName()),
message = out)
}
.quadprog_solve_QP <- function(Q, L, mat, dir, rhs, max, control) {
ind_eq <- which( dir == "==")
ind_geq <- which( (dir == ">=") | (dir == ">") )
ind_leq <- which( (dir == "<=") | (dir == "<") )
meq <- length(ind_eq)
Amat <- as.matrix(mat)
Amat[ ind_leq, ] <- -Amat[ ind_leq, ]
bvec <- rhs
bvec[ ind_leq ] <- -bvec[ ind_leq ]
if( length(ind_eq) ) {
Amat <- rbind( Amat[ ind_eq, ], Amat[ -ind_eq, ] )
bvec <- c( bvec[ ind_eq ], bvec[ -ind_eq ] )
}
Amat <- t(Amat)
Amat[ is.infinite(Amat) & (Amat <= 0) ] <- -.Machine$double.xmax
Amat[ is.infinite(Amat) & (Amat >= 0) ] <- .Machine$double.xmax
bvec[ is.infinite(bvec) & (bvec <= 0) ] <- -.Machine$double.xmax
bvec[ is.infinite(bvec) & (bvec >= 0) ] <- .Machine$double.xmax
dvec <- if( max )
L
else
-L
Dmat <- if( max )
-as.matrix(Q)
else
as.matrix(Q)
factorized <- control$factorized
if( is.null(factorized) )
factorized <- formals(solve.QP)$factorized
out <- tryCatch( solve.QP(Dmat = Dmat, dvec = dvec, Amat = Amat, bvec = bvec, meq = meq, factorized = factorized), error = identity )
if( inherits(out, "error") ) {
ierr <- if( out$message == "constraints are inconsistent, no solution!" )
1L
else if( out$message == "matrix D in quadratic function is not positive definite!" )
2L
else
3L
out <- list( solution = rep(NA_real_, nrow(Dmat)), ierr = ierr )
} else out$ierr <- 0L
out$sol <- out$solution
out
}
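# Worked example (illustrative, assuming the quadprog package is attached):
# minimize 0.5*(x^2 + y^2) - x - 2y subject to x + y <= 1. The helper negates
# the <= row to match quadprog's A'b >= b0 convention; the optimum is the
# projection of the unconstrained solution (1, 2) onto x + y = 1, i.e. (0, 1).
# library(quadprog)
# out <- .quadprog_solve_QP(Q = diag(2), L = c(-1, -2),
#                           mat = matrix(c(1, 1), nrow = 1), dir = "<=",
#                           rhs = 1, max = FALSE, control = list())
# out$sol  #> 0 1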
.add_status_codes <- function(){
solver <- ROI_plugin_get_solver_name( getPackageName() )
ROI_plugin_add_status_code_to_db(solver,
0L,
"OPTIMAL",
"Solution is optimal",
0L
)
ROI_plugin_add_status_code_to_db(solver,
1L,
"INCONSISTENT",
"Constraints are inconsistent, no solution."
)
ROI_plugin_add_status_code_to_db(solver,
2L,
"NOT_POSITIVE_DEFINITE",
"quadratic term in function is not positive definite."
)
ROI_plugin_add_status_code_to_db(solver,
3L,
"ROI_INTERFACE_ERROR",
"contact the plugin maintainer."
)
invisible(TRUE)
}
store_marshal_value.tar_nonexportable <- function(store, target) {
object <- store_marshal_object(target$store, target$value$object)
target$value <- value_init(object, iteration = target$settings$iteration)
}
store_unmarshal_value.tar_nonexportable <- function(store, target) {
object <- store_unmarshal_object(target$store, target$value$object)
target$value <- value_init(object, iteration = target$settings$iteration)
}
power.r <- function(n, delta, sig.level = .05){
tc <- qt(p=sig.level/2,df=n-2,lower.tail=FALSE)
rc <- sqrt((tc^2)/((tc^2)+(n-2)))
fisher.z <- function(r){(1/2)*log((1+r)/(1-r))}
Zl <- (-fisher.z(rc)-fisher.z(delta))/(1/sqrt(n-3))
Zu <- (fisher.z(rc)-fisher.z(delta))/(1/sqrt(n-3))
pnorm(Zl,lower.tail=TRUE) + pnorm(Zu,lower.tail=FALSE)
}
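# Worked example (approximate, from the formulas above): power to detect a
# population correlation of 0.3 with n = 50 at the default two-sided .05 level.
# power.r(n = 50, delta = 0.3)  #> roughly 0.56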
library(ggplot2)
library(scales)
library(reshape2)
load("output/users-data-US.rdata")
load("data/tweet-counts.rdata")
results <- merge(users.tweets, users)
results$q <- cut(results$theta, breaks=seq(-3, 3, 0.05))
counts1 <- aggregate(results$obama, by=list(q=results$q), FUN=sum)
counts1$keyword <- 'Obama'
counts2 <- aggregate(results$romney, by=list(q=results$q), FUN=sum)
counts2$keyword <- 'Romney'
counts <- rbind(counts1, counts2)
counts$theta <- rep(seq(-(3-0.025), 3, 0.05), 2)
p <- ggplot(counts, aes(x=theta, y=x))
pq <- p + geom_bar(stat="identity") +
facet_wrap(~keyword,nrow=1) +
scale_x_continuous("Estimated Ideology", limits=c(-3, 3)) +
scale_y_continuous("Count of Sent Tweets",
breaks=c(0,50000,100000,150000,200000),
labels=c("0", "50K", "100K", "150K", "200K"),
expand=c(0,0)) +
theme_bw()
pq
ggsave(filename="plots/figure6.pdf", plot=pq,
height=3, width=9)
load("data/retweets-data.rdata")
load("output/users-data-US.rdata")
load("output/results-elites-US.rdata")
users <- users[,c("uid", "theta")]
results <- results[,c("screen_name", "phi")]
names(results) <- c("uid", "theta")
users <- rbind(users, results)
names(users)[1] <- 'retweeter_uid'
retweets <- merge(retweets, users)
names(retweets)[4] <- "phat_y"
names(users)[1] <- 'retweeted_uid'
retweets <- merge(retweets, users)
names(retweets)[5] <- "phat_x"
min <- -3
max <- 3
breaks <- 0.10
expand_data <- function(breaks=0.10, candidate, min=-3, max=3){
x <- retweets$phat_x[retweets$candidate==candidate]
y <- retweets$phat_y[retweets$candidate==candidate]
x <- (round((x - min) / breaks, 0) * breaks) + min
y <- (round((y - min) / breaks, 0) * breaks) + min
tab <- table(x, y)
tab <- melt(tab)
tab$prop <- tab$value/sum(tab$value)
tab$candidate <- candidate
return(tab)
}
obamaxy <- expand_data(breaks=0.20, candidate="Obama")
romneyxy <- expand_data(breaks=0.20, candidate="Romney")
xy <- rbind(obamaxy, romneyxy)
sum(xy$prop[xy$x<0 & xy$y<0 & xy$candidate=="Obama"])
sum(xy$prop[xy$x<0 & xy$y<0 & xy$candidate=="Romney"])
sum(xy$prop[xy$x>0 & xy$y>0 & xy$candidate=="Obama"])
sum(xy$prop[xy$x>0 & xy$y>0 & xy$candidate=="Romney"])
tw <- sum(xy$value[xy$y<(-0.5)])
sum(xy$value[xy$y<(-0.5) & xy$x>(-0.5)])/tw
tw <- sum(xy$value[xy$y>(0.5)])
sum(xy$value[xy$y>(0.5) & xy$x<(0.5)])/tw
p <- ggplot(xy, aes(x, y))
pq <- p + geom_tile(aes(fill=prop), colour="white") + scale_fill_gradient(name="% of Tweets",
low = "white", high = "black", labels=percent_format()) +
labs(x="Estimated Ideology of Retweeter", y="Estimated Ideology of Author") +
scale_y_continuous(expand=c(0,0), breaks=(-2:2), limits=c(-2.5, 2.5)) +
scale_x_continuous(expand=c(0,0), breaks=(-2:2), limits=c(-2.5, 2.5)) +
facet_grid(. ~ candidate) +
theme(panel.border=element_rect(fill=NA), panel.background = element_blank()) +
coord_equal()
pq
ggsave(filename="plots/figure7.pdf", plot=pq,
height=4, width=8)
library(stabm)
listStabilityMeasures()
feats = list(1:3, 1:4, c(1:3, 5:7))
stabilityJaccard(features = feats)
stabilityNogueira(features = feats, p = 10)
mat = 0.92 ^ abs(outer(1:10, 1:10, "-"))
set.seed(1)
stabilityIntersectionCount(features = feats, sim.mat = mat, N = 1000)
plotFeatures(feats)
library(rpart)
data("BostonHousing2", package = "mlbench")
dataset = subset(BostonHousing2, select = -cmedv)
fit_tree = function(target = "medv", data = dataset, ratio = 0.67, cp = 0.01) {
n = nrow(data)
i = sample(n, n * ratio)
formula = as.formula(paste(target, "~ ."))
model = rpart::rpart(formula = formula, data = data, subset = i,
control = rpart.control(maxsurrogate = 0, cp = cp))
names(model$variable.importance)
}
set.seed(1)
fit_tree()
set.seed(1)
selected_features = replicate(30, fit_tree(), simplify = FALSE)
Reduce(intersect, selected_features)
sort(table(unlist(selected_features)), decreasing = TRUE)
plotFeatures(selected_features)
stabilityJaccard(selected_features)
set.seed(1)
selected_features2 = replicate(30, fit_tree(cp = 0.02), simplify = FALSE)
stabilityJaccard(selected_features2)
plotFeatures(selected_features2)
dataset2 = subset(BostonHousing2, select = -town)
dataset2$chas = as.numeric(dataset2$chas)
set.seed(1)
selected_features3 = replicate(30, fit_tree(target = "rm", data = dataset2, cp = 0.075),
simplify = FALSE)
sim.mat = abs(cor(subset(dataset2, select = -rm)))
sel.feats = unique(unlist(selected_features3))
sim.mat[sel.feats, sel.feats]
plotFeatures(selected_features3, sim.mat = sim.mat)
stabilityIntersectionCount(selected_features3, sim.mat = sim.mat, N = 1000)
no.sim.mat = diag(nrow(sim.mat))
colnames(no.sim.mat) = row.names(no.sim.mat) = colnames(sim.mat)
stabilityIntersectionCount(selected_features3, sim.mat = no.sim.mat)
set.seed(1)
inds = sample(nrow(dataset2), 50)
dataset.cluster = dataset2[inds, ]
km = replicate(30, kmeans(dataset.cluster, centers = 3), simplify = FALSE)
best = which.min(sapply(km, function(x) x$tot.withinss))
best.centers = km[[best]]$centers
km.clusters = lapply(km, function(kmi) {
dst = as.matrix(dist(rbind(best.centers, kmi$centers)))[4:6, 1:3]
rownames(dst) = colnames(dst) = 1:3
new.cluster.names = numeric(3)
while(nrow(dst) > 0) {
min.dst = which.min(dst)
row = (min.dst - 1) %% nrow(dst) + 1
row.o = as.numeric(rownames(dst)[row])
col = ceiling(min.dst / nrow(dst))
col.o = as.numeric(colnames(dst)[col])
new.cluster.names[row.o] = col.o
dst = dst[-row, -col, drop = FALSE]
}
new.cluster.names[kmi$cluster]
})
clusters = lapply(1:3, function(i) {
lapply(km.clusters, function(kmc) {
which(kmc == i)
})
})
stab.cl = sapply(clusters, stabilityJaccard)
stab.cl
w = sapply(clusters, function(cl) {
mean(lengths(cl))
})
sum(stab.cl * w) / sum(w)
context("SMC complete rankings: sequence")
set.seed(994)
data = sushi_rankings[1:100,]
n_items <- dim(sushi_rankings)[2]
leap_size = floor(n_items/5)
metric = "footrule"
alpha_vector <- seq(from = 0, to = 15, by = 1)
iter = 1e2
degree <- 10
logz_estimate <- estimate_partition_function(method = "importance_sampling",
alpha_vector = alpha_vector,
n_items = n_items, metric = metric,
nmc = iter, degree = degree)
nmc = 20
burnin=5
model_fit <- compute_mallows(rankings = data, nmc = nmc, metric = metric, leap_size =leap_size,
alpha_prop_sd = 0.15, logz_estimate = logz_estimate)
model_fit$burnin = burnin
alpha_samples_table = data.frame(iteration = 1:nmc , value = model_fit$alpha$value)
alpha_samples_table = alpha_samples_table[(burnin+1):nmc,]
rho_0 = c(4,5,2,6,8,3,9,1,7,10)
alpha_0 = 1.7
mcmc_rho_matrix = matrix(model_fit$rho$value, ncol = n_items, nrow = nmc, byrow=TRUE)
mcmc_times = 5
num_new_obs = 10
Time = dim(data)[1]/num_new_obs
N = 100
test <- smc_mallows_new_users_complete(
R_obs = data, n_items = n_items, metric = metric,
leap_size = leap_size, N = N, Time = Time,
logz_estimate = logz_estimate, mcmc_kernel_app = mcmc_times,
alpha_prop_sd = 0.1, lambda = 0.001, alpha_max = 1e6,
num_new_obs = num_new_obs, verbose = FALSE
)
test_that("Output of smc_mallows_new_users_complete is OK", {
expect_s3_class(test, "SMCMallows")
expect_length(test, 2)
expect_named(test, c("rho_samples", "alpha_samples"))
expect_equal(dim(test$rho_samples), c(100, 10, 111))
expect_equal(dim(test$alpha_samples), c(100, 111))
})
rho_temp <- compute_posterior_intervals_rho(
output = test$rho_samples[,,Time+1], nmc = N, burnin = 0
)
rho_cp <- compute_rho_consensus(
output = test$rho_samples[,,Time+1], nmc = N, burnin = 0, C = 1, type = "CP"
)
rho_map <- compute_rho_consensus(output = test$rho_samples[,,Time+1], nmc = N, burnin = 0, C = 1, type = "MAP")
test_that("Output of compute_posterior_intervals_rho is OK", {
expect_is(rho_temp, "tbl_df")
expect_length(rho_temp, 7)
expect_named(
rho_temp,
c(
"item", "parameter", "mean", "median", "conf_level", "hpdi",
"central_interval"
)
)
expect_equivalent(sapply(rho_temp, length), rep(10, 7))
})
alpha_samples_table = data.frame(
iteration = 1:N , value = test$alpha_samples[,Time+1]
)
alpha_posterior_intervals = compute_posterior_intervals_alpha(
output = test$alpha_samples[,Time+1], nmc = N, burnin = 0
)
test_that("Output of compute_posterior_intervals_alpha is OK", {
expect_is(alpha_posterior_intervals, "tbl_df")
expect_length(alpha_posterior_intervals, 6)
expect_named(
alpha_posterior_intervals,
c(
"parameter", "mean", "median", "conf_level", "hpdi",
"central_interval"
)
)
expect_equivalent(sapply(alpha_posterior_intervals, length), rep(1, 6))
})
context("SMC complete rankings: breakdown")
test_that("get_mallows_loglik() in smc_mallows_new_users_complete() works", {
data <- sushi_rankings[1:100, ]
n_users <- nrow(data)
n_items <- ncol(sushi_rankings)
num_new_obs <- 10
Time <- nrow(data) / num_new_obs
N <- 100
rho_samples <- array(data=0, dim=c(N, n_items, (n_users + Time + 1)))
for (ii in seq_len(N)){
rho_samples[ii, , 1] <- sample(seq_len(n_items), n_items, replace=FALSE)
}
alpha_samples <- matrix(nrow=N, ncol=(n_users + Time + 1))
alpha_samples[, 1] <- rexp(N, rate=1)
alpha_vector <- seq(from = 0, to = 15, by = 1)
iter <- 3e2
degree <- 10
logz_estimate <- estimate_partition_function(
method="importance_sampling", alpha_vector=alpha_vector,
n_items=n_items, metric=metric, nmc=iter, degree=degree
)
num_obs <- 0
out_loglik <- vector(mode="numeric", length=Time)
for (tt in seq_len(Time)) {
num_obs <- num_obs + num_new_obs
new_observed_rankings <- data[(num_obs - num_new_obs + 1):num_obs, ]
rho_samples[, , tt + 1] <- rho_samples[, , tt]
alpha_samples[, tt + 1] <- alpha_samples[, tt]
for (ii in seq_len(N)) {
alpha_samples_ii <- alpha_samples[ii, tt + 1]
rho_samples_ii <- rho_samples[ii, , tt + 1]
log_z_alpha <- BayesMallows:::get_partition_function(
n_items, alpha_samples_ii, NULL, logz_estimate, metric
)
log_likelihood <- get_mallows_loglik(
alpha_samples_ii, t(rho_samples_ii), n_items,
new_observed_rankings, metric
)
}
out_loglik[tt] <- log_likelihood
}
tolerance <- 0.1
expect_gt(max(out_loglik), mean(out_loglik) * (1 + tolerance))
expect_lt(min(out_loglik), mean(out_loglik) * (1 - tolerance))
})
context("retrieveTestCaseDefinitions")
source('pathResolver.R')
sample_folder <- file.path(computeRootPath(), 'code-samples')
tt <- vector('list', 8)
tt[[1]] <- proc.time()
files <- list.files(sample_folder, pattern = glob2rx('*.R'), recursive = TRUE, full.names = TRUE)
files <- files[grep('sample-classes', files, fixed = TRUE, invert = TRUE)]
files <- files[grep('no-instrumentation', files, fixed = TRUE, invert = TRUE)]
files <- files[grep('full-instrumentation', files, fixed = TRUE, invert = TRUE)]
files <- files[grep('tc-defs', files, fixed = TRUE, invert = TRUE)]
files <- files[grep('fun-defs', files, fixed = TRUE, invert = TRUE)]
.sf <- sapply(files, source, encoding = 'UTF-8', keep.source = TRUE, simplify = FALSE)
names(tt) <- c('start', paste0('source n=', length(files)), 'reify', 'verifyObjectName', 'retrieveFunctionReturnTypes',
'verifyType', 'checkResult', 'testthat')
tt[[2]] <- proc.time()
sample_names <- removeFilenameExtension(basename(files))
sample_objects <- sapply(sample_names, function(e) {
eval(parse(text = paste0(e, ifelse(endsWith(e, 'R6'), '$new', ''), '()')))
})
names(sample_objects) <- files
tt[[3]] <- proc.time()
name_compliance <- lapply(sample_objects, verifyObjectNames)
l <- length(name_compliance[[1]])
dcond <- rbindlist(lapply(name_compliance, function(e) e[4:(l - 1)]))
tt[[4]] <- proc.time()
tcd <- lapply(sample_objects, retrieveTestCaseDefinitions)
tt[[5]] <- proc.time()
tcd_b <- unlist(lapply(tcd, function(e) is.data.table(e)))
tt[[6]] <- proc.time()
expectedResult <- function(files_s) {
data.table(file = files_s,
expected_result =
ifelse(grepl('tcd-defs|both-defs', files_s, perl = TRUE),
grepl('good', files_s, perl = TRUE),
FALSE)
)
}
er <- expectedResult(files)
balance <- copy(er)
balance[, `:=`(result = tcd_b)]
balance[, `:=`(status = ifelse(result == expected_result, 'success', 'failure'), fn = basename(file))]
if (nrow(balance[status == 'failure']) > 0) print(balance[status == 'failure'])
tt[[7]] <- proc.time()
test_that("retrieveTestCaseDefinitions", {
mtf <- function(k) {
expect_equal(balance$status[!!k], 'success')
}
lapply(seq_len(nrow(balance)), mtf)
})
tt[[8]] <- proc.time()
if (isAuditable()) {
sapply(seq_len(length(tt) - 1), function(k) cat(names(tt)[k], (tt[[k + 1]] - tt[[k]])[3], '\n'))
}
test_that("retrieveTestCaseDescriptions", {
expect_true(is.character(retrieveTestCaseDescriptions(Addition())))
expect_true(is.data.table(retrieveTestCaseDescriptions(AdditionTCPartial())))
})
earl <- function(...,
moPropen,
moMain,
moCont,
data,
response,
txName,
regime,
iter = 0L,
fSet = NULL,
lambdas = 0.5,
cvFolds = 0L,
surrogate = "hinge",
kernel = "linear",
kparam = NULL,
verbose = 2L) {
if (missing(x = moPropen)) moPropen <- NULL
if (is.null(x = moPropen)) stop("moPropen must be provided")
moPropen <- .checkModelObjOrListModelObjSubset(object = moPropen,
nm = 'moPropen')
if (is(object = moPropen, class2 = "ModelObj_SubsetList")) {
if (is.null(x = fSet)) {
stop("if subset structure in moPropen, fSet must be provided.")
}
}
if (missing(x = moMain)) moMain <- NULL
moMain <- .checkModelObjOrListModelObjSubset(object = moMain, nm = 'moMain')
if (is(object = moMain, class2 = "ModelObj_SubsetList")) {
if (is.null(x = fSet)) {
stop("if subset structure in moMain, fSet must be provided.")
}
}
if (missing(x = moCont)) moCont <- NULL
moCont <- .checkModelObjOrListModelObjSubset(object = moCont, nm = 'moCont')
if (is(object = moCont, class2 = "ModelObj_SubsetList")) {
if (is.null(x = fSet)) {
stop("if subset structure in moCont, fSet must be provided.")
}
}
if (is.null(x = moMain) || is.null(x = moCont)) {
iter <- NULL
} else {
if (is(object = moCont, class2 = "ModelObj_SubsetList") &&
!is(object = moMain, class2 = "ModelObj_SubsetList")) {
stop("moMain and moCont must both be ModelObjSubset or both be modelObj")
}
if (is(object = moCont, class2 = "modelObj") &&
!is(object = moMain, class2 = "modelObj")) {
stop("moMain and moCont must both be ModelObjSubset or both be modelObj")
}
}
data <- .verifyDataFrame(data = data)
response <- .verifyVectorResponse(response = response)
data <- .checkTxData(txName = txName, data = data)
regime <- .verifyRegime(regime = regime, fSet = fSet)
if (is.list(x = kernel)) {
if (!is.function(x = fSet)) {
stop("fSet must be a function when using multiple kernels")
}
if (any(is.null(x = names(x = kernel)))) {
stop("if multiple kernels, kernel must be a named list")
}
if (any(nchar(x = names(x = kernel)) == 0L)) {
stop("if multiple kernels, kernel must be a named list")
}
if (is.list(x = kparam)) {
if (!all(names(x = kparam) %in% names(x = kernel)) |
!all(names(x = kernel) %in% names(x = kparam))) {
stop("names of kernel and kparam list elements do not match")
}
}
if (!is.list(x = regime)) {
stop("a list of regimes is required for multiple kernels")
}
if (!all(names(x = regime) %in% names(x = kernel)) |
!all(names(x = kernel) %in% names(x = regime))) {
stop("names of kernel and regime list elements do not match")
}
kernelObj <- list()
cvHold <- NULL
for (i in 1L:length(x = kernel)) {
kname <- names(x = kernel)[i]
cvVerified <- .verifyCV(lambdas = lambdas,
cvFolds = cvFolds,
kparam = kparam[[ kname ]])
if (!is.null(x = cvVerified$cvFolds)) {
cvHold <- cvVerified$cvFolds
}
kernelObj[[ kname ]] <- .newKernelObj(kernel = tolower(kernel[[ i ]]),
data = data,
model = regime[[ kname ]],
kparam = cvVerified$kparam)@kernel
}
kernelObj <- new("SubsetList", kernelObj)
    cvVerified$cvFolds <- cvHold
} else {
cvVerified <- .verifyCV(lambdas = lambdas,
cvFolds = cvFolds,
kparam = kparam)
kernelObj <- .newKernelObj(kernel = tolower(kernel),
data = data,
model = regime,
kparam = cvVerified$kparam)@kernel
}
fSet <- .verifyFSet(fSet = fSet)
surrogate <- .verifySurrogate(surrogate = surrogate)
iter <- .verifyIter(iter = iter)
if (is.logical(x = verbose)) verbose <- verbose*1L
verbose <- as.integer(x = round(x = verbose))
if (verbose > 2L) verbose <- 2L
if (verbose < 0L) verbose <- 0L
result <- .newEARL(moPropen = moPropen,
moMain = moMain,
moCont = moCont,
data = data,
response = response,
txName = txName,
lambdas = cvVerified$lambdas,
cvFolds = cvVerified$cvFolds,
surrogate = surrogate,
iter = iter,
guess = NULL,
kernel = kernelObj,
fSet = fSet,
suppress = verbose, ...)
result@analysis@call <- match.call()
return( result )
}
isExpensiveExampleOk = function() {
Sys.getenv("R_EXPENSIVE_EXAMPLE_OK") == "TRUE"
}
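# Usage sketch (illustrative, not from the original file): gate an expensive
# example behind the R_EXPENSIVE_EXAMPLE_OK environment variable.
if (FALSE) {
  Sys.setenv(R_EXPENSIVE_EXAMPLE_OK = "TRUE")
  if (isExpensiveExampleOk()) {
    # run the long-running example here
  }
}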
assign("idwST.tcv",
function (formula, data, n.neigh, C, factor.p, progress=TRUE)
{
s0 = cbind(coordinates(data),data["t"]@data)
z = extractFormula(formula, data, newdata = data)$z
pred <- rep(NA_real_, length(z))
if(progress)
pb <- txtProgressBar(min = 0, max = length(z), char = "=", style = 3)
for (i in 1:length(z)) {
pred[i] <- idwST(formula, data[-i, ], newdata = data[i, ], n.neigh, C, factor.p, progress = FALSE)[, 4]
if(progress)
setTxtProgressBar(pb, i)
}
if(progress)
close(pb)
idw.pred <- data.frame(pred, NA, z, NA, NA, NA, s0)
colnames(idw.pred) <- c("var1.pred", "var1.var", "observed", "residual", "zscore", "fold", "x", "y", "t")
idw.pred[, 6] <- 1:length(z)
idw.pred[, 3] <- z
idw.pred[, 4] <- idw.pred[, 3] - idw.pred[, 1]
idw.pred
}
)
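# Usage sketch (hypothetical object and variable names): idwST.tcv() runs
# leave-one-out cross-validation of idwST(), refitting once per observation,
# so it can be slow on large spatio-temporal data sets.
# cv <- idwST.tcv(PM10 ~ 1, data = pm10_points, n.neigh = 10, C = 1,
#                 factor.p = 2, progress = TRUE)
# summary(cv$residual)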
dist.point <- function(sp_faxes_coord, ref_sp) {
sp_faxes_coord <- as.data.frame(sp_faxes_coord)
dist_sp <- as.matrix(stats::dist(sp_faxes_coord, method = "euclidean"))
dist_sp[which(rownames(dist_sp) == ref_sp), ]
}
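# Minimal sketch of dist.point() on synthetic coordinates: three species on two
# axes; the result is the Euclidean distance from "sp1" to every species.
if (FALSE) {
  coords <- matrix(c(0, 0, 1, 0, 0, 2), ncol = 2, byrow = TRUE,
                   dimnames = list(c("sp1", "sp2", "sp3"), c("PC1", "PC2")))
  dist.point(coords, ref_sp = "sp1")
}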
dist.nearneighb <- function(sp_faxes_coord, ref_sp) {
dist_sp <- as.matrix(stats::dist(sp_faxes_coord, method = "euclidean"))
dist_sp[which(dist_sp == 0)] <- NA
nn_id <- dist_sp - apply(dist_sp, 1, min, na.rm = TRUE)
nn_id[which(nn_id != 0)] <- NA
nn_id[which(nn_id == 0)] <- 1
  nn_refsp_id <- nn_id[which(rownames(nn_id) == ref_sp), ]
  nn_refsp_id <- nn_refsp_id[which(nn_refsp_id == 1)]
  nn_refsp_id <- as.data.frame(nn_refsp_id)
  # names of the nearest neighbour(s) of the reference species
  nn_nm <- rownames(nn_refsp_id)
  nn_ref_sp_dist <- dist_sp[which(rownames(dist_sp) == ref_sp),
                            which(colnames(dist_sp) %in% as.vector(nn_nm[1]))]
  return_list <- list(nn_nm, nn_ref_sp_dist)
names(return_list) <- c(
"nearest neighbour identity",
"distance of the reference species to its nearest neighbour")
return(return_list)
}
mst.computation <- function(sp_faxes_coord_k) {
  sp_dist_asb_k <- stats::dist(sp_faxes_coord_k, method = "euclidean")
mst_asb_k <- ape::mst(sp_dist_asb_k)
mst_asb_k <- stats::as.dist(mst_asb_k)
return(mst_asb_k)
}
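# mst.computation() returns the minimum spanning tree of one assemblage as a
# "dist" object (requires the ape package). Sketch on synthetic data:
if (FALSE) {
  set.seed(1)
  pts <- matrix(stats::rnorm(10), ncol = 2,
                dimnames = list(paste0("sp", 1:5), c("PC1", "PC2")))
  mst.computation(pts)
}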
vertices <- function(sp_faxes_coord, order_2D = FALSE, check_input = FALSE) {
if (check_input) {
if (is.null(rownames(sp_faxes_coord))) {
stop("No row names provided in species*coordinates matrix. Please add ",
"species names as row names.")
}
if (any(is.na(sp_faxes_coord))) {
stop("The species x coordinates matrix must not contain NA. Please ",
"check.")
}
if (nrow(sp_faxes_coord) <= ncol(sp_faxes_coord)) {
stop("Number of species should be strictly higher than number of axes ",
"for computing the convex hull.")
}
if (order_2D && ncol(sp_faxes_coord) != 2) {
stop("Ordering vertices could be done only if there are 2 axes.")
}
}
conv_Fx <- tryCatch(
geometry::convhulln(sp_faxes_coord, options = "Fx"),
error = function(err) "NA")
if (!is.character(unlist(conv_Fx))) {
vert_nm <- row.names(sp_faxes_coord)[sort(unique(as.vector((conv_Fx))))]
if (order_2D && ncol(sp_faxes_coord) == 2) {
vert_nm <- vert_nm[order(-1 * atan2(sp_faxes_coord[vert_nm,
1] -
mean(range(sp_faxes_coord[vert_nm,
1])),
sp_faxes_coord[vert_nm, 2] -
mean(range(sp_faxes_coord[vert_nm,
2]))))]
}
}
if (is.character(unlist(conv_Fx))) vert_nm <- NA
return(vert_nm)
}
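# Sketch of vertices() (requires the geometry package): names of the species on
# the convex hull; with order_2D = TRUE and two axes they come back in circular
# order, which is convenient for drawing the hull as a polygon.
if (FALSE) {
  set.seed(2)
  pts <- matrix(stats::runif(20), ncol = 2,
                dimnames = list(paste0("sp", 1:10), c("PC1", "PC2")))
  vertices(pts, order_2D = TRUE, check_input = TRUE)
}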
meteo2STFDF <- function(obs,
stations,
obs.staid.time=c(1,2),
stations.staid.lon.lat=c(1,2,3),
crs=CRS(as.character(NA)),
delta=NULL
) {
ids <- unique(stations[,obs.staid.time[1]])
time <- unique(obs[,obs.staid.time[2] ])
time <- as.POSIXlt(sort(time))
nt <- length(time)
ns <- length(ids)
tempdf <- data.frame(rep(ids,nt),rep(time,each=ns) )
names(tempdf) <- names(obs)[obs.staid.time]
data <- join(tempdf,obs)
data <- data[ order( data[,obs.staid.time[1] ]), ]
data <- data[ order( data[,obs.staid.time[2] ]), ]
row.names(data) <- 1:length(data[,1])
ids <- data.frame(staid=ids)
names(ids) <- names(stations) [ stations.staid.lon.lat[1] ]
st <- join( ids, stations)
names(st)[ stations.staid.lon.lat[2:3] ] <- c('lon', 'lat')
  coordinates(st) <- ~ lon + lat
  st@proj4string <- crs
  data <- as.data.frame(data[, -obs.staid.time])
  names(data) <- names(obs)[-obs.staid.time]
if (is.null(delta) && length(time)==1){
endTime <- time + 86400
stfdf <-STFDF(st, time , data, endTime)
} else if (is.null(delta) && length(time)!=1) {
stfdf <-STFDF(st, time , data)
} else {
endTime <- time + delta
stfdf <-STFDF(st, time , data, endTime)
}
  bools2 <- apply(matrix(stfdf@data[, 1],
                         nrow = length(stfdf@sp), byrow = FALSE), MARGIN = 1,
                  FUN = function(x) sum(is.na(x)))
  # drop stations whose entire time series is missing
  stfdf <- stfdf[bools2 != nt, drop = FALSE]
  row.names(stfdf@sp) <- 1:nrow(stfdf@sp)
  return(stfdf)
}
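# Usage sketch (hypothetical data frames; relies on sp/spacetime and plyr::join):
# 'obs' has one row per station/time with the measurements, 'stations' has the
# station id plus lon/lat; the result is an STFDF with one slot per station/time
# combination, NA-filled where observations are missing.
# stfdf <- meteo2STFDF(obs, stations,
#                      obs.staid.time = c(1, 2),
#                      stations.staid.lon.lat = c(1, 2, 3),
#                      crs = CRS("+proj=longlat +datum=WGS84"))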
Dim.default <- function(object, ...) dim(object)
getCovariate.data.frame <-
function(object, form = formula(object), data)
{
if (!(inherits(form, "formula"))) {
stop("'form' must be a formula")
}
aux <- getCovariateFormula(form)
if (length(all.vars(aux)) > 0) {
eval(aux[[2]], object)
} else {
rep(1, dim(object)[1])
}
}
getData.nls <- function(object)
{
mCall <- object$call
data <- eval(if("data" %in% names(object)) object$data else mCall$data)
if (is.null(data)) return(data)
naAct <- object[["na.action"]]
if (!is.null(naAct)) {
data <- if (inherits(naAct, "omit")) data[-naAct, ]
else if (inherits(naAct, "exclude")) data
else eval(mCall$na.action)(data)
}
subset <- mCall$subset
if (!is.null(subset)) {
subset <- eval(asOneSidedFormula(subset)[[2]], data)
data <- data[subset, ]
}
data
}
getGroups.data.frame <-
function(object, form = formula(object), level, data, sep = "/")
{
if (!missing(data)) {
stop( "data argument to \"data.frame\" method for 'getGroups' does not make sense" )
}
if (inherits(form, "formula")) {
grpForm <- getGroupsFormula(form, asList = TRUE, sep = sep)
if (is.null(grpForm)) {
grpForm <- splitFormula(asOneSidedFormula(form[[length(form)]]),
sep = sep)
names(grpForm) <-
unlist( lapply( grpForm, function(el) deparse( el[[ length(el) ]] ) ) )
}
if (any(vapply(grpForm, function(el) length(all.vars(el)) != 1, NA)))
stop("invalid formula for groups")
form <- grpForm
} else if(is.list(form)) {
if (!all(vapply(form, function(el) inherits(el, "formula"), NA))) {
stop("'form' must have all components as formulas")
}
} else {
stop("'form' can only be a formula, or a list of formulas")
}
vlist <- lapply(form,
function(x, N) {
val <- eval(x[[length(x)]], object)
if (length(val) == 1L)
as.factor(rep(val, N))
else
as.factor(val)[drop = TRUE]
}, N = nrow(object))
if (length(vlist) == 1) return(vlist[[1]])
value <- do.call("data.frame", vlist)
row.names(value) <- row.names(object)
if (missing(level)) return(value)
if (is.character(level)) {
nlevel <- match(level, names(vlist))
if (any(aux <- is.na(nlevel))) {
stop(gettextf("level of %s does not match formula %s",
level[aux], sQuote(deparse(form))),
domain = "NA")
}
} else {
nlevel <- as.numeric(level)
if (any(aux <- is.na(match(nlevel, 1:ncol(value))))) {
stop(gettextf("level of %s does not match formula %s",
level[aux], sQuote(deparse(form))),
domain = "NA")
}
}
if (length(nlevel) > 1) return(value[, nlevel])
if (nlevel == 1) return(value[, 1])
value <- value[, 1:nlevel]
val <- as.factor(do.call("paste", c(lapply(as.list(value),
as.character), sep = sep)))
if (inherits(value[, 1], "ordered")) {
value <- value[do.call("order", value),]
aux <- unique(do.call("paste", c(lapply(as.list(value),
as.character), sep = sep)))
ordered(val, aux)
} else {
ordered(val, unique(as.character(val)))
}
}
getResponse.data.frame <-
function(object, form = formula(object))
{
if (!(inherits(form, "formula") && (length(form) == 3))) {
stop("'form' must be a two-sided formula")
}
eval(form[[2]], object)
}
getGroupsFormula.default <-
function(object, asList = FALSE, sep = "/")
{
form <- formula(object)
if (!inherits(form, "formula")){
stop("'form' argument must be a formula")
}
form <- form[[length(form)]]
if (!(length(form) == 3L && form[[1L]] == as.name("|")))
return(NULL)
val <- splitFormula(asOneSidedFormula(form[[3]]), sep = sep)
names(val) <- unlist(lapply(val, function(el) deparse(el[[2]])))
if (asList) as.list(val)
else as.formula(eval(parse(text = paste("~", paste(names(val),
collapse = sep)))))
}
Names.formula <-
function(object, data = list(), exclude = c("pi", "."), ...)
{
if (!is.list(data)) { return(NULL) }
allV <- all.vars(object)
allV <- allV[is.na(match(allV, exclude))]
if (length(allV) == 0) {
if (attr(terms(object), "intercept")) "(Intercept)"
} else if (!anyNA(match(allV, names(data))))
dimnames(model.matrix(object, model.frame(object, data)))[[2]]
}
Names.listForm <-
function(object, data = list(), exclude = c("pi", "."), ...)
{
pnames <- as.character(unlist(lapply(object, `[[`, 2L)))
nams <- lapply(object, function(el) Names(getCovariateFormula(el), data, exclude))
if (is.null(nams[[1]])) return(NULL)
val <- c()
for(i in seq_along(object))
val <- c(val,
if ((length(nams[[i]]) == 1) && (nams[[i]] == "(Intercept)"))
pnames[i]
else
paste(pnames[i], nams[[i]], sep = "."))
val
}
needUpdate.default <- function(object)
{
val <- attr(object, "needUpdate")
!is.null(val) && val
}
pairs.compareFits <-
function(x, subset, key = TRUE, ...)
{
object <- x
if(!missing(subset)) {
object <- object[subset,,]
}
dims <- dim(object)
if(dims[3] == 1) {
stop("at least two coefficients are needed")
}
dn <- dimnames(object)
coefs <- array(c(object), c(dims[1]*dims[2], dims[3]),
list(rep(dn[[1]], dims[2]), dn[[3]]))
if(dims[3] > 2) {
tt <- list(coefs = coefs,
grp = ordered(rep(dn[[2]], rep(dims[1], dims[2])),
levels = dn[[2]]))
args <- list(~ coefs,
data = tt,
groups = tt$grp,
panel = function(x, y, subscripts, groups, ...)
{
x <- as.numeric(x)
y <- as.numeric(y)
panel.superpose(x, y, subscripts, groups)
aux <- groups[subscripts]
aux <- aux == unique(aux)[1]
lsegments(x[aux], y[aux], x[!aux], y[!aux],
lty = 2, lwd = 0.5)
})
} else {
tt <- list(x = coefs[,1], y = coefs[,2],
grp = ordered(rep(dn[[2]], rep(dims[1], dims[2])),
levels = dn[[2]]))
args <- list(y ~ x,
data = tt,
groups = tt$grp,
panel = function(x, y, subscripts, groups, ...)
{
x <- as.numeric(x)
y <- as.numeric(y)
panel.grid()
panel.superpose(x, y, subscripts, groups)
aux <- groups[subscripts]
aux <- aux == unique(aux)[1]
lsegments(x[aux], y[aux], x[!aux], y[!aux],
lty = 2, lwd = 0.5)
}, xlab = dn[[3]][1], ylab = dn[[3]][2])
}
dots <- list(...)
args[names(dots)] <- dots
if(is.logical(key)) {
if(key && length(unique(tt$grp)) > 1) {
args[["key"]] <-
list(points = Rows(trellis.par.get("superpose.symbol"), 1:2),
text = list(levels = levels(tt$grp)), columns = 2)
}
} else {
args[["key"]] <- key
}
if(dims[3] > 2) do.call("splom", args) else do.call("xyplot", args)
}
plot.nls <-
function(x, form = resid(., type = "pearson") ~ fitted(.), abline,
id = NULL, idLabels = NULL, idResType = c("pearson", "normalized"),
grid, ...)
{
object <- x
if (!inherits(form, "formula")) {
stop("'form' must be a formula")
}
allV <- all.vars(asOneFormula(form, id, idLabels))
allV <- allV[is.na(match(allV,c("T","F","TRUE","FALSE")))]
if (length(allV) > 0) {
data <- getData(object)
if (is.null(data)) {
alist <- lapply(as.list(allV), as.name)
names(alist) <- allV
alist <- c(list(quote(data.frame)), alist)
mode(alist) <- "call"
data <- eval(alist, sys.parent(1))
} else {
if (any(naV <- is.na(match(allV, names(data))))) {
stop(sprintf(ngettext(sum(naV),
"%s not found in data",
"%s not found in data"),
allV[naV]), domain = NA)
}
}
} else data <- NULL
if (inherits(data, "groupedData")) {
ff <- formula(data)
rF <- deparse(getResponseFormula(ff)[[2]])
cF <- c_deparse(getCovariateFormula(ff)[[2]])
lbs <- attr(data, "labels")
unts <- attr(data, "units")
if (!is.null(lbs$x)) cL <- paste(lbs$x, unts$x) else cF <- NULL
if (!is.null(lbs$y)) rL <- paste(lbs$y, unts$y) else rF <- NULL
} else {
rF <- cF <- NULL
}
dots <- list(...)
args <- if (length(dots) > 0) dots else list()
data <- as.list(c(as.list(data), . = list(object)))
covF <- getCovariateFormula(form)
.x <- eval(covF[[2]], data)
if (!is.numeric(.x)) {
stop("covariate must be numeric")
}
argForm <- ~ .x
argData <- data.frame(.x = .x, check.names = FALSE)
if (is.null(xlab <- attr(.x, "label"))) {
xlab <- deparse(covF[[2]])
if (!is.null(cF) && (xlab == cF)) xlab <- cL
else if (!is.null(rF) && (xlab == rF)) xlab <- rL
}
if (is.null(args$xlab)) args$xlab <- xlab
respF <- getResponseFormula(form)
if (!is.null(respF)) {
.y <- eval(respF[[2]], data)
if (is.null(ylab <- attr(.y, "label"))) {
ylab <- deparse(respF[[2]])
if (!is.null(cF) && (ylab == cF)) ylab <- cL
else if (!is.null(rF) && (ylab == rF)) ylab <- rL
}
argForm <- .y ~ .x
argData[, ".y"] <- .y
if (is.null(args$ylab)) args$ylab <- ylab
}
grpsF <- getGroupsFormula(form)
if (!is.null(grpsF)) {
gr <- splitFormula(grpsF, sep = "*")
for(i in seq_along(gr)) {
auxGr <- all.vars(gr[[i]])
for(j in auxGr) {
argData[[j]] <- eval(as.name(j), data)
}
}
if (length(argForm) == 2)
argForm <- eval(parse(text = paste("~ .x |", deparse(grpsF[[2]]))))
else argForm <- eval(parse(text = paste(".y ~ .x |", deparse(grpsF[[2]]))))
}
args <- c(list(argForm, data = argData), args)
if (is.null(args$cex)) args$cex <- par("cex")
if (is.null(args$adj)) args$adj <- par("adj")
if (!is.null(id)) {
idResType <- match.arg(idResType)
id <-
switch(mode(id),
numeric = {
if ((id <= 0) || (id >= 1)) {
stop("'id' must be between 0 and 1")
}
as.logical(abs(resid(object, type = idResType)) >
-qnorm(id / 2))
},
call = eval(asOneSidedFormula(id)[[2]], data),
stop("'id' can only be a formula or numeric")
)
if (is.null(idLabels)) {
idLabels <- getGroups(object)
if (length(idLabels) == 0) idLabels <- 1:object$dims$N
idLabels <- as.character(idLabels)
} else {
if (mode(idLabels) == "call") {
idLabels <-
as.character(eval(asOneSidedFormula(idLabels)[[2]], data))
} else if (is.vector(idLabels)) {
if (length(idLabels <- unlist(idLabels)) != length(id)) {
stop("'idLabels' of incorrect length")
}
idLabels <- as.character(idLabels)
} else {
stop("'idLabels' can only be a formula or a vector")
}
}
}
if (missing(abline)) {
if (missing(form)) {
abline <- c(0, 0)
} else {
abline <- NULL
}
}
assign("abl", abline)
if (length(argForm) == 3) {
if (is.numeric(.y)) {
plotFun <- "xyplot"
if (is.null(args$panel)) {
args <- c(args,
panel = list(function(x, y, subscripts, ...)
{
x <- as.numeric(x)
y <- as.numeric(y)
dots <- list(...)
if (grid) panel.grid()
panel.xyplot(x, y, ...)
if (any(ids <- id[subscripts])){
ltext(x[ids], y[ids], idLabels[subscripts][ids],
cex = dots$cex, adj = dots$adj)
}
if (!is.null(abl)) {
if (length(abl) == 2)
panel.abline(a = abl, ...)
else panel.abline(h = abl, ...)
}
}))
}
} else {
plotFun <- "bwplot"
if (is.null(args$panel)) {
args <- c(args,
panel = list(function(x, y, ...)
{
if (grid) panel.grid()
panel.bwplot(x, y, ...)
if (!is.null(abl)) {
panel.abline(v = abl[1], ...)
}
}))
}
}
} else {
plotFun <- "histogram"
if (is.null(args$panel)) {
args <- c(args,
panel = list(function(x, ...)
{
if (grid) panel.grid()
panel.histogram(x, ...)
if (!is.null(abl)) {
panel.abline(v = abl[1], ...)
}
}))
}
}
if (missing(grid)) grid <- (plotFun == "xyplot")
do.call(plotFun, as.list(args))
}
plot.ACF <-
function(x, alpha = 0, xlab = "Lag", ylab = "Autocorrelation",
grid = FALSE, ...)
{
object <- x
ylim <- range(object$ACF)
if (alpha) {
assign("stdv", qnorm(1-alpha/2)/sqrt(attr(object,"n.used")))
stMax <- max(stdv)
ylim <- c(min(c(-stMax, ylim[1])), max(c(ylim[2], stMax)))
}
assign("alpha", as.logical(alpha))
assign("grid", grid)
xyplot(ACF ~ lag, object, ylim = ylim,
panel = function(x, y, ...) {
x <- as.numeric(x)
y <- as.numeric(y)
if (grid) panel.grid()
panel.xyplot(x, y, type = "h")
panel.abline(0, 0)
if (alpha) {
llines(x, stdv, lty = 2)
llines(x, -stdv, lty = 2)
}
}, xlab = xlab, ylab = ylab, ...)
}
plot.augPred <-
function(x, key = TRUE, grid = FALSE, ...)
{
labels <- list(xlab = paste(attr(x, "labels")$x, attr(x, "units")$x),
ylab = paste(attr(x, "labels")$y, attr(x, "units")$y))
labels <- labels[unlist(lapply(labels, function(el) length(el) > 0))]
args <- c(list(attr(x, "formula"),
groups = quote(.type),
data = x,
strip = function(...) strip.default(..., style = 1),
panel = if (length(levels(x[[".type"]])) == 2) {
function(x, y, subscripts, groups, ...) {
if (grid) panel.grid()
orig <- groups[subscripts] == "original"
panel.xyplot(x[orig], y[orig], ...)
panel.xyplot(x[!orig], y[!orig], ..., type = "l")
}
} else {
function(x, y, subscripts, groups, ...) {
if (grid) panel.grid()
orig <- groups[subscripts] == "original"
panel.xyplot(x[orig], y[orig], ...)
panel.superpose(x[!orig], y[!orig], subscripts[!orig],
groups, ..., type = "l")
}
}), labels)
dots <- list(...)
args[names(dots)] <- dots
if (is.logical(key) && key) {
levs <- levels(x[[".type"]])
if ((lLev <- length(levs)) > 2) {
lLev <- lLev - 1
levs <- levs[1:lLev]
aux <- !is.na(match(substring(levs, 1, 8), "predict."))
if (sum(aux) > 0) {
levs[aux] <- substring(levs[aux], 9)
}
args[["key"]] <-
list(lines = c(Rows(trellis.par.get("superpose.line"), 1:lLev),
list(size = rep(3, lLev))),
text = list(levels = levs), columns = min(6, lLev))
}
} else {
args[["key"]] <- key
}
assign("grid", grid)
do.call("xyplot", args)
}
plot.compareFits <-
function(x, subset, key = TRUE, mark = NULL, ...)
{
object <- x
if(!missing(subset)) {
object <- object[subset,,]
}
dims <- dim(object)
dn <- dimnames(object)
assign("mark", rep(mark, rep(dims[1] * dims[2], dims[3])))
tt <- data.frame(group = ordered(rep(dn[[1]], dims[2] * dims[3]),
levels = dn[[1]]),
coefs = as.vector(object),
what = ordered(rep(dn[[3]],
rep(dims[1] * dims[2], dims[3])), levels = dn[[3]]),
grp = ordered(rep(rep(dn[[2]], rep(dims[1], dims[2])),
dims[3]), levels = dn[[2]]))
args <- list(group ~ coefs | what,
data = tt,
scales = list(x=list(relation="free")),
strip = function(...) strip.default(..., style = 1),
xlab = "",
groups = tt$grp,
panel = function(x, y, subscripts, groups, ...)
{
x <- as.numeric(x)
y <- as.numeric(y)
dot.line <- trellis.par.get("dot.line")
panel.abline(h = y, lwd = dot.line$lwd,
lty = dot.line$lty, col = dot.line$col)
if(!is.null(mark)) {
panel.abline(v = mark[subscripts][1], lty = 2)
}
panel.superpose(x, y, subscripts, groups)
})
dots <- list(...)
args[names(dots)] <- dots
if(is.logical(key)) {
if(key && length(unique(tt$grp)) > 1) {
args[["key"]] <-
list(points = Rows(trellis.par.get("superpose.symbol"), 1:2),
text = list(levels = levels(tt$grp)), columns = 2)
}
} else {
args[["key"]] <- key
}
do.call("dotplot", args)
}
plot.Variogram <-
function(x, smooth, showModel, sigma = NULL, span = 0.6,
xlab = "Distance", ylab = "Semivariogram", type = "p", ylim,
grid = FALSE, ...)
{
object <- x
trlLin <- trellis.par.get("superpose.line")
modVrg <- attr(object, "modelVariog")
lineT <- 1
if (!is.na(match(type, c("l","o","b")))) {
lineT <- lineT + 1
}
if (missing(showModel)) {
showModel <- !is.null(modVrg)
}
if (showModel) {
if (is.null(modVrg)) {
stop("no model variogram available with 'showModel = TRUE'")
}
assign("ltyM", trlLin$lty[lineT])
assign("colM", trlLin$col[lineT])
assign("modVrg", modVrg)
lineT <- lineT + 1
}
if (missing(smooth)) {
smooth <- !showModel
}
if (smooth) {
assign("ltyS", trlLin$lty[lineT])
assign("colS", trlLin$col[lineT])
}
assign("smooth", smooth)
assign("showModel", showModel)
assign("span", span)
assign("type", type)
assign("sigma", sigma)
assign("grid", grid)
if (missing(ylim)) {
ylim <- c(0, max(object$variog))
}
xyplot(variog ~ dist, object, ylim = ylim,
panel = function(x, y, ...) {
if (grid) panel.grid()
panel.xyplot(x, y, type = type, ...)
if (showModel) {
panel.xyplot(modVrg$dist, modVrg$variog, type = "l",
col = colM, lty = ltyM, ...)
}
if (smooth) {
panel.loess(x, y, span = span, col = colS, lty = ltyS, ...)
}
if (!is.null(sigma)) {
panel.abline(c(sigma, 0), lty = 2)
}
}, xlab = xlab, ylab = ylab, ...)
}
print.compareFits <-
function(x, ...)
{
print(unclass(x), ...)
invisible(x)
}
print.correlation <-
function(x, title = " Correlation:", rdig = 3, ...)
{
p <- dim(x)[2]
if (p > 1) {
cat(title, "\n")
ll <- lower.tri(x)
x[ll] <- format(round(x[ll], digits = rdig))
x[!ll] <- ""
if (!is.null(colnames(x))) {
colnames(x) <- abbreviate(colnames(x), minlength = rdig + 3)
}
print(x[-1, - p, drop = FALSE], ..., quote = FALSE)
}
invisible(x)
}
qqnorm.nls <-
function(y, form = ~ resid(., type = "p"), abline = NULL,
id = NULL, idLabels = NULL, grid = FALSE, ...)
{
object <- y
if (!inherits(form, "formula")) {
stop("'form' must be a formula")
}
allV <- all.vars(asOneFormula(form, id, idLabels))
allV <- allV[is.na(match(allV,c("T","F","TRUE","FALSE")))]
if (length(allV) > 0) {
data <- getData(object)
if (is.null(data)) {
alist <- lapply(as.list(allV), as.name)
names(alist) <- allV
alist <- c(as.list(quote(data.frame)), alist)
mode(alist) <- "call"
data <- eval(alist, sys.parent(1))
} else {
if (any(naV <- is.na(match(allV, names(data))))) {
stop(sprintf(ngettext(sum(naV),
"%s not found in data",
"%s not found in data"),
allV[naV]), domain = NA)
}
}
} else data <- NULL
dots <- list(...)
if (length(dots) > 0) args <- dots
else args <- list()
data <- as.list(c(as.list(data), . = list(object)))
covF <- getCovariateFormula(form)
.x <- eval(covF[[2]], data)
labs <- attr(.x, "label")
if (is.null(labs) || ((labs != "Standardized residuals") &&
(labs != "Normalized residuals") &&
(substring(labs, 1, 9) != "Residuals"))) {
stop("only residuals allowed")
}
if (is.null(args$xlab)) args$xlab <- labs
if (is.null(args$ylab)) args$ylab <- "Quantiles of standard normal"
fData <- qqnorm(.x, plot.it = FALSE)
data[[".y"]] <- fData$x
data[[".x"]] <- fData$y
dform <- ".y ~ .x"
if (!is.null(grp <- getGroupsFormula(form))) {
dform <- paste(dform, deparse(grp[[2]]), sep = "|")
}
if (!is.null(id)) {
id <-
switch(mode(id),
numeric = {
if ((id <= 0) || (id >= 1)) {
stop("'id' must be between 0 and 1")
}
if (labs == "Normalized residuals") {
as.logical(abs(resid(object, type="normalized"))
> -qnorm(id / 2))
} else {
as.logical(abs(resid(object, type="pearson"))
> -qnorm(id / 2))
}
},
call = eval(asOneSidedFormula(id)[[2]], data),
stop("'id' can only be a formula or numeric")
)
if (is.null(idLabels)) {
idLabels <- getGroups(object)
if (length(idLabels) == 0) idLabels <- 1:object$dims$N
idLabels <- as.character(idLabels)
} else {
if (mode(idLabels) == "call") {
idLabels <-
as.character(eval(asOneSidedFormula(idLabels)[[2]], data))
} else if (is.vector(idLabels)) {
if (length(idLabels <- unlist(idLabels)) != length(id)) {
stop("'idLabels' of incorrect length")
}
idLabels <- as.character(idLabels)
} else {
stop("'idLabels' can only be a formula or a vector")
}
}
}
assign("id", if (is.null(id)) NULL else as.logical(as.character(id)))
assign("idLabels", as.character(idLabels))
assign("grid", grid)
assign("abl", abline)
if (is.null(args$strip)) {
args$strip <- function(...) strip.default(..., style = 1)
}
if (is.null(args$cex)) args$cex <- par("cex")
if (is.null(args$adj)) args$adj <- par("adj")
args <- c(list(eval(parse(text = dform)),
data = substitute(data)),
args)
if (is.null(args$panel)) {
args <- c(list(panel = function(x, y, subscripts, ...){
x <- as.numeric(x)
y <- as.numeric(y)
dots <- list(...)
if (grid) panel.grid()
panel.xyplot(x, y, ...)
if (any(ids <- id[subscripts])){
ltext(x[ids], y[ids], idLabels[subscripts][ids],
cex = dots$cex, adj = dots$adj)
}
if (!is.null(abl)) { if (length(abl) == 2) panel.abline(a = abl, ...) else panel.abline(h = abl, ...) }
}), args)
}
do.call("xyplot", args)
}
Variogram.default <-
function(object, distance, ...)
{
ld <- length(distance)
lo <- length(object)
if (ld != round(lo*(lo-1)/2)) {
stop("'distance' and 'object' have incompatible lengths")
}
val <- outer(object, object, function(x,y) ((x - y)^2)/2)
val <- val[lower.tri(val)]
val <- data.frame(variog = val, dist = as.numeric(distance))
class(val) <- c("Variogram", "data.frame")
val
}
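# Variogram.default() computes the classical semivariogram cloud: every pair of
# observations contributes gamma_ij = (y_i - y_j)^2 / 2, matched against the
# supplied vector of pairwise distances (lower-triangle order).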
c_deparse <- function(...) paste(deparse(..., width.cutoff=500), collapse="")
DatabaseTSD.version <- function() {
  return(max(embryogrowth::DatabaseTSD$Version))
}
egammaAltCensored.jackknife <-
function (x, censored, censoring.side, est.fcn)
{
N <- length(x)
jack.vec <- numeric(N)
new.N <- N - 1
for (i in 1:N) {
new.x <- x[-i]
new.censored <- censored[-i]
new.n.cen <- sum(new.censored)
if (new.n.cen == 0)
jack.vec[i] <- egammaAlt(new.x)$parameters["mean"]
else {
jack.vec[i] <- do.call(est.fcn, list(x = new.x, censored = new.censored,
censoring.side = censoring.side, ci = FALSE))$parameters["mean"]
}
}
jack.vec
}
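# The vector returned above holds the leave-one-out estimates of the mean; the
# usual jackknife standard error follows directly (sketch, hypothetical inputs):
if (FALSE) {
  jack.vec <- egammaAltCensored.jackknife(x, censored, censoring.side, est.fcn)
  N <- length(jack.vec)
  jack.se <- sqrt((N - 1) / N * sum((jack.vec - mean(jack.vec))^2))
}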
context("metadata cache 1/3")
test_that("get_cache_files", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
rep_files <- get_private(cmc)$get_cache_files("replica")
check <- function(files, root) {
expect_equal(files$root, root)
expect_true(all(c("meta", "lock", "rds") %in% names(files)))
expect_equal(
      fs::path_common(c(files$rds, files$lock, files$meta, root)),
root)
expect_true(tibble::is_tibble(files$pkgs))
expect_equal(
sort(names(files$pkgs)),
sort(c("path", "etag", "basedir", "base", "mirror", "url",
"fallback_url", "platform", "r_version", "type",
"bioc_version", "meta_path", "meta_etag", "meta_url"
))
)
expect_equal(
fs::path_common(c(files$pkgs$path, files$pkgs$etag, root)),
root)
}
check(pri_files, pri)
check(rep_files, rep)
})
test_that("get_current_data", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
set_private(cmc, "data") <- "DATA"
set_private(cmc, "data_time") <- Sys.time()
expect_equal(get_private(cmc)$get_current_data(oneday()), "DATA")
set_private(cmc, "data_time") <- Sys.time() - 2 * oneday()
expect_error(
get_private(cmc)$get_current_data(oneday()),
"Loaded data outdated")
set_private(cmc, "data_time") <- NULL
expect_error(
get_private(cmc)$get_current_data(oneday()),
"Loaded data outdated")
set_private(cmc, "data") <- NULL
expect_error(get_private(cmc)$get_current_data(oneday()), "No data loaded")
})
test_that("load_replica_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
expect_error(
get_private(cmc)$load_replica_rds(oneday()),
"No replica RDS file in cache"
)
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$rds))
saveRDS("This is it", rep_files$rds)
file_set_time(rep_files$rds, Sys.time() - 2 * oneday())
expect_error(
get_private(cmc)$load_replica_rds(oneday()),
"Replica RDS cache file outdated"
)
file_set_time(rep_files$rds, Sys.time() - 1/2 * oneday())
expect_equal(
get_private(cmc)$load_replica_rds(oneday()),
"This is it")
expect_equal(get_private(cmc)$data, "This is it")
expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
})
test_that("load_primary_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
expect_error(
get_private(cmc)$load_primary_rds(oneday()),
"No primary RDS file in cache"
)
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$rds))
saveRDS("This is it", pri_files$rds)
file_set_time(pri_files$rds, Sys.time() - 2 * oneday())
expect_error(
get_private(cmc)$load_primary_rds(oneday()),
"Primary RDS cache file outdated"
)
file_set_time(pri_files$rds, Sys.time() - 1/2 * oneday())
for (f in pri_files$pkgs$path) { mkdirp(dirname(f)); cat("x", file = f) }
file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
expect_equal(
get_private(cmc)$load_primary_rds(oneday()),
"This is it")
expect_equal(get_private(cmc)$data, "This is it")
expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
expect_equal(
get_private(cmc)$load_replica_rds(oneday()),
"This is it")
})
test_that("locking failures", {
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
mockery::stub(cmc__load_primary_rds, "lock", function(...) NULL)
expect_error(
cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
"Cannot acquire lock to copy RDS")
mockery::stub(cmc__load_primary_pkgs, "lock", function(...) NULL)
expect_error(
cmc__load_primary_pkgs(cmc, get_private(cmc), oneday()),
"Cannot acquire lock to copy PACKAGES")
})
test_that("load_primary_rds 3", {
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
touch(pri_files$rds)
expect_error(
cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
"Primary PACKAGES missing")
})
test_that("load_primary_pkgs", {
withr::local_options(list(repos = NULL))
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
expect_error(
get_private(cmc)$load_primary_pkgs(oneday()),
"Some primary PACKAGES files don't exist")
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-mac.gz"), pri_files$pkgs$path[1])
expect_error(
synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
"Some primary PACKAGES files don't exist")
for (i in utils::tail(seq_len(nrow(pri_files$pkgs)), -1)) {
fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path[i])
}
file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
expect_error(
synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
"Some primary PACKAGES files are outdated")
file_set_time(pri_files$pkgs$path, Sys.time() - 1/2 * oneday())
res <- synchronise(get_private(cmc)$load_primary_pkgs(oneday()))
check_packages_data(res)
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneday())
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
})
test_that("update_replica_pkgs", {
skip_if_offline()
skip_on_cran()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
synchronise(get_private(cmc)$update_replica_pkgs())
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(all(file.exists(rep_files$pkgs$path)))
expect_true(all(file.exists(rep_files$pkgs$etag)))
data <- get_private(cmc)$update_replica_rds()
expect_identical(data, get_private(cmc)$data)
check_packages_data(data)
})
test_that("update_replica_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-mac.gz"), rep_files$pkgs$path[1])
for (i in utils::tail(seq_len(nrow(rep_files$pkgs)), -1)) {
fs::file_copy(get_fixture("PACKAGES-src.gz"), rep_files$pkgs$path[i])
}
data <- get_private(cmc)$update_replica_rds()
expect_identical(get_private(cmc)$data, data)
expect_true(get_private(cmc)$data_time > Sys.time() - oneminute())
check_packages_data(data)
})
test_that("update_primary", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$rds))
saveRDS("RDS", rep_files$rds)
get_private(cmc)$update_primary(rds = TRUE, packages = FALSE)
expect_true(file.exists(pri_files$rds))
expect_equal(readRDS(pri_files$rds), "RDS")
lapply_rows(rep_files$pkgs, function(pkg) {
mkdirp(dirname(pkg$path))
cat(basename(pkg$path), "\n", sep = "", file = pkg$path)
mkdirp(dirname(pkg$etag))
cat(pkg$url, "\n", sep = "", file = pkg$etag)
})
get_private(cmc)$update_primary(rds = FALSE, packages = TRUE)
expect_true(all(file.exists(pri_files$pkgs$path)))
expect_true(all(file.exists(pri_files$pkgs$etag)))
lapply_rows(pri_files$pkgs, function(pkg) {
expect_equal(readLines(pkg$path), basename(pkg$path))
expect_equal(readLines(pkg$etag), pkg$url)
})
})
test_that("update_primary 2", {
expect_null(cmc__update_primary(NULL, NULL, FALSE, FALSE, FALSE))
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
mockery::stub(cmc__update_primary, "lock", function(...) NULL)
expect_error(
cmc__update_primary(cmc, get_private(cmc), TRUE, TRUE, TRUE),
"Cannot acquire lock to update primary cache")
})
test_that("update", {
skip_if_offline()
skip_on_cran()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
data <- cmc$update()
check_packages_data(data)
expect_identical(get_private(cmc)$data, data)
expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
pri_files <- get_private(cmc)$get_cache_files("primary")
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
expect_true(all(file.exists(rep_files$pkgs$path)))
expect_true(all(file.exists(rep_files$pkgs$etag)))
expect_true(all(file.exists(pri_files$pkgs$path)))
expect_true(all(file.exists(pri_files$pkgs$etag)))
expect_equal(as.list(data$pkgs), as.list(cmc$list()))
lst <- cmc$list(c("igraph", "MASS"))
expect_equal(sort(c("igraph", "MASS")), sort(unique(lst$package)))
rdeps <- cmc$revdeps("MASS")
expect_true("abc" %in% rdeps$package)
expect_true("abd" %in% rdeps$package)
rdeps <- cmc$revdeps("MASS", recursive = FALSE)
expect_true("abc" %in% rdeps$package)
expect_false("abd" %in% rdeps$package)
})
`rray_extract<-` <- function(x, ..., value) {
rray_extract_assign(x, ..., value = value)
}
rray_extract_assign <- function(x, ..., value) {
indexer <- rray_as_index(x, ...)
vec_assert(value, arg = "value")
value <- vec_cast_inner(value, x)
if (is_any_na_int(indexer)) {
abort("`NA` indices are not yet supported.")
}
out <- rray__extract_assign(x, indexer, value)
vec_cast_container(out, x)
}
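# Usage sketch of the replacement form above (assumes the surrounding rray
# package; the constructor call is illustrative):
# x <- rray(1:8, dim = c(2, 2, 2))
# rray_extract(x, 1) <- 0L   # assigns into the selected slice, broadcasting value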
.onAttach <- function (lib, pkgname="yourpackagename") {
invisible()
}
tcplAICProb <- function(...) {
aics <- list(...)
lens <- sapply(aics, length)
if (abs(max(lens) - min(lens)) > 0) stop("All inputs must be same length.")
maic <- pmin(..., na.rm = TRUE)
di <- lapply(aics, function(x) x - maic)
l <- lapply(di, function(x) exp(-x/2))
lsum <- apply(do.call(cbind, l), 1, sum, na.rm = TRUE)
prob <- lapply(l, function(x) x/lsum)
prob
}
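# tcplAICProb() converts AIC values from competing fits into Akaike weights:
# with delta_j = AIC_j - min_k AIC_k, w_j = exp(-delta_j/2) / sum_k exp(-delta_k/2).
# Sketch with made-up AIC values for two models across two assays:
if (FALSE) {
  aic_m1 <- c(100, 210)
  aic_m2 <- c(104, 205)
  tcplAICProb(aic_m1, aic_m2)  # per-model weights; per assay they sum to 1
}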
`fnorm2` <-
function(dmax2, data, alterInt){
  if(any(is.na(dmax2))) return(NaN)
  N <- sum(data)
  m <- colSums(data)
  p <- m/N
  p0 <- p[1]
  p2 <- p[3]
  ro.0.1 <- p0*p2/(sqrt(p0*(1 - p0))*sqrt(p2*(1 - p2)))
  cov <- matrix(c(1, ro.0.1, ro.0.1, 1), 2, 2, byrow = TRUE)
  d <- dmax2
  lower <- switch(alterInt + 1, rep(-d, 2), rep(-Inf, 2), rep(-d, 2))
  upper <- switch(alterInt + 1, rep(d, 2), rep(d, 2), rep(Inf, 2))
  p.norm <- 1 - as.numeric(sadmvn(lower = lower, upper = upper, mean = rep(0, 2), varcov = cov))
  return(p.norm)
}
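# fnorm2(): given a 3-category count table `data`, approximates the p-value of
# the max-type statistic dmax2 as the probability that a bivariate normal with
# correlation ro.0.1 falls outside the acceptance box; alterInt (0/1/2) yields a
# two-sided, upper-tail, or lower-tail region via the switch() calls, and
# sadmvn() is the multivariate normal probability from the mnormt package.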
sits_accuracy <- function(data, ...) {
UseMethod("sits_accuracy", data)
}
sits_accuracy.sits <- function(data, ...) {
.check_set_caller("sits_accuracy.sits")
if (!requireNamespace("caret", quietly = TRUE)) {
stop("Please install package caret.", call. = FALSE)
}
.check_chr_contains(
x = names(data),
contains = "predicted",
msg = "input data without predicted values"
)
if ("label" %in% names(data)) {
pred_ref <- .sits_accuracy_pred_ref(data)
pred <- pred_ref$predicted
ref <- pred_ref$reference
}
else {
pred <- data$predicted
ref <- data$reference
}
unique_ref <- unique(ref)
pred_fac <- factor(pred, levels = unique_ref)
ref_fac <- factor(ref, levels = unique_ref)
assess <- caret::confusionMatrix(pred_fac, ref_fac)
class(assess) <- c("sits_assessment", class(assess))
return(assess)
}
sits_accuracy.classified_image <- function(data, ..., validation_csv) {
    .check_file(
        x = validation_csv,
        extensions = "csv",
        msg = "csv file not available"
    )
csv_tb <- tibble::as_tibble(utils::read.csv(validation_csv,
stringsAsFactors = FALSE))
.check_chr_contains(
x = colnames(csv_tb),
contains = c("longitude", "latitude", "label"),
msg = "invalid csv file"
)
labels_cube <- sits_labels(data)
pred_ref_lst <- slider::slide(data, function(row) {
labelled_band <- sits_bands(row)
.check_length(
x = labelled_band,
len_min = 1,
len_max = 1,
msg = "invalid labelled cube"
)
xy_tb <- .sits_proj_from_latlong(
longitude = csv_tb$longitude,
latitude = csv_tb$latitude,
crs = .cube_crs(row)
)
points <- dplyr::bind_cols(csv_tb, xy_tb)
.check_that(
x = nrow(points) != 0,
msg = paste("no validation point intersects the map's",
"spatiotemporal extent.")
)
points_row <- dplyr::filter(
points,
X >= row$xmin & X <= row$xmax &
Y >= row$ymin & Y <= row$ymax
)
if (nrow(points_row) < 1) {
return(NULL)
}
xy <- matrix(c(points_row$X, points_row$Y),
nrow = nrow(points_row), ncol = 2)
colnames(xy) <- c("X", "Y")
values <- .cube_extract(
cube = row,
band_cube = labelled_band,
xy = xy
)
predicted <- labels_cube[unlist(values)]
reference <- points_row$label
.check_that(
x = length(reference) == length(predicted),
msg = "predicted and reference vector do not match"
)
tb <- tibble::tibble(predicted = predicted, reference = reference)
return(tb)
})
pred_ref <- do.call(rbind, pred_ref_lst)
error_matrix <- table(
factor(pred_ref$predicted,
levels = labels_cube,
labels = labels_cube
),
factor(pred_ref$reference,
levels = labels_cube,
labels = labels_cube
)
)
freq_lst <- slider::slide(data, function(tile) {
freq <- .cube_area_freq(tile)
freq <- dplyr::mutate(freq, class = labels_cube[freq$value])
return(freq)
})
freq <- do.call(rbind, freq_lst)
freq <- freq %>%
dplyr::group_by(class) %>%
dplyr::summarise(count = sum(count))
area <- freq$count
names(area) <- freq$class
area[is.na(area)] <- 0
assess <- .sits_accuracy_area_assess(data, error_matrix, area)
class(assess) <- c("sits_area_assessment", class(assess))
return(assess)
}
.sits_accuracy_pred_ref <- function(class) {
pred <- unlist(purrr::map(class$predicted, function(r) r$class))
ref <- class$label
.check_that(
x = !("NoClass" %in% (ref)),
msg = "input data without labels"
)
pred_ref <- tibble::tibble("predicted" = pred, "reference" = ref)
return(pred_ref)
}
.sits_accuracy_area_assess <- function(cube, error_matrix, area) {
.check_set_caller(".sits_accuracy_area_assess")
.check_chr_contains(
x = class(cube),
contains = "classified_image",
msg = "not a classified cube")
if (any(dim(error_matrix) == 0)) {
stop("invalid dimensions in error matrix.", call. = FALSE)
}
if (length(unique(dim(error_matrix))) != 1) {
stop("The error matrix is not square.", call. = FALSE)
}
if (!all(colnames(error_matrix) == rownames(error_matrix))) {
stop("Labels mismatch in error matrix.", call. = FALSE)
}
if (unique(dim(error_matrix)) != length(area)) {
stop("Mismatch between error matrix and area vector.",
call. = FALSE
)
}
if (!all(names(area) %in% colnames(error_matrix))) {
stop("Label mismatch between error matrix and area vector.",
call. = FALSE
)
}
area <- area[colnames(error_matrix)]
res <- .cube_resolution(cube)
area <- area * prod(res) / 10000
weight <- area / sum(area)
class_areas <- rowSums(error_matrix)
prop <- weight * error_matrix / class_areas
prop[is.na(prop)] <- 0
error_adjusted_area <- colSums(prop) * sum(area)
    stderr_prop <- sqrt(colSums((weight * prop - prop^2) / (class_areas - 1)))
stderr_area <- sum(area) * stderr_prop
user_acc <- diag(prop) / rowSums(prop)
prod_acc <- diag(prop) / colSums(prop)
over_acc <- sum(diag(prop))
return(
list(
error_matrix = error_matrix,
area_pixels = area,
            error_adjusted_area = error_adjusted_area,
stderr_prop = stderr_prop,
stderr_area = stderr_area,
conf_interval = 1.96 * stderr_area,
accuracy = list(user = user_acc,
producer = prod_acc,
overall = over_acc)
)
)
}
sits_accuracy_summary <- function(x,
mode = "sens_spec",
digits = max(3, getOption("digits") - 3)) {
.check_set_caller("sits_accuracy_summary")
if ("sits_area_assessment" %in% class(x)) {
print.sits_area_assessment(x)
return(invisible(TRUE))
}
.check_chr_contains(
x = class(x),
contains = "sits_assessment",
msg = "please run sits_accuracy first"
)
overall <- round(x$overall, digits = digits)
accuracy_ci <- paste("(",
paste(overall[c("AccuracyLower", "AccuracyUpper")],
collapse = ", "
), ")",
sep = ""
)
overall_text <- c(
paste(overall["Accuracy"]), accuracy_ci, "",
paste(overall["Kappa"])
)
overall_names <- c("Accuracy", "95% CI", "", "Kappa")
cat("\nOverall Statistics\n")
overall_names <- ifelse(overall_names == "",
"",
paste(overall_names, ":")
)
out <- cbind(format(overall_names, justify = "right"), overall_text)
colnames(out) <- rep("", ncol(out))
rownames(out) <- rep("", nrow(out))
print(out, quote = FALSE)
invisible(x)
}
print.sits_assessment <- function(x, ...,
mode = "sens_spec",
digits = max(3, getOption("digits") - 3)) {
cat("Confusion Matrix and Statistics\n\n")
print(x$table)
overall <- round(x$overall, digits = digits)
accuracy_ci <- paste("(",
paste(overall[c("AccuracyLower", "AccuracyUpper")],
collapse = ", "
), ")",
sep = ""
)
overall_text <- c(
paste(overall["Accuracy"]), accuracy_ci, "",
paste(overall["Kappa"])
)
overall_names <- c("Accuracy", "95% CI", "", "Kappa")
if (dim(x$table)[1] > 2) {
cat("\nOverall Statistics\n")
overall_names <- ifelse(overall_names == "",
"",
paste(overall_names, ":")
)
out <- cbind(format(overall_names, justify = "right"), overall_text)
colnames(out) <- rep("", ncol(out))
rownames(out) <- rep("", nrow(out))
print(out, quote = FALSE)
cat("\nStatistics by Class:\n\n")
x$byClass <- x$byClass[, grepl(
"(Sensitivity)|(Specificity)|(Pos Pred Value)|(Neg Pred Value)|(F1)",
colnames(x$byClass)
)]
measures <- t(x$byClass)
rownames(measures) <- c(
"Prod Acc (Sensitivity)", "Specificity",
"User Acc (Pos Pred Value)", "Neg Pred Value", "F1"
)
print(measures, digits = digits)
} else {
x$byClass <- x$byClass[
grepl(
"(Sensitivity)|(Specificity)|(Pos Pred Value)|(Neg Pred Value)",
names(x$byClass)
)
]
names_classes <- row.names(x$table)
c1 <- x$positive
c2 <- names_classes[!(names_classes == x$positive)]
pa1 <- paste("Prod Acc ", c1)
pa2 <- paste("Prod Acc ", c2)
ua1 <- paste("User Acc ", c1)
ua2 <- paste("User Acc ", c2)
names(x$byClass) <- c(pa1, pa2, ua1, ua2)
overall_text <- c(
overall_text,
"",
format(x$byClass, digits = digits)
)
overall_names <- c(overall_names, "", names(x$byClass))
overall_names <- ifelse(overall_names == "", "",
paste(overall_names, ":"))
out <- cbind(format(overall_names, justify = "right"), overall_text)
colnames(out) <- rep("", ncol(out))
rownames(out) <- rep("", nrow(out))
out <- rbind(out, rep("", 2))
print(out, quote = FALSE)
}
invisible(x)
}
print.sits_area_assessment <- function(x, ..., digits = 2){
overall <- round(x$accuracy$overall, digits = digits)
cat("Area Weigthed Statistics\n")
cat(paste0("Overall Accuracy = ", overall,"\n"))
acc_user <- round(x$accuracy$user, digits = digits)
acc_prod <- round(x$accuracy$producer, digits = digits)
tb <- t(dplyr::bind_rows(acc_user, acc_prod))
colnames(tb) <- c("User", "Producer")
cat("\nArea-Weighted Users and Producers Accuracy\n")
print(tb)
area_pix <- round(x$area_pixels, digits = digits)
    area_adj <- round(x$error_adjusted_area, digits = digits)
conf_int <- round(x$conf_interval, digits = digits)
tb1 <- t(dplyr::bind_rows(area_pix, area_adj, conf_int))
colnames(tb1) <- c("Mapped Area (ha)", "Error-Adjusted Area (ha)", "Conf Interval (ha)")
cat("\nMapped Area x Estimated Area (ha)\n")
print(tb1)
}
sensi_plot.tree.bd <- function(x, graphs="all", ...){
estimate <- n.tree <- model <- NULL
method <- x$call$method
if(is.null(x$call$method)) method <- "ms"
if (method == "ms") label = "estimated diversification rate [ms]"
if (method == "km") label = "estimated speciation rate [km]"
e1 <- ggplot2::ggplot(x$tree.bd.estimates, aes(x=estimate))+
geom_histogram(fill="yellow",colour="black", size=.2,
alpha = .3) +
geom_vline(xintercept = x$stats$mean, color="red",linetype=2,size=.7)+
xlab(label)+
ylab("Frequency")+
theme(axis.title=element_text(size=12),
axis.text = element_text(size=12),
panel.background = element_rect(fill="white",
colour="black"))
e2 <- ggplot2::ggplot(x$tree.bd.estimates,
aes(y = estimate, x = reorder(n.tree, estimate)))+
geom_point(size = 3, color = "tomato") +
xlab("tree")+
ylab("estimate")+
theme(axis.title=element_text(size=12),
axis.text.x = element_text(size=8),
axis.text.y = element_text(size=12),
panel.background = element_rect(fill="white",
colour="black"))
if (graphs=="all")
suppressMessages(return(multiplot(e1,e2, cols=2)))
if (graphs==1)
suppressMessages(return(e1))
if (graphs==2)
suppressMessages(return(e2))
}
"fplogistic" <- function(
p1, p2, fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"),
method = c("1", "2", "3", "4"), ssfct = NULL,
fctName, fctText)
{
numParm <- 4
    if (!is.character(names) || !(length(names) == numParm)) {stop("Not correct 'names' argument")}
    if (!(length(fixed) == numParm)) {stop("Not correct 'fixed' argument")}
notFixed <- is.na(fixed)
parmVec <- rep(0, numParm)
parmVec[!notFixed] <- fixed[!notFixed]
fd <- function (dose, b, c, d, e)
{
.expr1 <- d - c
.expr3 <- log(dose + 1)
.expr4 <- .expr3^p1
.expr6 <- .expr3^p2
.expr9 <- exp(b * .expr4 + e * .expr6)
.expr10 <- 1 + .expr9
.expr15 <- .expr10^2
.expr18 <- 1/.expr10
.value <- c + .expr1/.expr10
.grad <- array(0, c(length(.value), 4L), list(NULL, c("b", "c", "d", "e")))
.grad[, "b"] <- -(.expr1 * (.expr9 * .expr4)/.expr15)
.grad[, "c"] <- 1 - .expr18
.grad[, "d"] <- .expr18
.grad[, "e"] <- -(.expr1 * (.expr9 * .expr6)/.expr15)
attr(.value, "gradient") <- .grad
.value
}
fct <- function(dose, parm)
{
parmMat <- matrix(parmVec, nrow(parm), numParm, byrow = TRUE)
parmMat[, notFixed] <- parm
fd(dose, parmMat[, 1], parmMat[, 2], parmMat[, 3], parmMat[, 4])
}
    if (is.null(ssfct))
    {
        ssfct <- function(dframe)
        {
            initval <- llogistic()$ssfct(dframe)[1:4]
            initval[4] <- initval[1]
            initval[1] <- -initval[1]
            return(initval[notFixed])
        }
    }
names <- names[notFixed]
deriv1 <- function(dose, parm)
{
parmMat <- matrix(parmVec, nrow(parm), numParm, byrow = TRUE)
parmMat[, notFixed] <- parm
attr(fd(dose, parmMat[, 1], parmMat[, 2], parmMat[, 3], parmMat[, 4]), "gradient")[, notFixed]
}
deriv2 <- NULL
derivx <- function(dose, parm)
{
parmMat <- matrix(parmVec, nrow(parm), numParm, byrow = TRUE)
parmMat[, notFixed] <- parm
dFct <- function (dose, b, c, d, e)
{
.expr1 <- d - c
.expr2 <- dose + 1
.expr3 <- log(.expr2)
.expr9 <- exp(b * .expr3^p1 + e * .expr3^p2)
.expr10 <- 1 + .expr9
.expr15 <- 1/.expr2
.value <- c + .expr1/.expr10
.grad <- array(0, c(length(.value), 1L), list(NULL, c("dose")))
.grad[, "dose"] <- -(.expr1 * (.expr9 * (b * (.expr3^(p1 - 1) * (p1 * .expr15)) + e * (.expr3^(p2 - 1) * (p2 * .expr15))))/.expr10^2)
attr(.value, "gradient") <- .grad
.value
}
attr(dFct(dose, parmMat[, 1], parmMat[, 2], parmMat[, 3], parmMat[, 4]), "gradient")
}
edfct <- function(parm, respl, reference, type, loged = FALSE, ...)
{
parmVec[notFixed] <- parm
p <- EDhelper(parmVec, respl, reference, type)
invfp <- function(resp, b, e)
{
fct0 <- function(x){resp - (b*(log(x+1)^p1) + e*(log(x+1)^p2))}
uniroot(fct0, c(0.001, 1000))$root
}
EDfct <- function(b, c, d, e)
{
invfp(log((100-p)/p), b, e)
}
EDp <- EDfct(parmVec[1], parmVec[2], parmVec[3], parmVec[4])
logEDp <- log(EDp+1)
denVal <- parmVec[1] * p1 * (logEDp)^(p1-1) + parmVec[4] * p2 * (logEDp)^(p2-1)
derVec <- (EDp+1) * c(logEDp^p1, logEDp^p2) / denVal
EDder <- c(derVec[1], 0, 0, derVec[2])
if (loged)
{
EDder <- EDder / EDp
EDp <- log(EDp)
}
return(list(EDp, EDder[notFixed]))
}
returnList <-
list(fct = fct, ssfct = ssfct, names = names, deriv1 = deriv1, deriv2 = deriv2, derivx = derivx, edfct = edfct,
name = ifelse(missing(fctName), paste(as.character(match.call()[[1]]), "(", p1,",", p2, ")", sep=""), fctName),
text = ifelse(missing(fctText), "Fractional polynomial", fctText),
noParm = sum(is.na(fixed)),
fixed = fixed)
class(returnList) <- "fp-logistic"
invisible(returnList)
}
"FPL.4" <-
function(p1, p2, fixed = c(NA, NA, NA, NA), names = c("b", "c", "d", "e"), ...)
{
numParm <- 4
    if (!is.character(names) || !(length(names) == numParm)) {stop("Not correct names argument")}
    if (!(length(fixed) == numParm)) {stop("Not correct length of 'fixed' argument")}
return(fplogistic(p1, p2, fixed = fixed, names = names,
fctName = paste(as.character(match.call()[[1]]), "(", p1,",", p2, ")", sep=""), ...) )
}
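# Usage sketch (drc conventions; data set and powers are hypothetical):
# fplogistic()/FPL.4() are meant to be passed as the `fct` argument of drm(),
# with the fractional-polynomial powers p1, p2 fixed up front, e.g.
# m <- drm(response ~ dose, data = mydata, fct = FPL.4(-2, 1))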
library(knotR)
filename <- "product_knot.svg"
a <- reader(filename)
symprod <- symmetry_object(a,xver=c(9,17))
ouprod <- matrix(c(
01,07,
06,02,
03,09,
08,05,
10,14,
16,11,
13,17
),ncol=2,byrow=TRUE)
jj <- knotoptim(filename,
symobj = symprod,
ou = ouprod,
prob = 0,
iterlim=1000, print.level=2
)
write_svg(jj,filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
.createModel_DDexp_multi_ME <- function(tree,r.object){
comment <- "Diversity dependent exponential model with two slope regimes and measurement error."
paramsNames <- c("m0", "logsigma0", "r1", "r2","lognuisance")
params0 <- c(0,log(1),-0.1,-0.1,log(1))
periodizing <- periodizeOneTree_multinogeo(tree,r.object)
eventEndOfPeriods <- endOfPeriods(periodizing, tree)
initialCondition <- function(params) return( list(mean=c(params[1]), var=matrix(c(0))) )
aAGamma <- function(i, params){
rmat<-r.object$S.matrix[[i]]
vectorU <- getLivingLineages(i, eventEndOfPeriods)
vectorA <- function(t) return(rep(0, length(vectorU)))
matrixGamma <- function(t) return(exp(params[2])*1/colSums(rmat)*(exp(params[3]/2*sum(rmat[1,]))*diag(rmat[1,])+exp(params[4]/2*sum(rmat[2,]))*diag(rmat[2,])))
matrixA <- diag(0, length(vectorU))
return(list(a=vectorA, A=matrixA, Gamma=matrixGamma))
}
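  # these box constraints are vacuous (r1 and r2 are unbounded); the closure is
  # apparently kept so the model exposes the same interface as constrained variants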
constraints <- function(params) return(params[3]<=Inf && params[3] >= -Inf && params[4]<=Inf && params[4]>=-Inf)
model <- new(Class="PhenotypicADiag", name="DDexpM", period=periodizing$periods, aAGamma=aAGamma, numbersCopy=eventEndOfPeriods$copy, numbersPaste=eventEndOfPeriods$paste, initialCondition=initialCondition, paramsNames=paramsNames, constraints=constraints, params0=params0, tipLabels=eventEndOfPeriods$labeling, comment=comment)
return(model)
}
getStartingTimes <- function(tree){
nBranch = length(tree$edge.length)
starting_times <- rep(0, times=nBranch)
for(n1 in 1:nBranch){
n2 <- n1 + 1
while(n2 <= nBranch){
if(tree$edge[n2,1]==tree$edge[n1,2]){
starting_times[n2] <- starting_times[n1] + tree$edge.length[n1]
}
n2 <- n2+1
}
}
return(starting_times)
}
isATip <- function(tree, branch_number){
return(!(tree$edge[branch_number,2] %in% tree$edge[,1]))
}
periodizeOneTree_multinogeo <- function(tree,r.object){
hold<-nodeHeights(tree)
startingTimes <- hold[,1]
endTimes <- hold[,2]
all_time_events <- sort(c(startingTimes, endTimes))
nodetimes=max(branching.times(tree))-sort(branching.times(tree),decreasing=TRUE)
extv<-vapply(r.object$S.matrix,function(x)dim(x)[2],1)
outv<-c(1)
for(i in 2:length(extv)){
if(extv[i]!=extv[i-1]){
outv<-c(outv,i)
}}
chg.times=which(!1:length(r.object$times)%in%c(outv,length(r.object$times)))
periods=sort(c(r.object$times[chg.times],unique(startingTimes),max(endTimes)))
return(list(periods=periods, startingTimes=startingTimes, endTimes=endTimes))
}
endOfPeriods <- function(periodizing, tree){
nBranch <- length(periodizing$startingTimes)
nPeriods <- length(periodizing$periods)
numbersCopy <- rep(0, times=nPeriods)
numbersPaste <- rep(0, times=nPeriods)
numbersLineages <- rep(0, times=nPeriods)
labelingLineages <- rep(0, times=nBranch)
initialBranches <- periodizing$startingTimes[periodizing$startingTimes==0]
if(length(initialBranches) == 1){
labelingLineages[1] <- 1
n <- 1
}else{
labelingLineages[periodizing$startingTimes==0] <- c(1,2)
n <- 2
}
numbersLineages[1] <- n
numbersCopy[1] <- 1
numbersPaste[1] <- 2
for(i in 2:nPeriods){
tau_i <- periodizing$periods[i]
newBranches <- which(tau_i == periodizing$startingTimes)
if(length(newBranches) == 2){
n <- n+1
labelingLineages[newBranches[1]] <- labelingLineages[newBranches[1]-1]
labelingLineages[newBranches[2]] <- n
numbersCopy[i] <- labelingLineages[newBranches[1]-1]
numbersPaste[i] <- n
}else{
numbersCopy[i] <- 0
numbersPaste[i] <- 0
}
numbersLineages[i] <- n
}
permutationLabels <- labelingLineages[!(periodizing$endTimes %in% periodizing$startingTimes)]
labeling <- tree$tip.label[order(permutationLabels)]
return(list(copy=numbersCopy, paste=numbersPaste, nLineages=numbersLineages, labeling=labeling))
}
getLivingLineages <- function(i, eventEndOfPeriods){
livingLineages <- rep(1, times=eventEndOfPeriods$nLineages[i])
deads <- eventEndOfPeriods$copy[1:i][eventEndOfPeriods$paste[1:i] == 0]
livingLineages[deads] <- 0
return(livingLineages)
}
read_restart.ED2 <- function(outdir,
runid,
stop.time,
settings,
var.names,
params) {
rundir <- settings$rundir
mod_outdir <- settings$modeloutdir
histfile <- get_restartfile.ED2(mod_outdir, runid, stop.time)
if (is.null(histfile)) {
PEcAn.logger::logger.severe("Failed to find ED2 history restart file.")
}
pft_names <- sapply(settings$pfts, '[[', 'name')
histout <- read_S_files(sfile = basename(histfile),
outdir = dirname(histfile),
pft_names = pft_names,
pecan_names = var.names)
forecast <- list()
for (var_name in var.names) {
perpft <- FALSE
if (var_name == "AGB") {
forecast_tmp <- switch(perpft+1, sum(histout$AGB, na.rm = TRUE), histout$AGB)
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2", "Mg/ha")
names(forecast)[length(forecast)] <- switch(perpft+1, "AGB", paste0("AGB.", pft_names))
}
if (var_name == "AGB.pft") {
perpft <- TRUE
forecast_tmp <- switch(perpft+1, sum(histout$AGB, na.rm = TRUE), histout$AGB)
names(forecast_tmp) <- switch(perpft + 1, "AGB", paste0(pft_names))
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2", "Mg/ha")
names(forecast)[length(forecast)] <- "AGB.pft"
}
if (var_name == "TotLivBiom") {
forecast_tmp <- switch(perpft+1, sum(histout$TotLivBiom, na.rm = TRUE), histout$TotLivBiom)
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2", "Mg/ha")
names(forecast)[length(forecast)] <- switch(perpft+1, "TotLivBiom", paste0("TotLivBiom.", pft_names))
}
if (var_name == "AbvGrndWood") {
forecast_tmp <- switch(perpft+1, sum(histout$AbvGrndWood, na.rm = TRUE), histout$AbvGrndWood)
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2", "Mg/ha")
names(forecast)[length(forecast)] <- switch(perpft+1, "AbvGrndWood", paste0("AbvGrndWood.", pft_names))
}
if (var_name == "leaf_carbon_content") {
forecast_tmp <- switch(perpft+1, sum(histout$leaf_carbon_content, na.rm = TRUE), histout$leaf_carbon_content)
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2", "Mg/ha")
names(forecast)[length(forecast)] <- switch(perpft+1, "leaf_carbon_content", paste0("leaf_carbon_content.", pft_names))
}
if (var_name == "storage_carbon_content") {
forecast[[length(forecast)+1]] <- switch(perpft+1, sum(histout$storage_carbon_content, na.rm = TRUE), histout$storage_carbon_content)
names(forecast)[length(forecast)] <- switch(perpft+1, "storage_carbon_content", paste0("storage_carbon_content.", pft_names))
}
if (var_name == "GWBI") {
forecast_tmp <- switch(perpft+1, sum(histout$GWBI, na.rm = TRUE), histout$GWBI)
forecast[[length(forecast)+1]] <- udunits2::ud.convert(forecast_tmp, "kg/m^2/yr", "Mg/ha/yr")
names(forecast)[length(forecast)] <- switch(perpft+1, "GWBI", paste0("GWBI.", pft_names))
}
if (var_name == "fast_soil_pool_carbon_content") {
forecast[[length(forecast)+1]] <- histout$fast_soil_pool_carbon_content
names(forecast)[length(forecast)] <- "fast_soil_pool_carbon_content"
}
if (var_name == "structural_soil_pool_carbon_content") {
forecast[[length(forecast)+1]] <- histout$structural_soil_pool_carbon_content
names(forecast)[length(forecast)] <- "structural_soil_pool_carbon_content"
}
}
restart <- list()
restart$restart <- histout$restart
restart$histfile <- histfile
params$restart <- restart
PEcAn.logger::logger.info("Finished --", runid)
X_tmp <- list(X = unlist(forecast), params = params)
return(X_tmp)
}
AddToKnownHosts <- R6Class(
"AddToKnownHosts",
inherit = TicStep,
public = list(
initialize = function(host = "github.com") {
private$host <- host
},
run = function() {
cli_text("{.fun step_add_to_known_hosts}: Running ssh-keyscan for
{private$host}.")
keyscan_result <- system2(
"ssh-keyscan",
c("-H", shQuote(private$host)),
stdout = TRUE
)
cat(keyscan_result, "\n", sep = "")
known_hosts_path <- file.path("~", ".ssh", "known_hosts")
dir.create(
dirname(known_hosts_path),
showWarnings = FALSE, recursive = TRUE
)
cli_text("Adding to {known_hosts_path}.")
write(keyscan_result, known_hosts_path, append = TRUE)
},
check = function() {
(!ci_is_interactive()) && (Sys.which("ssh-keyscan") != "")
}
),
private = list(
host = NULL
)
)
step_add_to_known_hosts <- function(host = "github.com") {
AddToKnownHosts$new(host = host)
}
InstallSSHKeys <- R6Class(
"InstallSSHKeys",
inherit = TicStep,
public = list(
initialize = function(private_key_name = "TIC_DEPLOY_KEY") {
private$private_key_name <- compat_ssh_key(private_key_name = private_key_name)
},
run = function() {
private_key_name <- private$private_key_name
deploy_key_path <- file.path("~", ".ssh", private_key_name)
dir.create(
dirname(deploy_key_path),
recursive = TRUE, showWarnings = FALSE
)
cli_text("{.fun step_install_ssh_keys}:
Writing deploy key to {.file {deploy_key_path}}.")
if (file.exists(deploy_key_path)) {
cli_text("Not overwriting existing SSH key.")
return()
}
writeLines(
rawToChar(openssl::base64_decode(Sys.getenv(private_key_name))),
deploy_key_path
)
Sys.chmod(file.path("~", ".ssh", private_key_name), "600")
git2r::config(
core.sshCommand = sprintf(
paste0(
"ssh ",
"-i ~/.ssh/%s -F /dev/null ",
"-o LogLevel=error"
),
private_key_name
),
global = TRUE
)
},
prepare = function() {
verify_install("openssl")
},
check = function() {
if (!ci_is_interactive()) {
if (!ci_can_push(private$private_key_name)) {
cli_alert_danger("{.fun step_install_ssh_keys}: Deployment was
requested but the build is not able to deploy.
We checked for env var {.var {private$private_key_name}}
but could not find it as an env var in the current build.
Double-check if it exists.
Calling {.fun tic::use_ghactions_deploy} may help resolve these issues.",
wrap = TRUE
)
stopc("This build cannot deploy to GitHub.")
}
TRUE
} else {
FALSE
}
}
),
private = list(
private_key_name = NULL
)
)
step_install_ssh_keys <- function(private_key_name = "TIC_DEPLOY_KEY") {
private_key_name <- compat_ssh_key(private_key_name = private_key_name)
InstallSSHKeys$new(private_key_name = private_key_name)
}
TestSSH <- R6Class(
"TestSSH",
inherit = TicStep,
public = list(
initialize = function(url = "[email protected]",
verbose = "",
private_key_name = "TIC_DEPLOY_KEY") {
private_key_name <- compat_ssh_key(private_key_name = private_key_name)
private$url <- url
private$verbose <- verbose
private$private_key_name <- private_key_name
},
run = function() {
cli_text("{.fun step_test_ssh}: Trying to ssh into {private$url}")
cli_text("{.fun step_test_ssh}: Using command: ssh -i ~/.ssh/{private$private_key_name} -o LogLevel=error {private$url} {private$verbose}")
if (Sys.info()[["sysname"]] != "Windows") {
system2("ssh", c(
"-o", "LogLevel=error",
"-i", file.path("~", ".ssh", private$private_key_name),
private$url, private$verbose
))
}
}
),
private = list(
url = NULL,
verbose = NULL,
private_key_name = NULL
)
)
step_test_ssh <- function(url = "[email protected]",
verbose = "",
private_key_name = "TIC_DEPLOY_KEY") {
TestSSH$new(url = url, verbose = verbose, private_key_name = private_key_name)
}
SetupSSH <- R6Class(
"SetupSSH",
inherit = TicStep,
public = list(
initialize = function(private_key_name = "TIC_DEPLOY_KEY",
host = "github.com",
url = paste0("git@", host),
verbose = "") {
private$install_ssh_keys <- step_install_ssh_keys(private_key_name = private_key_name)
private$add_to_known_hosts <- step_add_to_known_hosts(host = host)
private$test_ssh <- step_test_ssh(
url = url, verbose = verbose,
private_key_name = private_key_name
)
},
prepare = function() {
verify_install("git2r")
private$install_ssh_keys$prepare()
private$add_to_known_hosts$prepare()
private$test_ssh$prepare()
},
run = function() {
private$install_ssh_keys$run()
private$add_to_known_hosts$run()
private$test_ssh$run()
},
check = function() {
if (!private$install_ssh_keys$check()) {
cli_alert_info("{.fun SetupSSH$check}: {.fun install_ssh_keys} failed.")
return(FALSE)
}
if (!private$add_to_known_hosts$check()) {
cli_alert_info("{.fun SetupSSH$check}: {.fun add_to_known_hosts} failed.")
return(FALSE)
}
if (!private$test_ssh$check()) {
cli_alert_info("{.fun SetupSSH$check}: {.fun test_ssh} failed.")
return(FALSE)
}
cli_alert_success("{.fun step_setup_ssh} Everything ok.")
return(TRUE)
}
),
private = list(
add_to_known_hosts = NULL,
install_ssh_keys = NULL,
test_ssh = NULL
)
)
step_setup_ssh <- function(private_key_name = "TIC_DEPLOY_KEY",
host = "github.com",
url = paste0("git@", host),
verbose = "") {
SetupSSH$new(
private_key_name = private_key_name, host = host,
url = url, verbose = verbose
)
}
compat_ssh_key <- function(private_key_name) {
if (ci_has_env("id_rsa") && !ci_has_env(private_key_name)) {
private_key_name <- "id_rsa"
}
private_key_name
}
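## End-to-end sketch (hedged): a typical tic.R wires the combined step into
## the "before_deploy" stage. The deploy key is expected, base64-encoded, in
## the TIC_DEPLOY_KEY environment variable (e.g. created with
## tic::use_ghactions_deploy()).
## get_stage("before_deploy") %>%
## add_step(step_setup_ssh(host = "github.com"))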
plotSpectraJS <- function(spectra, which = NULL, browser = NULL, minify = TRUE) {
.chkArgs(mode = 11L)
chkSpectra(spectra)
if (!is.null(which)) {
which <- as.integer(which)
which2 <- setdiff(1:length(spectra$names), which)
spectra <- removeSample(spectra, rem.sam = which2)
}
if (!requireNamespace("jsonlite", quietly = TRUE)) {
stop("You need to install package jsonlite to use this function")
}
if (is.unsorted(spectra$freq)) {
spectra$freq <- rev(spectra$freq)
spectra$data <- spectra$data[, ncol(spectra$data):1]
}
if (requireNamespace("jsonlite", quietly = TRUE)) {
Freq <- jsonlite::toJSON(spectra$freq)
D0 <- jsonlite::toJSON(spectra$data)
D1 <- jsonlite::toJSON(spectra$data)
Names <- jsonlite::toJSON(paste(" ", spectra$names, sep = " "))
Groups <- jsonlite::toJSON(spectra$groups)
Colors <- jsonlite::toJSON(spectra$colors)
xUnit <- jsonlite::toJSON(spectra$unit[1])
Desc <- jsonlite::toJSON(spectra$desc)
Dx <- jsonlite::toJSON(range(spectra$freq))
Dy <- jsonlite::toJSON(range(spectra$data))
sampleBOOL <- c(1L, rep(0, length(spectra$names) - 1))
sampleBOOL <- jsonlite::toJSON(sampleBOOL)
data1 <- paste("var Freq = ", Freq, sep = "")
data2 <- paste("var D0 = ", D0, sep = "")
data3 <- paste("var D1 = ", D1, sep = "")
data4 <- paste("var Names = ", Names, sep = "")
data5 <- paste("var Groups = ", Groups, sep = "")
data6 <- paste("var Colors = ", Colors, sep = "")
data7 <- paste("var xUnit = ", xUnit, sep = "")
data8 <- paste("var Desc = ", Desc, sep = "")
data9 <- paste("var Dx = ", Dx, sep = "")
data10 <- paste("var Dy = ", Dy, sep = "")
data11 <- paste("var sampleBOOL = ", sampleBOOL, sep = "")
td <- tempdir()
fd <- system.file("extdata", package = "ChemoSpec")
pSfiles <- c(
"pS.css", "pS_globals.js", "pS_controls.js",
"pS_brushNguides.js", "pS_main.js", "plotSpectraJS.html", "pS_spectra.js"
)
chk2 <- file.copy(
from = file.path(fd, pSfiles), to = file.path(td, pSfiles),
overwrite = TRUE
)
if (!all(chk2)) stop("Copying to temporary directory failed")
js1 <- readLines(con = file.path(td, "pS_globals.js"))
js2 <- readLines(con = file.path(td, "pS_brushNguides.js"))
js3 <- readLines(con = file.path(td, "pS_controls.js"))
js4 <- readLines(con = file.path(td, "pS_spectra.js"))
js5 <- readLines(con = file.path(td, "pS_main.js"))
text <- c(
data1, data2, data3, data4,
data5, data6, data7, data8, data9, data10, data11,
js1, js2, js3, js4, js5
)
if (minify) {
if (!requireNamespace("js", quietly = TRUE)) {
stop("You need to install package js to minify the JavaScript code")
}
text <- js::uglify_optimize(text, unused = FALSE)
}
}
writeLines(text, sep = "\n", con = file.path(td, "pS.js"))
pg <- file.path(td, "plotSpectraJS.html")
if (!is.null(browser)) {
browseURL(pg, browser = browser)
} else {
viewer <- getOption("viewer")
if (!is.null(viewer)) {
viewer(pg)
} else {
browseURL(pg)
}
}
message("The plotSpectraJS web page is in the following\ntemp directory which is deleted when you quit R: ")
message(td)
return(invisible())
}
}
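## Usage sketch, assuming the SrE.IR example Spectra object shipped with
## ChemoSpec (an assumption; any valid Spectra object works). minify = FALSE
## avoids the optional dependency on the js package.
if (interactive()) {
data(SrE.IR, package = "ChemoSpec")
plotSpectraJS(SrE.IR, minify = FALSE)
}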
summary.minimum.entropy <- function(object, ..., average = FALSE) {
if (average)
return(tapply(object$Prop, object$MinEnt, sum))
else
return(as.data.frame(object))
}
ce.simNormalZINB <-
function(N, data, h, L0, L, M, Melite, eps, a, b, r){
if (N == 0){
loglik.full <- loglikzinb(1, (L + 1), data, r, h)[[1]]
BIC.full <- BICzinb(loglik.full, 0, L)
return(list(loci = c(1, (L + 1)), BIC = BIC.full))
} else {
# initial mean and sd per breakpoint: moments of Uniform(L0, L)
new.para <- rbind(rep(L0 + (L - L0)/2, N), rep(sqrt((L - L0)^2/12), N))
bic <- c()
k <- 0
repeat
{
k <- k + 1
ch <- array(0, dim = c(M, N + 2))
ch[, 1] <- c(1)
ch[, ( N + 2)] <- c(L + 1)
ch[, (2:(N + 1))] <- apply(new.para, 2, normrand, L0, L, M)
ch <- t(apply(ch, 1, sort))
loglike <- apply(ch, 1, llhoodzinb, data, r, h)
bic.vals <- apply(as.data.frame(loglike), 1, BICzinb, N, L)
ch <- cbind(ch, bic.vals)
ch <- ch[order(ch[, (N + 3)], decreasing = FALSE), ]
melitesmpl <- ch[1:Melite, ]
bic[k] <- melitesmpl[1, (N + 3)]
newpar.n <- array(0, dim = c(2, N))
newpar.n[1, ] <- apply(as.matrix(melitesmpl[, (2:(N + 1))]), 2, mean)
newpar.n[2, ] <- apply(as.matrix(melitesmpl[, (2:(N + 1))]), 2, sd)
new.para[1, ] <- a * newpar.n[1, ] + (1 - a) * new.para[1, ]
new.para[2, ] <- b * newpar.n[2, ] + (1 - b) * new.para[2, ]
mad <- apply(as.matrix(melitesmpl[, (2:(N + 1))]), 2, mad)
if(max(mad) <= eps){break}
}
return(list(loci = ch[1, (1:(N + 2))], BIC = bic[k]))
}
}
reset_options_scipen <- getOption("scipen")
reset_options_digits <- getOption("digits")
options(scipen = 1, digits = 5)
library(dnapath)
set.seed(12345)
data(meso)
str(meso)
results <- dnapath(meso$gene_expression,
pathway_list = NULL,
groups = meso$groups)
results
plot(results, alpha = 0.05, only_dc = TRUE)
data(meso)
data(p53_pathways)
results <- dnapath(x = meso$gene_expression,
pathway_list = p53_pathways,
groups = meso$groups,
seed = 0)
results
results <- filter_pathways(results, alpha_pathway = 0.1)
results
results <- sort(results, decreasing = TRUE, by = "n_dc")
results
set.seed(0)
plot(results[[1]], alpha = 0.05, only_dc = TRUE)
results <- rename_genes(results, to = "symbol", species = "human",
dir_save = tempdir())
results[[1]]
set.seed(0)
plot(results[[1]], alpha = 0.05, only_dc = TRUE)
summarize_edges(results[[1]], alpha = 0.05)
library(dplyr)
tab <- summarize_edges(results[[1]])
tab <- dplyr::arrange(tab, p_value) # arrange() sorts ascending by default
tab <- dplyr::filter(tab, pmax(abs(nw1), abs(nw2)) > 0.2)
tab
plot_pair(results, "BANP", "TP53")
plot_pair(results, "BANP", "TP53", method = "lm")
set.seed(0)
plot(results[[1]], alpha = 0.05, only_dc = TRUE, require_dc_genes = TRUE)
summarize_edges(results[[1]], alpha = 0.05, require_dc_genes = TRUE)
set.seed(0)
plot(results[[1]], alpha = 0.05)
options(scipen = reset_options_scipen, digits = reset_options_digits)
LIK.density <- function(pp,hlim=NULL,hseq=NULL,resolution=64,edge=TRUE,auto.optim=TRUE,
type=c("fixed","adaptive"),seqres=30,parallelise=NULL,
zero.action=0,verbose=TRUE,...){
if(!inherits(pp,"ppp")) stop("data object 'pp' must be of class \"ppp\"")
W <- Window(pp)
if(is.null(hlim)){
ppu <- pp
marks(ppu) <- NULL
md <- min(nndist(unique(ppu)))
hlim <- c(md,max(md*50,min(diff(W$xrange),diff(W$yrange))/6))
} else {
hlim <- checkran(hlim,"'hlim'")
}
if(!zero.action%in%((-1):2)) stop("invalid 'zero.action'")
typ <- type[1]
if(typ=="fixed"){
if(auto.optim){
if(verbose) cat("Searching for optimal h in ",prange(hlim),"...",sep="")
result <- suppressWarnings(optimise(LIK.density.spatial.single,interval=hlim,pp=pp,res=resolution,edge=edge,za=zero.action,maximum=TRUE)$maximum)
if(verbose) cat("Done.\n")
} else {
if(is.null(hseq)) hseq <- seq(hlim[1],hlim[2],length.out=seqres)
hn <- length(hseq)
if(is.null(parallelise)){
lik.vec <- rep(NA,hn)
if(verbose) pb <- txtProgressBar(1,hn)
for(i in 1:hn){
lik.vec[i] <- LIK.density.spatial.single(hseq[i],pp,resolution,edge,za=zero.action)
if(verbose) setTxtProgressBar(pb,i)
}
if(verbose) close(pb)
} else {
ncores <- detectCores()
if(verbose) cat(paste("Evaluating criterion on",parallelise,"/",ncores,"cores..."))
if(parallelise>ncores) stop("cores requested exceeds available count")
registerDoParallel(cores=parallelise)
lik.vec <- foreach(i=1:hn,.packages="spatstat",.combine=c) %dopar% {
return(LIK.density.spatial.single(hseq[i],pp,resolution,edge,zero.action))
}
if(verbose) cat("Done.\n")
}
result <- cbind(hseq,lik.vec)
dimnames(result)[[2]] <- c("h","CV")
}
} else if(typ=="adaptive"){
ellip <- list(...)
if(is.null(ellip$hp)){
if(verbose) cat("Selecting pilot bandwidth...")
hp <- LSCV.density(pp,verbose=FALSE,zero.action=zero.action)
if(verbose) cat(paste("Done.\n [ Found hp =",hp,"]\n"))
} else {
hp <- ellip$hp
}
if(is.null(ellip$pilot.density)){
pilot.density <- pp
} else {
pilot.density <- ellip$pilot.density
}
if(is.null(ellip$gamma.scale)){
gamma.scale <- "geometric"
} else {
gamma.scale <- ellip$gamma.scale
}
if(is.null(ellip$trim)){
trim <- 5
} else {
trim <- ellip$trim
}
if(is.null(ellip$dimz)){
dimz <- resolution
} else {
dimz <- ellip$dimz
}
if(verbose) cat("Computing multi-scale estimate...")
hhash <- mean(hlim)
msobject <- multiscale.density(pp,h0=hhash,hp=hp,h0fac=hlim/hhash,edge=ifelse(edge,"uniform","none"),resolution=resolution,dimz=dimz,gamma.scale=gamma.scale,trim=trim,intensity=FALSE,pilot.density=pilot.density,verbose=FALSE)
if(verbose) cat("Done.\n")
h0range <- range(as.numeric(names(msobject$z)))
if(auto.optim){
if(verbose) cat("Searching for optimal h0 in ",prange(h0range),"...",sep="")
h0opt <- suppressWarnings(optimise(ms.loo.lik,interval=h0range,object=msobject,za=zero.action,maximum=TRUE)$maximum)
if(verbose) cat("Done.\n")
return(h0opt)
} else {
if(is.null(hseq)) hseq <- seq(h0range[1],h0range[2],length.out=seqres)
hn <- length(hseq)
lik.vec <- rep(NA,hn)
if(verbose) pb <- txtProgressBar(1,hn)
for(i in 1:hn){
lik.vec[i] <- ms.loo.lik(hseq[i],msobject,zero.action)
if(verbose) setTxtProgressBar(pb,i)
}
if(verbose) close(pb)
result <- cbind(hseq,lik.vec)
dimnames(result)[[2]] <- c("h0","CV")
}
} else stop("invalid 'type'")
return(result)
}
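## Usage sketch: select a fixed bandwidth by leave-one-out likelihood
## cross-validation. 'chorley' from spatstat.data is used purely as an
## example input (an assumption; any unmarked "ppp" object works), and the
## package's internal helpers (LIK.density.spatial.single etc.) must be loaded.
if (interactive()) {
data(chorley, package = "spatstat.data")
h.opt <- LIK.density(unmark(chorley))
h.opt
}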
tbl_lazy <- function(df, con = NULL, src = NULL) {
if (!is.null(src)) {
warn("`src` is deprecated; please use `con` instead")
con <- src
}
con <- con %||% sql_current_con() %||% simulate_dbi()
subclass <- class(con)[[1]]
dplyr::make_tbl(
purrr::compact(c(subclass, "lazy")),
ops = op_base_local(df),
src = src_dbi(con)
)
}
setOldClass(c("tbl_lazy", "tbl"))
lazy_frame <- function(..., con = NULL, src = NULL) {
con <- con %||% sql_current_con() %||% simulate_dbi()
tbl_lazy(tibble(...), con = con, src = src)
}
dimnames.tbl_lazy <- function(x) {
list(NULL, op_vars(x$ops))
}
dim.tbl_lazy <- function(x) {
c(NA, length(op_vars(x$ops)))
}
print.tbl_lazy <- function(x, ...) {
show_query(x)
}
as.data.frame.tbl_lazy <- function(x, row.names, optional, ...) {
abort("Can not coerce `tbl_lazy` to data.frame")
}
same_src.tbl_lazy <- function(x, y) {
inherits(y, "tbl_lazy")
}
tbl_vars.tbl_lazy <- function(x) {
op_vars(x$ops)
}
groups.tbl_lazy <- function(x) {
lapply(group_vars(x), as.name)
}
group_by_drop_default.tbl_lazy <- function(x) {
TRUE
}
group_vars.tbl_lazy <- function(x) {
op_grps(x$ops)
}
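## Minimal sketch (assumes dbplyr's internals are loaded): lazy_frame()
## builds a zero-row lazy table, handy for inspecting SQL translation
## without a live database connection.
lf <- lazy_frame(x = 1, y = "a")
dim(lf) # c(NA, 2): the row count is unknown until the query is executed
dimnames(lf) # list(NULL, c("x", "y"))
print(lf) # shows the simulated SQL via show_query()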
snqProfitHessianDeriv <- function( prices, weights, nFix = 0, form = 0 ) {
prices <- array( prices )
weights <- array( weights )
nNetput <- length( prices )   # number of netputs
nCoef <- nNetput + nNetput * ( nNetput - 1 )/2 + nNetput * nFix
if( form == 0 ) {
nCoef <- nCoef + ( nFix + 1 ) * nFix / 2
} else if ( form == 1 ) {
nCoef <- nCoef + nNetput * ( nFix + 1 ) * nFix / 2
} else {
stop( "argument 'form' must be either 0 or 1" )
}
normPrice <- sum( t( prices ) %*% weights )
Hderiv <- array( 0, c( nNetput * ( nNetput - 1 )/2, nCoef ) )
kro <- diag( 1, nNetput, nNetput )
for( i in 1:( nNetput - 1 ) ) {
for( j in i:( nNetput - 1 ) ) {
for( k in 1:( nNetput - 1 ) ) {
for( l in k:( nNetput - 1 ) ) {
Hderiv[ veclipos( i, j, nNetput-1 ),
nNetput + veclipos( k, l, nNetput - 1 ) ] <-
( kro[ i, k ] * kro[ j, l ] +
kro[ i, l ] * kro[ j, k ] * ( 1 - kro[ i, j ] ) ) /
normPrice -
kro[ i, l ] * weights[ j ] *
( prices[ k ] - prices[ nNetput ] ) / normPrice^2 -
( 1 - kro[ k, l ] ) * kro[ i, k ] * weights[ j ] *
( prices[ l ] - prices[ nNetput ] ) / normPrice^2 -
kro[ j, k ] * weights[ i ] *
( prices[ l ] - prices[ nNetput ] ) / normPrice^2 -
( 1 - kro[ k, l ] ) * kro[ j, l ] * weights[ i ] *
( prices[ k ] - prices[ nNetput ] ) / normPrice^2 +
( 2 - kro[ k, l ] ) * weights[ i ] * weights[ j ] *
( prices[ k ] - prices[ nNetput ] ) *
( prices[ l ] - prices[ nNetput ] ) / normPrice^3
}
}
}
}
return( Hderiv )
}
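## Numeric sketch (assumes the veclipos() indexing helper from the same
## package is available): Hessian derivatives for 3 netputs and no fixed
## inputs, giving a 3 x 6 matrix (nNetput*(nNetput-1)/2 rows, nCoef columns).
p <- c( 1, 2, 3 )
w <- c( 0.2, 0.3, 0.5 )
round( snqProfitHessianDeriv( p, w, nFix = 0, form = 0 ), 4 )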
forest.netmeta <- function(x,
pooled = ifelse(x$random, "random", "fixed"),
reference.group = x$reference.group,
baseline.reference = x$baseline.reference,
labels = x$trts,
equal.size = TRUE,
leftcols = "studlab",
leftlabs,
rightcols = c("effect", "ci"),
rightlabs,
digits = gs("digits.forest"),
small.values = x$small.values,
nsim = 1000,
digits.prop = 2,
smlab = NULL,
sortvar = x$seq,
backtransf = x$backtransf,
lab.NA = ".",
add.data,
drop.reference.group = FALSE,
col.by = "black",
print.subgroup.name = FALSE,
...) {
chkclass(x, "netmeta")
x <- updateversion(x)
is.bin <- inherits(x, "netmetabin")
pooled <- setchar(pooled, c("fixed", "random"))
chklogical(equal.size)
chknumeric(digits, min = 0, length = 1)
if (is.null(small.values))
small.values <- "good"
else
small.values <- setchar(small.values, c("good", "bad"))
chknumeric(nsim, min = 1, length = 1)
chknumeric(digits.prop, min = 0, length = 1)
chklogical(baseline.reference)
mf <- match.call()
trts <- x$trts
if (!missing(labels)) {
labels <- eval(mf[[match("labels", names(mf))]],
x, enclos = sys.frame(sys.parent()))
if (is.null(labels))
stop("Argument 'labels' must be not NULL.")
if (length(labels) != length(trts))
stop("Length of argument 'labels' must be equal to number of treatments.")
names(labels) <- trts
}
chklogical(drop.reference.group)
chklogical(print.subgroup.name)
chklogical(backtransf)
chkchar(lab.NA)
stdlabs <- c("event.e", "n.e", "event.c", "n.c",
"mean.e", "sd.e", "mean.c", "sd.c",
"n", "time", "event",
"TE", "seTE",
"time.e", "time.c",
"effect", "ci", "effect.ci",
"w.fixed", "w.random")
if (missing(leftlabs)) {
leftlabs <- leftcols
leftlabs[leftcols %in% stdlabs] <- NA
if (length(reference.group) > 1)
leftlabs[matchVar(leftcols, "studlab")] <- "Comparison"
else
leftlabs[matchVar(leftcols, "studlab")] <- "Treatment"
}
else if (length(leftcols) != length(leftlabs)) {
if (length(reference.group) > 1)
leftlabs[matchVar(leftcols, "studlab")] <- "Comparison"
else
leftlabs[matchVar(leftcols, "studlab")] <- "Treatment"
}
if (missing(rightlabs)) {
rightlabs <- rightcols
rightlabs[rightcols %in% stdlabs] <- NA
}
for (i in names(list(...))) {
if (!is.null(setchar(i, "weight.study", stop.at.error = FALSE)))
stop("Argument 'weight.study' set internally.", call. = TRUE)
if (!is.null(setchar(i, "prediction", stop.at.error = FALSE)))
stop("For prediction intervals see example in help file of ",
"forest.netsplit().", call. = TRUE)
}
one.rg <- length(reference.group) == 1
sortvar.c <- deparse(substitute(sortvar))
sortvar.c <- gsub("\"", "", sortvar.c)
calcPscore <-
anyCol(rightcols, "Pscore") || anyCol(leftcols, "Pscore") ||
any(matchVar(sortvar.c, "Pscore")) || any(matchVar(sortvar.c, "-Pscore"))
calcSUCRA <-
anyCol(rightcols, "SUCRA") || anyCol(leftcols, "SUCRA") ||
any(matchVar(sortvar.c, "SUCRA")) || any(matchVar(sortvar.c, "-SUCRA"))
if (one.rg && reference.group == "") {
warning("First treatment used as reference as argument ",
"'reference.group' is unspecified.",
call. = FALSE)
reference.group <- trts[1]
}
reference.group <- setref(reference.group, trts, length = 0)
if (pooled == "fixed") {
TE <- x$TE.fixed
seTE <- x$seTE.fixed
prop.direct <- x$P.fixed
if (calcPscore)
Pscore <- netrank(x, small.values = small.values,
method = "P-score")$ranking.fixed
if (calcSUCRA) {
x$fixed <- TRUE
x$random <- FALSE
SUCRA <- netrank(x, small.values = small.values,
method = "SUCRA", nsim = nsim)$ranking.fixed
}
text.pooled <- "Fixed Effect Model"
if (x$method == "MH")
text.pooled <- "Mantel-Haenszel Method"
else if (x$method == "NCH")
text.pooled <- "Non-Central Hypergeometric"
}
if (pooled == "random") {
TE <- x$TE.random
seTE <- x$seTE.random
prop.direct <- x$P.random
if (calcPscore)
Pscore <- netrank(x, small.values = small.values,
method = "P-score")$ranking.random
if (calcSUCRA) {
x$fixed <- FALSE
x$random <- TRUE
SUCRA <- netrank(x, small.values = small.values,
method = "SUCRA", nsim = nsim)$ranking.random
}
text.pooled <- "Random Effects Model"
}
if (is.null(smlab)) {
if (one.rg) {
if (baseline.reference)
smlab <- paste0("Comparison: other vs '",
reference.group, "'\n(",
text.pooled,
")")
else
smlab <- paste0("Comparison: '",
reference.group,
"' vs other \n(",
text.pooled,
")")
}
else
smlab <- text.pooled
}
rightcols <- setCol(rightcols, "Pscore")
rightlabs <- setLab(rightlabs, rightcols, "Pscore", "P-score")
rightcols <- setCol(rightcols, "SUCRA")
rightlabs <- setLab(rightlabs, rightcols, "SUCRA", "SUCRA")
rightcols <- setCol(rightcols, "k")
rightlabs <- setLab(rightlabs, rightcols, "k", "Direct\nComparisons")
rightcols <- setCol(rightcols, "prop.direct")
rightlabs <- setLab(rightlabs, rightcols, "prop.direct",
"Direct Evidence\nProportion")
leftcols <- setCol(leftcols, "Pscore")
leftlabs <- setLab(leftlabs, leftcols, "Pscore", "P-score")
leftcols <- setCol(leftcols, "SUCRA")
leftlabs <- setLab(leftlabs, leftcols, "SUCRA", "SUCRA")
leftcols <- setCol(leftcols, "k")
leftlabs <- setLab(leftlabs, leftcols, "k", "Direct\nComparisons")
leftcols <- setCol(leftcols, "prop.direct")
leftlabs <- setLab(leftlabs, leftcols, "prop.direct",
"Direct Evidence\nProportion")
dat <- data.frame(comparison = character(0),
treat = character(0),
TE = numeric(0), seTE = numeric(0),
Pscore = numeric(0),
SUCRA = numeric(0),
k = numeric(0),
prop.direct = numeric(0),
stringsAsFactors = FALSE)
for (i in seq_along(reference.group)) {
rg.i <- reference.group[i]
if (baseline.reference)
dat.i <- data.frame(comparison = rg.i,
treat = colnames(TE),
labels = labels,
TE = TE[, colnames(TE) == rg.i],
seTE = seTE[, colnames(seTE) == rg.i],
Pscore = if (calcPscore) Pscore else NA,
SUCRA = if (calcSUCRA) SUCRA else NA,
k = x$A.matrix[, colnames(TE) == rg.i],
prop.direct =
if (is.bin) prop.direct
else prop.direct[, colnames(TE) == rg.i],
stringsAsFactors = FALSE)
else
dat.i <- data.frame(comparison = rg.i,
treat = rownames(TE),
labels = labels,
TE = TE[rownames(TE) == rg.i, ],
seTE = seTE[rownames(seTE) == rg.i, ],
Pscore = if (calcPscore) Pscore else NA,
SUCRA = if (calcSUCRA) SUCRA else NA,
k = x$A.matrix[rownames(TE) == rg.i, ],
prop.direct =
if (is.bin) prop.direct
else prop.direct[rownames(TE) == rg.i, ],
stringsAsFactors = FALSE)
if (!missing(add.data)) {
if (!is.data.frame(add.data))
stop("Argument 'add.data' must be a data frame.",
call. = FALSE)
if (nrow(add.data) != length(trts))
stop("Dataset 'add.data' must have ", nrow(dat.i),
" rows (corresponding to number of treatments)",
call. = FALSE)
if (any(rownames(add.data) != trts))
stop("Dataset 'add.data' must have the following row names:\n",
paste(paste("'", trts, "'", sep = ""), collapse = " - "),
call. = FALSE)
dat.i <- cbind(dat.i, add.data)
}
if (any(matchVar(sortvar.c, "Pscore")))
sortvar <- Pscore
else if (any(matchVar(sortvar.c, "-Pscore")))
sortvar <- -Pscore
else if (any(matchVar(sortvar.c, "SUCRA")))
sortvar <- SUCRA
else if (any(matchVar(sortvar.c, "-SUCRA")))
sortvar <- -SUCRA
else if (any(matchVar(sortvar.c, "TE")))
sortvar <- dat.i$TE
else if (any(matchVar(sortvar.c, "-TE")))
sortvar <- -dat.i$TE
else if (any(matchVar(sortvar.c, "seTE")))
sortvar <- dat.i$seTE
else if (any(matchVar(sortvar.c, "-seTE")))
sortvar <- -dat.i$seTE
else if (any(matchVar(sortvar.c, "k")))
sortvar <- dat.i$k
else if (any(matchVar(sortvar.c, "-k")))
sortvar <- -dat.i$k
else if (any(matchVar(sortvar.c, "prop.direct")))
sortvar <- dat.i$prop.direct
else if (any(matchVar(sortvar.c, "-prop.direct")))
sortvar <- -dat.i$prop.direct
if (!is.null(sortvar)) {
if (is.character(sortvar))
sort <- setseq(sortvar, trts)
else
sort <- order(sortvar)
dat.i <- dat.i[sort, ]
}
if (drop.reference.group)
dat.i <- subset(dat.i, treat != rg.i)
if (baseline.reference)
dat.i$comparison <- paste0("Other vs '", dat.i$comparison, "'")
else
dat.i$comparison <- paste0("'", dat.i$comparison, "' vs other")
dat <- rbind(dat, dat.i)
}
dat.out <- dat
if ("Pscore" %in% names(dat))
dat$Pscore <- formatN(dat$Pscore, digits = digits.prop,
text.NA = lab.NA)
if ("SUCRA" %in% names(dat))
dat$SUCRA <- formatN(dat$SUCRA, digits = digits.prop,
text.NA = lab.NA)
if ("prop.direct" %in% names(dat))
dat$prop.direct <- formatN(dat$prop.direct,
digits = digits.prop, text.NA = lab.NA)
rm(TE)
rm(seTE)
treat <- dat$treat
if (one.rg)
m1 <- suppressWarnings(metagen(TE, seTE, data = dat,
sm = x$sm,
studlab = labels, backtransf = backtransf,
method.tau = "DL", method.tau.ci = "",
warn = FALSE))
else
m1 <- suppressWarnings(metagen(TE, seTE, data = dat,
byvar = dat$comparison,
sm = x$sm,
studlab = labels, backtransf = backtransf,
method.tau = "DL", method.tau.ci = "",
warn = FALSE))
forest(m1,
digits = digits,
overall = FALSE, fixed = FALSE, random = FALSE,
hetstat = FALSE, test.subgroup = FALSE,
leftcols = leftcols,
leftlabs = leftlabs,
rightcols = rightcols,
rightlabs = rightlabs,
smlab = smlab,
lab.NA = lab.NA,
col.by = col.by,
print.subgroup.name = print.subgroup.name,
weight.study = if (equal.size) "same" else pooled,
...)
rownames(dat.out) <- seq_len(nrow(dat.out))
attr(dat.out, "pooled") <- pooled
attr(dat.out, "small.values") <- small.values
attr(dat.out, "small.values") <- small.values
invisible(dat.out)
}
plot.netmeta <- function(x, ...)
forest(x, ...)
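## Usage sketch (hedged), assuming the Senn2013 example data shipped with
## netmeta:
## data(Senn2013, package = "netmeta")
## net1 <- netmeta(TE, seTE, treat1, treat2, studlab,
## data = Senn2013, sm = "MD", reference.group = "plac")
## forest(net1, pooled = "random", drop.reference.group = TRUE)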
fitted.tuneclus <- function(object, mth = c("centers", "classes"), ...)
{
mth <- match.arg(mth)
object$clusobjbest$centroid <- data.frame(object$clusobjbest$centroid)
if (mth == "centers")
object$clusobjbest$centroid[object$clusobjbest$cluster, , drop = FALSE]
else object$clusobjbest$cluster
}
dag_class <- R6Class(
"dag_class",
public = list(
mode = "all_forward",
node_list = list(),
target_nodes = list(),
variables_without_free_state = list(),
tf_environment = NA,
tf_graph = NA,
tf_float = NA,
n_cores = 0L,
compile = NA,
trace_names = NULL,
initialize = function(target_greta_arrays,
tf_float = "float32",
compile = FALSE) {
self$build_dag(target_greta_arrays)
self$target_nodes <- lapply(target_greta_arrays, get_node)
self$new_tf_environment()
self$tf_float <- tf_float
self$compile <- compile
},
new_tf_environment = function() {
self$tf_environment <- new.env()
self$tf_graph <- tf$Graph()
self$tf_environment$all_forward_data_list <- list()
self$tf_environment$all_sampling_data_list <- list()
self$tf_environment$hybrid_data_list <- list()
},
on_graph = function(expr) {
old_float_type <- options()$greta_tf_float
old_batch_size <- options()$greta_batch_size
on.exit(options(
greta_tf_float = old_float_type,
greta_batch_size = old_batch_size
))
options(
greta_tf_float = self$tf_float,
greta_batch_size = self$tf_environment$batch_size
)
with(self$tf_graph$as_default(), expr)
},
tf_run = function(expr, as_text = FALSE) {
tfe <- self$tf_environment
if (as_text) {
tfe$expr <- parse(text = expr)
} else {
tfe$expr <- substitute(expr)
}
on.exit(rm("expr", envir = tfe))
self$on_graph(with(tfe, eval(expr)))
},
tf_sess_run = function(expr, as_text = FALSE) {
if (!as_text) {
expr <- deparse(substitute(expr))
}
expr <- glue::glue("sess$run({expr}, feed_dict = feed_dict)")
self$tf_run(expr, as_text = TRUE)
},
build_dag = function(greta_array_list) {
target_node_list <- lapply(greta_array_list, get_node)
for (node in target_node_list) {
node$register_family(self)
}
},
get_tf_names = function(types = NULL) {
names <- self$node_tf_names
if (!is.null(types)) {
names <- names[which(self$node_types %in% types)]
}
if (length(names) > 0) {
names <- paste(self$mode, names, sep = "_")
}
names
},
tf_name = function(node) {
name <- self$node_tf_names[node$unique_name]
if (length(name) == 0) {
name <- ""
}
if (!is.na(name)) {
name <- paste(self$mode, name, sep = "_")
}
name
},
how_to_define_hybrid = function(node) {
node_type <- node_type(node)
stateless_names <- names(self$variables_without_free_state)
if (node_type == "data") {
node_mode <- ifelse(has_distribution(node), "sampling", "forward")
}
if (node_type == "variable") {
to_sample <- node$unique_name %in% stateless_names
node_mode <- ifelse(to_sample, "sampling", "forward")
}
if (node_type == "operation") {
parent_name <- node$parents[[1]]$unique_name
parent_stateless <- parent_name %in% stateless_names
to_sample <- has_distribution(node) && parent_stateless
node_mode <- ifelse(to_sample, "sampling", "forward")
}
if (node_type == "distribution") {
target <- node$target
target_type <- node_type(target)
if (is.null(target)) {
node_mode <- "sampling"
}
if (target_type == "data") {
node_mode <- "sampling"
}
if (target_type == "variable") {
to_sample <- target$unique_name %in% stateless_names
node_mode <- ifelse(to_sample, "sampling", "forward")
}
if (target_type == "operation") {
target_parent_name <- target$parents[[1]]$unique_name
target_parent_stateless <- target_parent_name %in% stateless_names
node_mode <- ifelse(target_parent_stateless, "sampling", "forward")
}
}
node_mode
},
how_to_define_all_sampling = function(node) {
switch(node_type(node),
data = ifelse(has_distribution(node), "sampling", "forward"),
operation = ifelse(has_distribution(node), "sampling", "forward"),
"sampling"
)
},
how_to_define = function(node) {
switch(self$mode,
all_forward = "forward",
all_sampling = self$how_to_define_all_sampling(node),
hybrid = self$how_to_define_hybrid(node)
)
},
define_batch_size = function() {
self$tf_run(
batch_size <- tf$compat$v1$placeholder(dtype = tf$int32)
)
},
define_free_state = function(type = c("variable", "placeholder"),
name = "free_state") {
type <- match.arg(type)
tfe <- self$tf_environment
vals <- self$example_parameters(free = TRUE)
vals <- unlist_tf(vals)
if (type == "variable") {
vals <- as.logical(vals)
vals <- t(as.matrix(vals))
self$on_graph(free_state <- tf$Variable(
initial_value = vals,
dtype = tf_float()
))
} else {
shape <- shape(NULL, length(vals))
self$on_graph(free_state <- tf$compat$v1$placeholder(
dtype = tf_float(),
shape = shape
))
}
assign(name,
free_state,
envir = tfe
)
},
split_free_state = function() {
tfe <- self$tf_environment
free_state <- get("free_state", envir = tfe)
params <- self$example_parameters(free = TRUE)
lengths <- vapply(params,
function(x) length(x),
FUN.VALUE = 1L
)
if (length(lengths) > 1) {
args <- self$on_graph(tf$split(free_state, lengths, axis = 1L))
} else {
args <- list(free_state)
}
names <- glue::glue("{names(params)}_free")
for (i in seq_along(names)) {
assign(names[i], args[[i]], envir = tfe)
}
},
define_tf_body = function(target_nodes = self$node_list) {
if (self$mode %in% c("all_forward", "hybrid")) {
self$split_free_state()
}
self$on_graph(
lapply(target_nodes, function(x) x$define_tf(self))
)
invisible(NULL)
},
define_tf_session = function() {
tfe <- self$tf_environment
tfe$n_cores <- self$n_cores
self$tf_run(
config <- tf$compat$v1$ConfigProto(
inter_op_parallelism_threads = n_cores,
intra_op_parallelism_threads = n_cores
)
)
if (self$compile) {
self$tf_run(
py_set_attr(
config$graph_options$optimizer_options,
"global_jit_level",
tf$compat$v1$OptimizerOptions$ON_1
)
)
}
self$tf_run(sess <- tf$compat$v1$Session(config = config))
self$tf_run(sess$run(tf$compat$v1$global_variables_initializer()))
},
define_tf = function(target_nodes = self$node_list) {
if (self$mode %in% c("all_forward", "hybrid")) {
self$define_free_state("placeholder")
}
self$define_batch_size()
self$define_tf_body(target_nodes = target_nodes)
self$define_tf_session()
},
define_joint_density = function() {
tfe <- self$tf_environment
distribution_nodes <- self$node_list[self$node_types == "distribution"]
target_nodes <- lapply(distribution_nodes, member, "get_tf_target_node()")
has_target <- !vapply(target_nodes, is.null, FUN.VALUE = TRUE)
distribution_nodes <- distribution_nodes[has_target]
target_nodes <- target_nodes[has_target]
densities <- mapply(self$evaluate_density,
distribution_nodes,
target_nodes,
SIMPLIFY = FALSE
)
self$on_graph(summed_densities <- lapply(densities, tf_sum, drop = TRUE))
names(summed_densities) <- NULL
self$on_graph(joint_density <- tf$add_n(summed_densities))
assign("joint_density",
joint_density,
envir = self$tf_environment
)
adj_names <- glue::glue("{self$get_tf_names(types = 'variable')}_adj")
adj <- lapply(adj_names, get, envir = self$tf_environment)
names(adj) <- NULL
adj <- match_batches(adj)
self$on_graph(total_adj <- tf$add_n(adj))
assign("joint_density_adj",
joint_density + total_adj,
envir = self$tf_environment
)
},
evaluate_density = function(distribution_node, target_node) {
tfe <- self$tf_environment
parameter_nodes <- distribution_node$parameters
distrib_constructor <- self$get_tf_object(distribution_node)
tf_target <- self$get_tf_object(target_node)
tf_parameter_list <- lapply(parameter_nodes, self$get_tf_object)
tfp_distribution <- distrib_constructor(tf_parameter_list, dag = self)
self$tf_evaluate_density(tfp_distribution,
tf_target,
truncation = distribution_node$truncation,
bounds = distribution_node$bounds
)
},
tf_evaluate_density = function(tfp_distribution,
tf_target,
truncation = NULL,
bounds = NULL) {
ld <- tfp_distribution$log_prob(tf_target)
if (!is.null(truncation)) {
lower <- truncation[[1]]
upper <- truncation[[2]]
if (all(lower == bounds[1])) {
offset <- tfp_distribution$log_cdf(fl(upper))
} else if (all(upper == bounds[2])) {
offset <- tf$math$log(fl(1) - tfp_distribution$cdf(fl(lower)))
} else {
offset <- tf$math$log(tfp_distribution$cdf(fl(upper)) -
tfp_distribution$cdf(fl(lower)))
}
ld <- ld - offset
}
ld
},
get_tf_object = function(node) {
get(self$tf_name(node), envir = self$tf_environment)
},
generate_log_prob_function = function(which = c(
"adjusted",
"unadjusted",
"both"
)) {
which <- match.arg(which)
function(free_state) {
tfe_old <- self$tf_environment
on.exit(self$tf_environment <- tfe_old)
tfe <- self$tf_environment <- new.env()
data_names <- self$get_tf_names(types = "data")
for (name in data_names) {
tfe[[name]] <- tfe_old[[name]]
}
tfe$batch_size <- tfe_old$batch_size
tfe$free_state <- free_state
self$define_tf_body()
self$define_joint_density()
objectives <- list(
adjusted = tfe$joint_density_adj,
unadjusted = tfe$joint_density
)
result <- switch(which,
adjusted = objectives$adjusted,
unadjusted = objectives$unadjusted,
both = objectives
)
result
}
},
example_parameters = function(free = TRUE) {
nodes <- self$node_list[self$node_types == "variable"]
names(nodes) <- self$get_tf_names(types = "variable")
if (free) {
parameters <- lapply(nodes, member, "value(free = TRUE)")
} else {
parameters <- lapply(nodes, member, "value()")
}
stateless_names <- vapply(self$variables_without_free_state,
self$tf_name,
FUN.VALUE = character(1)
)
keep <- !names(parameters) %in% stateless_names
parameters <- parameters[keep]
parameters
},
get_tf_data_list = function() {
data_list_name <- glue::glue("{self$mode}_data_list")
self$tf_environment[[data_list_name]]
},
set_tf_data_list = function(element_name, value) {
data_list_name <- glue::glue("{self$mode}_data_list")
self$tf_environment[[data_list_name]][[element_name]] <- value
},
build_feed_dict = function(dict_list = list(),
data_list = self$get_tf_data_list()) {
tfe <- self$tf_environment
tfe$dict_list <- c(dict_list, data_list)
on.exit(rm("dict_list", envir = tfe))
self$tf_run(feed_dict <- do.call(dict, dict_list))
},
send_parameters = function(parameters) {
if (is.null(dim(parameters))) {
parameters <- array(parameters, dim = c(1, length(parameters)))
}
parameter_list <- list(free_state = parameters)
self$set_tf_data_list("batch_size", nrow(parameters))
self$build_feed_dict(parameter_list)
},
log_density = function() {
res <- cleanly(self$tf_sess_run(joint_density_adj))
if (inherits(res, "error")) {
res <- NA
}
res
},
hessians = function() {
tfe <- self$tf_environment
nodes <- self$target_nodes
ga_names <- names(nodes)
ga_dims <- lapply(nodes, member, "dim")
if (!exists("hessian_list", envir = tfe)) {
tf_names <- vapply(nodes, self$tf_name, FUN.VALUE = "")
y <- tfe$joint_density
xs <- lapply(tf_names, get, tfe)
names(xs) <- NULL
tfe$hessian_list <- self$on_graph(tf$hessians(y, xs))
}
hessian_list <- self$tf_sess_run(hessian_list)
dims <- lapply(ga_dims, hessian_dims)
hessian_list <- mapply(array, hessian_list, dims, SIMPLIFY = FALSE)
names(hessian_list) <- ga_names
hessian_list
},
trace_values_batch = function(free_state_batch) {
self$send_parameters(free_state_batch)
tfe <- self$tf_environment
target_tf_names <- lapply(
self$target_nodes,
self$tf_name
)
target_tensors <- lapply(target_tf_names,
get,
envir = tfe
)
trace_list <- tfe$sess$run(target_tensors,
feed_dict = tfe$feed_dict
)
trace_list
},
trace_values = function(free_state,
flatten = TRUE,
trace_batch_size = Inf) {
n_samples <- nrow(free_state)
indices <- seq_len(n_samples)
splits <- split(indices, (indices - 1) %/% trace_batch_size)
names(splits) <- NULL
get_rows <- function(rows, x) x[rows, , drop = FALSE]
free_state_batches <- lapply(splits, get_rows, free_state)
trace_list_batches <- lapply(free_state_batches, self$trace_values_batch)
stack_elements <- function(name, list) {
elems <- lapply(trace_list_batches, `[[`, name)
do.call(abind::abind, c(elems, list(along = 1)))
}
elements <- seq_along(trace_list_batches[[1]])
trace_list <- lapply(elements, stack_elements, trace_list_batches)
names(trace_list) <- names(trace_list_batches[[1]])
if (flatten) {
trace_list_flat <- lapply(
seq_along(trace_list),
flatten_trace,
trace_list
)
out <- do.call(cbind, trace_list_flat)
self$trace_names <- colnames(out)
} else {
out <- trace_list
}
out
},
subgraph_membership = function() {
adj <- self$adjacency_matrix
sym <- (adj + t(adj)) > 0
maxit <- 1000
it <- 0
p <- r <- sym
while (it < maxit) {
p <- p %*% sym
reach <- (r + p) > 0
if (any(reach != r)) {
r <- reach
it <- it + 1
} else {
break
}
}
if (it == maxit) {
msg <- cli::format_error(
"could not determine the number of independent models in a \\
reasonable amount of time"
)
stop(
msg,
call. = FALSE
)
}
n <- nrow(r)
neighbours <- lapply(seq_len(n), function(i) which(r[i, ]))
cluster_names <- vapply(X = neighbours,
FUN = paste,
FUN.VALUE = character(1),
collapse = "_")
cluster_id <- match(cluster_names, unique(cluster_names))
names(cluster_id) <- rownames(adj)
cluster_id
},
get_tfp_distribution = function(distrib_node) {
distrib_constructor <- self$get_tf_object(distrib_node)
parameter_nodes <- distrib_node$parameters
tf_parameter_list <- lapply(parameter_nodes, self$get_tf_object)
tfp_distribution <- distrib_constructor(tf_parameter_list, dag = self)
},
draw_sample = function(distribution_node) {
tfp_distribution <- self$get_tfp_distribution(distribution_node)
sample <- tfp_distribution$sample
if (is.null(sample)) {
msg <- cli::format_error(
c(
"sampling is not yet implemented for \\
{.val {distribution_node$distribution_name}} distributions"
)
)
stop(
msg,
call. = FALSE
)
}
truncation <- distribution_node$truncation
if (is.null(truncation)) {
tensor <- sample(seed = get_seed())
} else {
cdf <- tfp_distribution$cdf
quantile <- tfp_distribution$quantile
if (is.null(cdf) || is.null(quantile)) {
msg <- cli::format_error(
"sampling is not yet implemented for truncated \\
{.val {distribution_node$distribution_name}} distributions"
)
stop(
msg,
call. = FALSE
)
}
u <- tf_randu(distribution_node$dim, self)
lower <- cdf(fl(truncation[1]))
upper <- cdf(fl(truncation[2]))
range <- upper - lower
tensor <- quantile(lower + u * range)
}
tensor
}
),
active = list(
node_types = function(value) {
vapply(self$node_list, node_type, FUN.VALUE = "")
},
node_tf_names = function(value) {
types <- self$node_types
for (type in c("variable", "data", "operation", "distribution")) {
idx <- which(types == type)
types[idx] <- paste(type, seq_along(idx), sep = "_")
}
types
},
adjacency_matrix = function(value) {
n_node <- length(self$node_list)
node_names <- names(self$node_list)
node_types <- self$node_types
dag_mat <- matrix(0, nrow = n_node, ncol = n_node)
rownames(dag_mat) <- colnames(dag_mat) <- node_names
children <- lapply(
self$node_list,
member,
"child_names()"
)
parents <- lapply(
self$node_list,
member,
"parent_names(recursive = FALSE)"
)
distribs <- which(node_types == "distribution")
for (i in distribs) {
own_name <- node_names[i]
target_name <- self$node_list[[i]]$target$unique_name
if (!is.null(target_name)) {
parents[[i]] <- parents[[i]][parents[[i]] != target_name]
children[[i]] <- c(children[[i]], target_name)
idx <- match(target_name, node_names)
children[[idx]] <- children[[idx]][children[[idx]] != own_name]
parents[[idx]] <- c(parents[[idx]], own_name)
}
}
for (i in seq_len(n_node)) {
dag_mat[i, children[[i]]] <- 1
dag_mat[parents[[i]], i] <- 1
}
dag_mat
}
)
)
test_that("OSOAs_regular", {
temp <- OSOAs_regular(s=3, k=3, el=3, optimize=FALSE)
expect_s3_class(temp, "SOA")
expect_equal(attr(temp, "type"), "OSOA")
expect_equal(attr(temp, "strength"), "2*")
expect_equal(dim(temp), c(27, 4))
expect_equal(length(unique(c(temp))), 27)
expect_snapshot_output((temp <- OSOAs_regular(s=3, k=3, el=2, optimize=FALSE)))
expect_s3_class(temp, "SOA")
expect_equal(attr(temp, "type"), "OSOA")
expect_equal(attr(temp, "strength"), "2+")
expect_equal(dim(temp), c(27, 4))
expect_equal(length(unique(c(temp))), 9)
expect_error(OSOAs_regular(s=3, k=3, el=2, m=5),
regexp="m is too large", fixed=TRUE)
expect_error(OSOAs_regular(s = 4, k = 3, el = 3, m = 5),
regexp="m is too large in combination with el=3")
temp <- OSOAs_regular(s = 3, k = 3, m = 3, el = 2, optimize = FALSE)
expect_equal(dim(temp), c(27, 3))
temp <- OSOAs_regular(s = 3, k = 3, m = 3, el = 3, optimize = FALSE)
expect_equal(dim(temp), c(27, 3))
})
context("Create statements")
test_that("Read statements from XBRL data", {
data(xbrl_data_aapl2014)
test_statement <- xbrl_get_statements(xbrl_data_aapl2014)
expect_gt(length(test_statement), 2)
expect_is(test_statement, "statements")
expect_is(test_statement[[1]], "statement")
expect_output(print(test_statement[[1]]), "Financial")
})
context("Merging")
test_that("Merge", {
data(xbrl_data_aapl2013)
data(xbrl_data_aapl2014)
st1 <- xbrl_get_statements(xbrl_data_aapl2013)
st2 <- xbrl_get_statements(xbrl_data_aapl2014)
st_all <- merge(st1, st2)
expect_true( nrow(st_all[[1]]) > nrow(st1[[1]]), "merge" )
})
`mostattributes<-` <- function(x, value)
{
if(length(value)) {
if(!is.list(value)) stop("'value' must be a list")
if(h.nam <- !is.na(inam <- match("names", names(value)))) {
n1 <- value[[inam]]; value <- value[-inam] }
if(h.dim <- !is.na(idin <- match("dim", names(value)))) {
d1 <- value[[idin]]; value <- value[-idin] }
if(h.dmn <- !is.na(idmn <- match("dimnames", names(value)))) {
dn1 <- value[[idmn]]; value <- value[-idmn] }
attributes(x) <- value
dm <- attr(x, "dim")
L <- length(if(is.list(x)) unclass(x) else x)
if(h.dim && L == prod(d1)) attr(x, "dim") <- dm <- d1
if(h.dmn && !is.null(dm)) {
ddn <- vapply(dn1, length, 1, USE.NAMES=FALSE)
if( all((dm == ddn)[ddn > 0]) ) attr(x, "dimnames") <- dn1
}
if(h.nam && is.null(dm) && L == length(n1)) attr(x, "names") <- n1
}
x
}
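## Minimal sketch: attributes are applied leniently -- dim, dimnames and
## names are only kept when they are consistent with the object's length.
x <- 1:6
mostattributes(x) <- list(dim = c(2L, 3L), dimnames = list(c("r1", "r2"), c("c1", "c2", "c3")))
x # now a 2 x 3 matrix with dimnames
y <- 1:5
mostattributes(y) <- list(dim = c(2L, 3L)) # silently skipped: 5 != prod(dim)
dim(y) # NULL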
testthat::test_that("We can read a Quake II model in MD2 format.", {
md2f = file.path("~/data/q2_pak/models/items/quaddama/tris.md2");
if(! file.exists(md2f)) {
testthat::skip("Test MD2 file not available");
}
md2 = read.quake.md2(md2f);
testthat::expect_true(is.quakemodel(md2));
testthat::expect_true(is.quakemodel_md2(md2));
testthat::expect_false(is.quakemodel_mdl(md2));
testthat::expect_false(is.null(md2$header));
testthat::expect_false(is.null(md2$skins));
testthat::expect_false(is.null(md2$triangles));
testthat::expect_false(is.null(md2$frames));
testthat::expect_false(is.null(md2$glcmds));
testthat::expect_equal(nrow(md2$frames[[1]]$vertex_coords), 78L);
testthat::expect_equal(ncol(md2$frames[[1]]$vertex_coords), 3L);
fs_surf_from_md2 = quakemodel.to.fs.surface(md2);
testthat::expect_false(is.null(fs_surf_from_md2$faces));
testthat::expect_false(is.null(fs_surf_from_md2$vertices));
})
library(rearrr)
context("vector_length()")
test_that("testing vector_length()", {
xpectr::set_test_seed(42)
expect_equal(vector_length(c(-3, 5, 7)),
sqrt(sum(c(-3, 5, 7)^2)))
xpectr::set_test_seed(42)
output_19148 <- vector_length(c(-3, 5, 7))
expect_equal(
class(output_19148),
"numeric",
fixed = TRUE)
expect_type(
output_19148,
type = "double")
expect_equal(
output_19148,
9.11043,
tolerance = 1e-4)
expect_equal(
names(output_19148),
NULL,
fixed = TRUE)
expect_equal(
length(output_19148),
1L)
expect_equal(
sum(xpectr::element_lengths(output_19148)),
1L)
xpectr::set_test_seed(42)
df <- data.frame(
"x" = runif(20),
"y" = runif(20),
"z" = runif(20),
stringsAsFactors = FALSE
)
expect_equal(
vector_length(df, cols = c("x", "y", "z"))$.vec_len,
df %>%
dplyr::rowwise() %>%
dplyr::summarise(vl = sqrt(sum(c(x, y, z)^2)), .groups = "drop") %>%
.[["vl"]]
)
xpectr::set_test_seed(42)
output_19148 <- vector_length(df, cols = c("x", "y", "z"))$.vec_len
expect_equal(
class(output_19148),
"numeric",
fixed = TRUE)
expect_type(
output_19148,
type = "double")
expect_equal(
output_19148,
c(1.34097, 1.04271, 1.03014, 1.59173, 0.77784, 1.2045, 1.21776,
1.11717, 1.25467, 1.25657, 0.92992, 1.13805, 1.08767, 1.07259,
0.46395, 1.46221, 1.18983, 0.2937, 1.05627, 0.97615),
tolerance = 1e-4)
expect_equal(
names(output_19148),
NULL,
fixed = TRUE)
expect_equal(
length(output_19148),
20L)
expect_equal(
sum(xpectr::element_lengths(output_19148)),
20L)
expect_equal(
vector_length(df, cols = c("x", "y", "z"), by_row = FALSE) %>%
as.data.frame(stringsAsFactors = FALSE),
df %>%
dplyr::summarise_all(.funs = function(x)sqrt(sum(x^2)))
)
xpectr::set_test_seed(42)
output_17148 <- vector_length(df, cols = c("x", "y", "z"), by_row = FALSE)
expect_equal(
class(output_17148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_17148[["x"]],
2.98838,
tolerance = 1e-4)
expect_equal(
output_17148[["y"]],
2.93397,
tolerance = 1e-4)
expect_equal(
output_17148[["z"]],
2.70165,
tolerance = 1e-4)
expect_equal(
names(output_17148),
c("x", "y", "z"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_17148),
c("numeric", "numeric", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_17148),
c("double", "double", "double"),
fixed = TRUE)
expect_equal(
dim(output_17148),
c(1L, 3L))
expect_equal(
colnames(dplyr::group_keys(output_17148)),
character(0),
fixed = TRUE)
})
test_that("fuzz testing vector_length()", {
xpectr::set_test_seed(42)
df <- data.frame(
"x" = runif(20),
"y" = runif(20),
"z" = runif(20),
"ch" = LETTERS[1:20],
"g" = rep(1:4, each = 5),
stringsAsFactors = FALSE
)
xpectr::set_test_seed(42)
xpectr::set_test_seed(42)
output_19148 <- vector_length(data = df, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len")
expect_equal(
class(output_19148),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_19148[["x"]],
c(0.91481, 0.93708, 0.28614, 0.83045, 0.64175, 0.5191, 0.73659,
0.13467, 0.65699, 0.70506, 0.45774, 0.71911, 0.93467, 0.25543,
0.46229, 0.94001, 0.97823, 0.11749, 0.475, 0.56033),
tolerance = 1e-4)
expect_equal(
output_19148[["y"]],
c(0.90403, 0.13871, 0.98889, 0.94667, 0.08244, 0.51421, 0.3902,
0.90574, 0.44697, 0.836, 0.7376, 0.81106, 0.38811, 0.68517,
0.00395, 0.83292, 0.00733, 0.20766, 0.9066, 0.61178),
tolerance = 1e-4)
expect_equal(
output_19148[["z"]],
c(0.37956, 0.43577, 0.03743, 0.97354, 0.43175, 0.95758, 0.88775,
0.63998, 0.97097, 0.61884, 0.33343, 0.34675, 0.39849, 0.78469,
0.03894, 0.7488, 0.67728, 0.17126, 0.26109, 0.51441),
tolerance = 1e-4)
expect_equal(
output_19148[["ch"]],
c("A", "B", "C", "D", "E", "F", "G", "H",
"I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T"))
expect_equal(
output_19148[["g"]],
c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4),
tolerance = 1e-4)
expect_equal(
output_19148[[".vec_len"]],
c(1.34097, 1.04271, 1.03014, 1.59173, 0.77784, 1.2045, 1.21776,
1.11717, 1.25467, 1.25657, 0.92992, 1.13805, 1.08767, 1.07259,
0.46395, 1.46221, 1.18983, 0.2937, 1.05627, 0.97615),
tolerance = 1e-4)
expect_equal(
names(output_19148),
c("x", "y", "z", "ch", "g", ".vec_len"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_19148),
c("numeric", "numeric", "numeric", "character", "integer", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_19148),
c("double", "double", "double", "character", "integer", "double"),
fixed = TRUE)
expect_equal(
dim(output_19148),
c(20L, 6L))
expect_equal(
colnames(dplyr::group_keys(output_19148)),
character(0),
fixed = TRUE)
xpectr::set_test_seed(42)
output_19370 <- vector_length(data = dplyr::group_by(df, g), cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len")
expect_equal(
class(output_19370),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_19370[["x"]],
c(0.91481, 0.93708, 0.28614, 0.83045, 0.64175, 0.5191, 0.73659,
0.13467, 0.65699, 0.70506, 0.45774, 0.71911, 0.93467, 0.25543,
0.46229, 0.94001, 0.97823, 0.11749, 0.475, 0.56033),
tolerance = 1e-4)
expect_equal(
output_19370[["y"]],
c(0.90403, 0.13871, 0.98889, 0.94667, 0.08244, 0.51421, 0.3902,
0.90574, 0.44697, 0.836, 0.7376, 0.81106, 0.38811, 0.68517,
0.00395, 0.83292, 0.00733, 0.20766, 0.9066, 0.61178),
tolerance = 1e-4)
expect_equal(
output_19370[["z"]],
c(0.37956, 0.43577, 0.03743, 0.97354, 0.43175, 0.95758, 0.88775,
0.63998, 0.97097, 0.61884, 0.33343, 0.34675, 0.39849, 0.78469,
0.03894, 0.7488, 0.67728, 0.17126, 0.26109, 0.51441),
tolerance = 1e-4)
expect_equal(
output_19370[["ch"]],
c("A", "B", "C", "D", "E", "F", "G", "H",
"I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T"))
expect_equal(
output_19370[["g"]],
c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4),
tolerance = 1e-4)
expect_equal(
output_19370[[".vec_len"]],
c(1.34097, 1.04271, 1.03014, 1.59173, 0.77784, 1.2045, 1.21776,
1.11717, 1.25467, 1.25657, 0.92992, 1.13805, 1.08767, 1.07259,
0.46395, 1.46221, 1.18983, 0.2937, 1.05627, 0.97615),
tolerance = 1e-4)
expect_equal(
names(output_19370),
c("x", "y", "z", "ch", "g", ".vec_len"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_19370),
c("numeric", "numeric", "numeric", "character", "integer", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_19370),
c("double", "double", "double", "character", "integer", "double"),
fixed = TRUE)
expect_equal(
dim(output_19370),
c(20L, 6L))
expect_equal(
colnames(dplyr::group_keys(output_19370)),
character(0),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_12861 <- xpectr::capture_side_effects(vector_length(data = c(1, 2, 3, 4), cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_12861[['error']]),
xpectr::strip("1 assertions failed:\n * when 'data' is not a data.frame, 'col(s)' must be 'NULL'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_12861[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_18304 <- xpectr::capture_side_effects(vector_length(data = "hej", cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_18304[['error']]),
xpectr::strip("1 assertions failed:\n * when 'data' is not a data.frame, 'col(s)' must be 'NULL'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_18304[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_16417 <- xpectr::capture_side_effects(vector_length(data = 1, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_16417[['error']]),
xpectr::strip("1 assertions failed:\n * when 'data' is not a data.frame, 'col(s)' must be 'NULL'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_16417[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_15190 <- xpectr::capture_side_effects(vector_length(data = NA, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_15190[['error']]),
xpectr::strip(ifelse(
is_checkmate_v2_1(),
"Assertion failed. One of the following must apply:\n * checkmate::check_data_frame(data): Must be of type 'data.frame', not 'logical'\n * checkmate::check_vector(data): Contains missing values (element 1)\n * checkmate::check_factor(data): Must be of type 'factor', not 'logical'",
"Assertion failed. One of the following must apply:\n * checkmate::check_data_frame(data): Must be of type 'data.frame', not 'logical'\n * checkmate::check_vector(data): Contains missing values (element 1)\n * checkmate::check_factor(data): Contains missing values (element 1)"
)),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_15190[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_17365 <- xpectr::capture_side_effects(vector_length(data = NULL, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_17365[['error']]),
xpectr::strip("Assertion failed. One of the following must apply:\n * checkmate::check_data_frame(data): Must be of type 'data.frame', not 'NULL'\n * checkmate::check_vector(data): Must be of type 'vector', not 'NULL'\n * checkmate::check_factor(data): Must be of type 'factor', not 'NULL'"),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_17365[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
output_11346 <- vector_length(data = c(1, 2, 3, 4), cols = NULL, by_row = FALSE, len_col_name = ".vec_len")
expect_equal(
class(output_11346),
"numeric",
fixed = TRUE)
expect_type(
output_11346,
type = "double")
expect_equal(
output_11346,
5.47723,
tolerance = 1e-4)
expect_equal(
names(output_11346),
NULL,
fixed = TRUE)
expect_equal(
length(output_11346),
1L)
expect_equal(
sum(xpectr::element_lengths(output_11346)),
1L)
xpectr::set_test_seed(42)
output_16569 <- vector_length(data = df, cols = c("x", "y"), by_row = TRUE, len_col_name = ".vec_len")
expect_equal(
class(output_16569),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_16569[["x"]],
c(0.91481, 0.93708, 0.28614, 0.83045, 0.64175, 0.5191, 0.73659,
0.13467, 0.65699, 0.70506, 0.45774, 0.71911, 0.93467, 0.25543,
0.46229, 0.94001, 0.97823, 0.11749, 0.475, 0.56033),
tolerance = 1e-4)
expect_equal(
output_16569[["y"]],
c(0.90403, 0.13871, 0.98889, 0.94667, 0.08244, 0.51421, 0.3902,
0.90574, 0.44697, 0.836, 0.7376, 0.81106, 0.38811, 0.68517,
0.00395, 0.83292, 0.00733, 0.20766, 0.9066, 0.61178),
tolerance = 1e-4)
expect_equal(
output_16569[["z"]],
c(0.37956, 0.43577, 0.03743, 0.97354, 0.43175, 0.95758, 0.88775,
0.63998, 0.97097, 0.61884, 0.33343, 0.34675, 0.39849, 0.78469,
0.03894, 0.7488, 0.67728, 0.17126, 0.26109, 0.51441),
tolerance = 1e-4)
expect_equal(
output_16569[["ch"]],
c("A", "B", "C", "D", "E", "F", "G", "H",
"I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T"))
expect_equal(
output_16569[["g"]],
c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4),
tolerance = 1e-4)
expect_equal(
output_16569[[".vec_len"]],
c(1.28613, 0.94729, 1.02946, 1.2593, 0.64702, 0.73067, 0.83356,
0.91569, 0.79462, 1.09363, 0.86809, 1.08394, 1.01205, 0.73123,
0.46231, 1.25594, 0.97825, 0.23859, 1.0235, 0.82961),
tolerance = 1e-4)
expect_equal(
names(output_16569),
c("x", "y", "z", "ch", "g", ".vec_len"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_16569),
c("numeric", "numeric", "numeric", "character", "integer", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_16569),
c("double", "double", "double", "character", "integer", "double"),
fixed = TRUE)
expect_equal(
dim(output_16569),
c(20L, 6L))
expect_equal(
colnames(dplyr::group_keys(output_16569)),
character(0),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_17050 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "ch"), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_17050[['error']]),
xpectr::strip("1 assertions failed:\n * Variable ''cols' columns': May only contain the following types: {numeric}, but element 3 has type\n * 'character'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_17050[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_14577 <- xpectr::capture_side_effects(vector_length(data = df, cols = "hej", by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_14577[['error']]),
xpectr::strip("1 assertions failed:\n * These names in the 'col(s)' argument were not found in 'data': hej."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_14577[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_17191 <- xpectr::capture_side_effects(vector_length(data = df, cols = c(NA, NA), by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_17191[['error']]),
xpectr::strip("Assertion on 'specified column names (NA, NA, \".vec_len\")' failed: Contains missing values (element 1)."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_17191[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_19346 <- xpectr::capture_side_effects(vector_length(data = df, cols = NULL, by_row = TRUE, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19346[['error']]),
xpectr::strip("When 'data' is a data.frame, 'cols' must be specified."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19346[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
output_12554 <- vector_length(data = df, cols = c("x", "y", "z"), by_row = FALSE, len_col_name = ".vec_len")
expect_equal(
class(output_12554),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_12554[["x"]],
2.98838,
tolerance = 1e-4)
expect_equal(
output_12554[["y"]],
2.93397,
tolerance = 1e-4)
expect_equal(
output_12554[["z"]],
2.70165,
tolerance = 1e-4)
expect_equal(
names(output_12554),
c("x", "y", "z"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_12554),
c("numeric", "numeric", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_12554),
c("double", "double", "double"),
fixed = TRUE)
expect_equal(
dim(output_12554),
c(1L, 3L))
expect_equal(
colnames(dplyr::group_keys(output_12554)),
character(0),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_14622 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = 1, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_14622[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'by_row': Must be of type 'logical flag', not 'double'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_14622[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_19400 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = NA, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19400[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'by_row': May not be NA."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19400[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_19782 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = NULL, len_col_name = ".vec_len"), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19782[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'by_row': Must be of type 'logical flag', not 'NULL'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19782[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
output_11174 <- vector_length(data = df, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = ".vl")
expect_equal(
class(output_11174),
c("tbl_df", "tbl", "data.frame"),
fixed = TRUE)
expect_equal(
output_11174[["x"]],
c(0.91481, 0.93708, 0.28614, 0.83045, 0.64175, 0.5191, 0.73659,
0.13467, 0.65699, 0.70506, 0.45774, 0.71911, 0.93467, 0.25543,
0.46229, 0.94001, 0.97823, 0.11749, 0.475, 0.56033),
tolerance = 1e-4)
expect_equal(
output_11174[["y"]],
c(0.90403, 0.13871, 0.98889, 0.94667, 0.08244, 0.51421, 0.3902,
0.90574, 0.44697, 0.836, 0.7376, 0.81106, 0.38811, 0.68517,
0.00395, 0.83292, 0.00733, 0.20766, 0.9066, 0.61178),
tolerance = 1e-4)
expect_equal(
output_11174[["z"]],
c(0.37956, 0.43577, 0.03743, 0.97354, 0.43175, 0.95758, 0.88775,
0.63998, 0.97097, 0.61884, 0.33343, 0.34675, 0.39849, 0.78469,
0.03894, 0.7488, 0.67728, 0.17126, 0.26109, 0.51441),
tolerance = 1e-4)
expect_equal(
output_11174[["ch"]],
c("A", "B", "C", "D", "E", "F", "G", "H",
"I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T"))
expect_equal(
output_11174[["g"]],
c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4),
tolerance = 1e-4)
expect_equal(
output_11174[[".vl"]],
c(1.34097, 1.04271, 1.03014, 1.59173, 0.77784, 1.2045, 1.21776,
1.11717, 1.25467, 1.25657, 0.92992, 1.13805, 1.08767, 1.07259,
0.46395, 1.46221, 1.18983, 0.2937, 1.05627, 0.97615),
tolerance = 1e-4)
expect_equal(
names(output_11174),
c("x", "y", "z", "ch", "g", ".vl"),
fixed = TRUE)
expect_equal(
xpectr::element_classes(output_11174),
c("numeric", "numeric", "numeric", "character", "integer", "numeric"),
fixed = TRUE)
expect_equal(
xpectr::element_types(output_11174),
c("double", "double", "double", "character", "integer", "double"),
fixed = TRUE)
expect_equal(
dim(output_11174),
c(20L, 6L))
expect_equal(
colnames(dplyr::group_keys(output_11174)),
character(0),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_14749 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = 1), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_14749[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'len_col_name': Must be of type 'string', not 'double'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_14749[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_15603 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = NA), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_15603[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'len_col_name': May not be NA."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_15603[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
xpectr::set_test_seed(42)
side_effects_19040 <- xpectr::capture_side_effects(vector_length(data = df, cols = c("x", "y", "z"), by_row = TRUE, len_col_name = NULL), reset_seed = TRUE)
expect_equal(
xpectr::strip(side_effects_19040[['error']]),
xpectr::strip("1 assertions failed:\n * Variable 'len_col_name': Must be of type 'string', not 'NULL'."),
fixed = TRUE)
expect_equal(
xpectr::strip(side_effects_19040[['error_class']]),
xpectr::strip(c("simpleError", "error", "condition")),
fixed = TRUE)
}) |
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE,
message = FALSE,
warning = FALSE,
include = FALSE,
dpi = 400
) |
test_that("creating an alignments track returns the correct string", {
assembly <- assembly("https://jbrowse.org/genomes/hg19/fasta/hg19.fa.gz", bgzip = TRUE)
expect_type(track_variant("foo.vcf", assembly), "character")
expect_equal(track_variant(
"https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh37/clinvar.vcf.gz",
assembly
), "{ \"type\": \"VariantTrack\", \"name\": \"clinvar\", \"assemblyNames\": [\"hg19\"], \"trackId\": \"hg19_clinvar\", \"adapter\": { \"type\": \"VcfTabixAdapter\", \"vcfGzLocation\": { \"uri\": \"https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh37/clinvar.vcf.gz\" }, \"index\": { \"location\": { \"uri\": \"https://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh37/clinvar.vcf.gz.tbi\" } }}}")
expect_error(track_variant("foo.bcf", assembly), "variant data must be VCF")
}) |
regexpr2 <- function(
x, pattern, ...,
ignore_case=FALSE, fixed=FALSE
) {
if (!is.character(x)) x <- as.character(x)
if (!is.character(pattern)) pattern <- as.character(pattern)
stopifnot(is.logical(fixed) && length(fixed) == 1L)
stopifnot(is.logical(ignore_case) && length(ignore_case) == 1L && !is.na(ignore_case))
ret <- {
if (is.na(fixed)) {
if (!ignore_case)
stringi::stri_locate_first_coll(x, pattern, get_length=TRUE, ...)
else
stringi::stri_locate_first_coll(x, pattern, get_length=TRUE, strength=2L, ...)
} else if (fixed == TRUE) {
stringi::stri_locate_first_fixed(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
} else {
stringi::stri_locate_first_regex(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
}
}
structure(
.attribs_propagate_binary(as.integer(ret[, "start", drop=TRUE]), x, pattern),
match.length=as.integer(ret[, "length", drop=TRUE])
)
}
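# Usage sketch (illustrative, not part of the package source): regexpr2()
# mirrors base regexpr() but takes the haystack first; get_length=TRUE makes
# stringi report non-matches as -1, matching the base API.
# m <- regexpr2(c("banana", "apple"), "na", fixed = TRUE)
# m                         # start positions: 3, -1
# attr(m, "match.length")   # match lengths:   2, -1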
gregexpr2 <- function(
x, pattern, ...,
ignore_case=FALSE, fixed=FALSE
) {
if (!is.character(x)) x <- as.character(x)
if (!is.character(pattern)) pattern <- as.character(pattern)
stopifnot(is.logical(fixed) && length(fixed) == 1L)
stopifnot(is.logical(ignore_case) && length(ignore_case) == 1L && !is.na(ignore_case))
ret <- {
if (is.na(fixed)) {
if (!ignore_case)
stringi::stri_locate_all_coll(x, pattern, get_length=TRUE, ...)
else
stringi::stri_locate_all_coll(x, pattern, get_length=TRUE, strength=2L, ...)
} else if (fixed == TRUE) {
stringi::stri_locate_all_fixed(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
} else {
stringi::stri_locate_all_regex(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
}
}
.attribs_propagate_binary(
lapply(ret, function(e)
structure(
as.integer(e[, "start", drop=TRUE]),
match.length=as.integer(e[, "length", drop=TRUE])
)
),
x, pattern
)
}
regexec2 <- function(
x, pattern, ...,
ignore_case=FALSE, fixed=FALSE
) {
if (!is.character(x)) x <- as.character(x)
if (!is.character(pattern)) pattern <- as.character(pattern)
stopifnot(is.logical(fixed) && length(fixed) == 1L)
stopifnot(is.logical(ignore_case) && length(ignore_case) == 1L && !is.na(ignore_case))
ret <- {
if (is.na(fixed)) {
if (!ignore_case)
stringi::stri_locate_first_coll(x, pattern, get_length=TRUE, ...)
else
stringi::stri_locate_first_coll(x, pattern, get_length=TRUE, strength=2L, ...)
} else if (fixed == TRUE) {
stringi::stri_locate_first_fixed(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
} else {
NULL
}
}
if (!is.null(ret)) {
starts <- as.integer(ret[, "start", drop=TRUE])
lengths <- as.integer(ret[, "length", drop=TRUE])
ret <- lapply(
seq_along(starts),
function(i) structure(
starts[i],
match.length=lengths[i]
)
)
}
else {
ret <- stringi::stri_locate_all_regex(x, pattern, get_length=TRUE, case_insensitive=ignore_case, capture_groups=TRUE, ...)
ret <- lapply(ret, function(e) {
cnames <- names(attr(e, "capture_groups"))
if (!is.null(cnames)) cnames <- c("", cnames)
structure(
c(
as.integer(e[1L, "start", drop=TRUE]),
unlist(lapply(
attr(e, "capture_groups"),
function(e2) as.integer(e2[1L, "start", drop=TRUE])
))
),
match.length=structure(
c(
as.integer(e[1L, "length", drop=TRUE]),
unlist(lapply(
attr(e, "capture_groups"),
function(e2) as.integer(e2[1L, "length", drop=TRUE])
))
),
names=cnames
),
names=cnames
)
})
}
.attribs_propagate_binary(ret, x, pattern)
}
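# Usage sketch (illustrative): with a regex pattern (the default fixed=FALSE
# branch), regexec2() also returns capture-group positions per input string:
# r <- regexec2("2024-01-31", "(\\d{4})-(\\d{2})")
# r[[1]]                        # start of the full match, then of each group
# attr(r[[1]], "match.length")  # the corresponding lengths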
gregexec2 <- function(
x, pattern, ...,
ignore_case=FALSE, fixed=FALSE
) {
if (!is.character(x)) x <- as.character(x)
if (!is.character(pattern)) pattern <- as.character(pattern)
stopifnot(is.logical(fixed) && length(fixed) == 1L)
stopifnot(is.logical(ignore_case) && length(ignore_case) == 1L && !is.na(ignore_case))
ret <- {
if (is.na(fixed)) {
if (!ignore_case)
stringi::stri_locate_all_coll(x, pattern, get_length=TRUE, ...)
else
stringi::stri_locate_all_coll(x, pattern, get_length=TRUE, strength=2L, ...)
} else if (fixed == TRUE) {
stringi::stri_locate_all_fixed(x, pattern, get_length=TRUE, case_insensitive=ignore_case, ...)
} else {
NULL
}
}
if (!is.null(ret)) {
ret <- lapply(
ret,
function(e) structure(
as.integer(e[, "start", drop=TRUE]),
match.length=structure(
as.integer(e[, "length", drop=TRUE]),
dim=c(1L, NROW(e))
),
dim=c(1L, NROW(e))
)
)
}
else {
ret <- stringi::stri_locate_all_regex(x, pattern, get_length=TRUE, case_insensitive=ignore_case, capture_groups=TRUE, ...)
ret <- lapply(ret, function(e) {
cnames <- names(attr(e, "capture_groups"))
if (!is.null(cnames)) cnames <- c("", cnames)
structure(
do.call(rbind, c(
list(as.integer(e[, "start", drop=TRUE])),
lapply(
attr(e, "capture_groups"),
function(e2) as.integer(e2[, "start", drop=TRUE])
)
)),
match.length=structure(
do.call(rbind, c(
list(as.integer(e[, "length", drop=TRUE])),
lapply(
attr(e, "capture_groups"),
function(e2) as.integer(e2[, "length", drop=TRUE])
)
)),
dimnames=if (!is.null(cnames)) list(cnames, NULL) else NULL
),
dimnames=if (!is.null(cnames)) list(cnames, NULL) else NULL
)
})
}
.attribs_propagate_binary(ret, x, pattern)
}
regexpr <- function(
pattern, x=text, ...,
ignore.case=FALSE, fixed=FALSE,
perl=FALSE, useBytes=FALSE, text
) {
if (!isFALSE(perl)) warning("argument `perl` has no effect in stringx")
if (!isFALSE(useBytes)) warning("argument `useBytes` has no effect in stringx")
if (!missing(x) && !missing(text)) stop("do not use `text` if `x` is given as well")
regexpr2(x, pattern, ..., ignore_case=ignore.case, fixed=fixed)
}
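# These wrappers are drop-in replacements for the base functions: perl= and
# useBytes= are accepted for call compatibility but only trigger a warning,
# since the stringi backend always operates on UTF-8 text, e.g.:
# regexpr("na+", c("banana", "apple"))   # same call shape as base::regexpr()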
gregexpr <- function(
pattern, x=text, ...,
ignore.case=FALSE, fixed=FALSE,
perl=FALSE, useBytes=FALSE, text
) {
if (!isFALSE(perl)) warning("argument `perl` has no effect in stringx")
if (!isFALSE(useBytes)) warning("argument `useBytes` has no effect in stringx")
if (!missing(x) && !missing(text)) stop("do not use `text` if `x` is given as well")
gregexpr2(x, pattern, ..., ignore_case=ignore.case, fixed=fixed)
}
regexec <- function(
pattern, x=text, ...,
ignore.case=FALSE, fixed=FALSE,
perl=FALSE, useBytes=FALSE, text
) {
if (!isFALSE(perl)) warning("argument `perl` has no effect in stringx")
if (!isFALSE(useBytes)) warning("argument `useBytes` has no effect in stringx")
if (!missing(x) && !missing(text)) stop("do not use `text` if `x` is given as well")
regexec2(x, pattern, ..., ignore_case=ignore.case, fixed=fixed)
}
gregexec <- function(
pattern, x=text, ...,
ignore.case=FALSE, fixed=FALSE,
perl=FALSE, useBytes=FALSE, text
) {
if (!isFALSE(perl)) warning("argument `perl` has no effect in stringx")
if (!isFALSE(useBytes)) warning("argument `useBytes` has no effect in stringx")
if (!missing(x) && !missing(text)) stop("do not use `text` if `x` is given as well")
gregexec2(x, pattern, ..., ignore_case=ignore.case, fixed=fixed)
} |
cemMixRHLP <- function(X, Y, K, R, p = 3, q = 1, variance_type = c("heteroskedastic", "homoskedastic"), init_kmeans = TRUE, n_tries = 1, max_iter = 100, threshold = 1e-5, verbose = FALSE, verbose_IRLS = FALSE) {
fData <- FData(X, Y)
top <- 0
try_CEM <- 0
best_com_loglik <- -Inf
while (try_CEM < n_tries) {
try_CEM <- try_CEM + 1
if (n_tries > 1 && verbose) {
message("EM try number: ", try_CEM, "\n")
}
variance_type <- match.arg(variance_type)
param <- ParamMixRHLP$new(fData = fData, K = K, R = R, p = p, q = q, variance_type = variance_type)
param$initParam(init_kmeans, try_CEM)
iter <- 0
converge <- FALSE
prev_com_loglik <- -Inf
reg_irls <- 0
stat <- StatMixRHLP(param)
while (!converge && (iter <= max_iter)) {
stat$EStep(param)
stat$CStep(reg_irls)
res <- param$CMStep(stat, verbose_IRLS)
reg_irls <- res[[1]]
good_segmentation <- res[[2]]
if (!good_segmentation) {
try_CEM <- try_CEM - 1
break
}
iter <- iter + 1
if (verbose) {
message("CEM - mixRHLP: Iteration: ", iter, " | Complete log-likelihood: " , stat$com_loglik)
}
if (prev_com_loglik - stat$com_loglik > 1e-5) {
warning("CEM complete log-likelihood is decreasing from ", prev_com_loglik, "to ", stat$com_loglik, " !")
top <- top + 1
if (top > 20)
break
}
converge <- abs((stat$com_loglik - prev_com_loglik) / prev_com_loglik) <= threshold
if (is.na(converge)) {
converge <- FALSE
}
prev_com_loglik <- stat$com_loglik
stat$stored_loglik[iter] <- stat$com_loglik
}
if (stat$com_loglik > best_com_loglik) {
statSolution <- stat$copy()
paramSolution <- param$copy()
best_com_loglik <- stat$com_loglik
}
if (n_tries > 1 && verbose) {
message("Max value of the complete log-likelihood: ", stat$com_loglik, "\n\n")
}
}
if (n_tries > 1 && verbose) {
message("Best value of the complete log-likelihood: ", statSolution$com_loglik, "\n")
}
statSolution$computeStats(paramSolution)
return(ModelMixRHLP(param = paramSolution, stat = statSolution))
} |
from_aml <- function(aml,
simplifyVector = FALSE,
simplifyDataFrame = simplifyVector,
simplifyMatrix = simplifyVector,
flatten = FALSE, ...) {
json <- aml_to_json(aml)
result <- fromJSON(json,
simplifyVector = simplifyVector,
simplifyDataFrame = simplifyDataFrame,
simplifyMatrix = simplifyMatrix,
flatten = flatten, ...
)
class(result) <- c("list", "from_aml")
return(result)
}
from_archie <- function(...) {
.Deprecated("from_aml")
from_aml(...)
}
aml_to_json <- function(aml, pretty = FALSE, indent = 4) {
aml <- read_aml(aml)
aml <- paste(aml, collapse = "\n")
ct <- new_context()
ct$source(system.file("archieml-js/archieml.js", package = "rchie"))
ct$assign("aml", aml)
json <- ct$eval("JSON.stringify(archieml.load(aml));")
class(json) <- "json"
if (pretty) json <- prettify(json, indent = indent)
return(json)
} |
log_dgnorm <- function(x, loc, scale, power) {
  # Log of the (unnormalized) generalized-normal kernel used here:
  # -(|x - loc| / scale)^power / power. For very large powers the kernel is
  # numerically flat, so return 0 to avoid overflow.
  if (power < 1e3) {
    y <- -(abs(x - loc) / scale)^power / power
  } else {
    y <- rep(0, length(x))
  }
  return(y)
} |
coolDownLinear <- function(start, end, steps, step) {
  # Linear cooling schedule: returns `start` at step 1 and decreases by
  # (start - end) / steps with each subsequent step.
  start - ((start - end) / steps) * (step - 1)
} |
seqgen.missing <- function(seqdata, p.cases=.1, p.left=.2, p.gaps=0, p.right=.3,
mt.left="nr", mt.gaps="nr", mt.right="nr"){
n <- nrow(seqdata)
lgth <- max(seqlength(seqdata))
nr.l <- attr(seqdata, mt.left)
nr.g <- attr(seqdata, mt.gaps)
nr.r <- attr(seqdata, mt.right)
nm <- round(p.cases * n, 0)
idm <- sort(sample(1:n, nm))
rdu.l <- runif(n,min=0,max=p.left)
rdu.g <- runif(n,min=0,max=p.gaps)
rdu.r <- runif(n,min=0,max=p.right)
for (i in idm){
gaps <- sample(1:lgth, round(rdu.g[i] * lgth, 0))
seqdata[i,gaps] <- nr.g
nl <- round(rdu.l[i] * lgth, 0)
if (nl>0) seqdata[i,1:nl] <- nr.l
nr <- round(rdu.r[i] * lgth, 0)
if (nr>0) seqdata[i,(lgth-nr+1):lgth] <- nr.r
}
return(seqdata)
} |
supplementDefaultGraphTheme <- function(graphTheme,
defaultGraphTheme =
behaviorchange::opts$get(
'defaultGraphTheme'
)) {
if (is.null(graphTheme)) {
return(defaultGraphTheme);
}
names(defaultGraphTheme) <-
unlist(
lapply(
defaultGraphTheme,
function(x) paste0(x[1], "_", x[3])
)
);
names(graphTheme) <-
unlist(
lapply(
graphTheme,
function(x) paste0(x[1], "_", x[3])
)
);
graphTheme <-
unname(
c(graphTheme,
defaultGraphTheme[
setdiff(
names(defaultGraphTheme),
names(graphTheme)
)
]
)
);
return(graphTheme);
} |
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
dat <- data.frame(
x = c(
-2, -2, -2, -2, -2,
-1, -1, -1, -1, -1,
0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
2, 2, 2, 2, 2
),
y = c(
1, 0, 0, 0, 0,
1, 1, 1, 0, 0,
1, 1, 0, 0, 0,
1, 1, 1, 1, 0,
1, 1, 1, 1, 1
)
)
library(gfilogisreg)
set.seed(666L)
fidsamples <- gfilogisreg(y ~ x, data = dat, N = 500L)
gfiSummary(fidsamples)
glm(y ~ x, data = dat, family = binomial())
gfiConfInt(~ -`(Intercept)`/x, fidsamples) |
pwr.confIntProp <- function(prop,
conf.level=.95,
w=.1,
silent=TRUE) {
if (length(w) != 1) {
warning("Multiple widths not supported (yet); only the first one is used!\n",
"You can use sapply to approximate this vectorization, for example,\n\n",
"sapply(c(", vecTxt(w, lastElements = 0), "), pwr.cohensdCI, d=.5)",
"\n");
w <- w[1];
}
wHandler <- function(w) {
myWarnings <<- c(myWarnings, list(w));
invokeRestart("muffleWarning");
}
eHandler <- function(e) {
myErrors <<- c(myErrors, list(e));
}
myWarnings <- NULL;
myErrors <- NULL;
propVector <- prop;
nVector <- numeric();
for (prop in propVector) {
n <- 4;
if (!silent) {
cat0("Estimating required sample size for a confidence interval with maximum halfwidth ",
w, " for a population proportion of ", prop, ". Setting n to 4 to start.\n");
}
for (steps in c(1000, 100, 10, 1)) {
ciWidth <- 3*w;
while (ciWidth > 2*w) {
n <- n + steps;
if (!silent) {
  cat0("Adding ", steps, " to n.\n");
  cat0("Computing obtained confidence interval for n = ", n, ".\n");
}
x <- prop * n;
obtainedCI <- confIntProp(x, n, conf.level);
ciWidth <- abs(diff(as.numeric(obtainedCI)));
if (!silent) {
cat0("Obtained CI of ",
formatCI(obtainedCI),
"; width=",
round(ciWidth, 2),
".\n");
if (ciWidth < w*2) {
cat0(" This is smaller than the margin of error (2*w, or ",
2*w,
").\n");
} else {
cat0(" This is larger than the margin of error (2*w, or ",
2*w,
").\n");
}
}
}
if (!silent) {
cat0("Done with this cycle; subtracting ",
steps,
" from n.\n");
}
n <- n - steps;
}
nVector <- c(nVector,
n);
}
names(nVector) <- propVector;
return(nVector);
} |
test_that("assigning NULL dim to rvar works", {
x <- rvar(array(1:20, dim = c(2,2,5)))
dim(x) <- NULL
expect_equal(x, rvar(array(1:20, dim = c(2,10))))
})
test_that("unname() works", {
x_array = array(1:24, dim = c(2,3,4), dimnames = list(NULL, A = paste0("a", 1:3), B = paste0("b", 1:4)))
x = rvar(x_array)
expect_equal(unname(x), rvar(unname(x_array)))
}) |
options(width=80)
require(OpenMx)
multiData1 <- suppressWarnings(try(read.csv("models/passing/data/multiData.csv"), silent=TRUE))
if (is(multiData1, "try-error")) multiData1 <- read.csv("data/multiData.csv")
manifests <- c("x1", "x2", "y")
multiData1Cov <- cov(multiData1[,c(1,2,5)])
biRegModel <- mxModel("Bivariate Regression of y on x1 and x2",
type="RAM",
manifestVars=manifests,
mxPath(from=c("x1","x2"), to="y",
arrows=1,
free=TRUE, values=.2, labels=c("b1", "b2")),
mxPath(from=manifests,
arrows=2,
free=TRUE, values=.8,
labels=c("VarX1", "VarX2", "VarE")),
mxPath(from="x1", to="x2",
arrows=2,
free=TRUE, values=.2,
labels=c("CovX1X2")),
mxData(observed=multiData1Cov, type="cov", numObs=500)
)
biRegModel <- mxOption(biRegModel, "Standard Errors", "Yes")
biRegModelOut <- mxRun(biRegModel)
summary(biRegModelOut)
lmOut <- lm(y~x1+x2, data=multiData1)
lmOutSummary <- summary(lmOut)
omxCheckCloseEnough(biRegModelOut$output$estimate[1:2],
lmOutSummary$coef[2:3,1],
0.001)
omxCheckCloseEnough(biRegModelOut$output$standardErrors[1:2],
lmOutSummary$coef[2:3,2],
0.001) |
layer_locally_connected_1d <- function(object, filters, kernel_size, strides = 1L, padding = "valid", data_format = NULL,
activation = NULL, use_bias = TRUE, kernel_initializer = "glorot_uniform",
bias_initializer = "zeros", kernel_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL, bias_constraint = NULL,
implementation = 1L,
batch_size = NULL, name = NULL, trainable = NULL, weights = NULL) {
create_layer(keras$layers$LocallyConnected1D, object, list(
filters = as.integer(filters),
kernel_size = as_integer_tuple(kernel_size),
strides = as_integer_tuple(strides),
padding = padding,
data_format = data_format,
activation = activation,
use_bias = use_bias,
kernel_initializer = kernel_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
bias_constraint = bias_constraint,
implementation = as.integer(implementation),
batch_size = as_nullable_integer(batch_size),
name = name,
trainable = trainable,
weights = weights
))
}
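# Usage sketch (hedged; assumes the keras R package with a working backend
# is installed). A locally connected layer behaves like a 1-d convolution
# whose weights are NOT shared across positions:
# layer <- layer_locally_connected_1d(filters = 32, kernel_size = 3)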
layer_locally_connected_2d <- function(object, filters, kernel_size, strides = c(1L, 1L), padding = "valid", data_format = NULL,
activation = NULL, use_bias = TRUE, kernel_initializer = "glorot_uniform",
bias_initializer = "zeros", kernel_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL, bias_constraint = NULL,
implementation = 1L,
batch_size = NULL, name = NULL, trainable = NULL, weights = NULL) {
create_layer(keras$layers$LocallyConnected2D, object, list(
filters = as.integer(filters),
kernel_size = as_integer_tuple(kernel_size),
strides = as_integer_tuple(strides),
padding = padding,
data_format = data_format,
activation = activation,
use_bias = use_bias,
kernel_initializer = kernel_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
bias_constraint = bias_constraint,
implementation = as.integer(implementation),
batch_size = as_nullable_integer(batch_size),
name = name,
trainable = trainable,
weights = weights
))
} |
context("sequence statistics")
data(anoteropsis)
test_that("seqStat gives correct number of bases", {
expect_equal(seqStat(anoteropsis)[[1]], 395)
}) |
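# eigFunc: eigendecomposition helper for a kernel matrix. It first requests
# only the top-p eigenvalues via rARPACK::eigs_sym() and falls back to a full
# base eigen() decomposition whenever the iterative solver fails.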
eigFunc <- function(kmat, p){
errorFlag <- FALSE
if( nrow(kmat) <= p ) {
egDecomp <- try(expr = eigen(x = kmat,
symmetric = TRUE,
only.values = TRUE),
silent = TRUE)
if( is(egDecomp, "try-error") ) {
stop("Unable to obtain eigenvalue decomposition of kernel matrix.",
call. = FALSE)
}
} else {
egDecomp <- try(expr = rARPACK::eigs_sym(A = kmat,
k = p,
opts = list(retvec=FALSE)),
silent = TRUE)
if( is(egDecomp, "try-error") ) {
egDecomp <- try(expr = eigen(x = kmat,
symmetric = TRUE,
only.values = TRUE),
silent = TRUE)
if( is(egDecomp, "try-error") ) {
stop("Unable to obtain eigenvalue decomposition of kernel matrix.",
call. = FALSE)
}
errorFlag <- TRUE
}
}
if( any(egDecomp$values < -1.5e-8) ) {
stop("Negative eigenvalues encountered.", call.=FALSE)
}
keep <- egDecomp$values > 1.5e-8
if( all(keep) ) {
if( p < ncol(kmat) ) return(NULL)
}
egDecomp$values <- egDecomp$values[keep]
gSum <- cumsum(egDecomp$values)
gSum <- gSum/gSum[length(gSum)]
nEV <- length(egDecomp$values)
if( nEV < nrow(kmat) && !errorFlag ) {
Zg <- rARPACK::eigs_sym(A = kmat, k = nEV)
} else {
Zg <- eigen(x = kmat, symmetric = TRUE)
Zg$values <- Zg$values[1:nEV]
Zg$vectors <- Zg$vectors[,1:nEV]
}
return(list( "Zg" = Zg,
"propV" = gSum))
} |
ggplot_na_level2 <- function(x,
inside_information = "boxplot",
color_before = "pink3",
color_after = "pink3",
color_source = "steelblue",
color_inside = "black",
alpha_violin = 0.5,
alpha_inside = 0.9,
title = "Before/After Analysis",
subtitle = "Level of values occurring directly before and after NAs",
xlab = "",
ylab = "Value",
legend = FALSE,
orientation = "vertical",
label_before = "before",
label_after = "after",
label_source = "source",
add_n_label = TRUE,
theme = ggplot2::theme_linedraw()) {
data <- x
if (any(class(data) == "tbl_ts")) {
data <- as.vector(as.data.frame(data)[, 2])
}
else if (any(class(data) == "tbl")) {
data <- as.vector(as.data.frame(data)[, 1])
}
if (!is.null(dim(data)[2]) && dim(data)[2] > 1) {
stop("x is not univariate. The function only works with univariate
input for x. For data types with multiple variables/columns only input
the column you want to plot as parameter x.")
}
if (!is.null(dim(data)[2])) {
data <- data[, 1]
}
data <- as.vector(data)
if (!is.numeric(data)) {
stop("Input x is not numeric")
}
missindx <- is.na(data)
if (all(missindx)) {
stop("Input data consists only of NAs. Meaningful ggplot_na_level2 plots can only
be generated with more non-NA data available.")
}
if (!anyNA(data)) {
stop("Input data contains no NAs. At least one missing value is needed
to create a meaningful ggplot_na_level2 plot.")
}
na_indx_after <- which(is.na(data[1:(length(data) - 1)])) + 1
na_indx_before <- which(is.na(data[2:length(data)]))
before <- data.frame(type = "before", input = na_remove(data[na_indx_before]))
after <- data.frame(type = "after", input = na_remove(data[na_indx_after]))
all <- data.frame(type = "source", input = na_remove(data))
n_before <- length(before$input)
n_all <- length(all$input)
n_after <- length(after$input)
df <- rbind(before, after, all)
type <- df$type
input <- df$input
gg <- ggplot2::ggplot(data = df, ggplot2::aes(x = type, y = input, fill = type)) +
ggplot2::geom_violin(width = 1, alpha = alpha_violin)
if (inside_information == "boxplot") {
gg <- gg + ggplot2::geom_boxplot(width = 0.1, color = color_inside, alpha = alpha_inside)
}
else if (inside_information == "points") {
gg <- gg + ggplot2::geom_jitter(width = 0.1, color = color_inside, alpha = alpha_inside)
}
else if (inside_information == "none") {
}
else {
stop("Wrong input for parameter inside_information Input must be either 'boxplot',
'points' or 'none'. Call ?ggplot_na_level2 to view the documentation.")
}
gg <- gg + ggplot2::ggtitle(label = title, subtitle = subtitle) +
ggplot2::xlab(xlab) +
ggplot2::ylab(ylab) +
theme +
ggplot2::scale_x_discrete(
limits = c("before", "source", "after"),
labels = c(
paste0(label_before, ifelse(add_n_label, paste0(" \n n = ", n_before), "")),
paste0(label_source, ifelse(add_n_label, paste0(" \n n = ", n_all), "")),
paste0(label_after, ifelse(add_n_label, paste0(" \n n = ", n_after), ""))
)
) +
ggplot2::scale_fill_manual(values = c(color_after, color_before, color_source))
if (orientation == "horizontal") {
gg <- gg + ggplot2::coord_flip()
}
gg <- gg + ggplot2::theme(
legend.position = base::ifelse(legend == TRUE, "right", "none"),
legend.title = ggplot2::element_blank()
)
return(gg)
} |
e2e_read <- function(model.name, model.variant, models.path=NULL, results.path=NULL, results.subdir="", model.ident="base", quiet=TRUE, silent=FALSE) {
oo <- options()
on.exit(options(oo))
if(silent==TRUE) quiet<-TRUE
pkg.env$quiet <- quiet
pkg.env$silent <- silent
if(! pkg.env$silent){
message("Current working directory is... ")
message("'",getwd(),"'\n")
}
packagemodel<-FALSE
if(is.null(models.path)) packagemodel<-TRUE
models.path <- remove.dirsep(models.path)
results.path <- remove.dirsep(results.path)
if( ! is.null(models.path)) {
models.path<-makepath(getwd(),models.path)
if (! dir.exists(models.path)) {
mesg <- paste0("Error: could not find the model path '", models.path, "' !\n")
stop(mesg)
}
}
if( ! is.null(results.path)) {
results.path<-makepath(getwd(),results.path)
if (! dir.exists(results.path)) {
mesg <- paste0("Error: could not find the results path '", results.path, "' !\n")
stop(mesg)
}
}
if( is.null(results.path)){
results.path<-tempdir()
if (! pkg.env$silent){
message("No 'results.path' specified so any csv data requested")
message("will be directed to/from the temporary directory...")
message("'",results.path,"'")
message("")
}
}
read.only <- (is.null(models.path))
model.path <- get.variant.path(model.name, model.variant, models.path)
resultsdir <- makepath(results.path, model.name, model.variant, results.subdir)
setup <- list(
read.only = read.only,
model.name = model.name,
model.variant = model.variant,
model.ident = model.ident,
model.subdir = results.subdir,
model.path = model.path,
resultsdir = resultsdir
)
if (! pkg.env$quiet) message("Loading model : ", model.path)
read.model.setup(model.path)
physical.parameters <- read_physical_parameters(model.path)
fixed.parameters <- read_fixed_parameters(model.path)
physics.drivers <- read_physics_drivers(model.path)
chemistry.drivers <- read_chemistry_drivers(model.path)
biological.events <- read_biological_event_timings(model.path)
fitted.parameters <- read_fitted_parameters(model.path)
initial.state <- read_initial_state(model.path)
fleet.model <- read_fishing_fleet_model(model.path, physical.parameters)
data <- list(
fixed.parameters = fixed.parameters,
fitted.parameters = fitted.parameters,
physical.parameters = physical.parameters,
physics.drivers = physics.drivers,
chemistry.drivers = chemistry.drivers,
biological.events = biological.events,
fleet.model = fleet.model,
initial.state = initial.state
)
model <- list(
setup = setup,
data = data
)
if (! pkg.env$silent){
message("Model setup and parameters gathered from ...")
if(packagemodel==FALSE) message("'",model$setup$model.path,"'")
if(packagemodel==TRUE) message("StrathE2E2 package folder")
}
if (! pkg.env$silent){
message("Model results will be directed to/from ...")
message("'",model$setup$resultsdir,"'\n")
}
model
} |
context("nextItem-PKL")
load("cat_objects.Rdata")
test_that("ltm nextItem PKL calculates correctly", {
ltm_cat@estimation <- "EAP"
ltm_cat@selection <- "PKL"
ltm_cat@answers[c(1:7,27,36)] <- c(0, 1, 0, 0, 1, 0, 0, 1, 1)
package_next <- selectItem(ltm_cat)
package_item <- package_next$next_item
package_est <- package_next$estimates[package_next$estimates$q_number
== package_item, "PKL"]
expect_equal(package_item, 40)
expect_equal(round(package_est, 6), 0.000218)
})
test_that("grm nextItem PKL calculates correctly", {
grm_cat@estimation <- "EAP"
grm_cat@selection <- "PKL"
grm_cat@answers[c(1:8,14,17)] <- c(3, 4, 2, 2, 1, 2, 2, 3, 4, 4)
package_next <- selectItem(grm_cat)
package_item <- package_next$next_item
package_est <- package_next$estimates[package_next$estimates$q_number
== package_item, "PKL"]
expect_equal(package_item, 10)
expect_equal(round(package_est, 8), 3e-08)
})
test_that("nextItem PKL chooses item (not NA) when no questions asked", {
ltm_cat@selection <- "PKL"
grm_cat@selection <- "PKL"
gpcm_cat@selection <- "PKL"
expect_true(!is.na(selectItem(ltm_cat)$next_item))
expect_true(!is.na(selectItem(grm_cat)$next_item))
expect_true(!is.na(selectItem(gpcm_cat)$next_item))
})
test_that("nextItem PKL estimates are not NA (when no questions asked)", {
ltm_cat@selection <- "PKL"
grm_cat@selection <- "PKL"
gpcm_cat@selection <- "PKL"
expect_equal(sum(!is.na(selectItem(ltm_cat)$estimates[,"PKL"])), 40)
expect_equal(sum(!is.na(selectItem(grm_cat)$estimates[,"PKL"])), 18)
expect_equal(sum(!is.na(selectItem(gpcm_cat)$estimates[,"PKL"])), 10)
})
test_that("nextItem PKL is actually the maximum estimate", {
ltm_cat@selection <- "PKL"
grm_cat@selection <- "PKL"
gpcm_cat@selection <- "PKL"
ltm_next <- selectItem(ltm_cat)
grm_next <- selectItem(grm_cat)
gpcm_next <- selectItem(gpcm_cat)
expect_equal(ltm_next$next_item, which(ltm_next$estimates[, "PKL"] ==
max(ltm_next$estimates[, "PKL"])))
expect_equal(grm_next$next_item, which(grm_next$estimates[, "PKL"] ==
max(grm_next$estimates[, "PKL"])))
expect_equal(gpcm_next$next_item, which(gpcm_next$estimates[, "PKL"] ==
max(gpcm_next$estimates[, "PKL"])))
})
test_that("nextItem PKL correctly skips questions", {
ltm_cat@selection <- "PKL"
grm_cat@selection <- "PKL"
gpcm_cat@selection <- "PKL"
ltm_cat@answers[1:10] <- c(rep(-1, 5), 1, 1, 0, 0, 1)
grm_cat@answers[1:5] <- c(-1, -1, 5, 4, 3)
gpcm_cat@answers[1:5] <- c(-1, -1, 5, 4, 3)
ltm_next <- selectItem(ltm_cat)
grm_next <- selectItem(grm_cat)
gpcm_next <- selectItem(gpcm_cat)
expect_equal(nrow(ltm_next$estimates) + sum(!is.na(ltm_cat@answers)),
length(ltm_cat@answers))
expect_equal(nrow(grm_next$estimates) + sum(!is.na(grm_cat@answers)),
length(grm_cat@answers))
expect_equal(nrow(gpcm_next$estimates) + sum(!is.na(gpcm_cat@answers)),
length(gpcm_cat@answers))
}) |
survPHplot <- function(formula, data, subset, contrasts, weights,
col=1:5, lty=1:5, pch=19,
xlab="Time (log scale)",
ylab="-log(-log(Survival))",
log="x",
legend.args=list(),
...) {
if (class(formula)=="survfit") {
fit <- formula
} else {
Call <- match.call()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "contrasts", "weights"),
names(mf), 0L)
mf <- mf[c(1L, m)]
mf[[1]] <- as.name("survfit")
fit <- eval(mf, parent.frame())
}
n <- length(fit$strata)
index <- rep(names(fit$strata), fit$strata)
time <- fit$time
trans <- -log(-log(fit$surv))
plot <- plot(trans~time, type="n", log=log, xlab=xlab, ylab=ylab, ...)
col <- rep(col, length.out=n)
lty <- rep(lty, length.out=n)
pch <- rep(pch, length.out=n)
for (i in 1:n) {
  j <- names(fit$strata)[i]==index
  lines(time[j],trans[j],col=col[i],lty=lty[i])
  points(time[j],trans[j],pch=pch[i],col=col[i])
}
base.legend.args <- list(x="topright",legend=names(fit$strata),col=col,lty=lty,pch=pch)
legend.args <- do.call("updateList",c(list(base.legend.args), legend.args))
do.call("legend", legend.args)
invisible(plot)
} |
AnalysisCenter.2Party = function(regression = "linear",
data = NULL,
response = NULL,
strata = NULL,
mask = TRUE,
monitorFolder = NULL,
msreqid = "v_default_00_000",
blocksize = 500,
tol = 1e-8,
maxIterations = 25,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (regression == "cox") {
stats = PartyAProcess2Cox(data, response, strata, mask, monitorFolder,
msreqid, blocksize, tol, maxIterations,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "linear") {
stats = PartyAProcess2Linear(data, response, monitorFolder, msreqid,
blocksize, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else if (regression == "logistic") {
stats = PartyAProcess2Logistic(data, response, monitorFolder, msreqid,
blocksize, tol, maxIterations, sleepTime,
maxWaitingTime, popmednet, trace, verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
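# Usage sketch (hedged; data objects and folder paths are hypothetical).
# The two parties run concurrently and exchange files via their monitor
# folders, e.g. the analysis center runs
#   stats <- AnalysisCenter.2Party(regression = "linear", data = dataA,
#                                  response = "y", monitorFolder = "mf/ac")
# while the data partner runs DataPartner.2Party() with its covariates and
# its own monitorFolder.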
DataPartner.2Party = function(regression = "linear",
data = NULL,
strata = NULL,
mask = TRUE,
monitorFolder = NULL,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (regression == "cox") {
stats = PartyBProcess2Cox(data, strata, mask,
monitorFolder, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else if (regression == "linear") {
stats = PartyBProcess2Linear(data, monitorFolder, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else if (regression == "logistic") {
stats = PartyBProcess2Logistic(data, monitorFolder, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
DataPartner1.3Party = function(regression = "linear",
data = NULL,
response = NULL,
strata = NULL,
mask = TRUE,
monitorFolder = NULL,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (regression == "cox") {
stats = PartyAProcess3Cox(data, response, strata, mask, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "linear") {
stats = PartyAProcess3Linear(data, response, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "logistic") {
stats = PartyAProcess3Logistic(data, response, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
DataPartner2.3Party = function(regression = "linear",
data = NULL,
strata = NULL,
mask = TRUE,
monitorFolder = NULL,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (regression == "cox") {
stats = PartyBProcess3Cox(data, strata, mask, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "linear") {
stats = PartyBProcess3Linear(data, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "logistic") {
stats = PartyBProcess3Logistic(data, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
AnalysisCenter.3Party = function(regression = "linear",
monitorFolder = NULL,
msreqid = "v_default_00_000",
blocksize = 500,
tol = 1e-8,
maxIterations = 25,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (regression == "cox") {
stats = PartyTProcess3Cox(monitorFolder, msreqid, blocksize, tol,
maxIterations, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else if (regression == "linear") {
stats = PartyTProcess3Linear(monitorFolder, msreqid, blocksize,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "logistic") {
stats = PartyTProcess3Logistic(monitorFolder, msreqid, blocksize, tol,
maxIterations, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
DataPartner.KParty = function(regression = "linear",
data = NULL,
response = NULL,
strata = NULL,
mask = TRUE,
numDataPartners = NULL,
dataPartnerID = NULL,
monitorFolder = NULL,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (is.null(numDataPartners)) {
warning("numDataPartners must be specified")
} else if (is.null(dataPartnerID)) {
warning("dataPartnerID must be specified")
} else if (regression == "cox") {
stats = DataPartnerKCox(data, response, strata, mask, numDataPartners,
dataPartnerID, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "linear") {
stats = DataPartnerKLinear(data, response, numDataPartners,
dataPartnerID, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "logistic") {
stats = DataPartnerKLogistic(data, response, numDataPartners,
dataPartnerID, monitorFolder,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
AnalysisCenter.KParty = function(regression = "linear",
numDataPartners = NULL,
monitorFolder = NULL,
msreqid = "v_default_00_000",
tol = 1e-8,
maxIterations = 25,
sleepTime = 10,
maxWaitingTime = 86400,
popmednet = TRUE,
trace = FALSE,
verbose = TRUE) {
startTime = proc.time()
stats = list()
if (verbose) cat("Process started on", as.character(GetUTCTime()), "UTC.\n")
if (is.null(monitorFolder)) {
warning("monitorFolder must be specified.")
} else if (is.null(numDataPartners)) {
warning("numDataPartners must be specified.")
} else if (regression == "cox") {
stats = AnalysisCenterKCox(numDataPartners, monitorFolder, msreqid, tol,
maxIterations, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else if (regression == "linear") {
stats = AnalysisCenterKLinear(numDataPartners, monitorFolder, msreqid,
sleepTime, maxWaitingTime, popmednet, trace,
verbose)
} else if (regression == "logistic") {
stats = AnalysisCenterKLogistic(numDataPartners, monitorFolder, msreqid, tol,
maxIterations, sleepTime, maxWaitingTime,
popmednet, trace, verbose)
} else {
warning("Regression type must be \"cox\", \"linear\" or \"logistic\"")
}
elp = GetElapsedTime(proc.time() - startTime, final = TRUE, timeOnly = FALSE)
if (verbose) cat("Process completed on", as.character(GetUTCTime()), "UTC.\n")
if (verbose) cat(elp, "\n")
return(stats)
}
CreateIOLocation = function(monitorFolder, folder) {
location = file.path(monitorFolder, folder)
if (!dir.exists(location) && !file.exists(location)) {
dir.create(location)
return(TRUE)
}
if (file_test("-d", location)) {
return(TRUE)
}
return(FALSE)
}
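# Example (hedged): create (or verify) an "inputfiles" subfolder of a monitor
# folder; the function returns FALSE only if the path already exists as a
# regular file rather than a directory:
# ok <- CreateIOLocation(tempdir(), "inputfiles")
# stopifnot(ok)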
CheckDataFormat = function(params, data) {
if ("data.frame" %in% class(data)) {
data = data.frame(data)
} else if ("matrix" %in% class(data)) {
data = as.matrix(data)
} else {
warning("Data is not a matrix or a data frame.")
return(TRUE)
}
if (nrow(data) == 0 || ncol(data) == 0) {
warning("The data is empty.")
return(TRUE)
}
badValue = rep(FALSE, nrow(data))
for (i in 1:ncol(data)) {
if (class(data[, i]) %in% c("integer", "single", "double", "numeric")) {
badValue = badValue | !is.finite(data[, i])
} else {
badValue = badValue | is.na(data[, i])
}
}
idx = data.frame(which(badValue))
colnames(idx) = "Observations with invalid entries"
if (nrow(idx) > 0) {
warning(paste0("Some observations contain invalid values: NA, NaN, or Inf. ",
"A list of all such observations has been outputted to",
file.path(params$writePath, "invalidEntries.csv"),
". Terminating program."))
write.csv(idx, file.path(params$writePath, "invalidEntries.csv"))
return(TRUE)
}
if (is.null(colnames(data))) {
warning("Variables are not named.")
return(TRUE)
}
return(FALSE)
}
CheckResponse = function(params, data, yname) {
if (is.null(yname)) {
warning("Response is not specified.")
return(NULL)
}
if (class(yname) != "character") {
warning("response label is not a character string.")
return(NULL)
}
yname = unique(yname)
if (params$analysis == "linear" || params$analysis == "logistic") {
if (length(yname) != 1) {
warning(paste("Specify only one reponse for", params$analysis, "regression."))
return(NULL)
}
responseColIndex = which(colnames(data) %in% yname)
if (length(responseColIndex) == 0) {
warning("Response variable not found.")
return(NULL)
}
if (length(responseColIndex) > 1) {
warning("Response variable appears more than once.")
return(NULL)
}
}
if (params$analysis == "cox") {
if (length(yname) != 2) {
warning("Specify exactly two variables (time and censor) for Cox regression.")
return(NULL)
}
responseColIndexTime = c(which(colnames(data) %in% yname[1]))
responseColIndexCensor = c(which(colnames(data) %in% yname[2]))
if (length(responseColIndexTime) == 0) {
warning("Time variable not found.")
return(NULL)
}
if (length(responseColIndexTime) > 1) {
warning("Time variable appears more than once.")
return(NULL)
}
if (length(responseColIndexCensor) == 0) {
warning("Censor variable not found.")
return(NULL)
}
if (length(responseColIndexCensor) > 1) {
warning("Censor variable appears more than once.")
return(NULL)
}
responseColIndex = c(responseColIndexTime, responseColIndexCensor)
}
for (i in 1:length(yname)) {
if (!("numeric" %in% class(data[, responseColIndex[i]])) &&
!("integer" %in% class(data[, responseColIndex[i]]))) {
warning(paste(yname[i], "is not numeric."))
return(NULL)
}
}
if (params$analysis == "logistic") {
if (sum(!(data[, responseColIndex] %in% c(0, 1))) > 0) {
warning("Response variable is not binary. It should only be 0's and 1's.")
return(NULL)
}
}
if (params$analysis == "cox") {
if (sum(!(data[, responseColIndex[2]] %in% c(0, 1))) > 0) {
warning("Censoring variable is not binary. It should only be 0's and 1's.")
return(NULL)
}
}
return(responseColIndex)
}
CreateModelMatrixTags = function(data) {
if (ncol(data) == 0) {
return(c())
}
num = numeric(ncol(data))
classes = character(ncol(data))
for (i in 1:ncol(data)) {
if (class(data[, i]) %in% c("integer", "single", "double", "numeric")) {
num[i] = 1
classes[i] = "numeric"
} else {
num[i] = length(unique(data[, i])) - 1
classes[i] = "factor"
}
}
tags = rep(names(data), times = num)
names(tags) = rep(classes, times = num)
return(tags)
}
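# Example (hedged): each numeric column contributes one tag and each
# non-numeric column contributes (distinct values - 1) tags, mirroring the
# columns of a treatment-coded model matrix:
# df <- data.frame(age = c(30, 40, 50), grp = factor(c("a", "b", "b")))
# CreateModelMatrixTags(df)   # c(numeric = "age", factor = "grp")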
PrepareParams.2p = function(analysis, party, msreqid = "v_default_00_000",
popmednet = TRUE, trace = FALSE, verbose = TRUE) {
params = list()
params$partyName = party
params$analysis = analysis
params$msreqid = msreqid
params$popmednet = popmednet
params$trace = trace & verbose
params$verbose = verbose
params$failed = FALSE
params$errorMessage = ""
params$pmnStepCounter = 0
params$algIterationCounter = 0
params$completed = FALSE
params$converged = FALSE
params$maxIterExceeded = FALSE
params$lastIteration = FALSE
params$p1 = 0
params$p2 = 0
params$p1.old = 0
params$p2.old = 0
params$stats = list()
class(params$stats) = paste0("vdra", analysis)
params$stats$failed = TRUE
params$stats$converged = FALSE
return(params)
}
PrepareParams.3p = function(analysis, party, msreqid = "v_default_00_000",
popmednet = TRUE, trace = FALSE, verbose = TRUE) {
params = list()
params$partyName = party
params$analysis = analysis
params$msreqid = msreqid
params$popmednet = popmednet
params$trace = trace & verbose
params$verbose = verbose
params$failed = FALSE
params$errorMessage = ""
params$pmnStepCounter = 0
params$algIterationCounter = 0
params$completed = FALSE
params$converged = FALSE
params$maxIterExceeded = FALSE
params$lastIteration = FALSE
params$p1 = 0
params$p2 = 0
params$p1.old = 0
params$p2.old = 0
params$stats = list()
class(params$stats) = paste0("vdra", analysis)
params$stats$failed = TRUE
params$stats$converged = FALSE
return(params)
}
PrepareParams.kp = function(analysis, dataPartnerID, numDataPartners,
msreqid = "v_default_00_000", cutoff = NULL,
maxIterations = NULL, ac = FALSE, popmednet = TRUE,
trace = FALSE, verbose = TRUE) {
params = list()
params$dataPartnerID = dataPartnerID
params$numDataPartners = numDataPartners
params$analysis = analysis
params$msreqid = msreqid
params$popmednet = popmednet
params$trace = trace & verbose
params$verbose = verbose
params$failed = FALSE
params$errorMessage = ""
params$pmnStepCounter = 0
params$algIterationCounter = 0
params$maxIterations = maxIterations
params$completed = FALSE
params$converged = FALSE
params$maxIterExceeded = FALSE
params$lastIteration = FALSE
params$cutoff = cutoff
params$stats = list()
class(params$stats) = paste0("vdra", analysis)
params$stats$failed = TRUE
params$stats$converged = FALSE
if (((class(numDataPartners) != "integer" &&
class(numDataPartners) != "numeric") ||
numDataPartners <= 0 ||
is.infinite(numDataPartners) ||
round(numDataPartners) != numDataPartners)) {
params$failed = TRUE
params$errorMessage = "numDataPartners must be a positive integer, and must equal the number of data partners providing data."
}
if (!params$failed) {
if (ac) {
if (dataPartnerID != 0) {
params$failed = TRUE
params$errorMessage = "dataPartnerID for Analysis Center must be 0.\n\n"
}
} else {
if (dataPartnerID <= 0 || dataPartnerID > numDataPartners) {
params$failed = TRUE
params$errorMessage = paste0("dataPartnerID must be between 1 and ", numDataPartners, " inclusive.\n\n")
}
}
}
return(params)
}
Header = function(params) {
large.cox = c(" ____ _____ __",
" / ___/ _ \\ \\/ /",
"| | | | | \\ / ",
"| |__| |_| / \\ ",
" \\____\\___/_/\\_\\")
large.linear = c(" _ ___ _ _ _____ _ ____ ",
"| | |_ _| \\ | | ____| / \\ | _ \\ ",
"| | | || \\| | _| / _ \\ | |_) |",
"| |___ | || |\\ | |___ / ___ \\| _ < ",
"|_____|___|_| \\_|_____/_/ \\_|_| \\_\\")
large.logistic = c(" _ ___ ____ ___ ____ _____ ___ ____ ",
"| | / _ \\ / ___|_ _/ ___|_ _|_ _/ ___|",
"| | | | | | | _ | |\\___ \\ | | | | | ",
"| |__| |_| | |_| || | ___) || | | | |___ ",
"|_____\\___/ \\____|___|____/ |_| |___\\____|")
large.regression = c(" ____ _____ ____ ____ _____ ____ ____ ___ ___ _ _ ",
"| _ \\| ____/ ___| _ \\| ____/ ___/ ___|_ _/ _ \\| \\ | |",
"| |_) | _|| | _| |_) | _| \\___ \\___ \\| | | | | \\| |",
"| _ <| |__| |_| | _ <| |___ ___) ___) | | |_| | |\\ |",
"|_| \\_|_____\\____|_| \\_|_____|____|____|___\\___/|_| \\_|")
small.cox = c(" ___ _____ __",
" / __/ _ \\ \\/ /",
"| (_| (_) > < ",
" \\___\\___/_/\\_\\")
small.linear = c(" _ ___ _ _ ___ _ ___ ",
"| | |_ _| \\| | __| /_\\ | _ \\",
"| |__ | || .` | _| / _ \\| /",
"|____|___|_|\\_|___/_/ \\_\\_|_\\")
small.logistic = c(" _ ___ ___ ___ ___ _____ ___ ___ ",
"| | / _ \\ / __|_ _/ __|_ _|_ _/ __|",
"| |_| (_) | (_ || |\\__ \\ | | | | (__ ",
"|____\\___/ \\___|___|___/ |_| |___\\___|")
small.regression = c(" ___ ___ ___ ___ ___ ___ ___ ___ ___ _ _ ",
"| _ \\ __/ __| _ \\ __/ __/ __|_ _/ _ \\| \\| |",
"| / _| (_ | / _|\\__ \\__ \\| | (_) | .` |",
"|_|_\\___\\___|_|_\\___|___/___/___\\___/|_|\\_|")
tiny.cox = c("+-+-+-+",
"|C|O|X|")
tiny.linear = c("+-+-+-+-+-+-+",
"|L|I|N|E|A|R|")
tiny.logistic = c("+-+-+-+-+-+-+-+-+",
"|L|O|G|I|S|T|I|C|")
tiny.regression = c("+-+-+-+-+-+-+-+-+-+-+",
"|R|E|G|R|E|S|S|I|O|N|",
"+-+-+-+-+-+-+-+-+-+-+")
width = getOption("width")
if (width > nchar(large.regression[1])) {
cox = large.cox
linear = large.linear
logistic = large.logistic
regression = large.regression
} else if (width > nchar(small.regression[1])) {
cox = small.cox
linear = small.linear
logistic = small.logistic
regression = small.regression
} else {
cox = tiny.cox
linear = tiny.linear
logistic = tiny.logistic
regression = tiny.regression
}
offset.cox = floor((width - nchar(cox[1])) / 2)
offset.linear = floor((width - nchar(linear[1])) / 2)
offset.logistic = floor((width - nchar(logistic[1])) / 2)
offset.regression = floor((width - nchar(regression[1])) / 2)
space.cox = paste(rep(" ", offset.cox), collapse = "")
space.linear = paste(rep(" ", offset.linear), collapse = "")
space.logistic = paste(rep(" ", offset.logistic), collapse = "")
space.regression = paste(rep(" ", offset.regression), collapse = "")
if (params$analysis == "linear") {
if (params$verbose) cat(paste0("\r", space.linear, linear, "\n"))
if (params$verbose) cat(paste0("\r", space.regression, regression, "\n"))
}
if (params$analysis == "logistic") {
if (params$verbose) cat(paste0("\r", space.logistic, logistic, "\n"))
if (params$verbose) cat(paste0("\r", space.regression, regression, "\n"))
}
if (params$analysis == "cox") {
if (params$verbose) cat(paste0("\r", space.cox, cox, "\n"))
if (params$verbose) cat(paste0("\r", space.regression, regression, "\n"))
}
if (params$verbose) cat("\n")
}
BeginningIteration = function(params) {
width = getOption("width")
msg = paste("*** Beginning Iteration", params$algIterationCounter, "***")
offset = max(floor((width - nchar(msg)) / 2) - 1, 0)
space = paste(rep(" ", offset), collapse = "")
if (params$verbose) cat(space, msg, "\n\n")
}
EndingIteration = function(params) {
width = getOption("width")
msg = paste("*** Ending Iteration", params$algIterationCounter, "***")
offset = floor((width - nchar(msg)) / 2) - 1
space = paste(rep(" ", offset), collapse = "")
if (params$verbose) cat(space, msg, "\n\n")
}
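# GetLion returns exactly p lines of ASCII art (the Penn State Nittany Lion),
# picking a larger or smaller drawing depending on p and padding with blank
# lines above and below.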
GetLion = function(p) {
lion1 = rep("", 5)
lion1[1] = " (\"`-''-/\").___..--''\"`-._\" "
lion1[2] = " `6_ 6 ) `-. ( ).`-.__.`) "
lion1[3] = " (_Y_.)' ._ ) `._ `. ``-..-' "
lion1[4] = " _..`--'_..-_/ /--'_.' ,' "
lion1[5] = " (il),-'' (li),' ((!.-' "
lion2 = rep("", 8)
lion2[1] = " ___ ___ _ _ _ _ "
lion2[2] = " | -_>||__>|\\ |||\\ || "
lion2[3] = " | | ||__>| \\||| \\|| "
lion2[4] = " |_| ||__>|_\\_||_\\_| "
lion2[5] = " ___ _____ ___ _____ ___ "
lion2[6] = " //__>|_ _|//_\\|_ _|||__> "
lion2[7] = " \\_\\ | | | | | | | ||__> "
lion2[8] = " <__// |_| |_|_| |_| ||__> "
lion3 = rep("", 4)
lion3[1] = " ___ ___ _ _ "
lion3[2] = " | -_> //__> | | | | "
lion3[3] = " | | \\_\\ | |_| | "
lion3[4] = " |_| <__// \\___// "
lion4 = " (il),-'' (li),' ((!.-' PSU "
lion5 = " PSU "
if (p >= 13) {
nittany = c(lion1, lion2)
} else if (p >= 9) {
nittany = c(lion1, lion3)
} else if (p >= 5) {
nittany = lion1
nittany[5] = lion4
} else {
nittany = lion5
}
diff = p - length(nittany)
top = floor(diff / 2)
bottom = diff - top
nittany = c(rep("", top), nittany, rep("", bottom))
return(nittany)
}
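# MakeProgressBar1 initializes and prints an empty text progress bar;
# MakeProgressBar2 redraws it as the step counter advances. A minimal usage
# sketch (the step count and label here are hypothetical):
#
#   pb = MakeProgressBar1(steps = 200, message = "reading blocks", verbose = TRUE)
#   for (i in 1:200) {
#     # ... one unit of work ...
#     pb = MakeProgressBar2(i, pb, verbose = TRUE)
#   }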
MakeProgressBar1 = function(steps, message, verbose) {
pb = list()
messageLength = 18
pb$numSteps = steps
pb$numBlanks = 20
pb$delimiter = "|"
pb$filler = "=" # progress-bar fill character (original literal was truncated; "=" assumed)
pb$blank = "."
pb$percent = 0
pb$percentstr = " 0%"
pb$prints = 0
message = substr(message, 1, messageLength)
message = paste0(message,
paste(rep(" ", messageLength - nchar(message)),
collapse = ""))
pb$header = paste0("Processing ", message, ": ")
toPrint = paste0(pb$header, pb$percentstr, pb$delimiter,
paste(rep(pb$blank, pb$numBlanks), collapse = ""), pb$delimiter)
if (verbose) cat(toPrint, "\r")
if (verbose) flush.console()
return(pb)
}
MakeProgressBar2 = function(i, pb, verbose) {
percent = floor(100 * i / pb$numSteps)
if (percent == pb$percent) {
return(pb)
}
pb$percent = percent
pb$percentstr = paste0(paste(rep(" ", 3 - nchar(percent)), collapse = ""), percent, "%")
numFiller = floor(pb$numBlanks * i / pb$numSteps)
toPrint = paste0(pb$header, pb$percentstr, pb$delimiter,
paste(rep(pb$filler, numFiller), collapse = ""),
paste(rep(pb$blank, pb$numBlanks - numFiller), collapse = ""),
pb$delimiter)
if (i == pb$numSteps) {
if (verbose) cat(toPrint, "\n\n")
} else {
if (verbose) cat(toPrint, "\r")
}
if (verbose) flush.console()
return(pb)
}
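# MultiplyDiagonalWTimesX computes diag(w) %*% x row by row:
# wx1[i, ] = w[i] * x[i, ]. Here w is expected to be a weight vector or a
# single-column matrix with one weight per row of x; for a plain vector w
# this is equivalent to sweep(x, 1, w, "*").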
MultiplyDiagonalWTimesX = function(w, x) {
if (!is.matrix(x)) {
x = matrix(x, length(x), 1)
wx1 = matrix(NA, length(x), 1)
} else {
wx1 = matrix(NA, nrow = nrow(x), ncol = ncol(x))
}
if (is.matrix(w)) {
for (i in 1:nrow(w)) {
wx1[i, ] = w[i] * x[i, ]
}
} else {
for (i in 1:length(w)) {
wx1[i, ] = w[i] * x[i, ]
}
}
return(wx1)
}
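# FindOrthogonalVectors appends a random column to x and returns the last g
# columns of the complete QR decomposition: g vectors orthogonal to the
# column space of x (and to the appended random column).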
FindOrthogonalVectors = function(x, g) {
x = as.matrix(x)
x = cbind(x, runif(nrow(x)))
n = nrow(x)
Q = qr.Q(qr(x), complete = TRUE)
Q = Q[, (n - g + 1):n]
return(Q)
}
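# Returns a size x size matrix with orthonormal columns, obtained from the
# QR decomposition of a matrix of uniform random draws.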
RandomOrthonomalMatrix = function(size) {
return(qr.Q(qr(matrix(runif(size * size), size, size)), complete = TRUE))
}
MakeCSV = function(file_nm, transfer_to_site_in, dp_cd_list, writePath) {
dframe = data.frame(file_nm, transfer_to_site_in, dp_cd_list)
fp = file.path(writePath, "file_list.csv")
write.csv(dframe, fp, row.names = FALSE, quote = FALSE)
}
SeqZW = function(letter = "Z_", nblocks = 1) {
return(paste0(letter, 1:nblocks, ".rdata"))
}
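# The functions below implement a simple file-based handshake: one party
# writes its output files plus a small trigger file (MakeTrigger), and the
# other party blocks in Standby() until that trigger appears, then removes
# it (DeleteTrigger) and reads the payload. A minimal sketch (the paths are
# hypothetical):
#
#   MakeTrigger("files_done.ok", "/shared/outbox")   # sender signals completion
#   Standby("files_done.ok", "/shared/inbox")        # receiver blocks until it arrives
#   DeleteTrigger("files_done.ok", "/shared/inbox")  # receiver cleans up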
Standby = function(triggerName, triggerLocation,
sleepTime = 1, maxWaitingTime = NULL, remove = FALSE,
verbose = TRUE) {
found = FALSE
if (is.null(maxWaitingTime)) { maxWaitingTime = 60 * 60 * 24 }
fpath = file.path(triggerLocation, triggerName)
startTime = proc.time()[3]
elapsedTime = 0
while (!found) {
found = all(file.exists(fpath))
if (elapsedTime > maxWaitingTime) {
break
}
if (!found) {
Sys.sleep(sleepTime)
elapsedTime = round(proc.time()[3] - startTime, 0)
}
if (verbose) cat("Elapsed Time:", HMS(elapsedTime), "\r")
}
if (verbose) cat("\n")
if (!found) {
stop("Exceeded maximum time waiting for files to be dropped.")
}
Sys.sleep(sleepTime)
if (remove) DeleteTrigger(triggerName, triggerLocation)
}
CopyFile = function(readDirectory, writeDirectory, filename) {
source = file.path(readDirectory, filename)
destination = file.path(writeDirectory, filename)
if (all(file.exists(source))) {
file.copy(source, destination, overwrite = TRUE)
} else {
stop(paste0("These files do not exist:\n",
paste0(source[!file.exists(source)], collapse = ", "), "\n"))
}
}
MakeTrigger = function(triggerName, triggerPath, message = "Trigger File") {
fn = file.path(triggerPath, triggerName)
if (file.exists(fn)) {
file.remove(fn)
}
write(message, fn)
}
DeleteTrigger = function(triggerName, triggerPath) {
Sys.sleep(1)
targets = file.path(triggerPath, triggerName)
for (target in targets) {
if (file.exists(target)) {
startTime = proc.time()[3]
repeat {
result = suppressWarnings(try(file.remove(target)))
if (isTRUE(result)) break # try() may return a "try-error" object, not a logical
Sys.sleep(1)
if (proc.time()[3] - startTime > 60) {
stop(paste("Could not delete the file", target, "after 60 seconds."))
}
}
}
}
}
MakeTransferMessage = function(writePath) {
message = "A has no covariates."
save(message, file = file.path(writePath, "transferControl.rdata"))
}
MakeErrorMessage = function(writePath, message = "") {
save(message, file = file.path(writePath, "errorMessage.rdata"))
}
ReadErrorMessage = function(readPath) {
load(file.path(readPath, "errorMessage.rdata"))
return(message)
}
SendPauseQuit.2p = function(params,
files = c(),
sleepTime = 10,
job_failed = FALSE) {
params = StoreLogEntry.2p(params, files)
params = StoreTrackingTableEntry.2p(params)
WriteLogCSV(params)
WriteLogRaw(params)
params$lastIteration = TRUE
files = c(files, "stamps.rdata", "log.rdata", "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
if (params$partyName == "A") {
if (job_failed) {
files = c(files, "job_fail.ok")
params = StoreStampsEntry(params, "Job failed trigger file", "Trigger File created")
} else {
files = c(files, "job_done.ok")
params = StoreStampsEntry(params, "Job done trigger file", "Trigger File created")
}
transfer = c(transfer, 10)
}
if (params$partyName == "A") {
files = c(files, "dl_track_tbl.csv")
transfer = c(transfer, 10)
destination = rep(1, length(files))
destination[transfer == 10] = 10
} else {
files = c(files, "tr_tb_updt.rdata")
transfer = c(transfer, 1)
destination = rep(0, length(files))
destination[transfer == 10] = 10
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File Created")
params = StoreStampsEntry(params, "R program execution complete, output files written",
"Tracking Table")
WriteStampsCSV(params)
WriteStampsRaw(params)
if (job_failed) {
MakeTrigger("job_fail.ok", params$writePath)
} else {
MakeTrigger("job_done.ok", params$writePath)
}
MakeTrigger("files_done.ok", params$writePath)
return(params)
}
SendPauseContinue.2p = function(params,
files = c(),
sleepTime = 10,
maxWaitingTime = NULL,
job_started = FALSE) {
params = StoreLogEntry.2p(params, files)
params = StoreTrackingTableEntry.2p(params)
WriteLogCSV(params)
WriteLogRaw(params)
files = c(files, "stamps.rdata", "log.rdata", "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
if (params$partyName == "A") {
files = c(files, "dl_track_tbl.csv")
transfer = c(transfer, 10)
destination = rep(1, length(files))
destination[transfer == 10] = 10
} else {
files = c("tr_tb_updt.rdata", files)
transfer = c(1, transfer)
destination = rep(0, length(files))
destination[transfer == 10] = 10
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File created")
WriteStampsCSV(params)
WriteStampsRaw(params)
params$pmnStepCounter = params$pmnStepCounter + 2
if (job_started) {
MakeTrigger("job_started.ok", params$writePath)
} else {
MakeTrigger("files_done.ok", params$writePath)
}
if (params$partyName == "A") {
if (params$verbose) cat("Waiting for data partner\n")
} else {
if (params$verbose) cat("Waiting for analysis center\n")
}
Standby("files_done.ok", params$readPath,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
if (params$verbose) cat("Resuming local processing\n\n")
DeleteTrigger("files_done.ok", params$readPath)
params = ReadLogRaw.2p(params)
params = NewLogEntry.2p(params)
params = ReadStampsRaw.2p(params)
params = StoreStampsEntry(params, "R program execution begins", "Tracking Table")
if (params$partyName == "A") {
params = ReadTrackingTableUpdate.2p(params)
}
return(params)
}
PauseContinue.2p = function(params, maxWaitingTime) {
params = StoreLogEntry.2p(params, "")
WriteLogCSV(params)
if (params$partyName == "A") {
if (params$verbose) cat("Waiting for data partner\n")
} else {
if (params$verbose) cat("Waiting for analysis center\n")
}
Standby("files_done.ok", params$readPath,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
if (params$verbose) cat("Resuming local processing\n\n")
DeleteTrigger("files_done.ok", params$readPath)
params = MergeLogRaw.2p(params)
params = NewLogEntry.2p(params)
params = MergeStampsRaw.2p(params)
params = ReadTrackingTableUpdate.2p(params)
WriteLogCSV(params)
return(params)
}
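# WaitForTurn.3p / WaitForTurn.kp stagger the parties' uploads on the wall
# clock: each party spins until the current time in whole seconds, taken
# modulo a multiple of partyOffset (15 s), equals its own slot, so that no
# two parties drop files on PopMedNet at the same moment.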
WaitForTurn.3p = function(params, sleepTime) {
Sys.sleep(sleepTime)
if ((params$partyName == "T") || (!params$popmednet)) return(NULL)
if (params$verbose) cat("Waiting For Turn\n")
startTime = proc.time()[3]
if (params$verbose) cat("Elapsed Time:", HMS(0), "\r")
if (exists("partyOffset")) {
if (params$verbose) cat("\n\n")
return()
}
partyOffset = 15
modulus = 2 * partyOffset
if (params$partyName == "A") targetTime = 0
if (params$partyName == "B") targetTime = partyOffset
while (as.integer(Sys.time()) %% modulus != targetTime) {
elapsedTime = round(proc.time()[3] - startTime, 0)
if (params$verbose) cat("Elapsed Time:", HMS(elapsedTime), "\r")
Sys.sleep(0.1)
}
if (params$verbose) cat("\n\n")
}
SendPauseQuit.3p = function(params,
filesA = NULL,
filesB = NULL,
filesT = NULL,
sleepTime = 10,
job_failed = FALSE,
waitForTurn = FALSE) {
params$lastIteration = TRUE
params$completed = TRUE
files = c(filesA, filesB, filesT, "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
destination = c(rep(1, length(filesA)),
rep(2, length(filesB)),
rep(0, length(filesT)),
10)
if (params$partyName != "T") {
files = c(files, "stamps.rdata", "log.rdata")
transfer = c(transfer, 1, 1)
destination = c(destination, 0, 0)
}
if (params$partyName == "T") {
if (job_failed) {
files = c(files, "job_fail.ok")
params = StoreStampsEntry(params, "Job failed trigger file", "Trigger File created")
} else {
files = c(files, "job_done.ok")
params = StoreStampsEntry(params, "Job done trigger file", "Trigger File created")
}
transfer = c(transfer, 10)
destination = c(destination, 10)
}
params = StoreLogEntry.3p(params, c(filesA, filesB, filesT))
params = StoreTrackingTableEntry.3p(params)
WriteLogCSV(params)
WriteLogRaw(params)
if (params$partyName == "T") {
WriteTrackingTableCSV(params)
files = c(files, "dl_track_tbl.csv")
transfer = c(transfer, 10)
destination = c(destination, 10)
} else {
WriteTrackingTableRaw(params)
files = c(files, "tr_tb_updt.rdata")
transfer = c(transfer, 1)
destination = c(destination, 0)
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File Created")
params = StoreStampsEntry(params, "R program execution complete, output files written",
"Tracking Table")
if (waitForTurn) {
params = StoreStampsEntry(params, "R program execution delayed", "Tracking Table")
WaitForTurn.3p(params, sleepTime)
params = StoreStampsEntry(params, "R program execution restarted", "Tracking Table")
}
WriteStampsCSV(params)
WriteStampsRaw(params)
if (params$partyName == "T") {
if (job_failed) {
MakeTrigger("job_fail.ok", params$writePath)
} else {
MakeTrigger("job_done.ok", params$writePath)
}
}
MakeTrigger("files_done.ok", params$writePath)
return(params)
}
SendPauseContinue.3p = function(params,
filesA = NULL,
filesB = NULL,
filesT = NULL,
from = NULL,
sleepTime = 10,
maxWaitingTime = 24 * 60 * 60,
job_started = FALSE,
waitForTurn = FALSE) {
params = StoreLogEntry.3p(params, c(filesA, filesB, filesT))
params = StoreTrackingTableEntry.3p(params)
WriteLogCSV(params)
WriteLogRaw(params)
files = c(filesA, filesB, filesT, "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
destination = c(rep(1, length(filesA)),
rep(2, length(filesB)),
rep(0, length(filesT)), 10)
if (length(files) > 1) {
WriteTrackingTableRaw(params)
}
if (!is.null(filesA)) {
files = c(files, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
transfer = c(transfer, 1, 1, 1)
destination = c(destination, 1, 1, 1)
}
if (!is.null(filesB)) {
files = c(files, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
transfer = c(transfer, 1, 1, 1)
destination = c(destination, 2, 2, 2)
}
if (!is.null(filesT)) {
files = c(files, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
transfer = c(transfer, 1, 1, 1)
destination = c(destination, 0, 0, 0)
}
if (params$partyName == "T") {
WriteTrackingTableCSV(params)
files = c(files, "dl_track_tbl.csv")
transfer = c(transfer, 10)
destination = c(destination, 10)
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File created")
if (waitForTurn) {
params = StoreStampsEntry(params, "R program execution delayed", "Tracking Table")
WaitForTurn.3p(params, sleepTime)
params = StoreStampsEntry(params, "R program execution restarted", "Tracking Table")
}
WriteStampsCSV(params)
WriteStampsRaw(params)
if (job_started) {
MakeTrigger("job_started.ok", params$writePath)
} else {
MakeTrigger("files_done.ok", params$writePath)
}
if (length(from) == 1) {
if (from == "T") {
if (params$verbose) cat("Waiting for analysis center\n")
} else if (from == "A") {
if (params$verbose) cat("Waiting for data partner 1\n")
} else {
if (params$verbose) cat("Waiting for data partner 2\n")
}
} else if (length(from) == 2) {
if (params$verbose) cat("Waiting for data partners\n")
}
Standby("files_done.ok", params$readPath[from],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
if (params$verbose) cat("Resuming local processing\n\n")
DeleteTrigger("files_done.ok", params$readPath[from])
params = MergeLogRaw.3p(params, from)
params = UpdateCounters.3p(params)
params = NewLogEntry.3p(params)
params = MergeStampsRaw.3p(params, from)
params = StoreStampsEntry(params, "R program execution begins", "Tracking Table")
params = MergeTrackingTableRAW.3p(params, from)
return(params)
}
PauseContinue.3p = function(params, from = NULL, maxWaitingTime = 24 * 60 * 60) {
params = StoreLogEntry.3p(params, "")
params = StoreTrackingTableEntry.3p(params)
WriteLogCSV(params)
if (length(from) == 1) {
if (from == "T") {
if (params$verbose) cat("Waiting for analysis center\n")
} else if (from == "A") {
if (params$verbose) cat("Waiting for data partner 1\n")
} else {
if (params$verbose) cat("Waiting for data partner 2\n")
}
} else if (length(from) == 2) {
if (params$verbose) cat("Waiting for data partners\n")
}
Standby("files_done.ok", params$readPath[from],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
if (params$verbose) cat("Resuming local processing\n\n")
DeleteTrigger("files_done.ok", params$readPath[from])
params = MergeLogRaw.3p(params, from)
params = UpdateCounters.3p(params)
params = NewLogEntry.3p(params)
params = MergeStampsRaw.3p(params, from)
params = MergeTrackingTableRAW.3p(params, from)
WriteLogCSV(params)
return(params)
}
UpdateCounters.3p = function(params) {
params$pmnStepCounter = max(params$log$history$Step) + 1
return(params)
}
WaitForTurn.kp = function(params, sleepTime) {
Sys.sleep(sleepTime)
if (!params$popmednet) return(NULL)
if (params$verbose) cat("Waiting For Turn\n")
startTime = proc.time()[3]
if (params$verbose) cat("Elapsed Time:", HMS(0), "\r")
if (exists("partyOffset")) {
if (params$verbose) cat("\n\n")
return()
}
partyOffset = 15
modulus = (params$numDataPartners + 1) * partyOffset
targetTime = params$dataPartnerID * partyOffset
if (params$verbose) cat("Elapsed Time:", HMS(0), "\r")
while (as.integer(Sys.time()) %% modulus != targetTime) {
elapsedTime = round(proc.time()[3] - startTime, 0)
if (params$verbose) cat("Elapsed Time:", HMS(elapsedTime), "\r")
Sys.sleep(0.1)
}
if (params$verbose) cat("\n\n")
}
SendPauseQuit.kp = function(params,
filesAC = NULL,
filesDP = NULL,
sleepTime = 10,
job_failed = FALSE,
waitForTurn = FALSE) {
params$lastIteration = TRUE
params$completed = TRUE
params = StoreLogEntry.kp(params, c(filesAC, filesDP))
params = StoreTrackingTableEntry.kp(params)
WriteLogCSV(params)
WriteLogRaw(params)
if (params$dataPartnerID != 0) {
WriteTrackingTableRaw(params)
filesAC = c(filesAC, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
if (!is.null(filesDP)) {
filesDP = c(filesDP, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
}
dataPartnerTarget = 1:params$numDataPartners
dataPartnerTarget = dataPartnerTarget[-params$dataPartnerID]
files = c(filesAC, rep(filesDP, length(dataPartnerTarget)), "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
destination = c(rep(0, length(filesAC)),
rep(dataPartnerTarget, each = length(filesDP)),
10)
}
if (params$dataPartnerID == 0) {
WriteTrackingTableCSV(params)
files = c("dl_track_tbl.csv", "file_list.csv")
if (job_failed) {
files = c(files, "job_fail.ok")
params = StoreStampsEntry(params, "Job failed trigger file", "Trigger File created")
} else {
files = c(files, "job_done.ok")
params = StoreStampsEntry(params, "Job done trigger file", "Trigger File created")
}
transfer = c(10, 10, 10)
destination = c(10, 10, 10)
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File Created")
params = StoreStampsEntry(params, "R program execution complete, output files written",
"Tracking Table")
if (waitForTurn) {
params = StoreStampsEntry(params, "R program execution delayed", "Tracking Table")
WaitForTurn.kp(params, sleepTime)
params = StoreStampsEntry(params, "R program execution restarted", "Tracking Table")
}
WriteStampsCSV(params)
WriteStampsRaw(params)
if (params$dataPartnerID == 0) {
if (job_failed) {
MakeTrigger("job_fail.ok", params$writePath)
} else {
MakeTrigger("job_done.ok", params$writePath)
}
}
MakeTrigger("files_done.ok", params$writePath)
return(params)
}
SendPauseContinue.kp = function(params,
filesAC = NULL,
filesDP = NULL,
from = NULL,
sleepTime = 10,
maxWaitingTime = 24 * 60 * 60,
job_started = FALSE,
waitForTurn = FALSE) {
if (!is.list(filesDP)) {
params = StoreLogEntry.kp(params, c(filesAC, filesDP))
params = StoreTrackingTableEntry.kp(params)
WriteLogCSV(params)
WriteLogRaw(params)
if (length(filesAC) + length(filesDP) > 0) {
WriteTrackingTableRaw(params)
}
if (!is.null(filesAC)) {
filesAC = c(filesAC, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
}
if (!is.null(filesDP)) {
filesDP = c(filesDP, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
}
dataPartnerTarget = 1:params$numDataPartners
if (params$dataPartnerID != 0) {
dataPartnerTarget = dataPartnerTarget[-params$dataPartnerID]
}
files = c(filesAC, rep(filesDP, length(dataPartnerTarget)), "file_list.csv")
transfer = c(rep(1, length(files) - 1), 10)
destination = c(rep(0, length(filesAC)),
rep(dataPartnerTarget, each = length(filesDP)),
10)
} else {
files = filesAC
for (dp in 1:params$numDataPartners) {
files = c(files, filesDP[[dp]])
}
params = StoreLogEntry.kp(params, files)
params = StoreTrackingTableEntry.kp(params)
WriteLogCSV(params)
WriteLogRaw(params)
if (length(files) > 0) {
WriteTrackingTableRaw(params)
}
if (!is.null(filesAC)) {
filesAC = c(filesAC, "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
}
files = filesAC
transfer = rep(1, length(files))
destination = rep(0, length(filesAC))
for (dp in 1:params$numDataPartners) {
if (length(filesDP[[dp]]) > 0 && dp != params$dataPartnerID) {
files = c(files, filesDP[[dp]], "stamps.rdata", "log.rdata", "tr_tb_updt.rdata")
transfer = c(transfer, rep(1, length(filesDP[[dp]]) + 3))
destination = c(destination, rep(dp, length(filesDP[[dp]]) + 3))
}
}
files = c(files, "file_list.csv")
transfer = c(transfer, 10)
destination = c(destination, 10)
}
if (params$dataPartnerID == 0) {
WriteTrackingTableCSV(params)
files = c(files, "dl_track_tbl.csv")
transfer = c(transfer, 10)
destination = c(destination, 10)
}
MakeCSV(files, transfer, destination, params$writePath)
params = StoreStampsEntry(params, "Files done trigger file", "Trigger File created")
if (waitForTurn) {
params = StoreStampsEntry(params, "R program execution delayed", "Tracking Table")
WaitForTurn.kp(params, sleepTime)
params = StoreStampsEntry(params, "R program execution restarted", "Tracking Table")
}
WriteStampsCSV(params)
WriteStampsRaw(params)
if (job_started) {
MakeTrigger("job_started.ok", params$writePath)
} else {
MakeTrigger("files_done.ok", params$writePath)
}
if (from == "AC") {
if (params$verbose) cat("Waiting for analysis center\n")
Standby("files_done.ok", params$readPathAC,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathAC)
} else if (from == "DP") {
if (params$verbose) cat("Waiting for data partners\n")
if (params$dataPartnerID == 0) {
Standby("files_done.ok", params$readPathDP,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP)
} else {
Standby("files_done.ok",
params$readPathDP[-params$dataPartnerID],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP[-params$dataPartnerID])
}
} else if (from == "DP1") {
if (params$verbose) cat("Waiting for data partner 1\n")
Standby("files_done.ok",
params$readPathDP[1],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP[1])
} else if (from == "DP2") {
if (params$verbose) cat("Waiting for data partner 2\n")
Standby("files_done.ok",
params$readPathDP[2],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP[2])
}
if (params$verbose) cat("Resuming local processing\n\n")
params = MergeLogRaw.kp(params, from)
params = UpdateCounters.kp(params)
params = NewLogEntry.kp(params)
params = MergeStampsRaw.kp(params, from)
params = StoreStampsEntry(params, "R program execution begins", "Tracking Table")
params = MergeTrackingTableRAW.kp(params, from)
return(params)
}
PauseContinue.kp = function(params, from = NULL, maxWaitingTime = 24 * 60 * 60) {
params = StoreLogEntry.kp(params, "")
params = StoreTrackingTableEntry.kp(params)
WriteLogCSV(params)
params = StoreStampsEntry(params, "R program execution paused", "Tracking Table")
if (from == "AC") {
if (params$verbose) cat("Waiting for analysis center\n")
Standby("files_done.ok",
params$readPathAC,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathAC)
} else {
if (params$verbose) cat("Waiting for data partners\n")
if (params$dataPartnerID == 0) {
Standby("files_done.ok",
params$readPathDP,
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP)
} else {
Standby("files_done.ok",
params$readPathDP[-params$dataPartnerID],
maxWaitingTime = maxWaitingTime,
verbose = params$verbose)
DeleteTrigger("files_done.ok", params$readPathDP[-params$dataPartnerID])
}
}
if (params$verbose) cat("Resuming local processing\n\n")
params = MergeLogRaw.kp(params, from)
params = UpdateCounters.kp(params)
params = NewLogEntry.kp(params)
params = MergeStampsRaw.kp(params, from)
params = StoreStampsEntry(params, "R program execution begins", "Tracking Table")
params = MergeTrackingTableRAW.kp(params, from)
WriteLogCSV(params)
return(params)
}
UpdateCounters.kp = function(params) {
params$pmnStepCounter = max(params$log$history$Step) + 1
return(params)
}
ReceivedError.kp = function(params, from) {
result = list()
message = ""
if (from == "AC") {
messageExists = file.exists(file.path(params$readPathAC, "errorMessage.rdata"))
if (messageExists) {
message = ReadErrorMessage(params$readPathAC)
}
} else {
messageExists = file.exists(file.path(params$readPathDP, "errorMessage.rdata"))
for (id in 1:params$numDataPartners) {
if (messageExists[id]) {
message = paste0(message, ReadErrorMessage(params$readPathDP[id]), " ")
}
}
}
result$error = any(messageExists)
result$message = message
return(result)
}
GetUTCTime = function() {
t = Sys.time()
attr(t, "tzone") = "UTC"
return(as.POSIXlt(t))
}
GetUTCOffset = function() {
t = Sys.time()
return(format(t, "%z"))
}
GetUTCOffsetSeconds = function() {
t = Sys.time()
offset = format(t, "%z")
hour = as.numeric(substr(offset, 2, 3))
min = as.numeric(substr(offset, 4, 5))
pm = ifelse(substr(offset, 1, 1) == "-", -1, 1)
return(pm * (hour * 3600 + min * 60))
}
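# Example for GetUTCOffsetSeconds: an offset string of "-0500" yields
# -1 * (5 * 3600 + 0 * 60) = -18000 seconds.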
ConvertUTCtoRoundTripTime = function(t) {
if (t$mon < 9) { month = paste0("0", t$mon + 1) } else { month = t$mon + 1}
if (t$mday < 10) { day = paste0("0", t$mday) } else { day = t$mday }
if (t$hour < 10) { hour = paste0("0", t$hour) } else { hour = t$hour }
if (t$min < 10) { min = paste0("0", t$min) } else { min = t$min }
if (t$sec < 10) { sec = paste0("0", t$sec) } else { sec = t$sec }
return(paste0(t$year + 1900, "-", month, "-", day, " ", hour, ":",
min, ":", sec))
}
GetRoundTripTime = function() {
return(ConvertUTCtoRoundTripTime(GetUTCTime()))
}
GetElapsedTime = function(time1, final = FALSE, timeOnly = FALSE) {
etime = floor(time1[3])
hrs = floor(etime / 3600)
mins = floor((etime %% 3600) / 60)
secs = etime - hrs * 3600 - mins * 60
hr1 = if (hrs > 9) toString(hrs) else paste0("0", toString(hrs))
min1 = if (mins > 9) toString(mins) else paste0("0", toString(mins))
sec1 = if (secs > 9) toString(secs) else paste0("0", toString(secs))
if (final) {
return(paste0("(Total time elapsed: ", hr1, "hr ", min1, "min ",
sec1, "sec)"))
} else if (timeOnly) {
return(paste0("(", hr1, "hr ", min1, "min ", sec1, "sec)"))
}
return(paste0("(Time elapsed: ", hr1, "hr ", min1, "min ", sec1, "sec)"))
}
ConvertSecsToHMS = function(secs, final = FALSE, timeOnly = FALSE) {
if (length(secs) != 1) {
secs = 0
}
secs = round(secs, digits = 0)
hrs = floor(secs / 3600)
mins = floor((secs %% 3600) / 60)
secs = secs - hrs * 3600 - mins * 60
hr1 = if (hrs > 9) toString(hrs) else paste0("0", toString(hrs))
min1 = if (mins > 9) toString(mins) else paste0("0", toString(mins))
sec1 = if (secs > 9) toString(secs) else paste0("0", toString(secs))
if (final) {
return(paste0("(Total time elapsed: ", hr1, ":", min1, ":", sec1, ")"))
}
if (timeOnly) {
return(paste0("(", hr1, ":", min1, ":", sec1, ")"))
}
return(paste0("(Time elapsed: ", hr1, ":", min1, ":", sec1, ")"))
}
HMS = function(t) {
paste(paste(formatC(t %/% (60*60), width = 2, format = "d", flag = "0"),
formatC(t %/% 60 %% 60, width = 2, format = "d", flag = "0"),
formatC(t %% 60, width = 2, format = "d", flag = "0"),
sep = ":"))
}
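# Example for HMS: HMS(3725) returns "01:02:05".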
GetBlockSize = function(pA, pB) {
minBlocksize = max(25, trunc(1 + (pA + pB + 1)^2 / pB))
return(minBlocksize)
}
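# Example for GetBlockSize: GetBlockSize(5, 5) = max(25, trunc(1 + 11^2 / 5))
# = max(25, 25) = 25.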
CreateBlocks = function(pA, pB, n, blocksize) {
blocks = list()
numBlocks = max(trunc(n / blocksize), 1)
newBlocksize = trunc(n / numBlocks)
numBigBlocks = n %% newBlocksize
numLittleBlocks = numBlocks - numBigBlocks
bigBlocksize = newBlocksize + 1
gLittleBlock = trunc(newBlocksize * (pA + 1) / (pA + pB + 1))
gBigBlock = trunc(bigBlocksize * (pA + 1) / (pA + pB + 1))
blocks$numBlocks = numBlocks
blocks$littleBlocksize = newBlocksize
blocks$bigBlocksize = bigBlocksize
blocks$numLittleBlocks = numLittleBlocks
blocks$numBigBlocks = numBigBlocks
blocks$gLittleBlock = gLittleBlock
blocks$gBigBlock = gBigBlock
blocks$stops = integer()
if (numBigBlocks > 0) {
blocks$stops = bigBlocksize * 1:numBigBlocks
}
if (numLittleBlocks > 0) {
blocks$stops = c(blocks$stops, bigBlocksize * numBigBlocks +
newBlocksize * 1:numLittleBlocks)
}
if (numBlocks == 1) {
blocks$starts = c(1)
} else {
blocks$starts = c(1, 1 + blocks$stops)[1:numBlocks]
}
blocks$g = c(rep(gBigBlock, numBigBlocks), rep(gLittleBlock, numLittleBlocks))
return(blocks)
}
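# Worked example for CreateBlocks: with n = 100 and blocksize = 30,
# numBlocks = 3 and newBlocksize = 33, giving one big block of 34 rows and
# two little blocks of 33, so stops = c(34, 67, 100) and starts = c(1, 35, 68).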
CreateContainers = function(pA, pB, blocks) {
# Split each intermediate matrix type into containers of at most 25 MB and
# record the block index at which each container starts.
containers = list()
maximumFilesize = 25 * 1024^2
numBlocks = blocks$numBlocks
littleBlocksize = blocks$littleBlocksize
littleBlockG = blocks$gLittleBlock
littleFilesize.Z = 8 * littleBlocksize * littleBlockG
littleFilesize.W = 8 * littleBlocksize * pB
littleFilesize.RZ = 8 * littleBlocksize^2
littleFilesize.PR = 8 * (pA + 1) * pB
littleFilesize.XR = 8 * pA * pB
Filebreaks = function(littleFilesize) {
numContainers = ceiling(numBlocks * littleFilesize / maximumFilesize)
numBlocksSmall = trunc(numBlocks / numContainers)
numBlocksLarge = numBlocksSmall + 1
numLarge = numBlocks %% numContainers
numSmall = numContainers - numLarge
if (numLarge > 0) {
return(c(0:(numLarge - 1) * numBlocksLarge + 1,
0:(numSmall - 1) * numBlocksSmall + 1 + numLarge * numBlocksLarge))
}
return(0:(numSmall - 1) * numBlocksSmall + 1)
}
filebreak.W = Filebreaks(littleFilesize.W)
containers$filebreak.Z = Filebreaks(littleFilesize.Z)
containers$filebreak.W = filebreak.W
containers$filebreak.RZ = Filebreaks(littleFilesize.RZ)
containers$filebreak.PR = Filebreaks(littleFilesize.PR)
containers$filebreak.V = filebreak.W
containers$filebreak.RW = filebreak.W
containers$filebreak.WR = filebreak.W
containers$filebreak.RV = filebreak.W
containers$filebreak.VR = filebreak.W
containers$filebreak.Cox = filebreak.W
containers$filebreak.XR = Filebreaks(littleFilesize.XR)
return(containers)
}
formatPValue = function(pvals, width = 7) {
p = c()
for (x in pvals) {
if (is.na(x)) {
x = format("NA", width = width, justify = "right")
} else if (x > 1e-3) {
x = format(round(x, 5), width = width, justify = "right", nsmall = 5)
} else if (x > 2e-16) {
x = formatC(x, format = "e", digits = 1)
} else {
x = format("<2e-16", width = width, justify = "right")
}
p = c(p, x)
}
return(p)
}
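# Example for formatPValue: formatPValue(c(0.5, 1e-5, 1e-20)) returns
# c("0.50000", "1.0e-05", " <2e-16"), each padded to width 7.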
formatStrings = function(x, minWidth = NULL, justify = "left") {
width = max(max(nchar(x)), minWidth)
x = format(x, justify = justify, width = width)
return(x)
}
formatStat = function(x) {
if (is.na(x)) {
"NA"
} else if (x >= 1000000) {
formatC(x, format = "e", digits = 3)
} else if (x >= 1000) {
as.character(round(x, 0))
} else if (x > 1e-3) {
format(signif(x, 4))
} else {
formatC(x, format = "e", digits = 3)
}
}
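# Examples for formatStat: formatStat(1234567) returns "1.235e+06",
# formatStat(12.3456) returns "12.35", and formatStat(5e-4) returns "5.000e-04".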
formatStatList = function(vals) {
notNA = which(!is.na(vals))
notZero = which(vals != 0)
keep = intersect(notNA, notZero)
if (length(keep) == 0) {
f = c()
for (x in vals) {
if (is.na(x)) {
f = c(f, "NA")
} else {
f = c(f, "0")
}
}
return(f)
}
temp = vals[keep]
minval = min(abs(temp))
maxval = max(abs(temp))
decmin = floor(log10(minval))
decmax = floor(log10(maxval))
if (minval >= 1) decmin = decmin + 1
if (maxval >= 1) decmax = decmax + 1
if ((decmin < -3) || (decmax > 6) || (decmax - decmin > 3)) {
f = c()
for (x in vals) {
if (is.na(x)) {
f = c(f, "NA")
} else {
f = c(f, formatC(x, format = "e", digits = 3))
}
}
return(f)
} else {
if (decmin < 0) {
nsmall = 6
} else {
nsmall = 6 - decmin
}
f = c()
for (x in vals) {
if (is.na(x)) {
f = c(f, "NA")
} else {
f = c(f, format(round(x, nsmall), scientific = FALSE, nsmall = nsmall))
}
}
return(f)
}
}
StoreStampsEntry = function(params, description = "", type = "") {
newEntry = params$stamps$blank
newEntry$Step = params$pmnStepCounter
newEntry$Description = description
newEntry$Time = GetRoundTripTime()
newEntry$Type = type
params$stamps$history = rbind(params$stamps$history, newEntry)
return(params)
}
WriteStampsRaw = function(params) {
stamps = params$stamps$history
save(stamps, file = file.path(params$writePath, "stamps.rdata"))
}
WriteStampsCSV = function(params) {
write.csv(params$stamps$history, file.path(params$writePath, "stamps.csv"),
row.names = FALSE)
}
InitializeStamps.2p = function(params) {
stamps = list()
stamps$blank = data.frame(
Step = params$pmnStepCounter,
Source = paste("Org", params$partyName, "Dist Reg"),
Description = "R program execution begins",
Time = GetRoundTripTime(),
Type = "Tracking Table")
stamps$history = stamps$blank
params$stamps = stamps
return(params)
}
ReadStampsRaw.2p = function(params) {
stamps = NULL
load(file.path(params$readPath, "stamps.rdata"))
params$stamps$history = stamps
return(params)
}
MergeStampsRaw.2p = function(params) {
stamps = NULL
load(file.path(params$readPath, "stamps.rdata"))
params$stamps$history = rbind(params$stamps$history, stamps)
return(params)
}
InitializeStamps.3p = function(params) {
stamps = list()
stamps$blank = data.frame(
Step = params$pmnStepCounter,
Source = paste("Org", params$partyName, "Dist Reg"),
Description = "R program execution begins",
Time = GetRoundTripTime(),
Type = "Tracking Table")
stamps$history = stamps$blank
params$stamps = stamps
return(params)
}
MergeStampsRaw.3p = function(params, from) {
stamps = NULL
for (party in from) {
load(file.path(params$readPath[[party]], "stamps.rdata"))
key1 = paste0(params$stamps$history$Step,
params$stamps$history$Source,
params$stamps$history$Description)
key2 = paste0(stamps$Step,
stamps$Source,
stamps$Description)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$stamps$history = rbind(params$stamps$history, stamps)
} else if (length(idx) < length(key2)) {
params$stamps$history = rbind(params$stamps$history, stamps[-idx, ])
}
}
idx = order(as.character(params$stamps$history$Time))
params$stamps$history = params$stamps$history[idx, ]
return(params)
}
InitializeStamps.kp = function(params) {
stamps = list()
stamps$blank = data.frame(Step = params$pmnStepCounter,
Source = paste0("Org dp", params$dataPartnerID, " Dist Reg"),
Description = "R program execution begins",
Time = GetRoundTripTime(),
Type = "Tracking Table")
stamps$history = stamps$blank
params$stamps = stamps
return(params)
}
MergeStampsRaw.kp = function(params, from) {
# Merge the stamps history from one read path into params, skipping entries
# already present (matched on Step + Source + Description).
MergeOne = function(params, path) {
stamps = NULL
load(file.path(path, "stamps.rdata"))
key1 = paste0(params$stamps$history$Step,
params$stamps$history$Source,
params$stamps$history$Description)
key2 = paste0(stamps$Step,
stamps$Source,
stamps$Description)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$stamps$history = rbind(params$stamps$history, stamps)
} else if (length(idx) < length(key2)) {
params$stamps$history = rbind(params$stamps$history, stamps[-idx, ])
}
return(params)
}
if (from == "AC") {
params = MergeOne(params, params$readPathAC)
} else if (from == "DP1") {
params = MergeOne(params, params$readPathDP[1])
} else {
for (id in 1:params$numDataPartners) {
if (id == params$dataPartnerID) next
params = MergeOne(params, params$readPathDP[id])
}
}
idx = order(as.character(params$stamps$history$Time))
params$stamps$history = params$stamps$history[idx, ]
return(params)
}
AddToLog = function(params, functionName, readTime, readSize, writeTime, writeSize) {
readTime = round(as.numeric(readTime), digits = 2)
writeTime = round(as.numeric(writeTime), digits = 2)
readSize = round(as.numeric(readSize), digits = 0)
writeSize = round(as.numeric(writeSize), digits = 0)
if (params$log$current$Functions == "") {
params$log$current$Functions = functionName
} else {
params$log$current$Functions = paste0(params$log$current$Functions,
", ", functionName)
}
params$log$current$Read.Time = params$log$current$Read.Time + readTime
params$log$current$Read.Size = params$log$current$Read.Size + readSize
params$log$current$Write.Time = params$log$current$Write.Time + writeTime
params$log$current$Write.Size = params$log$current$Write.Size + writeSize
return(params)
}
WriteLogRaw = function(params) {
log = params$log$history
save(log, file = file.path(params$writePath, "log.rdata"))
}
WriteLogCSV = function(params) {
write.csv(params$log$history, file.path(params$writePath, "log.csv"),
row.names = FALSE)
}
WriteToLogSummary = function(c1 = "", c2 = "", c3 = "",
writePath = NULL, append = TRUE) {
if (is.numeric(c2)) {
c2 = round(c2, 2)
}
write.table(data.frame(c1, c2, c3),
file.path(writePath, "log_summary.csv"), sep = ",", col.names = FALSE,
row.names = FALSE, append = append)
}
InitializeLog.2p = function(params) {
log = list()
log$blank = data.frame(Step = 0,
Iteration.alg = 0,
Party = "",
Functions = "",
Wait.Time = 0,
Start.Time = GetUTCTime(),
End.Time = GetUTCTime(),
Read.Time = 0,
Read.Size = 0,
Write.Time = 0,
Write.Size = 0,
Computation.Time = 0,
Files.Sent = "",
Bytes.Sent = 0)
log$current = log$blank
log$history = log$blank
params$log = log
return(params)
}
NewLogEntry.2p = function(params) {
params$log$current = params$log$blank
params$log$current$Party = params$partyName
params$log$current$Start.Time = GetUTCTime()
return(params)
}
StoreLogEntry.2p = function(params, files) {
params$log$current$Step = params$pmnStepCounter
params$log$current$Iteration.alg = params$algIterationCounter
params$log$current$Party = params$partyName
params$log$current$End.Time = GetUTCTime()
params$log$current$Computation.Time = round(as.numeric(difftime(
params$log$current$End.Time, params$log$current$Start.Time, units = "secs")) -
params$log$current$Read.Time - params$log$current$Write.Time, 2)
params$log$current$Files.Sent = paste(files, collapse = ", ")
params$log$current$Bytes.Sent = sum(file.size(file.path(params$writePath, files)))
if (is.na(params$log$current$Bytes.Sent)) {
params$log$current$Bytes.Sent = 0
}
nrows = nrow(params$log$history)
if (nrows >= 2) {
params$log$current$Wait.Time =
round(as.numeric(difftime(
params$log$current$Start.Time,
max(params$log$history$End.Time[which(params$log$history$Party ==
params$log$current$Party)]),
units = "secs")), 2)
}
if (params$log$history$Party[nrows] == "") {
params$log$history = params$log$current
} else {
params$log$history = rbind(params$log$history, params$log$current)
}
return(params)
}
ReadLogRaw.2p = function(params) {
load(file.path(params$readPath, "log.rdata"))
params$log$history = log
return(params)
}
MergeLogRaw.2p = function(params) {
load(file.path(params$readPath, "log.rdata"))
params$log$history = rbind(params$log$history, log)
return(params)
}
SummarizeLog.2p = function(params) {
writePath = params$writePath
log = params$log$history
indexA = which(log$Party == "A")
indexB = which(log$Party == "B")
Party.A.Start.Time = log$Start.Time[indexA[1]]
Party.A.End.Time = log$End.Time[indexA[length(indexA)]]
Party.A.Total.Time = round(as.numeric(difftime(
Party.A.End.Time, Party.A.Start.Time, units = "secs")), digits = 2)
Party.A.Reading.Time = sum(log$Read.Time[indexA])
Party.A.Writing.Time = sum(log$Write.Time[indexA])
Party.A.Computing.Time = sum(log$Computation.Time[indexA])
Party.A.Waiting.Time = sum(log$Wait.Time[indexA])
Party.A.Total.Time.HMS = ConvertSecsToHMS(Party.A.Total.Time, timeOnly = TRUE)
Party.A.Reading.Time.HMS = ConvertSecsToHMS(Party.A.Reading.Time, timeOnly = TRUE)
Party.A.Writing.Time.HMS = ConvertSecsToHMS(Party.A.Writing.Time, timeOnly = TRUE)
Party.A.Computing.Time.HMS = ConvertSecsToHMS(Party.A.Computing.Time, timeOnly = TRUE)
Party.A.Waiting.Time.HMS = ConvertSecsToHMS(Party.A.Waiting.Time, timeOnly = TRUE)
Party.A.Bytes.Read = sum(log$Read.Size[indexA])
Party.A.Bytes.Written = sum(log$Write.Size[indexA])
Party.B.Start.Time = log$Start.Time[indexB[1]]
Party.B.End.Time = log$End.Time[indexB[length(indexB)]]
Party.B.Total.Time = round(as.numeric(difftime(
Party.B.End.Time, Party.B.Start.Time, units = "secs")), digits = 2)
Party.B.Reading.Time = sum(log$Read.Time[indexB])
Party.B.Writing.Time = sum(log$Write.Time[indexB])
Party.B.Computing.Time = sum(log$Computation.Time[indexB])
Party.B.Waiting.Time = Party.B.Total.Time - Party.B.Reading.Time -
Party.B.Writing.Time - Party.B.Computing.Time
Party.B.Total.Time.HMS = ConvertSecsToHMS(Party.B.Total.Time, timeOnly = TRUE)
Party.B.Reading.Time.HMS = ConvertSecsToHMS(Party.B.Reading.Time, timeOnly = TRUE)
Party.B.Writing.Time.HMS = ConvertSecsToHMS(Party.B.Writing.Time, timeOnly = TRUE)
Party.B.Computing.Time.HMS = ConvertSecsToHMS(Party.B.Computing.Time, timeOnly = TRUE)
Party.B.Waiting.Time.HMS = ConvertSecsToHMS(Party.B.Waiting.Time, timeOnly = TRUE)
Party.B.Bytes.Read = sum(log$Read.Size[indexB])
Party.B.Bytes.Written = sum(log$Write.Size[indexB])
Total.Transfer.Time = 0
if (max(log$Step) > 1) {
for (i in 2:max(log$Step)) {
idx1 = which(log$Step == i - 1)
idx2 = which(log$Step == i)
Total.Transfer.Time = Total.Transfer.Time +
as.numeric(difftime(min(log$Start.Time[idx2]),
max(log$End.Time[idx1]), units = "secs"))
}
}
Total.Transfer.Time = round(Total.Transfer.Time, 2)
Total.Reading.Time = sum(log$Read.Time)
Total.Writing.Time = sum(log$Write.Time)
Total.Computing.Time = sum(log$Computation.Time)
Elapsed.Computing.Time = Party.A.Total.Time - Total.Transfer.Time
Total.Reading.Time.HMS = ConvertSecsToHMS(Total.Reading.Time, timeOnly = TRUE)
Total.Writing.Time.HMS = ConvertSecsToHMS(Total.Writing.Time, timeOnly = TRUE)
Total.Computing.Time.HMS = ConvertSecsToHMS(Total.Computing.Time, timeOnly = TRUE)
Elapsed.Computing.Time.HMS = ConvertSecsToHMS(Elapsed.Computing.Time, timeOnly = TRUE)
Total.Transfer.Time.HMS = ConvertSecsToHMS(Total.Transfer.Time, timeOnly = TRUE)
Total.Bytes.Transferred = sum(log$Bytes.Sent)
KB.Per.Second = round(Total.Bytes.Transferred / (Total.Transfer.Time * 1024), digits = 2)
WriteToLogSummary(c1 = "Analysis", c2 = params$analysis, writePath = writePath, append = FALSE)
if (!is.null(params$blocks)) {
WriteToLogSummary(c1 = "Blocksize", c2 = params$blocks$littleBlocksize, writePath = writePath)
WriteToLogSummary(c1 = "Number of Blocks",
c2 = params$blocks$numLittleBlocks + params$blocks$numBigBlocks,
writePath = writePath)
}
if (!is.null(params$n)) WriteToLogSummary(c1 = "N", c2 = params$n, writePath = writePath)
p = max(0, params$p1.old - (params$analysis != "cox"))
WriteToLogSummary(c1 = "pA", c2 = p, writePath = writePath)
p = params$p2.old
WriteToLogSummary(c1 = "pB", c2 = p, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Party A Start Time", c2 = Party.A.Start.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party A End Time", c2 = Party.A.End.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Run Time", c2 = Party.A.Total.Time,
c3 = Party.A.Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Reading Time", c2 = Party.A.Reading.Time,
c3 = Party.A.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Bytes Read", c2 = Party.A.Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Writing Time", c2 = Party.A.Writing.Time,
c3 = Party.A.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Bytes Written", c2 = Party.A.Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Computing Time", c2 = Party.A.Computing.Time,
c3 = Party.A.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Waiting Time", c2 = Party.A.Waiting.Time,
c3 = Party.A.Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Party B Start Time", c2 = Party.B.Start.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party B End Time", c2 = Party.B.End.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Run Time", c2 = Party.B.Total.Time,
c3 = Party.B.Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Reading Time", c2 = Party.B.Reading.Time,
c3 = Party.B.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Bytes Read", c2 = Party.B.Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Writing Time", c2 = Party.B.Writing.Time,
c3 = Party.B.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Bytes Written", c2 = Party.B.Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Computing Time", c2 = Party.B.Computing.Time,
c3 = Party.B.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Waiting Time", c2 = Party.B.Waiting.Time,
c3 = Party.B.Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Total Reading Time", c2 = Total.Reading.Time,
c3 = Total.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Writing Time", c2 = Total.Writing.Time,
c3 = Total.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Computing Time", c2 = Total.Computing.Time,
c3 = Total.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Elapsed Computing Time", c2 = Elapsed.Computing.Time,
c3 = Elapsed.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Transfer Time", c2 = Total.Transfer.Time,
c3 = Total.Transfer.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Bytes Transferred", c2 = Total.Bytes.Transferred, writePath = writePath)
WriteToLogSummary(c1 = "KB / Sec Transfer Rate", c2 = KB.Per.Second, writePath = writePath)
}
InitializeLog.3p = function(params) {
log = list()
log$blank = data.frame(Step = 0,
Iteration.alg = 0,
Party = "",
Functions = "",
Wait.Time = 0,
Start.Time = GetUTCTime(),
End.Time = GetUTCTime(),
Read.Time = 0,
Read.Size = 0,
Write.Time = 0,
Write.Size = 0,
Computation.Time = 0,
Files.Sent = "",
Bytes.Sent = 0)
log$current = log$blank
log$history = log$blank
params$log = log
return(params)
}
NewLogEntry.3p = function(params) {
params$log$current = params$log$blank
params$log$current$Party = params$partyName
params$log$current$Start.Time = GetUTCTime()
return(params)
}
StoreLogEntry.3p = function(params, files) {
params$log$current$Step = params$pmnStepCounter
params$log$current$Iteration.alg = params$algIterationCounter
params$log$current$Party = params$partyName
params$log$current$End.Time = GetUTCTime()
params$log$current$Computation.Time = round(as.numeric(difftime(
params$log$current$End.Time, params$log$current$Start.Time, units = "secs")) -
params$log$current$Read.Time - params$log$current$Write.Time, 2)
params$log$current$Files.Sent = paste(files, collapse = ", ")
params$log$current$Bytes.Sent = sum(file.size(file.path(params$writePath, files)))
if (is.na(params$log$current$Bytes.Sent)) {
params$log$current$Bytes.Sent = 0
}
nrows = nrow(params$log$history)
if (nrows >= 3) {
params$log$current$Wait.Time =
round(as.numeric(difftime(
params$log$current$Start.Time,
max(params$log$history$End.Time[which(params$log$history$Party ==
params$log$current$Party)]),
units = "secs")), 2)
}
if (params$log$history$Party[nrows] == "") {
params$log$history = params$log$current
} else {
params$log$history = rbind(params$log$history, params$log$current)
}
return(params)
}
MergeLogRaw.3p = function(params, from) {
for (party in from) {
load(file.path(params$readPath[[party]], "log.rdata"))
key1 = paste0(params$log$history$Step, params$log$history$Party)
key2 = paste0(log$Step, log$Party)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$log$history = rbind(params$log$history, log)
} else if (length(idx) < length(key2)) {
params$log$history = rbind(params$log$history, log[-idx, ])
}
}
idx = order(params$log$history$Step, params$log$history$Party)
params$log$history = params$log$history[idx, ]
return(params)
}
SummarizeLog.3p = function(params) {
writePath = params$writePath
log = params$log$history
indexA = which(log$Party == "A")
indexB = which(log$Party == "B")
indexT = which(log$Party == "T")
Party.A.Start.Time = log$Start.Time[indexA[1]]
Party.A.End.Time = log$End.Time[indexA[length(indexA)]]
Party.A.Total.Time = round(as.numeric(difftime(
Party.A.End.Time, Party.A.Start.Time, units = "secs")), digits = 2)
Party.A.Reading.Time = sum(log$Read.Time[indexA])
Party.A.Writing.Time = sum(log$Write.Time[indexA])
Party.A.Computing.Time = sum(log$Computation.Time[indexA])
Party.A.Waiting.Time = sum(log$Wait.Time[indexA])
Party.A.Total.Time.HMS = ConvertSecsToHMS(Party.A.Total.Time, timeOnly = TRUE)
Party.A.Reading.Time.HMS = ConvertSecsToHMS(Party.A.Reading.Time, timeOnly = TRUE)
Party.A.Writing.Time.HMS = ConvertSecsToHMS(Party.A.Writing.Time, timeOnly = TRUE)
Party.A.Computing.Time.HMS = ConvertSecsToHMS(Party.A.Computing.Time, timeOnly = TRUE)
Party.A.Waiting.Time.HMS = ConvertSecsToHMS(Party.A.Waiting.Time, timeOnly = TRUE)
Party.A.Bytes.Read = sum(log$Read.Size[indexA])
Party.A.Bytes.Written = sum(log$Write.Size[indexA])
Party.B.Start.Time = log$Start.Time[indexB[1]]
Party.B.End.Time = log$End.Time[indexB[length(indexB)]]
Party.B.Total.Time = round(as.numeric(difftime(
Party.B.End.Time, Party.B.Start.Time, units = "secs")), digits = 2)
Party.B.Reading.Time = sum(log$Read.Time[indexB])
Party.B.Writing.Time = sum(log$Write.Time[indexB])
Party.B.Computing.Time = sum(log$Computation.Time[indexB])
Party.B.Waiting.Time = sum(log$Wait.Time[indexB])
Party.B.Total.Time.HMS = ConvertSecsToHMS(Party.B.Total.Time, timeOnly = TRUE)
Party.B.Reading.Time.HMS = ConvertSecsToHMS(Party.B.Reading.Time, timeOnly = TRUE)
Party.B.Writing.Time.HMS = ConvertSecsToHMS(Party.B.Writing.Time, timeOnly = TRUE)
Party.B.Computing.Time.HMS = ConvertSecsToHMS(Party.B.Computing.Time, timeOnly = TRUE)
Party.B.Waiting.Time.HMS = ConvertSecsToHMS(Party.B.Waiting.Time, timeOnly = TRUE)
Party.B.Bytes.Read = sum(log$Read.Size[indexB])
Party.B.Bytes.Written = sum(log$Write.Size[indexB])
Party.T.Start.Time = log$Start.Time[indexT[1]]
Party.T.End.Time = log$End.Time[indexT[length(indexT)]]
Party.T.Total.Time = round(as.numeric(difftime(
Party.T.End.Time, Party.T.Start.Time, units = "secs")), digits = 2)
Party.T.Reading.Time = sum(log$Read.Time[indexT])
Party.T.Writing.Time = sum(log$Write.Time[indexT])
Party.T.Computing.Time = sum(log$Computation.Time[indexT])
Party.T.Waiting.Time = sum(log$Wait.Time[indexT])
Party.T.Total.Time.HMS = ConvertSecsToHMS(Party.T.Total.Time, timeOnly = TRUE)
Party.T.Reading.Time.HMS = ConvertSecsToHMS(Party.T.Reading.Time, timeOnly = TRUE)
Party.T.Writing.Time.HMS = ConvertSecsToHMS(Party.T.Writing.Time, timeOnly = TRUE)
Party.T.Computing.Time.HMS = ConvertSecsToHMS(Party.T.Computing.Time, timeOnly = TRUE)
Party.T.Waiting.Time.HMS = ConvertSecsToHMS(Party.T.Waiting.Time, timeOnly = TRUE)
Party.T.Bytes.Read = sum(log$Read.Size[indexT])
Party.T.Bytes.Written = sum(log$Write.Size[indexT])
Total.Transfer.Time = 0
if (max(log$Step) > 1) {
for (i in 2:max(log$Step)) {
idx1 = which(log$Step == i - 1)
idx2 = which(log$Step == i)
Total.Transfer.Time = Total.Transfer.Time +
as.numeric(difftime(min(log$Start.Time[idx2]),
max(log$End.Time[idx1]), units = "secs"))
}
}
Total.Transfer.Time = round(Total.Transfer.Time, 2)
Elapsed.Computing.Time = Party.T.Total.Time - Total.Transfer.Time
Total.Reading.Time = sum(log$Read.Time)
Total.Writing.Time = sum(log$Write.Time)
Total.Computing.Time = sum(log$Computation.Time)
Total.Reading.Time.HMS = ConvertSecsToHMS(Total.Reading.Time, timeOnly = TRUE)
Total.Writing.Time.HMS = ConvertSecsToHMS(Total.Writing.Time, timeOnly = TRUE)
Total.Transfer.Time.HMS = ConvertSecsToHMS(Total.Transfer.Time, timeOnly = TRUE)
Total.Computing.Time.HMS = ConvertSecsToHMS(Total.Computing.Time, timeOnly = TRUE)
Elapsed.Computing.Time.HMS = ConvertSecsToHMS(Elapsed.Computing.Time, timeOnly = TRUE)
Total.Bytes.Transferred = sum(log$Bytes.Sent)
KB.Per.Second = round(Total.Bytes.Transferred / (Total.Transfer.Time * 1024), digits = 2)
WriteToLogSummary(c1 = "Analysis", c2 = params$analysis, writePath = writePath, append = FALSE)
if (!is.null(params$blocks)) {
WriteToLogSummary(c1 = "Blocksize", c2 = params$blocks$littleBlocksize, writePath = writePath)
WriteToLogSummary(c1 = "Number of Blocks",
c2 = params$blocks$numLittleBlocks + params$blocks$numBigBlocks,
writePath = writePath)
}
if (!is.null(params$n)) WriteToLogSummary(c1 = "N", c2 = params$n, writePath = writePath)
p = max(0, params$p1.old - (params$analysis != "cox"))
WriteToLogSummary(c1 = "pA", c2 = p, writePath = writePath)
p = params$p2.old
WriteToLogSummary(c1 = "pB", c2 = p, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Party A Start Time", c2 = Party.A.Start.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party A End Time", c2 = Party.A.End.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Run Time", c2 = Party.A.Total.Time,
c3 = Party.A.Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Reading Time", c2 = Party.A.Reading.Time,
c3 = Party.A.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Bytes Read", c2 = Party.A.Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Writing Time", c2 = Party.A.Writing.Time,
c3 = Party.A.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Bytes Written", c2 = Party.A.Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Computing Time", c2 = Party.A.Computing.Time,
c3 = Party.A.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party A Total Waiting Time", c2 = Party.A.Waiting.Time,
c3 = Party.A.Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Party B Start Time", c2 = Party.B.Start.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party B End Time", c2 = Party.B.End.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Run Time", c2 = Party.B.Total.Time,
c3 = Party.B.Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Reading Time", c2 = Party.B.Reading.Time,
c3 = Party.B.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Bytes Read", c2 = Party.B.Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Writing Time", c2 = Party.B.Writing.Time,
c3 = Party.B.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Bytes Written", c2 = Party.B.Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Computing Time", c2 = Party.B.Computing.Time,
c3 = Party.B.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party B Total Waiting Time", c2 = Party.B.Waiting.Time,
c3 = Party.B.Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Party T Start Time", c2 = Party.T.Start.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party T End Time", c2 = Party.T.End.Time, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Run Time", c2 = Party.T.Total.Time,
c3 = Party.T.Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Reading Time", c2 = Party.T.Reading.Time,
c3 = Party.T.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Bytes Read", c2 = Party.T.Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Writing Time", c2 = Party.T.Writing.Time,
c3 = Party.T.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Bytes Written", c2 = Party.T.Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Computing Time", c2 = Party.T.Computing.Time,
c3 = Party.T.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Party T Total Waiting Time", c2 = Party.T.Waiting.Time,
c3 = Party.T.Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
WriteToLogSummary(c1 = "Total Reading Time", c2 = Total.Reading.Time,
c3 = Total.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Writing Time", c2 = Total.Writing.Time,
c3 = Total.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Computing Time", c2 = Total.Computing.Time,
c3 = Total.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Elapsed Computing Time", c2 = Elapsed.Computing.Time,
c3 = Elapsed.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Transfer Time", c2 = Total.Transfer.Time,
c3 = Total.Transfer.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Bytes Transferred", c2 = Total.Bytes.Transferred, writePath = writePath)
WriteToLogSummary(c1 = "KB / Sec Transfer Rate", c2 = KB.Per.Second, writePath = writePath)
}
InitializeLog.kp = function(params) {
log = list()
log$blank = data.frame(Step = 0,
Iteration.alg = 0,
Party = "",
Functions = "",
Wait.Time = 0,
Start.Time = GetUTCTime(),
End.Time = GetUTCTime(),
Read.Time = 0,
Read.Size = 0,
Write.Time = 0,
Write.Size = 0,
Computation.Time = 0,
Files.Sent = "",
Bytes.Sent = 0)
log$current = log$blank
log$history = log$blank
params$log = log
return(params)
}
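# The log is three identically shaped one-row data frames: 'blank' is the
# template, 'current' is the entry being built, and 'history' accumulates
# finished entries (see StoreLogEntry.kp below).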
NewLogEntry.kp = function(params) {
params$log$current = params$log$blank
params$log$current$Party = paste0("dp", params$dataPartnerID)
params$log$current$Start.Time = GetUTCTime()
return(params)
}
StoreLogEntry.kp = function(params, files) {
params$log$current$Step = params$pmnStepCounter
params$log$current$Iteration.alg = params$algIterationCounter
params$log$current$Party = paste0("dp", params$dataPartnerID)
params$log$current$End.Time = GetUTCTime()
params$log$current$Computation.Time = round(as.numeric(difftime(
params$log$current$End.Time, params$log$current$Start.Time, units = "secs")) -
params$log$current$Read.Time - params$log$current$Write.Time, 2)
params$log$current$Files.Sent = paste(files, collapse = ", ")
params$log$current$Bytes.Sent = sum(file.size(file.path(params$writePath, files)))
if (is.na(params$log$current$Bytes.Sent)) {
params$log$current$Bytes.Sent = 0
}
nrows = nrow(params$log$history)
if (nrows >= 3) {
params$log$current$Wait.Time =
round(as.numeric(difftime(
params$log$current$Start.Time,
max(params$log$history$End.Time[which(params$log$history$Party ==
params$log$current$Party)]),
units = "secs")), 2)
}
if (params$log$history$Party[nrows] == "") {
params$log$history = params$log$current
} else {
params$log$history = rbind(params$log$history, params$log$current)
}
return(params)
}
MergeLogRaw.kp = function(params, from) {
if (from == "AC") {
load(file.path(params$readPathAC, "log.rdata"))
key1 = paste0(params$log$history$Step, params$log$history$Party)
key2 = paste0(log$Step, log$Party)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$log$history = rbind(params$log$history, log)
} else if (length(idx) < length(key2)) {
params$log$history = rbind(params$log$history, log[-idx, ])
}
} else if (from == "DP1") {
load(file.path(params$readPathDP[1], "log.rdata"))
key1 = paste0(params$log$history$Step, params$log$history$Party)
key2 = paste0(log$Step, log$Party)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$log$history = rbind(params$log$history, log)
} else if (length(idx) < length(key2)) {
params$log$history = rbind(params$log$history, log[-idx, ])
}
} else {
for (id in 1:params$numDataPartners) {
if (id == params$dataPartnerID) next
load(file.path(params$readPathDP[id], "log.rdata"))
key1 = paste0(params$log$history$Step, params$log$history$Party)
key2 = paste0(log$Step, log$Party)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$log$history = rbind(params$log$history, log)
} else if (length(idx) < length(key2)) {
params$log$history = rbind(params$log$history, log[-idx, ])
}
}
}
idx = order(params$log$history$Step, params$log$history$Party)
params$log$history = params$log$history[idx, ]
return(params)
}
SummarizeLog.kp = function(params) {
writePath = params$writePath
log = params$log$history
WriteToLogSummary(c1 = "Analysis", c2 = params$analysis, writePath = writePath, append = FALSE)
if (!is.null(params$n)) WriteToLogSummary(c1 = "N", c2 = params$n, writePath = writePath)
for (i in 1:params$numDataPartners) {
if (is.null(params$pi)) {
p = 0
} else {
p = params$pi[i] - (i == 1) * (2 + (params$analysis == "cox"))
}
WriteToLogSummary(c1 = paste0("p", i), c2 = p, writePath = writePath)
}
WriteToLogSummary(writePath = writePath)
total.time.0 = 0
for (party in 0:params$numDataPartners) {
partyName = paste0("dp", party)
index = which(log$Party == partyName)
if (length(index) > 0) {
Start.Time = log$Start.Time[index[1]]
End.Time = log$End.Time[index[length(index)]]
Total.Time = round(as.numeric(difftime(End.Time, Start.Time, units = "secs")), digits = 2)
if (party == 0) { total.time.0 = Total.Time }
Reading.Time = sum(log$Read.Time[index])
Writing.Time = sum(log$Write.Time[index])
Computing.Time = sum(log$Computation.Time[index])
Waiting.Time = sum(log$Wait.Time[index])
Total.Time.HMS = ConvertSecsToHMS(Total.Time, timeOnly = TRUE)
Reading.Time.HMS = ConvertSecsToHMS(Reading.Time, timeOnly = TRUE)
Writing.Time.HMS = ConvertSecsToHMS(Writing.Time, timeOnly = TRUE)
Computing.Time.HMS = ConvertSecsToHMS(Computing.Time, timeOnly = TRUE)
Waiting.Time.HMS = ConvertSecsToHMS(Waiting.Time, timeOnly = TRUE)
Bytes.Read = sum(log$Read.Size[index])
Bytes.Written = sum(log$Write.Size[index])
WriteToLogSummary(c1 = paste(partyName, "Start Time"), c2 = Start.Time, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "End Time"), c2 = End.Time, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Run Time"), c2 = Total.Time,
c3 = Total.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Reading Time"), c2 = Reading.Time,
c3 = Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Bytes Read"), c2 = Bytes.Read, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Writing Time"), c2 = Writing.Time,
c3 = Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Bytes Written"), c2 = Bytes.Written, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Computing Time"), c2 = Computing.Time,
c3 = Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = paste(partyName, "Total Waiting Time"), c2 = Waiting.Time,
c3 = Waiting.Time.HMS, writePath = writePath)
WriteToLogSummary(writePath = writePath)
}
}
Total.Transfer.Time = 0
if (max(log$Step) > 1) {
for (i in 2:max(log$Step)) {
idx1 = which(log$Step == i - 1)
idx2 = which(log$Step == i)
Total.Transfer.Time = Total.Transfer.Time +
as.numeric(difftime(min(log$Start.Time[idx2]),
max(log$End.Time[idx1]), units = "secs"))
}
}
Total.Transfer.Time = round(Total.Transfer.Time, 2)
Elapsed.Computing.Time = total.time.0 - Total.Transfer.Time
Total.Reading.Time = sum(log$Read.Time)
Total.Writing.Time = sum(log$Write.Time)
Total.Computing.Time = sum(log$Computation.Time)
Total.Reading.Time.HMS = ConvertSecsToHMS(Total.Reading.Time, timeOnly = TRUE)
Total.Writing.Time.HMS = ConvertSecsToHMS(Total.Writing.Time, timeOnly = TRUE)
Total.Transfer.Time.HMS = ConvertSecsToHMS(Total.Transfer.Time, timeOnly = TRUE)
Total.Computing.Time.HMS = ConvertSecsToHMS(Total.Computing.Time, timeOnly = TRUE)
Elapsed.Computing.Time.HMS = ConvertSecsToHMS(Elapsed.Computing.Time, timeOnly = TRUE)
Total.Bytes.Transferred = sum(log$Bytes.Sent)
KB.Per.Second = round(Total.Bytes.Transferred / (Total.Transfer.Time * 1024), digits = 2)
WriteToLogSummary(c1 = "Total Reading Time", c2 = Total.Reading.Time,
c3 = Total.Reading.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Writing Time", c2 = Total.Writing.Time,
c3 = Total.Writing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Computing Time", c2 = Total.Computing.Time,
c3 = Total.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Elapsed Computing Time", c2 = Elapsed.Computing.Time,
c3 = Elapsed.Computing.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Transfer Time", c2 = Total.Transfer.Time,
c3 = Total.Transfer.Time.HMS, writePath = writePath)
WriteToLogSummary(c1 = "Total Bytes Transferred", c2 = Total.Bytes.Transferred, writePath = writePath)
WriteToLogSummary(c1 = "KB / Sec Transfer Rate", c2 = KB.Per.Second, writePath = writePath)
}
WriteTrackingTableRaw = function(params) {
trackingTable = params$trackingTable$history
save(trackingTable, file = file.path(params$writePath, "tr_tb_updt.rdata"))
return(params)
}
WriteTrackingTableCSV = function(params) {
write.csv(params$trackingTable$history, file.path(params$writePath, "dl_track_tbl.csv"),
row.names = FALSE)
return(params)
}
InitializeTrackingTable.2p = function(params) {
trackingTable = list()
trackingTable$current = data.frame(DP_CD = ifelse(params$partyName == "A", 0, 1),
MSREQID = params$msreqid,
RUNID = "dl",
ITER_NB = 0,
STEP_NB = 0,
START_DTM = GetUTCTime(),
END_DTM = GetUTCTime(),
CURR_STEP_IN = 0,
STEP_RETURN_CD = 0,
STEP_RETURN_MSG = "PASS",
REG_CONV_IN = 0,
REG_CONV_MSG = "",
LAST_ITER_IN = 0,
LAST_RUNID_IN = 0,
UTC_OFFSET_DISPLAY = GetUTCOffset(),
UTC_OFFSET_SEC = GetUTCOffsetSeconds(),
REGR_TYPE_CD = params$analysis
)
trackingTable$history = NA
params$trackingTable = trackingTable
return(params)
}
StoreTrackingTableEntry.2p = function(params) {
params$trackingTable$current$ITER_NB = params$pmnStepCounter
params$trackingTable$current$START_DTM = params$log$current$Start.Time
params$trackingTable$current$END_DTM = params$log$current$End.Time
if (file.exists(file.path(params$readPath, "errorMessage.rdata"))) {
load(file.path(params$readPath, "errorMessage.rdata"))
params$trackingTable$current$STEP_RETURN_MSG = message
} else if (file.exists(file.path(params$writePath, "errorMessage.rdata"))) {
load(file.path(params$writePath, "errorMessage.rdata"))
params$trackingTable$current$STEP_RETURN_MSG = message
}
params$trackingTable$current$REG_CONV_IN = ifelse(params$completed, 1, 0)
if (params$completed) {
params$trackingTable$current$REG_CONV_MSG = ifelse(params$converged, "Success", "Failed")
}
params$trackingTable$current$LAST_ITER_IN = ifelse(params$lastIteration, 1, 0)
if (params$partyName == "A") {
if (is.data.frame(params$trackingTable$history)) {
params$trackingTable$history = rbind(params$trackingTable$history,
params$trackingTable$current)
} else {
params$trackingTable$history = params$trackingTable$current
}
write.csv(params$trackingTable$history, file.path(params$writePath, "dl_track_tbl.csv"),
row.names = FALSE)
} else {
trackingTableEntry = params$trackingTable$current
save(trackingTableEntry, file = file.path(params$writePath, "tr_tb_updt.rdata"))
}
return(params)
}
ReadTrackingTableUpdate.2p = function(params) {
trackingTableEntry = NULL
load(file.path(params$readPath, "tr_tb_updt.rdata"))
trackingTableEntry$MSREQID = params$msreqid
if (is.data.frame(params$trackingTable$history)) {
params$trackingTable$history = rbind(params$trackingTable$history,
trackingTableEntry)
} else {
params$trackingTable$history = trackingTableEntry
}
return(params)
}
InitializeTrackingTable.3p = function(params) {
trackingTable = list()
trackingTable$current = data.frame(DP_CD = ifelse(params$partyName == "T", 0,
ifelse(params$partyName == "A", 1, 2)),
MSREQID = params$msreqid,
RUNID = "dl",
ITER_NB = 0,
STEP_NB = 0,
START_DTM = GetUTCTime(),
END_DTM = GetUTCTime(),
CURR_STEP_IN = 0,
STEP_RETURN_CD = 0,
STEP_RETURN_MSG = "PASS",
REG_CONV_IN = 0,
REG_CONV_MSG = "",
LAST_ITER_IN = 0,
LAST_RUNID_IN = 0,
UTC_OFFSET_DISPLAY = GetUTCOffset(),
UTC_OFFSET_SEC = GetUTCOffsetSeconds(),
REGR_TYPE_CD = params$analysis
)
trackingTable$history = NA
params$trackingTable = trackingTable
return(params)
}
StoreTrackingTableEntry.3p = function(params) {
params$trackingTable$current$ITER_NB = params$pmnStepCounter
params$trackingTable$current$START_DTM = params$log$current$Start.Time
params$trackingTable$current$END_DTM = params$log$current$End.Time
if (file.exists(file.path(params$writePath, "errorMessage.rdata"))) {
load(file.path(params$writePath, "errorMessage.rdata"))
params$trackingTable$current$STEP_RETURN_MSG = message
} else {
msg = ""
for (party in c("A", "B", "T")) {
if (!is.na(params$readPath[[party]]) &&
file.exists(file.path(params$readPath[[party]], "errorMessage.rdata"))) {
load(file.path(params$readPath[[party]], "errorMessage.rdata"))
msg = paste0(msg, message)
}
}
params$trackingTable$current$STEP_RETURN_MSG = msg
}
params$trackingTable$current$REG_CONV_IN = ifelse(params$completed, 1, 0)
if (params$completed) {
params$trackingTable$current$REG_CONV_MSG = ifelse(params$converged, "Success", "Failed")
}
params$trackingTable$current$LAST_ITER_IN = ifelse(params$lastIteration, 1, 0)
if (params$pmnStepCounter == 0) {
params$trackingTable$history = params$trackingTable$current
} else {
params$trackingTable$history = rbind(params$trackingTable$history,
params$trackingTable$current)
}
return(params)
}
MergeTrackingTableRAW.3p = function(params, from) {
trackingTable = NULL
for (party in from) {
load(file.path(params$readPath[[party]], "tr_tb_updt.rdata"))
key1 = paste0(params$trackingTable$history$ITER_NB,
params$trackingTable$history$DP_CD)
key2 = paste0(trackingTable$ITER_NB,
trackingTable$DP_CD)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable)
} else if (length(idx) < length(key2)) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable[-idx, ])
}
}
idx = order(params$trackingTable$history$START_DTM)
params$trackingTable$history = params$trackingTable$history[idx, ]
params$trackingTable$history$MSREQID = params$msreqid
return(params)
}
InitializeTrackingTable.kp = function(params) {
trackingTable = list()
trackingTable$current = data.frame(DP_CD = params$dataPartnerID,
MSREQID = params$msreqid,
RUNID = "dl",
ITER_NB = 0,
STEP_NB = 0,
START_DTM = GetUTCTime(),
END_DTM = GetUTCTime(),
CURR_STEP_IN = 0,
STEP_RETURN_CD = 0,
STEP_RETURN_MSG = "PASS",
REG_CONV_IN = 0,
REG_CONV_MSG = "",
LAST_ITER_IN = 0,
LAST_RUNID_IN = 0,
UTC_OFFSET_DISPLAY = GetUTCOffset(),
UTC_OFFSET_SEC = GetUTCOffsetSeconds(),
REGR_TYPE_CD = params$analysis
)
trackingTable$history = NA
params$trackingTable = trackingTable
return(params)
}
StoreTrackingTableEntry.kp = function(params) {
params$trackingTable$current$ITER_NB = params$pmnStepCounter
params$trackingTable$current$START_DTM = params$log$current$Start.Time
params$trackingTable$current$END_DTM = params$log$current$End.Time
if (file.exists(file.path(params$writePath, "errorMessage.rdata"))) {
load(file.path(params$writePath, "errorMessage.rdata"))
params$trackingTable$current$STEP_RETURN_MSG = message
} else {
msg = ""
for (id in 1:params$numDataPartners) {
if (!is.na(params$readPathDP[id]) &&
file.exists(file.path(params$readPathDP[id], "errorMessage.rdata"))) {
load(file.path(params$readPathDP[id], "errorMessage.rdata"))
msg = paste0(msg, message)
}
}
if (!is.na(params$readPathAC) &&
file.exists(file.path(params$readPathAC, "errorMessage.rdata"))) {
load(file.path(params$readPathAC, "errorMessage.rdata"))
msg = paste0(msg, message)
}
params$trackingTable$current$STEP_RETURN_MSG = msg
}
params$trackingTable$current$REG_CONV_IN = ifelse(params$completed, 1, 0)
if (params$completed) {
params$trackingTable$current$REG_CONV_MSG = ifelse(params$converged, "Success", "Failed")
}
params$trackingTable$current$LAST_ITER_IN = ifelse(params$lastIteration, 1, 0)
if (params$pmnStepCounter == 0) {
params$trackingTable$history = params$trackingTable$current
} else {
params$trackingTable$history = rbind(params$trackingTable$history,
params$trackingTable$current)
}
return(params)
}
MergeTrackingTableRAW.kp = function(params, from) {
trackingTable = NULL
if (from == "AC") {
load(file.path(params$readPathAC, "tr_tb_updt.rdata"))
key1 = paste0(params$trackingTable$history$ITER_NB,
params$trackingTable$history$DP_CD)
key2 = paste0(trackingTable$ITER_NB,
trackingTable$DP_CD)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable)
} else if (length(idx) < length(key2)) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable[-idx, ])
}
} else if (from == "DP1") {
load(file.path(params$readPathDP[1], "tr_tb_updt.rdata"))
key1 = paste0(params$trackingTable$history$ITER_NB,
params$trackingTable$history$DP_CD)
key2 = paste0(trackingTable$ITER_NB,
trackingTable$DP_CD)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable)
} else if (length(idx) < length(key2)) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable[-idx, ])
}
} else {
for (id in 1:params$numDataPartners) {
if (id == params$dataPartnerID) next
load(file.path(params$readPathDP[id], "tr_tb_updt.rdata"))
key1 = paste0(params$trackingTable$history$ITER_NB,
params$trackingTable$history$DP_CD)
key2 = paste0(trackingTable$ITER_NB,
trackingTable$DP_CD)
idx = which(key2 %in% key1)
if (length(idx) == 0) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable)
} else if (length(idx) < length(key2)) {
params$trackingTable$history =
rbind(params$trackingTable$history, trackingTable[-idx, ])
}
}
}
idx = order(params$trackingTable$history$START_DTM)
params$trackingTable$history = params$trackingTable$history[idx, ]
params$trackingTable$history$MSREQID = params$msreqid
return(params)
}
validFormula = function(expression) {
if (tryCatch({is.expression(expression); FALSE},
error = function(err) { TRUE })) {
return(FALSE)
}
vars = all.vars(expression)
names = all.names(expression)
res1 = all(names %in% c("~", "+", vars))
res2 = (sum(names %in% "~") == 1)
res3 = (names[1] == "~") & (names[2] %in% vars)
res4 = !(names[2] %in% names[3:length(names)])
res5 = vars[1] != "."
return(res1 & res2 & res3 & res4 & res5)
}
validFormula2 = function(expression) {
if (tryCatch({is.expression(expression); FALSE},
error = function(err) { TRUE })) {
return(FALSE)
}
vars = all.vars(expression)
names = all.names(expression)
res1 = all(names %in% c("~", "+", vars))
res2 = (sum(names %in% "~") == 1)
res3 = length(expression) == 2
return(res1 && res2 && res3)
}
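# Illustration of the two validators:
#   validFormula(y ~ x1 + x2)    # TRUE:  one '~', response not reused on the RHS
#   validFormula(y ~ y + x1)     # FALSE: response reappears among the covariates
#   validFormula2(~ x1 + x2)     # TRUE:  one-sided formulas only (length 2)
#   validFormula2(y ~ x1)        # FALSE: two-sided formula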
print.vdralinear = function(x, ...) {
if (x$failed) {
warning("Distributed linear regression failed. No results to print.")
return(invisible(NULL))
}
cat("Coefficients:\n")
print(x$coefficients, digits = 4)
return(invisible(NULL))
}
summary.vdralinear = function(object, ...) {
temp = list()
class(temp) = "summary.vdralinear"
temp$failed = object$failed
if (object$failed) {
return(temp)
}
temp$party = object$party
temp$coefficients = object$coefficients
temp$secoef = object$secoef
temp$tvals = object$tvals
temp$pvals = object$pvals
temp$rstderr = object$rstderr
temp$df2 = object$df2
temp$rsquare = object$rsquare
temp$adjrsquare = object$adjrsquare
temp$Fstat = object$Fstat
temp$df1 = object$df1
temp$Fpval = object$Fpval
return(temp)
}
print.summary.vdralinear = function(x, lion = FALSE, ...) {
arguments = list(...)
if (!is.null(arguments$lion) && is.logical(arguments$lion)) lion = arguments$lion
if (x$failed) {
warning("Distributed linear regression failed. No results to print.")
return(invisible(NULL))
}
x$stars = sapply(x$pvals, function(x) {
if (is.na(x)) ''
else if (x < 0.001) '***'
else if (x < 0.01) '**'
else if (x < 0.05) '*'
else if (x < 0.1) '.'
else ' '
})
temp = data.frame(formatStrings(names(x$party)),
formatStrings(x$party, minWidth = 5, justify = "centre"),
formatStatList(x$coefficients),
formatStatList(x$secoef),
formatStatList(x$tvals),
format.pval(x$pvals),
formatStrings(x$stars))
colnames(temp) = c("", "Party", "Estimate", "Std. Error", "t value", "Pr(>|t|)", "")
if (lion) {
temp = cbind(temp, GetLion(length(x$party)))
colnames(temp)[8] = ""
}
print(temp, row.names = FALSE, right = TRUE)
cat("---", "\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
cat("Residual standard error: ", formatStat(x$rstderr), "on", x$df2, "degrees of freedom\n")
cat("Multiple R-squared: ", formatStat(x$rsquare), ", Adjusted R-squared: ", formatStat(x$adjrsquare), "\n")
cat("F-statistic:", formatStat(x$Fstat), "on", x$df1, "and", x$df2, "DF, p-value:", format.pval(x$Fpval), "\n\n")
}
print.vdralogistic = function(x, ...) {
if (x$failed) {
warning("Distributed logistic regression failed. No results to print.")
return(invisible(NULL))
}
if (!x$converged) {
warning(paste("Warning: Distributed logistic regression did not converge in",
x$iter, "iterations. Reported statistics are approximate."))
}
cat("Coefficients:\n")
print(x$coefficients, digits = 4)
cat("\n")
cat("Degrees of Freedom:", x$nulldev_df, "Total (i.e. Null); ", x$resdev_df, "Residual\n")
cat("Null Deviance: ", formatStat(x$nulldev), "\n")
cat("Residual Deviance:", formatStat(x$resdev), " AIC:", formatStat(x$aic))
return(invisible(NULL))
}
summary.vdralogistic = function(object, ...) {
temp = list()
class(temp) = "summary.vdralogistic"
temp$failed = object$failed
temp$converged = object$converged
if (object$failed) {
return(temp)
}
temp$party = object$party
temp$coefficients = object$coefficients
temp$secoef = object$secoef
temp$tvals = object$tvals
temp$pvals = object$pvals
temp$nulldev = object$nulldev
temp$nulldev_df = object$nulldev_df
temp$resdev = object$resdev
temp$resdev_df = object$resdev_df
temp$aic = object$aic
temp$bic = object$bic
temp$iter = object$iter
return(temp)
}
print.summary.vdralogistic = function(x, lion = FALSE, ...) {
arguments = list(...)
if (!is.null(arguments$lion) && is.logical(arguments$lion)) lion = arguments$lion
if (x$failed) {
warning("Distributed logistic regression failed. No results to print.")
return(invisible(NULL))
}
if (!x$converged) {
warning(paste("Warning: Distributed logistic regression did not converge in",
x$iter, "iterations. Reported statistics are approximate."))
}
x$stars = sapply(x$pvals, function(x) {
if (is.na(x)) ''
else if (x < 0.001) '***'
else if (x < 0.01) '**'
else if (x < 0.05) '*'
else if (x < 0.1) '.'
else ' '
})
temp = data.frame(formatStrings(names(x$party)),
formatStrings(x$party, minWidth = 5, justify = "centre"),
formatStatList(x$coefficients),
formatStatList(x$secoef),
formatStatList(x$tvals),
format.pval(x$pvals),
formatStrings(x$stars))
colnames(temp) = c("", "Party", "Estimate", "Std. Error", "t value", "Pr(>|t|)", "")
if (lion) {
temp = cbind(temp, GetLion(length(x$party)))
colnames(temp)[8] = ""
}
print(temp, row.names = FALSE, right = TRUE)
cat("---", "\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
cat("(Dispersion parameter for binomial family taken to be 1)\n\n")
cat(" Null Deviance:", formatStat(x$nulldev), " on ", x$nulldev_df, " degrees of freedom\n")
cat("Residual deviance:", formatStat(x$resdev), " on ", x$resdev_df, " degrees of freedom\n")
cat("AIC:", formatStat(x$aic), "\n")
cat("BIC:", formatStat(x$bic), "\n\n")
cat("Number of Newton-Raphson iterations:", x$iter, "\n\n")
return(invisible(NULL))
}
print.vdracox = function(x, ...) {
if (x$failed) {
warning("Distributed Cox regression failed. No results to print.")
return(invisible(NULL))
}
if (!x$converged) {
warning(paste("Warning: Distributed Cox regression did not converge in",
x$iter, "iterations. Reported statistics are approximate."))
}
coeftab = data.frame(x$coefficients, x$expcoef, x$secoef, x$zvals, x$pvals)
colnames(coeftab) = c("coef", "exp(coef)", "se(coef)", "z", "p")
printCoefmat(coeftab, P.values = TRUE, has.Pvalue=TRUE, signif.stars = FALSE)
cat("\n")
cat(paste0("Likelihood ratio test=", formatStat(x$lrt[1])), "on",
x$df, paste0("df, p=", format.pval(x$lrt[2])), "\n")
cat("n=", paste0(x$n, ","), "number of events=", x$nevent, "\n\n")
return(invisible(NULL))
}
summary.vdracox = function(object, ...) {
temp = list()
class(temp) = "summary.vdracox"
temp$failed = object$failed
temp$converged = object$converged
if (object$failed) {
return(temp)
}
temp$party = object$party
temp$coefficients = object$coefficients
temp$expcoef = object$expcoef
temp$secoef = object$secoef
temp$zval = object$zval
temp$pvals = object$pvals
temp$expncoef = object$expncoef
temp$lower = object$lower
temp$upper = object$upper
temp$n = object$n
temp$nevent = object$nevent
temp$concordance = object$concordance
temp$rsquare = object$rsquare
temp$lrt = object$lrt
temp$df = object$df
temp$wald.test = object$wald.test
temp$score = object$score
temp$iter = object$iter
return(temp)
}
print.summary.vdracox = function(x, lion = FALSE, ...) {
arguments = list(...)
if (!is.null(arguments$lion) && is.logical(arguments$lion)) lion = arguments$lion
if (x$failed) {
warning("Distributed Cox regression failed. No results to print.")
return(invisible(NULL))
}
if (!x$converged) {
warning(paste("Warning: Distributed Cox regression did not converge in",
x$iter, "iterations. Reported statistics are approximate."))
}
x$stars = sapply(x$pvals, function(x) {
if (is.na(x)) ''
else if (x < 0.001) '***'
else if (x < 0.01) '**'
else if (x < 0.05) '*'
else if (x < 0.1) '.'
else ' '
})
temp1 = data.frame(formatStrings(names(x$party)),
formatStrings(x$party, minWidth = 5, justify = "centre"),
formatStatList(x$coefficients),
formatStatList(x$expcoef),
formatStatList(x$secoef),
formatStatList(x$zval),
format.pval(x$pvals),
formatStrings(x$stars))
colnames(temp1) = c("", "party", " coef", "exp(coef)", "se(coef)", " z", "Pr(>|z|)", "")
temp2 = data.frame(formatStrings(names(x$party)),
formatStrings(x$party, minWidth = 5, justify = "centre"),
formatStatList(x$expcoef),
formatStatList(x$expncoef),
formatStatList(x$lower),
formatStatList(x$upper))
colnames(temp2) = c("", "party", "exp(coef)", "exp(-coef)", "lower .95", "upper .95")
cat(" n=", paste0(x$n, ","), "number of events=", x$nevent, "\n\n")
print(temp1, row.names = FALSE, right = TRUE)
cat("---\n")
cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n")
print(temp2, row.names = FALSE, right = TRUE)
cat("\n")
if (!is.na(x$concordance[5])) {
cat("Concordance=", formatStat(x$concordance[5]), "(se =",formatStat(x$concordance[6]), ")\n")
}
cat("Likelihood ratio test=", formatStat(x$lrt[1]),
"on", x$df,
"df,", "p=", format.pval(x$lrt[2]), "\n")
cat("Wald test =", formatStat(x$wald.test[1]),
"on", x$df,
"df, p=", format.pval(x$wald.test[2]), "\n")
cat("Score test =", formatStat(x$score[1]),
"on", x$df,
"df, p=", format.pval(x$score[2]), "\n\n")
cat("Number of Newton-Raphson iterations:", x$iter, "\n\n")
}
differentModel = function(formula = NULL, x = NULL) {
if (class(x) != "vdralinear") {
warning("This function can only be on objects of class vdralinear. Returning original model.")
return(invisible(x))
}
if (x$failed) {
warning("Distributed linear regression failed. Cannot compute a different model.")
return(invisible(x))
}
if (max(table(names(x$party))) > 1) {
warning("Duplicate variable names exist. All variable names must be unique. Returning original model.")
return(invisible(x))
}
if (!validFormula(formula)) {
warning("Invalid formula, returning original model.")
return(x)
}
valid_names = c(colnames(x$xty), colnames(x$xtx)[-1])
responseName = all.vars(formula)[1]
covariateNames = all.vars(formula)[-1]
variableNames = all.vars(formula)
variableNames = variableNames[which(variableNames != ".")]
if (!all(variableNames %in% valid_names)) {
vars = variableNames[!(variableNames %in% valid_names)]
if (length(vars) == 1) {
warning("Variable ", vars, " not found. Returning original model.")
} else {
warning("Variables ", paste(vars, collapse = ", "),
" not found. Returning original model.")
}
}
return(invisible(x))
}
if ("." %in% covariateNames) {
covariateNames = valid_names[which(valid_names != responseName)]
}
xytxy = rbind(cbind(x$yty, t(x$xty)), cbind(x$xty, x$xtx))
scramble = c(2, 1, 3:ncol(xytxy))
xytxy[scramble, scramble] = xytxy
all_names = c(colnames(x$xty), colnames(x$xtx))[scramble]
colnames(xytxy) = all_names
rownames(xytxy) = all_names
responseIndex = match(responseName, all_names)
covariateIndex = c(1, match(covariateNames, all_names))
xtx = xytxy[covariateIndex, covariateIndex]
xty = matrix(xytxy[covariateIndex, responseIndex], ncol = 1)
yty = xytxy[responseIndex, responseIndex]
means = c(x$meansy, x$means)[scramble][covariateIndex]
meansy = c(x$meansy, x$means)[scramble][responseIndex]
nrow = nrow(xtx)
indices = c(1)
for (i in 2:nrow) {
tempIndices = c(indices, i)
if (rcond(xtx[tempIndices, tempIndices]) > 10 * .Machine$double.eps) {
indices = c(indices, i)
}
}
xtx.old = xtx
xty.old = xty
xtx = xtx[indices, indices]
xty = matrix(xty[indices, 1], ncol = 1)
means.old = means
means = means[indices]
p = length(indices)
n = x$n
invxtx = solve(xtx)
betas = drop(invxtx %*% xty)
numCovariates = p - 1
sse = max(drop(yty - 2 * t(xty) %*% betas + (t(betas) %*% xtx) %*% betas), 0)
rstderr = drop(sqrt(sse / (n - numCovariates - 1)))
sst = drop(yty - meansy^2 * n)
ssr = sst - sse
df1 = numCovariates
df2 = n - numCovariates - 1
if (sse == 0) {
Fstat = Inf
} else {
Fstat = (ssr / df1) / (sse / df2)
}
Fpval = pf(Fstat, df1, df2, lower.tail = FALSE)
if (sse == 0) {
Rsq = 1
} else {
Rsq = drop(1 - sse / sst)
}
adjRsq = drop(1 - (n - 1) / (n - numCovariates - 1) * (1 - Rsq))
if (rstderr == 0) {
tvals = rep(Inf, numCovariates + 1)
} else {
tvals = betas / (rstderr * sqrt(diag(invxtx)))
}
secoef = tvals^-1 * betas
pvals = 2 * pt(abs(tvals), n - numCovariates - 1, lower.tail = FALSE)
stars = matrix(sapply(pvals, function(x) {
if (is.na(x)) ''
else if (x < 0.001) '***'
else if (x < 0.01) '**'
else if (x < 0.05) '*'
else if (x < 0.1) '.'
else ' '
}))
y = list()
class(y) = "vdralinear"
y$failed = x$failed
y$converged = x$converged
y$party = c(x$responseParty, x$party)[scramble][covariateIndex]
y$responseParty = c(x$responseParty, x$party)[scramble][responseIndex]
p1 = length(covariateIndex)
y$coefficients = rep(NA, p1)
y$tvals = rep(NA, p1)
y$secoef = rep(NA, p1)
y$pvals = rep(NA, p1)
y$sse = sse
y$coefficients[indices] = betas
y$tvals[indices] = tvals
y$secoef[indices] = secoef
y$pvals[indices] = pvals
y$rstderr = rstderr
y$rsquare = Rsq
y$adjrsquare = adjRsq
y$Fstat = Fstat
y$Fpval = Fpval
y$df1 = df1
y$df2 = df2
y$n = x$n
y$xtx = xtx.old
y$xty = xty.old
y$yty = yty
y$meansy = meansy
y$means = means.old
names.old = all_names[covariateIndex]
names(y$party) = names.old
names(y$coefficients) = names.old
names(y$secoef) = names.old
names(y$tvals) = names.old
names(y$pvals) = names.old
colnames(y$xtx) = names.old
rownames(y$xtx) = names.old
colnames(y$xty) = responseName
rownames(y$xty) = names.old
return(invisible(y))
}
HoslemInternal = function(x, data = NULL, nGroups = 10){
n = x$n
if (nGroups <= 0) {
nGroups = 10
}
if (nGroups > n) {
nGroups = n
}
if (is.null(data)) {
Y = x$Y
} else {
Y = data$Y
}
pi_ = exp(x$FinalFitted) / (1 + exp(x$FinalFitted))
uq = unique(quantile(pi_, probs = seq(0, 1, 1 / nGroups)))
group_ = cut(pi_, breaks = uq, include.lowest = TRUE)
dd = data.frame(y = Y[order(pi_)], pi_ = sort(pi_),
group = group_[order(pi_)])
e1 = by(dd, dd$group, function(x) sum(x$pi_))
o1 = by(dd, dd$group, function(x) sum(x$y))
gn = table(dd$group)
e0 = gn - e1
o0 = gn - o1
testStat = 0
for (i in 1:length(e1)) {
if (o0[i] == e0[i]) {
temp1 = 0
} else {
temp1 = (o0[i] - e0[i])^2 / e0[i]
}
if (o1[i] == e1[i]) {
temp2 = 0
} else {
temp2 = (o1[i] - e1[i])^2 / e1[i]
}
testStat = testStat + temp1 + temp2
}
df = nGroups - 2
rtrn = c(testStat, df, 1 - pchisq(testStat, df))
names(rtrn) = c("Chi-sq", "DF", "p-value")
return(rtrn)
}
print.hoslemdistributed = function(x, ...) {
cat("Hosmer and Lemeshow goodness of fit (GOF) test\n",
" Chi-squared:", x$hoslem[1], "with DF",
paste0(x$hoslem[2],","), " p-value:", x$hoslem[3], "\n")
}
HoslemTest = function(x = NULL, nGroups = 10) {
if (class(x) != "vdralogistic") {
warning("Cannot perform test on non vdralogistic object.")
return(invisible(NULL))
}
if (!(x$converged)) {
warning("Process did not converge. Cannot perform Hosmer and Lemeshow goodness of fit test.")
return(invisible(NULL))
}
if (is.null(x$Y) || is.null(x$FinalFitted)) {
warning("HoslemTest can only be invoked by the party which holds the response.")
return(invisible(NULL))
} else if (is.numeric(nGroups)) {
temp = list()
class(temp) = "hoslemdistributed"
temp$hoslem = HoslemInternal(x, nGroups = nGroups)
return(temp)
}
}
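# Usage sketch (hypothetical converged fit held by the response party):
#   fit <- ...                       # a 'vdralogistic' object with Y and FinalFitted
#   HoslemTest(fit, nGroups = 10)    # returns c("Chi-sq", "DF", "p-value")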
RocInternal = function(x, data = NULL, bins = 500){
if (is.null(data)) {
Y = x$Y
} else {
Y = data$Y
}
if (bins < 2) bins = 2
positive = sum(Y)
negative = length(Y) - positive
pi_ = exp(x$FinalFitted) / (1 + exp(x$FinalFitted))
threshold = seq(0, 1, length.out = bins)
rtrn = matrix(NA, bins, 2)
oldX = 1
oldY = 1
AUC = 0
for (i in 1:bins) {
newX = 1 - sum(Y == 0 & pi_ < threshold[i]) / negative
newY = sum(Y & pi_ >= threshold[i]) / positive
rtrn[i, 1] = newX
rtrn[i, 2] = newY
AUC = AUC + oldY * (oldX - newX)
oldX = newX
oldY = newY
}
temp = list()
temp$roc = rtrn
temp$auc = AUC
return(temp)
}
print.rocdistributed = function(x, ...) {
rtrn = x$roc
plot(rtrn[, 1], rtrn[, 2], xaxt = "n", yaxt = "n",
xlim = c(-0.2, 1.2), ylim = c(0, 1),
type = "s", ylab = "Sensitivity", xlab = "1 - Specificity", col = "blue",
main = "ROC Curve")
axis(side = 1, at = seq(0, 1, 0.2))
axis(side = 2, at = seq(0, 1, 0.2))
lines(x = c(0, 1), y = c(0, 1), col = "limegreen")
text(0.8, 0.05, paste("Area under the curve:",
format(x$auc, digits = 4)))
}
RocTest = function(x = NULL, bins = 10) {
if (class(x) != "vdralogistic") {
warning("Cannot create ROC on non vdralogistic object.")
return(invisible(NULL))
}
if (!x$converged) {
warning("Process did not converge. Cannot generate ROC.")
return(invisible(NULL))
}
if (is.null(x$Y) || is.null(x$FinalFitted)) {
warning("RocTest can only be invoked by the party which holds the response.")
return(invisible(NULL))
} else if (is.numeric(bins)) {
temp = list()
temp = RocInternal(x, bins = bins)
class(temp) = "rocdistributed"
return(temp)
}
}
GetColors = function(n) {
color = matrix(0, 6, 3)
color[1, ] = c(0.000, 0.000, 1.000)
color[2, ] = c(0.627, 0.125, 0.941)
color[3, ] = c(1.000, 0.000, 0.000)
color[4, ] = c(1.000, 0.647, 0.000)
color[5, ] = c(0.000, 1.000, 0.000)
if (n == 1) {
return(rgb(0, 0, 0))
}
if (n <= 3) {
cols = c(rgb(color[1, 1], color[1, 2], color[1, 3]),
rgb(color[2, 1], color[2, 2], color[2, 3]),
rgb(color[3, 1], color[3, 2], color[3, 3]))
return(cols[1:n])
}
cols = c()
for (i in 1:n) {
idx = 4 * (i - 1) / (n - 1) + 1
idx1 = floor(idx)
idx2 = ceiling(idx)
dx = idx - idx1
tcol = color[idx1, ] + (color[idx2, ] - color[idx1, ]) * dx
cols = c(cols, rgb(tcol[1], tcol[2], tcol[3]))
}
return(cols)
}
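# GetColors(): 1 colour -> black; 2-3 colours -> the fixed blue/purple/red
# triple; for n >= 4 the palette interpolates linearly between the five
# anchors (blue, purple, red, orange, green), e.g. GetColors(5) returns the
# anchors themselves.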
plot.survfitDistributed = function(x, merge = FALSE, ...) {
n = length(x$strata)
maxTime = max(x$time)
labels = names(x$strata)
arguments = list(...)
arguments$x = 1
arguments$type = "n"
if (is.null(arguments$ylim)) arguments$ylim = c(0, 1)
if (is.null(arguments$xlim)) arguments$xlim = c(0, maxTime)
if (is.null(arguments$xlab)) arguments$xlab = "Time"
if (is.null(arguments$ylab)) arguments$ylab = "Percent Survival"
if (is.null(arguments$main)) arguments$main = "Survival Curve"
if (merge) {
do.call("plot", arguments)
cols = GetColors(n)
start = 1
for (i in 1:n) {
end = start + x$strata[i] - 1
lines(c(1, x$surv[start:end]) ~ c(0, x$time[start:end]), type = "s", col = cols[i])
start = end + 1
}
legend("bottomleft", legend = labels, col = cols, lty = 1)
} else {
cols = GetColors(1)
start = 1
for (i in 1:n) {
end = start + x$strata[i] - 1
do.call("plot", arguments)
lines(c(1, x$surv[start:end]) ~ c(0, x$time[start:end]), type = "s", col = cols)
legend("bottomleft", legend = labels[i], col = cols, lty = 1)
start = end + 1
}
}
}
print.survfitDistributed = function(x, ...) {
start = 1
events = integer(length(x$strata))
for (i in 1:length(x$strata)) {
end = start + x$strata[i] - 1
events[i] = sum(x$n.event[start:end])
start = end + 1
}
df = data.frame(n = x$n, events = events)
row.names(df) = names(x$strata)
print(df)
}
survfitDistributed.stats = function(x) {
surv = list()
surv$n = x$strata$end - x$strata$start + 1
for (i in 1:nrow(x$strata)) {
start = x$strata$start[i]
end = x$strata$end[i]
idx = which(c(1, diff(x$survival$rank[start:end])) != 0)
temp0 = table(x$survival$rank[start:end], x$survival$status[start:end])
if (ncol(temp0) == 1) {
if (which(c(0, 1) %in% colnames(temp0)) == 1) {
temp0 = cbind(temp0, 0)
colnames(temp0) = c("0", "1")
} else {
temp0 = cbind(0, temp0)
colnames(temp0) = c("0", "1")
}
}
surv$time = c(surv$time, x$survival$rank[start:end][idx])
surv$n.risk = c(surv$n.risk, rev(cumsum(rev(temp0[, 1] + temp0[, 2]))))
surv$n.event = c(surv$n.event, temp0[, 2])
surv$n.censor = c(surv$n.censor, temp0[, 1])
surv$strata = c(surv$strata, length(idx))
surv$surv = c(surv$surv, x$survival$surv[start:end][idx])
}
names(surv$n.risk) = NULL
names(surv$n.event) = NULL
names(surv$n.censor) = NULL
names(surv$strata) = x$strata$label
surv$type = "right"
class(surv) = "survfitDistributed"
return(invisible(surv))
}
survfitDistributed.formula = function(x, formula, data) {
surv = list()
vars = all.vars(formula)
if ("." %in% vars) {
warning("This function does not allow the . symbol in formulas.")
return(invisible(NULL))
}
if (!all(vars %in% colnames(data))) {
warning("Not all strata are found in the data.")
return(invisible(NULL))
}
if (length(vars) == 0) {
data = data.frame(const__ = rep(1, length(x$survival$rank)))
} else {
idx = which(colnames(data) %in% vars)
data = data[x$survival$sorted, idx, drop = FALSE]
}
sorted = do.call("order", as.data.frame(cbind(data, x$survival$rank, x$survival$status)))
data = data[sorted, , drop = FALSE]
rank = x$survival$rank[sorted]
status = x$survival$status[sorted]
data2 = matrix(0, nrow = nrow(data), ncol = ncol(data))
legend = list()
colnames(data2) = colnames(data)
for (i in 1:ncol(data)) {
levels = levels(as.factor(data[, i]))
legend[[colnames(data)[i]]] = levels
data2[, i] = sapply(data[, i], function(x) { which(levels %in% x)})
}
ranks = which(apply(abs(apply(data2, 2, diff)), 1, sum) > 0)
ranks = c(ranks, nrow(data2))
start = 1
for (i in 1:length(ranks)) {
end = ranks[i]
surv$n = c(surv$n, end - start + 1)
rank2 = rank[start:end]
event2 = status[start:end]
temp = table(rank2)
M = length(temp)
temp0 = table(rank2, event2)
if (ncol(temp0) == 1) {
if (which(c(0, 1) %in% colnames(temp0)) == 1) {
temp0 = cbind(temp0, 0)
colnames(temp0) = c("0", "1")
} else {
temp0 = cbind(0, temp0)
colnames(temp0) = c("0", "1")
}
}
idx = which(temp0[, 2] > 0)
if (temp0[nrow(temp0), 2] == 0) idx = c(idx, nrow(temp0))
nfails = temp0[idx, 2]
start0 = c(1, (cumsum(temp)[1:(M - 1)] + 1))[idx]
start1 = start0 + temp0[idx, 1]
stop1 = start1 + nfails - 1
final = length(rank2)
S = 1
t2 = rep(0, length(nfails))
S2 = rep(0, length(nfails))
for (j in 1:length(nfails)) {
n = final - start0[j] + 1
d = stop1[j] - start1[j] + 1
S = S * (n - d) / n
t2[j] = rank2[start0[j]]
S2[j] = S
}
surv$time = c(surv$time, t2)
surv$n.risk = c(surv$n.risk, rev(cumsum(rev(temp0[, 1] + temp0[, 2]))))
surv$n.event = c(surv$n.event, temp0[, 2])
surv$n.censor = c(surv$n.censor, temp0[, 1])
surv$surv = c(surv$surv, S2)
surv$strata = c(surv$strata, length(idx))
if (length(vars) == 0) {
names(surv$strata)[i] = ""
} else {
label = ""
for (j in 1:ncol(data)) {
temp = colnames(data)[j]
label = paste0(label, temp, "=", legend[[temp]][data2[start, j]])
if (j < ncol(data)) {
label = paste0(label, ", ")
}
}
names(surv$strata)[i] = label
}
start = end + 1
}
surv$type = "right"
names(surv$n.risk) = NULL
names(surv$n.event) = NULL
names(surv$n.censor) = NULL
class(surv) = "survfitDistributed"
return(invisible(surv))
}
survfitDistributed = function(x = NULL, formula = NULL, data = NULL) {
if (class(x) != "vdracox") {
warning("The first parameter must be a vdracox object.")
return(invisible(NULL))
}
if (is.null(data) && is.null(formula)) {
return(survfitDistributed.stats(x))
}
if (!("matrix" %in% class(data)) && !("data.frame" %in% class(data))) {
warning(paste("the data must either be a matrix or a data.frame.",
"Please use the same data that you used for the distributed regression."))
return(invisible(NULL))
}
if (!inherits(formula, "formula") || !validFormula2(formula)) {
warning(paste("The formula must be of the form \"~ var1 + ... + vark\" where the variables",
"are found in the data. The formula can also be \"~ 1\"."))
return(invisible(NULL))
}
return(survfitDistributed.formula(x, formula, data))
}
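# Dispatch sketch (hypothetical 'fit' from a distributed Cox run):
#   survfitDistributed(fit)                      # curves from the stored statistics
#   survfitDistributed(fit, ~ treatment, data)   # re-stratify with the original data
#   survfitDistributed(fit, ~ 1, data)           # a single pooled curve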
library(mlsjunkgen)
mat <- matrix(0.95516)
mat2 <- matrix(c(0.95516, 0.66908, 0.21235, 0.34488, 0.11995, 0.56398, 0.59235,
0.11432, 0.33525, 0.70271, 0.41810, 0.31337, 0.91985, 0.37872,
0.28042), nrow = 5, ncol = 3)
test_that("mlsjunkgenm returns the correct matrix", {
expect_equal(mlsjunkgenm(w = 1, x = 2, y = 3, z = 4), mat)
expect_equal(mlsjunkgenm(nrow = 5, ncol = 3, w = 1, x = 2, y = 3, z = 4), mat2)
expect_error(mlsjunkgenm(nrow = "X", ncol = 1, w = 1, x = 2, y = 3, z = 4), "Invalid input. Please ensure nrow and ncol are numeric.")
expect_error(mlsjunkgenm(w = "X", x = 2, y = 3, z = 4), "Invalid input. Please ensure all seeds are numeric.")
})
library(rnn)
sample_dim <- 9
time_dim <- 200
X <- data.frame()
Y <- data.frame()
bias_phase <- rnorm(sample_dim)
bias_frequency <- runif(sample_dim, min = 5, max = 25)
for(i in seq(sample_dim)){
X <- rbind(X,sin(seq(time_dim)/bias_frequency[i]+bias_phase[i])+rnorm(time_dim,mean=0,sd=0.2))
Y <- rbind(Y,cos(seq(time_dim)/bias_frequency[i]+bias_phase[i])+rnorm(time_dim,mean=0,sd=0.2))
}
X <- as.matrix(X)
Y <- as.matrix(Y)
X <- (X-min(X))/(max(X)-min(X))
Y <- (Y-min(Y))/(max(Y)-min(Y))
model <- trainr(Y = Y[seq(sample_dim-2),], X = X[seq(sample_dim-2),],
                learningrate = 0.05, hidden_dim = c(16), numepochs = 500,
                batch_size = 1, momentum = 0, learningrate_decay = 1)
layout(cbind(seq(sample_dim-2),c((sample_dim-1):sample_dim,rep(sample_dim+1,sample_dim-4))))
par(mar=c(1.5,2,1,1),xaxt="s",yaxt="s",mgp=c(1.5,0.5,0),oma=c(0,0,4,0))
for(i in seq(sample_dim)){
plot(X[i,],type="l",col="green",ylim=c(0,1),xlab="",ylab="")
par(new = TRUE)
plot(Y[i,],type="l",ylim=c(0,1),xlab="",ylab="")
par(new = TRUE)
plot(predictr(model,X)[i,],type="l",ylim=c(0,1),col="red",xlab="",ylab="")
}
plot(colMeans(model$error),type="l",xlab="",ylab="",xlim=c(1,500))
title(main="Left: Training time series - Right: Test time series and learning curve
Green: X, noisy cosinus - Black: Y, noisy sinus - Red: network prediction
The network learns to represent the bias in phasis and frequencies",outer=T) |
fun_cov_parallel <- function(b, theta, var.h, arglist, cl) {
n <- arglist$n
length_b <- length(b)
profile_hess <- matrix(NA, length_b, length_b)
bone <- matrix(NA, nrow = length_b*(length_b+1)/2, ncol = 2)
com_ele <- 1
for ( ej in 1:length_b) {
for (ek in 1:ej) {
bone[com_ele,] <- c(ej, ek)
com_ele <- com_ele + 1
}
}
h <- var.h * n^(-1/2)
if (is.null(cl)) {
vbone <- t(apply(bone, 1, fun_covij, b = b, length_b = length_b, h = h, arglist = arglist))
} else {
lbone <- split(bone, row(bone))
if (inherits(cl, "cluster")) {
parallel_fun <- if (isTRUE(getOption("pboptions")$use_lb)) parLapplyLB else parLapply
vbone0 <- parallel_fun(cl, lbone, fun_covij, b = b, length_b = length_b, h = h, arglist = arglist)
} else {
vbone0 <- mclapply(lbone, fun_covij, b = b, length_b = length_b, h = h, arglist = arglist)
}
vbone <- t(sapply(vbone0, function(x) x))
}
for (i in 1:nrow(bone)) {
profile_hess[vbone[i,1], vbone[i,2]] <- vbone[i,3]
}
profile_hess[upper.tri(profile_hess)] <- t(profile_hess)[upper.tri(profile_hess)]
vbeta <- abeta <- numeric(length_b)
abeta[abs(b) > 0] <- 1/abs((b[abs(b) > 0])^2)
abeta[b == 0] <- 10e10
part1 <- profile_hess + diag(n*theta*abeta)
inv_part1 <- solve(part1)
vbeta[abs(b) > 0] <- 1/abs((b[abs(b) > 0])^2)
vbeta[b == 0] = 0.0
part2 <- profile_hess + diag(n*theta*vbeta)
inv_hess <- solve(profile_hess)
cov <- inv_part1 %*% part2 %*% inv_hess %*% part2 %*% inv_part1
return(cov)
}
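# The tail of fun_cov_parallel assembles a sandwich-type covariance
#   inv(A) %*% B %*% inv(H) %*% B %*% inv(A)
# with H the numerically profiled Hessian filled in above,
# A = H + n*theta*diag(abeta) (zero coefficients get a ~1e11 penalty) and
# B = H + n*theta*diag(vbeta) (the penalty is dropped for zero coefficients).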
translude <- function (colors, alpha = 0.6) {
L <- pmax(length(colors), length(alpha))
colors <- rep(colors, length.out = L)
alpha <- rep(alpha, length.out = L)
rgb <- as.matrix(col2rgb(colors)/255)
rgb(red = rgb["red", ], green = rgb["green", ],
blue = rgb["blue", ], alpha = alpha)
}
parNamesSymm <- function(nlev) {
if (nlev == 1L) return(character(0))
mat <- matrix(NA, nrow = nlev, ncol = nlev)
a <- col(mat)[upper.tri(mat)]
b <- row(mat)[upper.tri(mat)]
paste("theta", a, b, sep = "_")
}
checkPar <- function(value, parN, parNames, default) {
if (is.null(value)) {
value <- rep(default, parN)
}
if (length(value) != parN) {
stop("value must have length ", parN, " with values ",
" for ", parNames)
}
if (!is.null(nm <- names(value))) {
if (!setequal(nm, parNames)) {
stop("bad names provided for value")
}
value <- value[match(parNames, nm)]
}
names(value) <- parNames
value
}
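# e.g. checkPar(c(b = 2, a = 1), parN = 2, parNames = c("a", "b"), default = 0)
# returns c(a = 1, b = 2): named input is reordered to follow parNames.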
contr.helmod <- function(n) {
A <- stats::contr.helmert(n = n)
n1 <- n - 1L
norm <- sqrt((1:n1)^2 + 1L:n1)
sweep(A, MARGIN = 2L, STATS = norm, FUN = "/")
}
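# contr.helmod() rescales the Helmert contrasts to unit Euclidean norm:
# column k of contr.helmert(n) holds k entries of -1 and one entry of k,
# hence squared norm k^2 + k, which is exactly what 'norm' divides out.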
symIndices <- function(n, diag = FALSE) {
if (diag) stop("'diag = TRUE' not implemented yet")
j <- rep.int(1L:(n - 1L), times = (n - 1L):1L)
i <- sequence((n - 1L):1L) + j
kL <- (j - 1L) * n + i
kU <- (i - 1L) * n + j
list(i = i, j = j, kL = kL, kU = kU)
}
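# Worked example for n = 3 (strict lower triangle, column-wise):
#   j  = c(1, 1, 2)
#   i  = c(2, 3, 3)
#   kL = (j - 1) * n + i = c(2, 3, 6)   # linear indices of the lower triangle
#   kU = (i - 1) * n + j = c(4, 7, 8)   # the transposed (upper) positions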
optimMethods <- function(optimMethod = NULL,
optimFun = c("both", "nloptr::nloptr",
"stats::optim")) {
name <- NULL
optimFun <- match.arg(optimFun)
if (optimFun != "nloptr::nloptr") {
oMO <- eval(formals(optim)$method)
globLocO <- rep("L", length(oMO))
derNoO <- rep("N", length(oMO))
names(globLocO) <- names(derNoO) <- oMO
globLocO["SANN"] <- "G"
derNoO[c("BFGS", "L-BFGS-B", "CG")] <- "P"
} else {
oMO <- globLocO <- derNoO <- character(0)
}
if (optimFun != "stats::optim") {
nO <- nloptr.get.default.options()
oMN <- subset(nO, name == "algorithm")$possible_values
oMN <- strsplit(oMN, ", ")[[1]]
GLND <- regmatches(oMN, regexpr("[LG][DN]", oMN))
globLocN <- substr(GLND, start = 1, stop = 1)
derNoN <- substr(GLND, start = 2, stop = 2)
} else {
oMN <- globLocN <- derNoN <- character(0)
}
df <- data.frame(optimFun = c(rep("stats::optim", length(oMO)),
rep("nloptr::nloptr", length(oMN))),
optimMethod = c(oMO, oMN),
globLoc = c(globLocO, globLocN),
derNo = c(derNoO, derNoN),
stringsAsFactors = FALSE)
if (!is.null(optimMethod)) {
ind <- grep(tolower(optimMethod), tolower(df$optimMethod))
df <- df[ind, ]
}
df
}
parseGroupList <- function(groupList, prefix = "gr", sep = "/") {
flat <- unlist(groupList)
if (any(duplicated(flat))) {
stop("'groupList' contains duplicated elements")
}
if (is.null(names(groupList)) || any(names(groupList) == "")) {
ng <- paste0(prefix, seq_along(groupList))
} else {
ng <- names(groupList)
}
lg <- sapply(groupList, length)
group <- character(0)
nestedLevels <- character(0)
for (i in seq_along(lg)) {
group <- c(group, rep(ng[i], lg[i]))
nestedLevels <-
c(nestedLevels, paste(ng[i], groupList[[i]], sep = sep))
}
list(group = group,
nestedLevels = nestedLevels,
levels = flat)
}
covAsVec <- function(par, object, X) {
coef(object) <- par
C <- covMat(object = object, X = X, compGrad = TRUE)
grad <- attr(C, "gradient")
C <- as.vector(C)
dim(grad) <- c(length(C), length(par))
attr(C, "gradient") <- grad
C
}
covAsVec2 <- function(xNew, object, X) {
C <- covMat(object = object, X = X, Xnew = xNew,
deriv = TRUE)
der <- attr(C, "der")
C <- as.vector(C)
dim(der) <- c(length(C), object@d)
attr(C, "der") <- der
C
}
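# Both helpers flatten covMat() output so callers can treat the kernel as a
# plain vector-valued function: covAsVec() returns the covariances with a
# length(C) x length(par) Jacobian in attr(, "gradient"); covAsVec2() does
# the same for derivatives w.r.t. the new design points in attr(, "der").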
x_mat <- matrix(c(.1, .1, .3,
.32, .4, .1,
0, 0, 0), ncol = 3, byrow = TRUE)
y_mat <- matrix(c(.1, .1, .3,
.4, .29, .1,
0, 0, 0), ncol = 3, byrow = TRUE)
test_that(".factor_corres works", {
expect_equal(.factor_corres(x_mat, y_mat)$diff_corres, 1)
expect_equal(.factor_corres(y_mat, x_mat)$diff_corres_cross, 1)
expect_equal(.factor_corres(x_mat,
matrix(0, ncol = 3, nrow = 3))$diff_corres, 2)
expect_equal(.factor_corres(x_mat,
matrix(0, ncol = 3, nrow = 3))$diff_corres_cross, 2)
expect_equal(.factor_corres(matrix(0, ncol = 3, nrow = 3),
x_mat)$diff_corres, 2)
expect_equal(.factor_corres(matrix(0, ncol = 3, nrow = 3),
x_mat)$diff_corres_cross, 2)
expect_equal(.factor_corres(x_mat, x_mat)$diff_corres, 0)
expect_equal(.factor_corres(x_mat, x_mat)$diff_corres_cross, 0)
})
rm(x_mat, y_mat)
isStrictlyPositiveIntegerOrNanOrInfScalarOrNull <- function(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) {
checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = FALSE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = FALSE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName)
}
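# A thin wrapper around checkarg(): permits NULL, NaN, Inf, or a single
# strictly positive integer scalar; zero, negatives, non-integers, and NA
# are rejected (raising an error when stopIfNot = TRUE).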
tagList(
logo_and_name(),
div(class = "home-links",
div(id = "model-name",
br(),
h2("Model:"),
h4(.model_name))),
br(), br(), br(), br(),
includeHTML("html/home_page_links.html")
)
set.seed(1)
X <- big_attachExtdata()
str(test <- big_colstats(X))
ind <- 1:100
str(test2 <- big_colstats(X, ind.row = ind))
plot(test$sum, test2$sum)
abline(lm(test2$sum ~ test$sum), col = "red", lwd = 2)
X.ind <- X[ind, ]
all.equal(test2$sum, colSums(X.ind))
all.equal(test2$var, apply(X.ind, 2, var))
means <- test2$sum / length(ind)
all.equal(means, colMeans(X.ind))
sds <- sqrt(test2$var)
all.equal(sds, apply(X.ind, 2, sd))
setMethod(f="calibrateIntensity",
signature=signature(object="MassSpectrum"),
definition=function(object,
method=c("TIC", "PQN", "median"),
range, ...) {
method <- match.arg(method)
switch(method,
"TIC" = ,
"median" = {
.transformIntensity(object, fun=.calibrateIntensitySimple,
offset=0L,
scaling=.scalingFactor(object, method=method,
range=range))
},
"PQN" = {
stop(dQuote("PQN"),
" is not supported for a single MassSpectrum object.")
})
})
setMethod(f="calibrateIntensity",
signature=signature(object="list"),
definition=function(object,
method=c("TIC", "PQN", "median"), range, ...) {
.stopIfNotIsMassSpectrumList(object)
method <- match.arg(method)
switch(method,
"TIC" = ,
"median" = {
.lapply(object, calibrateIntensity, method=method, range=range, ...)
},
"PQN" = {
.calibrateProbabilisticQuotientNormalization(object, range=range)
}
)
})
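# Usage sketch (assuming MALDIquant-style MassSpectrum objects s, s1, s2):
#   calibrateIntensity(s, method = "TIC")             # single spectrum
#   calibrateIntensity(list(s1, s2), method = "PQN")  # PQN needs a list of spectra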
.seqinrEnv <- new.env()
choosebank <- function(bank = NA,
host = "pbil.univ-lyon1.fr",
port = 5558,
server = FALSE,
blocking = TRUE,
open = "a+",
encoding = "",
verbose = FALSE,
timeout = 5,
infobank = FALSE,
tagbank = NA){
if(verbose){
cat("Verbose mode is on, parameter values are:\n")
cat(paste(" bank = ", deparse(substitute(bank)), "\n"))
cat(paste(" host = ", deparse(substitute(host)), "\n"))
cat(paste(" port = ", deparse(substitute(port)), "\n"))
cat(paste(" timeout = ", deparse(substitute(timeout)), "seconds \n"))
cat(paste(" infobank = ", deparse(substitute(infobank)), "\n"))
cat(paste(" tagbank = ", deparse(substitute(tagbank)), "\n"))
}
if( !is.na(tagbank) ){
if(verbose) cat("I'm checking the tagbank parameter value...\n")
if( !(tagbank %in% c("TEST", "TP", "DEV")) ){
if(verbose) cat("... and I was able to detect an error.\n")
stop("non allowed value for tagbank parameter.\n")
} else {
if(verbose) cat("... and everything is OK up to now.\n")
}
}
if(verbose) cat("I'm ckecking that sockets are available on this build of R...\n")
if( !capabilities("sockets") ){
stop("Sockets are not available on this build of R.")
} else {
if(verbose) cat("... yes, sockets are available on this build of R.\n")
}
if(verbose) cat("I'm trying to open the socket connection...\n")
oldtimeout <- getOption("timeout")
options(timeout = timeout)
socket <- try( socketConnection( host = host, port = port, server = server,
blocking = blocking, open = open, encoding = encoding))
options(timeout = oldtimeout)
if(inherits(socket, "try-error")) {
errmess <- paste("I wasn't able to open the socket connection:\n",
" o Check that your are connected to the internet.\n",
" o Check that port", port, "is not closed by a firewall.\n",
" o Try to increase timeout value (current is", timeout, "seconds).\n")
stop(errmess)
} else {
if(verbose) cat("... yes, I was able to open the socket connection.\n")
}
if(verbose) cat("I'm trying to read answer from server...\n")
rep1 <- readLines(socket, n = 1)
if(verbose) cat(paste("... answer from server is:", rep1, "\n"))
clientid(socket = socket, verbose = verbose)
resdf <- kdb(tag = tagbank, socket = socket)
nbank <- nrow(resdf)
if( is.na(bank) ){
close(socket)
if(verbose) cat("No bank argument was given...\n")
if( !infobank ){
if(verbose) cat("infobank parameter is FALSE, I'm just returning bank names\n")
return(resdf$bank)
} else {
if(verbose) cat("infobank parameter is TRUE, I'm returning all bank infos\n")
return(resdf)
}
} else {
if(verbose) cat("I'm trying to open the bank from server...\n")
resacnucopen <- acnucopen(bank, socket)
if(verbose) cat("... and everything is OK up to now.\n")
if(verbose) cat("I'm trying to get information on the bank...\n")
bankhelp <- ghelp(item = "CONT", file = "HELP", socket = socket, catresult = FALSE)
bankrel <- bankhelp[2]
if(verbose) cat("... and everything is OK up to now.\n")
status <- "unknown"
for(i in seq_len(nbank)){
if (resdf[i,1] == bank) status <- resdf[i,2]
}
res <- list(socket = socket,
bankname = bank,
banktype = resacnucopen$type,
totseqs = resacnucopen$totseqs,
totspecs = resacnucopen$totspecs,
totkeys = resacnucopen$totkeys,
release = bankrel,
status = status,
details = bankhelp)
assign("banknameSocket", res, .seqinrEnv)
invisible(res)
}
}
OMLTask = R6Class("OMLTask",
public = list(
id = NULL,
cache_dir = NULL,
initialize = function(id, cache = getOption("mlr3oml.cache", FALSE)) {
self$id = assert_count(id, coerce = TRUE)
self$cache_dir = get_cache_dir(cache)
initialize_cache(self$cache_dir)
},
print = function() {
catf("<OMLTask:%i:%s> (%ix%i)", self$id, self$name, self$nrow, self$ncol)
}
),
active = list(
name = function() {
self$desc$task_name
},
desc = function() {
if (is.null(private$.desc)) {
private$.desc = cached(download_task_desc, "task_desc", self$id, cache_dir = self$cache_dir)
}
private$.desc
},
data_id = function() {
self$desc$input$source_data$data_set_id
},
data = function() {
if (is.null(private$.data)) {
private$.data = OMLData$new(self$data_id, cache = self$cache_dir)
}
private$.data
},
nrow = function() {
self$data$nrow
},
ncol = function() {
self$data$ncol
},
target_names = function() {
source_data = self$desc$input$source_data
targets = switch(self$desc$task_type,
"Supervised Classification" =,
"Supervised Regression" = source_data$target_feature,
"Survival Analysis" = unlist(source_data[c("target_feature_left", "target_feature_right", "target_feature_event")], use.names = FALSE),
stopf("Unsupoorted task type '%s'", self$desc$task_type)
)
make.names(targets)
},
feature_names = function() {
setdiff(c(self$data$target_names, self$data$feature_names), self$target_names)
},
task = function() {
name = self$name
data = self$data$data
target = self$target_names
miss = setdiff(target, names(data))
if (length(miss)) {
stopf("Task %i could not be created: target '%s' not found in data", self$id, miss[1L])
}
constructor = switch(self$desc$task_type,
"Supervised Classification" = new_task_classif,
"Supervised Regression" = new_task_regr,
"Survival Analysis" = new_task_surv,
stopf("Unsupoorted task type '%s'", self$desc$task_type)
)
task = constructor(name, data, target = target)
task$backend$hash = sprintf("mlr3oml::task_%i", self$id)
task
},
resampling = function() {
if (is.null(private$.resampling)) {
type = NULL
splits = cached(download_task_splits, "task_splits", self$id, self$desc, cache_dir = self$cache_dir)
train_sets = splits[type == "TRAIN", list(row_id = list(as.integer(rowid) + 1L)),
keyby = c("repeat.", "fold")]$row_id
test_sets = splits[type == "TEST", list(row_id = list(as.integer(rowid) + 1L)),
keyby = c("repeat.", "fold")]$row_id
resampling = mlr3::ResamplingCustom$new()
private$.resampling = resampling$instantiate(self$task, train_sets = train_sets, test_sets = test_sets)
}
private$.resampling
},
tags = function() {
self$desc$tag
}
),
private = list(
.data = NULL,
.desc = NULL,
.resampling = NULL
)
)
as_task.OMLTask = function(x, ...) {
x$task
} |
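# Usage sketch for OMLTask / as_task.OMLTask above (hypothetical id; assumes
# network access to OpenML and that the mlr3oml download helpers exist):
# task <- OMLTask$new(id = 59)        # 59 is an arbitrary example id
# task$target_names                   # target column(s) declared by the task
# mlr_task <- task$task               # builds the corresponding mlr3 Task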
mapAccessions <- function(df, long, lat, y = NULL){
if(is.null(y)) {
leaflet::leaflet() %>% leaflet::addTiles() %>%
leaflet::addProviderTiles('Esri.WorldTopoMap') %>%
      # NOTE: the original hex color literals were lost in extraction (the text
      # after each "#" was stripped); "#0000FF" below is a placeholder
      # assumption, not the author's original color.
      leaflet::addCircleMarkers(data = df, lng = df[[long]], lat = df[[lat]],
                                color = "#0000FF",
                                radius = 5,
                                fill = TRUE,
                                fillColor = "#0000FF",
                                fillOpacity = 0.2, weight = 2)
}
else {
df.na.omit <- df[!is.na(df[[y]]), ]
    # NOTE: the palette and na.color hex literals below were lost in extraction
    # (the text after each "#" was stripped); the colors used here are
    # placeholder assumptions, not the author's original values.
    if (is.numeric(df[[y]])){
      pal <- leaflet::colorNumeric(
        palette = c("#FFEDA0", "#F03B20"),
        domain = df[[y]],
        na.color = "#808080"
      )
    }
    else {
      pal <- leaflet::colorFactor(
        palette = c("#1B9E77", "#D95F02"),
        domain = df[[y]],
        na.color = "#808080"
      )
    }
leaflet::leaflet() %>% leaflet::addTiles() %>%
leaflet::addProviderTiles('Esri.WorldTopoMap') %>%
leaflet::addCircleMarkers(data = df.na.omit, lng = df.na.omit[[long]], lat = df.na.omit[[lat]],
color = ~pal(df.na.omit[[y]]),
radius = 5,
fill = TRUE,
fillColor = ~pal(df.na.omit[[y]]),
label = ~df.na.omit[[y]],
fillOpacity = 0.2, weight = 2, group = "withoutNAs") %>%
leaflet::addCircleMarkers(data = df, lng = df[[long]], lat = df[[lat]],
color = ~pal(df[[y]]),
radius = 5,
fill = TRUE,
fillColor = ~pal(df[[y]]),
label = ~df[[y]],
fillOpacity = 0.2, weight = 2, group = "withNAs") %>%
leaflet::addLegend("bottomright", pal = pal, values = df[[y]], opacity = 1, title = y) %>%
      leaflet::addLayersControl(baseGroups = c("withNAs","withoutNAs"),
                                options = leaflet::layersControlOptions(collapsed = FALSE))
}
} |
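# Usage sketch for mapAccessions (hypothetical data; assumes the leaflet
# package is installed; the column names "lon", "lat", "trait" are made up):
# acc <- data.frame(lon = c(2.35, -0.13), lat = c(48.85, 51.51),
#                   trait = c(1.2, 3.4))
# mapAccessions(acc, long = "lon", lat = "lat")               # plain map
# mapAccessions(acc, long = "lon", lat = "lat", y = "trait")  # colored by trait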
library(xts)
library(qrmdata)
library(rmgarch)
data("FTSE")
data("SMI")
INDEXES <- merge(FTSE, SMI, all = FALSE)
plot.zoo(INDEXES)
FTSE.X <- diff(log(FTSE))[-1]
SMI.X <- diff(log(SMI))[-1]
INDEXES.X <- merge(FTSE.X, SMI.X, all = FALSE)
plot.zoo(INDEXES.X)
data <- INDEXES.X['2006-01-01/2009-12-31']
pairs(as.zoo(data))
dim(data)
uspec <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1,1)),
mean.model = list(armaOrder = c(1,0), include.mean = TRUE),
distribution.model = "std")
fit.marg1 <- ugarchfit(spec = uspec, data = data[,1])
fit.marg2 <- ugarchfit(spec = uspec, data = data[,2])
marginspec <- multispec(replicate(2, uspec))
mspec <- dccspec(marginspec, dccOrder = c(1,1), model = "DCC", distribution = "mvt")
mod <- dccfit(mspec,data)
mod
coef(mod)
coef(fit.marg1)
coef(fit.marg2)
plot(mod, which = 2)
plot(mod, which = 3)
plot(mod, which = 4)
plot(mod, which = 5)
copspec <- cgarchspec(uspec = marginspec,
distribution.model = list(copula = "mvt", method = "ML",
time.varying = TRUE, transformation = "parametric"))
mod2 <- cgarchfit(copspec, data)
mod2
likelihood(mod2)
likelihood(mod)
cbind(coef(mod), coef(mod2)) |
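# Optional follow-up: inspect the fitted dynamic correlations. rmgarch's
# rcor() returns an n x n x T array for DCC-type fits, so the FTSE-SMI
# correlation path is the (1,2) slice over time.
R.t <- rcor(mod)
plot(R.t[1, 2, ], type = "l", ylab = "FTSE-SMI correlation")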
expected <- eval(parse(text="FALSE"));
test(id=0, code={
argv <- eval(parse(text="list(c(3.14159265358988, 3.14159265358988, 3.14159265358983, 3.14159265358982, 3.14159265358974, 3.14159265358989, 3.14159265358976, 3.14159265358993, 3.14159265358997, 3.14159265358984, 3.14159265358969, 3.14159265358989, 3.14159265358977, 3.14159265358964, 3.14159265358982, 3.14159265358969, 3.14159265358968, 3.1415926535898, 3.14159265358961, 3.14159265358967, 3.14159265358983, 3.14159265358997, 3.14159265358987, 3.14159265358995, 3.14159265358992, 3.14159265358996, 3.14159265358965, 3.14159265358964, 3.14159265358997, 3.14159265358968, 3.14159265358995, 3.14159265358961, 3.14159265358993, 3.14159265358985, 3.14159265358996, 3.14159265358964, 3.1415926535898, 3.1415926535896, 3.14159265358964, 3.14159265358994, 3.14159265358964, 3.14159265358962, 3.14159265358985, 3.14159265358962, 3.14159265358977, 3.14159265358973, 3.14159265358969, 3.14159265358987, 3.14159265358978, 3.14159265358965, 3.14159265358991, 3.14159265358997, 3.14159265358979, 3.1415926535897, 3.14159265358974, 3.14159265358977, 3.14159265358985, 3.14159265358982, 3.14159265358981, 3.14159265358984, 3.14159265358991, 3.14159265358989, 3.14159265358978, 3.14159265358967, 3.1415926535899, 3.14159265358998, 3.14159265358992, 3.14159265358972, 3.14159265358984, 3.14159265358974, 3.14159265358969, 3.14159265358984, 3.14159265358983, 3.14159265358995, 3.14159265358963, 3.14159265358996, 3.14159265358976, 3.14159265358973, 3.14159265358995, 3.14159265358965, 3.14159265358966, 3.1415926535898, 3.14159265358965, 3.14159265358992, 3.14159265358959, 3.14159265358988, 3.14159265358988, 3.14159265358974, 3.14159265358994, 3.14159265358996, 3.1415926535897, 3.14159265358973, 3.14159265358971, 3.14159265358986, 3.14159265358998, 3.14159265358984, 3.14159265358988, 3.1415926535896, 3.1415926535897, 3.14159265358985, 3.14159265358983))"));
do.call(`is.language`, argv);
}, o=expected); |
ADPclustering=function(Data,ClusterNo=NULL,PlotIt=FALSE,...){
if (!requireNamespace('ADPclust', quietly = TRUE)) {
message(
      'Subordinate clustering package (ADPclust) is missing. No computations are performed.
            Please install the package, which is listed in "Suggests".'
)
return(
list(
Cls = rep(1, nrow(Data)),
Object = "Subordinate clustering package (ADPclust) is missing.
Please install the package which is defined in 'Suggests'."
)
)
}
if (!requireNamespace('cluster', quietly = TRUE)) {
message(
      'Subordinate clustering package (cluster) is missing although it is imported by ADPclust. No computations are performed.
            Please install the package, which is listed in "Suggests".'
)
return(
list(
Cls = rep(1, nrow(Data)),
Object = "Subordinate clustering package (cluster) is missing although its imported in ADPclust.
Please install the package which is defined in 'Suggests'."
)
)
}
if(is.null(ClusterNo))
adp=ADPclust::adpclust(Data,...)
else
adp=ADPclust::adpclust(Data,nclust=ClusterNo,...)
Cls=as.numeric(adp$clusters)
Cls=ClusterRename(Cls,Data)
if(PlotIt){
ClusterPlotMDS(Data,Cls)
}
return(list(Cls=Cls,Object=adp))
} |
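# Usage sketch for ADPclustering (assumes the ADPclust and cluster packages
# are installed; iris is used purely as example data):
# res <- ADPclustering(as.matrix(iris[, 1:4]), PlotIt = FALSE)
# table(res$Cls)
# res3 <- ADPclustering(as.matrix(iris[, 1:4]), ClusterNo = 3)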
context("prep_access")
test_that("prep_access fails when it can't find things", {
workdir <- tempdir()
oldworkdir <- getwd()
setwd(workdir)
on.exit({
setwd(oldworkdir)
})
expect_error(prep_access())
})
test_that("prep_access errors when no valid file paths are found", {
workdir <- tempdir()
oldworkdir <- getwd()
setwd(workdir)
on.exit({
setwd(oldworkdir)
})
create_spice()
expect_error(prep_access())
})
test_that("prep_access works correctly", {
workdir <- tempdir()
oldworkdir <- getwd()
setwd(workdir)
on.exit({
setwd(oldworkdir)
})
data_path <- file.path("data", "mydata.csv")
file.create(data_path)
writeLines(c("a,b,c", "1,2,3"), data_path)
create_spice()
prep_access()
expect_length(readLines("data/metadata/access.csv"), 2)
}) |
test_that("Only accepts ts", {
expect_error(bflSmooth(1:10, 4))
})
test_that("Only accepts integer frequencies", {
expect_error(bflSmooth(ts(1:10,freq=0.5), 1))
})
test_that("Only accepts strictly positive high frequencies", {
expect_error(bflSmooth(ts(1:10,freq=1), 0))
})
test_that("Only accepts frequencies that are multiples of the lower one", {
expect_error(bflSmooth(ts(1:10,freq=4), 5))
})
test_that("Only accepts one dimensional time-series", {
expect_error(bflSmooth(ts(matrix(1:20,10,2),freq=4), 12))
})
test_that("Smoothing works", {
expect_identical(bflSmooth(ts(1:10,start=c(2010,2),freq=4), 4),
ts(1:10,start=c(2010,2),freq=4))
expect_identical(bflSmooth(ts(rep(3,12),start=c(2010,2),freq=4), 12),
ts(as.double(rep(1,36)),start=c(2010,4),freq=12))
expect_equal(bflSmooth(ts((1:4)^2,freq=1,start=1990), 4),
ts(c(0.1188207,0.1712924,0.2762359,0.4336510,
0.6435379,0.8700775,1.1132698,1.3731148,
1.6496125,2.0003666,2.4253770,2.9246439,
3.4981672,3.9283096,4.2150712,4.3584520),start=1990,freq=4))
expect_equal(bflSmooth(ts(sin(4:7),start=c(2010,3),freq=4),12),
ts(c(-0.22957412,-0.24659415,-0.28063423,
-0.33169433,-0.33541968,-0.29181026,
-0.20086608,-0.09733435,0.01878493,
0.14749174,0.23329629,0.27619856),start=c(2010,7),freq=12))
})
test_that("cache works for smoothing", {
bflSmooth_matrices_cache <- bflSmooth_matrices_factory()
expect_identical(bflSmooth_matrices_cache(20,12,NULL,TRUE),bflSmooth_matrices_impl(20,12,NULL,TRUE))
expect_identical(bflSmooth_matrices_cache(20,12,NULL,TRUE),bflSmooth_matrices_impl(20,12,NULL,TRUE))
set.seed(10)
randomarg <- function(n) {
    lfserie <- ts(arima.sim(n,model = list(order=c(1,1,0),ar=0.7)),freq=sample(1:4,1,TRUE),start=2010)
    hf_freq <- sample(1:4,1,TRUE)*frequency(lfserie)
list(lfserie,hf_freq)
}
randomargs <- lapply(rep(30,150),randomarg)
randomres <- function(notused) lapply(randomargs,function(x) bflSmooth(x[[1]],x[[2]]))
reslist <- lapply(rep(1,100),randomres)
expect_true(all(sapply(reslist, FUN = identical, randomres(1))))
set.seed(3)
randomarg <- function(n) {
    lfserie <- ts(arima.sim(n,model = list(order=c(1,1,0),ar=0.7)),freq=sample(1:4,1,TRUE),start=2010)
    hf_freq <- sample(1:4,1,TRUE)*frequency(lfserie)
weights <- ts(arima.sim(hf_freq/frequency(lfserie)*length(lfserie),model = list(order=c(1,1,0),ar=0.7))[-1],freq=hf_freq,start=2010)
list(lfserie,hf_freq,weights)
}
randomargs <- lapply(rep(30,150),randomarg)
randomres <- function(notused) lapply(randomargs,function(x) bflSmooth(x[[1]],x[[2]],x[[3]]))
reslist <- lapply(rep(1,100),randomres)
expect_true(all(sapply(reslist, FUN = identical, randomres(1))))
})
test_that("error weights", {
expect_error(bflSmooth(construction,12,weights = 14),"must be either NULL or a one-dimensional ts")
expect_error(bflSmooth(aggregate(turnover,1),12,weights=ts(1:(12*20),freq=4,start=2000)),"frequency of the weights")
bflSmooth(aggregate(turnover,1),12,weights=window(turnover,start=c(2000,1),end=c(2019,12)))
expect_error(bflSmooth(aggregate(turnover,1),12,weights=window(cbind(turnover,turnover),start=c(2000,1),end=c(2019,12)))," must be one-dimensional")
expect_error(bflSmooth(aggregate(turnover,1),12,weights=window(turnover,start=c(1999,1),end=c(2018,12),extend=TRUE)),"same start than the expected high-frequency")
expect_error(bflSmooth(aggregate(turnover,1),12,weights=window(turnover,start=c(2000,1),extend=TRUE)),"same end than the expected high-frequency")
expect_warning(bflSmooth(construction,12,lfserie.is.rate = TRUE),
"weights is NULL. Ignoring")
})
test_that("weights", {
expect_equal(aggregate.ts(bflSmooth(construction,12,window(turnover,end=c(2019,12))),1),
construction)
ben <- bflSmooth(window(airmiles,start=1949),weights=AirPassengers,nfrequency = frequency(AirPassengers))
expect_equal(aggregate.ts(ben),window(airmiles,start=1949))
ben2 <- bflSmooth(window(airmiles,start=1949)/aggregate.ts(AirPassengers),weights=AirPassengers,nfrequency = frequency(AirPassengers),lfserie.is.rate = TRUE)
expect_equal(ben/ben2,AirPassengers)
expect_equal(ben2,ts(c(4.38416593613805, 4.38642057449218, 4.39105063539796,
4.39833794864968, 4.40822212214856, 4.42054211029787, 4.43557974289189,
4.45359671902528, 4.47459303869805, 4.49832713351511, 4.52445678158343,
4.55268002240914, 4.58327868578653, 4.61459639823386, 4.64670193835782,
4.67968909516756, 4.7135203530594, 4.74813318602728, 4.78367765648578,
4.82028506904769, 4.857955423713, 4.89661368927441, 4.93610355071672,
4.97630620862837, 5.01738423062517, 5.05579669714754, 5.09145169248946,
5.12383448869724, 5.15322083288894, 5.17944527679372, 5.20239752156436,
5.22169152123561, 5.23732727580746, 5.24958053239796, 5.25885572011357,
5.26544696921355, 5.26898661687382, 5.27292827554719, 5.27729310370801,
5.28211166359706, 5.28735574391516, 5.29303004654551, 5.29921685444401,
5.30594437890982, 5.3132408312421, 5.32102863036815, 5.32926545933924,
5.337906650265, 5.34700392386059, 5.35877690091137, 5.37322558141736,
5.39089602730818, 5.41177458703559, 5.43577935131015, 5.46310144180723,
5.49402754103989, 5.52866686139405, 5.56654159868128, 5.60729681264733,
5.65050930529673, 5.69646575914253, 5.73842295122341, 5.77669454912878,
5.81035915431476, 5.83957360057606, 5.8642006583423, 5.88365220088336,
5.89718326767439, 5.90497029673444, 5.907679831691, 5.90589999927418,
5.90014050931679, 5.88989165198604, 5.8790191935297, 5.86754632572516,
5.85538543519115, 5.84253136819935, 5.82898154788562, 5.81462001536295,
5.79932050428774, 5.78312682135063, 5.76612915679705, 5.74842543146492,
5.73011098932796, 5.71108017895583, 5.69480481373812, 5.68121697777313,
5.6707047619277, 5.66322935711514, 5.6588392746938, 5.65807784187722,
5.66132344726057, 5.66849847267047, 5.6791178045234, 5.6927060315075,
5.70892357411432, 5.7281100118523, 5.74336109137696, 5.75485171749777,
5.76189476417748, 5.76459017702151, 5.76285050362513, 5.75583869954295,
5.74301755714585, 5.72436209003249, 5.70065936984552, 5.67262150902355,
5.64077322199503, 5.60472721953895, 5.57419102859664, 5.54880813195249,
5.5292915640377, 5.51541445026052, 5.50741987054069, 5.50647460849287,
5.51348616248405, 5.52868140710596, 5.55042360423246, 5.57798351410437,
5.61056707565059, 5.64861183272664, 5.68035635104226, 5.70611564253547,
5.72476966475999, 5.73649342434805, 5.74086690538228, 5.73698007337506,
5.72350287792143, 5.70024281172592, 5.66887993845797, 5.6303942952581,
5.58557341197137, 5.53366476007916, 5.48554433146994, 5.44097592996781,
5.40021392068528, 5.36363985129116, 5.33135365093678, 5.30392764112533,
5.28215217059932, 5.26588188786587, 5.25422651503113, 5.24675908208479,
5.24283459177722), start=1949,freq=12))
})
|
rmmSuggest=function(charString,fullFieldDepth=FALSE){
if(substr(charString,1,1)=='$') charString=substr(charString,2,nchar(charString))
dd=utils::read.csv(system.file("extdata/dataDictionary.csv",
package='rangeModelMetadata'),stringsAsFactors=FALSE)
dd=.rmmLeftJustify(dd)
out=sapply(c('type','suggestions'),function(x) NULL)
fields=unlist(strsplit(charString,'$',fixed=TRUE))
dd1=dd
  for(i in seq_along(fields)) dd1=subset(dd1,dd1[,i]==fields[i])
if(nrow(dd1)>1){
if(!fullFieldDepth){
suggestions=unique(dd1[,i+1])
suggestions=make.names(suggestions)
suggestions=gsub('\\.(\\w?)', '\\U\\1', suggestions, perl=TRUE)
out$suggestions=paste0(charString,"$",suggestions)
} else {
      out$suggestions=as.vector(apply(dd1[,1:4],1,function(x) gsub('$NA','',paste0(x,collapse='$'),fixed=TRUE)))
}
out$type=names(dd1)[i+1]
} else {
out$type=dd1$type
out$suggestions= strsplit(dd1$example,'; ')[[1]]
out$suggestions=gsub(';','',out$suggestions)
}
out
} |
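# Usage sketch for rmmSuggest (it walks the "$"-separated field paths read
# from the package's data dictionary; the field string below is illustrative):
# rmmSuggest('dataPrep')                        # next-level fields under dataPrep
# rmmSuggest('dataPrep', fullFieldDepth = TRUE) # full paths down to leaf fields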
context("patch level lsm_p_enn metric")
landscapemetrics_patch_landscape_value <- lsm_p_enn(landscape)
test_that("lsm_p_enn is typestable", {
expect_is(lsm_p_enn(landscape), "tbl_df")
expect_is(lsm_p_enn(landscape_stack), "tbl_df")
expect_is(lsm_p_enn(landscape_brick), "tbl_df")
expect_is(lsm_p_enn(landscape_list), "tbl_df")
})
test_that("lsm_p_enn returns the desired number of columns", {
expect_equal(ncol(landscapemetrics_patch_landscape_value), 6)
})
test_that("lsm_p_enn returns in every column the correct type", {
expect_type(landscapemetrics_patch_landscape_value$layer, "integer")
expect_type(landscapemetrics_patch_landscape_value$level, "character")
expect_type(landscapemetrics_patch_landscape_value$class, "integer")
expect_type(landscapemetrics_patch_landscape_value$id, "integer")
expect_type(landscapemetrics_patch_landscape_value$metric, "character")
expect_type(landscapemetrics_patch_landscape_value$value, "double")
}) |
DthetaphiTra <-
function(R,S,a=1,b=1,theta=1/3) {
if (checkingTra(R)==1 & checkingTra(S)==1) {
r=nrow(R)
s=nrow(S)
c=matrix(nrow=r,ncol=s)
d=matrix(nrow=r,ncol=s)
e=matrix(nrow=r,ncol=s)
f=matrix(nrow=r,ncol=s)
Dthetaphicua=matrix(nrow=r,ncol=s)
integrand = function(x) {x*dbeta(x,a,b)}
int<-integrate(integrand, 0,1)$val
integrandsq = function(x) {x^2*dbeta(x,a,b)}
intsq<-integrate(integrandsq, 0,1)$val
for (i in 1:r) {
for (j in 1:s) {
c[i,j]=((R[i,1]+R[i,4])-(S[j,1]+S[j,4]))/2
d[i,j]=((R[i,4]-R[i,1])-(S[j,4]-S[j,1]))/2
e[i,j]=((R[i,2]+R[i,3])-(S[j,2]+S[j,3]))/2-c[i,j]
f[i,j]=((R[i,3]-R[i,2])-(S[j,3]-S[j,2]))/2-d[i,j]
Dthetaphicua[i,j]=c[i,j]^2+theta*d[i,j]^2 +
(e[i,j]^2+theta*f[i,j]^2)*intsq +
2*(c[i,j]*e[i,j]+theta*d[i,j]*f[i,j])*int
}
}
Dthetaphi=sqrt(Dthetaphicua)
return(Dthetaphi)
}
} |
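# Usage sketch for DthetaphiTra. Each row of R and S encodes a trapezoidal
# fuzzy number as (inf0, inf1, sup1, sup0), matching how columns 1-4 are used
# above; the numbers below are made up:
# R <- matrix(c(0, 1, 2, 3), nrow = 1)
# S <- matrix(c(1, 2, 3, 4,
#               0, 2, 2, 4), nrow = 2, byrow = TRUE)
# DthetaphiTra(R, S)  # 1 x 2 matrix of distances with a = b = 1, theta = 1/3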
combined_events <- function(marks, scores, event_names, event, seconds, ...){
UseMethod("combined_events")
}
combined_events.default <- function(marks, scores, event_names, event, seconds, ...){
total <- sum(scores, na.rm = TRUE)
if (seconds == FALSE) {
marks <- mapply(num_to_char, marks, event_names)
}
result <- list(results = data.frame(event = c(event_names, "TOTAL"),
mark = c(unlist(marks), NA),
score = c(scores, total)),
marks = stats::setNames(unlist(marks), event_names),
scores = stats::setNames(scores, event_names),
score_total = total)
names(result$results)[1] <- event
class(result) <- "combined_events"
result
}
print.combined_events <- function(x, ...){
print(x$results)
} |
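# Usage sketch for combined_events (hypothetical values; num_to_char is the
# package's own mark formatter and is only reached when seconds = FALSE):
# combined_events(marks = list(10.85, 7.26),
#                 scores = c(900, 880),
#                 event_names = c("100m", "long_jump"),
#                 event = "decathlon", seconds = TRUE)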
tensorflow::install_tensorflow |
`plot.cardiPeakwindow` <-
function (x, y, add=FALSE, ...){
peaks <- x
x <- peaks$data$x
y <- peaks$data$y
smd <- peaks$smd.indices
thecol <- rep(3, length(peaks$peakid))
thecol[smd] <- 2
if (add) {
lines(x[smd], y[smd], col = "red", lwd = 2)
text(x[smd], y[smd], peaks$peakid[smd], col = "red", pos = 3)
} else {
plot(x, y, type = "l", ...)
lines(x[smd], y[smd], col = "red", lwd = 2)
text(x, y, peaks$peakid, col = thecol, pos = 3)
}
} |
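# Usage sketch for plot.cardiPeakwindow (assumes a "cardiPeakwindow" object
# as returned by cardidates::peakwindow; the series below is made up):
# x <- seq(0, 10, 0.1); y <- sin(x) + 0.1 * rnorm(length(x))
# pw <- cardidates::peakwindow(x, y)
# plot(pw)              # dispatches to the method above
# plot(pw, add = TRUE)  # overlay the detected peaks on an existing plot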
pat_dailySoH <- function(
pat = NULL,
SoH_functions = c("PurpleAirSoH_dailyPctDC",
"PurpleAirSoH_dailyPctReporting",
"PurpleAirSoH_dailyPctValid",
"PurpleAirSoH_dailyMetFit",
"PurpleAirSoH_dailyABFit",
"PurpleAirSoH_dailyABtTest")
) {
MazamaCoreUtils::stopIfNull(pat)
if ( !pat_isPat(pat) )
stop("Parameter 'pat' is not a valid 'pa_timeseries' object.")
if ( pat_isEmpty(pat) )
stop("Parameter 'pat' has no data.")
SoH_list <- list()
for ( SoH_function in SoH_functions ) {
result <- try({
FUN <- get(SoH_function)
}, silent = TRUE)
if ( ! "try-error" %in% class(result) ) {
result <- try({
SoH_list[[SoH_function]] <- FUN(pat)
}, silent = TRUE)
}
if ( "try-error" %in% class(result) ) {
localTime <- lubridate::with_tz(pat$data$datetime, tzone = pat$meta$timezone)
hour <- lubridate::hour(localTime)
start <- lubridate::floor_date(localTime[ min(which(hour == 0)) ], unit = "hour")
end <- lubridate::floor_date(localTime[ max(which(hour == 23)) ], unit = "hour")
days <- dplyr::tibble(datetime = MazamaCoreUtils::dateSequence(start, end, timezone = pat$meta$timezone))
SoH_list[[SoH_function]] <- rep_len(as.numeric(NA), length.out = length(days$datetime))
}
}
datetime <- SoH_list[[1]]$datetime
SoHData_list <- lapply(SoH_list, dplyr::select, -.data$datetime)
SoH_tbl <-
dplyr::bind_cols(SoHData_list) %>%
dplyr::mutate(
datetime = !!datetime, .before = 1
)
return(SoH_tbl)
}
if ( FALSE ) {
library(AirSensor)
pat <- example_pat_failure_B
SoH_functions = c("PurpleAirSoH_dailyPctDC",
"PurpleAirSoH_dailyPctReporting",
"PurpleAirSoH_dailyPctValid",
"PurpleAirSoH_dailyMetFit",
"PurpleAirSoH_dailyABFit",
"PurpleAirSoH_dailyABtTest")
} |
"%vin%" <- function(x, table){
out <- match(x, table, nomatch=0) > 0
if (anyNA(table)){
out[!out] <- NA
}
out[is.na(x)] <- NA
out
}
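# %vin% is a NA-propagating variant of %in%: where %in% silently returns
# FALSE, %vin% returns NA when the value or the table is missing.
# c(1, NA) %in%  c(1, 2)   # TRUE FALSE
# c(1, NA) %vin% c(1, 2)   # TRUE NA
# 2 %vin% c(1, NA)         # NA (2 might match the missing table entry)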
`~` <- function(lhs, rhs){
Lvars <- all.vars(substitute(lhs))
Rvars <- all.vars(substitute(rhs))
condition <- do.call(paste, c(mget(Lvars, parent.frame()), sep="|"))
  # use paste (not paste0) so sep="|" is honored, matching the condition above
  consequent <- do.call(paste, c(mget(Rvars, parent.frame()), sep="|"))
cf <- .Call("R_fdcheck", condition, consequent)
cf == seq_along(cf)
}
`%->%` <- `~`
matchvars <- function(L,env){
if( length(L) == 0 ){
TRUE
} else {
sapply(L,as.character)
}
}
is_unique <- function(...){
d <- data.frame(...)
!duplicated(d) & !duplicated(d, fromLast=TRUE)
}
all_unique <- function(...){
!anyDuplicated(data.frame(...))
}
n_unique <- function(...){
nrow(unique(data.frame(...)))
}
is_complete <- function(...){
stats::complete.cases(data.frame(...))
}
all_complete <- function(...){
all(stats::complete.cases(data.frame(...)))
}
exists_any <- function(rule, by = NULL, na.rm=FALSE){
parent <- parent.frame()
. <- get(".", parent)
if (is.null(by)) by <- character(nrow(.))
rule <- as.expression(substitute(rule))
unsplit(lapply(split(., f=by), function(d){
res <- eval(rule, envir=d, enclos=parent)
ntrue <- sum(res, na.rm=na.rm)
rep(ntrue >= 1, nrow(d))
}), by)
}
exists_one <- function(rule, by=NULL, na.rm=FALSE){
parent <- parent.frame()
. <- get(".", parent)
if (is.null(by)) by <- character(nrow(.))
rule <- as.expression(substitute(rule))
unsplit(lapply(split(., f=by), function(d){
res <- eval(rule, envir=d, enclos=parent)
ntrue <- sum(res, na.rm=na.rm)
rep(ntrue == 1, nrow(d))
}), by)
} |
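# Quick illustration of the record-level helpers above (plain base-R
# semantics, no external data needed):
# is_unique(c("a", "a", "b"))          # FALSE FALSE TRUE
# all_unique(c("a", "a", "b"))         # FALSE
# n_unique(c("a", "a", "b"))           # 2
# is_complete(c(1, NA), c("x", "y"))   # TRUE FALSE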
context("addSexAndAgeToGroup")
library(testthat)
library(nprcgenekeepr)
library(lubridate)
data("qcBreeders")
data("qcPed")
skip_if_not(exists("qcBreeders"))
skip_if_not(exists("qcPed"))
test_that("addSexAndAgeToGroup forms the correct dataframe", {
df <- addSexAndAgeToGroup(ids = qcBreeders, ped = qcPed)
expect_equal(length(df), 3)
expect_equal(length(df[["ids"]]), 29)
expect_equal(names(df), c("ids", "sex", "age"))
expect_equal(df$ids[1], "Q0RGP7")
expect_equal(as.character(df$sex[1]), "F")
expect_equal(df$age[df$id == "Q0RGP7"], qcPed$age[qcPed$id == "Q0RGP7"],
tolerance = 0.2, scale = 18)
}
) |
test_that("default", {
head_text <- heading_text("Test Time")
expect_identical(
head_text$attribs$class,
"govuk-heading-xl"
)
expect_identical(
shiny::HTML("Test Time"),
head_text$children[[1]]
)
})
test_that("medium_works", {
head_text <- heading_text("Test Time", "m")
expect_identical(
head_text$attribs$class,
"govuk-heading-m"
)
expect_identical(
shiny::HTML("Test Time"),
head_text$children[[1]]
)
}) |
stability.par <-
function(data,rep,MSerror,alpha=0.1,main=NULL,cova=FALSE,
name.cov=NULL,file.cov=0,console=FALSE){
KK <- "Environmental index"
y<-data
if (cova) {
x<- as.matrix(file.cov)
KK <- name.cov
}
A <- nrow(y)
M <- ncol(y)
N <- rep
MKE <- MSerror
RR <- main
FM0 <- qf(1-alpha,M-1,M * (A - 1) * (N - 1))
dimA <- rep(0,A); dim(dimA)<-A
SHV=MV=SU=MV1=FF=GY=U1=G=SI=B=SA=S=FS=FSS=R=GYS=F1=GY=NN=X1=X1M=W=GYY=MMM=dimA
dimA <- rep(0,M); dim(dimA)<-M
SHM= MM= II= X2= X2M=dimA
dimA <- rep(0,A*M); dim(dimA)<-c(A,M)
U= G1=dimA
SV = SM = GG1 = L= SMES = 0
SS<- sum(y^2)
SHT<- sum(y)
SHV<-apply(y,1,sum)
MV<-SHV / M
SV<-sum(SHV^2)/M
FK <- SHT ^ 2 / (A * M)
SKV <- (SV - FK) * N
MKV = SKV / (A - 1)
SHM<-apply(y,2,sum)
MM<-SHM / A
SM <-sum(SHM^2)/A
II<-MM - SHT / (A * M)
SKM <- (SM - FK) * N
MKM <- SKM / (M - 1)
SKT <- (SS - FK) * N
SKVM <-SKT - SKV - SKM; MKVM <- SKVM / ((A - 1) * (M - 1))
y<-as.matrix(y)
for ( i in 1: A) {
U[i, ] <- y[i, ] - MM
}
SU<-apply(U,1,sum)
U1 <- SU/ M
b <- 1 / ((M - 1) * (A - 1) * (A - 2))
for ( i in 1:A) {
G[i] <- sum((U[i,] - U1[i])^2)
}
GG1 <- sum(G)
SI <- b * (A * (A - 1) * G - GG1) * N
if ( cova ) ZZ <- sum(x^2)
if (!cova ) IN <- sum(II^2)
for ( i in 1: A) {
if ( cova ) B[i] <- sum( (U[i,] - U1[i])* x)/ZZ
if (!cova ) B[i] <- sum( (U[i,] - U1[i])*II)/IN
}
for ( i in 1: A) {
if ( cova ) SA[i] <- sum((U[i,]-U1[i]- B[i]*x)^2)
if (!cova ) SA[i] <- sum((U[i,]-U1[i]- B[i]*II)^2)
}
L <- sum(SA)
S <- (A / ((A - 2) * (M - 2))) * (SA - L / (A * (A - 1))) * N
KI <- ((A - 1) * (M - 2)) / A
SS<-sum(S * KI)
SKMB <- SS
MS <- SKMB / ((A - 1) * (M - 2))
SKH <- SKVM - SKMB
MKH <- SKH / (A - 1)
SHKLT <- M * (A - 1) * (N - 1)
T05 <- qt(0.95,SHKLT)
DMV05 <- T05 * sqrt(2 * MKE / (N * M))
SMES <- sum(MV)
MES <- SMES / A
for ( i in 1: A) {
if( MV[i] > MES) MV1[i] <- 1
if( MV[i] >= (MES + DMV05) ) MV1[i] <- 2
if( MV[i] >= (MES + 2 * DMV05)) MV1[i] <- 3
if( MV[i] < MES ) MV1[i] <- -1
if( MV[i] <= (MES - DMV05)) MV1[i] <- -2
if( MV[i] <= (MES - 2 * DMV05)) MV1[i] <- -3
}
FV <- MKV / MKVM; FM <- MKM / MKE
FVM <-MKVM / MKE; FH <- MKH / MS
FMS <- MS / MKE
FS <- SI / MKE
FSS <- S / MKE
SHF2 <-(A - 1) * (M - 1); SHF1 = (A - 1)
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
pvalue <- round(1-pf( FV ,SHF1, SHF2),3)
DD<-paste("",pvalue)
if (pvalue <0.001) DD<-"<0.001"
SHF2 <- M * (A - 1) * (N - 1)
SHF1 <- M - 1
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
pvalue<- round(1-pf( FM ,SHF1, SHF2),3)
NNN<-paste("",pvalue)
if (pvalue <0.001) NNN<-"<0.001"
SHF2 <- M * (A - 1) * (N - 1); SHF1 <- (A - 1) * (M - 1)
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
pvalue <- round(1-pf( FVM ,SHF1, SHF2),3)
LL<-paste("",pvalue)
if (pvalue <0.001) LL<-"<0.001"
SHF2 <- (A - 1) * (M - 2); SHF1 <- A - 1
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
pvalue <- round(1-pf( FH ,SHF1, SHF2),3)
HH<-paste("",pvalue)
if (pvalue <0.001) HH<-"<0.001"
SHF2 <- M * (A - 1) * (N - 1); SHF1 <- (A - 1) * (M - 2)
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
pvalue <- round(1-pf( FMS ,SHF1, SHF2),4)
BB<-paste("",pvalue)
if (pvalue <0.001) BB<-"<0.001"
SHF2 <- M * (A - 1) * (N - 1)
SHF1 <- M - 1
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
for ( i in 1: A) {
if(FS[i] >= F01 ) NN[i] <- "**"
if( (FS[i] < F01) & (FS[i] >= F05) ) NN[i] <- "*"
if( FS[i] < F05 ) NN[i] <- "ns"
}
SHF2 <- M * (A - 1) * (N - 1)
SHF1 <- M - 2
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
for ( i in 1: A) {
if( FSS[i] >= F01 ) MMM[i] <- "**"
if( (FSS[i] < F01) & (FSS[i] >= F05) ) MMM[i] <- "*"
if( FSS[i] < F05 ) MMM[i] <- "ns"
}
if(console){
cat("\n","INTERACTIVE PROGRAM FOR CALCULATING SHUKLA'S STABILITY VARIANCE AND KANG'S")
cat("\n"," YIELD - STABILITY (YSi) STATISTICS")
cat("\n",RR,"\n",KK," - covariate \n")
cat("\n","Analysis of Variance\n")
}
fuentes<- c( "Total ","Genotypes", "Environments","Interaction",
"Heterogeneity" , "Residual","Pooled Error")
gl <- c(A*M-1, A-1, M-1, (M - 1) * (A - 1), A - 1,(A - 1) * (M - 2),M * (A - 1) * (N - 1))
SC <- round(c(SKT, SKV, SKM , SKVM ,SKH, SKMB),4)
SC <- c(SC,0)
CM <- round(c(MKV, MKM, MKVM ,MKH ,MS,MKE),4)
CM <- c(0,CM)
Fcal<-round(c(FV,FM,FVM,FH,FMS),2)
Fcal<-c(0,Fcal,0)
resul<-c(" ",DD,NNN,LL,HH,BB," ")
Z<-data.frame(gl,SC,CM,Fcal, resul)
names(Z)<-c("Df","Sum Sq","Mean Sq","F value","Pr(>F)")
rownames(Z)<- fuentes
Z[7,c(2,4)]<-" ";Z[1,c(3,4)]<-" ";
if(console){
cat("\n")
print(Z)
}
Z1<-Z
X1<-apply(y,1,sum)
X1M<- X1/ M
X2<-apply(y,2,sum)
X2M <- X2 / A
SH <- sum(X2)
MM1 <- SH / (A * M)
for ( i in 1: A) {
W[i] <- sum((y[i,] - X1M[i] - X2M + MM1) ^ 2) * N
}
MV<-round(MV,6); SI<-round(SI,6); S<-round(S,6); W<-round(W,6);
Z<-data.frame(MV, SI, NN, S,MMM,W)
names(Z)<-c("Mean","Sigma-square",".","s-square",".","Ecovalence")
rownames(Z)<-rownames(y)
if(console){
cat("\n","Genotype. Stability statistics\n\n")
print(Z)
}
Z2<-Z
Z<-as.matrix(Z)
FF <- SI / MKE
SHF2 <- M * (A - 1) * (N - 1); SHF1 <- M - 1
F05 <- qf(0.95,SHF1, SHF2)
F01 <- qf(0.99,SHF1, SHF2)
for ( i in 1: A) {
if( FF[i] < FM0) F1[i] <- 0
if( FF[i] >= FM0) F1[i] <- -2
if( FF[i] >= F05) F1[i] <- -4
if( FF[i] >= F01) F1[i] <- -8
}
for ( i in 1: A) {
R0 <- 1
for ( j in 1: A) {
if( MV[j] < MV[i]) R0 <- R0 + 1
}
R[i] <- R0
}
for ( i in 1: A) GY[i] <- R[i] + MV1[i]
for ( i in 1: A) GYS[i] <- GY[i] + F1[i]
SGYS<-0
for ( i in 1: A) SGYS <- SGYS + GYS[i]
MGYS <- SGYS / A
for ( i in 1: A) {
GYY[i]<-""
if( GYS[i] > MGYS ) GYY[i] <- "+"
}
names<-c("Yield","Rank","Adj.rank","Adjusted","Stab.var","Stab.rating","YSi","..." )
Z<-data.frame(MV, R, MV1,GY,SI,F1,GYS,GYY)
rownames(Z)<-rownames(y)
names(Z)<-names
Z3<-Z
if(console){
cat("\n\nSignif. codes: 0 '**' 0.01 '*' 0.05 'ns' 1\n\n")
cat("Simultaneous selection for yield and stability (++)\n\n")
print(Z)
cat("\n","Yield Mean:", MES)
cat("\n","YS Mean:", MGYS)
cat("\n","LSD (0.05):", DMV05)
cat("\n",rep("-",11))
cat("\n","+ selected genotype" )
cat("\n","++ Reference: Kang, M. S. 1993. Simultaneous selection for yield")
cat("\n","and stability: Consequences for growers. Agron. J. 85:754-757." )
cat("\n")
}
out<-list(analysis=Z1,statistics =Z2,stability =Z3)
invisible(out)
} |
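# Usage sketch for stability.par (genotypes in rows, environments in columns;
# the yield values, rep count, and MSerror below are made up):
# yield <- matrix(rnorm(5 * 4, mean = 10), nrow = 5, ncol = 4,
#                 dimnames = list(paste0("G", 1:5), paste0("E", 1:4)))
# stability.par(yield, rep = 4, MSerror = 1.8, alpha = 0.1,
#               main = "Genotype x environment", console = TRUE)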
b_bootstrap <- function(data, statistic, n1=1000, n2=1000, use_weights=FALSE, weight_arg=NULL, ...) {
dirichlet_weights <- matrix(stats::rexp(NROW(data) * n1, 1) , ncol=NROW(data), byrow=TRUE)
dirichlet_weights <- dirichlet_weights / rowSums(dirichlet_weights)
if(use_weights) {
stat_call <- quote(statistic(data, w, ...))
names(stat_call)[3] <- weight_arg
boot_sample <- apply(dirichlet_weights, 1, function(w) {
eval(stat_call)
})
} else {
if(is.null(dim(data)) || length(dim(data)) < 2) {
boot_sample <- apply(dirichlet_weights, 1, function(w) {
data_sample <- sample(data, size=n2, replace=TRUE, prob=w)
statistic(data_sample, ...)
})
} else {
boot_sample <- apply(dirichlet_weights, 1, function(w) {
index_sample <- sample(nrow(data), size=n2, replace=TRUE, prob=w)
statistic(data[index_sample, , drop=FALSE], ...)
})
}
}
if(is.null(dim(boot_sample)) || length(dim(boot_sample)) < 2) {
boot_sample
} else {
as.data.frame(t(boot_sample))
}
} |
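# Usage sketch for the Bayesian bootstrap above (mtcars is built in):
# b_bootstrap(mtcars$mpg, statistic = mean)            # resampling variant
# b_bootstrap(mtcars$mpg, statistic = weighted.mean,   # weighted variant
#             use_weights = TRUE, weight_arg = "w")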