skeleton.decomp <-
function (A, r, thr = 1e-10, num.iter = 30)
{
.is.sparse(A)
if (min(dim(A)) < r) {
stop("Please specify the rank as min(dim(A)) > rank")
}
J = seq_len(r)
iter <- 1
RecError <- c()
RelChange <- c()
RecError[1] <- 1e+10
RelChange[1] <- thr * 10
while ((RelChange[iter] > thr) && (iter <= num.iter)) {
A_bar <- A
C <- A[, J]
C <- as(C, "sparseMatrix")
Q <- qr.Q(qr(C))
Q <- as(Q, "sparseMatrix")
I <- maxvol(Q)
R <- A[I, ]
R <- Matrix::t(R)
R <- as(R, "sparseMatrix")
Q <- qr.Q(qr(R))
Q <- as(Q, "sparseMatrix")
J <- maxvol(Q)
iter <- iter + 1
Q_hat <- Q[J, ]
A <- A[, J] %*% Matrix::t(Q %*% solve(Q_hat))
RecError[iter] <- sqrt(sum((A - A_bar)^2))
RelChange[iter] <- abs(RecError[iter - 1] - RecError[iter])/RecError[iter]
}
U <- solve(A[I, J])
U <- as(U, "sparseMatrix")
R <- Matrix::t(R)
R <- as(R, "sparseMatrix")
.is.sparse(C)
.is.sparse(U)
.is.sparse(R)
list(C = C, U = U, R = R, rowidx = I, colidx = J, RecError = RecError,
RelChange = RelChange)
} |
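# --- Usage sketch for skeleton.decomp() above (added illustration, not part of the source):
# it assumes the package providing skeleton.decomp(), .is.sparse() and maxvol() is loaded,
# and uses a random sparse matrix purely for demonstration.
library(Matrix)
set.seed(1)
A <- rsparsematrix(100, 80, density = 0.05)   # sparse dgCMatrix test input
out <- skeleton.decomp(A, r = 5)              # CUR-style factors: A ~ C %*% U %*% R
str(out[c("rowidx", "colidx")])               # selected row and column indices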
"rental"
|
binom.bayes <- function(x, n,
conf.level = 0.95,
type = c("highest", "central"),
prior.shape1 = 0.5,
prior.shape2 = 0.5,
tol = .Machine$double.eps^.5,
maxit = 1000, ...) {
if(prior.shape1 <= 0 || prior.shape2 <= 0)
stop("priors must be strictly positive.")
if((length(x) != length(n)) && length(x) == 1)
x <- rep(x, length(n))
if((length(x) != length(n)) && length(n) == 1)
n <- rep(n, length(x))
ends <- x == 0 | x == n
alpha <- rep(1 - conf.level, length(x))
alpha[!ends] <- alpha[!ends] * 0.5
a <- x + prior.shape1
b <- n - x + prior.shape2
p <- a/(a + b)
lcl <- qbeta(alpha, a, b)
ucl <- qbeta(1 - alpha, a, b)
type <- match.arg(type)
error <- vector("logical", length(lcl))
if(any(!ends) && (type == "highest")) {
ci <- .C("binom_bayes",
as.integer(x[!ends]),
as.integer(n[!ends]),
as.double(a[!ends]),
as.double(b[!ends]),
as.double(alpha[!ends]),
lcl = as.double(lcl[!ends]),
ucl = as.double(ucl[!ends]),
as.integer(sum(!ends)),
maxit = as.integer(maxit),
tol = as.double(tol),
error = error[!ends],
PACKAGE = "binom")
lcl[!ends] <- ci$lcl
ucl[!ends] <- ci$ucl
error[!ends] <- as.logical(ci$error)
if(any(ci$error)) {
nerr <- sum(ci$error)
msg1 <- sprintf("%s confidence interval%s ", nerr, if(nerr > 1) "s" else "")
msg2 <- "failed to converge (marked by '*').\n"
msg3 <- " Try changing 'tol' to a different value."
warning(msg1, msg2, msg3)
}
}
lcl[x == 0] <- 0
ucl[x == n] <- 1
sig <- pbeta(lcl, a, b) + 1 - pbeta(ucl, a, b)
res <- data.frame(method = "bayes",
x = x,
n = n,
shape1 = a,
shape2 = b,
mean = p,
lower = lcl,
upper = ucl,
sig = sig)
if(any(error))
res$method <- factor(sprintf("bayes%s", ifelse(error, "*", "")))
attr(res, "conf.level") <- conf.level
attr(res, "type") <- type
res
}
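# Usage sketch for binom.bayes() above (assumes the binom package, which supplies the
# compiled "binom_bayes" routine called via .C(), is installed); the counts are illustrative.
library(binom)
binom.bayes(x = c(0, 5, 10), n = 10, conf.level = 0.95, type = "highest")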
rbind.zero <- function(data, which.x = c("max", "min", "both"), row.only = FALSE) {
if (nrow(data) == 0) return(data)
which.x <- match.arg(which.x)
if (which.x == "both") {
data.max <- rbind.zero(data, which.x = "max", row.only = TRUE)
data.min <- rbind.zero(data, which.x = "min", row.only = TRUE)
data.x <- rbind(data.max, data.min)
} else {
which.fn <- if (which.x == "max") which.max else which.min
data.x <- data[which.fn(data$xx), ]
data.x$yy <- 0
}
if (row.only) data.x else rbind(data, data.x)
}
build.density.data <- function(i, x, bayes) {
y <- dbeta(x, bayes$shape1[i], bayes$shape2[i])
b <- bayes[rep(i, length(x)), ]
data <- data.frame(xx = x, yy = y, b)
data <- data[is.finite(data$yy), ]
all.labels <- sprintf("x = %d, n = %d, conf.level = %0.2f",
bayes$x, bayes$n, 1 - bayes$sig)
all.labels <- reorder(all.labels, bayes$mean)
data$label <- all.labels[i]
row.names(data) <- NULL
data.lower <- data[data$xx < data$lower, ]
data.upper <- data[data$xx > data$upper, ]
data.central <- data[data$xx >= data$lower & data$xx <= data$upper, ]
data.lower <- rbind.zero(data.lower, which.x = "max")
data.upper <- rbind.zero(data.upper, which.x = "min")
data.central <- rbind.zero(data.central, which.x = "both")
list(lower = data.lower, upper = data.upper, central = data.central)
}
binom.bayes.densityplot <- function(bayes,
npoints = 500,
fill.central = "lightgray",
fill.lower = "steelblue",
fill.upper = fill.lower,
alpha = 0.8, ...) {
stopifnot(require(ggplot2))
x <- seq(0, 1, length.out = npoints)
datalist <- lapply(1:nrow(bayes), build.density.data, x = x, bayes = bayes)
data.lower <- do.call(rbind, lapply(datalist, "[[", "lower"))
data.upper <- do.call(rbind, lapply(datalist, "[[", "upper"))
data.central <- do.call(rbind, lapply(datalist, "[[", "central"))
gg <- ggplot(data.central, aes_string(x = "xx", y = "yy"))
gg <- gg + geom_polygon(fill = fill.central, alpha = alpha)
if (nrow(data.lower) > 0) {
gg <- gg + geom_polygon(data = data.lower, fill = fill.lower, alpha = alpha)
}
if (nrow(data.upper) > 0) {
gg <- gg + geom_polygon(data = data.upper, fill = fill.upper, alpha = alpha)
}
gg <- gg + facet_wrap(~ label)
gg <- gg + xlim(c(0, 1)) + xlab(NULL)
gg <- gg + ylim(c(0, max(data.central$yy))) + ylab("Beta Density")
gg <- gg + theme_bw()
gg
} |
dtt_day_decimal <- function(x, ...) {
UseMethod("dtt_day_decimal")
}
dtt_day_decimal.Date <- function(x, ...) {
as.numeric(dtt_day(x))
}
dtt_day_decimal.POSIXct <- function(x, ...) {
dtt_day(x) + dtt_hour_decimal(x) / 24
} |
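# Usage sketch for the dtt_day_decimal() methods above (assumes the dttr2 package, which
# provides dtt_day() and dtt_hour_decimal(), is attached); the dates are illustrative.
library(dttr2)
dtt_day_decimal(as.Date("2001-02-03"))                           # day of month as numeric
dtt_day_decimal(as.POSIXct("2001-02-03 12:00:00", tz = "UTC"))   # day plus fraction from the hour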
cat(crayon::yellow("test-extractors-spprec.R (notably ranCoefs)"))
{
data("blackcap")
fake <- blackcap
fake$grp <- rep(1:2,7)
fake$migStatus <- fake$migStatus +(fake$grp==2)
fake$ID <- gl(7,2)
fake$grp <- factor(fake$grp)
(dd <- fitme(migStatus ~ 1 + (0+grp|ID),data=fake, control.HLfit=list(sparse_precision=FALSE), fixed=list(phi=0.1)))
(ss <- fitme(migStatus ~ 1 + (0+grp|ID),data=fake, control.HLfit=list(sparse_precision=TRUE), fixed=list(phi=0.1)))
ranef(ss)
ranef(dd)
p1 <- predict(dd)
p2 <- predict(ss)
p3 <- fixef(dd)[[1]] +(dd$ZAlist[[1]] %*% dd$strucList[[1]] %*% (t(ranef(dd, type="uncorrelated")[[1]]))[1:14])[,1]
p4 <- fixef(dd)[[1]] +(get_ZALMatrix(dd) %*% t(ranef(dd, type="uncorrelated")[[1]])[1:14])[,1]
p5 <- fixef(dd)[[1]] +(ss$ZAlist[[1]] %*% ss$strucList[[1]] %*% (t(ranef(ss, type="uncorrelated")[[1]]))[1:14])[,1]
p6 <- fixef(dd)[[1]] +(get_ZALMatrix(ss) %*% t(ranef(ss, type="uncorrelated")[[1]])[1:14])[,1]
(crit <- diff(range(p1-p2,p1-p3,p1-p4,p1-p5,p1-p6)))
testthat::test_that(paste0("ranef corrMatrix(mv()...): criterion was ",signif(crit,4)," >1e-8"),
testthat::expect_true(crit<1e-8) )
} |
negate <- function(.p) {
compose(`!`, as_mapper(.p))
} |
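# Usage sketch for negate() above (assumes a purrr-style context supplying compose() and
# as_mapper(), e.g. library(purrr)):
is_not_null <- negate(is.null)
is_not_null(NULL)   # FALSE
is_not_null(1:3)    # TRUE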
bigssa <-
function(formula,data=NULL,type=NULL,nknots=NULL,rparm=NA,
lambdas=NULL,skip.iter=TRUE,se.fit=FALSE,rseed=1234,
gcvopts=NULL,knotcheck=TRUE,gammas=NULL,weights=NULL,
random=NULL,remlalg=c("FS","NR","EM","none"),
remliter=500,remltol=10^-4,remltau=NULL) {
if(class(data)[1]=="makessa"){
ssafit <- ssawork(formula,data)
} else{
ssamk <- makessa(formula,data,type,nknots,rparm,
lambdas,skip.iter,se.fit,rseed,
gcvopts,knotcheck,gammas,weights,
random,remlalg,remliter,remltol,remltau)
ssafit <- ssawork(formula,ssamk)
}
ssafit <- c(ssafit,list(call=formula))
class(ssafit) <- "bigssa"
return(ssafit)
} |
acc_univariate_outlier <- function(resp_vars = NULL, label_col, study_data,
meta_data, exclude_roles, n_rules = 4,
max_non_outliers_plot = 10000) {
rvs <- resp_vars
if (length(n_rules) != 1 || !is.numeric(n_rules) ||
!all(util_is_integer(n_rules)) ||
!(n_rules %in% 1:4)) {
util_warning(
"The formal n_rules is not an integer of 1 to 4, default (%d) is used.",
formals(acc_univariate_outlier)$n_rules,
applicability_problem = TRUE)
n_rules <- formals(acc_univariate_outlier)$n_rules
}
if (length(max_non_outliers_plot) != 1 ||
!is.numeric(max_non_outliers_plot) ||
!all(util_is_integer(max_non_outliers_plot)) ||
(max_non_outliers_plot < 0)) {
util_warning(
c("The formal max_non_outliers_plot is not an integer >= 0,",
"default (%d) is used."),
formals(acc_univariate_outlier)$max_non_outliers_plot,
applicability_problem = TRUE)
max_non_outliers_plot <-
formals(acc_univariate_outlier)$max_non_outliers_plot
}
util_prepare_dataframes()
util_correct_variable_use("resp_vars",
allow_more_than_one = TRUE,
allow_null = TRUE,
allow_any_obs_na = TRUE,
need_type = "integer | float"
)
if (is.null(meta_data[[DATA_TYPE]]) ||
any(is.na(meta_data[[DATA_TYPE]]))) {
if (is.null(meta_data[[DATA_TYPE]])) {
which_na <- rep(TRUE, nrow(meta_data))
} else {
which_na <- is.na(meta_data[[DATA_TYPE]])
}
meta_data[[DATA_TYPE]][which_na] <-
prep_datatype_from_data(resp_vars = meta_data[[label_col]][which_na],
study_data = ds1)
list_of_types <-
paste(sQuote(meta_data[[label_col]][which_na]),
'->',
sQuote(meta_data[[DATA_TYPE]][which_na]))
if (length(list_of_types) > 5) {
dts <- "..."
} else {
dts <- NULL
}
list_of_types <- c(head(list_of_types, 5), dts)
list_of_types <- paste0(list_of_types, collapse = ", ")
util_warning(c(
"No %s for all or some variables defined in the metadata.",
"I guessed them based on data: %s"),
dQuote(DATA_TYPE),
list_of_types,
applicability_problem = TRUE
)
}
if (length(rvs) == 0) {
rvs <- meta_data[[label_col]][meta_data[[DATA_TYPE]] %in%
c(DATA_TYPES$FLOAT, DATA_TYPES$INTEGER)]
util_warning(paste0("The following variables: ",
paste0(rvs, collapse = ", "), " were selected."),
applicability_problem = TRUE)
if (length(rvs) == 0) {
util_error(paste0("No variables suitable data type defined."),
applicability_problem = TRUE)
}
rvs <- intersect(rvs, colnames(ds1))
} else {
isfloat <- meta_data[[DATA_TYPE]] %in%
c(DATA_TYPES$FLOAT, DATA_TYPES$INTEGER)
isrvs <-
meta_data[[label_col]] %in% rvs
if (!all(isfloat | !isrvs)) {
rvs <- meta_data[[label_col]][isfloat & isrvs]
if (!all(!isrvs | isfloat)) util_warning(paste0("Only: ",
paste0(rvs, collapse = ", "),
" are defined to be of type float or integer."),
applicability_problem = TRUE)
}
}
if (!missing(exclude_roles)) {
if (!(all(exclude_roles %in% meta_data[[VARIABLE_ROLE]]))) {
util_warning(
"Specified VARIABLE_ROLE not in meta_data. No exclusion applied.",
applicability_problem = TRUE)
} else {
which_vars_not <- meta_data[[label_col]][meta_data[[VARIABLE_ROLE]] %in%
exclude_roles]
if (length(intersect(rvs, which_vars_not)) > 0) {
util_warning(paste0("Study variables: ",
paste(dQuote(intersect(rvs, which_vars_not)),
collapse = ", "), " have been excluded."),
applicability_problem = TRUE)
}
rvs <- setdiff(rvs, which_vars_not)
}
}
whicharenum <- vapply(FUN.VALUE = logical(1), ds1[, rvs, drop = FALSE],
function(x) is.numeric(x))
if (!all(whicharenum)) {
util_warning(paste0(
"Variables ", paste0(dQuote(rvs[!whicharenum]), collapse = ", "),
" are not of type float or integer and will be removed",
" from univariate outlier analysis."
), applicability_problem = TRUE)
rvs <- rvs[whicharenum]
}
intcheck <- vapply(FUN.VALUE = logical(1), ds1[, rvs, drop = FALSE],
function(x) all(util_is_integer(x), na.rm = TRUE))
if (any(intcheck)) {
util_warning(paste0(
"Variables: ", paste0(dQuote(rvs[intcheck]), collapse = ", "),
" show integer values only, but will be nonetheless considered."
), applicability_problem = FALSE)
}
if (length(rvs) > 0) {
rvs <-
util_no_value_labels(
resp_vars = rvs,
meta_data = meta_data,
label_col = label_col,
warn = TRUE,
stop = TRUE
)
}
ds1_ll <- ds1
if (length(rvs) == 0) {
util_error("No suitable response variables left.",
applicability_problem = TRUE)
} else if (length(rvs) == 1) {
ds2 <- ds1_ll[, rvs, drop = FALSE]
ds2$variable <- rvs
ds2 <- ds2[, c(2, 1)]
names(ds2) <- c("variable", "value")
} else {
ds2 <- melt(ds1_ll[, c(rvs)], measure.vars = rvs)
}
ds2$value <- as.numeric(ds2$value)
ds2plot <- ds2[!is.na(ds2$value), ]
if (nrow(ds2plot) * ncol(ds2plot) == 0) {
util_error("No data left, aborting.",
applicability_problem = FALSE)
}
ds2plot$tukey <- NA
ds2plot$sixsigma <- NA
ds2plot$hubert <- NA
ds2plot$sigmagap <- NA
ds2plotOL <- ds2plot %>%
dplyr::group_by(variable) %>%
dplyr::mutate(tukey = util_tukey(value)) %>%
dplyr::mutate(sixsigma = util_sixsigma(value)) %>%
dplyr::mutate(hubert = util_hubert(value)) %>%
dplyr::mutate(sigmagap = util_sigmagap(value))
ds2plot <- as.data.frame(ds2plotOL)
ds2plot$Rules <- apply(ds2plot[, 3:6], 1, sum)
ds2plot$tlta <- ifelse(ds2plot$Rules >= n_rules, 1, 0)
for (i in unique(ds2plot$variable)) {
if (length(ds2plot$tlta[ds2plot$tlta == 1 & ds2plot$variable == i]) > 0) {
ds2plot$tlta[ds2plot$tlta == 1 & ds2plot$variable == i] <- ifelse(
ds2plot$value[ds2plot$tlta == 1 & ds2plot$variable == i] <
median(ds2plot$value[ds2plot$variable == i], na.rm = TRUE),
-1, 1
)
}
}
st1 <- aggregate(ds2plot$value, list(ds2plot$variable), mean)
colnames(st1) <- c("Variables", "Mean")
st1$"SD" <- aggregate(ds2plot$value, list(ds2plot$variable), sd)$x
st1$"Median" <- aggregate(ds2plot$value, list(ds2plot$variable), median)$x
st1$"Skewness" <-
aggregate(ds2plot$value, list(ds2plot$variable), robustbase::mc)$x
st1$"Tukey (N)" <- aggregate(ds2plot$tukey, list(ds2plot$variable), sum)$x
st1$"6-Sigma (N)" <-
aggregate(ds2plot$sixsigma, list(ds2plot$variable), sum)$x
st1$"Hubert (N)" <- aggregate(ds2plot$hubert, list(ds2plot$variable), sum)$x
st1$"Sigma-gap (N)" <-
aggregate(ds2plot$sigmagap, list(ds2plot$variable), sum)$x
st1$"Most likely (N)" <- aggregate(ds2plot$Rules, list(ds2plot$variable),
function(x) {
sum(x >= n_rules)
})$x
st1$"To low (N)" <- aggregate(ds2plot$tlta, list(ds2plot$variable),
function(x) {
sum(x == -1)
})$x
st1$"To high (N)" <- aggregate(ds2plot$tlta, list(ds2plot$variable),
function(x) {
sum(x == 1)
})$x
st1$GRADING <- ifelse(st1$"Most likely (N)" > 0, 1, 0)
st1$Mean <- round(st1$Mean, digits = 2)
st1$Median <- round(st1$Median, digits = 2)
st1$SD <- round(st1$SD, digits = 2)
st1$Skewness <- round(st1$Skewness, digits = 2)
plot_list <- list()
disc_cols <- c("
names(disc_cols) <- c(0:4)
for (i in unique(ds2plot$variable)) {
ds_i <- subset(ds2plot, variable == i)
n_non_ol <- sum(ds_i$Rules == 0)
if (max_non_outliers_plot < n_non_ol) {
dsi_non_ol <- ds_i[ds_i$Rules == 0, , FALSE]
dsi_ol <- ds_i[ds_i$Rules > 0, , FALSE]
subsel_non_ol <- sample(seq_len(nrow(dsi_non_ol)),
size =
min(max_non_outliers_plot, nrow(dsi_non_ol)))
ds_i <- rbind.data.frame(dsi_non_ol[subsel_non_ol, , FALSE], dsi_ol)
util_warning(
c("For %s, %d from %d non-outlier data values were",
"sampled to avoid large plots."),
dQuote(i),
max_non_outliers_plot,
n_non_ol,
applicability_problem = FALSE
)
}
ds_i$Rules <- factor(ds_i$Rules)
if (nrow(ds_i) > 0) {
p_i <- ggplot(ds_i, aes(x = variable, y = value)) +
geom_jitter(data = ds_i, position = position_jitter(0.1),
aes(color = Rules, alpha = 0.5, size =
as.numeric(Rules) / 10)) +
scale_size_continuous(range = c(0.01, 3), guide = "none") +
scale_color_manual(values = disc_cols) +
facet_wrap(vars(variable), scales = "free") +
scale_alpha(guide = "none") +
theme_minimal()
} else {
p_i <- ggplot() +
annotate("text", x = 0, y = 0, label =
sprintf("No outliers detected for %s", dQuote(i))) +
theme(
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "none",
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_blank()
)
}
plot_list[[i]] <- util_set_size(p_i, width_em = 10, height_em = 25)
}
return(list(SummaryTable = st1, SummaryPlotList = plot_list))
}
acc_robust_univariate_outlier <- acc_univariate_outlier |
context("Test implementation of canberra distance ...")
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
V <- -10:10
W <- -20:0
test_dist_matrix <- function(x, FUN) {
dist.fun <- match.fun(FUN)
res.dist.matrix <- matrix(NA_real_, nrow(x), nrow(x))
for (i in 1:nrow(x)) {
for (j in 1:nrow(x)) {
res.dist.matrix[i, j] <- dist.fun(x[i, ], x[j, ])
}
}
return(res.dist.matrix[lower.tri(res.dist.matrix, diag = FALSE)])
}
test_canberra_dist <- function(P, Q) {
sum(abs((P) - (Q)) / ((P) + (Q)))
}
test_that("distance(method = 'canberra') computes the correct distance value.",
{
expect_equal(as.vector(philentropy::distance(rbind(P, Q), method = "canberra")),
test_canberra_dist(P, Q))
expect_equal(as.vector(philentropy::distance(rbind(P, Q), method = "canberra")),
as.vector(stats::dist(base::rbind(P, Q), method = "canberra")))
distMat <-
rbind(rep(0.2, 5), rep(0.1, 5), c(5, 1, 7, 9, 5))
dist.vals <-
distance(distMat, method = "canberra")
expect_equal(dist.vals[lower.tri(dist.vals, diag = FALSE)],
test_dist_matrix(distMat, FUN = test_canberra_dist))
})
test_that(
"distance(method = 'canberra') computes the correct distance value when P_i and Q_i are 0 -> 0/0 is then replaced by 0.",
{
A <- c(0, 0.25, 0.25, 0, 0.25, 0.25)
B <- c(0, 0, 0.25, 0.25, 0.25, 0.25)
canb <- function(x, y) {
dist <- vector(mode = "numeric", length = 1)
dist <- 0
for (i in 1:length(x)) {
if ((abs(x[i] - y[i]) == 0) | ((x[i] + y[i]) == 0)) {
dist = dist
} else {
dist = dist + (abs(x[i] - y[i]) / ((x[i]) + (y[i])))
}
}
return(dist)
}
expect_equal(as.vector(philentropy::distance(rbind(A, B), method = "canberra")), canb(A, B))
}
) |
as.matrix.process_matrix <- function(x, rownames=NULL, rownames.value=NULL, ...) {
if (is.factor(x$antecedent) && is.factor(x$consequent)) {
labels <- sort(union(levels(x$antecedent),levels(x$consequent)))
} else {
labels <- sort(unique(c(as.character(x$antecedent), as.character(x$consequent))))
}
x$antecedent <- factor(x$antecedent, labels)
x$consequent <- factor(x$consequent, labels)
m <- stats::xtabs(formula = n ~ antecedent + consequent,
data = x,
subset = NULL)
m <- matrix(m, nrow = length(labels), ncol = length(labels))
colnames(m) <- labels
rownames(m) <- labels
names(dimnames(m)) <- c("antecedent", "consequent")
m
} |
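# Usage sketch for as.matrix.process_matrix() above; the toy edge list and the
# "process_matrix" class tag are assumptions made only for illustration.
edges <- data.frame(antecedent = c("A", "A", "B"),
                    consequent = c("B", "C", "C"),
                    n = c(2, 1, 5))
class(edges) <- c("process_matrix", "data.frame")  # so as.matrix() dispatches to the method above
as.matrix(edges)                                   # 3 x 3 antecedent-by-consequent count matrix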
`fun.RPRS.lm` <-
function (data, rs.init = c(-1.5, 1.5), leap = 3, FUN = "runif.sobol",no=10000)
{
RPRS <- fun.fit.gl.v.lm(a=rs.init[1], b=rs.init[2], data=data,
fun=fun.auto.perc.rs,no=no, leap = leap, FUN = FUN)$unique.optim.result
RPRS <- fun.fit.gl.v.lma(RPRS[1], RPRS[2], RPRS[3], RPRS[4],
data, "rs")
return(RPRS)
} |
tam_max_abs <- function( list1, list2, label )
{
res <- max( abs( list1[[ label ]] - list2[[ label ]]), na.rm=TRUE )
return(res)
} |
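# Usage sketch for tam_max_abs(): maximum absolute difference of one named element
# across two lists (toy values below).
l1 <- list(beta = c(0.2, 0.5), xsi = 1)
l2 <- list(beta = c(0.1, 0.9), xsi = 2)
tam_max_abs(l1, l2, label = "beta")   # 0.4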
ffs_rates <- function(m1 = NULL, m2 = NULL, M = NULL, method = "dennett"){
if(method == "dennett"){
P <- m1
diag(P) <- 0
PS <- P/sum(P)
x <- M * PS
}
if(method == "rogers-von-rabenau"){
g <- solve(a = m1, b = m2)
pop_expose <- matrix(colSums(m1), nrow = nrow(m1), ncol = ncol(m1))
x <- g * pop_expose
diag(x) <- 0
}
dn <- dimnames(m1)
names(dn) <- c("orig", "dest")
dimnames(x) <- dn
return(x)
} |
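# Usage sketch for ffs_rates() with the "dennett" method; the 2 x 2 flow matrix and the
# total M are made-up values for illustration only.
m1 <- matrix(c(0, 10, 5, 0), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))
ffs_rates(m1 = m1, M = 100, method = "dennett")   # distributes M over the off-diagonal shares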
blandPowerCurve <- function(samplesizes = seq(10, 100, 1),
mu = 0,
SD,
delta,
conf.level = .95,
agree.level = .95){
if(length(mu) > 1) {
stop("aLength of mu cannot be greater than 1")
}
if(length(SD) > 1) {
stop("Length of SD cannot be greater than 1")
}
alpha = 1-conf.level
gamma = 1-agree.level
final_dat = data.frame()
dat_grid = expand.grid(conf.level,agree.level,delta)
colnames(dat_grid) = c("conf.level","agree.level","delta")
for(i in 1:nrow(dat_grid)){
LOA <- estimateLimitsOfAgreement(mu = mu,
SD = SD,
agree.level = dat_grid$agree.level[i])
df <- lapply(samplesizes, function(this_n) {
LOA %>%
estimateConfidenceIntervals(n = this_n,
conf.level = dat_grid$conf.level[i]) %>%
estimateTypeIIerror(delta = dat_grid$delta[i]) %>%
estimatePowerFromBeta() %>%
unlist(recursive = FALSE) %>%
as.data.frame()
})
result <- do.call(rbind, df) %>%
select(CI.n,LOA.mu,LOA.SD,beta.delta,
power.power) %>%
rename(mu = LOA.mu,
SD = LOA.SD,
N = CI.n,
delta = beta.delta,
power = power.power) %>%
mutate(agree.level = dat_grid$agree.level[i],
conf.level = dat_grid$conf.level[i])
final_dat = rbind(final_dat,result)
}
final_dat = final_dat %>%
mutate(power = ifelse(power < 0, 0,power))
class(final_dat) <- list("powerCurve","data.frame")
return(final_dat)
} |
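# Usage sketch for blandPowerCurve() (assumes the SimplyAgree package, which provides the
# estimateLimitsOfAgreement()/estimateConfidenceIntervals() helpers used above, is attached);
# the SD and delta values are illustrative.
library(SimplyAgree)
pc <- blandPowerCurve(samplesizes = seq(10, 100, 5), mu = 0.5, SD = 2.5, delta = 8,
                      conf.level = 0.95, agree.level = 0.95)
head(pc)   # power by sample size for the requested agreement bound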
`%>>%` <- pipe_op |
NULL
setGeneric("runLog", function(scenario) standardGeneric("runLog"))
setMethod("runLog", signature(scenario = "character"), function(scenario) {
return(SyncroSimNotFound(scenario))
})
setMethod("runLog", signature(scenario = "Scenario"), function(scenario) {
tt <- command(list(list = NULL, runlog = NULL, lib = .filepath(scenario), sid = .scenarioId(scenario)), .session(scenario))
if (grepl("The scenario is not a result scenario", tt[1], fixed = TRUE)) {
tt <- tt[1]
return(tt)
}
outString <- paste(tt, collapse = "\n")
writeLines(outString)
return(outString)
}) |
context("updateCheckboxGroupButtons")
test_that("Send message", {
session <- as.environment(list(
ns = identity,
getCurrentTheme = function() NULL,
sendInputMessage = function(inputId, message) {
session$lastInputMessage = list(id = inputId, message = message)
}
))
updateCheckboxGroupButtons(session = session, inputId = "idawcbsw", choices = c("A", "B", "C"), status = "primary")
resultCBGB <- session$lastInputMessage
expect_null(resultCBGB$message$selected)
expect_true(grepl('"idawcbsw"', resultCBGB$message$options))
expect_true(grepl('primary', resultCBGB$message$options))
})
test_that("Works in modules", {
createModuleSession <- function(moduleId) {
session <- as.environment(list(
ns = shiny::NS(moduleId),
getCurrentTheme = function() NULL,
sendInputMessage = function(inputId, message) {
session$lastInputMessage = list(id = inputId, message = message)
}
))
session
}
session <- createModuleSession("modA")
updateCheckboxGroupButtons(session = session, inputId = "idawcbsw", choices = c("A", "B", "C"), status = "primary")
resultCBGB <- session$lastInputMessage
expect_equal(object = resultCBGB$id, expected = "idawcbsw")
expect_true(grepl('"modA-idawcbsw"', resultCBGB$message$options))
}) |
context("test-augmentlhs")
test_that("augment works", {
expect_error(augmentLHS(randomLHS(10, 4), NA))
expect_error(augmentLHS(randomLHS(10, 4), NaN))
expect_error(augmentLHS(randomLHS(10, 4), Inf))
set.seed(1976)
temp <- randomLHS(10, 4)
temp[1,1] <- NA
expect_error(augmentLHS(temp, 5))
set.seed(1976)
temp <- randomLHS(10, 4)
temp[1,1] <- 2
expect_error(augmentLHS(temp, 5))
set.seed(1976)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(4, 2), 4)))
set.seed(1977)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(3, 3), 3)))
set.seed(1977)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(4, 1), 2)))
temp <- randomLHS(7, 2)
temp <- augmentLHS(temp, 1)
expect_equal(nrow(temp), 8)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(7, 2), 7)))
expect_true(checkLatinHypercube(augmentLHS(randomLHS(10, 5), 10)))
expect_error(augmentLHS(c(1,2), 5))
expect_error(augmentLHS(randomLHS(10,3), c(5,9)))
expect_error(augmentLHS(randomLHS(10,3), -1))
expect_error(augmentLHS(randomLHS(10,3), 2.2))
A <- augmentLHS(randomLHS(1,4), 1)
expect_true(checkLatinHypercube(A))
}) |
tar_workspace <- function(
name,
envir = parent.frame(),
packages = TRUE,
source = TRUE,
script = targets::tar_config_get("script"),
store = targets::tar_config_get("store")
) {
force(envir)
name <- tar_deparse_language(substitute(name))
tar_assert_chr(name)
tar_assert_scalar(name)
workspace <- workspace_read(name = name, path_store = store)
workspace_populate(workspace)
workspace_assign(workspace, envir)
if (packages) {
command <- workspace$target$command
build_load_packages(command$packages, command$library)
}
if (source) {
eval(parse(text = readLines(script)), envir = envir)
}
set.seed(workspace$target$command$seed)
invisible()
} |
.r_to_gdal_datatype <- function(x) {
if (nchar(x) == 0 || is.na(x)) return("")
xout <- toupper(x[1L])
xout <- c("RAW" = "Byte", "INTEGER" = "Int32", "DOUBLE" = "Float64", "NUMERIC" = "Float64",
"BYTE" = "Byte",
"UINT16" = "Int32", "INT16" = "Int32",
"UINT32" = "Int32", "INT32" = "Int32",
"FLOAT32" = "Float32", "FLOAT64" = "Float64")[xout]
if (is.na(xout)) {
message(sprintf("unknown 'band_output_type = \'%s\'', ignoring", x))
xout <- ""
}
xout
}
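# Quick illustration of the mapping above; unknown types fall back to "" with a message.
.r_to_gdal_datatype("integer")   # "Int32"
.r_to_gdal_datatype("Float32")   # "Float32"
.r_to_gdal_datatype("complex")   # "" (with a message)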
vapour_read_raster <- function(x, band = 1, window, resample = "nearestneighbour", ..., sds = NULL, native = FALSE, set_na = TRUE, band_output_type = "") {
band_output_type <- .r_to_gdal_datatype(band_output_type)
datasourcename <- sds_boilerplate_checks(x, sds = sds)
resample <- tolower(resample)
if (!resample %in% c("nearestneighbour", "average", "bilinear", "cubic", "cubicspline",
"gauss", "lanczos", "mode")) {
warning(sprintf("resample mode '%s' is unknown", resample))
}
ri <- vapour_raster_info(x, sds = sds)
if (native && !missing(window)) warning("'window' is specified, so 'native = TRUE' is ignored")
if (native && missing(window)) window <- c(0, 0, ri$dimXY, ri$dimXY)
if (!is.numeric(band) || length(band) < 1 || anyNA(band) || any(band < 1)) {
stop("'band' must be one or more integers, all greater than 0")
}
if (any(band > ri$bands)) stop(sprintf("specified 'band = %i', but maximum band number is %i", band, ri$bands))
stopifnot(length(window) %in% c(4L, 6L))
if (length(window) == 4L) window <- c(window, window[3:4])
if (any(window[1:2] < 0)) stop("window cannot index lower than 0")
if (any(window[1:2] > (ri$dimXY-1))) stop("window offset cannot index higher than grid dimension")
if (any(window[3:4] < 1)) stop("window size cannot be less than 1")
if (any((window[1:2] + window[3:4]) > ri$dimXY)) stop("window size cannot exceed grid dimension")
if (any(window[5:6] < 1)) stop("requested output dimension cannot be less than 1")
vals <- lapply(band, function(iband) {
raster_io_gdal_cpp(dsn = datasourcename, window = window, band = iband, resample = resample[1L], band_output_type = band_output_type)[[1L]]
})
if (set_na && !is.raw(vals[[1L]][1L])) {
for (i in seq_along(vals)) {
vals[[i]][vals[[i]] == ri$nodata_value] <- NA
}
}
names(vals) <- sprintf("Band%i",band)
vals
}
vapour_read_raster_raw <- function(x, band = 1,
window,
resample = "nearestneighbour", ...,
sds = NULL, native = FALSE, set_na = TRUE) {
if (length(band) > 1) message("_raw output implies one band, using only the first")
vapour_read_raster(x, band = band, window = window, resample = resample, ..., sds = sds,
native = native, set_na = set_na, band_output_type = "Byte")[[1L]]
}
vapour_read_raster_int <- function(x, band = 1,
window,
resample = "nearestneighbour", ...,
sds = NULL, native = FALSE, set_na = TRUE) {
if (length(band) > 1) message("_int output implies one band, using only the first")
vapour_read_raster(x, band = band, window = window, resample = resample, ..., sds = sds,
native = native, set_na = set_na, band_output_type = "Int32")[[1L]]
}
vapour_read_raster_dbl <- function(x, band = 1,
window,
resample = "nearestneighbour", ...,
sds = NULL, native = FALSE, set_na = TRUE) {
if (length(band) > 1) message("_dbl output implies one band, using only the first")
vapour_read_raster(x, band = band, window = window, resample = resample, ..., sds = sds,
native = native, set_na = set_na, band_output_type = "Float64")[[1L]]
}
vapour_read_raster_chr <- function(x, band = 1,
window,
resample = "nearestneighbour", ...,
sds = NULL, native = FALSE, set_na = TRUE) {
if (length(band) == 2 || length(band) > 4) message("_chr output implies one, three or four bands ...")
if (length(band) == 2L) band <- band[1L]
if (length(band) > 4) band <- band[1:4]
bytes <- vapour_read_raster(x, band = band, window = window, resample = resample, ..., sds = sds,
native = native, set_na = set_na, band_output_type = "Byte")
as.vector(grDevices::as.raster(array(unlist(bytes, use.names = FALSE), c(length(bytes[[1]]), 1, max(c(3, length(bytes)))))))
}
vapour_read_raster_hex <- function(x, band = 1,
window,
resample = "nearestneighbour", ...,
sds = NULL, native = FALSE, set_na = TRUE) {
vapour_read_raster_chr(x, band = band, window = window, resample = resample, sds = sds,
native = native, set_na = set_na, ...)
}
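# Usage sketch for the readers above (assumes the vapour package is installed and that its
# bundled example raster "sst.tif" is present in extdata, as in the package documentation):
f <- system.file("extdata", "sst.tif", package = "vapour", mustWork = TRUE)
v <- vapour_read_raster(f, band = 1, window = c(0, 0, 10, 10))      # 10 x 10 block from the top-left
str(v)                                                              # named list: $Band1, 100 values
d <- vapour_read_raster_dbl(f, band = 1, window = c(0, 0, 10, 10))  # same block, forced to double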
vapour_warp_raster <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
source_geotransform = 0.0, geotransform = NULL,
band_output_type = "",
warp_options = "",
transformation_options = "") {
band_output_type <- .r_to_gdal_datatype(band_output_type)
args <- list(...)
if (projection == "" && "wkt" %in% names(args)) {
projection <- args$wkt
message("please use 'projection = ' rather than 'wkt = ', use of 'wkt' is deprecated and will be removed in a future version")
}
if (is.numeric(bands) && any(bands < 1)) stop("all 'bands' index must be >= 1")
if (is.null(bands)) bands <- 0
if(!is.numeric(bands)) stop("'bands' must be numeric (integer), start at 1")
bands <- as.integer(bands)
if(!is.numeric(extent)) {
if (isS4(extent)) {
extent <- c(extent@xmin, extent@xmax, extent@ymin, extent@ymax)
} else if (is.matrix(extent)) {
extent <- extent[c(1, 3, 2, 4)]
} else {
stop("'extent' must be numeric 'c(xmin, xmax, ymin, ymax)'")
}
}
if(!length(extent) == 4L) stop("'extent' must be of length 4")
if (any(diff(extent)[c(1, 3)] == 0)) stop("'extent' expected to be 'c(xmin, xmax, ymin, ymax)', zero x or y range not permitted")
if (length(source_extent) > 1 && any(diff(source_extent)[c(1, 3)] == 0)) stop("'extent' expected to be 'c(xmin, xmax, ymin, ymax)', zero x or y range not permitted")
if (!all(diff(extent)[c(1, 3)] > 0)) message("'extent' expected to be 'c(xmin, xmax, ymin, ymax)', negative values detected (ok for expert use)")
if (length(source_extent) > 1 && !all(diff(source_extent)[c(1, 3)] > 0)) message("'extent' expected to be 'c(xmin, xmax, ymin, ymax)', negative values detected (ok for expert use)")
dud_dimension <- FALSE
if (is.null(dimension) && nchar(projection) < 1) {
dud_dimension <- TRUE
dimension <- c(2, 2)
}
if(!is.numeric(dimension)) stop("'dimension' must be numeric")
if(!length(dimension) == 2L) stop("'dimension' must be of length 2")
if(!all(dimension > 0)) stop("'dimension' values must be greater than 0")
if(!all(is.finite(dimension))) stop("'dimension' values must be finite and non-missing")
if (dud_dimension) dimension <- 0
if(!(length(source_geotransform) == 1 && source_geotransform == 0.0)) message("'source_geotransform' is deprecated and now ignored, please use 'extent'")
if (length(source_extent) > 1) {
if (!is.numeric(source_extent)) {
stop("'source_extent' must be numeric, of length 4 c(xmin, xmax, ymin, ymax)")
}
if (!all(is.finite(source_extent))) stop("'source_extent' values must be finite and non missing")
}
if (!is.null(geotransform)) message("'geotransform' is deprecated and now ignored, please use 'extent'")
if(!is.null(source_wkt)) {
if (!is.character(source_wkt)) stop("source_wkt must be character")
if(!silent) {
if(!nchar(source_wkt) > 10) message("short 'source_wkt', possibly invalid?")
}
}
if (!silent) {
if(!nchar(projection) > 0) message("target 'projection' not provided, read will occur from source in native projection")
}
if (is.null(source_wkt)) source_wkt <- ""
resample <- tolower(resample[1L])
if (resample == "gauss") {
warning("Gauss resampling not available for warper, using NearestNeighbour")
resample <- "near"
}
rso <- c("near", "bilinear", "cubic", "cubicspline", "lanczos", "average",
"mode", "max", "min", "med", "q1", "q3", "sum")
if (!resample %in% rso) {
warning(sprintf("%s resampling not available for warper, using near", resample))
resample <- "near"
}
warp_options <- warp_options[!is.na(warp_options)]
if (length(warp_options) < 1) warp_options <- ""
transformation_options <- transformation_options[!is.na(transformation_options)]
if (length(transformation_options) < 1) transformation_options <- ""
vals <- warp_in_memory_gdal_cpp(x, source_WKT = source_wkt,
target_WKT = projection,
target_extent = as.numeric(extent),
target_dim = as.integer(dimension),
bands = as.integer(bands),
source_extent = as.numeric(source_extent),
resample = resample,
silent = silent,
band_output_type = band_output_type,
warp_options = warp_options,
transformation_options = transformation_options)
if (length(bands) == 1 && bands == 0) {
bands <- seq_along(vals)
}
names(vals) <- make.names(sprintf("Band%i",bands), unique = TRUE)
vals
}
vapour_warp_raster_raw <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
warp_options = "",
transformation_options = "") {
if (length(bands) > 1 ) message("_raw output implies one band, ignoring all but the first")
vapour_warp_raster(x,
bands = bands,
extent = extent,
dimension = dimension,
projection = projection,
set_na = set_na,
source_wkt = source_wkt,
source_extent = source_extent,
resample = resample,
silent = silent,
band_output_type = "Byte",
warp_options = warp_options,
transformation_options = transformation_options, ...)[[1L]]
}
vapour_warp_raster_int <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
warp_options = "",
transformation_options = "") {
if (length(bands) > 1 ) message("_int output implies one band, ignoring all but the first")
vapour_warp_raster(x,
bands = bands,
extent = extent,
dimension = dimension,
projection = projection,
set_na = set_na,
source_wkt = source_wkt,
source_extent = source_extent,
resample = resample,
silent = silent,
band_output_type = "Int32",
warp_options = warp_options,
transformation_options = transformation_options, ...)[[1L]]
}
vapour_warp_raster_dbl <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
warp_options = "",
transformation_options = "") {
if (length(bands) > 1 ) message("_dbl output implies one band, ignoring all but the first")
vapour_warp_raster(x,
bands = bands,
extent = extent,
dimension = dimension,
projection = projection,
set_na = set_na,
source_wkt = source_wkt,
source_extent = source_extent,
resample = resample,
silent = silent,
band_output_type = "Float64",
warp_options = warp_options,
transformation_options = transformation_options, ...)[[1L]]
}
vapour_warp_raster_chr <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
warp_options = "",
transformation_options = "") {
if (length(bands) == 2 || length(bands) > 4) message("_chr output implies one, three or four bands ...")
if (length(bands) == 2L) bands <- bands[1L]
if (length(bands) > 4) bands <- bands[1:4]
bytes <- vapour_warp_raster(x,
bands = bands,
extent = extent,
dimension = dimension,
projection = projection,
set_na = set_na,
source_wkt = source_wkt,
source_extent = source_extent,
resample = resample,
silent = silent,
band_output_type = "Byte",
warp_options = warp_options,
transformation_options = transformation_options, ...)
as.vector(grDevices::as.raster(array(unlist(bytes, use.names = FALSE), c(length(bytes[[1]]), 1, max(c(3, length(bytes)))))))
}
vapour_warp_raster_hex <- function(x, bands = 1L,
extent = NULL,
dimension = NULL,
projection = "",
set_na = TRUE,
source_wkt = NULL,
source_extent = 0.0,
resample = "near",
silent = TRUE, ...,
warp_options = "",
transformation_options = "") {
vapour_warp_raster_chr(x,
bands = bands,
extent = extent,
dimension = dimension,
projection = projection,
set_na = set_na,
source_wkt = source_wkt,
source_extent = source_extent,
resample = resample,
silent = silent,
warp_options = warp_options,
transformation_options = transformation_options, ...)
} |
focal_metrics <- function(x,
window,
metrics,
progress,
...) {
if (any(dim(window) %% 2 == 0)) {
stop("The window must have uneven sides.", call. = FALSE)
}
if (class(x)[1] == "matrix") {
x <- raster(x)
}
number_metrics <- length(metrics)
points <- landscapemetrics::raster_to_points(x)[, 2:4]
n_row <- nrow(window)
n_col <- ncol(window)
warning_messages <- character(0)
result <- withCallingHandlers(expr = {lapply(seq_along(metrics), function(current_metric) {
if (progress) {
cat("\r> Progress metrics: ", current_metric, "/", number_metrics)
}
raster::focal(x = x, w = window, fun = function(x) {
.calculate_met_focal(landscape = x,
n_row = n_row,
n_col = n_col,
points = points,
what = metrics[[current_metric]],
...)},
pad = TRUE, padValue = NA)
})},
warning = function(cond) {
warning_messages <<- c(warning_messages, conditionMessage(cond))
invokeRestart("muffleWarning")})
names(result) <- metrics
if (progress) {cat("\n")}
if (length(warning_messages) > 0) {
warning_messages <- unique(warning_messages)
lapply(warning_messages, function(x){ warning(x, call. = FALSE)})
}
return(result)
}
.calculate_met_focal <- function(landscape,
n_row,
n_col,
points,
what,
...) {
raster_window <- matrix(landscape, n_row, n_col)
foo <- get(what, mode = "function")
arguments <- names(formals(foo))[-1]
arguments_provided <- substitute(...())
arguments_values <- list(raster_window)
if (!is.null(arguments_provided)) {
arguments_provided <- arguments_provided[order(names(arguments_provided))]
arguments[arguments %in% names(arguments_provided)] <- arguments_provided
if ("..." %in% arguments & "na.rm" %in% names(arguments_provided)) {
arguments <- c(arguments, arguments_provided[which(names(arguments_provided) == "na.rm")])
}
if ("..." %in% arguments) {
dots_ind <- which(arguments == "...")
arguments <- arguments[-dots_ind]
}
arguments_values <- c(arguments_values, arguments)
}
result <- do.call(what = foo,
args = arguments_values)
return(result)
} |
context("write_problem")
test_that("correct result (lp format)", {
skip_on_cran()
skip_if_not_installed("Rsymphony")
cost <- raster::raster(matrix(c(1, 2, 2, NA), ncol = 4))
locked_in <- 2
locked_out <- 1
features <- raster::stack(raster::raster(matrix(c(2, 1, 1, 0), ncol = 4)),
raster::raster(matrix(c(10, 10, 10, 10), ncol = 4)))
p <- problem(cost, features) %>%
add_min_set_objective() %>%
add_absolute_targets(c(2, 10)) %>%
add_locked_in_constraints(locked_in) %>%
add_locked_out_constraints(locked_out) %>%
add_default_solver(gap = 0, verbose = FALSE)
path <- tempfile(fileext = ".lp")
write_problem(p, path)
expect_true(file.exists(path))
expect_true(any(grepl("Subject To", readLines(path), fixed = TRUE)))
})
test_that("correct result (mps format)", {
skip_on_cran()
skip_if_not_installed("Rsymphony")
cost <- raster::raster(matrix(c(1, 2, 2, NA), ncol = 4))
locked_in <- 2
locked_out <- 1
features <- raster::stack(raster::raster(matrix(c(2, 1, 1, 0), ncol = 4)),
raster::raster(matrix(c(10, 10, 10, 10), ncol = 4)))
p <- problem(cost, features) %>%
add_min_set_objective() %>%
add_absolute_targets(c(2, 10)) %>%
add_locked_in_constraints(locked_in) %>%
add_locked_out_constraints(locked_out) %>%
add_default_solver(gap = 0, verbose = FALSE)
path <- tempfile(fileext = ".mps")
write_problem(p, path)
expect_true(file.exists(path))
expect_true(any(grepl("ROWS", readLines(path), fixed = TRUE)))
})
test_that("invalid inputs", {
skip_on_cran()
skip_if_not_installed("Rsymphony")
cost <- raster::raster(matrix(c(1, 2, 2, NA), ncol = 4))
locked_in <- 2
locked_out <- 1
features <- raster::stack(raster::raster(matrix(c(2, 1, 1, 0), ncol = 4)),
raster::raster(matrix(c(10, 10, 10, 10), ncol = 4)))
p <- problem(cost, features) %>%
add_absolute_targets(c(2, 10)) %>%
add_locked_in_constraints(locked_in) %>%
add_locked_out_constraints(locked_out)
expect_error({
p %>%
add_min_set_objective()
write_problem(path = tempfile(fileext = ".asdf"))
})
expect_error({
p %>%
write_problem(p, tempfile(fileext = ".lp"))
})
}) |
context("spec (responses): 1.34")
test_sample_responses("1.34") |
excel_export <- function(x, file, table_names=as.character(1:length(x)), ...) {
if(class(x)=="data.frame") x <- list(x)
names(x) <- table_names
writexl::write_xlsx(x, file)
} |
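# Usage sketch for excel_export() (requires the writexl package); built-in data frames and a
# temporary file are used for illustration.
tmp <- tempfile(fileext = ".xlsx")
excel_export(list(head(cars), head(iris)), file = tmp, table_names = c("cars", "iris"))
file.exists(tmp)   # TRUE: one worksheet per table name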
my.pPropType.lognormal_RW <- function(n.G, log.phi.Obs, phi.Curr,
p.Curr, hp.param){
nu.Phi.Curr <- p.Curr[2]
sigma.Phi.Curr <- p.Curr[3]
log.phi.Curr <- log(phi.Curr)
p.DrawScale <- .cubfitsEnv$all.DrawScale$p[1]
p.DrawScale.prev <- .cubfitsEnv$all.DrawScale$p.prev[1]
if(.CF.CONF$estimate.Phi.noise)
{
sigmaW.Curr <-
sqrt(1 / rgamma(1, shape = (n.G - 1) / 2,
rate = sum((log.phi.Obs - log.phi.Curr)^2) / 2))
}else{
sigmaW.Curr <- p.Curr[1]
}
proplist <- my.proposesigmaPhi.RW_Norm(
sigma.Phi.Curr,
sigma.Phi.DrawScale = p.DrawScale,
sigma.Phi.DrawScale.prev = p.DrawScale.prev)
list.Curr <- list(nu.Phi = nu.Phi.Curr, sigma.Phi = sigma.Phi.Curr)
ret <- my.drawRestrictHP(proplist, list.Curr, phi.Curr)
my.update.acceptance("p", ret$accept)
my.update.adaptive("p", ret$accept)
ret <- c(sigmaW.Curr, ret$nu.Phi, ret$sigma.Phi)
ret
} |
cr_rpt_names <- function() {
return(c("rpt_rec_num",
"prvdr_ctrl_type_cd",
"prvdr_num",
"npi",
"rpt_stus_cd",
"fy_bgn_dt",
"fy_end_dt",
"proc_dt",
"initl_rpt_sw",
"last_rpt_sw",
"trnsmtl_num",
"fi_num",
"adr_vndr_cd",
"fi_creat_dt",
"util_cd",
"npr_dt",
"spec_ind",
"fi_rcpt_dt"))
} |
collpcm.get.MKL.latent.positions <- function( nw, beta, latentpos )
{
sample <- length( beta )
n <- nw$call$Y$gal$n
d <- nw$call$d
ExY <- matrix( 0 , nrow=n, ncol=n )
for( l in 1:sample )
{
if( d > 1 ) Xl <- latentpos[,,l] else Xl <- latentpos[,l]
eta <- beta[l] - as.matrix( dist(Xl) )
ExY <- ExY + 1 / ( 1 + exp(-eta) )
}
ExY <- ExY/sample
nw$EofY <- ExY
optim.control <- list( fnscale=-1, maxit=1000 )
start.val <- c( mean(beta), as.vector( nw$sample$Xref ) )
optim.ret <- try( optim( par = start.val, fn = collpcm.get.llike, gr = collpcm.get.grad.llike, nw, method = "L-BFGS-B", lower=rep(-Inf,n*d+1), control=optim.control ), silent = TRUE )
if( inherits( optim.ret, "try-error" ) ){
warning("\n BFGS did not converge to find MKL positions\n")
return( NULL )
}
retlist <- list()
retlist[[ "beta" ]] <- optim.ret$par[1]
if( d > 1 )
retlist[[ "X" ]] <- matrix( optim.ret$par[2:(n*d+1)], nrow=n, ncol=d )
else
retlist[[ "X" ]] <- optim.ret$par[2:(n+1)]
if( d > 1 )
{
m <- colMeans( retlist$X )
retlist$X <- t( apply( retlist$X, 1, function(z) {z-m} ) )
}else{
retlist$X <- retlist$X - mean( retlist$X )
}
return( retlist )
}
|
parse_eff_tox_dose_finding_outcomes <- function(outcome_string) {
if(outcome_string == '') return(list())
valid_str_match <- '^\\s*(\\d+[ETNB]+\\s*)+$'
cohort_str_match <- '\\d+[ETNB]+'
dl_str_match <- '\\d+'
outcomes_match_str <- '[ETNB]+'
cohorts <- list()
cohort_id <- 1
if(stringr::str_detect(outcome_string, valid_str_match)) {
cohort_strs <- stringr::str_extract_all(
outcome_string, cohort_str_match)[[1]]
for(cohort_str in cohort_strs) {
c_dl <- as.integer(stringr::str_extract(cohort_str, dl_str_match))
if(c_dl <= 0) stop('Dose-levels must be strictly positive integers.')
c_outcomes <- stringr::str_extract(cohort_str, outcomes_match_str)
cohorts[[cohort_id]] <- list(dose = c_dl, outcomes = c_outcomes)
cohort_id <- cohort_id + 1
}
} else {
stop(paste0('"', outcome_string, '" is not a valid outcome string.
A valid example is "1N 2NE 3BB 2NT"'))
}
cohorts
} |
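# Usage sketch for parse_eff_tox_dose_finding_outcomes() (requires stringr); the outcome
# string is illustrative: a dose-level digit followed by E/T/N/B outcomes for each cohort.
parse_eff_tox_dose_finding_outcomes('1NNE 2T 3BB')
# list of cohorts, e.g. the first element is list(dose = 1, outcomes = "NNE")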
nichedispl <-
function (P1,P2 = NULL, D = NULL, q1 = NULL, q2 = NULL, mode="multiple", Np1 = NULL, Np2=NULL, Nq1 = NULL, Nq2 = NULL,nboot = 1000, alpha = 0.05) {
MODES <- c("single", "multiple", "pairwise")
mode <- match.arg(mode, MODES)
if(mode =="multiple" || mode =="single") {
if(is.null(P2)) stop("P2 cannot be null in mode 'multiple' or 'single'")
if (!inherits(P1, "data.frame") || !inherits(P2, "data.frame")) stop("P1 and P2 should be dataframes")
if (mode == "multiple" && (nrow(P1) != nrow(P2))) stop("Resource use dataframes do not have the same number of rows")
if (ncol(P1) != ncol(P2)) stop("Resource use dataframes do not have the same number of columns (resources)")
if (!is.null(Np1) && !is.null(Np2)) {
if (!inherits(Np1, "numeric") || !inherits(Np2, "numeric")) stop("Np1 and Np2 should be numeric vectors")
Np1 = as.integer(Np1)
Np2 = as.integer(Np2)
}
if((!is.null(Nq1) && is.null(Nq2)) || (is.null(Nq1) && !is.null(Nq2))) stop("Nq1 and Nq2 should be both either NULL or contain numeric values")
if (!is.null(Nq1) && !is.null(Nq2)) {
if (!inherits(Nq1, "numeric") || !inherits(Nq2, "numeric")) stop("Nq1 and Nq2 should be numeric")
Nq1 = as.integer(Nq1)
Nq2 = as.integer(Nq2)
}
if (!is.null(D)) {
if (!inherits(D, "dist"))
stop("Object of class 'dist' expected for distance")
D <- as.matrix(D)
if (ncol(P1) != nrow(D)) stop("The number of columns in P1 must be equal to the number of items in D")
if (ncol(P2) != nrow(D)) stop("The number of columns in P2 must be equal to the number of items in D")
D <- as.dist(D)
}
} else if(mode=="pairwise") {
if (!inherits(P1, "data.frame")) stop("P1 should be a dataframe")
if (!is.null(D)) {
if (!inherits(D, "dist")) stop("Object of class 'dist' expected for distance")
D <- as.matrix(D)
if (ncol(P1) != nrow(D)) stop("The number of columns in P1 must be equal to the number of items in D")
D <- as.dist(D)
}
}
if (!is.null(q1)) {
if (length(q1) != ncol(P1)) stop("The number of items in q must be equal to the number of columns in P1 and P2")
q1 = q1/sum(q1)
} else {
q1 = rep(1/ncol(P1), ncol(P1))
}
if (!is.null(q2)) {
if (length(q2) != ncol(P2)) stop("The number of items in q must be equal to the number of columns in P1 and P2")
q2 = q2/sum(q2)
} else {
q2 = rep(1/ncol(P2), ncol(P2))
}
if (is.null(D)) D <- as.dist((matrix(1, ncol(P1), ncol(P1)) - diag(rep(1, ncol(P1)))))
nichevar1<-function(f, D) {
if (is.na(sum(f))) v <- NA
else if (sum(f) < 1e-16) v <- 0
else v <- (f %*% (as.matrix(D)^2) %*% f)/(2*(sum(f)^2))
return(v)
}
getF<-function(p,q=NULL) {
if(!is.null(q)) {
a = p/q
return(a/sum(a))
} else {
return(p/sum(p))
}
}
disp1<-function(f1,f2, D) {
if (is.na(sum(f1)) || is.na(sum(f2))) cd <- NA
else if (sum(f1) < 1e-16 || sum(f2) < 1e-16) cd <- 0
else {
cd <- sqrt(((f1 %*% (as.matrix(D)^2) %*% f2)/(sum(f1)*sum(f2))) - nichevar1(f1,D)-nichevar1(f2,D))
}
return(cd)
}
if(mode=="multiple") {
if((!is.null(Np1) && !is.null(Np2))) nc = 3
else nc = 1
CD <- as.data.frame(matrix(NA,nrow=nrow(P1), ncol=nc))
for(i in 1:nrow(P1)) rownames(CD)[i] <- paste(row.names(P1)[i],"vs",row.names(P2)[i])
for (i in 1:nrow(P1)) {
pi1 = as.numeric(P1[i,])
pi2 = as.numeric(P2[i,])
CD[i,1] = disp1(getF(pi1,q1),getF(pi2,q2),D)
if(!is.null(Np1) && !is.null(Np2)) {
BCD = vector("numeric",length=nboot)
if(!is.na(sum(pi1)) && !is.na(sum(pi2))) {
bsamp1 = rmultinom(nboot,Np1[i],getF(pi1))
bsamp2 = rmultinom(nboot,Np2[i],getF(pi2))
if(!is.null(Nq1) && !is.null(Nq2)) {
qsamp1 = rmultinom(nboot,Nq1,q1)
qsamp2 = rmultinom(nboot,Nq2,q2)
}
for(b in 1:nboot) {
if(!is.null(Nq1) && !is.null(Nq2)) BCD[b] = disp1(getF(bsamp1[,b], qsamp1[,b]),getF(bsamp2[,b], qsamp2[,b]),D)
else BCD[b] = disp1(getF(bsamp1[,b], q1),getF(bsamp2[,b], q2),D)
}
BCD = BCD[!is.na(BCD)]
z0 = qnorm(sum(BCD<CD[i,1])/length(BCD))
lj = floor(length(BCD)*pnorm(2*z0+qnorm(alpha/2)))
uj = floor(length(BCD)*pnorm(2*z0+qnorm(1-(alpha/2))))
if(lj > 0 && uj > 0 && lj!=uj) {
sbcd = sort(BCD)
CD[i,2] = sbcd[lj]
CD[i,3] = sbcd[uj]
}
}
}
}
if(nc==1) names(CD) <- "CD"
else names(CD) <- c("CD","LC", "UC")
return(CD)
}
if(mode=="single") {
CD <- as.data.frame(matrix(NA,nrow=1, ncol=3))
rownames(CD) <- "Centr.Displ."
CD[1, 1] <- disp1(getF(colSums(P1, na.rm=TRUE), q1),getF(colSums(P2, na.rm=TRUE), q2),D)
BCD = vector("numeric",length=nboot)
if(!is.null(Nq1) && !is.null(Nq2)) {
qsamp1 = rmultinom(nboot,Nq1,q1)
qsamp2 = rmultinom(nboot,Nq2,q2)
}
for (b in 1:nboot) {
p1samp = colSums(P1[sample(1:nrow(P1),replace=TRUE),], na.rm=TRUE)
p2samp = colSums(P2[sample(1:nrow(P2),replace=TRUE),], na.rm=TRUE)
if(!is.null(Nq1) && !is.null(Nq2)) BCD[b] = disp1(getF(p1samp, qsamp1[,b]),getF(p2samp, qsamp2[,b]),D)
else BCD[b] = disp1(getF(p1samp, q1),getF(p2samp, q2),D)
}
BCD = BCD[!is.na(BCD)]
z0 = qnorm(sum(BCD<CD[1,1])/length(BCD))
lj = floor(length(BCD)*pnorm(2*z0+qnorm(alpha/2)))
uj = floor(length(BCD)*pnorm(2*z0+qnorm(1-(alpha/2))))
if(lj > 0 && uj > 0 && lj!=uj) {
sbcd = sort(BCD)
CD[1,2] = sbcd[lj]
CD[1,3] = sbcd[uj]
}
names(CD) <- c("CD", "LC", "UC")
return(CD)
}
if(mode=="pairwise") {
CD <- matrix(0, nrow = nrow(P1), ncol = nrow(P1))
rownames(CD)<-rownames(P1)
colnames(CD)<-rownames(P1)
if(!is.null(Np1)) {
LC <- CD
UC <- CD
}
for (i in 1:(nrow(P1)-1)) {
for (j in (i+1):nrow(P1)) {
pi = as.numeric(P1[i, ])
pj = as.numeric(P1[j, ])
CD[i,j] <- disp1(getF(pi, q1), getF(pj, q1), D)
CD[j,i] <- CD[i,j]
if (!is.null(Np1)) {
BCD = vector("numeric", length = nboot)
if (!is.na(sum(pi)) && !is.na(sum(pj))) {
bsampi = rmultinom(nboot, Np1[i], getF(pi))
bsampj = rmultinom(nboot, Np1[j], getF(pj))
if (!is.null(Nq1)) qsamp1 = rmultinom(nboot, Nq1, q1)
for (b in 1:nboot) {
if (!is.null(Nq1)) BCD[b] = disp1(getF(bsampi[, b], qsamp1[, b]), getF(bsampj[, b], qsamp1[, b]), D)
else BCD[b] = disp1(getF(bsampi[, b], q1), getF(bsampj[, b], q1), D)
}
BCD = BCD[!is.na(BCD)]
z0 = qnorm(sum(BCD < CD[i,j])/length(BCD))
lj = floor(length(BCD) * pnorm(2 * z0 + qnorm(alpha/2)))
uj = floor(length(BCD) * pnorm(2 * z0 + qnorm(1 - (alpha/2))))
if (lj > 0 && uj > 0 && lj != uj) {
sbcd = sort(BCD)
LC[i, j] = LC[j, i] =sbcd[lj]
UC[i, j] = UC[j, i] =sbcd[uj]
}
}
}
}
}
if(!is.null(Np1)) {
return(list(CD=CD, LC=LC, UC = UC))
}
else return(CD)
}
} |
test_that("long families are wrapped", {
out <- roc_proc_text(rd_roclet(), "
long_function_name_________________________1 <- function() {}
long_function_name_________________________2 <- function() {}
long_function_name_________________________3 <- function() {}
long_function_name_________________________4 <- function() {}
")[[1]]
seealso <- out$get_value("seealso")
expect_true(grepl("^Other Long family name:", seealso))
expect_equal(str_count(seealso, "\n"), 3)
})
test_that("special names escaped in family tag", {
out <- roc_proc_text(rd_roclet(), "
f <- function() {}
'%+%' <- function(a, b) {}
")[[1]]
seealso <- out$get_value("seealso")
expect_true(grepl("^Other Long family name:", seealso))
expect_match(seealso, "\\\\%\\+\\\\%")
})
test_that("family links to name only, not all aliases", {
out <- roc_proc_text(rd_roclet(), "
f <- function() {}
g <- function() {}
")[[1]]
seealso <- out$get_value("seealso")
expect_true(grepl("^Other many aliases:", seealso))
expect_equal(str_count(seealso, fixed("\\code{\\link")), 1)
})
test_that("families listed in same order as input", {
out <- roc_proc_text(rd_roclet(), "
foo <- function() {}
bar <- function() {}
baz <- function() {}
")[[2]]
seealso <- out$get_value("seealso")
expect_match(seealso[1], "^Other b")
expect_match(seealso[2], "^Other a")
})
test_that("only functions get () suffix", {
out <- roc_proc_text(rd_roclet(), "
foo <- function() {}
bar <- 1:10
")
expect_equal(out[[1]]$get_value("seealso"), "Other a: \n\\code{\\link{bar}}")
expect_equal(out[[2]]$get_value("seealso"), "Other a: \n\\code{\\link{foo}()}")
})
test_that("family also included in concepts", {
out <- roc_proc_text(rd_roclet(), "
foo <- function() {}
")[[1]]
expect_equal(out$get_value("concept"), "a")
})
test_that("custom family prefixes can be set", {
owd <- setwd(tempdir())
on.exit(setwd(owd), add = TRUE)
roxy_meta_set("rd_family_title", list(a = "Custom prefix: "))
out <- roc_proc_text(rd_roclet(), "
foo <- function() {}
bar <- function() {}
")[[1]]
expect_match(out$get_value("seealso"), "^Custom prefix:")
}) |
renderYTraces = function(opt.paths, over.time = "dob") {
opt.paths = ensureVector(opt.paths, cl = "OptPath", n = 1L, names = "opt.path")
assertList(opt.paths, types = "OptPath", min.len = 1L, names = "unique")
assertChoice(over.time, choices = c("dob", "exec.time"))
run.name = names(opt.paths)
y.name = NULL
minimize = NULL
data = data.frame()
fronts = lapply(seq_along(opt.paths), function(j) {
run = opt.paths[[j]]
name = names(as.data.frame(run, include.x = FALSE, include.rest = FALSE, stringsAsFactors = TRUE))
if (j == 1L) {
y.name <<- name
minimize <<- run$minimize
}
if (length(name) != 1L) {
stopf("Must always have 1 objective in opt path. But found: %i", length(name))
}
if (!y.name == name) {
stopf("Must always have the same objective in opt path: %s (first ones taken). But found here: %s",
y.name, name)
}
if (!minimize == run$minimize) {
stopf("Must always have the same 'minimize' settings for objective in opt path: %s (first one taken).
But found here: %s", minimize, run$minimize)
}
if (over.time == "dob") {
df = data.frame(
y = getOptPathY(op = run),
time = getOptPathDOB(op = run),
stringsAsFactors = TRUE
)
}
if (over.time == "exec.time") {
times = seq(0, sum(getOptPathExecTimes(run)), length.out = 128)
df = getOptPathColAtTimes(run, times)
df = df[, c(y.name, "time")]
}
cbind(df, .algo = run.name[j], stringsAsFactors = TRUE)
})
data = do.call(rbind, fronts)
mean.data = aggregate(data[[y.name]], by = list(data$time, data$.algo), FUN = mean)
names(mean.data) = c("time", ".algo", y.name)
pl = ggplot2::ggplot(data, ggplot2::aes_string(x = "time", y = y.name, group = ".algo",
linetype = ".algo", col = ".algo"))
if (over.time == "dob") {
pl = pl + ggplot2::geom_point(size = 3)
}
pl = pl + ggplot2::geom_line(data = mean.data, size = 1)
return(pl)
}
plotYTraces = function(opt.paths, over.time = "dob") {
print(renderYTraces(opt.paths, over.time))
return(invisible(NULL))
} |
batchExpandGrid = function(reg, fun, ..., more.args = list()) {
checkRegistry(reg, strict = TRUE, writeable = TRUE)
assertFunction(fun)
args = list(...)
ns = names(args)
if (length(args) == 0L)
return(invisible(integer(0L)))
if(!all(vlapply(args, is.vector)))
stop("All args in '...' must be vectors!")
checkMoreArgs(more.args)
reserved = c("KEEP.OUT.ATTRS", "stringsAsFactors")
if (any(reserved %in% ns))
stopf("You cannot use the reserved arg names %s in ... args!", collapse(reserved))
args$KEEP.OUT.ATTRS = FALSE
args$stringsAsFactors = FALSE
grid = do.call(expand.grid, args)
if (is.null(ns))
colnames(grid) = NULL
do.call(batchMap, c(as.list(grid), list(reg = reg, fun = fun, more.args = more.args)))
return(setRowNames(grid, as.character(getJobIds(reg))))
} |
mixest2 <- function(y,x,mods=NULL,ftype=NULL,V=NULL,W=NULL,atype=NULL,Tvar=NULL)
{
if (is.null(atype)) { atype <- 0 }
if (atype==0)
{
out <- .mixest2a(y=y,x=x,mods=mods,ftype=ftype,V=V,W=W,Tvar=Tvar)
}
else
{
out <- .mixest2b(y=y,x=x,mods=mods,ftype=ftype,V=V,W=W,Tvar=Tvar)
}
return(out)
}
|
library(RSNNS)
basePath <- ("./")
data(snnsData)
inputs <- snnsData$spirals.pat[,inputColumns(snnsData$spirals.pat)]
outputs <- snnsData$spirals.pat[,outputColumns(snnsData$spirals.pat)]
snnsObject <- SnnsRObjectFactory()
snnsObject$setLearnFunc('RBF-DDA')
snnsObject$setUpdateFunc('Topological_Order')
snnsObject$setUnitDefaults(0,0,1,0,1,'Act_Logistic','Out_Identity')
snnsObject$createNet(c(2,2), fullyConnectedFeedForward = FALSE)
patset <- snnsObject$createPatSet(inputs, outputs)
snnsObject$setCurrPatSet(patset$set_no)
snnsObject$shufflePatterns(TRUE)
snnsObject$DefTrainSubPat()
snnsObject$saveNet(paste(basePath,"rbfDDA_spiralsSnnsR_untrained.net",sep=""),"rbfDDA_spiralsSnnsR_untrained")
parameters <- c(0.4, 0.2, 5)
res <- snnsObject$learnAllPatterns(parameters)
predictions <- snnsObject$predictCurrPatSet("output", c(0))
p <- encodeClassLabels(predictions, method="WTA", l=0, h=0)
t <- encodeClassLabels(outputs)
confusionMatrix(t,p)
snnsObject$saveNet(paste(basePath,"rbfDDA_spiralsSnnsR.net",sep=""),"rbfDDA_spiralsSnnsR")
snnsObject$saveNewPatterns(paste(basePath,"rbfDDA_spiralsSnnsR.pat",sep=""), patset$set_no); |
library(cort)
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
fig.width = 7,
fig.height = 7
)
set.seed(1)
data("LifeCycleSavings")
pseudo_data <- (apply(LifeCycleSavings,2,rank,ties.method="max")/(nrow(LifeCycleSavings)+1))
pairs(pseudo_data,lower.panel=NULL)
(cop <- cbCopula(x = pseudo_data,m = 5,pseudo = TRUE))
simu <- rCopula(n = 1000,copula = cop)
pairs(rbind(simu,pseudo_data),
col=c(rep("black",nrow(simu)),rep("red",nrow(pseudo_data))),
gap=0,
lower.panel=NULL,cex=0.5) |
slashTerms <-
function (x)
{
if (!("/" %in% all.names(x)))
return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
} |
suppressMessages(library(optparse))
suppressMessages(library(DirichletMultinomial))
suppressMessages(library(TailRank))
suppressMessages(library(doMC))
arguments <- parse_args(OptionParser(usage = "%prog [options] counts_file", description="LeafCutter outlier splicing command line tool. Required inputs:\n <counts_file>: Intron usage counts file. Must be .txt or .txt.gz, output from clustering pipeline.\n",option_list=list(
make_option(c("-o","--output_prefix"), default = "leafcutter_outlier", help="The prefix for the output files,<prefix>_pVals.txt (containing p-value for each intron), <prefix>_clusterPvals.txt (containing p-value for each cluster) and <prefix>_effSize.txt (containing the effect sizes for each intron) [default %default]"),
make_option(c("-s","--max_cluster_size"), default=50, help="Don't test clusters with more introns than this [default %default]"),
make_option(c("-c","--min_coverage"), default=20, help="Require min_samples_per_group samples in each group to have at least this many reads [default %default]"),
make_option(c("-t","--timeout"), default=30, help="Maximum time (in seconds) allowed for a single optimization run [default %default]"),
make_option(c("-p","--num_threads"), default=1, help="Number of threads to use [default %default]"))),
positional_arguments = 1)
opt=arguments$opt
counts_file <- arguments$args[1]
cat("Loading counts from",counts_file,"\n")
if (!file.exists(counts_file)) stop("File ",counts_file," does not exist")
df <- read.table(counts_file, header=T, check.names = F)
introns <- rownames(df)
counts <- as.matrix(df)
if (opt$num_threads >1){
registerDoMC(opt$num_threads)
}
cat("Settings:\n")
print(opt)
combineClusterPvals <- function(pVals){
pVals <- as.numeric(pVals)
return(pbeta(min(pVals),1,length(pVals),lower.tail = TRUE))
}
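# Hedged illustration (hypothetical p-values, not from the source): combining k
# intron p-values via the Beta(1, k) CDF of their minimum, i.e. 1 - (1 - min(p))^k.
# combineClusterPvals(c(0.01, 0.2, 0.5))   # ~0.0297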
computeRightPvalBB <- function(count,totalCount,alpha,beta){
if(count>0){
val <- 1-pbb(count-1,totalCount,alpha,beta)
}else{
val <- 1
}
val <- max(.Machine$double.eps,min(val,1-.Machine$double.eps))
return(val)
}
computeLeftPvalBB <- function(count,totalCount,alpha,beta){
val <- pbb(count,totalCount,alpha,beta)
val <- max(.Machine$double.eps,min(val,1-.Machine$double.eps))
return(val)
}
compute_effectSize_Pvalue <- function(count,totalCount,alphaEst,index,sumAlphaEst,clustMean){
pRight <- computeRightPvalBB(count,totalCount,alphaEst[index],sumAlphaEst-alphaEst[index])
pLeft <- computeLeftPvalBB(count,totalCount,alphaEst[index],sumAlphaEst-alphaEst[index])
pVal <- min(1,2*min(pRight,pLeft))
testVal <- (count/totalCount)
effectSize <- testVal - clustMean[index]
return(c(pVal=pVal,effectSize=effectSize,testVal=testVal))
}
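# Hedged illustration (hypothetical values, assuming TailRank::pbb as loaded
# above): two-sided beta-binomial p-value and effect size for one junction.
# compute_effectSize_Pvalue(count = 15, totalCount = 20, alphaEst = c(2, 8),
#                           index = 1, sumAlphaEst = 10, clustMean = c(0.2, 0.8))
# returns pVal (two-sided, capped at 1), effectSize = 15/20 - 0.2, testVal = 0.75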
runOutlierAnalysisCluster <- function(dataMat,alphaEst){
cluID <- strsplit(colnames(dataMat)[1],":")[[1]][4]
effectSizesMat <- dataMat
pValsMat <- dataMat
testMat <- dataMat
sumAlphaEst <- sum(alphaEst)
clustMean <- alphaEst/sumAlphaEst
for(sample in seq_along(rownames(dataMat))){
totalCount <- sum(dataMat[sample,])
for(junction in seq_along(colnames(dataMat))){
val <- compute_effectSize_Pvalue(dataMat[sample,junction],totalCount,alphaEst,junction,sumAlphaEst,clustMean)
effectSizesMat[sample,junction] <- val["effectSize"]
pValsMat[sample,junction] <- val["pVal"]
testMat[sample,junction] <- val["testVal"]
}
}
combinedClustPval <- as.data.frame(apply(pValsMat,1,combineClusterPvals))
colnames(combinedClustPval) <- cluID
return(list(effectSizesMat=effectSizesMat,pValsMat=pValsMat,combinedClustPval=combinedClustPval,
clustMean=clustMean,testMat=testMat))
}
processCluster <- function(dataMat){
dataMat <- dataMat+1
dm_fit <- DirichletMultinomial::dmn(dataMat,1)
res <- runOutlierAnalysisCluster(dataMat,dm_fit@fit$Estimate)
}
writeResToFile <- function(results,output_prefix,verboseOutput=F){
write.table( t(results[[1]]$pValsMat),
paste0(output_prefix,"_pVals.txt"), quote=F, col.names = T, row.names = T, sep="\t")
write.table( t(results[[1]]$effectSizesMat),
paste0(output_prefix,"_effSize.txt"), quote=F, col.names = T, row.names = T, sep="\t")
write.table( t(results[[1]]$combinedClustPval),
paste0(output_prefix,"_clusterPvals.txt"), quote=F, col.names = T, row.names = T, sep="\t")
if(verboseOutput){
write.table( results[[1]]$clustMean,
paste0(output_prefix,"_controlMean.txt"), quote=F, col.names = F, row.names = T, sep="\t")
write.table( t(results[[1]]$testMat),
paste0(output_prefix,"_test.txt"), quote=F, col.names = T, row.names = T, sep="\t")
}
for (index in setdiff(seq_along(results),1)){
write.table( t(results[[index]]$pValsMat),
paste0(output_prefix,"_pVals.txt"), quote=F, col.names = F, row.names = T, sep="\t", append = T)
write.table( t(results[[index]]$effectSizesMat),
paste0(output_prefix,"_effSize.txt"), quote=F, col.names = F, row.names = T, sep="\t", append = T)
write.table( t(results[[index]]$combinedClustPval),
paste0(output_prefix,"_clusterPvals.txt"), quote=F, col.names = F, row.names = T, sep="\t", append = T)
if(verboseOutput){
write.table( results[[index]]$clustMean,
paste0(output_prefix,"_controlMean.txt"), quote=F, col.names = F, row.names = T, sep="\t", append = T)
write.table( t(results[[index]]$testMat),
paste0(output_prefix,"_test.txt"), quote=F, col.names = F, row.names = T, sep="\t", append = T)
}
}
}
processAllClusters <- function(counts,output_prefix="leafcutter_outlier",max_cluster_size=10,min_coverage=20,timeout=10){
splitIntrons <- do.call(rbind,strsplit(rownames(counts),":"))
cluster_ids <- paste(splitIntrons[,1],splitIntrons[,4],sep = ":")
cluster_sizes <- as.data.frame(table(cluster_ids))
clu_names <- as.character(cluster_sizes$cluster_ids)
cluster_sizes <- cluster_sizes$Freq
names(cluster_sizes) <- clu_names
results <- foreach (cluster_name=clu_names, .errorhandling = "remove") %dopar% {
if (cluster_sizes[cluster_name] > max_cluster_size)
stop("Too many introns in cluster")
junctions_in_cluster=which(cluster_ids==cluster_name)
if (length(junctions_in_cluster) <= 1)
stop("<=1 junction in cluster")
cluster_counts <- t(counts[ junctions_in_cluster, ])
sample_totals <- rowSums(cluster_counts)
if (sum(sample_totals>=min_coverage)<=1)
stop("<=1 sample with coverage>min_coverage")
res <- R.utils::withTimeout( {
processCluster(cluster_counts)
}, timeout=timeout, onTimeout="error" )
res$cluster_name <- cluster_name
res
}
writeResToFile(results,output_prefix)
results
}
cat("Running outlier splicing analysis...\n")
results <- processAllClusters(counts,
output_prefix=opt$output_prefix,
max_cluster_size=opt$max_cluster_size,
min_coverage=opt$min_coverage,
timeout=opt$timeout)
cat("Finished outlier splicing analysis.\n") |
expected <- eval(parse(text="structure(c(\" 0.228763\", \"-0.000666\", \"\", \"0.08909\", \"0.00426\", \"\", \"0.08899\", \"0.00426\", \"\", \"6.59\", \"0.02\", \"6.02\", \"1.00\", \"1.00\", \"3.06\", \"0.01\", \"0.88\", \"0.12\"), .Dim = c(3L, 6L), .Dimnames = list(c(\"male\", \"tt(agechf), linear\", \"tt(agechf), nonlin\"), c(\"coef\", \"se(coef)\", \"se2\", \"Chisq\", \"DF\", \"p\")))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(\" 0.228763\", \"-0.000666\", \"\", \"0.08909\", \"0.00426\", \"\", \"0.08899\", \"0.00426\", \"\", \"6.59\", \"0.02\", \"6.02\", \"1.00\", \"1.00\", \"3.06\", \"0.01\", \"0.88\", \"0.12\"), .Dim = c(3L, 6L), .Dimnames = list(c(\"male\", \"tt(agechf), linear\", \"tt(agechf), nonlin\"), c(\"coef\", \"se(coef)\", \"se2\", \"Chisq\", \"DF\", \"p\"))), c(\"male\", \"tt(agechf), linear\", \"tt(agechf), nonlin\"), c(\"coef\", \"se(coef)\", \"se2\", \"Chisq\", \"DF\", \"p\"), FALSE, FALSE, NULL)"));
.Internal(prmatrix(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]], argv[[6]]));
}, o=expected); |
compute_iid_decomposition<-function(t,n,cause,F01t,St,weights,T,delta,marker,MatInt0TcidhatMCksurEff){
Cases<-(T< t & delta==cause)
Controls_1<-(T> t)
Controls_2<-(T< t & delta!=cause & delta!=0)
if(sum(Controls_2)>0){
compute_iid_decomposition_competing_risks(t,n,cause,F01t,St,weights,T,delta,marker,MatInt0TcidhatMCksurEff)
}else{
compute_iid_decomposition_survival(t,n,cause,F01t,St,weights,T,delta,marker,MatInt0TcidhatMCksurEff)
}
} |
if (!isGeneric("add")) {
setGeneric("add", function(x,w,echo,...)
standardGeneric("add"))
}
setMethod('add', signature(x='list',w='character'),
function(x,w='sdm',echo=TRUE,...) {
if (missing(w)) w <- 'sdm'
if (missing(echo) || !is.logical(echo)) echo <- TRUE
if (w %in% c('sdm','sdmMethod','model','sdmCorrelativeMethod')) {
m <- do.call('.create.sdmCorrelativeMethod',x)
if (inherits(m,"sdmCorrelativeMethod")) {
.sdmMethods$addMethod(m,echo)
      } else stop('The method is not added to the sdmMethods container!')
}
}
)
if (!isGeneric("getmethod")) {
setGeneric("getmethod", function(x,w,...)
standardGeneric("getmethod"))
}
setMethod('getmethod', signature(x='character'),
function(x,w,...) {
if (missing(w)) w <- 'sdm'
if (w == 'sdm') {
if (!.sdmOptions$getOption('sdmLoaded')) .addMethods()
x <- .methodFix(x)
if (!is.na(x)) .sdmMethods$Methods[[x]]
else stop('the specified method does not exist!')
}
}
)
if (!isGeneric("getmethodNames")) {
setGeneric("getmethodNames", function(w,...)
standardGeneric("getmethodNames"))
}
setMethod('getmethodNames', signature(w='ANY'),
function(w,alt,...) {
if (missing(w)) w <- 'sdm'
if (missing(alt)) alt <- TRUE
if (!.sdmOptions$getOption('sdmLoaded')) .addMethods()
if (w == 'sdm') .sdmMethods$getMethodNames(alt=alt)
}
)
.addMethods <- function() {
methodInfo <- NULL
n <- .sdmMethods$getMethodNames(alt=FALSE)
lst <- list.files(system.file("methods/sdm", package="sdm"),pattern='R$',full.names = TRUE)
for (l in lst) {
source(l,local=TRUE)
pkg <- methodInfo$packages
pkg <- pkg[!pkg == '.tmp']
if (!methodInfo$name[1] %in% n && all(.is.installed(pkg))) {
try(add(x=methodInfo,'sdm',echo=FALSE),silent = TRUE)
}
}
.sdmOptions$addOption('sdmLoaded',TRUE)
}
.create.sdmCorrelativeMethod <- function(name,packages=NULL,modelTypes=NULL,fitParams,fitSettings=NULL,settingRules=NULL,fitFunction,predictParams=NULL,predictSettings=NULL,predictFunction=NULL,tuneParams=NULL,metadata=NULL,...) {
m <- new('sdmCorrelativeMethod',name=name[1])
if (length(name) > 1) m@aliases <- name[2:length(name)]
Installed <- TRUE
if (!is.null(packages) && is.character(packages)) {
m@packages <- packages
w <- .is.installed(m@packages)
if (!all(w)) Installed <- FALSE
else {
for (i in seq_along(m@packages)) w[i] <- require(m@packages[i],character.only=TRUE)
}
} else m@packages <- NULL
if (!is.null(modelTypes)) {
modelTypes <- tolower(modelTypes)
for (i in 1:length(modelTypes)) {
if (modelTypes[i] %in% c('po','presenceonly','presence-only','presence')) {
modelTypes[i] <- 'po'
} else if (modelTypes[i] %in% c('pa','presenceabsence','presence-absence')) {
modelTypes[i] <- 'pa'
} else if (modelTypes[i] %in% c('pb','presenceb','presence-background','presence-pseudo','presence-pseudoabsence','ppa','psa')) {
modelTypes[i] <- 'pb'
} else if (modelTypes[i] %in% c('ab','abundance')) {
modelTypes[i] <- 'ab'
} else if (modelTypes[i] %in% c('n','nominal','multinominal')) {
modelTypes[i] <- 'n'
} else {
        warning(paste('modelType',modelTypes[i],'is unknown; it will be ignored!'))
modelTypes[i] <- NA
}
}
m@modelTypes <- modelTypes
}
if (is.list(fitParams)) {
n <- names(fitParams)
if (is.null(n)) stop('fitParams is not appropriately defined; example: list(formula="standard.formula",data="sdmDataFrame")')
m@fitParams <- fitParams
} else stop('fitParams should be a list')
if (!is.null(fitSettings)) {
if (!is.list(fitSettings)) stop('fitSettings should be a list!')
n <- names(fitSettings)
if (is.null(n)) stop('fitSettings is not appropriately defined; example: list(family=link(binomial),ntrees=1000)')
if ('' %in% n) {
w <- which(n == '')
for (ww in w) {
if (is.character(n[ww])) names(fitSettings)[ww] <- n[w]
else stop('fitSettings is not appropriately defined; example: list(family=link(binomial),ntrees=1000)')
}
}
m@fitSettings <- fitSettings
}
if (!is.null(settingRules)) {
if (!is.function(settingRules)) stop('settingRules should be a function!')
m@settingRules <- settingRules
}
if (class(fitFunction) == "character") {
if (length(strsplit(fitFunction,'::')[[1]]) == 2) fitFunction <- strsplit(fitFunction,'::')[[1]][2]
if (exists(fitFunction,mode='function')) {
if (environmentName(environment(get(fitFunction))) == "R_GlobalEnv") {
if (is.null([email protected])) [email protected] <- new.env()
assign(fitFunction,get(fitFunction),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
}
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fitFunction,",params)}")))
} else if (length(strsplit(fitFunction,':::')[[1]]) == 2 && class(eval(parse(text=fitFunction))) == 'function') {
if (!exists(strsplit(fitFunction,':::')[[1]][2],mode='function')) {
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(fitFunction,':::')[[1]][2],",params)}")))
} else {
if (is.null([email protected])) [email protected] <- new.env()
assign(strsplit(fitFunction,':::')[[1]][2],eval(parse(text=fitFunction)),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(fitFunction,':::')[[1]][2],",params)}")))
}
} else if (!Installed) {
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fitFunction,",params)}")))
} else stop('fitFunction cannot be identified!')
} else if (class(fitFunction) == 'function') {
if (is.null([email protected])) [email protected] <- new.env()
if (!paste(m@name,'_fit',sep='') %in% ls([email protected])) fn <-paste(m@name,'_fit',sep='')
else stop('the user defined function in fitFunction cannot be registered because an object with a similar name exists in the container!')
assign(fn,fitFunction,envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fn,",params)}")))
} else stop('fitFunction cannot be identified!')
if (!is.null(predictFunction)) {
if (class(predictFunction) == "character") {
if (length(strsplit(predictFunction,'::')[[1]]) == 2) predictFunction <- strsplit(predictFunction,'::')[[1]][2]
if (exists(predictFunction,mode='function')) {
if (environmentName(environment(get(predictFunction))) == "R_GlobalEnv") {
if (is.null([email protected])) [email protected] <- new.env()
assign(predictFunction,get(predictFunction),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
}
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",predictFunction,",params)}")))
} else if (length(strsplit(predictFunction,':::')[[1]]) == 2 && class(eval(parse(text=predictFunction))) == 'function') {
if (!exists(strsplit(predictFunction,':::')[[1]][2],mode='function')) {
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(predictFunction,':::')[[1]][2],",params)}")))
} else {
if (is.null([email protected])) [email protected] <- new.env()
assign(strsplit(predictFunction,':::')[[1]][2],eval(parse(text=predictFunction)),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(predictFunction,':::')[[1]][2],",params)}")))
}
} else if (!Installed) {
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",predictFunction,",params)}")))
} else stop('predictFunction cannot be identified!')
} else if (class(predictFunction) == 'function') {
if (is.null([email protected])) [email protected] <- new.env()
if (!paste(m@name,'_predict',sep='') %in% ls([email protected])) fn <-paste(m@name,'_predict',sep='')
else stop('the user defined function in predictFunction cannot be registered because an object with a similar name exists in the container!')
assign(fn,predictFunction,envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",fn,",params)}")))
} else stop('predictFunction cannot be identified!')
if (!is.null(predictParams)) {
if (is.list(predictParams)) {
n <- names(predictParams)
if (is.null(n)) stop('predictParams is not appropriately defined; example: list(newdata="sdmDataFrame")')
m@predictParams <- predictParams
} else stop('predictParams should be a list')
}
if (!is.null(predictSettings)) {
if (is.list(predictSettings)) m@predictSettings <- predictSettings
else stop('predictSettings should be a list!')
}
}
if (!is.null(tuneParams)) {
if (!is.list(tuneParams)) stop('tuneParams should be a list; example: list(ntrees=seq(500,3000,by=200))')
n <- names(tuneParams)
if (is.null(n)) stop('tuneParams is not appropriately defined; example: list(ntrees=seq(500,3000,by=200))')
m@tuneParams <- tuneParams
}
if (inherits(metadata,'.Metadata')) m@metadata <- metadata
else m@metadata <- .newMetadata(...)
m
}
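# Hedged note (not from the package docs): a minimal method definition passed to
# add(x = <list>, w = 'sdm') needs at least a name, a named fitParams list and a
# fitFunction; packages, modelTypes, predict*, tuneParams and metadata are
# optional. A hypothetical sketch:
# add(x = list(name = 'myglm',
#              fitParams = list(formula = 'standard.formula', data = 'sdmDataFrame'),
#              fitFunction = function(formula, data) glm(formula, data = data, family = binomial())),
#     w = 'sdm', echo = FALSE)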
.update.sdmCorrelativeMethod <- function(m,...) {
name=NULL;packages=NULL;modelTypes=NULL;fitParams=NULL;fitSettings=NULL;settingRules=NULL;fitFunction=NULL;predictParams=NULL;predictSettings=NULL;predictFunction=NULL;tuneParams=NULL;metadata=NULL
dot <- list(...)
n <- tolower(names(dot))
for (i in seq_along(n)) {
if (any(!is.na(pmatch(c("nam"),n[i])))) name <- dot[[i]]
else if (any(!is.na(pmatch(c("pac"),n[i])))) packages <- dot[[i]]
else if (any(!is.na(pmatch(c("mod"),n[i])))) modelTypes <- dot[[i]]
else if (any(!is.na(pmatch(c("fits"),n[i])))) fitSettings <- dot[[i]]
else if (any(!is.na(pmatch(c("fitp"),n[i])))) fitParams <- dot[[i]]
else if (any(!is.na(pmatch(c("set"),n[i])))) settingRules <- dot[[i]]
else if (any(!is.na(pmatch(c("fitf"),n[i])))) fitFunction <- dot[[i]]
else if (any(!is.na(pmatch(c("predicts"),n[i])))) predictSettings <- dot[[i]]
else if (any(!is.na(pmatch(c("predictp"),n[i])))) predictParams <- dot[[i]]
else if (any(!is.na(pmatch(c("predictf"),n[i])))) predictFunction <- dot[[i]]
else if (any(!is.na(pmatch(c("tun"),n[i])))) tuneParams <- dot[[i]]
else if (any(!is.na(pmatch(c("met"),n[i])))) metadata <- dot[[i]]
}
if (length(name) > 1) m@aliases <- name[2:length(name)]
Installed <- TRUE
if (!is.null(packages) && is.character(packages)) {
m@packages <- packages
w <- .is.installed(m@packages)
if (!all(w)) Installed <- FALSE
} else m@packages <- NULL
if (!is.null(modelTypes)) {
modelTypes <- tolower(modelTypes)
for (i in 1:length(modelTypes)) {
if (modelTypes[i] %in% c('po','presenceonly','presence-only','presence')) {
modelTypes[i] <- 'po'
} else if (modelTypes[i] %in% c('pa','presenceabsence','presence-absence')) {
modelTypes[i] <- 'pa'
} else if (modelTypes[i] %in% c('pb','presenceb','presence-background','presence-pseudo','presence-pseudoabsence','ppa','psa')) {
modelTypes[i] <- 'pb'
} else if (modelTypes[i] %in% c('ab','abundance')) {
modelTypes[i] <- 'ab'
} else if (modelTypes[i] %in% c('n','nominal','multinominal')) {
modelTypes[i] <- 'n'
} else {
        warning(paste('modelType',modelTypes[i],'is unknown; it will be ignored!'))
modelTypes[i] <- NA
}
}
m@modelTypes <- modelTypes
}
if (is.list(fitParams)) {
n <- names(fitParams)
if (is.null(n)) stop('fitParams is not appropriately defined; example: list(formula="standard.formula",data="sdmDataFrame")')
m@fitParams <- fitParams
} else stop('fitParams should be a list')
if (!is.null(fitSettings)) {
if (!is.list(fitSettings)) stop('fitSettings should be a list!')
n <- names(fitSettings)
if (is.null(n)) stop('fitSettings is not appropriately defined; example: list(family=link(binomial),ntrees=1000)')
if ('' %in% n) {
w <- which(n == '')
for (ww in w) {
if (is.character(n[ww])) names(fitSettings)[ww] <- n[w]
else stop('fitSettings is not appropriately defined; example: list(family=link(binomial),ntrees=1000)')
}
}
m@fitSettings <- fitSettings
}
if (!is.null(settingRules)) {
if (!is.function(settingRules)) stop('settingRules should be a function!')
m@settingRules <- settingRules
}
if (class(fitFunction) == "character") {
if (length(strsplit(fitFunction,'::')[[1]]) == 2) fitFunction <- strsplit(fitFunction,'::')[[1]][2]
if (exists(fitFunction,mode='function')) {
if (environmentName(environment(get(fitFunction))) == "R_GlobalEnv") {
if (is.null([email protected])) [email protected] <- new.env()
assign(fitFunction,get(fitFunction),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
}
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fitFunction,",params)}")))
} else if (length(strsplit(fitFunction,':::')[[1]]) == 2 && class(eval(parse(text=fitFunction))) == 'function') {
if (!exists(strsplit(fitFunction,':::')[[1]][2],mode='function')) {
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(fitFunction,':::')[[1]][2],",params)}")))
} else {
if (is.null([email protected])) [email protected] <- new.env()
assign(strsplit(fitFunction,':::')[[1]][2],eval(parse(text=fitFunction)),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(fitFunction,':::')[[1]][2],",params)}")))
}
} else if (!Installed) {
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fitFunction,",params)}")))
} else stop('fitFunction cannot be identified!')
} else if (class(fitFunction) == 'function') {
if (is.null([email protected])) [email protected] <- new.env()
if (!paste(m@name,'.fit',sep='') %in% ls([email protected])) fn <-paste(m@name,'.fit',sep='')
else stop('the user defined function in fitFunction cannot be registered because an object with a similar name exists in the container!')
assign(fn,fitFunction,envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@fitFunction <- eval(parse(text=paste("function(params) {do.call(",fn,",params)}")))
} else stop('fitFunction cannot be identified!')
if (!is.null(predictFunction)) {
if (class(predictFunction) == "character") {
if (length(strsplit(predictFunction,'::')[[1]]) == 2) predictFunction <- strsplit(predictFunction,'::')[[1]][2]
if (exists(predictFunction,mode='function')) {
if (environmentName(environment(get(predictFunction))) == "R_GlobalEnv") {
if (is.null([email protected])) [email protected] <- new.env()
assign(predictFunction,get(predictFunction),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
}
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",predictFunction,",params)}")))
} else if (length(strsplit(predictFunction,':::')[[1]]) == 2 && class(eval(parse(text=predictFunction))) == 'function') {
if (!exists(strsplit(predictFunction,':::')[[1]][2],mode='function')) {
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(predictFunction,':::')[[1]][2],",params)}")))
} else {
if (is.null([email protected])) [email protected] <- new.env()
assign(strsplit(predictFunction,':::')[[1]][2],eval(parse(text=predictFunction)),envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",strsplit(predictFunction,':::')[[1]][2],",params)}")))
}
} else if (!Installed) {
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",predictFunction,",params)}")))
} else stop('predictFunction cannot be identified!')
} else if (class(predictFunction) == 'function') {
if (is.null([email protected])) [email protected] <- new.env()
if (!paste(m@name,'.predict',sep='') %in% ls([email protected])) fn <-paste(m@name,'.predict',sep='')
else stop('the user defined function in predictFunction cannot be registered because an object with a similar name exists in the container!')
assign(fn,predictFunction,envir = [email protected])
m@packages <- unique(c(m@packages,'.temp'))
m@predictFunction <- eval(parse(text=paste("function(params) {do.call(",fn,",params)}")))
} else stop('predictFunction cannot be identified!')
if (!is.null(predictParams)) {
if (is.list(predictParams)) {
n <- names(predictParams)
if (is.null(n)) stop('predictParams is not appropriately defined; example: list(newdata="sdmDataFrame")')
m@predictParams <- predictParams
} else stop('predictParams should be a list')
}
if (!is.null(predictSettings)) {
if (is.list(predictSettings)) m@predictSettings <- predictSettings
else stop('predictSettings should be a list!')
}
}
if (!is.null(tuneParams)) {
if (!is.list(tuneParams)) stop('tuneParams should be a list; example: list(ntrees=seq(500,3000,by=200))')
n <- names(tuneParams)
if (is.null(n)) stop('tuneParams is not appropriately defined; example: list(ntrees=seq(500,3000,by=200))')
m@tuneParams <- tuneParams
}
if (inherits(metadata,'.Metadata')) m@metadata <- metadata
else m@metadata <- .newMetadata(...)
m
}
.movEnv <- function(e1,e2) {
n1 <- ls(envir = e1)
for (n in n1) assign(n,e1[[n]],envir = e2)
rm(list=n1,envir = e1)
e2
}
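# Hedged usage sketch (hypothetical environments, not from the source): .movEnv()
# copies every object from e1 into e2 and then removes them from e1.
# e1 <- new.env(); e2 <- new.env(); assign("a", 1, envir = e1)
# .movEnv(e1, e2); ls(e1); ls(e2)   # character(0); "a"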
.movEnv2sdm <- function(e) {
n1 <- ls(envir = e)
for (n in n1) {
if (!exists(n,envir = as.environment("package:sdm"))) assign(n,e[[n]],envir = as.environment("package:sdm"))
}
rm(list=n1,envir = e)
}
.newMetadata <- function(...) {
dot <- list(...)
if (length(dot) > 0) {
w <- unlist(lapply(dot, function(x) inherits(x,'.Metadata')))
if (any(w)) return(dot[[which(w)]])
else {
ndot <- unlist(lapply(tolower(names(dot)),function(x) paste(strsplit(x,'')[[1]][1:3],collapse='')))
w <- ndot %in% c('ful','cre','aut','web','cit','hel','des','dat','lic','url')
if (any(w)) {
m <- new(".Metadata")
w <- which(ndot == 'aut')
if (length(w) > 0) {
if (is.list(dot[[w]])) m@authors <- dot[[w]]
else if (is.character(dot[[w]])) m@authors <- list(dot[[w]])
}
w <- which(ndot == 'cre')
if (length(w) > 0) {
if (is.list(dot[[w]])) m@creators <- dot[[w]]
else if (is.character(dot[[w]])) m@creators <- list(dot[[w]])
}
w <- which(ndot == 'tit')
if (length(w) > 0) m@title <- dot[[w]]
w <- which(ndot == 'url' | ndot == 'web')
if (length(w) > 0) m@url <- dot[[w]]
w <- which(ndot == 'cit')
if (length(w) > 0) m@citations <- dot[[w]]
w <- which(ndot == 'hel')
if (length(w) > 0) m@Help <- dot[[w]]
w <- which(ndot == 'des')
if (length(w) > 0) m@description <- dot[[w]]
w <- which(ndot == 'dat')
if (length(w) > 0) m@date <- dot[[w]]
w <- which(ndot == 'lic')
if (length(w) > 0) m@license <- dot[[w]]
return(m)
}
}
}
}
.sdmMethods <- new('.sdmMethodsContainer') |
HeteroFun <-
function(x1, x2, B, conf=0.95) {
est <- HeteroEstFun(x1, x2)
se <- BootstrapFunMa(x1, x2, B, FunName=HeteroEstFun)
CI <- logCI(x1, x2, est, se, conf)
out <- matrix(c(est, se, CI), nrow = 1)
out <- data.frame(out)
rownames(out) <- c("Heterogeneous(ACE-shared)")
colnames(out) <- c("Estimator", "Est_s.e.",
paste(conf*100, "% Lower"), paste(conf*100, "% Upper"))
return(out)
} |
anova(coag.lm) |
library(hamcrest)
expected <- c(-0x1.4acd9d300fa62p+0 + 0x0p+0i, -0x1.0bbad9fc41bfp-4 + 0x1.01f92c660efe9p+0i,
0x1.19380fd536f4p-8 + 0x1.ab4a73d73f8dbp-5i, 0x1.ad8151a20c9b6p-1 + 0x1.b5b19cdac0bdep-8i,
-0x1.353a0ab8b1389p-1 + 0x1.aa0b22848d24ap-4i, 0x1.2298737f9ce25p-4 + -0x1.1bc28d2d5f8dap-2i,
-0x1.707e1be92e433p-1 + 0x0p+0i, 0x1.2298737f9ce3p-4 + 0x1.1bc28d2d5f8d9p-2i,
-0x1.353a0ab8b1388p-1 + -0x1.aa0b22848d25p-4i, 0x1.ad8151a20c9b6p-1 + -0x1.b5b19cdac0b5ep-8i,
0x1.19380fd536f8p-8 + -0x1.ab4a73d73f8cdp-5i, -0x1.0bbad9fc41bddp-4 + -0x1.01f92c660efeap+0i
)
assertThat(stats:::fft(z=c(-0.126860623154969, -0.10123264214249, -0.434997561740203,
-0.269735619727489, -0.171271465566357, -0.0167997948195955,
-0.40834718228913, 0.0621093646728128, 0.214592699496007, -0.0284288480588249,
-0.0790718949436458, 0.0678436473967385))
, identicalTo( expected, tol = 1e-6 ) ) |
context("Continuous convolution kernel density estimator")
set.seed(5)
dat <- data.frame(
F1 = factor(rbinom(10, 4, 0.1), 0:4),
Z1 = ordered(rbinom(10, 5, 0.5), 0:5),
Z2 = ordered(rpois(10, 1), 0:10),
X1 = rnorm(10),
X2 = rexp(10)
)
set.seed(5)
fit <- cckde(dat)
test_that("Recognizes discrete variables", {
expect_length(attr(fit$x_cc, "i_disc"), 6)
})
test_that("Works with numeric and data.frame input", {
expect_error(cckde(sapply(dat, as.numeric), bw = 0))
})
test_that("bw parameter works", {
expect_error(cckde(dat, bw = 0))
expect_error(cckde(dat, bw = rep(0, 8)))
bw <- rep(0.5, 8)
names(bw) <- expand_names(dat)
new_fit <- cckde(dat, bw = bw)
expect_equal(new_fit$bw, bw)
})
test_that("mult parameter works", {
expect_error(cckde(dat, mult = 0))
set.seed(5)
new_fit <- cckde(dat, mult = 2)
expect_equal(2 * fit$bw, new_fit$bw)
set.seed(5)
new_fit <- cckde(dat, mult = 1:8)
expect_equal(1:8 * fit$bw, new_fit$bw)
})
test_that("Density works", {
expect_error(dcckde(dat, 1))
expect_is(dcckde(dat[1, ], fit), "numeric")
expect_gte(min(dcckde(dat, fit)), 0)
expect_equal(dcckde(dat, fit), predict(fit, dat))
}) |
setGeneric("toString", function(x,...) standardGeneric("toString"))
setMethod("toString", "ANY", base::toString)
setMethod("print", "ANY", base::print)
setMethod("toString", "VTableTree", function(x, widths = NULL, col_gap = 3, linesep = "\u2014") {
mat <- matrix_form(x, indent_rownames = TRUE)
if (is.null(widths)) {
widths <- propose_column_widths(x, mat_form = mat)
}
stopifnot(length(widths) == ncol(mat$strings))
body <- mat$strings
aligns <- mat$aligns
keep_mat <- mat$display
spans <- mat$spans
ri <- mat$row_info
ref_fnotes <- mat$ref_footnotes
nr <- nrow(body)
nl_header <- attr(mat, "nlines_header")
cell_widths_mat <- matrix(rep(widths, nr), nrow = nr, byrow = TRUE)
nc <- ncol(cell_widths_mat)
for (i in seq_len(nrow(body))) {
if (any(!keep_mat[i, ])) {
j <- 1
while (j <= nc) {
nj <- spans[i, j]
j <- if (nj > 1) {
js <- seq(j, j + nj - 1)
cell_widths_mat[i, js] <- sum(cell_widths_mat[i, js]) + col_gap * (nj - 1)
j + nj
} else {
j + 1
}
}
}
}
content <- matrix(mapply(padstr, body, cell_widths_mat, aligns), ncol = ncol(body))
content[!keep_mat] <- NA
gap_str <- strrep(" ", col_gap)
ncchar <- sum(widths) + (length(widths) - 1) * col_gap
div <- substr(strrep(linesep, ncchar), 1, ncchar)
txt_head <- apply(head(content, nl_header), 1, .paste_no_na, collapse = gap_str)
txt_body <- apply(tail(content, -nl_header), 1, .paste_no_na, collapse = gap_str)
allts <- all_titles(x)
titles_txt <- if(any(nzchar(allts))) c(allts, "", div) else NULL
allfoots <- all_footers(x)
footer_txt <- c(if(length(ref_fnotes) > 0) c(div, "", ref_fnotes),
if(any(nzchar(allfoots))) c(div, "", allfoots))
paste0(paste(c(titles_txt, txt_head, div, txt_body, footer_txt), collapse = "\n"), "\n")
})
pad_vert_center <- function(x, len) {
needed <- len - length(x)
    if(needed < 0) stop("got vector already longer than target length; this shouldn't happen")
if(needed > 0) {
bf <- ceiling(needed/2)
af <- needed - bf
x <- c(if(bf > 0) rep("", bf), x, if(af > 0) rep("", af))
}
x
}
pad_vert_top <- function(x, len) {
c(x, rep("", len - length(x)))
}
pad_vert_bottom <- function(x, len) {
c(rep("", len - length(x)), x)
}
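# Hedged examples (hypothetical inputs, not from the source) for the vertical
# padding helpers above:
# pad_vert_top(c("a"), 3)     # "a" ""  ""
# pad_vert_bottom(c("a"), 3)  # ""  ""  "a"
# pad_vert_center(c("a"), 4)  # ""  ""  "a" ""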
pad_vec_to_len <- function(vec, len, cpadder = pad_vert_top, rlpadder = cpadder) {
dat <- unlist(lapply(vec[-1], cpadder, len = len))
dat <- c(rlpadder(vec[[1]], len = len), dat)
matrix(dat, nrow = len)
}
rep_vec_to_len <- function(vec, len, ...) {
matrix(unlist(lapply(vec, rep, times = len)),
nrow = len)
}
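# Hedged examples (hypothetical inputs, not from the source): both helpers turn a
# list of cell values into a len-row character matrix, padding vs. repeating.
# pad_vec_to_len(list(c("a", "b"), "c"), 2)  # 2x2: col 1 = "a","b"; col 2 = "c",""
# rep_vec_to_len(list("a", "b"), 2)          # 2x2: col 1 = "a","a"; col 2 = "b","b"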
safe_strsplit <- function(x, split, ...) {
ret <- strsplit(x, split, ...)
lapply(ret, function(reti) if(length(reti) == 0) "" else reti)
}
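# Hedged example (hypothetical input, not from the source): unlike strsplit(),
# empty strings come back as "" instead of character(0).
# safe_strsplit(c("a\nb", ""), "\n", fixed = TRUE)   # list(c("a", "b"), "")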
.expand_mat_rows_inner <- function(i, mat, row_nlines, expfun, ...) {
leni <- row_nlines[i]
rw <- mat[i,]
if(is.character(rw))
rw <- safe_strsplit(rw, "\n", fixed = TRUE)
expfun(rw, len = leni, ...)
}
expand_mat_rows <- function(mat, row_nlines = apply(mat, 1, nlines), expfun = pad_vec_to_len, ...) {
rinds <- 1:nrow(mat)
exprows <- lapply(rinds, .expand_mat_rows_inner,
mat = mat,
row_nlines = row_nlines,
expfun = expfun,
...)
do.call(rbind, exprows)
}
spans_to_viscell <- function(spans) {
if(!is.vector(spans))
spans <- as.vector(spans)
myrle <- rle(spans)
unlist(mapply(function(vl, ln) rep(c(TRUE, rep(FALSE, vl - 1L)),
times = ln/vl),
SIMPLIFY = FALSE,
vl = myrle$values,
ln = myrle$lengths),
recursive = FALSE)
}
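# Hedged example (hypothetical spans, not from the source): only the first cell
# of each span run is marked as visible.
# spans_to_viscell(c(2, 2, 1))   # TRUE FALSE TRUE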
matrix_form <- function(tt, indent_rownames = FALSE) {
stopifnot(is(tt, "VTableTree"))
header_content <- .tbl_header_mat(tt)
sr <- make_row_df(tt)
body_content_strings <- if (NROW(sr) == 0) {
character()
} else {
cbind(as.character(sr$label), get_formatted_cells(tt))
}
tsptmp <- lapply(collect_leaves(tt, TRUE, TRUE), function(rr) {
sp <- row_cspans(rr)
rep(sp, times = sp)
})
body_spans <- if (nrow(tt) > 0) {
cbind(1L, do.call(rbind, tsptmp))
} else {
matrix(1, nrow = 0, ncol = ncol(tt) + 1)
}
body <- rbind(header_content$body, body_content_strings)
spans <- rbind(header_content$span, body_spans)
row.names(spans) <- NULL
space <- matrix(rep(0, length(body)), nrow = nrow(body))
aligns <- matrix(rep("center", length(body)), nrow = nrow(body))
aligns[, 1] <- "left"
display <- matrix(rep(TRUE, length(body)), ncol = ncol(body))
print_cells_mat <- spans == 1L
if(!all(print_cells_mat)) {
display_rws <- lapply(seq_len(nrow(spans)),
function(i) {
print_cells <- print_cells_mat[i,]
row <- spans[i,]
if (!all(print_cells)) {
myrle <- rle(row)
print_cells <- spans_to_viscell(row)
}
print_cells
})
display <- do.call(rbind, display_rws)
}
nr_header <- nrow(header_content$body)
if (indent_rownames) {
body[, 1] <- indent_string(body[, 1], c(rep(0, nr_header), sr$indent))
}
col_ref_strs <- matrix(vapply(header_content$footnotes, function(x) {
if(length(x) == 0)
""
else
paste(vapply(x, format_fnote_ref, ""), collapse = " ")
}, ""), ncol = ncol(body))
body_ref_strs <- get_ref_matrix(tt)
body <- matrix(paste0(body,
rbind(col_ref_strs,
body_ref_strs)),
nrow = nrow(body),
ncol = ncol(body))
row_nlines <- apply(body, 1, nlines)
nrows <- nrow(body)
if (any(row_nlines > 1)) {
hdr_inds <- 1:nr_header
tl <- body[hdr_inds, 1]
body <- rbind(expand_mat_rows(body[hdr_inds, , drop = FALSE], row_nlines[hdr_inds], cpadder = pad_vert_bottom),
expand_mat_rows(body[-1*hdr_inds,, drop = FALSE], row_nlines[-hdr_inds]))
spans <- expand_mat_rows(spans, row_nlines, rep_vec_to_len)
aligns <- expand_mat_rows(aligns, row_nlines, rep_vec_to_len)
display <- expand_mat_rows(display, row_nlines, rep_vec_to_len)
nlines_header <- sum(row_nlines[1:nr_header])
body[1:nr_header,1] <- c(tl, rep("", nr_header - length(tl)))
} else {
nlines_header <- nr_header
}
ref_fnotes <- get_formatted_fnotes(tt)
structure(
list(
strings = body,
spans = spans,
aligns = aligns,
display = display,
row_info = sr,
line_grouping = rep(1:nrows, times = row_nlines),
ref_footnotes = ref_fnotes
),
nlines_header = nlines_header,
nrow_header = nr_header)
}
format_fnote_ref <- function(fn) {
if(length(fn) == 0 || (is.list(fn) && all(vapply(fn, function(x) length(x) == 0, TRUE))))
return("")
else if(is.list(fn) && all(vapply(fn, is.list, TRUE)))
return(vapply(fn, format_fnote_ref, ""))
if(is.list(fn)) {
inds <- unlist(lapply(unlist(fn), function(x) if(is(x, "RefFootnote")) x@index else NULL))
} else {
inds <- fn@index
}
if(length(inds) > 0) {
paste0(" {", paste(inds, collapse = ", "), "}")
} else {
""
}
}
format_fnote_note <- function(fn) {
if(length(fn) == 0 || (is.list(fn) && all(vapply(fn, function(x) length(x) == 0, TRUE))))
return(character())
if(is.list(fn)) {
return(unlist(lapply(unlist(fn), format_fnote_note)))
}
if(is(fn, "RefFootnote")) {
paste0("{", fn@index, "} - ", fn@value)
} else {
NULL
}
}
.fn_ind_extractor <- function(strs) {
res <- suppressWarnings(as.numeric(gsub("\\{([[:digit:]]+)\\}.*", "\\1", strs)))
if(!(sum(is.na(res)) %in% c(0L, length(res))))
        stop("Got NAs mixed with non-NAs for extracted footnote indices. This should not happen.")
res
}
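# Hedged example (hypothetical footnote strings, not from the source): the
# leading "{n}" index is extracted so the notes can be reordered numerically.
# .fn_ind_extractor(c("{2} - second note", "{1} - first note"))   # 2 1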
.colref_mat_helper <- function(vals, span) {
val <- paste(lapply(vals, format_fnote_ref), collapse = " ")
if(length(val) == 0)
val <- ""
rep(val, times = span)
}
get_colref_matrix <- function(tt) {
cdf <- make_col_df(tt, visible_only=FALSE)
objs <- cdf$col_fnotes
spans <- cdf$total_span
vals <- mapply(.colref_mat_helper,
vals = objs,
span = spans)
vals
}
get_ref_matrix <- function(tt) {
if(ncol(tt) == 0 || nrow(tt) == 0) {
return(matrix("", nrow = nrow(tt), ncol = ncol(tt) + 1L))
}
rows <- collect_leaves(tt, incl.cont = TRUE, add.labrows = TRUE)
lst <- unlist(lapply(rows, cell_footnotes), recursive = FALSE)
cstrs <- unlist(lapply(lst, format_fnote_ref))
bodymat <- matrix(cstrs,
byrow = TRUE,
nrow = nrow(tt),
ncol = ncol(tt))
cbind(vapply(rows, function(rw) format_fnote_ref(row_footnotes(rw)), ""), bodymat)
}
get_formatted_fnotes <- function(tt) {
colresfs <- unlist(make_col_df(tt, visible_only = FALSE)$col_fnotes)
colstrs <- unlist(lapply(colresfs, format_fnote_note))
rows <- collect_leaves(tt, incl.cont = TRUE, add.labrows = TRUE)
lst <- unlist(lapply(rows, cell_footnotes), recursive = FALSE)
cellstrs <- unlist(lapply(lst, format_fnote_note))
rstrs <- unlist(lapply(rows, function(rw) format_fnote_note(row_footnotes(rw))))
allstrs <- c(colstrs, rstrs, cellstrs)
inds <- .fn_ind_extractor(allstrs)
allstrs[order(inds)]
}
.cleaf_depths <- function(ctree = coltree(cinfo), depth = 1, cinfo) {
if(is(ctree, "LayoutColLeaf"))
return(depth)
unlist(lapply(tree_children(ctree), .cleaf_depths, depth = depth + 1))
}
.do_tbl_h_piece2 <- function(tt) {
coldf <- make_col_df(tt, visible_only = FALSE)
remain <- seq_len(nrow(coldf))
chunks <- list()
cur <- 1
retmat <- NULL
while(length(remain) > 0) {
rw <- remain[1]
inds <- coldf$leaf_indices[[rw]]
endblock <- which(coldf$abs_pos == max(inds))
stopifnot(endblock >= rw)
chunks[[cur]] <- .do_header_chunk(coldf[rw:endblock,])
remain <- remain[remain > endblock]
cur <- cur + 1
}
chunks <- .pad_tops(chunks)
lapply(seq_len(length(chunks[[1]])),
function(i) {
DataRow(unlist(lapply(chunks, `[[`, i), recursive = FALSE))
})
}
.pad_end <- function(lst, padto, ncols) {
curcov <- sum(vapply(lst, cell_cspan, 0L))
if(curcov == padto)
return(lst)
c(lst, list(rcell("", colspan = padto - curcov)))
}
.pad_tops <- function(chunks) {
lens <- vapply(chunks, length, 1L)
padto <- max(lens)
needpad <- lens != padto
if(all(!needpad))
return(chunks)
chunks[needpad] <- lapply(chunks[needpad],
function(chk) {
span <- sum(vapply(chk[[length(chk)]], cell_cspan, 1L))
needed <- padto - length(chk)
c(replicate(rcell("", colspan = span),
n = needed),
chk)
})
chunks
}
.do_header_chunk <- function(coldf) {
nleafcols <- length(coldf$leaf_indices[[1]])
spldfs <- split(coldf, lengths(coldf$path))
toret <- lapply(seq_along(spldfs),
function(i) {
rws <- spldfs[[i]]
thisbit <- lapply(seq_len(nrow(rws)),
function(ri) {
rcell(rws[ri, "label", drop = TRUE], colspan = rws$total_span[ri],
footnotes = rws[ri, "col_fnotes", drop = TRUE][[1]])
})
.pad_end(thisbit, nleafcols)
})
toret
}
.tbl_header_mat <- function(tt) {
clyt <- coltree(tt)
rows <- .do_tbl_h_piece2(tt)
cinfo <- col_info(tt)
nc <- ncol(tt)
body <- matrix(rapply(rows, function(x) {
cs <- row_cspans(x)
if (is.null(cs)) cs <- rep(1, ncol(x))
rep(row_values(x), cs)
}), ncol = nc, byrow = TRUE)
span <- matrix(rapply(rows, function(x) {
cs <- row_cspans(x)
if (is.null(cs)) cs <- rep(1, ncol(x))
rep(cs, cs)
}), ncol = nc, byrow = TRUE)
fnote <- do.call(rbind,
lapply(rows, function(x) {
cell_footnotes(x)
}))
if (disp_ccounts(cinfo)) {
counts <- col_counts(cinfo)
cformat <- colcount_format(cinfo)
body <- rbind(body, vapply(counts, format_rcell, character(1), cformat))
span <- rbind(span, rep(1, nc))
fnote <- rbind(fnote, rep(list(list()), nc))
}
tl <- top_left(cinfo)
lentl <- length(tl)
nli <- nrow(body)
if(lentl == 0)
tl <- rep("", nli)
else if(lentl > nli) {
npad <- lentl - nli
body <- rbind(matrix("", nrow = npad, ncol = ncol(body)), body)
span <- rbind(matrix(1, nrow = npad, ncol = ncol(span)), span)
fnote <- rbind(matrix(list(), nrow = npad, ncol = ncol(body)), fnote)
} else if (lentl < nli)
tl <- c(tl, rep("", nli - lentl))
list(body = cbind(tl, body, deparse.level = 0), span = cbind(1, span),
footnotes = cbind(list(list()), fnote))
}
setGeneric("get_formatted_cells", function(obj) standardGeneric("get_formatted_cells"))
setMethod("get_formatted_cells", "TableTree",
function(obj) {
lr <- get_formatted_cells(tt_labelrow(obj))
ct <- get_formatted_cells(content_table(obj))
els <- lapply(tree_children(obj), get_formatted_cells)
if (ncol(ct) == 0 && ncol(lr) != ncol(ct)) {
ct <- lr[NULL, ]
}
do.call(rbind, c(list(lr), list(ct), els))
})
setMethod("get_formatted_cells", "ElementaryTable",
function(obj) {
lr <- get_formatted_cells(tt_labelrow(obj))
els <- lapply(tree_children(obj), get_formatted_cells)
do.call(rbind, c(list(lr), els))
})
setMethod("get_formatted_cells", "TableRow",
function(obj) {
default_format <- if (is.null(obj_format(obj))) "xx" else obj_format(obj)
format <- lapply(row_cells(obj), function(x) {
format <- obj_format(x)
if (is.null(format))
default_format
else
format
})
matrix(unlist(Map(function(val, format, spn) {
stopifnot(is(spn, "integer"))
rep(paste(format_rcell(val, format), collapse = ", "), spn)
}, row_values(obj), format, row_cspans(obj))), ncol = ncol(obj))
})
setMethod("get_formatted_cells", "LabelRow",
function(obj) {
nc <- ncol(obj)
if (labelrow_visible(obj)) {
matrix(rep("", nc), ncol = nc)
} else {
matrix(character(0), ncol = nc)
}
})
propose_column_widths <- function(x, mat_form = matrix_form(x, indent_rownames = TRUE)) {
stopifnot(is(x, "VTableTree"))
body <- mat_form$strings
spans <- mat_form$spans
aligns <- mat_form$aligns
display <- mat_form$display
chars <- nchar(body)
has_spans <- spans != 1
chars_ns <- chars
chars_ns[has_spans] <- 0
widths <- apply(chars_ns, 2, max)
if (any(has_spans)) {
has_row_spans <- apply(has_spans, 1, any)
chars_sp <- chars[has_row_spans, , drop = FALSE]
spans_sp <- spans[has_row_spans, , drop = FALSE]
disp_sp <- display[has_row_spans, , drop = FALSE]
nc <- ncol(spans)
for (i in seq_len(nrow(chars_sp))) {
for (j in seq_len(nc)) {
if (disp_sp[i, j] && spans_sp[i, j] != 1) {
i_cols <- seq(j, j + spans_sp[i, j] - 1)
nchar_i <- chars_sp[i, j]
cw_i <- widths[i_cols]
available_width <- sum(cw_i)
if (nchar_i > available_width) {
widths[i_cols] <- cw_i + spread_integer(nchar_i - available_width, length(cw_i))
}
}
}
}
}
widths
}
remove_consecutive_numbers <- function(x) {
stopifnot(is.wholenumber(x), is.numeric(x), !is.unsorted(x))
if (length(x) == 0) return(integer(0))
if (!is.integer(x)) x <- as.integer(x)
sel <- rep(TRUE, length(x))
x[c(TRUE, diff(x) != 1)]
}
empty_string_after <- function(x, indices) {
if (length(indices) > 0) {
offset <- 0
for (i in sort(indices)) {
x <- append(x, "", i + offset)
offset <- offset + 1
}
}
x
}
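# Hedged example (hypothetical input, not from the source): insert an empty
# string after each given position, shifting later insertions accordingly.
# empty_string_after(c("a", "b", "c"), c(1, 2))   # "a" "" "b" "" "c"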
spread_integer <- function(x, len) {
stopifnot(
is.wholenumber(x), length(x) == 1, x >= 0,
is.wholenumber(len), length(len) == 1, len >= 0,
!(len == 0 && x > 0)
)
if (len == 0) {
integer(0)
} else {
y <- rep(floor(x/len), len)
i <- 1
while (sum(y) < x) {
y[i] <- y[i] + 1
if (i == len) {
i <- 1
} else {
i <- i + 1
}
}
y
}
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5){
abs(x - round(x)) < tol
}
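# Hedged examples (hypothetical inputs, not from the source) for the small
# numeric helpers above:
# remove_consecutive_numbers(c(2, 3, 4, 7, 8, 10))   # 2 7 10
# spread_integer(7, 3)                               # 3 2 2
# is.wholenumber(c(1, 1.5, 2 + 1e-10))               # TRUE FALSE TRUE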
indent_string <- function(x, indent = 0, incr = 2, including_newline = TRUE) {
if (length(x) > 0) {
indent <- rep_len(indent, length.out = length(x))
incr <- rep_len(incr, length.out = length(x))
}
indent_str <- strrep(" ", (indent > 0) * indent * incr)
if (including_newline) {
x <- unlist(mapply(function(xi, stri) {
gsub("\n", stri, xi, fixed = TRUE)
}, x, paste0("\n", indent_str)))
}
paste0(indent_str, x)
}
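# Hedged example (hypothetical input, not from the source): per-element
# indentation, also applied after embedded newlines by default.
# indent_string(c("a", "b\nc"), indent = c(0, 2))   # "a"  "    b\n    c"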
.paste_no_na <- function(x, ...) {
paste(na.omit(x), ...)
}
padstr <- function(x, n, just = c("center", "left", "right")) {
just <- match.arg(just)
  if (length(x) != 1) stop("length of x needs to be 1 and not ", length(x))
  if (is.na(n) || !is.numeric(n) || n < 0) stop("n needs to be numeric and non-negative")
if (is.na(x)) x <- "<NA>"
nc <- nchar(x)
if (n < nc) stop("\"", x, "\" has more than ", n, " characters")
switch(
just,
center = {
pad <- (n - nc)/2
paste0(spaces(floor(pad)), x, spaces(ceiling(pad)))
},
left = paste0(x, spaces(n - nc)),
right = paste0(spaces(n - nc), x)
)
}
spaces <- function(n) {
strrep(" ", n)
}
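# Hedged examples (hypothetical inputs, not from the source) for the string
# padding helpers above:
# padstr("ab", 6, "center")   # "  ab  "
# padstr("ab", 6, "right")    # "    ab"
# padstr(NA, 4)               # "<NA>"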
mat_as_string <- function(mat, nheader = 1, colsep = " ", linesep = "\u2014") {
colwidths <- apply(apply(mat, c(1, 2), nchar), 2, max)
rows_formatted <- apply(mat, 1, function(row) {
paste(unlist(mapply(padstr, row, colwidths, "left")), collapse = colsep)
})
header_rows <- seq_len(nheader)
paste(c(rows_formatted[header_rows],
substr(strrep(linesep, nchar(rows_formatted[1])), 1, nchar(rows_formatted[1])),
rows_formatted[-header_rows]), collapse = "\n")
} |
display_document_info <- function(df, min_rad, info_columns = INFO_COLUMNS) {
doc_info_text <- ""
if (DATE_BASED_CORPUS == TRUE) {
doc_info_text <- paste(sep = "<br>",
doc_info_text,
paste0(
tags$b("Date: "),
format_date(df$Date[min_rad])
))
}
if ("Title" %in% info_columns) {
doc_info_text <- paste(sep = "<br>",
doc_info_text,
paste0(tags$b("Title: "),
df$Title[min_rad]))
}
if ("URL" %in% info_columns) {
doc_info_text <- paste(
sep = "<br>",
doc_info_text,
paste0(
tags$b("URL: "),
"<a href='",
df$URL[min_rad],
"'a <- tb target='_blank'>",
df$URL[min_rad],
"</a>"
)
)
}
doc_info_text <- paste(doc_info_text,
sep = "<br>",
paste0(
tags$b("Word count: "),
stringi::stri_count_words(df$Text[min_rad])
))
other_columns <-
info_columns[!info_columns %in% c("Date", "Title", "URL")]
for (i in seq_along(other_columns)) {
doc_info_text <- paste(
doc_info_text,
sep = "<br>",
paste0(
tags$b(other_columns[i]),
tags$b(": "),
df[[other_columns[i]]][min_rad]
)
)
}
return(stringr::str_replace(doc_info_text, "^<br>*", ""))
} |
context("Upgrade")
test_that("the version of renv in a project can be changed (upgraded)", {
skip_on_cran()
skip_sometimes()
renv_tests_scope()
init()
load()
upgrade(version = "0.5.0")
project <- getwd()
expect_equal(renv_activate_version(project), "0.5.0")
}) |
rules_df <- data.frame( rule = c("height > 0", "weight > 0", "height < 1.5 * weight")
, name = c(1,1,2)
)
rules <- validator(.data = rules_df)
nms <- names(rules)
expect_equal(nms, unique(nms)) |
rvinvchisq <- function (n=1, df, scale=1) {
return(scale / (rvchisq(n=n, df=df) / df))
} |
test_that("use_[cran|bioc]_badge() don't error", {
create_local_package()
expect_error_free(use_cran_badge())
expect_error_free(use_bioc_badge())
})
test_that("use_lifecycle_badge() handles bad and good input", {
create_local_package()
expect_snapshot(error = TRUE, {
use_lifecycle_badge()
use_lifecycle_badge("eperimental")
})
expect_error_free(use_lifecycle_badge("stable"))
})
test_that("use_binder_badge() needs a github repository", {
skip_if_no_git_user()
create_local_project()
use_git()
expect_error(use_binder_badge(), class = "usethis_error_bad_github_remote_config")
})
test_that("use_badge() does nothing if badge seems to pre-exist", {
create_local_package()
href <- "https://cran.r-project.org/package=foo"
writeLines(href, proj_path("README.md"))
expect_false(use_badge("foo", href, "SRC"))
}) |
library(pa)
load("returns.test.RData")
data(jan)
b1 <- brinson(x = jan)
result <- returns(b1, var = "sector")
stopifnot(all.equal(result, truth))
r1 <- regress(jan)
result.r1 <- returns(r1, var = "sector")
stopifnot(all.equal(result.r1, truth.r1))
data(quarter)
b2 <- brinson(x = quarter)
result.multi <- returns(b2, var = "sector")
stopifnot(all.equal(result.multi, truth.multi))
r2 <- regress(quarter)
result.multi.r2 <- returns(r2, var = "sector")
stopifnot(all.equal(result.multi.r2, truth.multi.r2)) |
streaks <- function(x, ...)
UseMethod("streaks")
streaks.default <- function(x,
up = 0.2, down = -up,
initial.state = NA,
y = NULL,
...) {
start <- 1
end <- NA
state <- tolower(initial.state)
results <- data.frame(start = numeric(0),
end = numeric(0),
state = character(0))
if (is.null(y)) y <- rep.int(1, length(x))
if (is.na(state)) {
hi <- x[1]/y[1]
lo <- x[1]/y[1]
hi.t <- 1
lo.t <- 1
} else if (state == "up") {
hi <- x[1]/y[1]
lo <- NA
hi.t <- 1
lo.t <- NA
} else if (state == "down") {
hi <- NA
lo <- x[1]/y[1]
hi.t <- NA
lo.t <- 1
}
for (t in 2:length(x)) {
dx <- x[t]/x[t - 1] / (y[t]/y[t - 1])
xy.i <- x[t]/y[t]
if (is.na(state)) {
if (dx >= 1) {
if (xy.i > hi) {
hi <- xy.i
hi.t <- t
}
if ( (x[t]/x[lo.t]) / (y[t]/y[lo.t]) - 1 >= up) {
state <- "up"
if (lo.t == 1) {
lo <- NA
lo.t <- NA
start <- 1
} else {
results <- rbind(results,
data.frame(start = 1,
end = lo.t,
state = NA))
start <- lo.t
}
}
} else if (dx < 1) {
if (xy.i < lo) {
lo <- xy.i
lo.t <- t
}
if ( (x[t]/x[hi.t]) / (y[t]/y[hi.t]) - 1 <= down) {
state <- "down"
if (hi.t == 1) {
hi <- NA
hi.t <- NA
start <- 1
} else {
results <- rbind(results,
data.frame(start = 1,
end = hi.t,
state = NA))
start <- hi.t
}
}
}
} else if (state == "up") {
if (dx >= 1) {
if (xy.i > hi) {
hi <- xy.i
hi.t <- t
}
} else if (dx < 1 && (x[t]/x[hi.t]) / (y[t]/y[hi.t]) - 1 < down) {
results <- rbind(results,
data.frame(start = start,
end = hi.t,
state = state))
state <- "down"
start <- hi.t
lo.t <- t
lo <- xy.i
hi.t <- NA
hi <- NA
}
} else if (state == "down") {
if (dx <= 1) {
if (xy.i < lo) {
lo <- xy.i
lo.t <- t
}
} else if (dx > 1 && (x[t]/x[lo.t]) / (y[t]/y[lo.t]) - 1 > up) {
results <- rbind(results,
data.frame(start = start,
end = lo.t,
state = state))
state <- "up"
start <- lo.t
lo.t <- NA
lo <- NA
hi.t <- t
hi <- xy.i
}
}
}
results <- rbind(results,
data.frame(start = start,
end = length(x),
state = state))
results[["return"]] <- x[results$end]/x[results$start] /
(y[results$end]/y[results$start]) - 1
results
}
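# Hedged usage sketch (hypothetical prices, not from the source): split a price
# path into up/down streaks using +/-20% reversal thresholds; the result holds
# start/end indices, the streak state and the per-streak (relative) return.
# x <- c(100, 112, 126, 124, 99, 96, 118, 130)
# streaks(x, up = 0.2, down = -0.2)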
streaks.zoo <- function(x,
up = 0.2,
down = -up,
initial.state = NA,
y = NULL, ...) {
t <- index(x)
ans <- streaks.default(x = coredata(x),
up = up,
down = down,
initial.state = initial.state,
y = coredata(y), ...)
ans$start <- t[ans$start]
ans$end <- t[ans$end]
ans
}
streaks.NAVseries <- function(x,
up = 0.2,
down = -up,
initial.state = NA,
bm = NULL, ...) {
xx <- as.zoo(x)
streaks.zoo(xx,
up = up, down = down,
initial.state = initial.state, y = bm, ...)
} |
formatPlotlyClinData <- function(
pl, data,
idVar = "USUBJID",
pathVar = NULL, pathDownload = TRUE,
idFromDataPlot = FALSE,
idVarPlot = "key", labelVarPlot = NULL,
highlightOn = "plotly_click",
highlightOff = "plotly_doubleclick",
id = paste0("plotClinData", sample.int(n = 1000, size = 1)),
verbose = FALSE){
idVarInit <- idVar
pl <- pl %>% layout(
legend =
list(
itemclick = "toggleothers",
itemdoubleclick = "toggle"
)
)
pl <- pl %>% highlight(on = highlightOn, off = highlightOff)
if(!is.null(pathVar)){
if(!is.null(idVar) && length(idVar) > 1){
data$key <- do.call(interaction, data[, idVar])
idVar <- "key"
}
dataPPDf <- unique(data[, c(idVar, pathVar)])
dataPPDf[, pathVar] <- getPathHyperlink(dataPPDf[, pathVar])
idxDupl <- which(duplicated(dataPPDf[, idVar]))
if(length(idxDupl) > 0){
dataDupl <- merge(dataPPDf, dataPPDf[idxDupl, idVar, drop = FALSE])
rownames(dataDupl) <- NULL
stop(paste0("Different ", sQuote(pathVar), " available for specific ",
idVarInit, ":\n",
paste(capture.output(print(dataDupl)), collapse = "\n")
))
}
dataPP <- dataPPDf[, c(idVar, pathVar)]
colnames(dataPP) <- c("key", "path")
jsPatientProfiles <- JS("function(el, x, data){",
paste0("getPatientProfilesPlotly(el, x, data,",
"fromdata=", tolower(idFromDataPlot), ",",
"idvar=", sQuote(idVarPlot), ",",
"labelplot=", sQuote(id), ",",
"labelvar=", ifelse(is.null(labelVarPlot), 'null', sQuote(labelVarPlot)), ",",
"download=", tolower(pathDownload), ",",
"verbose=", tolower(verbose),
");"
),
"}")
pl <- pl %>% onRender(
jsCode = jsPatientProfiles,
data = dataPP
)
prepCntArgs <- c(list(x = pl), getJsDepClinDataReview(type = "patientProfiles"))
pl <- do.call(prependContent, prepCntArgs)
}
return(pl)
} |
m_not_found_sp_altclass <-
"Not found. Consider checking the spelling or alternate classification"
m_more_than_one_found <-
"More than one %s found for taxon '%s'; refine query or set ask=TRUE"
m_na_ask_false <-
"NA due to ask=FALSE & > 1 result"
m_na_ask_false_no_direct <-
"NA due to ask=FALSE & no direct match found" |
fbed.geeglm.reps <- function(y, x, id, prior = NULL, reps, univ = NULL, alpha = 0.05, wei = NULL, K = 0, test = "testIndGEELogistic", correl = "exchangeable", se = "jack") {
dm <- dim(x)
p <- dm[2]
n <- dm[1]
ind <- 1:p
sig <- log(alpha)
stat <- numeric(p)
sela <- NULL
card <- 0
sa <- NULL
pva <- NULL
zevar <- Rfast::check_data(x)
if ( sum( zevar > 0 ) > 0 ) x[, zevar] <- rnorm( n * length(zevar) )
priorindex <- NULL
if ( !is.null(prior) ) {
x <- cbind(x, prior)
priorindex <- c( (p + 1):dim(x)[2] )
}
x <- cbind(x, reps)
xronoi <- dim(x)[2] + 1
x <- as.data.frame(x)
if ( test == "testIndGEELogistic" ) {
oiko <- binomial(logit)
} else if ( test == "testIndGEEPois" ) {
oiko <- poisson(log)
} else if ( test == "testIndGEEGamma" ) {
oiko <- Gamma(log)
} else if ( test == "testIndNormLog" ) {
oiko <- gaussian(log)
}
runtime <- proc.time()
if ( is.null(univ) ) {
if ( is.null(prior) ) {
for ( i in ind ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE )
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else stat[i] <- anova(fit2)[2, 2]
}
} else {
for ( i in ind ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
}
n.tests <- p
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
univ <- list()
univ$stat <- stat
univ$pvalue <- pval
univ$stat[zevar] <- 0
univ$pvalue[zevar] <- 0
} else {
n.tests <- 0
stat <- univ$stat
pval <- univ$pvalue
}
s <- which(pval < sig)
if ( length(s) > 0 ) {
sel <- which.min(pval)
sela <- sel
s <- s[ - which(s == sel) ]
sa <- stat[sel]
pva <- pval[sel]
stat <- numeric(p )
while ( sum(s>0) > 0 ) {
for ( i in ind[s] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests <- n.tests + length( ind[s] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s) > 0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- rep(0, p)
}
card <- sum(sela > 0)
if (K == 1) {
d0 <- length(sela)
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[2] <- length( ind[-c(sela, zevar)] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
while ( sum(s>0) > 0 ) {
d0 <- length(sela)
for ( i in ind[s] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[2] <- n.tests[2] + length( ind[s] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
}
card <- c(card, sum(sela>0) )
}
if ( K > 1) {
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[1] <- length( ind[-c(sela, zevar)] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
while ( sum(s > 0) > 0 ) {
for ( i in ind[s] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[2] <- n.tests[2] + length( ind[s] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
}
card <- c(card, sum(sela > 0) )
vim <- 1
while ( vim < K & card[vim + 1] - card[vim] > 0 ) {
vim <- vim + 1
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[vim + 1] <- length( ind[-c(sela, zevar)] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
while ( sum(s > 0) > 0 ) {
for ( i in ind[s] ) {
fit2 <- try( geepack::geeglm( y ~., data = x[, c(xronoi, priorindex, sela, i)], family = oiko, id = id, waves = reps, weights = wei, corstr = correl, std.err = se ), silent = TRUE)
if ( identical( class(fit2), "try-error" ) ) {
stat[i] <- 0
} else {
mod <- anova(fit2)
nr <- dim(mod)[1]
stat[i] <- mod[nr, 2]
}
}
n.tests[vim + 1] <- n.tests[vim + 1] + length( ind[s] )
pval <- pchisq(stat, 1, lower.tail = FALSE, log.p = TRUE)
s <- which(pval < sig)
sel <- which.min(pval) * ( length(s)>0 )
sa <- c(sa, stat[sel])
pva <- c(pva, pval[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) stat <- numeric(p)
}
card <- c(card, sum(sela>0) )
}
}
}
runtime <- proc.time() - runtime
len <- sum( sela > 0 )
if (len > 0) {
res <- cbind(sela[1:len], sa[1:len], pva[1:len] )
info <- matrix(nrow = length(card), ncol = 2)
info[, 1] <- card
info[, 2] <- n.tests
} else {
res <- matrix(c(0, 0, 0), ncol = 3)
info <- matrix(c(0, p), ncol = 2)
}
colnames(res) <- c("Vars", "stat", "log p-value")
  rownames(info) <- paste("K=", 0:(length(card) - 1), sep = "")
colnames(info) <- c("Number of vars", "Number of tests")
list(univ = univ, res = res, info = info, runtime = runtime)
} |
blca.em.se<-
function(fit, x, counts.n=1)
{
if(inherits(x,"data.blca")){
counts.n<- x$counts.n
x<- x$data
}
G<- length(fit$classprob)
if(G==1){
ret<- NULL
ret$itemprob<- fit$itemprob.sd
ret$classprob<- fit$classprob.sd
ret$convergence<- 1
return(ret)
}
if(G>1) M<- ncol(fit$itemprob) else M<- length(fit$itemprob)
rm.ind<- c(which(fit$itemprob< 1e-5), which(fit$itemprob>(1 - 1e-5)))
val.ind<- fit$itemprob[rm.ind]
if(length(rm.ind)>0) parvec<- c(fit$itemprob[-rm.ind], fit$classprob[-G]) else parvec<- c(fit$itemprob, fit$classprob[-G])
H1<- fdHess(pars=parvec,fun=lp1.intern, dat=x,counts.n=counts.n, rm.ind=rm.ind, prior.list=fit$prior, val.ind=val.ind, G0=G, M0=M)
if(length(rm.ind)>0) parvec<- c(fit$itemprob[-rm.ind], fit$classprob[-1]) else parvec<- c(fit$itemprob, fit$classprob[-1])
H2<- fdHess(pars=parvec,fun=lp2.intern, dat=x, counts.n=counts.n,rm.ind=rm.ind, prior.list=fit$prior, val.ind=val.ind, G0=G, M0=M)
SE1<- diag(solve(-H1$Hessian))
SE2<- diag(solve(-H2$Hessian))
if(any(SE1<0) | any(SE2<0)){
warning("Diagonal entries of Observed Information matrix are not all positive - some posterior standard deviations will be undefined.")
SE1[SE1<0]<- NaN
SE2[SE2<0]<- NaN
}
if(length(rm.ind)==0){
if(all(eigen(H1$Hessian, symmetric=TRUE, only.values=TRUE)$values<0)){
convergence<- 1
} else {
convergence<- 2
}
} else convergence<- 4
len.parvec.theta<- G*M - length(rm.ind)
se.theta<- rep(0, G*M)
if(length(rm.ind)>0) se.theta[-c(rm.ind)]<- SE1[1:len.parvec.theta] else se.theta<- SE1[1:len.parvec.theta]
se.theta[rm.ind]<- 0
ret<- NULL
ret$itemprob<- sqrt(matrix(se.theta, G, M))
ret$classprob<- sqrt(c( SE1[(len.parvec.theta + 1):(len.parvec.theta + G-1)], SE2[len.parvec.theta +G-1] ))
ret$convergence<- convergence
ret
} |
library(AER)
data(Affairs)
data(Affairs, package='AER')
mydata=Affairs
summary(mydata)
dim(mydata)
x=mydata$affairs
table(x)
round(prop.table(table(x)),3)
round(prop.table(table(x))*100,1)
x=mydata$gender
round(prop.table(table(x))*100)
x=mydata$children
round(prop.table(table(x))*100)
median(mydata$age)
mydata$ynaffair[mydata$affairs > 0 ] = 1
mydata$ynaffair[mydata$affairs == 0 ] = 0
mydata$ynaffair = factor(mydata$ynaffair,
levels=c(0,1), labels=c('No','Yes'))
table(mydata$ynaffair)
fit.full = glm(ynaffair ~ gender + age + yearsmarried + children + religiousness + education + occupation + rating, data=mydata, family=binomial())
summary(fit.full)
fit.reduced = glm(ynaffair ~ age + yearsmarried + religiousness
+ rating, data=mydata, family=binomial())
summary(fit.reduced)
anova(fit.reduced, fit.full, test='Chisq')
coef(fit.reduced)
exp(coef(fit.reduced))
exp(confint(fit.reduced))
1.106^10
testdata = data.frame(rating=c(1,2,3,4,5), age=mean(mydata$age),
yearsmarried=mean(mydata$yearsmarried),
religiousness = mean(mydata$religiousness))
testdata
testdata$predict = predict(fit.reduced, newdata=testdata, type='response')
testdata
testdata = data.frame(rating=mean(mydata$rating), age=seq(17,57,10),
yearsmarried=mean(mydata$yearsmarried),
religiousness = mean(mydata$religiousness))
testdata
testdata$predict = predict(fit.reduced, newdata=testdata, type='response')
testdata
deviance(fit.reduced)/ df.residual(fit.reduced)
fit.b = glm(ynaffair ~ age + yearsmarried + religiousness
+ rating, data=mydata, family=binomial())
fit.qb= glm(ynaffair ~ age + yearsmarried + religiousness
+ rating, data=mydata, family=quasibinomial())
pchisq(summary(fit.qb)$dispersion * fit.b$df.residual, fit.b$df.residual, lower=F) |
PrepInferenceAssumption <- function( M, CItp=2, qt = c(0.025,0.975), SampCIType=1, carm = 2 ) {
K <- M$K
nm2 <- c( paste0( "Main",carm,"wts" ), paste0( "Samp",carm,"wts" ) )
Mwts1 <- M$Main1wts
Mwts2 <- M[[ nm2[1] ]]
Swts1 <- M$Samp1wts
Swts2 <- M[[ nm2[2] ]]
nm <- paste("T",K,sep="")
meannms <- c("MEm","ME0","ME1","MSm","MS0","MS1")
sdnms <- c("Em","E0","E1","Sm","S0","S1")
Mmean1 <- Mwts1[ Mwts1[,"type"] %in% meannms, c(nm,"type") ]
Mmean2 <- Mwts2[ Mwts2[,"type"] %in% meannms, c(nm,"type") ]
names(Mmean1) <- c("M1","type")
names(Mmean2) <- c("M2","type")
Msd1 <- Mwts1[ Mwts1[,"type"] %in% sdnms, c(nm,"type") ]
Msd2 <- Mwts2[ Mwts2[,"type"] %in% sdnms, c(nm,"type") ]
names(Msd1) <- c("Msd1","type")
names(Msd2) <- c("Msd2","type")
Msd1[,"type"] <- paste0("M",Msd1[,"type"])
Msd2[,"type"] <- paste0("M",Msd2[,"type"])
Smean1 <- Swts1[ Swts1[,"type"] %in% meannms, c(nm,"type","sub") ]
Smean2 <- Swts2[ Swts2[,"type"] %in% meannms, c(nm,"type","sub") ]
names(Smean1) <- c("S1","type","sub")
names(Smean2) <- c("S2","type","sub")
Ssd1 <- Swts1[ Swts1[,"type"] %in% sdnms, c(nm,"type","sub") ]
Ssd2 <- Swts2[ Swts2[,"type"] %in% sdnms, c(nm,"type","sub") ]
names(Ssd1) <- c("Ssd1","type","sub")
names(Ssd2) <- c("Ssd2","type","sub")
Ssd1[,"type"] <- paste0("M",Ssd1[,"type"])
Ssd2[,"type"] <- paste0("M",Ssd2[,"type"])
T1 <- merge(Mmean1,Msd1,by=c("type"))
T11 <- merge(Smean1,Ssd1,by=c("type","sub"))
T1 <- merge(T1,T11,by=c("type"))
T2 <- merge(Mmean2,Msd2,by=c("type"))
T22 <- merge(Smean2,Ssd2,by=c("type","sub"))
  T2 <- merge(T2, T22, by = c("type"))
All <- merge(T1,T2,by=c("type","sub"))
All$Di1 <- All$S1 - All$M1
All$ti1 <- (All$S1 - All$M1)/All$Ssd1
All$Di2 <- All$S2 - All$M2
All$ti2 <- (All$S2 - All$M2)/All$Ssd2
All$Md <- All$M2 - All$M1
All$Mdi <- ( All$S2 - All$M2) - ( All$S1 - All$M1 )
All$Mtdi <- (( All$S2 - All$M2) - ( All$S1 - All$M1 )) / sqrt( All$Ssd2^2 + All$Ssd1^2 )
All$Sd <- All$S2 - All$S1
if (SampCIType == 1) {
agg1 <- aggregate( M1 ~ type, data = All, mean )
agg2 <- aggregate( M2 ~ type, data = All, mean )
aggd <- aggregate( Md ~ type, data = All, mean )
qagg1 <- aggregate( S1 ~ type, data = All, function(x) {quantile(x,qt)} )
qagg2 <- aggregate( S2 ~ type, data = All, function(x) {quantile(x,qt)} )
qaggd <- aggregate( Sd ~ type, data = All, function(x) {quantile(x,qt)} )
agg1 <- merge(agg1,qagg1,by=c("type"))
agg2 <- merge(agg2,qagg2,by=c("type"))
aggd <- merge(aggd,qaggd,by=c("type"))
tab3 <- merge(agg1,agg2,by=c("type"))
tab3 <- merge(tab3,aggd,by=c("type"))
tab3 <- as.data.frame( as.list(tab3) )
stab <- tab3
} else if (SampCIType==2) {
agg1 <- aggregate( M1 ~ type, data = All, mean )
agg2 <- aggregate( M2 ~ type, data = All, mean )
aggd <- aggregate( Md ~ type, data = All, mean )
sagg1 <- aggregate( S1 ~ type, data = All, mean )
sagg2 <- aggregate( S2 ~ type, data = All, mean )
saggd <- aggregate( Sd ~ type, data = All, mean )
qagg1 <- aggregate( Di1 ~ type, data = All, function(x) {quantile(x,qt)} )
qagg2 <- aggregate( Di2 ~ type, data = All, function(x) {quantile(x,qt)} )
qaggd <- aggregate( Mdi ~ type, data = All, function(x) {quantile(x,qt)} )
agg1 <- merge(agg1,sagg1,by=c("type"))
agg1 <- merge(agg1,qagg1,by=c("type"))
agg2 <- merge(agg2,sagg2,by=c("type"))
agg2 <- merge(agg2,qagg2,by=c("type"))
aggd <- merge(aggd,saggd,by=c("type"))
aggd <- merge(aggd,qaggd,by=c("type"))
tmp32 <- merge(agg1,agg2,by=c("type"))
tmp32 <- merge(tmp32,aggd,by=c("type"))
tmp32 <- as.data.frame( as.list(tmp32))
names(tmp32) <- c("type","M1","S1","lb1","ub1","M2","S2","lb2","ub2","Md","Sd","lbd","ubd")
tab32 <- tmp32
tab32[,"lb1"] <- tmp32[,"S1"] - tmp32[,"ub1"]
tab32[,"ub1"] <- tmp32[,"S1"] - tmp32[,"lb1"]
tab32[,"lb2"] <- tmp32[,"S2"] - tmp32[,"ub2"]
tab32[,"ub2"] <- tmp32[,"S2"] - tmp32[,"lb2"]
tab32[,"lbd"] <- tmp32[,"Sd"] - tmp32[,"ubd"]
tab32[,"ubd"] <- tmp32[,"Sd"] - tmp32[,"lbd"]
tab32 <- tab32[,c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")]
stab <- tab32
} else if (SampCIType==3) {
agg1 <- aggregate( M1 ~ type, data = All, mean )
agg2 <- aggregate( M2 ~ type, data = All, mean )
aggd <- aggregate( Md ~ type, data = All, mean )
sagg1 <- aggregate( S1 ~ type, data = All, mean )
sagg2 <- aggregate( S2 ~ type, data = All, mean )
saggd <- aggregate( Sd ~ type, data = All, mean )
qagg1 <- aggregate( abs(Di1) ~ type, data = All, function(x) {quantile(x,0.95)} )
qagg2 <- aggregate( abs(Di2) ~ type, data = All, function(x) {quantile(x,0.95)} )
qaggd <- aggregate( abs(Mdi) ~ type, data = All, function(x) {quantile(x,0.95)} )
agg1 <- merge(agg1,sagg1,by=c("type"))
agg1 <- merge(agg1,qagg1,by=c("type"))
agg2 <- merge(agg2,sagg2,by=c("type"))
agg2 <- merge(agg2,qagg2,by=c("type"))
aggd <- merge(aggd,saggd,by=c("type"))
aggd <- merge(aggd,qaggd,by=c("type"))
tmp33 <- merge(agg1,agg2,by=c("type"))
tmp33 <- merge(tmp33,aggd,by=c("type"))
tmp33 <- as.data.frame( as.list(tmp33))
names(tmp33) <- c("type","M1","S1","q1","M2","S2","q2","Md","Sd","qd")
tab33 <- tmp33
tab33[,"lb1"] <- tmp33[,"S1"] - tmp33[,"q1"]
tab33[,"ub1"] <- tmp33[,"S1"] + tmp33[,"q1"]
tab33[,"lb2"] <- tmp33[,"S2"] - tmp33[,"q2"]
tab33[,"ub2"] <- tmp33[,"S2"] + tmp33[,"q2"]
tab33[,"lbd"] <- tmp33[,"Sd"] - tmp33[,"qd"]
tab33[,"ubd"] <- tmp33[,"Sd"] + tmp33[,"qd"]
tab33 <- tab33[,c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")]
stab <- tab33
} else if (SampCIType==4) {
agg1 <- aggregate( M1 ~ type, data = All, mean )
agg2 <- aggregate( M2 ~ type, data = All, mean )
aggd <- aggregate( Md ~ type, data = All, mean )
sdagg1 <- aggregate( Msd1 ~ type, data = All, mean )
sdagg2 <- aggregate( Msd2 ~ type, data = All, mean )
sagg1 <- aggregate( S1 ~ type, data = All, mean )
sagg2 <- aggregate( S2 ~ type, data = All, mean )
saggd <- aggregate( Sd ~ type, data = All, mean )
qagg1 <- aggregate( ti1 ~ type, data = All, function(x) {quantile(x,qt)} )
qagg2 <- aggregate( ti2 ~ type, data = All, function(x) {quantile(x,qt)} )
qaggd <- aggregate( Mtdi ~ type, data = All, function(x) {quantile(x,qt)} )
agg1 <- merge(agg1,sagg1,by=c("type"))
agg1 <- merge(agg1,sdagg1,by=c("type"))
agg1 <- merge(agg1,qagg1,by=c("type"))
agg2 <- merge(agg2,sagg2,by=c("type"))
agg2 <- merge(agg2,sdagg2,by=c("type"))
agg2 <- merge(agg2,qagg2,by=c("type"))
aggd <- merge(aggd,saggd,by=c("type"))
aggd <- merge(aggd,qaggd,by=c("type"))
tmp44 <- merge(agg1,agg2,by=c("type"))
tmp44 <- merge(tmp44,aggd,by=c("type"))
tmp44 <- as.data.frame( as.list(tmp44))
names(tmp44) <- c("type","M1","S1","sd1","lb1","ub1","M2","S2","sd2","lb2","ub2","Md","Sd","lbd","ubd")
tmp44$sdd <- sqrt( tmp44[,"sd1"]^2 + tmp44[,"sd2"]^2 )
tab44 <- tmp44
tab44[,"lb1"] <- tmp44[,"S1"] - tmp44[,"ub1"] * tmp44[,"sd1"]
tab44[,"ub1"] <- tmp44[,"S1"] - tmp44[,"lb1"] * tmp44[,"sd1"]
tab44[,"lb2"] <- tmp44[,"S2"] - tmp44[,"ub2"] * tmp44[,"sd2"]
tab44[,"ub2"] <- tmp44[,"S2"] - tmp44[,"lb2"] * tmp44[,"sd2"]
tab44[,"lbd"] <- tmp44[,"Sd"] - tmp44[,"ubd"] * tmp44[,"sdd"]
tab44[,"ubd"] <- tmp44[,"Sd"] - tmp44[,"lbd"] * tmp44[,"sdd"]
tab44 <- tab44[,c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")]
stab <- tab44
} else if (SampCIType==5) {
agg1 <- aggregate( M1 ~ type, data = All, mean )
agg2 <- aggregate( M2 ~ type, data = All, mean )
aggd <- aggregate( Md ~ type, data = All, mean )
sdagg1 <- aggregate( Msd1 ~ type, data = All, mean )
sdagg2 <- aggregate( Msd2 ~ type, data = All, mean )
sagg1 <- aggregate( S1 ~ type, data = All, mean )
sagg2 <- aggregate( S2 ~ type, data = All, mean )
saggd <- aggregate( Sd ~ type, data = All, mean )
qagg1 <- aggregate( abs(ti1) ~ type, data = All, function(x) {quantile(x,0.95)} )
qagg2 <- aggregate( abs(ti2) ~ type, data = All, function(x) {quantile(x,0.95)} )
qaggd <- aggregate( abs(Mtdi) ~ type, data = All, function(x) {quantile(x,0.95)} )
agg1 <- merge(agg1,sagg1,by=c("type"))
agg1 <- merge(agg1,sdagg1,by=c("type"))
agg1 <- merge(agg1,qagg1,by=c("type"))
agg2 <- merge(agg2,sagg2,by=c("type"))
agg2 <- merge(agg2,sdagg2,by=c("type"))
agg2 <- merge(agg2,qagg2,by=c("type"))
aggd <- merge(aggd,saggd,by=c("type"))
aggd <- merge(aggd,qaggd,by=c("type"))
tmp55 <- merge(agg1,agg2,by=c("type"))
tmp55 <- merge(tmp55,aggd,by=c("type"))
tmp55 <- as.data.frame( as.list(tmp55))
names(tmp55) <- c("type","M1","S1","sd1","q1","M2","S2","sd2","q2","Md","Sd","qd")
tmp55$sdd <- sqrt( tmp55[,"sd1"]^2 + tmp55[,"sd2"]^2 )
tab55 <- tmp55
tab55[,"lb1"] <- tmp55[,"S1"] - tmp55[,"q1"] * tmp55[,"sd1"]
tab55[,"ub1"] <- tmp55[,"S1"] + tmp55[,"q1"] * tmp55[,"sd1"]
tab55[,"lb2"] <- tmp55[,"S2"] - tmp55[,"q2"] * tmp55[,"sd2"]
tab55[,"ub2"] <- tmp55[,"S2"] + tmp55[,"q2"] * tmp55[,"sd2"]
tab55[,"lbd"] <- tmp55[,"Sd"] - tmp55[,"qd"] * tmp55[,"sdd"]
tab55[,"ubd"] <- tmp55[,"Sd"] + tmp55[,"qd"] * tmp55[,"sdd"]
tab55 <- tab55[,c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")]
stab <- tab55
}
cnms <- paste0(c("lb","ub"),CItp)
onms <- c("Est",cnms)
selvars <- c("Est",cnms,"trt")
ECI0 <- M$ECI[ M$ECI[,"alpha1"] == 0 & M$ECI[,"alpha2"] == 0 & M$ECI[,"k"] == K,
selvars ]
Line4 <- cbind( "MEX", ECI0[ ECI0[,"trt"] == 1, onms],
ECI0[ ECI0[,"trt"] == 2, onms],
ECI0[ ECI0[,"trt"] == 0, onms])
names(stab) <- c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")
names(Line4) <- c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")
ETab3 <- rbind(stab,Line4)
SCI0 <- M$SCI[ M$SCI[,"alpha1"] == 0 & M$SCI[,"alpha2"] == 0 & M$SCI[,"k"] == K,
selvars ]
Line4 <- cbind( "MSX", SCI0[ SCI0[,"trt"] == 1, onms],
SCI0[ SCI0[,"trt"] == 2, onms],
SCI0[ SCI0[,"trt"] == 0, onms])
names(Line4) <- c("type","M1","lb1","ub1","M2","lb2","ub2","Md","lbd","ubd")
STab3 <- rbind(ETab3,Line4)
rownames(STab3) <- NULL
return( STab3 )
} |
mstCriteria <- function(design, plot2d = FALSE)
{
m <- design
n<-nrow(m)
D<-as.matrix(dist(m))
diag(D)<-rep(Inf,n)
x<-rep(0,n-1)
Peak<-1
p<-Peak
Tree<-list(c())
Tree<-rep(Tree,n)
if(plot2d)
{plot(m,pch=19,col="blue",xlab="",ylab="")}
for( i in 1:(n-1))
{
if(length(Peak)==1)
{
new_p<-which.min(as.numeric(D[p,]))
x[i]<-D[p,new_p]
D[new_p,Peak]=D[Peak,new_p]<-Inf
Peak<-c(Peak,new_p)
}
else{
A<-D[Peak,]
w<-which.min(A)
reste<-w%%nrow(A)
if(reste!=0)
{p<-Peak[reste]
new_p<-w %/% nrow(A)+1}
else
{p<-Peak[nrow(A)]
new_p<-w %/% nrow(A)}
x[i]<-D[p,new_p]
D[new_p,Peak]<-D[Peak,new_p]<-Inf
Peak<-c(Peak,new_p)
}
Tree[[p]]<-c(Tree[[p]],new_p)
Tree[[new_p]]<-c(Tree[[new_p]],p)
      if (plot2d)
{lines(rbind(m[p,],m[new_p,]),col='red')}
}
return(list(tree=Tree,stats=c(mean(x),sqrt(var(x)))))
} |
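# Minimal usage sketch (the random design below is an assumption for
# illustration, not part of the original source): mstCriteria() returns the
# minimum spanning tree of a design plus the mean and standard deviation of
# its edge lengths, two common space-filling criteria. Guarded with
# if (interactive()) so that sourcing this file stays side-effect free.
if (interactive()) {
  set.seed(1)
  design <- matrix(runif(40), ncol = 2)       # 20 random points in [0, 1]^2
  mstCriteria(design, plot2d = FALSE)$stats   # c(mean edge length, sd of edge lengths)
}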
context("n-gram tabulation")
test_that("tabulating unpositioned n-grams",{
data(human_cleave)
expect_identical(table_ngrams(human_cleave[, -10], c("15.15_0", "11.11_0"), human_cleave[, 10])[["target1"]],
c(5, 0))
})
test_that("tabulating positioned n-grams",{
data(human_cleave)
expect_identical(table_ngrams(human_cleave[, -10], c("1_15.15_0", "1_11.11_0"), human_cleave[, 10])[["target1"]],
c(2, 0))
}) |
library(rxSeq)
options(width = 80)
rcA = readCounts(index=data.A$index, y=data.A$y[1:2,], n=data.A$n[1:2,], n0B=data.A$n0B[1:2,],
kappas=data.A$kappas, geneids=data.A$geneids[1:2])
rcX = readCounts(index=data.X$index, y=data.X$y[1:2,], n=data.X$n[1:2,], n0B=data.X$n0B[1:2,],
kappas=data.X$kappas, geneids=data.X$geneids[1:2], tausB=data.X$tausB, chrom="X")
trecase.A.out = process(rcA)
trecase.X.out = process(rcX)
names(trecase.A.out)
trecase.A.out$pval[,1:2]
names(trecase.X.out)
trecase.X.out$pval[,1:2]
nLogLik(res=trecase.A.out, rc=rcA, genei=1)$nll
nLogLik(res=trecase.X.out, rc=rcX, genei=1)$nll
rcA$model = "short"
trec.A.out = process(rcA)
names(trec.A.out)
trec.A.out$pval[,1:2]
rcX$model = "short"
trec.X.out = process(rcX)
names(trec.X.out)
trec.X.out$pval[,1:2]
nLogLik(res=trec.A.out, rc=rcA, genei=1)$nll
nLogLik(res=trec.X.out, rc=rcX, genei=1)$nll
get.tausB(n=data.X$n, n0B=data.X$n0B, geneids=data.X$geneids,
Xist.ID="ENSMUSG00000086503")
data.X$tausB
get.tausB(n=data.X$n, n0B=data.X$n0B, geneids=data.X$geneids, Xist.ID = "")
dat.A = simRX(b0f=.5, b0m=.6, b1f=.3, b1m=.4, beta_sex=.1, beta_dom=.1, n.simu=1E1)
names(dat.A)
dat.X = simRX(b0f=.5, b0m=.6, b1f=.3, b1m=.4, beta_sex=.1, beta_dom=.1, n.simu=1E1,
is.X=TRUE, tauB=.3)
names(dat.X) |
exp_color_fun <-
function(x_min, x_max, steepness = 1, num_cols = 256){
x <- seq(0,1,length.out = num_cols)
y <- exp(steepness*x)-1
x_range <- x_max - x_min
scaled_y <- (y*x_range)/max(y)
shifted_y <- scaled_y + x_min
return(shifted_y)
} |
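# Minimal usage sketch (values shown are approximate and for illustration
# only): exp_color_fun() maps num_cols equally spaced points onto
# [x_min, x_max] with exponential spacing controlled by `steepness`.
if (interactive()) {
  exp_color_fun(0, 10, steepness = 2, num_cols = 5)
  # approximately: 0.00 1.02 2.69 5.45 10.00
}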
RVT1 <-
function(y, X, maf=0.05, perm=100)
{
Xy_perm = my_check(y, X, perm)
y = Xy_perm$y
X = Xy_perm$X
perm = Xy_perm$perm
if (mode(maf) != "numeric" || length(maf) != 1
|| maf <= 0 || maf >= 1)
stop("argument 'maf' must be a value between 0 and 1")
MAFs = colSums(X, na.rm=TRUE) / (2*nrow(X))
rare = sum(MAFs < maf)
if (rare == 0)
stop(paste("\n", "Oops: No rare variants below maf=",
maf, " were detected. Try a larger maf", sep=""))
X.new = X[ , MAFs < maf]
X.new[X.new == 2] = 1
x.prop = rowMeans(X.new, na.rm=TRUE)
y.new = y - mean(y)
U = sum(y.new * x.prop)
xv = sum((x.prop - mean(x.prop))^2)
V = mean(y) * (1 - mean(y)) * xv
score = sum(U^2 / V)
if (is.na(score) || is.infinite(score) || is.nan(score))
score = 0
asym.pval = 1 - pchisq(score, 1)
perm.pval = NA
if (perm > 0)
{
x.perm = rep(0, perm)
ymean = mean(y)
for (i in 1:perm)
{
perm.sample = sample(1:length(y))
y.perm = y[perm.sample] - ymean
U.perm = sum(y.perm * x.prop)
x.perm[i] = sum(U.perm^2 / V)
}
perm.pval = sum(x.perm > score) / perm
}
name = "RVT1: Rare Variant Test 1"
arg.spec = c(sum(y), length(y)-sum(y), ncol(X), rare, maf, perm)
arg.spec = as.character(arg.spec)
names(arg.spec) = c("cases", "controls", "variants", "rarevar", "maf", "n.perms")
res = list(rvt1.stat = score,
asym.pval = asym.pval,
perm.pval = perm.pval,
args = arg.spec,
name = name)
class(res) = "assoctest"
return(res)
} |
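# Minimal usage sketch (assumes the package-internal helper my_check(), called
# above, is available on the search path; the simulated data are illustrative
# only). RVT1 expects a binary phenotype y and a 0/1/2 genotype matrix X.
if (interactive()) {
  set.seed(42)
  geno  <- matrix(rbinom(200 * 10, size = 2, prob = 0.02), nrow = 200)  # rare variants
  pheno <- rbinom(200, size = 1, prob = 0.5)                            # case/control
  RVT1(pheno, geno, maf = 0.05, perm = 100)
}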
plotAdjustedArea = function(x, perc=TRUE, time.labels=NULL,
category.name=NULL, category.type=NULL){
y = list(`Accumulated area` = x@accuracySummary)
if(!is.null(time.labels))
y = x@accuracyByPeriod[time.labels]
if(is.null(y))
stop("time.labels out of bounds", call. = TRUE)
if(is.null(category.name)){
category.name = rownames(x@accuracySummary$ProportionMatrix)
category.name = category.name[-length(category.name)]
}
if(!is.null(category.type))
category.name = switch(pmatch(category.type,c("numeric","letter")),
as.character(seq(1:length(category.name))),
LETTERS[1:length(category.name)]
)
time.labels = names(y)
if(is.null(time.labels))
time.labels = seq_along(y)
df = do.call("rbind", lapply(time.labels, function(i){
df = data.frame(y[[i]]$AreaUncertainty)
total_area = sum(unlist(df$Mapped))
df_m = data.frame(t(df$Mapped))
names(df_m) = category.name
df_m = melt(df_m, measure.vars = names(df_m))
df_m$Legend = "Mapped"
df_m$ci = as.numeric(NA)
df_m$Period = i
df_a = data.frame(t(df$Adjusted))
names(df_a) = category.name
df_a = melt(df_a, measure.vars = names(df_a))
df_a$Legend = "Adjusted"
df_a$ci = as.numeric(df$ci)
df_a$Period = i
df = rbind(df_m, df_a)
if(perc){
df$value = df$value/total_area
df$ci = df$ci/total_area
}
df
}))
limits = aes_string(ymax = "value + ci", ymin = "value - ci")
dodge = position_dodge(width=0.9)
gp = ggplot(df, aes_string(fill="Legend", y="value", x="variable")) +
facet_wrap(~Period, scales = "free") +
geom_bar(position="dodge", stat="identity", na.rm=TRUE) +
geom_errorbar(limits, position=dodge, width=0.25, na.rm=TRUE) +
xlab("") +
ylab("Area")
if(perc)
gp = gp + scale_y_continuous(labels = percent)
gp
} |
czvalues <- function(moduleWebObject, weighted=FALSE, level="higher"){
if(!isCorrectModuleWebObject(moduleWebObject)) stop("This function cannot be applied to this type of object!")
zvalues <- function(web, weighted=FALSE){
if (weighted){
depL <- web/matrix(rowSums(web), nrow = NROW(web), ncol = NCOL(web), byrow = FALSE)
k.is <- colSums(depL)
} else {
k.is <- colSums(web>0)
}
out <- (k.is - mean(k.is))/sd(k.is)
out
}
modInfo <- listModuleInformation(moduleWebObject)
modules <- modInfo[[2]]
nModules <- length(modInfo[[2]])
if (nModules < 2) stop("This web has no modules.")
web <- moduleWebObject@originalWeb
if (level == "lower") web <- t(web)
if (weighted){
depL <- web/matrix(rowSums(web), nrow = NROW(web), ncol = NCOL(web), byrow = FALSE)
k.i <- colSums(depL)
} else {
k.i <- colSums(web>0)
}
z.values.all <- web[1,]
k.it.for.t <- matrix(NA, ncol=NCOL(web), nrow=nModules)
for (t in 1:nModules){
h.level.species.names <- if (level =="higher") modules[[t]][[2]] else modules[[t]][[1]]
l.level.species.names <- if (level =="higher") modules[[t]][[1]] else modules[[t]][[2]]
if (weighted){
depL.mod1 <- web[l.level.species.names, , drop=FALSE]/matrix(rowSums(web[l.level.species.names, , drop=FALSE]), nrow = NROW(web[l.level.species.names, , drop=FALSE]), ncol = NCOL(web), byrow = FALSE)
k.it <- colSums(depL.mod1)
} else {
k.it <- colSums(web[l.level.species.names, , drop=FALSE] > 0)
}
k.it.for.t[t,] <- (k.it/ k.i)^2
z.values.all[h.level.species.names] <- zvalues(web[l.level.species.names, h.level.species.names , drop=FALSE], weighted=weighted)
}
c.values.all <- 1 - colSums(k.it.for.t)
names(c.values.all) <- names(z.values.all)
z.values.all[is.nan(z.values.all)] <- 0
return(list("c"=c.values.all, "z"=z.values.all))
}
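# Minimal usage sketch (assumes the bipartite package, whose computeModules()
# and listModuleInformation() this function relies on, is installed; the
# memmott1999 network ships with bipartite). Module detection can take a
# little while, hence the if (interactive()) guard.
if (interactive()) {
  data(memmott1999, package = "bipartite")
  mod <- bipartite::computeModules(memmott1999)
  cz  <- czvalues(mod, weighted = FALSE, level = "higher")
  plot(cz$c, cz$z,
       xlab = "c (among-module connectivity)",
       ylab = "z (within-module degree)")
}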
|
`mapPies` <- function( dF
,nameX="LON", nameY="LAT"
,nameZs=c(names(dF)[3],names(dF)[4])
,zColours=c(1:length(nameZs))
,ratio = 1
,addCatLegend = TRUE
,symbolSize = 1
,maxZVal=NA
, xlim=NA
, ylim=NA
, mapRegion = "world"
, borderCol = "grey"
, oceanCol=NA
, landCol=NA
,add=FALSE
,main=''
,lwd = 0.5
,... )
{
functionName <- as.character(sys.call()[[1]])
if ( mapRegion == 'data' )
{
xlim <- c( min(dF[,nameX], na.rm=TRUE),max(dF[,nameX], na.rm=TRUE) )
ylim <- c( min(dF[,nameY], na.rm=TRUE),max(dF[,nameY], na.rm=TRUE) )
}
  if (inherits(dF, "SpatialPolygonsDataFrame"))
{
centroidCoords <- coordinates(dF)
dF[['nameX']] <- centroidCoords[,1]
dF[['nameY']] <- centroidCoords[,2]
nameX <- 'nameX'
nameY <- 'nameY'
if (!add)
{
lims <- rwmNewMapPlot(mapToPlot=dF,oceanCol=oceanCol,mapRegion=mapRegion, xlim=xlim, ylim=ylim)
xlim <- lims$xlim
ylim <- lims$ylim
plot( dF, add=TRUE, border=borderCol, col=landCol, main=main, lwd=lwd )
}
dF <- dF@data
} else if (!add)
{
lims <- rwmNewMapPlot(mapToPlot=getMap(),oceanCol=oceanCol,mapRegion=mapRegion, xlim=xlim, ylim=ylim)
xlim <- lims$xlim
ylim <- lims$ylim
plot( getMap(), add=TRUE, border=borderCol, col=landCol, main=main, lwd=lwd )
}
dF[is.na(dF)] <- 0
maxSumValues <- 0
for (locationNum in 1:length(dF[,nameZs[1]]))
{
sumValues <- sum( dF[ locationNum, nameZs ], na.rm=TRUE )
if ( sumValues > maxSumValues ) maxSumValues <- sumValues
}
symbolMaxSize <- 0.02*max( xlim[2]-xlim[1], (ylim[2]-ylim[1])*ratio )
symbolScale <- symbolMaxSize / sqrt( maxSumValues )
cat("symbolMaxSize=",symbolMaxSize," maxSumValues=",maxSumValues," symbolScale=",symbolScale,"\n")
for (locationNum in 1:length(dF[,nameZs[1]]))
{
sliceValues <- as.numeric( dF[ locationNum, nameZs ] )
if (sum(sliceValues, na.rm=TRUE)==0) next
cumulatProps <- c(0,cumsum(sliceValues)/sum(sliceValues, na.rm=TRUE))
pointsInCircle = 360
radius <- sqrt(sum(sliceValues, na.rm=TRUE))*symbolScale
radius <- radius*symbolSize
for ( sliceNum in 1:length(sliceValues) ) {
n <- max(2, floor((pointsInCircle * (cumulatProps[sliceNum+1]-cumulatProps[sliceNum]))))
P <- list( x= ratio * radius * cos(2*pi*seq(cumulatProps[sliceNum],cumulatProps[sliceNum+1],length=n))+ dF[ locationNum, nameX ],
y= radius * sin(2*pi*seq(cumulatProps[sliceNum],cumulatProps[sliceNum+1],length=n))+ dF[ locationNum, nameY ] )
polygon(c(P$x,dF[ locationNum, nameX ]),c(P$y,dF[ locationNum, nameY ]),col=zColours[sliceNum])
}
}
if (addCatLegend)
legend("bottomleft", legend=nameZs, fill=zColours, cex=0.7, bg="white")
radius <- symbolMaxSize*symbolSize
plotExtents <- par('usr')
plotS <- plotExtents[3]
plotW <- plotExtents[1]
plotN <- plotExtents[4]
plotE <- plotExtents[2]
centreE <- plotW + radius*2*ratio
centreN <- plotN - radius*2
t <- seq(0,2*pi,length=100)
P <- list( x= ratio * radius * cos(t)+ centreE,
y= radius * sin(t)+ centreN )
str(P)
}
|
changeValues <- function (sss, df){
col.names <- names(df)
whichHasValues <- which(with(sss$variables,
(hasValues & !type %in% c("multiple", "quantity")) |
(hasValues & type == "multiple" & subfields != "0")
))
changeSingleValue <- function(i){
if(i %in% whichHasValues){
codeFrame <- sss$codes[sss$codes$ident == sss$variables$ident[i],
c("code", "codevalues")]
codeFrame <- rbind(codeFrame, c(0, NA), c(" ", NA))
codeFrame$codevalues[match(as.character(df[, i]),
as.character(codeFrame$code))]
} else {
df[, i]
}
}
df <- fastdf(lapply(seq_along(df), changeSingleValue))
names(df) <- col.names
df
}
addQtext <- function(sss, df){
attr(df, "variable.labels") <- sss$variables$label
df
} |
library(testthat)
source("common.R")
describe("2.2. The `then` Method", {
describe("2.2.1. Both onFulfilled and onRejected are optional arguments:", {
it("2.2.1.1. If onFulfilled is not a function, it must be ignored.", {
})
it("2.2.1.1. If onRejected is not a function, it must be ignored.", {
})
})
describe("2.2.2. If onFulfilled is a function:", {
it("2.2.2.1. it must be called after promise is fulfilled, with promise’s value as its first argument.", {
x <- NULL
p <- ext_promise()
p$promise %>% then(function(value) { x <<- value })
wait_for_it()
expect_identical(x, NULL)
p$resolve(10)
wait_for_it()
expect_identical(x, 10)
})
it("2.2.2.2. it must not be called before promise is fulfilled.", {
})
it("2.2.2.3. it must not be called more than once.", {
})
})
describe("2.2.3. If onRejected is a function,", {
it("2.2.3.1. it must be called after promise is rejected, with promise’s reason as its first argument.", {
x <- NULL
p <- ext_promise()
p$promise %>% then(onRejected = function(reason) { x <<- reason })
wait_for_it()
expect_identical(x, NULL)
p$reject(simpleError("boom"))
wait_for_it()
expect_identical(x, simpleError("boom"))
})
})
describe("2.2.4. onFulfilled or onRejected must not be called until the execution context stack contains only platform code. [3.1].", {
it(" ", {
x <- NULL
promise(~resolve(TRUE)) %>% then(function(value) {x <<- value})
expect_identical(x, NULL)
wait_for_it()
expect_identical(x, TRUE)
})
})
describe("2.2.5. onFulfilled and onRejected must be called as functions (i.e. with no this value). [3.2]", {
})
describe("2.2.6. `then` may be called multiple times on the same promise.", {
it("2.2.6.1. If/when promise is fulfilled, all respective onFulfilled callbacks must execute in the order of their originating calls to then.", {
p <- ext_promise()
callbacks_called <- 0L
results <- new.env(parent = emptyenv())
lapply(1:10, function(i) {
results[[as.character(i)]] <- p$promise %>%
then(function(value) {
callbacks_called <<- callbacks_called + 1L
expect_identical(callbacks_called, i)
value
})
})
p$resolve(cars)
wait_for_it()
lapply(as.list(results), function(x) {
expect_identical(extract(x), cars)
})
})
})
describe("2.2.6.2. If/when promise is rejected, all respective onRejected callbacks must execute in the order of their originating calls to then.", {
p <- ext_promise()
callbacks_called <- 0L
results <- new.env(parent = emptyenv())
lapply(1:10, function(i) {
results[[as.character(i)]] <- p$promise %>%
catch(function(err) {
callbacks_called <<- callbacks_called + 1L
expect_identical(callbacks_called, i)
err
})
})
p$reject(simpleError("an error"))
wait_for_it()
lapply(as.list(results), function(x) {
expect_identical(extract(x), simpleError("an error"))
})
})
describe("2.2.7. `then` must return a promise [3.3].", {
it(" ", {
promise(~{}) %>% then() %>% is.promise() %>% expect_true()
})
it("2.2.7.1. If either onFulfilled or onRejected returns a value x, run the Promise Resolution Procedure [[Resolve]](promise2, x).", {
p1 <- promise(~resolve(TRUE)) %>% then(~"foo")
expect_identical(extract(p1), "foo")
p2 <- promise(~reject("boom")) %>% catch(~"bar")
expect_identical(extract(p2), "bar")
})
it("2.2.7.2. If either onFulfilled or onRejected throws an exception e, promise2 must be rejected with e as the reason.", {
p1 <- promise(~resolve(TRUE)) %>% then(~stop("foo"))
expect_error(extract(p1), "^foo$")
p2 <- promise(~reject("boom")) %>% catch(~stop("bar"))
expect_error(extract(p2), "^bar$")
})
it("2.2.7.3. If onFulfilled is not a function and promise1 is fulfilled, promise2 must be fulfilled with the same value as promise1.", {
p <- promise(~resolve("baz")) %>% then()
expect_identical(extract(p), "baz")
})
it("2.2.7.4. If onRejected is not a function and promise1 is rejected, promise2 must be rejected with the same reason as promise1.", {
p <- promise(~reject("qux")) %>% then()
expect_error(extract(p), "^qux$")
})
})
}) |
plot.RunRMDVal <- function(x, ...){
thislist <- list(...);
if (length(thislist$cycle) == 0){cycle <- 1;
}else{cycle <- thislist$cycle;}
res <- x;
cycle.index <- seq((cycle-1)*length(res$sdose)+1, (cycle)*length(res$sdose), 1);
nTTP <- res$nTTP.p[cycle.index,];
dose <- rep(res$sdose, times = dim(nTTP)[2])
nTTP <- c(nTTP)
dat <- data.frame(nTTP = nTTP, dose = dose);
ggplot(data = dat, aes(x = factor(dose), y = nTTP, group = factor(dose))) +
stat_boxplot(geom ='errorbar', width = 0.5) + geom_boxplot() + ylab(paste("nTTP Posterior Cycle ",cycle,sep='')) +
geom_hline(yintercept = 0.28, color = "blue", linetype = 2) + xlab("Dose") +
scale_x_discrete(breaks = 1:6, labels = 1:6)
} |
setCacheDir = function(cacheDir=NULL) { .setDir("RCACHE.DIR", cacheDir) }
getCacheDir = function() { getOption("RCACHE.DIR") }
setSharedCacheDir = function(sharedCacheDir=NULL) { .setDir("RESOURCES.RCACHE", sharedCacheDir) }
setCacheBuildDir = function(cacheBuildDir=NULL) { .setDir("RBUILD.DIR", cacheBuildDir) }
simpleCacheOptions = function() {
message("RESOURCES.RCACHE:\t", getOption("RESOURCES.RCACHE"))
message("RCACHE.DIR:\t", getCacheDir())
message("RBUILD.DIR:\t", getOption("RBUILD.DIR"))
message("SIMPLECACHE.ENV:\t", getOption("SIMPLECACHE.ENV"))
}
addCacheSearchEnvironment = function(addEnv) {
options(SIMPLECACHE.ENV=append(addEnv, getOption("SIMPLECACHE.ENV")))
}
resetCacheSearchEnvironment = function() {
options(SIMPLECACHE.ENV=NULL)
}
.setDir = function(optname, dirpath=NULL) {
diropts = list(ifelse(is.null(dirpath), getwd(), dirpath))
names(diropts) = optname
do.call(options, diropts)
} |
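# Minimal usage sketch (illustrative only): point the cache directory at a
# temporary folder and print the resulting simpleCache options.
if (interactive()) {
  setCacheDir(tempdir())
  simpleCacheOptions()
}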
loon_sidebarPanel.l_hist <- function(loon.grob,
tabPanelName,
colorList,
selectBy,
linkingGroup,
linkingGroups,
loonWidgetsInfo,
showWorldView, displayedPanel) {
tabPanel(
title = tabPanelName,
if(showWorldView) {
shiny::plotOutput(outputId=paste0(tabPanelName, "plot_world_view"),
height = "200px")
},
h6(""),
collapse_button("Plot", tabPanelName),
tagsDivPlot(loon.grob, tabPanelName, loonWidgetsInfo,
linkingGroup, displayedPanel),
collapse_button("Select", tabPanelName),
tagsDivSelect(loon.grob, tabPanelName, loonWidgetsInfo, displayedPanel),
collapse_button("Linking", tabPanelName),
tagsDivLink(loon.grob, tabPanelName, loonWidgetsInfo,
linkingGroup, linkingGroups, displayedPanel),
collapse_button("Modify", tabPanelName),
tagsDivModify(loon.grob, tabPanelName, colorList, loonWidgetsInfo, displayedPanel),
collapse_button("Layer", tabPanelName),
tagsDivLayer(loon.grob, tabPanelName, loonWidgetsInfo, displayedPanel)
)
} |
with_mock_api({
test_that("ff_standings returns a tibble for each platform currently programmed", {
skippy()
dlf_conn <- ff_connect("mfl", 37920, season = 2019)
dlf_standings <- ff_standings(dlf_conn)
expect_tibble(dlf_standings, any.missing = FALSE, min.rows = 16)
jml_conn <- ff_connect("sleeper", "522458773317046272", season = 2020)
jml_standings <- ff_standings(jml_conn)
dlp <- sleeper_connect(2020, "521379020332068864")
dlp_standings <- ff_standings(dlp)
got_conn <- fleaflicker_connect(season = 2020, league_id = 206154)
got_standings <- ff_standings(got_conn, include_allplay = FALSE, include_potentialpoints = FALSE)
got_schedule <- ff_schedule(got_conn, week = 4)
got_potentialpoints <- .flea_add_potentialpoints(got_schedule, got_conn)
tony_conn <- espn_connect(season = 2020, league_id = 899513)
tony_standings <- ff_standings(tony_conn)
expect_tibble(jml_standings, any.missing = FALSE, nrows = 12)
expect_tibble(dlp_standings, any.missing = FALSE, nrows = 12)
expect_tibble(got_standings, any.missing = FALSE, nrows = 16)
expect_tibble(got_potentialpoints, nrows = 16)
expect_tibble(tony_standings, any.missing = FALSE, nrows = 10)
})
}) |
MiNorm <- function(Matrix, posNC, method="none", leaveNC=TRUE, BGcor=FALSE){
if (BGcor == TRUE){
Matrix <- limma::backgroundCorrect(Matrix, method="normexp")
}
normatrix <- Matrix
if (method == "SQN"){
normatrix <- SQN::SQN(Matrix, ctrl.id=posNC)
}
if (method == "center"){
meanss <- apply(Matrix, MARGIN = 1, mean)
normatrix <- (Matrix-meanss)
}
if (method == "scale"){
sdss <- apply(Matrix, MARGIN = 1, stats::sd)
normatrix <- (Matrix/sdss)
}
if (method == "standardize"){
meanss <- apply(Matrix, MARGIN = 1, mean)
sdss <- apply(Matrix, MARGIN = 1, stats::sd)
normatrix <- ((Matrix-meanss)/sdss)
}
if (method == "range"){
minss <- apply(Matrix, MARGIN = 1, min)
maxss <- apply(Matrix, MARGIN = 1, max)
normatrix <- ((Matrix-minss)/(maxss-minss))
}
if (method == "QN"){
normatrix <- preprocessCore::normalize.quantiles(Matrix, copy=F)
}
if (method == "Loess"){
normatrix <- limma::normalizeCyclicLoess(Matrix, method = "fast")
}
rownames(normatrix)<-rownames(Matrix)
if (leaveNC == FALSE){
normatrix <- normatrix[-posNC, ]
}
return(normatrix)
} |
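# Minimal usage sketch (the toy matrix and the choice of control rows are
# assumptions for illustration): row-standardise a small probe matrix and drop
# the two control rows from the result. Only method = "SQN" uses posNC for the
# normalisation itself; here posNC only feeds leaveNC = FALSE.
if (interactive()) {
  set.seed(7)
  mat <- matrix(rnorm(50, mean = 8), nrow = 10,
                dimnames = list(paste0("probe", 1:10), paste0("s", 1:5)))
  MiNorm(mat, posNC = 9:10, method = "standardize", leaveNC = FALSE)
}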
mdp_relative_value_iteration <- function(P, R, epsilon, max_iter) {
start<-as.POSIXlt(Sys.time())
  if ( nargs() > 2 & ifelse(!missing(epsilon), epsilon < 0, F) ) {
    print('--------------------------------------------------------')
    print('MDP Toolbox ERROR: epsilon must be greater than 0')
    print('--------------------------------------------------------')
  } else if ( nargs() > 3 & ifelse(!missing(max_iter), max_iter <= 0, F) ) {
    print('--------------------------------------------------------')
    print('MDP Toolbox ERROR: The maximum number of iterations must be greater than 0')
    print('--------------------------------------------------------')
} else {
if (is.list(P)) {
S <- dim(P[[1]])[1]
A <- length(P)
} else {
S <- dim(P)[1]
A <- dim(P)[3]
}
PR <- mdp_computePR(P,R)
if (nargs() < 4) {
max_iter <- 1000
}
if (nargs() < 3) {
epsilon <- 0.01
}
U <- numeric(S)
iter <- 0
is_done <- F
Q1 <- numeric(A)
Q2 <- matrix(0, S, A)
while (!is_done) {
iter <- iter + 1
if (is.list(P)) {
for ( a in 1:A) {
Q1[a] <- PR[S,a] + P[[a]][S,]%*%U
}
} else {
for ( a in 1:A) {
Q1[a] <- PR[S,a] + P[S,,a]%*%U
}
}
g <- max(Q1)
if (is.list(P)) {
for ( a in 1:A) {
Q2[,a] <- as.matrix(PR[,a] + P[[a]]%*%U)
}
} else {
for ( a in 1:A) {
Q2[,a] <- PR[,a] + P[,,a]%*%U
}
}
Unext <- apply(Q2, 1, max)
policy <- apply(Q2, 1, which.max)
Unext <- Unext - g
variation <- mdp_span(Unext-U)
if (variation < epsilon) {
is_done <- T
print('MDP Toolbox: iterations stopped, epsilon-optimal policy found')
} else if (iter == max_iter) {
is_done <- T
print('MDP Toolbox: iterations stopped by maximum number of iteration condition')
} else {
U <- Unext
}
}
}
end <-as.POSIXlt(Sys.time())
return(list(U, policy, g, end-start))
} |
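# Minimal usage sketch (assumes the companion helpers mdp_computePR() and
# mdp_span(), called above, are defined elsewhere in this code base; the
# two-state, two-action MDP below is illustrative only).
if (interactive()) {
  P <- array(0, c(2, 2, 2))
  P[, , 1] <- matrix(c(0.5, 0.5,
                       0.8, 0.2), 2, 2, byrow = TRUE)
  P[, , 2] <- matrix(c(0.1, 0.9,
                       0.6, 0.4), 2, 2, byrow = TRUE)
  R <- matrix(c(5, 10,
                -1, 2), 2, 2, byrow = TRUE)
  mdp_relative_value_iteration(P, R, epsilon = 0.01, max_iter = 1000)
}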
sinar_pois <- function(n_row, n_col, a10, a01, a11, l) {
n_row1 <- n_row + 1e+2
n_col1 <- n_col + 1e+2
e <- matrix(stats::rpois(n_row1 * n_col1, l),
nrow = n_row1, ncol = n_col1)
x <- e
for (j in 2:n_col1) {
for (i in 2:n_row1) {
x[i, j] <- stats::rbinom(1, x[i - 1, j], a10) +
stats::rbinom(1, x[i, j - 1], a01) +
stats::rbinom(1, x[i - 1, j - 1], a11) + e[i, j]
}
}
x[(n_row1 - n_row + 1):n_row1, (n_col1 - n_col + 1):n_col1]
}
cls <- function(X) {
n_row <- nrow(X)
n_col <- ncol(X)
mA <- matrix(0, nrow = 4, ncol = 4)
for (i in 2:n_row) {
for (j in 2:n_col) {
mA <- mA +
rbind(c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i - 1, j],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i - 1, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1))
}
}
mb <- vector("double", 4)
for (i in 2:n_row) {
for (j in 2:n_col) {
mb <- mb +
c(X[i - 1, j] * X[i, j],
X[i, j - 1] * X[i, j],
X[i - 1, j - 1] * X[i, j],
X[i, j])
}
}
est <- solve(mA, mb)
names(est) <- c("a10", "a01", "a11", "mu")
est
}
emp_V <- function(X) {
V <- matrix(0, 4, 4)
for (i in 2:nrow(X)) {
for (j in 2:ncol(X)) {
V <- V +
rbind(c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i - 1, j],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) * X[i - 1, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1))
}
}
V / ((nrow(X) - 1) * (ncol(X) - 1))
}
teo_V <- function(a10, a01, a11, mu_e, s2_e) {
mu <- mu_e / (1 - a10 - a01 - a11)
A <- rbind(c(-1, a10, a01, a11),
c(a10, a11, -1, a01),
c(a01, -1, a11, a10),
c(a11, a10, a01, -1))
b <- c(-mu_e^2 - s2_e - 2 * mu * mu_e * (a10 + a01 + a11),
-mu * mu_e, -mu * mu_e, -mu * mu_e)
K <- solve(A, b)
names(K) <- c("k00", "k01", "k10", "k11")
V <- rbind(c(K["k00"], K["k11"], K["k01"], mu),
c(K["k11"], K["k00"], K["k10"], mu),
c(K["k01"], K["k10"], K["k00"], mu),
c(mu, mu, mu, 1))
V
}
emp_W <- function(X) {
theta <- cls(X)
W <- matrix(0, 4, 4)
for (i in 2:nrow(X)) {
for (j in 2:ncol(X)) {
U <- X[i, j] - (theta["a10"] * X[i - 1, j] + theta["a01"] *
X[i, j - 1] +
theta["a11"] * X[i - 1, j - 1] + theta["mu"])
W <- W +
U^2 * rbind(c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) *
X[i - 1, j],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) *
X[i, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1) *
X[i - 1, j - 1],
c(X[i - 1, j], X[i, j - 1], X[i - 1, j - 1], 1))
}
}
W / ((nrow(X) - 1) * (ncol(X) - 1))
}
var_sinar <- function(X) {
theta <- cls(X)
lambda <- (1 + theta["a10"]^2 - theta["a01"]^2 - theta["a11"]^2 -
sqrt((1 + theta["a10"]^2 - theta["a01"]^2 - theta["a11"]^2)^2 -
4 * (theta["a10"] + theta["a01"] * theta["a11"])^2)) /
(2 * (theta["a10"] + theta["a01"] * theta["a11"]))
eta <- (theta["a01"] + theta["a11"] * lambda) /
(1 - theta["a10"] * lambda)
g00 <- mean((X - mean(X))^2)
s2 <- g00 *
(1 - (theta["a10"] + theta["a01"] * theta["a11"]) * lambda -
(theta["a01"] + theta["a10"] * theta["a11"]) * eta -
theta["a11"]^2) - mean(X) *
(theta["a10"] * (1 - theta["a10"]) +
theta["a01"] * (1 - theta["a01"]) +
theta["a11"] * (1 - theta["a11"]))
names(s2) <- NULL
s2
}
emp_cov <- function(X) {
cov <- MASS::ginv(emp_V(X)) %*% emp_W(X) %*% MASS::ginv(emp_V(X)) /
((nrow(X) - 1) * (ncol(X) - 1))
colnames(cov) <- c("a10", "a01", "a11", "mu")
rownames(cov) <- c("a10", "a01", "a11", "mu")
cov
}
var_hat_sigma <- function(X) {
m <- mean(X)
g00 <- mean((X - mean(X))^2)
f <- function(theta) {
names(theta) <- c("a10", "a01", "a11", "mu")
lambda <- (1 + theta["a10"]^2 - theta["a01"]^2 - theta["a11"]^2 -
sqrt((1 + theta["a10"]^2 - theta["a01"]^2 - theta["a11"]^2)^2 -
4 * (theta["a10"] + theta["a01"] * theta["a11"])^2)) /
(2 * (theta["a10"] + theta["a01"] * theta["a11"]))
eta <- (theta["a01"] + theta["a11"] * lambda) /
(1 - theta["a10"] * lambda)
s2 <- g00 *
(1 - (theta["a10"] + theta["a01"] * theta["a11"]) * lambda -
(theta["a01"] + theta["a10"] * theta["a11"]) * eta -
theta["a11"]^2) - m *
(theta["a10"] * (1 - theta["a10"]) +
theta["a01"] * (1 - theta["a01"]) +
theta["a11"] * (1 - theta["a11"]))
names(s2) <- NULL
s2
}
g <- matrix(numDeriv::grad(f, cls(X)), ncol = 1)
t(g) %*% emp_cov(X) %*% g
} |
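# Minimal usage sketch (parameter values are illustrative only; emp_cov()
# additionally assumes the MASS package is installed, and var_hat_sigma() the
# numDeriv package): simulate a Poisson SINAR(1,1) field, estimate the
# coefficients by conditional least squares, and get approximate standard errors.
if (interactive()) {
  set.seed(123)
  X_sim <- sinar_pois(n_row = 30, n_col = 30, a10 = 0.2, a01 = 0.2, a11 = 0.1, l = 2)
  cls(X_sim)                   # CLS estimates of a10, a01, a11 and mu
  sqrt(diag(emp_cov(X_sim)))   # approximate standard errors of the estimates
}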
strflsh1 <-
function(x,y){
a1 = mycount1(y)
if(max(a1$ct)<4.5) return(0)
a2 = c(1:length(a1$ct))[a1$ct > 4.5]
a3 = a1$v[a2]
a4 = sort(x[y == a3],decreasing=TRUE)
straight1(a4)
} |
readMortTable <- function(dirFileName, header = T){
mortTable <- read.csv(file = dirFileName, header = header, sep = ",")
  colnames(mortTable) <- c("age", "female", "male")
return(mortTable)
}
calcMortFactors <- function(inPolicy, mortTable, dT = 1 / 12){
birDate <- as.POSIXlt(inPolicy[1, "birthDate"])
curDate <- as.POSIXlt(inPolicy[1, "currentDate"])
matDate <- as.POSIXlt(inPolicy[1, "matDate"])
if (curDate < birDate) stop("Current date is prior to birth date.")
if (matDate < birDate) stop("Maturity date is prior to birth date.")
if (matDate < curDate) stop("Maturity date is prior to current date.")
monBirCur <- 12 * (curDate$year - birDate$year) +
(curDate$mon - birDate$mon)
monMatCur <- 12 * (matDate$year - birDate$year) +
(matDate$mon - birDate$mon)
ageCur <- monBirCur / 12
x0 <- floor(ageCur)
t0 <- ageCur - x0
xT <- floor(monMatCur / 12)
gender <- ifelse(inPolicy[1, "gender"] == "F", "female", "male")
maxRng <- c(x0:mortTable$age[length(mortTable$age)])
ageRngLen <- xT - x0 + 1
annualQ <- mortTable[mortTable$age %in% maxRng, gender]
annualQ <- c(annualQ, rep(1, as.numeric(ageRngLen > length(maxRng)) *
(ageRngLen - length(annualQ))))
p <- rep(1)
pq <- rep(1)
xCur <- x0
tCur <- t0
dP <- 1
step <- 1
while (dP > 0.00001) {
baseQ <- annualQ[xCur - x0 + 1]
dtQxt <- (dT * baseQ) / (1 - tCur * baseQ)
p[step + 1] <- p[step] * (1 - dtQxt)
dP <- p[step + 1]
pq[step] <- p[step] * dtQxt
ageCur <- ageCur + dT
xCur <- floor(ageCur + 1e-10)
tCur <- ifelse(abs(ageCur - xCur) < 1e-10, 0, ageCur - xCur)
step <- step + 1
}
return(mortFactors = data.frame(pq = pq, p = p[-1]))
}
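# Minimal usage sketch (the policy row layout and the geometric toy mortality
# rates below are assumptions for illustration; real inputs come from the
# package's policy and mortality tables): monthly survival and death
# probabilities for a 40-year-old female policyholder.
if (interactive()) {
  toyPolicy <- data.frame(birthDate   = as.Date("1980-03-01"),
                          currentDate = as.Date("2020-03-01"),
                          matDate     = as.Date("2030-03-01"),
                          gender      = "F",
                          stringsAsFactors = FALSE)
  toyMort <- data.frame(age    = 0:110,
                        female = pmin(1, 0.0005 * 1.09^(0:110)),
                        male   = pmin(1, 0.0007 * 1.09^(0:110)))
  head(calcMortFactors(toyPolicy, toyMort, dT = 1 / 12))
}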
projectDBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectDBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV * inPolicy[1, "riderFee"] -
dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
(1 + inPolicy[1, "rollUpRate"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectDBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectABRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
else {
LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
inPolicy[1, "gbAmt"] / dAV[step]
}else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, "gbAmt"] / numFund
}
}
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
as.POSIXlt(inPolicy[1, "issueDate"]))
inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
inPolicy[1, "matDate"] <- as.Date(newDate)
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectABRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
(1 + inPolicy[1, "rollUpRate"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
else {
LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
inPolicy[1, "gbAmt"] / dAV[step]
}else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, "gbAmt"] / numFund
}
}
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
as.POSIXlt(inPolicy[1, "issueDate"]))
inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
inPolicy[1, "matDate"] <- as.Date(newDate)
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectABSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]){
if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
else {
LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
if (dAV[step] > 0.00001){
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
inPolicy[1, "gbAmt"] / dAV[step]
}else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, "gbAmt"] / numFund
}
}
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + (as.POSIXlt(inPolicy[1, "currentDate"]) -
as.POSIXlt(inPolicy[1, "issueDate"]))
inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
inPolicy[1, "matDate"] <- as.Date(newDate)
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectIBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
ag <- 0
dP <- 1
nY <- 0
r <- 0.05
while (dP > 0.00001) {
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
ag <- ag + dP * exp(-r * nY)
nY <- nY + 1
}
aT <- 0
dP <- 1
nY <- 0
while (dP > 0.00001) {
dFR <- 0
if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
else dFR <- df[numStep - 1]
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
aT <- aT + dP * exp(-dFR * nY)
nY <- nY + 1
}
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectIBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
ag <- 0
dP <- 1
nY <- 0
r <- 0.05
while (dP > 0.00001) {
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
ag <- ag + dP * exp(-r * nY)
nY <- nY + 1
}
aT <- 0
dP <- 1
nY <- 0
while (dP > 0.00001){
dFR <- 0
if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
else dFR <- df[numStep - 1]
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
aT <- aT + dP * exp(-dFR * nY)
nY <- nY + 1
}
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
(1 + inPolicy[1, "rollUpRate"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectIBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
ag <- 0
dP <- 1
nY <- 0
r <- 0.05
while (dP > 0.00001) {
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
ag <- ag + dP * exp(-r * nY)
nY <- nY + 1
}
aT <- 0
dP <- 1
nY <- 0
while (dP > 0.00001) {
dFR <- 0
if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
else dFR <- df[numStep - 1]
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
aT <- aT + dP * exp(-dFR * nY)
nY <- nY + 1
}
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
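# projectMBRP / projectMBRU / projectMBSU: maturity-benefit variants.  The
# guarantee base is left unchanged (RP), rolled up by rollUpRate (RU), or
# ratcheted to the account value (SU) on each policy anniversary; at maturity
# the living benefit is max(gbAmt - account value, 0).  These riders pay no
# death benefit (DA stays zero) and RC collects the discounted rider fees.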
projectMBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] - dAV, 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectMBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
(1 + inPolicy[1, "rollUpRate"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] - dAV, 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectMBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]){
LA[step] <- max(inPolicy[1, "gbAmt"] - dAV, 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
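# projectWBRP / projectWBRU / projectWBSU: withdrawal-benefit variants.  On
# each policy anniversary a guaranteed withdrawal dWA = gbAmt *
# wbWithdrawalRate (capped at the remaining gmwbBalance) is taken, the fund
# values are scaled down pro rata, and the living benefit covers any excess of
# the withdrawal over the account value.  The guarantee base is fixed (RP),
# rolled up (RU) or ratcheted to the account value (SU) before the withdrawal
# is determined.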
projectWBRP <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
numFund <- dim(oneFundScen)[2]
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
  for (step in 1:numStep) {
    dWA <- 0 # guaranteed withdrawals are only taken in anniversary months
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
dAV[step] <- max(0, dAV[step] - dWA)
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
(dAV[step] / (dAV[step] + dWA))
} else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
}
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- max(0, dWA - dAV[step])
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectWBRU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
numFund <- dim(oneFundScen)[2]
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
  for (step in 1:numStep) {
    dWA <- 0 # guaranteed withdrawals are only taken in anniversary months
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- inPolicy[1, "gbAmt"] *
(1 + inPolicy[1, "rollUpRate"])
dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
dAV[step] <- max(0, dAV[step] - dWA)
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
(dAV[step] / (dAV[step] + dWA))
} else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
}
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- max(0, dWA - dAV[step])
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectWBSU <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
numFund <- dim(oneFundScen)[2]
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
  for (step in 1:numStep) {
    dWA <- 0 # guaranteed withdrawals are only taken in anniversary months
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
dAV[step] <- max(0, dAV[step] - dWA)
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
(dAV[step] / (dAV[step] + dWA))
} else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
}
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- 0
LA[step] <- max(0, dWA - dAV[step])
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
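# projectDBAB / projectDBIB / projectDBMB / projectDBWB: combinations of a
# death benefit with a living-benefit rider.  Every month the death benefit is
# DA = max(gbAmt - account value, 0); the living-benefit part follows the
# corresponding IB/MB/WB mechanics above, while DBAB adds an
# accumulation-benefit style top-up and contract renewal at maturity.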
projectDBAB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
if (inPolicy[1, "gbAmt"] < dAV[step]) inPolicy[1, "gbAmt"] <- dAV[step]
else {
LA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
numFund <- length(inPolicy[1, grep("fundValue", colnames(inPolicy))])
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
inPolicy[1, "gbAmt"] / dAV[step]
} else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, "gbAmt"] / numFund
}
}
      # Renew the contract at maturity: reset the issue date to the current
      # date and roll the maturity date forward by the elapsed term, measured
      # in whole months (the original difftime arithmetic mixed days into the
      # month field).
      newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
      issueLt <- as.POSIXlt(inPolicy[1, "issueDate"])
      newDate$mon <- newDate$mon + 12 * (newDate$year - issueLt$year) +
        (newDate$mon - issueLt$mon)
      inPolicy[1, "issueDate"] <- inPolicy[1, "currentDate"]
      inPolicy[1, "matDate"] <- as.Date(newDate)
    }
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectDBIB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
ag <- 0
dP <- 1
nY <- 0
r <- 0.05
while (dP > 0.00001) {
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
ag <- ag + dP * exp(-r * nY)
nY <- nY + 1
}
aT <- 0
dP <- 1
nY <- 0
while (dP > 0.00001) {
dFR <- 0
if (nY * 12 < numStep) dFR <- df[nY * 12 + 1]
else dFR <- df[numStep - 1]
if (nY * 12 < length(p)) dP <- p[nY * 12 + 1]
else dP <- 0
aT <- aT + dP * exp(-dFR * nY)
nY <- nY + 1
}
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon){
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else{
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] * aT / ag - dAV[step], 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectDBMB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
for (step in 1:numStep) {
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- 0
RC[step] <- dFee[step]
if (inPolicy[1, "matDate"] == inPolicy[1, "currentDate"]) {
LA[step] <- max(inPolicy[1, "gbAmt"] - dAV, 0)
}
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
projectDBWB <- function(inPolicy, oneFundScen, dT = 1 / 12, pq, p, df){
numStep <- dim(oneFundScen)[1]
if (((length(pq) >= numStep) &&
(length(p) >= numStep) &&
(length(df) >= numStep)) == FALSE) {
stop("df, pq, and p must have length > numStep from oneFundScen")
}
numFund <- dim(oneFundScen)[2]
DA <- rep(0, numStep)
LA <- rep(0, numStep)
RC <- rep(0, numStep)
dAV <- rep(0, numStep)
dFee <- rep(0, numStep)
  for (step in 1:numStep) {
    dWA <- 0 # guaranteed withdrawals are only taken in anniversary months
dPartialAV <- inPolicy[1, grep("fundValue", colnames(inPolicy))] *
oneFundScen[step, ] *
(1 - inPolicy[1, grep("fundFee", colnames(inPolicy))] * dT)
if (as.POSIXlt(inPolicy[1, "currentDate"])$mon ==
as.POSIXlt(inPolicy[1, "issueDate"])$mon) {
raw <- sum(dPartialAV)
dFee[step] <- sum(raw * inPolicy[1, "riderFee"])
dPartialAV <- dPartialAV - dPartialAV *
inPolicy[1, "riderFee"] - dPartialAV * inPolicy[1, "baseFee"]
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
inPolicy[1, "gbAmt"] <- max(dAV[step], inPolicy[1, "gbAmt"])
dWAG <- inPolicy[1, "gbAmt"] * inPolicy[1, "wbWithdrawalRate"]
dWA <- min(dWAG, inPolicy[1, "gmwbBalance"])
inPolicy[1, "gmwbBalance"] <- inPolicy[1, "gmwbBalance"] - dWA
inPolicy[1, "withdrawal"] <- inPolicy[1, "withdrawal"] + dWA
if (inPolicy[1, "gmwbBalance"] < 0.0001) inPolicy[1, "gbAmt"] <- 0
dAV[step] <- max(0, dAV[step] - dWA)
if (dAV[step] > 0.00001) {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <-
inPolicy[1, grep("fundValue", colnames(inPolicy))] *
(dAV[step] / (dAV[step] + dWA))
} else {
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- rep(0, numFund)
}
} else {
dAV[step] <- sum(dPartialAV)
inPolicy[1, grep("fundValue", colnames(inPolicy))] <- dPartialAV
}
DA[step] <- max(0, inPolicy[1, "gbAmt"] - dAV[step])
LA[step] <- max(0, dWA - dAV[step])
RC[step] <- dFee[step]
newDate <- as.POSIXlt(inPolicy[1, "currentDate"])
newDate$mon <- newDate$mon + 1
inPolicy[1, "currentDate"] <- as.Date(newDate)
}
pq <- pq[1:numStep] * df[1:numStep]
p <- p[1:numStep] * df[1:numStep]
return(list(DA = pq %*% DA, LA = p %*% LA, RC = p %*% RC,
outPolicy = inPolicy))
}
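# valuateOnePolicy: value a single policy by simulation.  The product type in
# the policy record selects the matching project* function above; mortality
# factors (pq, p) come from calcMortFactors; each scenario contributes one set
# of discounted death benefits, living benefits and risk charges, and the
# averages across scenarios are returned.
#
# Example (hypothetical inputs; not run):
#   val <- valuateOnePolicy(onePolicy, mortTable, fundScen, dT = 1 / 12, df)
#   val$policyValue; val$riskCharge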
valuateOnePolicy <- function(inPolicy, mortTable, fundScen, dT = 1 / 12, df){
if (inPolicy[1, "matDate"] <= inPolicy[1, "currentDate"]) {
    return(list(policyValue = 0, riskCharge = 0))
}
Type <- inPolicy[1, "productType"]
projectFun <- get(paste0("project", Type))
startDate <- as.POSIXlt(inPolicy[1, "currentDate"])
endDate <- as.POSIXlt(inPolicy[1, "matDate"])
numStep <- 12 * (endDate$year - startDate$year) +
(endDate$mon - startDate$mon)
scenDim <- dim(fundScen)
if (length(scenDim) == 2){
numScen <- 1
} else {
numScen <- scenDim[1]
}
mortFactors <- calcMortFactors(inPolicy, mortTable, dT)
pq <- matrix(mortFactors[, "pq"], nrow = 1)
p <- matrix(mortFactors[, "p"], nrow = 1)
DA <- c()
LA <- c()
RC <- c()
for (scen in 1:numScen) {
if (numScen == 1) {
curScen <- matrix(fundScen[1:numStep, ], nrow = numStep)
VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df)
DA[scen] <- VABenefits$DA
LA[scen] <- VABenefits$LA
RC[scen] <- VABenefits$RC
} else {
curScen <- matrix(fundScen[scen, 1:numStep, ], nrow = numStep)
VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df)
DA[scen] <- VABenefits$DA
LA[scen] <- VABenefits$LA
RC[scen] <- VABenefits$RC
}
}
DAs <- sum(DA)
LAs <- sum(LA)
RCs <- sum(RC)
return(list(policyValue = (DAs + LAs) / numScen,
riskCharge = RCs / numScen))
}
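# ageOnePolicy: roll a policy forward from its current date to targetDate
# along the historical fund scenario (fundScen with dates scenDates) and
# return the updated policy record produced by the matching project* function.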
ageOnePolicy <- function(inPolicy, mortTable, fundScen, scenDates,
dT = 1 / 12, targetDate, df){
scenStartDate <- as.POSIXlt(scenDates[1])
targetDate <- as.Date(targetDate)
if (targetDate <= inPolicy[1, "currentDate"]) {
warning("No aging required. Original policy returned.")
return(inPolicy)
}
  if (targetDate > max(scenDates)) {
    stop("No aging performed. Target date is beyond the last date of the ",
         "fund scenario data (", max(scenDates), ").")
  }
  if (min(scenDates) > inPolicy[1, "currentDate"]) {
    stop("No aging performed. Current date is before the first date of the ",
         "fund scenario data (", min(scenDates), ").")
  }
rollEndDate <- as.POSIXlt(targetDate)
rollEndDate$mon <- rollEndDate$mon - 1
type <- inPolicy[1, "productType"]
projectFun <- get(paste0("project", type))
startDate <- as.POSIXlt(inPolicy[1, "currentDate"])
endDate <- as.POSIXlt(inPolicy[1, "matDate"])
numStep <- 12 * (endDate$year - startDate$year) +
(endDate$mon - startDate$mon)
agingStartIndx <- 12 * (startDate$year - scenStartDate$year) +
(startDate$mon - scenStartDate$mon)
agingEndIndx <- 12 * (rollEndDate$year - scenStartDate$year) +
(rollEndDate$mon - scenStartDate$mon)
rngScenIndx <- c(agingStartIndx:agingEndIndx)
curScen <- fundScen[rngScenIndx, ]
mortFactors <- calcMortFactors(inPolicy, mortTable, dT)
actFactors <- mortFactors * df[1:numStep]
pq <- matrix(actFactors[, "pq"], nrow = 1)
p <- matrix(actFactors[, "p"], nrow = 1)
VABenefits <- projectFun(inPolicy, curScen, dT, pq, p, df)
outPolicy <- VABenefits$outPolicy
return(outPolicy)
}
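# valuatePortfolio / agePortfolio: apply valuateOnePolicy / ageOnePolicy to
# every row of a portfolio data frame and aggregate the results.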
valuatePortfolio <- function(inPortfolio, mortTable, fundScen, dT = 1 / 12, df){
numPolicy <- nrow(inPortfolio)
vecVal <- rep(0, numPolicy)
vecRC <- rep(0, numPolicy)
for (i in 1:numPolicy){
curPolicy <- inPortfolio[i, ]
outTemp <- valuateOnePolicy(curPolicy, mortTable, fundScen, dT, df)
vecVal[i] <- outTemp$policyValue
vecRC[i] <- outTemp$riskCharge
}
return (list(portVal = sum(vecVal), portRC = sum(vecRC),
vecVal = vecVal, vecRC = vecRC))
}
agePortfolio <- function(inPortfolio, mortTable, fundScen, scenDates,
dT = 1 / 12, targetDate, df){
numPolicy <- nrow(inPortfolio)
for (i in 1:numPolicy){
inPolicy <- inPortfolio[i, ]
    inPortfolio[i, ] <- ageOnePolicy(inPolicy, mortTable, fundScen, scenDates,
                                     dT = dT, targetDate, df)
}
return (outPortfolio = inPortfolio)
} |
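# Accessors for web-process inputs: inputs() returns the process inputs of a
# webprocess object (optionally filtered by name) or, for an executed request
# document (xml_document), the literal inputs parsed from the XML.  The
# replacement form inputs<- accepts name = value pairs, a single character
# value, or a named list; .setInput() validates the input name and converts
# logicals to the 'true'/'false' strings expected by the service.
#
# Example (hypothetical webprocess object 'wp' and input name; not run):
#   inputs(wp)
#   inputs(wp) <- list(DELIMITER = "COMMA")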
setGeneric(name="inputs<-",def=function(.Object, ..., value){
standardGeneric("inputs<-")
})
setGeneric(name="inputs",def=function(.Object, ..., value){standardGeneric("inputs")})
setMethod(f = "inputs",signature = "webprocess",
definition = function(.Object, ..., value){
if (missing(...)){
return(.Object@processInputs)
} else {
ret_idx <- which(names(.Object@processInputs) %in% as.character(expand.grid(...,stringsAsFactors = F)))
return(.Object@processInputs[ret_idx])
}
})
setOldClass("xml_document")
setMethod(f = "inputs",signature = "xml_document",
definition = function(.Object, ...){
inputXpath <- "//wps:Execute/wps:DataInputs/wps:Input"
inputs <- xml2::xml_find_all(.Object, inputXpath, ns = pkg.env$NAMESPACES)
results <- list()
names <- c()
for (i in 1:length(inputs)) {
xmlList <- xml2::as_list(inputs[[i]])
if (!is.null(xmlList$Data) && !is.null(xmlList$Data$LiteralData)) {
results <- c(results, xmlList$Data$LiteralData)
names <- c(names, xmlList$Identifier)
}
}
names(results) <- names
return(results)
})
setMethod(f = "inputs<-",signature = c("webprocess",'missing'),
definition = function(.Object, ..., value){
args <- expand.grid(..., stringsAsFactors = FALSE)
for (i in seq_len(ncol(args))){
.Object <- .setInput(.Object, names(args)[i], args[[i]])
}
return(.Object)
})
setMethod(f = "inputs<-",signature = c("webprocess",'character'),
definition = function(.Object, ..., value){
name <- as.character(expand.grid(..., stringsAsFactors = FALSE))
.Object <- .setInput(.Object, name, value)
return(.Object)
})
setMethod(f = "inputs<-",signature = c("webprocess",'list'),
definition = function(.Object, ..., value){
for (i in seq_len(length(value))){
.Object <- .setInput(.Object, names(value)[i], value[[i]])
}
inputs(.Object, ...)
return(.Object)
})
.setInput <- function(.Object, name, arg){
if (is.logical(arg)){
arg <- ifelse(isTRUE(arg),'true','false')
}
if (!is.character(arg))
stop('Process inputs values must be characters.')
if (!name %in% names(.Object@processInputs))
stop(name,' not found in ', paste(c('url', 'algorithm', 'version', 'email', 'wait', names(inputs(.Object))), collapse = ', '), call. = FALSE)
.Object@processInputs[[name]] <- arg
return(.Object)
} |
methylation.recode<-function(data){
data.recode<-apply(data,2,cluster)
return(data.recode)
} |
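# .gonad_mature_fq: frequentist maturity ogive.  Fits a logistic regression of
# maturity stage on size (x), bootstraps the fit niter times to obtain
# distributions of the intercept (A) and slope (B), and returns the size at
# 50% maturity, L50 = -A/B, together with the 2.5%, 50% and 97.5% quantiles of
# the fitted proportions.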
.gonad_mature_fq <- function(data, niter, seed){
model_glm <- glm(stage ~ x, data = data, family = binomial(link = "logit"))
smry_model <- summary(model_glm)
set.seed(seed)
n_coef <- list()
for(i in seq_len(niter)){
new_data <- data[sample(nrow(data), replace = TRUE), ]
model_boot <- glm(stage ~ x, data = new_data, family = binomial(link = "logit"))
glm_coef <- coef(model_boot)
n_coef <- rbind(n_coef, glm_coef)
}
A <- as.numeric(n_coef[,1])
B <- as.numeric(n_coef[,2])
L50 <- -A/B
create_x <- cbind(1, data$x)
x_fq <- as.matrix(create_x) %*% t(as.matrix(cbind(A,B)))
pred_fq <- 1 / (1 + exp(-x_fq))
qtl <- round(matrixStats::rowQuantiles(pred_fq, probs = c(0.025, 0.5, 0.975)), 3)
fitted <- qtl[, 2]
lower <- qtl[, 1]
upper <- qtl[, 3]
estimate <- list(model = smry_model,
parameters_A = A,
parameters_B = B,
L50 = L50,
lower = lower,
fitted = fitted,
upper = upper)
return(estimate)
}
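# .gonad_mature_bayes: Bayesian counterpart of .gonad_mature_fq, drawing the
# intercept and slope from the posterior via MCMCpack::MCMClogit instead of
# bootstrapping.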
.gonad_mature_bayes <- function(data, niter, seed){
set.seed(seed)
model_bayes <- MCMCpack::MCMClogit(data$stage ~ data$x, mcmc = niter, thin = 1)
smry_model <- summary(model_bayes)
A <- as.numeric(model_bayes[,1])
B <- as.numeric(model_bayes[,2])
L50 <- -A/B
create_x <- cbind(1, data$x)
x_bayes <- as.matrix(create_x) %*% t(model_bayes)
pred_bayes <- 1 / (1 + exp(-x_bayes))
qtl <- round(matrixStats::rowQuantiles(pred_bayes, probs = c(0.025, 0.5, 0.975)), 3)
fitted <- qtl[, 2]
lower <- qtl[, 1]
upper <- qtl[, 3]
estimate <- list(model = smry_model,
parameters_A = A,
parameters_B = B,
L50 = L50,
lower = lower,
fitted = fitted,
upper = upper)
return(estimate)
} |
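# rate_limit: read the DigitalOcean API rate-limit headers from a cheap HEAD
# request and return them as a "do_rate" object; print.do_rate() shows the
# limit, the remaining calls and the time until the limit resets.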
rate_limit <- function() {
res <- httr::HEAD("https://api.digitalocean.com/v2/sizes", do_oauth())
headers <- httr::headers(res)
structure(
list(
limit = as.integer(headers$`ratelimit-limit`),
remaining = as.integer(headers$`ratelimit-remaining`),
reset = as.POSIXct(as.integer(headers$`ratelimit-reset`),
origin = "1970-01-01")
),
class = "do_rate"
)
}
print.do_rate <- function(x, ...){
cat("Rate limit: ", x$limit, '\n', sep = "")
cat("Remaining: ", x$remaining, '\n', sep = "")
diff <- difftime(x$reset, Sys.time(), units = "secs")
cat("Resets in: ", time(diff), "\n", sep = "")
}
time <- function(x) {
x <- as.integer(x)
if (x > 3600) {
paste0(x %/% 3600, " hours")
} else if (x > 300) {
paste0(x %/% 60, " minutes")
} else if (x > 60) {
paste0(round(x / 60, 1), " minutes")
} else {
paste0(x, "s")
}
} |
int_user_name_formats_bg_bg = c(
'{{last_name_female}}.{{first_name_female}}',
'{{last_name_male}}.{{first_name_male}}',
'{{last_name_male}}.{{first_name_male}}',
'{{first_name_male}}.{{last_name_male}}',
  '{{first_name}}##',
  '?{{last_name}}',
'{{first_name}}{{year}}'
)
int_email_formats_bg_bg = c('{{user_name}}@{{free_email_domain}}',
'{{user_name}}@{{domain_name}}')
int_free_email_domains_bg_bg = c(
'gmail.com', 'yahoo.com', 'hotmail.com', 'mail.bg', 'abv.bg', 'dir.bg'
)
int_tlds_bg_bg = c('bg', 'com', 'biz', 'info', 'net', 'org', 'edu')
int_replacements_bg_bg = list(
c("\u0411", "b"),
c("\u0413", "r"),
c("\u0414", "d"),
c("\u0416", "zh"),
c("\u0417", "z"),
c("\u0418", "i"),
c("\u0419", "i"),
c("\u041b", "l"),
c("\u041f", "p"),
c("\u0424", "f"),
c("\u0426", "ts"),
c("\u0427", "ch"),
c("\u0428", "sh"),
c("\u0429", "sht"),
c("\u042a", "u"),
c("\u042c", ""),
c("\u042e", "yu"),
c("\u042f", "ya"),
c("\u0431", "b"),
c("\u0432", "v"),
c("\u0434", "d"),
c("\u0436", "zh"),
c("\u0437", "z"),
c("\u0438", "i"),
c("\u0439", "i"),
c("\u043a", "k"),
c("\u043b", "l"),
c("\u043c", "m"),
c("\u043d", "n"),
c("\u043f", "p"),
c("\u0442", "t"),
c("\u0444", "f"),
c("\u0446", "ts"),
c("\u0447", "ch"),
c("\u0448", "sh"),
c("\u0449", "sht"),
c("\u044a", "u"),
c("\u044c", ""),
c("\u044e", "yu"),
c("\u044f", "ya"),
c("\u0411", "b"),
c("\u0413", "r"),
c("\u0414", "d"),
c("\u0416", "zh"),
c("\u0417", "z"),
c("\u0418", "i"),
c("\u0419", "i"),
c("\u041b", "l"),
c("\u041f", "p"),
c("\u0424", "f"),
c("\u0426", "ts"),
c("\u0427", "ch"),
c("\u0428", "sh"),
c("\u0429", "sht"),
c("\u042a", "u"),
c("\u042c", ""),
c("\u042e", "yu"),
c("\u042f", "ya"),
c("\u0431", "b"),
c("\u0432", "v"),
c("\u0434", "d"),
c("\u0436", "zh"),
c("\u0437", "z"),
c("\u0438", "i"),
c("\u0439", "i"),
c("\u043a", "k"),
c("\u043b", "l"),
c("\u043c", "m"),
c("\u043d", "n"),
c("\u043f", "p"),
c("\u0442", "t"),
c("\u0444", "f"),
c("\u0446", "ts"),
c("\u0447", "ch"),
c("\u0448", "sh"),
c("\u0449", "sht"),
c("\u044a", "u"),
c("\u044c", ""),
c("\u044e", "yu"),
c("\u044f", "ya")
) |
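# Tests for the flux/operator helpers: by_PoolIndex wraps a function written in
# terms of named pool contents (plus time) into one that takes a state vector
# ordered by poolNames, and UnBoundNonLinDecompOp assembles a compartmental
# matrix function from internal and outgoing flux definitions.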
test.function_by_PoolIndex=function(){
poolNames=c('barrel','glass','belly')
timeSymbol='t'
funcOfVars=function(belly,glass,barrel,t){
c(belly,glass,barrel,t)
}
funcOfVec=by_PoolIndex(funcOfVars,poolNames,timeSymbol)
belly=1;glass=2;barrel=3;t=4
rev=c(belly,glass,barrel,t)
res_vars=funcOfVars(belly,glass,barrel,t)
checkEquals(res_vars,rev)
vec=c(barrel,glass,belly)
res_vec=funcOfVec(vec,t)
checkEquals(res_vec,rev)
leaf_resp=function(leaf_pool_content){leaf_pool_content*4}
leaf_resp(1)
poolNames=c(
'some_thing'
,'some_thing_else'
,'some_thing_altogther'
,'leaf_pool_content'
)
leaf_resp_vec=by_PoolIndex(leaf_resp,poolNames,timeSymbol='t')
leaf_resp_vec(c(1,27,3,1),5)
}
test.NonLinDecompOp_with_linear_fluxes_by_Index=function(){
n<-3
k<-3
intfs=c(
InternalFlux_by_PoolIndex(
sourceIndex=1
,destinationIndex=2
,func=function(X,t){
barrel=X[[1]]
k*barrel
}
)
)
ofs=c(
OutFlux_by_PoolIndex(
sourceIndex=1
,func=function(X,t){
barrel=X[[1]]
k*barrel
}
)
)
BFunc=getCompartmentalMatrixFunc(
UnBoundNonLinDecompOp(
internal_fluxes=intfs
,out_fluxes=ofs
,numberOfPools=n
,timeSymbol='t'
)
)
iv<-c(barrel=1,glass=1,belly=1)
B<-BFunc(iv,t=0)
print(B)
checkEquals(
B
,matrix(
nrow=n
,ncol=n
,byrow=TRUE
,c(
-2*k,0,0
,k,0,0
,0,0,0
)
)
)
}
test.NonLinDecompOp_with_linear_fluxes_by_Name=function(){
iv<-c(barrel=1,glass=1,belly=1)
state_variable_names=names(iv)
timeSymbol='t'
times=0:10
n<-3
k<-.02
B_ref=matrix(
nrow=n
,ncol=n
,byrow=TRUE
,c(
-2*k,0,0
,k,0,0
,0,0,0
)
)
ifrs=ConstantInternalFluxRateList_by_PoolName(
list(
ConstantInternalFluxRate_by_PoolName(sourceName='barrel',destinationName='glass',rate_constant=k)
)
)
ofrs=ConstantOutFluxRateList_by_PoolName(
list(
ConstantOutFluxRate_by_PoolName(sourceName='barrel',rate_constant=k)
)
)
cop=ConstLinDecompOp(
internal_flux_rates=ifrs
,out_flux_rates=ofrs
,poolNames=state_variable_names
)
ifl_cl=ConstInFluxes(c(1,0,0))
mod_cl=GeneralModel(
t=times
,A=cop
,ivList=iv
,inputFluxes=ifl_cl
,timeSymbol='t'
)
I_func_cl=getFunctionDefinition(ifl_cl)
I_cl_0=I_func_cl(0)
pe(I_cl_0)
rhs_cl=getRightHandSideOfODE(mod_cl)
rhs_cl_0=rhs_cl(iv,0)
pe(rhs_cl_0)
intfs=InternalFluxList_by_PoolName(
list(
InternalFlux_by_PoolName(
sourceName='barrel'
,destinationName='glass'
,func=function(barrel,t){
k*barrel
}
)
)
)
ofs=OutFluxList_by_PoolName(
c(
OutFlux_by_PoolName(
sourceName='barrel'
,func=function(barrel,t){
k*barrel
}
)
)
)
ifs=InFluxList_by_PoolName(
c(
InFlux_by_PoolName(
destinationName='barrel'
,func=function(barrel,glass,t){
1
}
)
)
)
obn<- UnBoundNonLinDecompOp_by_PoolNames(
internal_fluxes=intfs
,out_fluxes=ofs
,timeSymbol='t'
)
BFunc=getCompartmentalMatrixFunc(
obn
,timeSymbol
,state_variable_names
)
iv<-c(barrel=1,glass=1,belly=1)
B_0<-BFunc(iv,t=0)
pp('B_0')
B_0_cl=cop@mat
pp('B_0_cl')
checkEquals(B_0,B_ref)
mod=GeneralModel(
t=times
,A=obn
,ivList=iv
,inputFluxes=ifs
,timeSymbol='t'
)
mod_cl=GeneralModel(
t=times
,A=cop
,ivList=iv
,inputFluxes=ifl_cl
,timeSymbol='t'
)
I_func=getFunctionDefinition(
mod@inputFluxes
,timeSymbol=timeSymbol
,poolNames=names(iv)
)
I_func_cl=getFunctionDefinition(
mod_cl@inputFluxes
)
I_0=I_func(iv,0)
I_0_cl=I_func_cl(0)
rhs=getRightHandSideOfODE(mod)
rhs_0=rhs(iv,0)
sol=getC(mod)
co2=getReleaseFlux(mod)
sol_cl=getC(mod_cl)
co2_cl=getReleaseFlux(mod_cl)
plot(x=times,y=co2[,1])
lines(x=times,y=co2_cl[,1],col='red')
} |
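# Worked example: train and evaluate a DNN classifier on the iris data with
# tfestimators, using modelr::resample_partition for the train/test split.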
library(tfestimators)
response <- function() "Species"
features <- function() setdiff(names(iris), response())
set.seed(123)
partitions <- modelr::resample_partition(iris, c(test = 0.2, train = 0.8))
iris_train <- as.data.frame(partitions$train)
iris_test <- as.data.frame(partitions$test)
feature_columns <- feature_columns(
column_numeric(features())
)
classifier <- dnn_classifier(
feature_columns = feature_columns,
hidden_units = c(10, 20, 10),
n_classes = 3
)
iris_input_fn <- function(data) {
input_fn(data, features = features(), response = response())
}
train(classifier, input_fn = iris_input_fn(iris_train))
predictions <- predict(classifier, input_fn = iris_input_fn(iris_test))
evaluation <- evaluate(classifier, input_fn = iris_input_fn(iris_test)) |
library(testthat)
library(Revticulate)
test_that(
"Testing getRevObj()",
{
skip_on_os("windows")
clearRev()
for(i in 1:10){
rand <- runif(10)
num <- sum(rand)
var <- paste(letters[as.integer((rand*26))], collapse = "")
doRev(var %+% " <- " %+% num)
expect_identical(as.character(round(num, 6)), getRevObj(var))
}
clearRev()
for(i in 1:8){
doRev("testtree <- simTree(" %+% "2 ^ " %+% as.integer(runif(1, 1, 5)) %+% ")")
expect_s3_class(getRevObj("testtree", coerce = TRUE), "phylo")
clearRev()
}
for(i in 1:8){
doRev("b ~ dnDirichlet([0, 1, 0, 1])")
expect_equal(sum(getRevObj("b", coerce = TRUE), na.rm = TRUE), 1)
clearRev()
}
clearRev()
for(i in 1:10){
doRev("var1 <- " %+% i %+% "; var2 := var1 * 10")
expect_equal(getRevObj("var1", coerce = TRUE), i)
expect_equal(getRevObj("var2", coerce = TRUE), i*10)
doRev("var1 <- " %+% (i*10))
expect_equal(getRevObj("var1", coerce = TRUE), i*10)
expect_equal(getRevObj("var2", coerce = TRUE), i*100)
}
clearRev()
}
) |
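# identifyLoners: S3 check that flags levels of a factor, labelled or
# character variable occurring at most five times (nMax caps how many such
# levels are listed in the message).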
identifyLoners <- function(v, nMax = 10) UseMethod("identifyLoners")
identifyLoners.factor <- function(v, nMax = 10) identifyLonersF(v, nMax = nMax)
identifyLoners.labelled <- function(v, nMax = 10) identifyLonersL(v, nMax = nMax)
identifyLoners.haven_labelled <- function(v, nMax = 10) identifyLonersL(v, nMax = nMax)
identifyLoners.character <- function(v, nMax = 10) identifyLonersC(v, nMax = nMax)
identifyLoners <- checkFunction(identifyLoners, "Identify levels with < 6 obs.",
c("character", "factor"))
identifyLonersMessage <- "Note that the following levels have at most five observations:"
identifyLonersF <- function(v, nMax) {
v <- factor(na.omit(v))
vLev <- levels(v)
lonerOcc <- vLev[which(table(v) <= 5)]
if (length(lonerOcc) > 0) {
problem <- TRUE
problemValues <- lonerOcc
} else {
problem <- FALSE
problemValues <- NULL
}
outMessage <- messageGenerator(list(problem=problem,
problemValues=problemValues),
message = identifyLonersMessage,
nMax = nMax)
checkResult(list(problem = problem, message = outMessage,
problemValues = problemValues))
}
identifyLonersC <- function(v, nMax) {
v <- factor(v)
identifyLonersF(v, nMax)
}
identifyLonersL <- function(v, nMax) {
v <- dataMaid_as_factor(v)
identifyLonersF(v, nMax)
} |
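# Small interactive example: inject an impossible (negative) mpg value into a
# copy of mtcars, show how it distorts a grouped summary, and define simple
# predicate helpers for validity checks.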
head(mtcars)
our.data <- mtcars
our.data$mpg[5] <- our.data$mpg[5] * -1
our.data[4:6,]
library(dplyr)
our.data %>%
group_by(cyl) %>%
summarise(avg.mpg=mean(mpg))
not.empty.p <- function(x) if (x == "") FALSE else TRUE
seven.digit.p <- function(x) nchar(x)==7
example.data <- data.frame(x=c(8, 9, 6, 5, 9, 5, 6, 7,
8, 9, 6, 5, 5, 6, 7),
y=c(82, 91, 61, 49, 40, 49, 57,
74, 78, 90, 61, 49, 51, 62, 68))
(example.data) |
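# Shingles (from lattice): numeric vectors whose "levels" attribute is a list
# of possibly overlapping numeric intervals.  The methods below implement
# subsetting, construction (shingle, equal.count), coercion, printing and
# plotting of shingle objects.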
"[.shingle" <-
function(x, subset, drop = FALSE)
{
if (!is.shingle(x)) stop("x must be a shingle")
ans <- as.numeric(x)[subset]
attr(ans, "levels") <- levels(x)
class(attr(ans, "levels")) <- "shingleLevel"
if (drop) {
xlvs <- levels(ans)
dl <- logical(nlevels(ans))
for (i in seq_along(dl))
dl[i] <- any( ans >= xlvs[[i]][1] & ans <= xlvs[[i]][2] )
attr(ans, "levels") <- xlvs[dl]
class(attr(ans, "levels")) <- "shingleLevel"
}
class(ans) <- "shingle"
ans
}
make.list.from.intervals <- function(x)
{
if (ncol(x) != 2) stop("x must be matrix with 2 columns")
ans <- vector(mode = "list", length = nrow(x))
for (i in seq_len(nrow(x))) ans[[i]] <- x[i,]
ans
}
equal.count <-
function(x, ...)
{
attr(x, "levels") <- make.list.from.intervals(co.intervals(x,...))
class(attr(x, "levels")) <- "shingleLevel"
class(x) <- "shingle"
x
}
shingle <-
function(x, intervals=sort(unique(x)))
{
if (ncol(as.matrix(intervals))==1)
intervals <- cbind(intervals, intervals, deparse.level = 0)
else if (ncol(as.matrix(intervals)) > 2)
stop("bad value of 'intervals'")
attr(x, "levels") <- make.list.from.intervals(intervals)
class(attr(x, "levels")) <- "shingleLevel"
class(x) <- "shingle"
x
}
as.data.frame.shingle <- as.data.frame.factor
is.shingle <-
function(x) inherits(x, "shingle")
as.shingle <-
function(x) if (is.shingle(x)) x else shingle(x)
summary.shingle <-
function(object, showValues = FALSE, ...)
print.shingle(object, showValues = showValues, ...)
as.character.shingleLevel <- function(x, ...)
{
interval2string <- function(x)
{
stopifnot(length(x) == 2)
if (x[1] == x[2]) paste("{ ", x[1], " }", sep = "")
else paste("[ ", x[1], ", ", x[2], " ]", sep = "")
}
sapply(x, interval2string)
}
print.shingleLevel <- function(x, ...)
{
print(do.call("rbind", x))
invisible(x)
}
print.shingle <- function(x, showValues = TRUE, ...)
{
if (showValues)
{
cat(gettext("\nData:\n"))
print(as.numeric(x))
}
l <- levels(x)
n <- nlevels(x)
if (n < 1) cat(gettext("\nno intervals\n"))
else {
int <- data.frame(min = numeric(n), max = numeric(n), count = numeric(n))
for (i in 1:n) {
int$min[i] <- l[[i]][1]
int$max[i] <- l[[i]][2]
int$count[i] <- length(x[x>=l[[i]][1] & x<=l[[i]][2]])
}
cat(gettext("\nIntervals:\n"))
print(int)
olap <- numeric(n-1)
for (i in seq_len(n - 1))
olap[i] <-
length(x[x >= l[[i]][1] & x <= l[[i]][2] &
x >= l[[i+1]][1] & x <= l[[i+1]][2]])
cat(gettext("\nOverlap between adjacent intervals:\n"))
print(olap)
}
invisible(x)
}
plot.shingle <-
function(x,
panel = panel.shingle,
xlab = gettext("Range"),
ylab = gettext("Panel"),
...)
{
ocall <- sys.call(sys.parent()); ocall[[1]] <- quote(plot)
panel.shingle <-
function(x, y,
col = plot.polygon$col,
lty = plot.polygon$lty,
lwd = plot.polygon$lwd,
alpha = plot.polygon$alpha,
border = plot.polygon$border,
...)
{
plot.polygon <- trellis.par.get("plot.polygon")
n <- nlevels(y)
if (n > 0)
lrect(xleft = x[1 + 2 * (0:(n-1))],
xright = x[2 + 2 * (0:(n-1))],
y = 1:n,
height = 0.5,
col = col,
lty = lty,
alpha = alpha,
border = border,
...)
}
x <- levels(x)
ans <-
bwplot(factor(rep(seq_len(length(x)), each = 2)) ~ unlist(x),
xlab = xlab, ylab = ylab,
panel = panel, ...)
ans$call <- ocall
ans
} |
expectedDIFplot <-
function(OBJ, axis, quants, main, xlim, ylim, xlab, ylab, ...){
par(ask=TRUE)
par(oma=c(1,1,1,2))
if(missing(main)){main="Pairwise Expected Scores\n"}
grps<-OBJ$groups
ngrps<-length(grps)
if(missing(xlim)){xlim<-c(min(OBJ$subjscore),max(OBJ$subjscore))}
if(missing(ylim)){ylim<-c(min(OBJ$subjscore),max(OBJ$subjscore))}
for(i in 1:(ngrps-1)){
for(j in (i+1):ngrps){
grp1<-OBJ$DIF[[i]]
grp2<-OBJ$DIF[[j]]
eval1<-apply(grp1$OCC[,-c(1:3)],2,function(x)sum(x*grp1$OCC[,3]))
eval2<-apply(grp2$OCC[,-c(1:3)],2,function(x)sum(x*grp2$OCC[,3]))
plot(eval1,eval2,type="l",ylab=OBJ$groups[j], xlab=OBJ$groups[i], xlim=xlim, ylim=ylim, main=main, ...)
axis(3,at=quants, lab=labels(quants),tck=0)
abline(v=quants,col="blue",lty=2)
axis(4,at=quants, lab=labels(quants),las=2,tck=0)
abline(h=quants,col="blue",lty=2)
abline(0,1,lty=2)
}
}
} |
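# objectiveFunctionEvaluation: evaluate the candidate points xnew on fun.  For
# noisy objectives with a seedFun, each point gets a reproducible seed that is
# incremented on repeated evaluations of the same point; the global random
# seed is saved and restored so the surrounding optimisation loop is not
# affected.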
objectiveFunctionEvaluation <- function(x=NULL,
xnew,
fun,
seedFun=NA,
noise=FALSE,...){
if(!is.null(x))
x <- data.matrix(x)
xnew <- data.matrix(xnew)
if(nrow(xnew)==0)
return(NULL)
if(exists(as.character(substitute(.Random.seed))))
SAVESEED<-.Random.seed
else
SAVESEED=NULL
if(noise & !is.na(seedFun)){
seed <- numeric(nrow(xnew))
for(i in 1:nrow(xnew)){
xnewi <- xnew[i,]
x <- rbind(x,xnewi)
repetitions <- sum(apply(x,1,identical,xnewi)) -1
seed[i] <- seedFun + repetitions
}
nms <- names(formals(fun))
passSeed <- FALSE
if(length(nms)>1){
passSeed <- names(formals(fun)[2])=="seed"
}
if(passSeed){
ynew <- fun(xnew,seed,...)
}else{
ynew <- NULL
for(i in 1:nrow(xnew)){
set.seed(seed[i])
ynew <- rbind(ynew,fun(xnew[i,,drop=FALSE],...))
}
}
}else{
ynew <- fun(xnew,...)
}
if(!is.null(SAVESEED))
assign(".Random.seed", SAVESEED, envir=globalenv())
if(!is.matrix(ynew) & nrow(xnew)>1)
ynew <- matrix(data = ynew,
ncol = 1)
if(!is.matrix(ynew) & nrow(xnew)==1)
ynew <- matrix(data = ynew,
nrow = 1)
ynew
} |
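# updateK: Gibbs-style update of the component labels K.  For each observation
# the label is drawn from a multinomial whose weights are the mixture weights
# lambda[i, ] times the Gaussian densities of z[i, ] under each component
# (mean mu[g, ], covariance sigma2[g] * Id).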
updateK <-
function(G, K, z, mu, sigma2, Id, lambda)
{
dens = rep(0,G)
for(i in 1:length(K))
{
for(g in 1:G)
{
dens[g] = dmvnorm(z[i,], mu[g,], sigma2[g]*Id)
}
if(sum(is.na(dens))!= 0){dens[c(1:G)[is.na(dens)]] = 0}
K[i] = c(1:G)[rmultinom(1,1,lambda[i,]*dens) == T]
}
K
} |
as_ypr_population <- function(x, ...) UseMethod("as_ypr_population")
as_ypr_population.data.frame <- function(x, ...) {
chk_unused(...)
do.call("ypr_population", x)
} |
cond.multinom <- function(model) {
if (!"multinom" %in% class(model)) {stop("model not recognized")}
if (!"Hessian" %in% names(model)) {
model <- update(model,.~.,Hess=TRUE,trace=FALSE)
}
evd <- eigen(model$Hessian,symmetric=TRUE,only.values=TRUE)$values
cond <- sqrt(max(evd)/min(evd))
result <- c("Condition number"=cond,"Log10(condition)"=log10(cond))
return(result)
} |
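# sizedsubtree: repeatedly sample internal nodes of the tree until one is
# found whose clade contains at least Size tips (default one tenth of the
# tips) and at most half of the tree, giving up after time.limit seconds via
# R.utils::withTimeout.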
sizedsubtree <-function(tree,Size=NULL,time.limit=10)
{
if (!requireNamespace("R.utils", quietly = TRUE)) {
stop("Package \"R.utils\" needed for this function to work. Please install it.",
call. = FALSE)
}
szs<-function(tree,size=Size){
if(is.null(size)) size<-Ntip(tree)/10
nod <- array()
repeat {
i = 1
node <- sample(seq(Ntip(tree)+2, dim(tree$edge)[1]-1), 1)
a <- length(tips(tree, node))
i = i + 1
if (a >= size & a <= (Ntip(tree)-1)/2)
nod[i] <- node
else nod[i] <- 1
if (nod[i] > 1) {
break
}
if (is.na(sum(nod))) nod <- na.omit(nod) else nod <- nod
}
return(nod[2])
}
R.utils::withTimeout({szs(tree,Size)},timeout=time.limit,onTimeout="silent")->a
if(is.null(a)) print("searching time exceeded the time limit. It is possible there is no clade large enough to satisfy the condition") else return(a)
} |
print.ordforpred <-
function(x, ...) {
cat("\n")
cat(paste("Predicted values of ", length(x$ypred), " observation(s).", sep=""), "\n")
cat("\n")
cat("Classes of ordinal target variable:", "\n")
cat(paste(paste("\"", levels(x$ypred), "\"", sep=""), collapse=", "), "\n")
} |
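# bloom_prediction2: given an hourly table of accumulated chill and heat plus
# chilling and heat requirements, determine for each season the day of year on
# which the chill requirement is met and, from that point, the day on which
# the heat requirement is fulfilled (the predicted phenological date).  With
# permutations = TRUE all combinations of Chill_req and Heat_req are evaluated.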
bloom_prediction2 <-
function (HourChillTable,Chill_req,Heat_req,permutations=FALSE,
Chill_model="Chill_Portions",Heat_model="GDH",Start_JDay=305,infocol=NULL)
{
if(is.null(HourChillTable)) stop("no HourChillTable provided.",call. = FALSE)
if(!is.data.frame(HourChillTable)) stop("HourChillTable is not a data.frame.",call. = FALSE)
if(is.null(Chill_model)) stop("no Chill_model provided.",call. = FALSE)
if(!Chill_model %in% colnames(HourChillTable)) stop("HourChillTable doesn't contain a column named ",Chill_model,call. = FALSE)
if(!Heat_model %in% colnames(HourChillTable)) stop("HourChillTable doesn't contain a column named ",Heat_model,call. = FALSE)
if(!is.numeric(HourChillTable[,Chill_model])) stop("column ",Chill_model," not numeric.",call. = FALSE)
if(!is.numeric(HourChillTable[,Heat_model])) stop("column ",Heat_model," not numeric.",call. = FALSE)
if(is.null(Chill_req)) stop("no Chill_req provided.",call. = FALSE)
if(!is.numeric(Chill_req)) stop("Chill_req not numeric.",call. = FALSE)
if(is.null(Heat_req)) stop("no Heat_req provided.",call. = FALSE)
if(!is.numeric(Heat_req)) stop("Heat_req not numeric.",call. = FALSE)
if(is.null(Start_JDay)) stop("no Start_JDay provided.",call. = FALSE)
if(Start_JDay>366) stop("Start_JDay can't be greater than 366",call. = FALSE)
if(Start_JDay<1) stop("Start_JDay can't be less than 1",call. = FALSE)
if(!"JDay" %in% colnames(HourChillTable))
HourChillTable<-make_JDay(HourChillTable)
if(!"Season" %in% colnames(HourChillTable))
{HourChillTable[which(HourChillTable$JDay>=Start_JDay),"Season"]<-
HourChillTable[which(HourChillTable$JDay>=Start_JDay),"Year"]
HourChillTable[which(HourChillTable$JDay<Start_JDay),"Season"]<-
HourChillTable[which(HourChillTable$JDay<Start_JDay),"Year"]-1}
cc<-HourChillTable[,Chill_model]
hh<-HourChillTable[,Heat_model]
sea<-HourChillTable$Season
stdd<-HourChillTable$JDay
for (s in unique(sea))
cc[which(sea==s)]<-cc[which(sea==s)]-cc[which(sea==s&stdd==round(Start_JDay))][1]
for (s in unique(sea))
hh[which(sea==s)]<-hh[which(sea==s)]-hh[which(sea==s&stdd==round(Start_JDay))][1]
if(permutations)
reqs<-expand.grid(unique(sea),Chill_req,Heat_req) else
{if(!length(Chill_req)==length(Heat_req))
stop("Chill_req and Heat_req are of different length.",call. = FALSE)
reqs<-data.frame(Chill_req,Heat_req)
reqs<-data.frame(Season=rep(unique(sea),each=nrow(reqs)),reqs)}
colnames(reqs)<-c("Season","Creq","Hreq")
if(!permutations & !is.null(infocol))
if(length(infocol)==length(Chill_req)) reqs<-cbind(infocol,reqs)
results <- data.frame()
chill <- cc
chill2 <- chill[c(2:length(chill), 1)]
heat<-hh
heat2 <- heat[c(2:length(heat), 1)]
seas<-HourChillTable$Season
unireq<-unique(reqs[,c("Creq","Season")])
unireq[,"Chill_comp"]<-as.numeric(sapply(1:nrow(unireq),function(x)
which(chill<unireq$Creq[x]&chill2>=unireq$Creq[x]&seas==unireq$Season[x])[1]))
unireq[which(!is.na(unireq$Chill_comp)),"Heat_on_CR"]<-
as.numeric(sapply(unireq$Chill_comp[which(!is.na(unireq$Chill_comp))],function(x)
HourChillTable[x,Heat_model]))
unireq[which(!is.na(unireq$Chill_comp)),"Chill_comp"]<-
as.numeric(sapply(unireq$Chill_comp[which(!is.na(unireq$Chill_comp))],function(x)
HourChillTable$JDay[x]))
reqs<-merge(reqs,unireq,by = c("Creq","Season"))
reqs[,"Heat_on_stage"]<-reqs$Hreq+reqs$Heat_on_CR
heatstage<-reqs$Heat_on_stage
reqseas<-reqs$Season
reqs[,"Heat_comp"]<-as.numeric(sapply(1:nrow(reqs),function(x)
which(heat<heatstage[x]&heat2>=heatstage[x]&seas==reqseas[x])[1]))
reqs[which(!is.na(reqs$Heat_comp)),"Pheno_date"]<-
as.numeric(sapply(reqs$Heat_comp[which(!is.na(reqs$Heat_comp))],function(x)
HourChillTable$JDay[x]))
if("infocol" %in% colnames(reqs))
reqs<-reqs[,c("infocol","Season","Creq","Hreq","Chill_comp","Pheno_date")] else
reqs<-reqs[,c("Season","Creq","Hreq","Chill_comp","Pheno_date")]
return(reqs)
} |
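# Sparse 4D brain vectors: the data are stored only for voxels inside a mask,
# as a time-by-voxel matrix, together with an IndexLookupVolume that maps 3D
# voxel indices to matrix columns.  The methods below cover construction,
# loading, indexing, iteration, concatenation and coercion to matrix/list.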
SparseBrainVectorSource <- function(metaInfo, indices, mask) {
stopifnot(length(dim(metaInfo)) >= 3)
stopifnot(all(indices >= 1 & indices <= dim(metaInfo)[4]))
D <- dim(metaInfo)[1:3]
if (is.vector(mask) && length(mask) < prod(D)) {
m <- array(FALSE, D)
m[mask] <- TRUE
mask <- m
} else if (identical(dim(mask), as.integer(D))) {
mask <- as.array(mask)
} else if (is.vector(mask) && length(mask) == prod(D)) {
mask <- array(mask, D)
} else {
stop("illegal mask argument with dim: ", paste(dim(mask), collapse=", "))
}
if (!inherits(mask, "LogicalBrainVolume")) {
mspace <- BrainSpace(dim(mask), metaInfo@spacing, metaInfo@origin, metaInfo@spatialAxes)
mask <- LogicalBrainVolume(mask, mspace)
}
stopifnot(all(dim(mask) == D))
new("SparseBrainVectorSource", metaInfo=metaInfo, indices=indices, mask=mask)
}
SparseBrainVector <- function(data, space, mask, source=NULL, label="") {
stopifnot(inherits(space, "BrainSpace"))
if (!inherits(mask, "LogicalBrainVolume")) {
mspace <- BrainSpace(dim(space)[1:3], spacing(space), origin(space), axes(space), trans(space))
mask <- LogicalBrainVolume(as.logical(mask), mspace)
}
stopifnot(inherits(mask, "LogicalBrainVolume"))
D4 <- if (is.matrix(data)) {
Nind <- sum(mask == TRUE)
if (nrow(data) == Nind) {
data <- t(data)
nrow(data)
} else if (ncol(data) == Nind) {
nrow(data)
} else {
stop(paste("matrix with dim:", dim(data), " does not match mask cardinality: ", Nind))
}
} else if (length(dim(data)) == 4) {
mat <- apply(data, 4, function(vals) vals)
data <- t(mat[mask==TRUE,])
dim(data)[4]
}
if (ndim(space) == 3) {
space <- addDim(space, nrow(data))
}
stopifnot(ndim(space) == 4)
if (is.null(source)) {
meta <- BrainMetaInfo(dim(space), spacing(space), origin(space), "FLOAT", label)
source <- new("BrainSource", metaInfo=meta)
}
new("SparseBrainVector", source=source, space=space, mask=mask, data=data, map=IndexLookupVolume(space(mask), as.integer(which(mask))))
}
setMethod(f="loadData", signature=c("SparseBrainVectorSource"),
def=function(x) {
            meta <- x@metaInfo
nels <- prod(meta@Dim[1:3])
ind <- x@indices
M <- x@mask > 0
reader <- dataReader(meta, offset=0)
dat4D <- readElements(reader, prod(meta@Dim[1:4]))
datlist <- lapply(1:length(ind), function(i) {
offset <- (nels * (ind[i]-1))
dat4D[(offset+1):(offset + nels)][M]
})
close(reader)
arr <- do.call(rbind, datlist)
bspace <- BrainSpace(c(meta@Dim[1:3], length(ind)), meta@spacing, meta@origin, meta@spatialAxes)
SparseBrainVector(arr, bspace, x@mask)
})
setMethod(f="indices", signature=signature(x="SparseBrainVector"),
def=function(x) {
indices(x@map)
})
setMethod(f="coords", signature=signature(x="SparseBrainVector"),
def=function(x,i) {
if (missing(i)) {
return(coords(x@map, indices(x@map)))
}
coords(x@map, i)
})
setMethod("eachVolume", signature=signature(x="SparseBrainVector", FUN="function", withIndex="logical", mask="missing"),
def=function(x, FUN, withIndex=FALSE, mask, ...) {
lapply(1:nrow(x@data), function(i) {
if (withIndex) {
FUN(takeVolume(x, i), i,...)
} else {
FUN(takeVolume(x, i), ...)
}
})
})
setMethod("eachVolume", signature=signature(x="SparseBrainVector", FUN="function", withIndex="missing", mask="missing"),
def=function(x, FUN, withIndex, ...) {
lapply(1:nrow(x@data), function(i) FUN(takeVolume(x, i), ...))
})
setMethod("eachVolume", signature=signature(x="SparseBrainVector", FUN="function", withIndex="missing", mask="LogicalBrainVolume"),
def=function(x, FUN, withIndex, mask, ...) {
mask.idx <- which(mask > 0)
lapply(1:nrow(x@data), function(i) FUN(takeVolume(x, i)[mask.idx], ...))
})
setMethod(f="eachSeries", signature=signature(x="SparseBrainVector", FUN="function", withIndex="logical"),
def=function(x, FUN, withIndex=FALSE, ...) {
ret <- list()
if (withIndex) {
idx <- indices(x)
for (i in 1:NCOL(x@data)) {
ret[[i]] <- FUN(x@data[,i], idx[i])
}
} else {
for (i in 1:NCOL(x@data)) {
ret[[i]] <- FUN(x@data[,i])
}
}
ret
})
setMethod(f="seriesIter", signature=signature(x="SparseBrainVector"),
def=function(x) {
len <- NCOL(x@data)
i <- 0
nextEl <- function() {
i <<- i+1
if (i <= len) {
x@data[,i]
} else {
stop("StopIteration")
}
}
hasNx <- function() {
i < len
}
obj <- list(nextElem = nextEl, hasNext=hasNx)
class(obj) <- c("seriesIter", "abstractiter", "iter")
obj
})
setMethod(f="series", signature=signature(x="SparseBrainVector", i="matrix"),
def=function(x,i) {
idx <- gridToIndex(x@mask, i)
callGeneric(x,idx)
})
setMethod("series", signature(x="SparseBrainVector", i="numeric"),
def=function(x,i, j, k) {
if (missing(j) && missing(k)) {
idx <- lookup(x, as.integer(i))
idx.nz <- idx[idx!=0]
if (length(idx.nz) == 0) {
matrix(0, dim(x)[4], length(i))
} else {
mat <- matrix(0, dim(x)[4], length(i))
mat[, idx != 0] <- x@data[,idx.nz]
mat
}
} else {
vdim <- dim(x)
slicedim <- vdim[1] * vdim[2]
idx <- slicedim*(k-1) + (j-1)*vdim[1] + i
callGeneric(x, idx)
}
})
setMethod(f="concat", signature=signature(x="SparseBrainVector", y="SparseBrainVector"),
def=function(x,y,...) {
if (!all(indices(x) == indices(y))) {
stop("cannot concatenate arguments with different index maps")
}
ndat <- rbind(x@data, y@data)
d1 <- dim(x)
d2 <- dim(y)
ndim <- c(d1[1:3], d1[4] + d2[4])
nspace <- BrainSpace(ndim, spacing(x@space), origin(x@space), axes(x@space), trans(x@space))
ret <- SparseBrainVector(ndat, nspace, mask=x@mask)
rest <- list(...)
if (length(rest) >= 1) {
ret <- Reduce("concat", c(ret, rest))
}
return(ret)
})
setMethod(f="lookup", signature=signature(x="SparseBrainVector", i="numeric"),
def=function(x,i) {
lookup(x@map, i)
})
setMethod(f="[", signature=signature(x = "SparseBrainVector", i = "numeric", j = "missing"),
def=function (x, i, j, k, m, ..., drop=TRUE) {
callGeneric(x, i, 1:(dim(x)[2]))
}
)
setMethod(f="[", signature=signature(x = "SparseBrainVector", i = "missing", j = "missing"),
def=function (x, i, j, k, m, ..., drop=TRUE) {
callGeneric(x, 1:(dim(x)[1]), 1:(dim(x)[2]))
}
)
setMethod(f="[", signature=signature(x = "SparseBrainVector", i = "missing", j = "numeric"),
def=function (x, i, j, k, m, ..., drop=TRUE) {
            callGeneric(x, 1:(dim(x)[1]), j)
}
)
setMethod(f="[", signature=signature(x = "SparseBrainVector", i = "numeric", j = "numeric"),
def = function (x, i, j, k, m, ..., drop = TRUE) {
if (missing(k))
k = 1:(dim(x)[3])
vmat <- as.matrix(expand.grid(i,j,k,m))
ind <- .gridToIndex3D(dim(x)[1:3], vmat[,1:3,drop = FALSE])
mapped <- cbind(lookup(x, ind), m)
vals <- unlist(apply(mapped, 1, function(i) {
if (i[1] == 0) {
0
} else {
x@data[i[2], i[1]]
}
}))
dim(vals) <- c(length(i),length(j),length(k),length(m))
if (drop) {
drop(vals)
} else {
vals
}
})
setMethod(f="subVector", signature=signature(x="SparseBrainVector", i="numeric"),
def=function(x, i) {
idx <- which(x@mask > 0)
bspace <- dropDim(space(x))
res <- lapply(i, function(i) x@data[i,])
res <- do.call("cbind", res)
SparseBrainVector(res, bspace, x@mask)
})
setMethod(f="takeVolume", signature=signature(x="SparseBrainVector", i="numeric"),
def=function(x, i, merge=FALSE) {
idx <- which(x@mask > 0)
bspace <- dropDim(space(x))
res <- lapply(i, function(i) x@data[i,])
if (length(res) > 1 && merge) {
res <- do.call("cbind", res)
SparseBrainVector(res, bspace, x@mask)
} else {
if (length(res) == 1) {
BrainVolume(res[[1]], bspace, indices=idx)
} else {
lapply(res, function(x) BrainVolume(x, bspace, indices=idx))
}
}
})
setAs(from="SparseBrainVector", to="matrix",
function(from) {
ind <- indices(from)
        # dense time-by-voxel matrix; columns outside the mask remain zero
        out <- matrix(0, dim(from)[4], prod(dim(from)[1:3]))
out[, ind] <- from@data
out
})
setMethod(f="as.matrix", signature=signature(x = "SparseBrainVector"), def=function(x) {
as(x, "matrix")
})
setMethod(f="as.list", signature=signature(x = "SparseBrainVector"), def=function(x) {
D4 <- dim(x)[4]
lapply(1:D4, function(i) takeVolume(x,i))
})
setMethod("show",
signature=signature(object="SparseBrainVector"),
def=function(object) {
cat("an instance of class", class(object), "\n\n")
cat(" dimensions: ", dim(object), "\n")
cat(" voxel spacing: ", spacing(object), "\n")
cat(" cardinality: ", length(object@map@indices))
cat("\n\n")
})
|
context("ml pipeline")
skip_databricks_connect()
sc <- testthat_spark_connection()
training <- dplyr::tibble(
id = 0:3L,
text = c(
"a b c d e spark",
"b d",
"spark f g h",
"hadoop mapreduce"
),
label = c(1, 0, 1, 0)
)
training_tbl <- testthat_tbl("training")
test <- dplyr::tibble(
id = 4:7L,
text = c("spark i j k", "l m n", "spark hadoop spark", "apache hadoop")
)
test_tbl <- testthat_tbl("test")
test_that("ml_pipeline() returns a c('ml_pipeline', 'ml_estimator', 'ml_pipeline_stage')", {
p <- ml_pipeline(sc)
expect_equal(class(p), c("ml_pipeline", "ml_estimator", "ml_pipeline_stage"))
expect_equal(ml_stages(p), NULL)
expect_equal(jobj_class(spark_jobj(p))[1], "Pipeline")
uid_prefix <- gsub(pattern = "_.+$", replacement = "", p$uid)
expect_equal(uid_prefix, "pipeline")
})
test_that("ml_pipeline() combines pipeline_stages into a pipeline", {
tokenizer <- ft_tokenizer(sc, "x", "y")
binarizer <- ft_binarizer(sc, "in", "out", 0.5)
pipeline <- ml_pipeline(tokenizer, binarizer)
individual_stage_uids <- c(tokenizer$uid, binarizer$uid)
expect_equal(pipeline$stage_uids, individual_stage_uids)
expect_equal(class(pipeline), c("ml_pipeline", "ml_estimator", "ml_pipeline_stage"))
})
test_that("we can create nested pipelines", {
p0 <- ml_pipeline(sc)
tokenizer <- ft_tokenizer(sc, "x", "y")
pipeline <- ml_pipeline(p0, tokenizer)
expect_equal(class(ml_stage(pipeline, 1))[1], "ml_pipeline")
expect_equal(ml_stage(pipeline, 1) %>% ml_stages(), NULL)
})
test_that("ml_transformer.ml_pipeline() works as expected", {
tokenizer <- ft_tokenizer(sc, "x", "y")
binarizer <- ft_binarizer(sc, "in", "out", 0.5)
p1 <- ml_pipeline(tokenizer, binarizer)
p2 <- ml_pipeline(sc) %>%
ft_tokenizer("x", "y") %>%
ft_binarizer("in", "out", 0.5)
p1_params <- p1 %>%
ml_stages() %>%
lapply(ml_param_map) %>%
lapply(as.environment)
p2_params <- p2 %>%
ml_stages() %>%
lapply(ml_param_map) %>%
lapply(as.environment)
expect_equal(p1_params, p2_params)
expect_equal(class(p2)[1], "ml_pipeline")
})
test_that("empty pipeline has no stages", {
expect_null(ml_pipeline(sc) %>% ml_stages())
})
test_that("pipeline printing works", {
output <- capture.output(ml_pipeline(sc))
expect_identical(output[1], "Pipeline (Estimator) with no stages")
output <- capture.output(ml_pipeline(ft_binarizer(sc, "in", "out")))
expect_identical(output[1], "Pipeline (Estimator) with 1 stage")
output <- capture.output(ml_pipeline(ft_binarizer(sc, "in", "out"), ml_logistic_regression(sc)))
expect_identical(output[1], "Pipeline (Estimator) with 2 stages")
expect_identical(output[4], " |--1 Binarizer (Transformer)")
expect_identical(output[6], " | (Parameters -- Column Names)")
expect_identical(output[9], " |--2 LogisticRegression (Estimator)")
})
test_that("Error when specifying formula without tbl_spark for ml_ routines", {
expect_error(
ml_pipeline(sc) %>%
ml_logistic_regression(Species ~ Petal_Length),
"`formula` may only be specified when `x` is a `tbl_spark`\\."
)
expect_error(
ml_logistic_regression(sc, Species ~ Petal_Length),
"`formula` may only be specified when `x` is a `tbl_spark`\\."
)
}) |
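# Kenv.csr: pointwise simulation envelopes for Ripley's K-function under
# complete spatial randomness: nsim point patterns of nptg points are
# simulated in the polygon and the running minimum and maximum of khat over
# the distances s are returned.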
Kenv.csr <- function(nptg,poly,nsim,s,quiet=FALSE)
{
kmax <- rep(0,length=length(s))
kmin <- rep(1.0E34,length=length(s))
for(isim in (1:nsim)){
if(!quiet)cat('Doing simulation ',isim,'\n')
khsim <- khat(csr(poly,nptg),poly,s)
kmax <- pmax(kmax,khsim)
kmin <- pmin(kmin,khsim)
}
list(lower=kmin,upper=kmax)
} |
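# spn_P_epiSEIR_node: build the places (state labels) and index tables for a
# single-node stochastic Petri net with Erlang-distributed egg, larval, pupal
# and EIP stages, genotype-structured mosquitoes (unmated females, mated
# S/E/I females, males) and an SEIR human population.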
spn_P_epiSEIR_node <- function(params,cube){
nE <- params$nE
nL <- params$nL
nP <- params$nP
nEIP <- params$nEIP
if(nE < 2 || nL < 2 || nP < 2 || nEIP < 2){
warning(paste0("A shape parameter ('nE', 'nL', 'nP', 'nEIP') of 1 implies ",
"exponentially distributed dwell times in that compartment."))
}
nG <- cube$genotypesN
g <- cube$genotypesID
eggs <- file.path("E",1:nE,"_",rep(g, each = nE), fsep = "")
larvae <- file.path("L",1:nL,"_",rep(g, each = nL), fsep = "")
pupae <- file.path("P",1:nP,"_",rep(g, each = nP), fsep = "")
females_unmated <- file.path("U",g, fsep = "_")
stages <- c("S",paste0("E",1:nEIP),"I")
females <- file.path("F", rep(x = rep(x = g, each = nG), times = nEIP+2),
rep(x = g, times = nG*(nEIP+2)),
rep(x = stages, each = nG^2),
fsep = "_")
males <- file.path("M",g, fsep = "_")
humans <- c("H_S","H_E","H_I","H_R")
ix <- list()
ix$egg <- matrix(data = seq_along(eggs),nrow = nE,byrow = FALSE,dimnames = list(1:nE,g))
ix$larvae <- matrix(data = seq_along(larvae) + nG*nE,nrow = nL,byrow = FALSE,dimnames = list(1:nL,g))
ix$pupae <- matrix(data = seq_along(pupae) + nG*(nE + nL),nrow = nP,byrow = FALSE,dimnames = list(1:nP,g))
ix$females_unmated <- setNames(object = seq_along(females_unmated) + nG*(nE + nL + nP), nm = g)
ix$females <- aperm(a = array(data = seq_along(females) + nG*(nE + nL + nP + 1),dim = c(nG,nG,nEIP+2),
dimnames = list(g,g,stages)),
perm = c(2,1,3),resize = TRUE)
ix$males <- setNames(object = seq_along(males) + nG*(nE+nL+nP+nG*(nEIP+2) + 1), nm = g)
ix$humans <- setNames(object = seq_along(humans) + nG*(nE+nL+nP+nG*(nEIP+2) + 2),nm = humans)
u <- c(eggs,larvae,pupae,females_unmated,females,males,humans)
return(list("ix" = list(ix),
"u" = u) )
} |