qbbox <- structure(function
(
lat,
lon,
TYPE = c("all", "quantile")[1],
margin = list(m=c(1,1,1,1), TYPE = c("perc", "abs")[1]),
q.lat = c(0.1,0.9),
q.lon = c(0.1,0.9),
verbose=0
){
if (TYPE == "all"){
latR <- range(lat,na.rm=TRUE);
lonR <- range(lon,na.rm=TRUE)
} else if (TYPE == "quantile"){
latR <- quantile(lat, q.lat, na.rm=TRUE);
lonR <- quantile(lon, q.lon, na.rm=TRUE);
}
if (!is.null(margin)){
m <- margin$m;
lat.center <- latR[1] + diff(latR)/2;
lon.center <- lonR[1] + diff(lonR)/2;
if (margin$TYPE == "perc"){
dlon <- c(-1,1)*(1+m[c(2,4)]/100)*diff(lonR)/2;
dlat <- c(-1,1)*(1+m[c(1,3)]/100)*diff(latR)/2;
} else if (margin$TYPE == "abs"){
dlon <- c(-1,1)*(m[c(2,4)] + diff(lonR)/2);
dlat <- c(-1,1)*(m[c(1,3)] + diff(latR)/2);
}
lonR.margin <- lon.center + dlon;
latR.margin <- lat.center + dlat;
if (verbose>1) {
cat("old/new lon range:");print(lonR);print(lonR.margin);
cat("old/new lat range:");print(latR);print(latR.margin);
}
return(list(latR=latR.margin, lonR=lonR.margin))
}
return(list(latR=latR, lonR=lonR))
}, ex = function(){
lat = 37.85 + rnorm(100, sd=0.001);
lon = -120.47 + rnorm(100, sd=0.001);
lat[1:5] <- lat[1:5] + rnorm(5, sd =.01);
lon[1:5] <- lon[1:5] + rnorm(5, sd =.01);
qbbox(lat, lon, TYPE = "quantile");
qbbox(lat, lon, TYPE = "all");
qbbox(lat, lon, margin = list(m = c(10, 10, 10, 10), TYPE = c("perc", "abs")[1]));
})
archetypes_funct <- function(data, k, weights = NULL, maxIterations = 100,
minImprovement = sqrt(.Machine$double.eps),
maxKappa = 1000, verbose = FALSE, saveHistory = FALSE,
family = archetypesFamily("original"), PM = PM, ...)
{
mycall <- match.call()
famargs <- list(...)
memento <- NULL
snapshot <- function(i) {
a <- list(archetypes = as.archetypes(t(family$rescalefn(x, family$undummyfn(x, zs))),
k, alphas = t(alphas), betas = t(betas), rss = rss,
kappas = kappas,
zas = t(family$rescalefn(x, family$undummyfn(x, zas))),
residuals = resid, reweights = reweights, weights = weights,
family = list(class = family$class)))
memento$save(i, a)
}
printIter <- function(i) {
cat(i, ": rss = ", formatC(rss, 8, format = "f"), ", improvement = ",
formatC(imp, 8, format = "f"), "\n", sep = "")
}
x1 <- t(data)
x1 <- family$scalefn(x1, ...)
x1 <- family$dummyfn(x1, ...)
x0 <- family$globweightfn(x1, weights, ...)
x <- x0
n <- ncol(x)
m <- nrow(x)
init <- family$initfn(x, k, ...)
betas <- init$betas
alphas <- init$alphas
zas <- NULL
zs <- x %*% betas
resid <- zs[1:(nrow(zs) - 1),] %*% alphas - x[1:(nrow(x) - 1),]
rss <- family$normfn(resid, PM, ...)/n
reweights <- rep(1, n)
kappas <- c(alphas = kappa(alphas), betas = kappa(betas),
zas = -Inf, zs = kappa(zs))
isIll <- c(kappas) > maxKappa
errormsg <- NULL
if (saveHistory) {
memento <- new.memento()
snapshot(0)
}
i <- 1
imp <- +Inf
tryCatch(while ((i <= maxIterations) & (imp >= minImprovement)) {
reweights <- family$reweightsfn(resid, reweights, ...)
x <- family$weightfn(x0, reweights, ...)
alphas <- family$alphasfn(alphas, zs, x, ...)
zas <- family$zalphasfn(alphas, x, ...)
resid1n <- zas[1:(nrow(zas) - 1),] %*% alphas - x[1:(nrow(x) - 1),]
rss1 <- family$normfn(resid1n, PM, ...)/n
kappas[c("alphas", "zas")] <- c(kappa(alphas), kappa(zas))
betas <- family$betasfn(betas, x, zas, ...)
zs <- x %*% betas
kappas[c("betas", "zs")] <- c(kappa(betas), kappa(zs))
alphas0 <- family$alphasfn(alphas, zs, x0, ...)
resid <- zs[1:(nrow(zs) - 1),] %*% alphas0 - x0[1:(nrow(x0) - 1),]
rss2 <- family$normfn(resid, PM, ...)/n
imp <- rss - rss2
rss <- rss2
kappas <- c(alphas = kappa(alphas), betas = kappa(betas),
zas = kappa(zas), zs = kappa(zs))
isIll <- isIll & (kappas > maxKappa)
if (verbose)
printIter(i)
if (saveHistory)
snapshot(i)
i <- i + 1
}, error = function(e) errormsg <<- e)
if (!is.null(errormsg)) {
warning("k=", k, ": ", errormsg)
return(as.archetypes(NULL, k, NULL, NA, iters = i, call = mycall,
history = memento, kappas = kappas))
}
if (any(isIll))
warning("k=", k, ": ", paste(names(isIll)[isIll], collapse = ", "),
" > maxKappa", sep = "")
alphas <- family$alphasfn(alphas, zs, x1)
betas <- family$betasfn(betas, x1, zs)
zs <- family$undummyfn(x1, zs)
zs <- family$rescalefn(x1, zs)
resid <- zs %*% alphas - t(data)
return(as.archetypes(t(zs), k, t(alphas), rss, iters = (i - 1), call = mycall, history = memento, kappas = kappas,
betas = t(betas), family = family, familyArgs = famargs,
residuals = t(resid), weights = weights, reweights = reweights,
scaling = attr(x1, ".Meta")))
}
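## Hedged usage sketch (not run): `archetypesFamily()` and the `toy` data come
## from the archetypes package; the `PM` value below is an illustrative
## placeholder for whatever projection object `family$normfn` expects.
if (FALSE) {
  data("toy", package = "archetypes")
  a <- archetypes_funct(toy, k = 3,
                        family = archetypesFamily("original"),
                        PM = diag(ncol(toy)))
}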
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(GlmSimulatoR)
library(MASS)
set.seed(1)
simdata <- simulate_inverse_gaussian(N = 100000, link = "1/mu^2",
weights = c(1, 2, 3), unrelated = 3)
scopeArg <- list(
lower = Y ~ 1,
upper = Y ~ X1 + X2 + X3 + Unrelated1 + Unrelated2 + Unrelated3
)
startingModel <- glm(Y ~ 1, data = simdata, family = inverse.gaussian(link = "1/mu^2"))
glmSearch <- stepAIC(startingModel, scopeArg, trace = 0)
summary(glmSearch)
rm(simdata, scopeArg, glmSearch, startingModel)
set.seed(2)
simdata <- simulate_inverse_gaussian(N = 100000, link = "1/mu^2",
weights = c(1, 2, 3), unrelated = 20)
scopeArg <- list(
lower = Y ~ 1,
upper = Y ~ X1 + X2 + X3 + Unrelated1 + Unrelated2 + Unrelated3 +
Unrelated4 + Unrelated5 + Unrelated6 + Unrelated7 + Unrelated8 + Unrelated9 +
Unrelated10 + Unrelated11 + Unrelated12 + Unrelated13 + Unrelated14 + Unrelated15 +
Unrelated16 + Unrelated17 + Unrelated18 + Unrelated19 + Unrelated20
)
startingModel <- glm(Y ~ 1, data = simdata, family = inverse.gaussian(link = "1/mu^2"))
glmSearch <- stepAIC(startingModel, scopeArg, trace = 0)
summary(glmSearch)
rm(simdata, scopeArg, glmSearch, startingModel)
set.seed(3)
simdata <- simulate_inverse_gaussian(N = 1000, link = "1/mu^2",
weights = c(1, 2, 3), unrelated = 3)
scopeArg <- list(
lower = Y ~ 1,
upper = Y ~ X1 + X2 + X3 + Unrelated1 + Unrelated2 + Unrelated3
)
startingModel <- glm(Y ~ 1, data = simdata, family = inverse.gaussian(link = "1/mu^2"))
glmSearch <- stepAIC(startingModel, scopeArg, trace = 0)
summary(glmSearch)
rm(simdata, scopeArg, glmSearch, startingModel)
set.seed(4)
simdata <- simulate_inverse_gaussian(N = 1000, link = "1/mu^2",
weights = c(1, 2, 3), unrelated = 20)
scopeArg <- list(
lower = Y ~ 1,
upper = Y ~ X1 + X2 + X3 + Unrelated1 + Unrelated2 + Unrelated3 +
Unrelated4 + Unrelated5 + Unrelated6 + Unrelated7 + Unrelated8 + Unrelated9 +
Unrelated10 + Unrelated11 + Unrelated12 + Unrelated13 + Unrelated14 + Unrelated15 +
Unrelated16 + Unrelated17 + Unrelated18 + Unrelated19 + Unrelated20
)
startingModel <- glm(Y ~ 1, data = simdata, family = inverse.gaussian(link = "1/mu^2"))
glmSearch <- stepAIC(startingModel, scopeArg, trace = 0)
summary(glmSearch)
rm(simdata, scopeArg, glmSearch, startingModel)
NULL
ml_gbt_classifier <- function(x, formula = NULL, max_iter = 20, max_depth = 5,
step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", min_instances_per_node = 1L,
max_bins = 32, min_info_gain = 0, loss_type = "logistic",
seed = NULL, thresholds = NULL, checkpoint_interval = 10,
cache_node_ids = FALSE, max_memory_in_mb = 256,
features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"), ...) {
check_dots_used()
UseMethod("ml_gbt_classifier")
}
ml_gbt_classifier.spark_connection <- function(x, formula = NULL, max_iter = 20, max_depth = 5,
step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", min_instances_per_node = 1L,
max_bins = 32, min_info_gain = 0, loss_type = "logistic",
seed = NULL, thresholds = NULL, checkpoint_interval = 10,
cache_node_ids = FALSE, max_memory_in_mb = 256,
features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"), ...) {
.args <- list(
max_iter = max_iter,
max_depth = max_depth,
step_size = step_size,
subsampling_rate = subsampling_rate,
feature_subset_strategy = feature_subset_strategy,
min_instances_per_node = min_instances_per_node,
max_bins = max_bins,
min_info_gain = min_info_gain,
loss_type = loss_type,
seed = seed,
thresholds = thresholds,
checkpoint_interval = checkpoint_interval,
cache_node_ids = cache_node_ids,
max_memory_in_mb = max_memory_in_mb,
features_col = features_col,
label_col = label_col,
prediction_col = prediction_col,
probability_col = probability_col,
raw_prediction_col = raw_prediction_col
) %>%
c(rlang::dots_list(...)) %>%
validator_ml_gbt_classifier()
stage_class <- "org.apache.spark.ml.classification.GBTClassifier"
jobj <- (
if (spark_version(x) < "2.2.0") {
spark_pipeline_stage(
x, stage_class, uid,
features_col = .args[["features_col"]],
label_col = .args[["label_col"]], prediction_col = .args[["prediction_col"]]
)
} else {
spark_pipeline_stage(
x, stage_class, uid,
features_col = .args[["features_col"]],
label_col = .args[["label_col"]],
prediction_col = .args[["prediction_col"]],
probability_col = .args[["probability_col"]],
raw_prediction_col = .args[["raw_prediction_col"]]
)
}) %>% (
function(obj) {
do.call(
invoke,
c(obj, "%>%", Filter(
function(x) !is.null(x),
list(
list("setCheckpointInterval", .args[["checkpoint_interval"]]),
list("setMaxBins", .args[["max_bins"]]),
list("setMaxDepth", .args[["max_depth"]]),
list("setMinInfoGain", .args[["min_info_gain"]]),
list("setMinInstancesPerNode", .args[["min_instances_per_node"]]),
list("setCacheNodeIds", .args[["cache_node_ids"]]),
list("setMaxMemoryInMB", .args[["max_memory_in_mb"]]),
list("setLossType", .args[["loss_type"]]),
list("setMaxIter", .args[["max_iter"]]),
list("setStepSize", .args[["step_size"]]),
list("setSubsamplingRate", .args[["subsampling_rate"]]),
jobj_set_param_helper(obj, "setFeatureSubsetStrategy", .args[["feature_subset_strategy"]], "2.3.0", "auto"),
jobj_set_param_helper(obj, "setThresholds", .args[["thresholds"]]),
jobj_set_param_helper(obj, "setSeed", .args[["seed"]])
)
))
)
})
new_ml_gbt_classifier(jobj)
}
ml_gbt_classifier.ml_pipeline <- function(x, formula = NULL, max_iter = 20, max_depth = 5,
step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", min_instances_per_node = 1L,
max_bins = 32, min_info_gain = 0, loss_type = "logistic",
seed = NULL, thresholds = NULL, checkpoint_interval = 10,
cache_node_ids = FALSE, max_memory_in_mb = 256,
features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"), ...) {
stage <- ml_gbt_classifier.spark_connection(
x = spark_connection(x),
formula = formula,
max_iter = max_iter,
max_depth = max_depth,
step_size = step_size,
subsampling_rate = subsampling_rate,
feature_subset_strategy = feature_subset_strategy,
min_instances_per_node = min_instances_per_node,
max_bins = max_bins,
min_info_gain = min_info_gain,
loss_type = loss_type,
seed = seed,
thresholds = thresholds,
checkpoint_interval = checkpoint_interval,
cache_node_ids = cache_node_ids,
max_memory_in_mb = max_memory_in_mb,
features_col = features_col,
label_col = label_col,
prediction_col = prediction_col,
probability_col = probability_col,
raw_prediction_col = raw_prediction_col,
uid = uid,
...
)
ml_add_stage(x, stage)
}
ml_gbt_classifier.tbl_spark <- function(x, formula = NULL, max_iter = 20, max_depth = 5,
step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", min_instances_per_node = 1L,
max_bins = 32, min_info_gain = 0, loss_type = "logistic",
seed = NULL, thresholds = NULL, checkpoint_interval = 10,
cache_node_ids = FALSE, max_memory_in_mb = 256,
features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"),
response = NULL, features = NULL,
predicted_label_col = "predicted_label", ...) {
formula <- ml_standardize_formula(formula, response, features)
stage <- ml_gbt_classifier.spark_connection(
x = spark_connection(x),
formula = NULL,
max_iter = max_iter,
max_depth = max_depth,
step_size = step_size,
subsampling_rate = subsampling_rate,
feature_subset_strategy = feature_subset_strategy,
min_instances_per_node = min_instances_per_node,
max_bins = max_bins,
min_info_gain = min_info_gain,
loss_type = loss_type,
seed = seed,
thresholds = thresholds,
checkpoint_interval = checkpoint_interval,
cache_node_ids = cache_node_ids,
max_memory_in_mb = max_memory_in_mb,
features_col = features_col,
label_col = label_col,
prediction_col = prediction_col,
probability_col = probability_col,
raw_prediction_col = raw_prediction_col,
uid = uid,
...
)
if (is.null(formula)) {
stage %>%
ml_fit(x)
} else {
ml_construct_model_supervised(
new_ml_model_gbt_classification,
predictor = stage,
formula = formula,
dataset = x,
features_col = features_col,
label_col = label_col,
predicted_label_col = predicted_label_col
)
}
}
validator_ml_gbt_classifier <- function(.args) {
.args <- ml_validate_decision_tree_args(.args)
.args[["thresholds"]] <- cast_nullable_double_list(.args[["thresholds"]])
.args[["max_iter"]] <- cast_scalar_integer(.args[["max_iter"]])
.args[["step_size"]] <- cast_scalar_double(.args[["step_size"]])
.args[["subsampling_rate"]] <- cast_scalar_double(.args[["subsampling_rate"]])
.args[["loss_type"]] <- cast_choice(.args[["loss_type"]], "logistic")
.args[["feature_subset_strategy"]] <- cast_string(.args[["feature_subset_strategy"]])
.args
}
new_ml_gbt_classifier <- function(jobj) {
v <- jobj %>%
spark_connection() %>%
spark_version()
if (v < "2.2.0") {
new_ml_predictor(jobj, class = "ml_gbt_classifier")
} else {
new_ml_probabilistic_classifier(jobj, class = "ml_gbt_classifier")
}
}
new_ml_gbt_classification_model <- function(jobj) {
v <- jobj %>%
spark_connection() %>%
spark_version()
if (v < "2.2.0") {
new_ml_prediction_model(
jobj,
feature_importances = possibly_null(~ read_spark_vector(jobj, "featureImportances")),
num_classes = possibly_null(~ invoke(jobj, "numClasses"))(),
total_num_nodes = function() invoke(jobj, "totalNumNodes"),
tree_weights = invoke(jobj, "treeWeights"),
trees = function() {
invoke(jobj, "trees") %>%
purrr::map(new_ml_decision_tree_regression_model)
},
class = "ml_multilayer_perceptron_classification_model"
)
} else {
new_ml_probabilistic_classification_model(
jobj,
feature_importances = possibly_null(~ read_spark_vector(jobj, "featureImportances")),
num_classes = possibly_null(~ invoke(jobj, "numClasses"))(),
total_num_nodes = function() invoke(jobj, "totalNumNodes"),
tree_weights = invoke(jobj, "treeWeights"),
trees = function() {
invoke(jobj, "trees") %>%
purrr::map(new_ml_decision_tree_regression_model)
},
class = "ml_gbt_classification_model"
)
}
}
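## Hedged usage sketch (not run): assumes a local Spark connection via
## sparklyr; Spark's GBTClassifier is binary-only, so a two-level label is used.
if (FALSE) {
  library(sparklyr)
  sc <- spark_connect(master = "local")
  mtcars_tbl <- copy_to(sc, mtcars, overwrite = TRUE)
  fit <- ml_gbt_classifier(mtcars_tbl, am ~ gear + hp + wt, max_iter = 10)
  ml_predict(fit, mtcars_tbl)
}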
mcmc_bite <- function(model, log.file = "bite_mcmc.log", sampling.freq = 1000, print.freq = 1000,
ncat = 1, beta.param = 0.3, ngen = 5000000, burnin = 0)
{
it <- ngen/ncat
if(burnin < 1) burnin <- burnin*it
if (ncat > 1) {
beta.class <- heat_par(ncat, beta.param)
} else {
beta.class <- 1
}
if(print.freq > 0){
cat("setting initial conditions\n")
}
pars.lik0 <- model$lik$init
lik0 <- model$lik$model(pars.lik0, model$data$traits, model$data$counts)
pars.priors0 <- list()
priors0 <- c()
hpriors0 <- list()
for(p in 1:length(model$priors)){
pars.priors0[[p]] <- model$priors[[p]]$init
priors0[p] <- model$priors[[p]]$value
hpriors0[[p]] <- unlist(mapply(do.call, model$priors[[p]]$hprior, lapply(pars.priors0[[p]], list))[1,])
}
if(print.freq > 0){
cat("generation\tposterior\n")
cat(paste(model$header, collapse = "\t"), "\n", append = FALSE, file = log.file)
}
it.beta <- 1
bet <- beta.class[it.beta]
if(ncat > 1) cat("beta = ", bet, "\n")
update.freq <- c(model$lik$update.freq, sapply(model$priors, function(x) x$update.freq))
update.freq <- cumsum(update.freq/sum(update.freq))
proposals <- numeric(length(update.freq))
proposals.accepted <- numeric(length(update.freq))
post0 <- (sum(lik0) + sum(priors0 * bet) + sum(unlist(hpriors0)))
for (i in 1:(it*ncat)) {
r <- min(which(runif(1) <= update.freq))
proposals[r] <- proposals[r] + 1
if (r == 1)
{
ind <- sample(1:model$data$n, model$lik$n.u, replace = FALSE)
u = runif(1)
lik1 <- lik0
pars.lik1 <- pars.lik0
priors1 <- priors0
hasting.ratio <- 0
for(p in 1:length(model$priors)){
tmp <- model$lik$prop[[p]](i = pars.lik0[[p]][ind], d = model$lik$ws[[p]][ind], u)
pars.lik0[[p]][ind] <- tmp$v
hasting.ratio <- hasting.ratio + tmp$lnHastingsRatio
priors0[[p]] <- model$priors[[p]]$model(x = pars.lik0[[p]], n = model$data$n, pars = pars.priors0[[p]],
Pi = model$priors[[p]]$Pi, par.n = 0,
data = model$priors[[p]]$data, map = model$priors[[p]]$map)$loglik
}
lik0 <- model$lik$model(pars.lik0, model$data$traits, model$data$counts)
} else {
p <- r - 1
par.n <- sample(1:length(model$priors[[p]]$prop), 1)
u = runif(1)
pars.priors1 <- pars.priors0
priors1 <- priors0
hpriors1 <- hpriors0
tmp <- model$priors[[p]]$prop[[par.n]](i = pars.priors0[[p]][par.n], d = model$priors[[p]]$ws[par.n], u)
pars.priors0[[p]][par.n] <- tmp$v
mat1 <- model$priors[[p]]$data
mat0 <- try(model$priors[[p]]$model(x = pars.lik0[[p]], n = model$data$n, pars = pars.priors0[[p]],
Pi = model$priors[[p]]$Pi, par.n = par.n,
data = model$priors[[p]]$data,
map = model$priors[[p]]$map), silent = TRUE)
if(any(grepl("Error", mat0))){
priors0[p] <- -Inf
} else {
model$priors[[p]]$data <- mat0$data
priors0[p] <- mat0$loglik
hpriors0[[p]] <- unlist(mapply(do.call, model$priors[[p]]$hprior, lapply(pars.priors0[[p]], list))[1,])
}
hasting.ratio <- tmp$lnHastingsRatio
}
post1 <- post0
post0 <- (sum(lik0) + sum(priors0 * bet) + sum(unlist(hpriors0)))
if(any(is.infinite(c(lik0, priors0, unlist(hpriors0))))){
pr <- -Inf
} else {
pr <- post0 - post1 + hasting.ratio
}
if (pr >= log(runif(1))){
proposals.accepted[r] <- proposals.accepted[r] + 1
} else {
post0 <- post1
if (r == 1){
pars.lik0 <- pars.lik1
lik0 <- lik1
priors0 <- priors1
} else {
pars.priors0 <- pars.priors1
priors0 <- priors1
hpriors0 <- hpriors1
model$priors[[p]]$data <- mat1
}
}
if (i %% sampling.freq == 0 & i >= burnin) {
cat(paste(c(i, post0, sum(lik0), priors0, unlist(sapply(1:length(model$priors), function(p) c(pars.priors0[[p]], pars.lik0[[p]]))), sum(proposals.accepted)/i, bet), collapse = "\t"), "\n",
append=TRUE, file=log.file)
}
if(print.freq > 0){
if (i %% print.freq == 0) {
cat(i,'\t',post0,'\n')
}
}
if(i%%it == 0 & i < ngen){
it.beta = it.beta+1
bet <- beta.class[it.beta]
cat("beta = ", bet, "\n")
}
}
if(print.freq > 0){
acceptance.results <- proposals.accepted / proposals
names(acceptance.results) <- names(proposals) <- c("Likelihood parameters",sprintf("prior.%s",names(model$priors)))
cat("\nEffective proposal frequency\n")
print(proposals/ngen)
cat("\nAcceptance ratios\n")
print(acceptance.results)
}
}
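## Hedged usage sketch (not run): in the bite package this sampler is driven
## by a model assembled with make_jive(); the arguments shown are illustrative.
if (FALSE) {
  my_jive <- make_jive(phy = my_tree, traits = my_traits,
                       model.priors = list(mean = "BM", logvar = "OU"))
  mcmc_bite(my_jive, log.file = "jive_mcmc.log", ngen = 50000,
            sampling.freq = 100)
}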
knitr::opts_chunk$set(echo = TRUE)
species_df = data.frame(
species = c("a", "b", "c"),
trait_value = c(-1, 0, 0.5)
)
species_distance = dist(c(a = -1, b = 0, c = 0.5))
species_distance
alternative_distinctiveness = function(pres_mat, distance_obj, given_T) {
dist_mat = as.matrix(distance_obj)
kept_sp = funrar:::species_in_common(pres_mat, dist_mat)
dist_mat = dist_mat[kept_sp, kept_sp, drop = FALSE]
corr_dist = dist_mat
corr_dist[dist_mat > given_T] = 0
corr_dist[dist_mat <= given_T] = 1
diag(corr_dist) = 0
di_mat = apply(pres_mat, 1, function(given_pres) {
index_mat = given_pres %*% (dist_mat * corr_dist)
denom_mat = given_pres %*% corr_dist
index_mat = index_mat / denom_mat
index_mat[given_pres == 0] = NA
index_mat[is.nan(index_mat)] = 1
return(index_mat)
})
di_mat = t(di_mat)
dimnames(di_mat) = dimnames(pres_mat)
di_df = funrar::matrix_to_stack(di_mat, "Di")
di_df$given_range = given_T
return(di_df)
}
presence_matrix = matrix(c(rep(1, 3), 1, 0, 1, 1, 1, 0), nrow = 3, ncol = 3,
dimnames = list(site = c("s1", "s2", "s3"),
species = c("a", "b", "c")))
all_T = lapply(seq(0.5, 1.5, length.out = 50),
function(given_number) alternative_distinctiveness(
presence_matrix,
species_distance,
given_number))
all_T = do.call(rbind.data.frame, all_T)
library(ggplot2)
ggplot(all_T, aes(given_range, Di, color = species)) +
geom_line(size = 1, alpha = 1/2) +
facet_grid(~site) +
labs(x = "Fixed distance range",
y = "Functional Distinctiveness",
color = "Species")
library("funrar")
data("aravo", package = "ade4")
mat = as.matrix(aravo$spe)
mat[mat > 0] = 1
tra = aravo$traits[, c("Height", "SLA", "N_mass")]
dist_mat = compute_dist_matrix(tra, metric = "gower")
dist_mat = (dist_mat - min(dist_mat))/diff(range(dist_mat))
names(dimnames(mat)) = c("site", "species")
all_ranges = lapply(seq(0, 1, length.out = 50), function(given_range) {
alternative_distinctiveness(mat, as.dist(dist_mat), given_range)
})
all_ranges = do.call(rbind.data.frame, all_ranges)
ggplot(subset(all_ranges, site %in% c("AR07", "AR51", "AR02")),
aes(given_range, Di, group = species)) +
geom_line(alpha = 1/3) +
facet_wrap(~site) +
labs(x = "Maximum Distance Range Considered\n(Trait Range)",
y = "Functional Distinctiveness")
all_ranges$scaled_Di = ifelse(
all_ranges$Di != 1,
all_ranges$Di / all_ranges$given_range,
all_ranges$Di)
ggplot(subset(all_ranges, site %in% c("AR07", "AR51", "AR02")),
aes(given_range, scaled_Di, group = species)) +
geom_line(alpha = 1/4) +
facet_wrap(~site) +
labs(x = "Considered Trait Range\n(Functional Distance)",
y = "Scaled Functional Distinctiveness\n(over trait range)")
ab_mat = matrix(c(rep(1/3, 3), 1/6, 1/6, 4/6, 4/6, 1/6, 1/6), nrow = 3, ncol = 3,
dimnames = list(site = c("s1", "s2", "s3"),
species = c("a", "b", "c")),
byrow = TRUE)
alternative_distinctiveness_abundance = function(abund_mat, dist_matrix,
given_range) {
dist_mat = dist_matrix
kept_sp = funrar:::species_in_common(abund_mat, dist_mat)
dist_mat = dist_mat[kept_sp, kept_sp, drop = FALSE]
corr_dist = dist_mat
corr_dist[dist_mat > given_range] = 0
corr_dist[dist_mat <= given_range] = 1
diag(corr_dist) = 0
di_mat = apply(abund_mat, 1, function(given_ab) {
index_mat = given_ab %*% (dist_mat * corr_dist)
denom_mat = given_ab %*% corr_dist
index_mat = (index_mat / denom_mat) * (1 - denom_mat)
index_mat[given_ab == 0 | is.na(given_ab)] = NA
index_mat[is.nan(index_mat)] = 1
return(index_mat)
})
di_mat = t(di_mat)
dimnames(di_mat) = dimnames(abund_mat)
di_df = funrar::matrix_to_stack(di_mat, "Di")
di_df$given_range = given_range
return(di_df)
}
ab_di_all_ranges = lapply(seq(0, 1.5, length.out = 50),
function(given_number) alternative_distinctiveness_abundance(ab_mat,
as.matrix(species_distance),
given_number))
ab_di_all_ranges = do.call(rbind.data.frame, ab_di_all_ranges)
ggplot(ab_di_all_ranges, aes(given_range, Di, color = species)) +
geom_line(size = 1) +
facet_wrap(~site, labeller = as_labeller(c(s1 = "s1 (1/3 rel. abund each)",
s2 = "s2 (a=1/6, b=1/6, c=4/6)",
s3 = "s3 (a=4/6, b=1/6, c=1/6)"))) +
labs(x = "Considered Range",
y = "Functional Distinctiveness") |
print.eefAnalytics <- function(x,...) {
Checks <- sum(x$Function %in% c("srtBayes","crtBayes","mstBayes") )
if(Checks==0){Approach="Frequentist"}else{Approach="Bayesian"}
cat("\nModel Info:")
cat("\n method: ", x$Method)
cat("\n Design: ", toupper(substr(x$Function,1,3)))
cat("\n Approach: ", Approach )
cat("\n function: ", x$Function)
cat("\n---------\n")
cat("\n")
ES0=x$ES
ES1= x$Unconditional$ES
cat("Result for: Conditional effect size")
cat("\n")
print(ES0)
cat("\n")
cat("Result for: Unconditional effect size")
cat("\n")
print(ES1)
cat("\n")
if(Checks==0){
cat("Please use summary to get more results")
}else{
cat("Please use summary to get more results")
cat("\nAnd use the model object to check for convergence")
}
}
summary.eefAnalytics <- function(object,...){
Checks <- sum(object$Function %in% c("srtBayes","crtBayes","mstBayes") )
cat("\n method: ", object$Method)
cat("\n Design: ", object$Function)
if(Checks>0){cat("\n observations: ", length(object$Model$y))}
res <- object
if(Checks>0){
Beta1 <- data.frame( summary(object$Model,pars=c("alpha","beta")))
res$Beta <- cbind(object$Beta,round(Beta1[,c("sd","n_eff","Rhat")],2))
}
cat("\n")
Beta <- res$Beta
print(Beta)
cat("\n")
ES0=object$ES
ES1= object$Unconditional$ES
cat("Result for: Conditional effect size")
cat("\n")
print(ES0)
cat("\n")
cat("Result for: Unconditional effect size")
cat("\n")
print(ES1)
cat("\n")
class(res) <- "eefAnalyticssummary"
invisible(res)
}
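## Hedged usage sketch (not run): assumes the eefAnalytics package and its
## bundled crtData; the model call shown is illustrative.
if (FALSE) {
  output <- crtFREQ(Posttest ~ Intervention + Prettest, random = "School",
                    intervention = "Intervention", data = crtData)
  print(output)
  summary(output)
}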
lab.qcs <- function(x, ...)
{
if(is.null(x) || !inherits(x, "lab.qcdata"))
stop("x must be an objects of class (or extending) 'lab.qcdata'")
p <- length(unique(x$laboratory))
m <- length(unique(x$material))
n <- length(unique(x$replicate))
material<-unique(x$material)
laboratory<-unique(x$laboratory)
stat.material <- data.frame(mean = vector(,length = m),
S = vector(,length = m),
S_r = vector(,length = m),
S_B = vector(,length = m),
S_R = vector(,length = m))
statistics <- data.frame(laboratory = vector(,length = p*m),
material = vector(,length = p*m),
mean.i = vector(,length = p*m),
s.i = vector(,length = p*m))
data <- x$x
statistics[,1] <- as.factor(rep(laboratory,each = m))
statistics[,2] <- as.factor(rep(material,p))
statistics[,3] <- c(tapply(data,list(x$material,x$laboratory),mean))
statistics[,4] <- c(tapply(data,list(x$material,x$laboratory),sd))
stat.material[,1] <- tapply(statistics$mean.i,statistics$material,mean)
stat.material[,2] <- tapply(statistics$mean.i,statistics$material,sd)
f.S_r <- function(s.i) {sqrt(mean(s.i^2))}
S_r <- stat.material[,3] <- tapply(statistics$s.i,statistics$material,f.S_r)
S_B <- stat.material[,4] <- tapply(statistics$mean.i,statistics$material,sd)
stat.material[,5] <- sqrt(S_B^2 + ((n-1)/n)*S_r^2)
rownames(stat.material) <- material
result <- list (lab.qcdata = x, statistics.Laboratory = statistics,
statistics.material = stat.material, p = p, n = n, m = m )
oldClass(result)<-c("lab.qcs")
attr(result, "object.name") <- attributes(x)$data.name
attr(result, "type.data") <- "lab.qcs"
return(result)
}
print.lab.qcs <- function(x, ...) str(x,1)
summary.lab.qcs <- function(object, ...)
{
type.data <- attributes(object)$type.data
cat("\nNumber of laboratories: ", object$p)
cat("\nNumber of materials: ", object$m)
cat("\nNumber of replicate: ", object$n)
result <- switch(type.data,
"lab.qcs" = {
cat("\nSummary for Laboratory (means):\n")
st <- with(object$lab.qcdata,
tapply(x,
list(material,
laboratory), mean))
print(st)
cat("\nSummary for Laboratory (Deviations):\n")
st <- with(object$lab.qcdata,
tapply(x,
list(material,
laboratory), sd))
print(st)
cat("\nSummary for Material:\n")
print(object$statistics.material)
},
"h.qcs" = {
cat("\nCritical value: ", object[[7]])
cat("\nBeyond limits of control:", "\n")
print(object[[8]])
},
"k.qcs" ={
cat("\nCritical value: ", object[[7]])
cat("\nBeyond limits of control:", "\n")
print(object[[8]])
})
invisible()
}
h.qcs <- function(x, ...) {
UseMethod("h.qcs")
}
h.qcs.default <- function(x, var.index=1,replicate.index = 2, material.index = 3,
laboratory.index=4, data.name = NULL, alpha = 0.05, ...)
{
if (is.null(data.name)) data.name <- "Statistical Mandel h"
obj<-lab.qcdata(data = x, var.index=var.index,replicate.index = replicate.index,
material.index = material.index,
laboratory.index=laboratory.index, data.name = data.name)
result<-h.qcs.lab.qcdata(x = obj, alpha = alpha)
return(result)
}
h.qcs.lab.qcdata <- function(x, alpha = 0.05, ...)
{
if(is.null(x) || !inherits(x, "lab.qcdata"))
stop("x must be an objects of class (or extending) 'lab.qcdata'")
data.name <- attributes(x)$data.name
x.lab.qcs <- lab.qcs(x)
statistics <- x.lab.qcs$statistics.material
mean.i <- x.lab.qcs$statistics.Laboratory$mean.i
p <- x.lab.qcs$p
n <- x.lab.qcs$n
m <- x.lab.qcs$m
hcrit <- (p-1)*qt((1-alpha/2),(p-2))/sqrt(p*(p-2+qt((1-alpha/2),(p-2))^2))
material <- row.names(x.lab.qcs$statistics.material)
laboratory <- unique(x.lab.qcs[[1]]$laboratory)
h.i <- matrix(,nrow = p,ncol = m)
for(i in 1:m)
{
ind <- x.lab.qcs$statistics.Laboratory$material==material[i]
h.i[,i] <- (mean.i[ind]-statistics$mean[i])/statistics$S[i]
}
colnames(h.i) <- material
rownames(h.i) <- laboratory
violations <- abs(h.i) > hcrit
result <- list (lab.qcdata = x, lab.qcs = x.lab.qcs, p = p, n = n, m = m,
h = h.i, h.critical = hcrit, violations = violations, data.name = data.name )
oldClass(result) <- c("lab.qcs")
attr(result, "object.name") <- data.name
attr(result, "type.data") <- "h.qcs"
return(result)
}
k.qcs <- function(x, ...) {
UseMethod("k.qcs")
}
k.qcs.default <- function(x, var.index=1,replicate.index = 2, material.index = 3,
laboratory.index=4, data.name = NULL, alpha = 0.05, ...)
{
if (is.null(data.name)) data.name <- "Statistical Mandel k"
obj<-lab.qcdata(data = x, var.index=var.index,replicate.index = replicate.index,
material.index = material.index,
laboratory.index=laboratory.index, data.name = data.name)
result<-k.qcs.lab.qcdata(x = obj, alpha = alpha)
return(result)
}
k.qcs.lab.qcdata<- function(x, alpha = 0.05, ...)
{
if(is.null(x) || !inherits(x, "lab.qcdata"))
stop("x must be an objects of class (or extending) 'lab.qcdata'")
data.name <- attributes(x)$data.name
x.lab.qcs <- lab.qcs(x)
statistics <- x.lab.qcs$statistics.material
s.i <- x.lab.qcs$statistics.Laboratory$s.i
p <- x.lab.qcs$p
n <- x.lab.qcs$n
m <- x.lab.qcs$m
v1<-(p-1)*(n-1)
v2<-n-1
kcrit <- sqrt(p/(1+(p-1)*qf(alpha,v1,v2,lower.tail=TRUE)))
material <- row.names(x.lab.qcs$statistics.material)
laboratory <- unique(x.lab.qcs[[1]]$laboratory)
k.i<-matrix(,nrow =p ,ncol =m )
for(i in 1:m)
{
ind <- x.lab.qcs$statistics.Laboratory$material==material[i]
k.i[,i] <- s.i[ind]/statistics$S_r[i]
}
colnames(k.i) <- material
row.names(k.i) <- laboratory
violations <- k.i > kcrit
result <- list (lab.qcdata = x, lab.qcs = x.lab.qcs, p = p, n = n, m = m,
k = k.i, k.critical = kcrit, violations = violations, data.name = data.name )
oldClass(result) <- c("lab.qcs")
attr(result, "object.name") <- data.name
attr(result, "type.data") <- "k.qcs"
return(result)
}
cochran.test <- function(x, ...) {
UseMethod("cochran.test")
}
cochran.test.default <- function(x, var.index=1,replicate.index = 2, material.index = 3,
laboratory.index=4, data.name = NULL, alpha = 0.05, ...)
{
if (is.null(data.name)) data.name <- "Cochran test"
obj<-lab.qcdata(data = x, var.index=var.index,replicate.index = replicate.index,
material.index = material.index,
laboratory.index=laboratory.index, data.name = data.name)
result<-cochran.test.lab.qcdata(x = obj, alpha = alpha)
return(result)
}
cochran.test.lab.qcdata<-function(x, alpha = 0.05,...){
if(is.null(x) || !inherits(x, "lab.qcdata"))
stop("x must be an object of class (or extending) 'lab.qcdata'")
x.lab.qcs <- lab.qcs(x)
stat <- x.lab.qcs$statistics.Laboratory
material <- row.names(x.lab.qcs$statistics.material)
laboratory <- unique(x$laboratory)
S2max <- tapply(stat$s.i,stat$material,max)
ind.max <- tapply(stat$s.i,stat$material,which.max)
laboratory.max <- laboratory[ind.max]
p <- x.lab.qcs$p
n <- x.lab.qcs$n
m <- x.lab.qcs$m
C <- vector()
p.value <- vector()
v1 <- (p-1)*(n-1);
v2 <- n-1
Ccrit <- 1/(1+(p-1)*qf(alpha/p,v1,v2,lower.tail=TRUE))
for(i in 1:m){
C[i] <- S2max[i]/sum((stat$s.i[stat$material == material[i]])^2)
p.value[i] <- round(pf(C[i],v1,v2,lower.tail=T),4)
}
result <- list(result = data.frame(Smax = laboratory.max, Material = material,
C = C, p.value = p.value),C.critical = Ccrit, alpha.test = alpha/p)
oldClass(result) <- c("cochran.test")
return(result)
}
print.cochran.test <- function(x, ...) {
cat("\nTest Cochran", "\n")
cat("\n Critical value:",x[[2]],"\n")
cat("\n Alpha test:",x[[3]],"\n")
print(x[[1]])}
grubbs.test <- function(x, ...) {
UseMethod("grubbs.test")
}
grubbs.test.default <- function(x, var.index=1,replicate.index = 2, material.index = 3,
laboratory.index=4, data.name = NULL, alpha = 0.05, ...)
{
if (is.null(data.name)) data.name <- "Grubbs test"
obj<-lab.qcdata(data = x, var.index=var.index,replicate.index = replicate.index,
material.index = material.index,
laboratory.index=laboratory.index, data.name = data.name)
result<-grubbs.test.lab.qcdata(x = obj, alpha = alpha)
return(result)
}
grubbs.test.lab.qcdata <-function(x, alpha = 0.05,...){
x.lab.qcs <- lab.qcs(x)
stat <- x.lab.qcs$statistics.Laboratory
material <- row.names(x.lab.qcs$statistics.material)
laboratory <- unique(x$laboratory)
p <- x.lab.qcs$p
n <- x.lab.qcs$n
m <- x.lab.qcs$m
Gh <- vector()
Gl <- vector()
S <- vector()
ph.value <- vector()
pl.value <- vector()
mean.i <- stat$mean.i
mean <- x.lab.qcs$statistics.material$mean
S <- x.lab.qcs$statistics.material$S
ind.max <- tapply(stat$mean.i,stat$material,which.max)
ind.min <- tapply(stat$mean.i,stat$material,which.min)
laboratory.max <- laboratory[ind.max]
laboratory.min <- laboratory[ind.min]
for(i in 1:m){
Gl[i] <- (mean[i] - mean.i[stat$material == material[i]][ind.min[i]])/S[i]
pl.value[i] <- round(pt(Gl[i],(p-1),lower.tail=F),4)
Gh[i] <- (mean.i[stat$material == material[i]][ind.max[i]] - mean[i] )/S[i]
ph.value[i] <- round(pt(Gh[i],(p-1),lower.tail=F),4)
}
gcrit <- (n-1)*qt((1-alpha/p),(n-2))/sqrt(n*(n-2+(qt((1-alpha/p),(n-2)))^2))
result <- list(result = data.frame(Material = material, Gmax = laboratory.max,
G.max = Gh,
p.value.max = ph.value, Gmin = laboratory.min,
G.min = Gl,
p.value.min = pl.value),G.critical = gcrit,
alpha.test = alpha/p)
oldClass(result) <- c("grubbs.test")
return(result)
}
print.grubbs.test <- function(x, ...) {
cat("\nTest Grubbs", "\n")
cat("\n Critical value:",x[[2]],"\n")
cat("\n Alpha test:",x[[3]],"\n")
print(x[[1]])}
lab.aov <- function(x, ...) {
UseMethod("lab.aov")
}
lab.aov.default <- function(x, var.index=1,replicate.index = 2, material.index = 3,
laboratory.index=4, data.name = NULL, level = 0.95,plot = FALSE, pages = 0, ...)
{
if (is.null(data.name)) data.name <- "Laboratory AOV"
obj<-lab.qcdata(data = x, var.index=var.index,replicate.index = replicate.index,
material.index = material.index,
laboratory.index=laboratory.index, data.name = data.name)
result<-lab.aov.lab.qcdata(x = obj, level = level,plot = plot, pages = pages)
return(result)
}
lab.aov.lab.qcdata <- function(x,level = 0.95,plot = FALSE, pages = 0,...){
aovModel <- list()
conf <- list()
.Pairs <- list()
material <- unique(x$material)
m <- length(material)
if(plot ==TRUE){
n.plots <- m
if (pages > 0)
if (pages > n.plots)
pages <- n.plots
if (pages < 0)
pages <- 0
if (pages != 0) {
ppp <- n.plots%/%pages
if (n.plots%%pages != 0) {
ppp <- ppp + 1
while (ppp * (pages - 1) >= n.plots) pages <- pages - 1
}
c <- r <- trunc(sqrt(ppp))
if (c < 1)
r <- c <- 1
if (c * r < ppp)
c <- c + 1
if (c * r < ppp)
r <- r + 1
oldpar <- par(mfrow = c(r, c))
}
else {
ppp <- 1
oldpar <- par()
}
}
for (i in 1:m){
indm<-x$material==material[i]
y <- x$x[indm]
laboratory <- x$laboratory[indm]
data <- data.frame(y,laboratory)
aovModel[[i]] <- aov(y ~ laboratory,data=data)
.Pairs[[i]] <- glht(aovModel[[i]], linfct = mcp(laboratory = "Tukey"))
conf[[i]] <- confint(.Pairs[[i]],level = level)
}
if(plot ==TRUE){
old.oma <- par(oma=c(0,5,0,0))
for (i in 1:m){
title <- paste(level*100,"%"," ","Confidence Level",sep="")
subtitle = paste("Material",material[i])
plot(confint(.Pairs[[i]],level = level), main=title,sub = subtitle)
}
par(old.oma)
}
par(mfrow=c(1,1))
names(conf)<- paste("Material:",material)
names(.Pairs)<-paste("Material:",material)
names(aovModel)<-paste("Material:",material)
for (i in 1:m) {cat("\n AOV of Material:",material[i])
print(summary(aovModel[[i]]))
print(summary(.Pairs[[i]]))
print(conf[[i]])
}
result <- list(Models = aovModel,Confidence =conf)
invisible(result)
}
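## Hedged usage sketch (not run): assumes the ILS package layout, where the
## Glucose interlaboratory data ships with the package.
if (FALSE) {
  library(ILS)
  data("Glucose")
  qcdata <- lab.qcdata(Glucose)
  summary(lab.qcs(qcdata))
  h <- h.qcs(qcdata, alpha = 0.005)
  summary(h)
}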
jTuffTest <-
function(n, v, p, test_significant){
  # likelihood-ratio type statistic: observed geometric probability
  # p*(1-p)^(v-1) against the uniform benchmark 1/v (n is kept for the
  # call signature but is not used in the computation)
  statistic <- -2*log(p*(1-p)^(v-1)/((1/v)*(1-1/v)^(v-1)))
  Quantile <- qchisq(1-test_significant,1)
  rslt <- statistic <= Quantile
  return(c(statistic,Quantile,rslt))
}
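## Illustrative call (not run): values are made up; the third element of the
## returned vector is 1 when the statistic falls below the chi-square quantile.
if (FALSE) {
  jTuffTest(n = 100, v = 5, p = 0.3, test_significant = 0.05)
}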
pc.sel <- function(target, dataset, threshold = 0.05) {
Rfast2::pc.sel(target, dataset, alpha = threshold)
}
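## Illustrative call (not run): assumes Rfast2 is installed; data are simulated.
if (FALSE) {
  y <- rnorm(100)
  x <- matrix(rnorm(100 * 20), ncol = 20)
  pc.sel(y, x, threshold = 0.01)
}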
move_layerInvisible_grob <- function(loon.grob, currentLayer, ...) {
obj <- character(0)
class(obj) <- names(loon.grob$children)
UseMethod("move_layerInvisible_grob", obj)
}
move_layerInvisible_grob.l_plot <- function(loon.grob, currentLayer, ...) {
if(currentLayer == "scatterplot") {
args <- list(...)
pointsTreeName <- args$pointsTreeName
N <- args$N
set_deactive_grob(loon.grob, index = seq(N), pointsTreeName = pointsTreeName)
} else {
grid::setGrob(
gTree = loon.grob,
gPath = currentLayer,
newGrob = set_deactive_layer(
currentLayer_grob = grid::getGrob(loon.grob, currentLayer)
)
)
}
}
move_layerInvisible_grob.l_hist <- function(loon.grob, currentLayer, ...) {
if(currentLayer == "histogram") {
grid::setGrob(
gTree = loon.grob,
gPath = currentLayer,
newGrob = grob(name = "histogram")
)
} else {
grid::setGrob(
gTree = loon.grob,
gPath = currentLayer,
newGrob = set_deactive_layer(
currentLayer_grob = grid::getGrob(loon.grob, currentLayer)
)
)
}
}
move_layerInvisible_grob.l_graph <- function(loon.grob, currentLayer, ...) {
args <- list(...)
N <- args$N
if(currentLayer == "graph") {
set_deactive_grob(loon.grob, index = seq(N))
} else {
grid::setGrob(
gTree = loon.grob,
gPath = currentLayer,
newGrob = set_deactive_layer(
currentLayer_grob = grid::getGrob(loon.grob, currentLayer)
)
)
}
}
set_deactive_layer <- function(currentLayer_grob) {
  grobName <- currentLayer_grob$name
  # layers drawn as a single grob: rebuild the grob from its own arguments
  singleLayers <- c("l_layer_polygon:", "l_layer_line:", "l_layer_rectangle:",
                    "l_layer_oval:", "l_layer_text:", "l_layer_points:")
  # compound layers: rebuild every child grob, then reassemble the gTree
  compoundLayers <- c("l_layer_texts:", "l_layer_polygons:",
                      "l_layer_rectangles:", "l_layer_lines:")
  if(any(sapply(singleLayers, function(p) grepl(grobName, pattern = p)))) {
    do.call(grob, getGrobArgs(currentLayer_grob))
  } else if(any(sapply(compoundLayers, function(p) grepl(grobName, pattern = p)))) {
    gTree(
      children = do.call(
        gList,
        lapply(currentLayer_grob$children,
               function(child) do.call(grob, getGrobArgs(child)))
      ),
      name = grobName,
      gp = currentLayer_grob$gp,
      vp = currentLayer_grob$vp
    )
  } else stop("unspecified layer name")
}
BrainAtlas <- R6::R6Class(
classname = 'brain-atlas',
portable = TRUE,
cloneable = FALSE,
public = list(
subject_code = '',
atlas_type = 'aparc_aseg',
object = NULL,
group = NULL,
set_subject_code = function( subject_code ){
if( self$has_atlas ){
self$object$subject_code <- subject_code
self$group$subject_code <- subject_code
self$object$name <- sprintf('Atlas - %s (%s)', self$atlas_type, subject_code)
self$group$name <- sprintf('Atlas - %s (%s)', self$atlas_type, subject_code)
}
self$subject_code <- subject_code
},
set_group_position = function(...){
pos <- c(...)
stopifnot2(is.numeric(pos) && length(pos) == 3, msg = "Position must be numeric of length 3")
self$group$position <- pos
},
initialize = function(
subject_code, atlas_type, atlas, position = NULL
){
self$object <- atlas
self$group <- atlas$group
self$set_subject_code( subject_code )
self$atlas_type <- stringr::str_replace_all(atlas_type, '[\\W]', '_')
if( length(position) == 3 ){
self$set_group_position( position )
}
},
print = function( ... ){
cat('Subject\t\t:', self$subject_code, '\n')
cat('Atlas type\t:', self$atlas_type, '\n')
if( !self$has_atlas ){
warning('No atlas found!')
}
invisible( self )
}
),
active = list(
has_atlas = function(){
if( !is.null(self$object) &&
R6::is.R6(self$object) &&
'DataCubeGeom2' %in% class(self$object)){
return(TRUE)
}
return(FALSE)
}
)
)
NULL
add_voxel_cube <- function(brain, name, cube, size = c(256, 256, 256), matrix_world = NULL){
stopifnot2(length(size) == 3 && all(size > 0), msg = "add_voxel_cube: `size` must be length of 3 and all positive")
stopifnot2(is.null(matrix_world) || (
length(matrix_world) == 16 && is.matrix(matrix_world) && nrow(matrix_world) == 4
), msg = "add_voxel_cube: `matrix_world` must be either NULL or a 4x4 matrix")
re <- brain
if("multi-rave-brain" %in% class(brain)){
brain <- brain$template_object
}
subject <- brain$subject_code
nm <- sprintf("Atlas - %s (%s)", name, subject)
group <- GeomGroup$new(name = nm)
group$subject_code <- subject
if(length(matrix_world) == 16){
group$trans_mat <- matrix_world
}
geom <- DataCubeGeom2$new(
name = nm, dim = dim(cube),
half_size = size / 2, group = group,
position = c(0,0,0), value = cube)
geom$subject_code <- subject
obj <- BrainAtlas$new(
subject_code = subject, atlas_type = name,
atlas = geom, position = c(0, 0, 0 ))
brain$add_atlas( atlas = obj )
invisible(re)
}
create_voxel_cube <- function(mni_ras, value, colormap,
keys = colormap$get_key(value),
dimension = c(256,256,256)){
stopifnot2(length(dimension) == 3, msg = "`voxel_cube`: dimension length must be 3")
stopifnot2(max(abs(mni_ras)) < 128, msg = "`voxel_cube`: mni_ras should range from -127 to 127")
stopifnot2(nrow(mni_ras) == length(keys), msg = "`voxel_cube`: data value must be consistent with MNI RAS")
if(!is.matrix(mni_ras)){
mni_ras <- as.matrix(mni_ras)
}
mni_ras <- mni_ras + 128
cube <- array(0L, dimension)
ratio <- dimension / c(256, 256, 256)
for(i in seq_len(nrow(mni_ras))){
tmp <- round((mni_ras[i,]) * ratio)
if(cube[tmp[1], tmp[2], tmp[3]] == 0){
cube[tmp[1], tmp[2], tmp[3]] <- keys[[i]]
}
}
add_to_brain <- function(brain, name){
add_voxel_cube(brain, name, cube)
}
re <- list(
cube = cube,
dimension = dimension,
add_to_brain = add_to_brain
)
if(!missing(colormap)){
re$colormap <- colormap
}
re
}
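## Hedged usage sketch (not run): assumes a `brain` object from the
## surrounding package (e.g. threeBrain); keys are passed directly, so no
## colormap object is needed.
if (FALSE) {
  mni <- matrix(round(rnorm(30, sd = 20)), ncol = 3)
  vc <- create_voxel_cube(mni_ras = mni, keys = sample(1:5, nrow(mni), TRUE))
  brain <- vc$add_to_brain(brain, name = "my_atlas")
}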
multi_marginal<-function(weights,costA){
if (!requireNamespace("Rsymphony", quietly = TRUE)) {
warning("Package Rsymphony not detected: Please install Rsymphony for optimal performance if you are not using a Mac.")
Rsym<-FALSE
}
else{
Rsym<-TRUE
}
D<-dim(costA)
const<-gen_constraints_multi(D)
costVec<-build_MM_costvec(costA,const)
rhs<-unlist(weights)
if (Rsym){
out<-Rsymphony::Rsymphony_solve_LP(obj=costVec,mat=const,dir=rep("==",sum(D)),rhs=rhs,max=FALSE)
}
else{
out<-lpSolve::lp("min",costVec,const,rep("==",sum(D)),rhs)
}
optMMCoupling<-build_MMCoupling(out,const,D)
return(list(MMCoupling=optMMCoupling,cost=out$objval))
}
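## Illustrative call (not run): a three-marginal problem on a 2x2x2 cost
## array, with uniform weights on each marginal.
if (FALSE) {
  weights <- list(c(0.5, 0.5), c(0.5, 0.5), c(0.5, 0.5))
  costA <- array(runif(8), dim = c(2, 2, 2))
  multi_marginal(weights, costA)
}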
file2pdf=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2pdf(data=data,preprocessing=preprocessing,...)
}
file2HTML=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2HTML(data=data,preprocessing=preprocessing,...)
}
file2pptx=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2pptx(data=data,preprocessing=preprocessing,...)
}
file2pptx2=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2pptx2(data=data,preprocessing=preprocessing,...)
}
file2docx=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2docx(data=data,preprocessing=preprocessing,...)
}
file2docx2=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2docx2(data=data,preprocessing=preprocessing,...)
}
file2plotzip=function(file,selected=NULL,...){
data=readCSVComment(file)
preprocessing=readComment(file)
if(!is.null(selected)){
count=nrow(data)
accept=which((selected>0) & (selected<=count))
data<-data[selected[accept],]
}
data2plotzip(data=data,preprocessing=preprocessing,...)
}
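## Illustrative call (not run): render rows 1-3 of a CSV-with-comments report
## file (the format readCSVComment() above expects) to PDF.
if (FALSE) {
  file2pdf("report.csv", selected = 1:3)
}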
context ("Scenario of un wanted inputs")
test_that("NA values are avoided",{
expect_that(dGHGBB(NA,4,0.1,3,3),
throws_error("NA or Infinite or NAN values in the Input"))
})
test_that("Infinite values are avoided",{
expect_that(dGHGBB(Inf,4,0.1,3,3),
throws_error("NA or Infinite or NAN values in the Input"))
})
test_that("NAN values are avoided",{
expect_that(dGHGBB(NaN,4,0.1,3,3),
throws_error("NA or Infinite or NAN values in the Input"))
})
context("Binomial random variable or trial value issues")
test_that("Random variable higher than Trial value",{
expect_that(dGHGBB(5,4,0.2,3,3),
throws_error("Binomial random variable cannot be greater than binomial trial value"))
})
test_that("Negativity",{
expect_that(dGHGBB(-3,4,0.2,3,3),
throws_error("Binomial random variable or binomial trial value cannot be negative"))
})
test_that("Random variable higher than Trial value",{
expect_that(dGHGBB(-8,-4,0.2,3,3),
throws_error("Binomial random variable or binomial trial value cannot be negative"))
})
context("Shape parameter issues")
test_that("shape parameter a",{
expect_that(dGHGBB(2,4,-3,3,6),
throws_error("Shape parameters cannot be less than or equal to zero"))
})
test_that("shape parameter b",{
expect_that(dGHGBB(2,4,1,-3,5),
throws_error("Shape parameters cannot be less than or equal to zero"))
})
test_that("shape parameter c",{
expect_that(dGHGBB(2,4,1,1,-3),
throws_error("Shape parameters cannot be less than or equal to zero"))
})
library(metacoder)
library(testthat)
context("Calculations")
x = parse_tax_data(hmp_otus, class_cols = "lineage", class_sep = ";",
class_key = c(tax_rank = "info", tax_name = "taxon_name"),
class_regex = "^(.+)__(.+)$")
test_that("Counting the number of samples with reads", {
result <- calc_n_samples(x, data = "tax_data")
expect_equal(colnames(result), c("taxon_id", "n_samples"))
expect_equivalent(unlist(result[1, "n_samples"]), 17)
result <- calc_n_samples(x, data = "tax_data", drop = TRUE)
expect_true(is.vector(result))
result <- calc_n_samples(x, data = "tax_data", cols = hmp_samples$sample_id[1:5])
expect_equal(colnames(result), c("taxon_id", "n_samples"))
result <- calc_n_samples(x, data = "tax_data", groups = hmp_samples$body_site)
expect_equal(colnames(result), c("taxon_id", unique(hmp_samples$body_site)))
result <- calc_n_samples(x, data = "tax_data", groups = hmp_samples$body_site,
out_names = c("A", "B", "C", "D", "E"))
expect_equal(colnames(result), c("taxon_id", c("A", "B", "C", "D", "E")))
result <- calc_n_samples(x, data = "tax_data", cols = hmp_samples$sample_id[1])
expect_equal(result,
calc_n_samples(x, data = "tax_data", cols = as.factor(hmp_samples$sample_id[1])))
expect_equal(result,
calc_n_samples(x, data = "tax_data",
cols = colnames(x$data$tax_data) == hmp_samples$sample_id[1]))
expect_equal(result,
calc_n_samples(x, data = "tax_data",
cols = which(colnames(x$data$tax_data) == hmp_samples$sample_id[1])))
})
test_that("Observation proportions", {
result <- calc_obs_props(x, "tax_data")
expect_equal(colnames(x$data$tax_data)[-(1:3)], colnames(result)[-1])
expect_true(all(result$`700016050` == x$data$tax_data$`700016050` / sum(x$data$tax_data$`700016050`)))
col_subset <- c("700035949", "700097855", "700100489")
result <- calc_obs_props(x, "tax_data", cols = col_subset)
expect_equal(col_subset, colnames(result)[-1])
result <- calc_obs_props(x, "tax_data", cols = 4:6)
expect_equal(col_subset, colnames(result)[-1])
result <- calc_obs_props(x, "tax_data",
cols = startsWith(colnames(x$data$tax_data), "70001"))
expect_equal(colnames(x$data$tax_data)[startsWith(colnames(x$data$tax_data), "70001")],
colnames(result)[-1])
expect_warning(result <- calc_obs_props(x, "tax_data", other_cols = TRUE))
expect_true(all(c("otu_id", "lineage") %in% colnames(result)))
result <- calc_obs_props(x, "tax_data", cols = col_subset,
other_cols = 2:3)
expect_true(all(c("otu_id", "lineage") %in% colnames(result)))
result <- calc_obs_props(x, "tax_data", cols = col_subset,
out_names = c("a", "b", "c"))
expect_equal(colnames(result), c("taxon_id", "a", "b", "c"))
result <- calc_obs_props(x, data = "tax_data", cols = hmp_samples$sample_id[1])
expect_equal(result,
calc_obs_props(x, data = "tax_data", cols = as.factor(hmp_samples$sample_id[1])))
expect_equal(result,
calc_obs_props(x, data = "tax_data",
cols = colnames(x$data$tax_data) == hmp_samples$sample_id[1]))
expect_equal(result,
calc_obs_props(x, data = "tax_data",
cols = which(colnames(x$data$tax_data) == hmp_samples$sample_id[1])))
})
test_that("Summing counts per taxon", {
result <- calc_taxon_abund(x, "tax_data")
expect_equivalent(sum(x$data$tax_data$`700035949`), result$`700035949`[1])
expect_equal(calc_taxon_abund(x, "tax_data", cols = 4:5),
calc_taxon_abund(x, "tax_data", cols = c("700035949", "700097855")))
result <- calc_taxon_abund(x, "tax_data", groups = hmp_samples$sex)
expect_equal(colnames(result), c("taxon_id", "female", "male"))
total_counts <- sum(x$data$tax_data[, hmp_samples$sample_id])
result <- calc_taxon_abund(x, "tax_data", groups = hmp_samples$sex,
out_names = c("Women", "Men"))
expect_equal(colnames(result), c("taxon_id", "Women", "Men"))
expect_equal(total_counts, sum(result[1, c("Women", "Men")]))
result <- calc_taxon_abund(x, "tax_data", cols = hmp_samples$sample_id,
groups = rep("total", nrow(hmp_samples)))
expect_equivalent(total_counts, result$total[1])
result <- calc_taxon_abund(x, data = "tax_data", cols = hmp_samples$sample_id[1])
expect_equal(result,
calc_taxon_abund(x, data = "tax_data", cols = as.factor(hmp_samples$sample_id[1])))
expect_equal(result,
calc_taxon_abund(x, data = "tax_data",
cols = colnames(x$data$tax_data) == hmp_samples$sample_id[1]))
expect_equal(result,
calc_taxon_abund(x, data = "tax_data",
cols = which(colnames(x$data$tax_data) == hmp_samples$sample_id[1])))
})
test_that("Comparing groups of samples", {
x$data$otu_table <- calc_obs_props(x, data = "tax_data", cols = hmp_samples$sample_id)
x$data$tax_table <- calc_taxon_abund(x, data = "otu_table", cols = hmp_samples$sample_id)
expect_warning(x$data$diff_table <- compare_groups(x, data = "tax_table",
cols = hmp_samples$sample_id,
groups = hmp_samples$body_site))
expect_equal(nrow(x$data$diff_table),
ncol(combn(length(unique(hmp_samples$body_site)), 2)) * nrow(x$data$tax_table))
})
test_that("Rarefying observation counts", {
result <- rarefy_obs(x, "tax_data")
expect_equal(length(unique(colSums(result[, hmp_samples$sample_id]))), 1)
result <- rarefy_obs(x, data = "tax_data", cols = hmp_samples$sample_id[1])
expect_equal(result,
rarefy_obs(x, data = "tax_data", cols = as.factor(hmp_samples$sample_id[1])))
expect_equal(result,
rarefy_obs(x, data = "tax_data",
cols = colnames(x$data$tax_data) == hmp_samples$sample_id[1]))
expect_equal(result,
rarefy_obs(x, data = "tax_data",
cols = which(colnames(x$data$tax_data) == hmp_samples$sample_id[1])))
})
test_that("Converting low counts to zero", {
result <- zero_low_counts(x, "tax_data")
expect_equal(sum(result[, hmp_samples$sample_id] == 1), 0)
result <- zero_low_counts(x, data = "tax_data", cols = hmp_samples$sample_id[1])
expect_equal(result,
zero_low_counts(x, data = "tax_data", cols = as.factor(hmp_samples$sample_id[1])))
expect_equal(result,
zero_low_counts(x, data = "tax_data",
cols = colnames(x$data$tax_data) == hmp_samples$sample_id[1]))
expect_equal(result,
zero_low_counts(x, data = "tax_data",
cols = which(colnames(x$data$tax_data) == hmp_samples$sample_id[1])))
})
competitions <- function(username, password, version = "v5",
baseurl = "https://data.statsbombservices.com/api/"){
comp.url <- paste0(baseurl, version, "/competitions")
raw.comp.api <- GET(url = comp.url, authenticate(username, password))
competitions.string <- rawToChar(raw.comp.api$content)
comp <- fromJSON(competitions.string, flatten = T)
return(comp)
}
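## Illustrative call (not run): requires valid StatsBomb API credentials, and
## httr/jsonlite attached for the GET() and fromJSON() calls above.
if (FALSE) {
  library(httr)
  library(jsonlite)
  comps <- competitions("user@example.com", "secret")
  head(comps)
}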
NULL
NULL
add_min_largest_shortfall_objective <- function(x, budget) {
assertthat::assert_that(
inherits(x, "ConservationProblem"),
is.numeric(budget),
all(is.finite(budget)),
all(budget >= 0.0),
isTRUE(min(budget) > 0),
length(budget) == 1 ||
length(budget) == number_of_zones(x))
if (length(budget) == 1) {
p <- numeric_parameter("budget", budget, lower_limit = 0,
upper_limit = sum(x$planning_unit_costs(),
na.rm = TRUE))
} else {
p <- numeric_parameter_array("budget", budget, x$zone_names(),
lower_limit = 0,
upper_limit = colSums(x$planning_unit_costs(),
na.rm = TRUE))
}
x$add_objective(pproto(
"MinimumLargestShortfallObjective",
Objective,
name = "Minimum largest shortfall objective",
parameters = parameters(p),
apply = function(self, x, y) {
assertthat::assert_that(inherits(x, "OptimizationProblem"),
inherits(y, "ConservationProblem"))
invisible(rcpp_apply_min_largest_shortfall_objective(
x$ptr, y$feature_targets(), y$planning_unit_costs(),
self$parameters$get("budget")[[1]]))
}))
}
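## Hedged usage sketch (not run): assumes prioritizr's simulated datasets and
## pipeline-style problem construction.
if (FALSE) {
  library(prioritizr)
  data(sim_pu_raster, sim_features)
  p <- problem(sim_pu_raster, sim_features) %>%
    add_min_largest_shortfall_objective(budget = 1800) %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions()
  s <- solve(p)
}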
library(filesstrings)
library(xfun)
knitr::knit("vignettes/mra-simulation.Rmd.orig", output = "vignettes/mra-simulation.Rmd", envir = new.env())
gsub_file("vignettes/mra-simulation.Rmd", "figure/", "")
mra_images <- list.files("figure/")[grep(".png", list.files("figure/"))]
file.move(paste0("figure/", mra_images), destinations = "./vignettes/", overwrite = TRUE)
if(length(dir("figure/", all.files = TRUE)) ==0)
file.remove("./figure/*")
file.remove("./figure/")
cache_files <- list.files("cache/")
file.remove(paste0("cache/", cache_files))
file.remove("./cache")
knitr::purl("vignettes/mra-simulation.Rmd.orig", output = "vignettes/mra-simulation.R")
devtools::build_vignettes()
pkgdown::build_site()
tabPanel('Summary', value = 'tab_summary',
fluidPage(
fluidRow(
column(8, align = 'left',
h4('Summary Statistics'),
p('Generate descriptive statistics for continuous data.')
),
column(4, align = 'right',
actionButton(inputId='sumrylink1', label="Help", icon = icon("question-circle"),
onclick ="window.open('https://descriptr.rsquaredacademy.com/reference/ds_summary_stats.html', '_blank')"),
actionButton(inputId='sumrylink3', label="Demo", icon = icon("video-camera"),
onclick ="window.open('https://www.youtube.com/watch?v=cq6_1SQjNmM', '_blank')")
)
),
hr(),
fluidRow(
column(4, align = 'right',
br(),
br(),
h5('Variable:')
),
column(2, align = 'left',
br(),
selectInput("var_summary", label = '',
choices = "", selected = "", width = '150px'
),
bsTooltip("var_summary", "Select a variable.",
"bottom", options = list(container = "body"))
),
column(6, align = 'left',
br(),
br(),
actionButton(inputId = 'submit_summary', label = 'Submit', width = '120px', icon = icon('check')),
bsTooltip("submit_summary", "Click here to view summary statistics.",
"bottom", options = list(container = "body"))
)
),
fluidRow(
br(),
br(),
column(12, align = 'center',
verbatimTextOutput('summary')
)
)
)
)
r_behavior_stream_single <- function(mu, lambda, F_event, F_interim, stream_length,
equilibrium, p0, tuning) {
start_state <- stats::rbinom(1, 1, p0)
if (equilibrium) {
if (start_state) {
b_stream <- F_event$r_eq(1, mu)
} else {
b_stream <- F_interim$r_eq(1, lambda)
}
} else {
if (start_state) {
b_stream <- F_event$r_gen(1, mu)
} else {
b_stream <- F_interim$r_gen(1, lambda)
}
}
cum_length <- b_stream
cum_size <- 1
while (cum_length < stream_length) {
extend_size <- ceiling(tuning * (stream_length - cum_length) / (mu + lambda))
event_times <- F_event$r_gen(n=extend_size, mean = mu)
interim_times <- F_interim$r_gen(n=extend_size, mean = lambda)
b_stream <- append(b_stream, cum_length + cumsum(
if (start_state) c(rbind(interim_times, event_times))
else c(rbind(event_times, interim_times))))
cum_size <- cum_size + 2 * extend_size
cum_length <- b_stream[cum_size]
}
list(start_state=start_state, b_stream = b_stream[b_stream < stream_length])
}
r_behavior_stream <- function(n, mu, lambda, F_event, F_interim, stream_length,
equilibrium = TRUE, p0 = 0, tuning = 2) {
mu_vec <- rep(mu, length.out = n)
lambda_vec <- rep(lambda, length.out = n)
p0_vec <- if (equilibrium) mu_vec / (mu_vec + lambda_vec) else rep(p0, length.out = n)
BS <- list(stream_length = stream_length,
b_streams = mapply(r_behavior_stream_single,
mu = mu_vec,
lambda = lambda_vec,
p0 = p0_vec,
MoreArgs = list(F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
tuning = tuning),
SIMPLIFY = FALSE))
class(BS) <- "behavior_stream"
return(BS)
}
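## Example (hedged sketch): assuming the equilibrium-distribution
## constructors such as F_exp() exported alongside these functions, draw 5
## behavior streams with mean event duration 3 and mean interim time 10
## over a 100-unit session; r_PIR()/r_MTS() below sample the same process
## through interval-recording and momentary time-sampling procedures.
# BS <- r_behavior_stream(n = 5, mu = 3, lambda = 10,
#                         F_event = F_exp(), F_interim = F_exp(),
#                         stream_length = 100)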
r_PIR <- function(n, mu, lambda, stream_length, F_event, F_interim,
interval_length, rest_length = 0, summarize = FALSE,
equilibrium = TRUE, p0 = 0, tuning = 2){
if (equilibrium) p0 <- mu / (mu + lambda)
n_intervals <- floor(stream_length / interval_length)
start_time <- interval_length * (0:(n_intervals - 1))
end_time <- start_time + interval_length - rest_length
samples <- replicate(n, {
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
IntRec_single(b_stream = BS, start_time = start_time, end_time = end_time)
})
if (summarize) colMeans(samples) else t(samples)
}
r_WIR <- function(n, mu, lambda, stream_length, F_event, F_interim,
interval_length, rest_length = 0, summarize = FALSE,
equilibrium = TRUE, p0 = 0, tuning = 2){
if (equilibrium) p0 <- mu / (mu + lambda)
n_intervals <- floor(stream_length / interval_length)
start_time <- interval_length * (0:(n_intervals - 1))
end_time <- start_time + interval_length - rest_length
samples <- replicate(n, {
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
IntRec_single(b_stream = BS, start_time = start_time, end_time = end_time, partial = FALSE)
})
if (summarize) colMeans(samples) else t(samples)
}
r_MTS <- function(n, mu, lambda, stream_length, F_event, F_interim,
interval_length, summarize = FALSE, equilibrium = TRUE,
p0 = 0, tuning = 2) {
if (equilibrium) p0 <- mu / (mu + lambda)
  moments <- seq(interval_length * summarize, stream_length, interval_length)  # summarize=TRUE drops the moment at time 0
samples <- replicate(n, {
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
MTS_single(b_stream = BS, moments = moments)
})
if(summarize) colMeans(samples) else t(samples)
}
r_continuous_recording <- function(n, mu, lambda, stream_length, F_event, F_interim,
equilibrium = TRUE, p0 = 0, tuning = 2) {
if (equilibrium) p0 <- mu / (mu + lambda)
samples <- replicate(n, {
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
CDR_single(b_stream = BS, stream_length = stream_length)
})
samples
}
r_event_counting <- function(n, mu, lambda, stream_length, F_event, F_interim,
equilibrium = TRUE, p0 = 0, tuning = 2) {
if (equilibrium) p0 <- mu / (mu + lambda)
samples <- replicate(n,{
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
floor((length(BS$b_stream) + 1 - BS$start_state)/2)
})
samples
}
r_AIR <- function(n, mu, lambda, stream_length, F_event, F_interim,
interval_length, rest_length = 0,
equilibrium = TRUE, p0 = 0, tuning = 2) {
if (equilibrium) p0 <- mu / (mu + lambda)
moments <- seq(0, stream_length, interval_length)
n_intervals <- floor(stream_length / interval_length)
start_time <- interval_length * (0:(n_intervals - 1))
end_time <- start_time + interval_length - rest_length
samples <- replicate(n, {
BS <- r_behavior_stream_single(mu = mu, lambda = lambda,
F_event = F_event, F_interim = F_interim,
stream_length = stream_length,
equilibrium = equilibrium,
p0 = p0, tuning = tuning)
augmented_recording_single(b_stream = BS, moments = moments,
start_time = start_time, end_time = end_time)
})
samples
} |
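## jNewRapGarch: Newton-Raphson search for GARCH-type parameters. The
## objective ham(), its gradient dham() and Hessian ddham() are assumed to
## be defined elsewhere in this source. The step length is capped so that
## each parameter stays positive and b[1] + b[2] < 1, and a ten-point grid
## search along the Newton direction keeps the best objective value.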
jNewRapGarch <-
function(x0, y){
b <- x0
N <- length(x0)
threshold <- 10 ^ (-10)
maxloop <- 100
delta <- threshold + 1
numloop <- 1
while (abs(delta) > threshold & numloop < maxloop){
numloop <- numloop + 1
maxvalue <- ham(b, y)
Func <- dham(b, y)
Grad <- ddham(b, y)
Ginv <- solve(Grad)
Inov <- Func %*% Ginv
a <- b
stepp <- 1
if (Inov[1] > 0) {
stepp <- min(stepp, b[1] / Inov[1])
}
if (Inov[2] > 0) {
stepp <- min(stepp, b[2] / Inov[2])
}
if (Inov[3] > 0) {
stepp <- min(stepp, b[3] / Inov[3])
}
    if (Inov[1] + Inov[2] < 0) {
      stepp <- min(stepp, (b[1] + b[2] - 0.99999) / (Inov[1] + Inov[2]))
    }
Record <- 0.1
for (i in 1:10){
for (j in 1:N){
b[j] <- a[j] - Inov[j] *(i-1) * stepp / 10
}
tam <- ham(b, y)
if (tam > maxvalue) {
maxvalue <- tam
Record <- i
}
}
for (j in 1:N){
b[j] <- a[j] - Inov[j] * (Record - 0.1) * stepp / 10
}
delta <- abs(max(Inov)) * Record * stepp / 10
}
return(b)
} |
context("find.clusters tests")
test_that("find.clusters works with pre-defined clusters", {
skip_on_cran()
data(nancycats)
f <- file()
options(adegenet.testcon = f)
twos <- paste(rep(2, nPop(nancycats)), collapse = "\n")
write(twos, f)
expect_warning(capture.output(res <- find.clusters(nancycats, clust = pop(nancycats), n.pca = 100)))
expect_equal(length(levels(res$grp)), nPop(nancycats) * 2)
expect_equal(length(res$grp), nInd(nancycats))
options(adegenet.testcon = stdin())
close(f)
}) |
test_that("accu_model", {
testthat::skip_if_not_installed("e1071")
testthat::expect_true(round(
accu_model(
f = Sex ~ GOL + NOL + BNL,
x = Howells,
y = Howells,
byPop = FALSE
)[[2]][[3]][[1]],
3
) == 0.789)
testthat::expect_true(round(
accu_model(
f = Sex ~ GOL + NOL + BNL,
x = Howells,
y = Howells,
byPop = TRUE,
Pop = 2
)[[2]][[1]][[3]][[1]],
3
) == 0.91)
set.seed(123)
testthat::expect_true(round(
accu_model(
f = Sex ~ GOL + NOL + BNL,
x = Howells,
byPop = FALSE
)[[2]][[3]][[1]],
3
) == 0.811)
set.seed(123)
testthat::expect_true(round(
accu_model(
f = Sex ~ GOL + NOL + BNL,
x = Howells,
byPop = TRUE,
Pop = 2
)[[2]][[3]][[3]][[1]],
3
) == 0.762)
testthat::expect_error(
accu_model(
f = Sex ~ GOL + NOL + BNL,
x = matrix(NA),
byPop = 50,
Pop = 200,
y = matrix(NA),
plot = 98,
Sex = 500,
post. = "ll",
ref. = "kl"
)
)
testthat::expect_warning(accu_model(
f = Sex ~ GOL + NOL + BNL,
x = Howells,
byPop = TRUE,
Pop = NULL
))
}) |
parseRayStation <- function(x, planInfo=FALSE, courseAsID=FALSE, ...) {
planInfo <- as.character(planInfo)
getElem <- function(pattern, ll, trim=TRUE, iCase=FALSE, collWS=TRUE) {
line <- ll[grep(pattern, ll)]
elem <- sub("^.+?:[[:blank:]]*([[:alnum:][:punct:][:blank:]]+$)", "\\1",
line, ignore.case=iCase, perl=TRUE)
elem <- if(trim) {
trimWS(elem, side="both")
} else {
elem
}
if(collWS) {
collWS(elem)
} else {
elem
}
}
    getDoseUnit <- function(ll) {
        ## NOTE: every header pattern in this parser lost the text after its
        ## leading "#" in this source; the RayStation field names used below
        ## are reconstructions and should be treated as assumptions.
        line <- ll[grep("^#Dose unit:", ll)]
        elem <- sub("^.+:[[:blank:]]+(GY|CGY)$", "\\1", line, perl=TRUE, ignore.case=TRUE)
        toupper(trimWS(elem))
    }
sStart <- grep("^
sLen <- diff(c(sStart, length(x)+1))
if((length(sLen) < 1L) || all(sLen < 1L)) {
stop("No structures found")
}
structList <- Map(function(start, len) x[start:(start+len-1)], sStart, sLen)
header <- x[seq_len(sStart[1]-1)]
patName <- getElem("
patID <- getElem("^
plan <- getElem("^
DVHdate <- NA_character_
doseRx <- if(tolower(planInfo) == "doserx") {
doseRxUnit <- toupper(sub("^.+[[:blank:]][.[:digit:]]+(c?Gy).*$", "\\1",
plan, perl=TRUE, ignore.case=TRUE))
if(!grepl("^(GY|CGY)$", doseRxUnit)) {
warning("Could not determine dose Rx unit")
doseRxUnit <- NA_character_
}
drx <- sub("^.+[[:blank:]]([.[:digit:]]+)c?Gy.*$", "\\1",
plan, perl=TRUE, ignore.case=TRUE)
as.numeric(drx)
} else {
doseRxUnit <- NA_character_
warning("No info on prescribed dose")
NA_real_
}
isoDoseRx <- if(tolower(planInfo) == "doserx") {
warning("Iso-dose-Rx is assumed to be 100")
100
} else {
warning("No info on % for dose")
NA_real_
}
getDVH <- function(strct, info) {
doseRx <- info$doseRx
doseRxUnit <- info$doseRxUnit
isoDoseRx <- info$isoDoseRx
structure <- getElem("^
structVol <- NA_real_
doseMin <- NA_real_
doseMax <- NA_real_
doseAvg <- NA_real_
doseMed <- NA_real_
doseMode <- NA_real_
doseSD <- NA_real_
volumeUnit <- "CC"
doseUnit <- getDoseUnit(strct)
if(!grepl("^(GY|CGY)$", doseUnit)) {
warning("Could not determine dose measurement unit")
doseUnit <- NA_character_
}
if(!is.na(doseUnit) && !is.na(doseRxUnit)) {
if((doseUnit == "GY") && (doseRxUnit == "CGY")) {
doseRx <- doseRx/100
} else if((doseUnit == "CGY") && (doseRxUnit == "GY")) {
doseRx <- doseRx*100
}
}
colHead <- grep("^
dvhStart <- colHead+1
dvhLen <- length(strct) - dvhStart + 1
if((length(dvhLen) < 1L) || dvhLen < 1L) {
stop("No DVH data found")
}
if(all(!nzchar(strct[dvhStart:length(strct)]))) {
return(NULL)
}
dvh <- data.matrix(read.table(text=strct[dvhStart:length(strct)],
header=FALSE, stringsAsFactors=FALSE,
colClasses=rep("numeric", 2),
comment.char="", nrows=dvhLen))
colnames(dvh) <- c("dose", "volumeRel")
dvh <- cbind(dvh, volume=structVol*(dvh[ , "volumeRel"]/100))
dvh <- cbind(dvh, doseRel=dvh[ , "dose"]*isoDoseRx / doseRx)
stopifnot(isIncreasing(dvh))
DVHtype <- dvhType(dvh)
DVH <- list(dvh=dvh,
patName=info$patName,
patID=info$patID,
date=info$date,
DVHtype=DVHtype,
plan=info$plan,
course=info$course,
quadrant=info$quadrant,
structure=structure,
structVol=structVol,
doseUnit=doseUnit,
volumeUnit=volumeUnit,
doseMin=doseMin,
doseMax=doseMax,
doseRx=doseRx,
doseRxUnit=doseRxUnit,
isoDoseRx=isoDoseRx,
doseAvg=doseAvg,
doseMed=doseMed,
doseMode=doseMode,
doseSD=doseSD)
if(DVHtype == "differential") {
warning("I assume differential DVH is per unit dose\nbut I have no information on this")
DVH$dvh <- convertDVH(dvh, toType="cumulative",
toDoseUnit="asis", perDose=TRUE)
DVH$dvhDiff <- dvh
}
class(DVH) <- "DVHs"
return(DVH)
}
info <- list(patID=patID, patName=patName, date=DVHdate, plan=plan,
doseRx=doseRx, doseRxUnit=doseRxUnit, isoDoseRx=isoDoseRx)
dvhL <- lapply(structList, getDVH, info=info)
dvhL <- Filter(Negate(is.null), dvhL)
names(dvhL) <- sapply(dvhL, function(y) y$structure)
if(length(unique(names(dvhL))) < length(dvhL)) {
warning("Some structures have the same name - this can lead to problems")
}
class(dvhL) <- "DVHLst"
attr(dvhL, which="byPat") <- TRUE
return(dvhL)
} |
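## Usage sketch (hedged): x is the character vector of lines from a
## RayStation DVH text export, e.g. x <- readLines("dvh.txt").
## parseRayStation(x) returns a "DVHLst" object with one "DVHs" entry per
## structure found in the file.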
tvPhi<- function (x, nstep = 10, ...)
{
if (!inherits(x, "tvvar"))
stop("\nPlease provide an object of class 'tvvar', generated by 'tvVAR()'.\n")
nstep <- abs(as.integer(nstep))
neq <- x$neq
p <- x$p
obs <- x$obs
A <- tvAcoef(x)
if (nstep >= p)
{
As <- array(0, dim = c(obs,neq, neq, nstep + 1))
for (i in (p + 1):(nstep + 1))
{
As[,, , i] <- array(0, dim=c(obs,neq, neq))
}
}
else
{
As <- array(0, dim = c(obs, neq, neq, p))
}
for (i in 1:p)
{
As[,, , i] <- A[[i]]
}
Phi <- array(0, dim = c(obs, neq, neq, nstep + 1))
for ( t in 1:obs)
{
Phi[t, , , 1] <- diag(neq)
Phi[t, , , 2] <- Phi[t,,,1] %*% As[t, , , 1]
if (nstep > 1) {
for (i in 3:(nstep + 1))
{
tmp1 <- Phi[t, , , 1] %*% As[t,,,i-1]
tmp2 <- matrix(0, nrow = neq, ncol = neq)
idx <- (i - 2):1
for (j in 1:(i - 2))
{
tmp2 <- tmp2 + Phi[t,, , j + 1] %*% As[t,, , idx[j]]
}
Phi[t, , , i] <- tmp1 + tmp2
}
}
}
return(Phi)
} |
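## Example (hedged): for a fit produced by tvVAR(), tvPhi(fit, nstep = 8)
## returns the time-varying moving-average coefficient matrices as an
## obs x neq x neq x (nstep + 1) array, with Phi[t, , , 1] = diag(neq)
## at every observation t.
# Phi <- tvPhi(fit, nstep = 8)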
FI.brm <-
function( params,
theta,
type = c("expected", "observed"),
resp = NULL )
{
  ## resolve the default c("expected", "observed") to a single choice
  type <- match.arg(type)
  if( type == "expected" )
    resp <- NULL
  params <- rbind(params)
if( is.null(resp) & type == "observed" )
stop( "need response scalar/vector to calculate observed information" )
if( mode(params) != "numeric" | mode(theta) != "numeric" )
stop( "params and theta need to be numeric" )
if( !is.null(resp) & mode(resp) != "numeric" )
stop( "resp needs to be numeric" )
if( type == "expected" ){
p <- p.brm(theta, params)
q <- 1 - p
pder1 <- pder1.brm(theta, params)
info <- pder1^2 / ( p * q )
}
if( type == "observed" ){
info <- -lder2.brm(resp, theta, params)
}
if( length(theta) == 1 ){
i.info <- info
t.info <- sum(info)
} else{
i.info <- info
t.info <- rowSums(i.info)
}
sem <- ifelse(test = signif(t.info) > 0, yes = sqrt( 1 / t.info ), no = NA)
return( list( item = drop(i.info), test = t.info, sem = sem, type = type ) )
} |
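## Example (hedged sketch): p.brm(), pder1.brm() and lder2.brm() are assumed
## from this same source. Expected Fisher information for two 3PL items at
## three ability values:
# params <- cbind(a = c(1.2, 0.8), b = c(0, 1), c = c(0.2, 0.25))
# FI.brm(params, theta = c(-1, 0, 1), type = "expected")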
test_that("example works", {
df <- adnimerge %>% dplyr::filter(VISCODE == 'bl')
model <- df %>% aba_model() %>%
set_groups(everyone()) %>%
set_outcomes(PET_ABETA_STATUS_bl) %>%
set_predictors(
PLASMA_PTAU181_bl,
PLASMA_NFL_bl,
c(PLASMA_PTAU181_bl, PLASMA_NFL_bl)
) %>%
set_covariates(AGE, GENDER, EDUCATION) %>%
set_stats('glm') %>%
fit()
model_summary <- model %>% aba_summary()
expect_error(
model_screen <- model %>%
aba_screen(
threshold = seq(0.25, 0.75, by = 0.25),
cost_multiplier = c(4, 8),
include_n = 1000,
ntrials = 3,
verbose = TRUE
),
NA
)
}) |
grid.pattern_rose <- function(x = c(0, 0, 1, 1), y = c(1, 0, 0, 1), id = 1L, ...,
colour = gp$col %||% "grey20",
fill = gp$fill %||% "grey80",
angle = 30, density = 0.2,
spacing = 0.05, xoffset = 0, yoffset = 0,
frequency = 0.1,
grid = "square", type = NULL, subtype = NULL,
rot = 0,
alpha = gp$alpha %||% NA_real_, linetype = gp$lty %||% 1,
size = gp$lwd %||% 1,
use_R4.1_clipping = getOption("ggpattern_use_R4.1_clipping",
getOption("ggpattern_use_R4.1_features")),
png_device = NULL, res = getOption("ggpattern_res", 72),
default.units = "npc", name = NULL,
gp = gpar(), draw = TRUE, vp = NULL) {
if (missing(colour) && hasName(l <- list(...), "color")) colour <- l$color
grid.pattern("rose", x, y, id,
colour = colour, fill = fill, angle = angle,
density = density, spacing = spacing, xoffset = xoffset, yoffset = yoffset,
scale = scale, frequency = frequency,
grid = grid, type = type, subtype = subtype, rot = rot,
use_R4.1_clipping = use_R4.1_clipping, png_device = png_device, res = res,
alpha = alpha, linetype = linetype, size = size,
default.units = default.units, name = name, gp = gp , draw = draw, vp = vp)
}
create_pattern_rose <- function(params, boundary_df, aspect_ratio, legend = FALSE) {
default.units <- "bigpts"
boundary_df <- convert_polygon_df_units(boundary_df, default.units)
params <- convert_params_units(params, default.units)
vpm <- get_vp_measurements(default.units)
spacing <- params$pattern_spacing
grid <- params$pattern_grid
grid_xy <- get_xy_grid(params, vpm)
fill <- alpha(params$pattern_fill, params$pattern_alpha)
col <- alpha(params$pattern_colour, params$pattern_alpha)
lwd <- params$pattern_size * .pt
lty <- params$pattern_linetype
density <- params$pattern_density
rot <- params$pattern_rot
frequency <- params$pattern_frequency
n_par <- max(lengths(list(fill, col, lwd, lty, density, rot, frequency)))
fill <- rep(fill, length.out = n_par)
col <- rep(col, length.out = n_par)
lwd <- rep(lwd, length.out = n_par)
lty <- rep(lty, length.out = n_par)
density <- rep(density, length.out = n_par)
rot <- rep(rot, length.out = n_par)
frequency <- rep(frequency, length.out = n_par)
density_max <- max(density)
radius_mult <- switch(grid, hex = 0.578, 0.5)
radius_max <- radius_mult * spacing * density_max
if (is.null(params$pattern_type) || is.na(params$pattern_type))
params$pattern_type <- switch(grid, square = "square", "hex")
m_pat <- get_pattern_matrix(params$pattern_type, params$pattern_subtype, grid_xy, n_par)
gl <- gList()
for (i_par in seq(n_par)) {
radius_outer <- radius_mult * spacing * density[i_par]
xy_rose <- get_xy_rose(frequency[i_par], params, radius_outer, rot[i_par])
xy_par <- get_xy_par(grid_xy, i_par, m_pat, grid, spacing)
if (length(xy_par$x) == 0) next
xy_par <- rotate_xy(xy_par$x, xy_par$y, params$pattern_angle, vpm$x, vpm$y)
gp <- gpar(fill = fill[i_par], col = col[i_par], lwd = lwd[i_par], lty = lty[i_par])
name <- paste0("rose.", i_par)
grob <- points_to_rose_grob(xy_par, xy_rose, gp, default.units, name)
gl <- append_gList(gl, grob)
}
clippee <- gTree(children = gl)
clipper <- convert_polygon_df_to_polygon_grob(boundary_df, default.units = "bigpts")
clippingPathGrob(clippee, clipper,
use_R4.1_clipping = params$pattern_use_R4.1_clipping,
png_device = params$pattern_png_device,
res = params$pattern_res, name = "rose")
}
get_xy_rose <- function(frequency, params, radius_outer, rot) {
theta <- to_radians(seq.int(from = 0, to = 12 * 360, by = 3))
x <- radius_outer * cos(frequency * theta) * cos(theta)
y <- radius_outer * cos(frequency * theta) * sin(theta)
rose_angle <- rot + params$pattern_angle
rotate_xy(x, y, rose_angle, 0, 0)
}
points_to_rose_grob <- function(xy_par, xy_rose, gp, default.units, name) {
points_mat <- as.data.frame(xy_par)
df_polygon <- as.data.frame(xy_rose)
l_xy <- lapply(seq(nrow(points_mat)),
function(i_r) {
x0 <- points_mat[i_r, 1]
y0 <- points_mat[i_r, 2]
df <- df_polygon
df$x <- df$x + x0
df$y <- df$y + y0
df
})
df <- do.call(rbind, l_xy)
if (is.null(df)) {
nullGrob()
} else {
df$id <- rep(seq(nrow(points_mat)), each = nrow(df_polygon))
pathGrob(x = df$x, y = df$y, id = df$id,
default.units = default.units, gp = gp, name = name)
}
} |
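## Example (hedged): fill the unit square with rose curves; for the curve
## r = cos(k * theta), frequency k draws k petals when k is odd and 2k
## petals when k is even.
# grid::grid.newpage()
# grid.pattern_rose(x = c(0, 0, 1, 1), y = c(1, 0, 0, 1),
#                   spacing = 0.15, density = 0.5, frequency = 3, fill = "navy")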
generate_blin <- function(S, L, tmax, lag=1, tau=1, sigmaY=1,
muAB=0, sigmaAB=1, rankA=S, rankB=L,
use_cov=TRUE, seed=NA, sparse=NA)
{
binary <- FALSE
gen_type="biten"
if(is.numeric(seed)){ set.seed(seed) }
Y <- array(rnorm(S*L*tmax, 0, sigmaY), c(S, L, tmax))
if(tmax <= lag){
stop("Input 'tmax' must be larger than lag.")
}
if(use_cov){
X1 <- array(1, c(S,L,tmax,1))
X2 <- array(sample(c(0,1), S*L*tmax, replace=T), c(S,L,tmax,1))
X3 <- array(rnorm(S*L*tmax), c(S,L,tmax,1))
X <- abind::abind(X1,X2,X3)
p <- dim(X)[4]
beta_true <- matrix(rep(1,p), nrow=1)
Xbeta <- drop(amprod(X, beta_true, 4))
} else {
X <- NULL
Xbeta <- 0
beta_true <- NA
}
U_true <- matrix(rnorm(S*rankA, muAB, sigmaAB), S, rankA)
V_true <- matrix(rnorm(S*rankA, muAB, sigmaAB), S, rankA)
W_true <- matrix(rnorm(L*rankB, muAB, sigmaAB), L, rankB)
Z_true <- matrix(rnorm(L*rankB, -muAB, sigmaAB), L, rankB)
A_true <- tcrossprod(U_true, V_true)
BT_true <- tcrossprod(Z_true, W_true)
if(is.numeric(sparse)){
if(sparse >=0 & sparse <= 1){
Aind <- matrix(sample(c(0,1), S^2, replace=T, prob=c(1-sparse, sparse)), S, S)
Bind <- matrix(sample(c(0,1), L^2, replace=T, prob=c(1-sparse, sparse)), L, L)
A_true <- Aind*A_true
BT_true <- Bind*BT_true
} else {stop("Input 'sparse' must be a numeric between zero and 1 or FALSE")}
}
if (strtrim(gen_type,3) == "bit") {
A_true <- A_true*S^1.5/rankA
BT_true <- BT_true*L^1.5/rankB
A_true <- A_true / 2 / max(abs(A_true))
BT_true <- BT_true / 2 / max(abs(BT_true))
E <- tau*array(rnorm(S*L*tmax), c(S,L,tmax))
for(t in (lag+1):tmax){
if(lag>1){
D <- apply(Y[,,(t-lag):(t-1),drop=FALSE], 1:2, sum)
} else {
D <- Y[,,t-1,drop=TRUE]
}
if(use_cov){
Xbt <- Xbeta[,,t, drop=TRUE]
} else {
Xbt <- 0
}
Y[,,t] <- Xbt + A_true %*%D + D %*% t(BT_true) + E[,,t,drop=TRUE]
}
} else { stop("Invalid model type for prediction")}
return(list(Y=Y, X=X, E=E, beta=beta_true, A=t(A_true), B=t(BT_true), call=match.call()))
} |
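## Example (hedged): simulate a 10 x 8 bipartite longitudinal network over
## 50 time points from the BLIN model (amprod() is assumed from this
## source's tensor utilities):
# sim <- generate_blin(S = 10, L = 8, tmax = 50, lag = 1, seed = 1)
# dim(sim$Y)   # 10 x 8 x 50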
print.occurrence.threshold <- function(x, ...) {
cat("Evaluation statistic:", x$statistic, "\n")
cat("\n")
cat("Moments for thresholds:", "\n")
cat("\n")
print( summary(x$thresholds) )
cat("\n")
if(x$statistic == "delta.ss") {
cat("Probability threshold with minimum delta sensitivity/specificity = ",
names(x$thresholds)[min(which(x$thresholds == min(x$thresholds)))], "\n")
}
else if (x$statistic == "sum.ss") {
cat("Probability threshold with maximum cummlative sensitivity/specificity = ",
names(x$thresholds)[min(which(x$thresholds == max(x$thresholds)))], "\n")
}
else if (x$statistic == "kappa") {
cat("Probability threshold with maximum kappa = ",
names(x$thresholds)[min(which(x$thresholds == max(x$thresholds)))], "\n")
}
} |
NULL
qqplot_RMAWGEN_Tx <- function(Tx_mes, Tx_gen, Tn_gen, Tn_mes,
                              Tx_spline=NULL, Tn_spline=NULL,
                              xlab="observed", ylab="simulated",
                              when=1:nrow(Tx_mes), main=names(Tx_gen),
                              station, pdf=NULL, xlim=range(Tx_mes), ylim=xlim,
                              cex=0.4, cex.main=1.0, cex.lab=1.0, cex.axis=1.0) {
	if (!is.null(pdf)) pdf(pdf)
	N <- length(main)
	Q <- as.integer(N/2)
	par(mfrow=c(Q,Q))
	for (i in 1:N) {
		if (is.null(Tx_spline)) {
			qqplot(Tx_mes[when,station], Tx_gen[[i]][when,station],
			       xlab=xlab, ylab=ylab, main=main[i], cex=cex,
			       cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
			       xlim=xlim, ylim=ylim)
		} else {
			qqplot(Tx_mes[when,station]-Tx_spline[when,station],
			       Tx_gen[[i]][when,station]-Tx_spline[when,station],
			       xlab=xlab, ylab=ylab, main=main[i], cex=cex,
			       cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
			       xlim=xlim, ylim=ylim)
		}
		abline(0,1)
	}
	if (!is.null(pdf)) dev.off()
}
NULL
qqplot_RMAWGEN_Tn <- function(Tx_mes, Tx_gen, Tn_gen, Tn_mes,
                              Tx_spline=NULL, Tn_spline=NULL,
                              xlab="observed", ylab="simulated",
                              when=1:nrow(Tn_mes), main=names(Tn_gen),
                              station, pdf=NULL, xlim=range(Tn_mes), ylim=xlim,
                              cex=0.4, cex.main=1.0, cex.lab=1.0, cex.axis=1.0) {
	if (!is.null(pdf)) pdf(pdf)
	N <- length(main)
	Q <- as.integer(N/2)
	par(mfrow=c(Q,Q))
	for (i in 1:N) {
		if (is.null(Tn_spline)) {
			qqplot(Tn_mes[when,station], Tn_gen[[i]][when,station],
			       xlab=xlab, ylab=ylab, main=main[i], cex=cex,
			       cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
			       xlim=xlim, ylim=ylim)
		} else {
			qqplot(Tn_mes[when,station]-Tn_spline[when,station],
			       Tn_gen[[i]][when,station]-Tn_spline[when,station],
			       xlab=xlab, ylab=ylab, main=main[i], cex=cex,
			       cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
			       xlim=xlim, ylim=ylim)
		}
		abline(0,1)
	}
	if (!is.null(pdf)) dev.off()
}
NULL
qqplot_RMAWGEN_deltaT <- function(Tx_mes, Tx_gen, Tn_gen, Tn_mes,
                                  xlab="observed", ylab="simulated",
                                  when=1:nrow(Tx_mes), main=names(Tx_gen),
                                  station, pdf=NULL,
                                  xlim=range(Tx_mes-Tn_mes), ylim=xlim,
                                  cex=0.4, cex.main=1.0, cex.lab=1.0, cex.axis=1.0) {
	if (!is.null(pdf)) pdf(pdf)
	N <- length(main)
	Q <- as.integer(N/2)
	par(mfrow=c(Q,Q))
	for (i in 1:N) {
		qqplot(Tx_mes[when,station]-Tn_mes[when,station],
		       Tx_gen[[i]][when,station]-Tn_gen[[i]][when,station],
		       xlab=xlab, ylab=ylab, main=main[i], cex=cex,
		       cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
		       xlim=xlim, ylim=ylim)
		abline(0,1)
	}
	if (!is.null(pdf)) dev.off()
}
NULL
qqplot_RMAWGEN_prec <- function(prec_mes, prec_gen,
                                xlab="observed", ylab="simulated",
                                when=1:nrow(prec_mes), main=names(prec_gen),
                                station, pdf=NULL, xlim=range(prec_mes), ylim=xlim,
                                cex=0.4, cex.main=1.0, cex.lab=1.0, cex.axis=1.0,
                                lag=1) {
	if (!is.null(pdf)) pdf(pdf)
	N <- length(main)
	Q <- as.integer(N/2)
	par(mfrow=c(Q,Q))
	for (i in 1:N) {
		qqplot.lagged(x=prec_mes[when,station], y=prec_gen[[i]][when,station],
		              lag=lag, xlab=xlab, ylab=ylab, main=main[i], cex=cex,
		              cex.main=cex.main, cex.lab=cex.lab, cex.axis=cex.axis,
		              xlim=xlim, ylim=ylim)
		abline(0,1)
	}
	if (!is.null(pdf)) dev.off()
}
gnlmm3 <- function(y=NULL, distribution="normal", mu=NULL, shape=NULL,
nest=NULL, family=NULL, linear=NULL, pmu=NULL, pshape=NULL,
pfamily=NULL, psd=NULL, exact=FALSE, wt=1, scale=NULL, points=10,
common=FALSE, delta=1, envir=parent.frame(), print.level=0,
typsize=abs(p), ndigit=10, gradtol=0.00001, stepmax=10*sqrt(p%*%p),
steptol=0.00001, iterlim=100, fscale=1){
pburr <- function(q, m, s, f) 1-(1+(q/m)^s)^-f
pglogis <- function(y, m, s, f) (1+exp(-sqrt(3)*(y-m)/(s*pi)))^-f
pgweibull <- function(y, s, m, f) (1-exp(-(y/m)^s))^f
phjorth <- function(y, m, s, f) 1-(1+s*y)^(-f/s)*exp(-(y/m)^2/2)
pginvgauss <- function(y, m, s, f)
.C("pginvgauss_c",
as.double(y),
as.double(m),
as.double(s),
as.double(f),
len=as.integer(n),
eps=as.double(1.0e-6),
pts=as.integer(5),
max=as.integer(16),
err=integer(1),
res=double(n),
PACKAGE="repeated")$res
ppowexp <- function(y, m, s, f){
z <- .C("ppowexp_c",
as.double(y),
as.double(m),
as.double(s),
as.double(f),
len=as.integer(n),
eps=as.double(1.0e-6),
pts=as.integer(5),
max=as.integer(16),
err=integer(1),
res=double(n),
PACKAGE="repeated")$res
ifelse(y-m>0,0.5+z,0.5-z)}
dpvfpois <- function(y, m, s, f)
.C("dpvfp_c",
as.integer(y),
as.double(m),
as.double(s),
as.double(f),
as.integer(length(y)),
as.double(rep(1,length(y))),
res=double(length(y)),
PACKAGE="repeated")$res
pskewlaplace <- function(q,m,s,f){
u <- (q-m)/s
ifelse(u>0,1-exp(-f*abs(u))/(1+f^2),f^2*exp(-abs(u)/f)/(1+f^2))}
call <- sys.call()
distribution <- match.arg(distribution,c("normal","inverse Gauss",
"logistic","Hjorth","gamma","Burr","Weibull","extreme value",
"Student t","power exponential","power variance function Poisson",
"skew Laplace"))
if(common){
if(sum(is.function(mu)+is.function(shape)+is.function(family))<2&&sum(inherits(mu,"formula")+inherits(shape,"formula")+inherits(family,"formula"))<2)
stop("with common parameters, at least two of mu, shape, and family must be functions or formulae")
if((!is.function(mu)&&!inherits(mu,"formula")&&!is.null(mu))||(!is.function(shape)&&!inherits(shape,"formula")&&!is.null(shape))||(!is.function(family)&&!inherits(family,"formula")&&!is.null(family)))
stop("with common parameters, mu, shape, and family must either be functions, formulae, or NULL")
if(!is.null(linear))stop("linear cannot be used with common parameters")}
if(!is.null(scale))scale <- match.arg(scale,c("identity","log",
"reciprocal","exp"))
npl <- length(pmu)
nps <- length(pshape)
npf <- length(pfamily)
if(is.null(psd))stop("An initial value of psd must be supplied")
np <- npl+nps+npf+1
n <- if(inherits(envir,"repeated")||inherits(envir,"response"))sum(nobs(envir))
else if(inherits(envir,"data.frame"))dim(envir)[1]
else if(is.vector(y,mode="numeric"))length(y)
else if(is.matrix(y))dim(y)[1]
else sum(nobs(y))
if(n==0)stop(paste(deparse(substitute(y)),"not found or of incorrect type"))
respenv <- exists(deparse(substitute(y)),envir=parent.frame())&&
inherits(y,"repeated")&&!inherits(envir,"repeated")
if(respenv){
if(dim(y$response$y)[2]>1)
stop("gnlr3 only handles univariate responses")
if(!is.null(y$NAs)&&any(y$NAs))
stop("gnlr3 does not handle data with NAs")}
envname <- if(respenv)deparse(substitute(y))
else if(inherits(envir,"repeated")||inherits(envir,"response"))
deparse(substitute(envir))
else NULL
lin1 <- lin2 <- lin3 <- NULL
if(is.list(linear)){
lin1 <- linear[[1]]
lin2 <- linear[[2]]
lin3 <- linear[[3]]}
else lin1 <- linear
if(inherits(lin1,"formula")&&is.null(mu)){
mu <- lin1
lin1 <- NULL}
if(inherits(lin2,"formula")&&is.null(shape)){
shape <- lin2
lin2 <- NULL}
if(inherits(lin3,"formula")&&is.null(family)){
family <- lin3
lin3 <- NULL}
if(inherits(lin1,"formula")){
lin1model <- if(respenv){
if(!is.null(attr(finterp(lin1,.envir=y,.name=envname),"parameters")))
attr(finterp(lin1,.envir=y,.name=envname),"model")}
else {if(!is.null(attr(finterp(lin1,.envir=envir,.name=envname),"parameters")))
attr(finterp(lin1,.envir=envir,.name=envname),"model")}}
else lin1model <- NULL
if(inherits(lin2,"formula")){
lin2model <- if(respenv){
if(!is.null(attr(finterp(lin2,.envir=y,.name=envname),"parameters")))
attr(finterp(lin2,.envir=y,.name=envname),"model")}
else {if(!is.null(attr(finterp(lin2,.envir=envir,.name=envname),"parameters")))
attr(finterp(lin2,.envir=envir,.name=envname),"model")}}
else lin2model <- NULL
if(inherits(lin3,"formula")){
lin3model <- if(respenv){
if(!is.null(attr(finterp(lin3,.envir=y,.name=envname),"parameters")))
attr(finterp(lin3,.envir=y,.name=envname),"model")}
else {if(!is.null(attr(finterp(lin3,.envir=envir,.name=envname),"parameters")))
attr(finterp(lin3,.envir=envir,.name=envname),"model")}}
else lin3model <- NULL
if(inherits(lin1,"formula")){
tmp <- attributes(if(respenv)finterp(lin1,.envir=y,.name=envname)
else finterp(lin1,.envir=envir,.name=envname))
lf1 <- length(tmp$parameters)
if(!is.character(tmp$model))stop("linear must be a W&R formula")
if(length(tmp$model)==1){
if(is.null(mu))mu <- ~1
else stop("linear must contain covariates")}
rm(tmp)}
else lf1 <- 0
if(inherits(lin2,"formula")){
tmp <- attributes(if(respenv)finterp(lin2,.envir=y,.name=envname)
else finterp(lin2,.envir=envir,.name=envname))
lf2 <- length(tmp$parameters)
if(!is.character(tmp$model))stop("linear must be a W&R formula")
if(length(tmp$model)==1){
if(is.null(shape))shape <- ~1
else stop("linear must contain covariates")}
rm(tmp)}
else lf2 <- 0
if(inherits(lin3,"formula")){
tmp <- attributes(if(respenv)finterp(lin3,.envir=y,.name=envname)
else finterp(lin3,.envir=envir,.name=envname))
lf3 <- length(tmp$parameters)
if(!is.character(tmp$model))stop("linear must be a W&R formula")
if(length(tmp$model)==1){
if(is.null(family))family <- ~1
else stop("linear must contain covariates")}
rm(tmp)}
else lf3 <- 0
mu2 <- sh2 <- fa2 <- NULL
if(respenv||inherits(envir,"repeated")||inherits(envir,"tccov")||inherits(envir,"tvcov")||inherits(envir,"data.frame")){
if(inherits(mu,"formula")){
mu2 <- if(respenv)finterp(mu,.envir=y,.name=envname)
else finterp(mu,.envir=envir,.name=envname)}
if(inherits(shape,"formula")){
sh2 <- if(respenv)finterp(shape,.envir=y,.name=envname)
else finterp(shape,.envir=envir,.name=envname)}
if(inherits(family,"formula")){
fa2 <- if(respenv)finterp(family,.envir=y,.name=envname)
else finterp(family,.envir=envir,.name=envname)}
if(is.function(mu)){
if(is.null(attr(mu,"model"))){
tmp <- parse(text=deparse(mu)[-1])
mu <- if(respenv)fnenvir(mu,.envir=y,.name=envname)
else fnenvir(mu,.envir=envir,.name=envname)
mu2 <- mu
attr(mu2,"model") <- tmp}
else mu2 <- mu}
if(is.function(shape)){
if(is.null(attr(shape,"model"))){
tmp <- parse(text=deparse(shape)[-1])
shape <- if(respenv)fnenvir(shape,.envir=y,.name=envname)
else fnenvir(shape,.envir=envir,.name=envname)
sh2 <- shape
attr(sh2,"model") <- tmp}
else sh2 <- shape}
if(is.function(family)){
if(is.null(attr(family,"model"))){
tmp <- parse(text=deparse(family)[-1])
family <- if(respenv)fnenvir(family,.envir=y,.name=envname)
else fnenvir(family,.envir=envir,.name=envname)
fa2 <- family
attr(fa2,"model") <- tmp}
else fa2 <- family}}
else {
if(is.function(mu)&&is.null(attr(mu,"model")))mu <- fnenvir(mu)
if(is.function(shape)&&is.null(attr(shape,"model")))
shape <- fnenvir(shape)
if(is.function(family)&&is.null(attr(family,"model")))
family <- fnenvir(family)}
if(inherits(mu,"formula")){
if(npl==0)stop("formula for mu cannot be used if no parameters are estimated")
linarg <- if(lf1>0) "linear" else NULL
mu3 <- if(respenv)finterp(mu,.envir=y,.name=envname,.args=linarg)
else finterp(mu,.envir=envir,.name=envname,.args=linarg)
npt1 <- length(attr(mu3,"parameters"))
if(is.character(attr(mu3,"model"))){
if(length(attr(mu3,"model"))==1){
tmp <- attributes(mu3)
mu3 <- function(p) p[1]*rep(1,n)
attributes(mu3) <- tmp}}
else {
if(npl!=npt1&&!common&&lf1==0){
cat("\nParameters are ")
cat(attr(mu3,"parameters"),"\n")
stop(paste("pmu should have",npt1,"estimates"))}
if(is.list(pmu)){
if(!is.null(names(pmu))){
o <- match(attr(mu3,"parameters"),names(pmu))
pmu <- unlist(pmu)[o]
if(sum(!is.na(o))!=length(pmu))stop("invalid estimates for mu - probably wrong names")}
else pmu <- unlist(pmu)}}}
else if(!is.function(mu)){
mu3 <- function(p) p[1]*rep(1,n)
npt1 <- 1}
else {
mu3 <- mu
npt1 <- length(attr(mu3,"parameters"))-(lf1>0)}
if(lf1>0){
if(is.character(attr(mu3,"model")))
stop("mu cannot be a W&R formula if linear is supplied")
dm1 <- if(respenv)wr(lin1,data=y)$design
else wr(lin1,data=envir)$design
if(is.null(mu2))mu2 <- mu3
mu1 <- function(p)mu3(p,dm1%*%p[(npt1+1):(npt1+lf1)])}
else {
if(lf1==0&&length(mu3(pmu))==1){
mu1 <- function(p) mu3(p)*rep(1,n)
attributes(mu1) <- attributes(mu3)}
else {
mu1 <- mu3
rm(mu3)}}
if(is.null(attr(mu1,"parameters"))){
attributes(mu1) <- if(is.function(mu)){
if(!inherits(mu,"formulafn")){
if(respenv)attributes(fnenvir(mu,.envir=y))
else attributes(fnenvir(mu,.envir=envir))}
else attributes(mu)}
else attributes(fnenvir(mu1))}
nlp <- npt1+lf1
if(!common&&nlp!=npl)stop(paste("pmu should have",nlp,"initial estimates"))
npl <- if(common) 1 else npl+1
npl1 <- if(common&&!inherits(lin2,"formula")) 1 else nlp+2
np1 <- npl+nps
if(inherits(shape,"formula")){
if(nps==0&&!common)
stop("formula for shape cannot be used if no parameters are estimated")
old <- if(common)mu1 else NULL
linarg <- if(lf2>0) "linear" else NULL
sh3 <- if(respenv)finterp(shape,.envir=y,.start=npl1,.name=envname,.old=old,.args=linarg)
else finterp(shape,.envir=envir,.start=npl1,.name=envname,.old=old,.args=linarg)
npt2 <- length(attr(sh3,"parameters"))
if(is.character(attr(sh3,"model"))){
if(length(attr(sh3,"model"))==1){
tmp <- attributes(sh3)
sh3 <- function(p) p[npl1]*rep(1,n)
sh2 <- fnenvir(function(p) p[1]*rep(1,n))
attributes(sh3) <- tmp}}
else {
if(nps!=npt2&&!common&&lf2==0){
cat("\nParameters are ")
cat(attr(sh3,"parameters"),"\n")
stop(paste("pshape should have",npt2,"estimates"))}
if(is.list(pshape)){
if(!is.null(names(pshape))){
o <- match(attr(sh3,"parameters"),names(pshape))
pshape <- unlist(pshape)[o]
if(sum(!is.na(o))!=length(pshape))stop("invalid estimates for shape - probably wrong names")}
else pshape <- unlist(pshape)}}}
else if(!is.function(shape)){
sh3 <- function(p) p[npl1]*rep(1,n)
sh2 <- fnenvir(function(p) p[1]*rep(1,n))
npt2 <- 1}
else {
sh3 <- function(p) shape(p[npl1:np])
attributes(sh3) <- attributes(shape)
npt2 <- length(attr(sh3,"parameters"))-(lf2>0)}
if(lf2>0){
if(is.character(attr(sh3,"model")))
stop("shape cannot be a W&R formula if linear is supplied")
dm2 <- if(respenv)wr(lin2,data=y)$design
else wr(lin2,data=envir)$design
if(is.null(sh2))sh2 <- sh3
	sh1 <- function(p) sh3(p,dm2%*%p[(npl1+lf2-1):np])}
else {
sh1 <- sh3
rm(sh3)}
if(is.null(attr(sh1,"parameters"))){
attributes(sh1) <- if(is.function(shape)){
if(!inherits(shape,"formulafn")){
if(respenv)attributes(fnenvir(shape,.envir=y))
else attributes(fnenvir(shape,.envir=envir))}
else attributes(shape)}
else attributes(fnenvir(sh1))}
nlp <- npt2+lf2
if(!common&&nlp!=nps)stop(paste("pshape should have",nlp,"initial estimates"))
nps1 <- if(common&&!inherits(family,"formula")) 1
else if(common&&inherits(family,"formula"))
length(attr(mu1,"parameters"))+nlp+1
else np1+1
if(inherits(family,"formula")){
if(npf==0&&!common)
stop("formula for family cannot be used if no parameters are estimated")
old <- if(common)c(mu1,sh1) else NULL
linarg <- if(lf3>0) "linear" else NULL
fa3 <- if(respenv)finterp(family,.envir=y,.start=nps1,.name=envname,.old=old,.args=linarg)
else finterp(family,.envir=envir,.start=nps1,.name=envname,.old=old,.args=linarg)
npt3 <- length(attr(fa3,"parameters"))
if(is.character(attr(fa3,"model"))){
if(length(attr(fa3,"model"))==1){
tmp <- attributes(fa3)
fa3 <- function(p) p[nps1]*rep(1,n)
fa2 <- fnenvir(function(p) p[1]*rep(1,n))
attributes(fa3) <- tmp}}
else {
if(npf!=npt3&&!common&&lf3==0){
cat("\nParameters are ")
cat(attr(fa3,"parameters"),"\n")
stop(paste("pfamily should have",npt3,"estimates"))}
if(is.list(pfamily)){
if(!is.null(names(pfamily))){
o <- match(attr(fa3,"parameters"),names(pfamily))
pfamily <- unlist(pfamily)[o]
if(sum(!is.na(o))!=length(pfamily))stop("invalid estimates for family - probably wrong names")}
else pfamily <- unlist(pfamily)}}}
else if(!is.function(family)){
fa3 <- function(p) p[nps1]*rep(1,n)
fa2 <- fnenvir(function(p) p[1]*rep(1,n))
npt3 <- 1}
else {
fa3 <- function(p) family(p[nps1:np])
attributes(fa3) <- attributes(family)
npt3 <- length(attr(fa3,"parameters"))-(lf3>0)}
if(lf3>0){
if(is.character(attr(fa3,"model")))
stop("family cannot be a W&R formula if linear is supplied")
dm3 <- if(respenv)wr(lin3,data=y)$design
else wr(lin3,data=envir)$design
if(is.null(fa2))fa2 <- fa3
	fa1 <- function(p) fa3(p,dm3%*%p[(nps1+lf3-1):np])}
else {
fa1 <- fa3
rm(fa3)}
if(is.null(attr(fa1,"parameters"))){
attributes(fa1) <- if(is.function(family)){
if(!inherits(family,"formulafn")){
if(respenv)attributes(fnenvir(family,.envir=y))
else attributes(fnenvir(family,.envir=envir))}
else attributes(family)}
else attributes(fnenvir(fa1))}
nlp <- npt3+lf3
if(!common&&nlp!=npf)stop(paste("pfamily should have",nlp,"initial estimates"))
if(common){
nlp <- length(unique(c(attr(mu1,"parameters"),attr(sh1,"parameters"),attr(fa1,"parameters"))))
if(nlp!=npl)stop(paste("with a common parameter model, pmu should contain",nlp,"estimates"))}
pmu <- c(pmu,psd)
p <- c(pmu,pshape,pfamily)
type <- "unknown"
if(respenv){
if(inherits(envir,"repeated")&&(length(nobs(y))!=length(nobs(envir))||any(nobs(y)!=nobs(envir))))
stop("y and envir objects are incompatible")
if(!is.null(y$response$wt)&&any(!is.na(y$response$wt)))
wt <- as.vector(y$response$wt)
if(!is.null(y$response$delta))
delta <- as.vector(y$response$delta)
type <- y$response$type
respname <- colnames(y$response$y)
y <- response(y)}
else if(inherits(envir,"repeated")){
if(!is.null(envir$NAs)&&any(envir$NAs))
stop("gnlr3 does not handle data with NAs")
cn <- deparse(substitute(y))
if(length(grep("\"",cn))>0)cn <- y
if(length(cn)>1)stop("only one y variable allowed")
col <- match(cn,colnames(envir$response$y))
if(is.na(col))stop(paste("response variable",cn,"not found"))
type <- envir$response$type[col]
respname <- colnames(envir$response$y)[col]
y <- envir$response$y[,col]
if(!is.null(envir$response$n)&&!all(is.na(envir$response$n[,col])))
y <- cbind(y,envir$response$n[,col]-y)
else if(!is.null(envir$response$censor)&&!all(is.na(envir$response$censor[,col])))
y <- cbind(y,envir$response$censor[,col])
if(!is.null(envir$response$wt))wt <- as.vector(envir$response$wt)
if(!is.null(envir$response$delta))
delta <- as.vector(envir$response$delta[,col])}
else if(inherits(envir,"data.frame")){
respname <- deparse(substitute(y))
y <- envir[[deparse(substitute(y))]]}
else if(inherits(y,"response")){
if(dim(y$y)[2]>1)stop("gnlr3 only handles univariate responses")
if(!is.null(y$wt)&&any(!is.na(y$wt)))wt <- as.vector(y$wt)
if(!is.null(y$delta))delta <- as.vector(y$delta)
type <- y$type
respname <- colnames(y$y)
y <- response(y)}
else respname <- deparse(substitute(y))
if(any(is.na(y)))stop("NAs in y - use rmna")
censor <- length(dim(y))==2&&dim(y)[2]==2
if(censor&&all(y[,2]==1)){
y <- y[,1]
censor <- FALSE}
if(censor){
y[,2] <- as.integer(y[,2])
if(any(y[,2]!=-1&y[,2]!=0&y[,2]!=1))
stop("Censor indicator must be -1s, 0s, and 1s")
cc <- ifelse(y[,2]==1,1,0)
rc <- ifelse(y[,2]==0,1,ifelse(y[,2]==-1,-1,0))
lc <- ifelse(y[,2]==-1,0,1)
if(any(delta<=0&y[,2]==1))
stop("All deltas for uncensored data must be positive")
else {
delta <- ifelse(delta<=0,0.000001,delta)
delta <- ifelse(y[,1]-delta/2<=0,delta-0.00001,delta)}}
else {
if(!is.vector(y,mode="numeric"))stop("y must be a vector")
if(min(delta)<=0)stop("All deltas for must be positive")}
if(distribution=="power variance function Poisson"){
if(type!="unknown"&&type!="discrete")
stop("discrete data required")
if(censor)stop("censoring not allowed for power variance function Poisson")
if(any(y<0))stop("All response values must be >= 0")}
else if(distribution!="logistic"&&distribution!="Student t"&&
distribution!="power exponential"&&distribution!="skew Laplace"){
if(type!="unknown"&&type!="duration"&&type!="continuous")
stop("duration data required")
if((censor&&any(y[,1]<=0))||(!censor&&any(y<=0)))
stop("All response values must be > 0")}
else if(type!="unknown"&&type!="continuous"&&type!="duration")
stop("continuous data required")
if(min(wt)<0)stop("All weights must be non-negative")
if(length(wt)==1)wt <- rep(wt,n)
if(length(delta)==1)delta <- rep(delta,n)
if(is.null(nest))stop("A nest vector must be supplied")
else if(length(nest)!=n)stop("nest must be the same length as the other variables")
if(is.factor(nest))nest <- as.numeric(nest)
nind <- length(unique(nest))
od <- length(nest)==nind
i <- rep(1:n,points)
ii <- rep(1:nind,points)
k <- NULL
for(j in 1:points)k <- c(k,nest+(j-1)*max(nest))
k <- as.integer(k)
quad <- gauss.hermite(points)
sd <- quad[rep(1:points,rep(n,points)),1]
qw <- quad[rep(1:points,rep(nind,points)),2]
if(is.null(scale)){
if(distribution=="normal"||distribution=="logistic"||
distribution=="Student t"||distribution=="power exponential"||
distribution=="skew Laplace")scale <- "identity"
else scale <- "log"}
mu4 <- if(scale=="identity") function(p) mu1(p)[i]+p[npl]*sd
else if(scale=="log") function(p) exp(log(mu1(p))[i]+p[npl]*sd)
else if(scale=="reciprocal") function(p) 1/(1/mu1(p)[i]+p[npl]*sd)
else if(scale=="exp") function(p) log(exp(mu1(p))[i]+p[npl]*sd)
if(any(is.na(mu1(pmu))))stop("The location regression returns NAs: probably invalid initial values")
if(any(is.na(sh1(p))))stop("The shape regression returns NAs: probably invalid initial values")
if(any(is.na(fa1(p))))stop("The family regression returns NAs: probably invalid initial values")
if (!censor){
ret <- switch(distribution,
normal={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- fa1(p)
y <- y^f/f
jy <- y^(2*f-1)*delta/(2*f)
norm <- sign(f)*pnorm(0,m,s)
-wt*(log((pnorm(y+jy,m,s)-pnorm(y-jy,m,s)))
-log(1-(f<0)-norm))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- fa1(p)
norm <- sign(f)*pnorm(0,m,s)
-wt*((f-1)*log(y)+log(dnorm(y^f/f,m,s))
-log(1-(f<0)-norm))}
const <- -wt*log(delta)}},
"power exponential"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- exp(fa1(p))
					-wt*log(ppowexp(y+delta/2,m,s,f)
-ppowexp(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
t <- 0.5*sh1(p)
f <- exp(fa1(p))
b <- 1+1/(2*f)
wt*(t+(abs(y-mu4(p))/exp(t))^(2*f)/2+
lgamma(b)+b*log(2))}
const <- -wt*log(delta)}},
"inverse Gauss"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*log(pginvgauss(y+delta/2,m,s,f)
-pginvgauss(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*(-f*log(m)+(f-1)*log(y)-
log(2*besselK(1/(s*m),abs(f)))-
(1/y+y/m^2)/(2*s))}
const <- -wt*log(delta)}},
logistic={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*log(pglogis(y+delta/2,m,s,f)
-pglogis(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
t <- sh1(p)
m <- (y-mu4(p))/exp(t)*sqrt(3)/pi
wt*(-fa1(p)+m+t+(exp(fa1(p))+1)*
log(1+exp(-m)))}
const <- -wt*(log(delta*sqrt(3)/pi))}},
Hjorth={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*log(phjorth(y+delta/2,m,s,f)-
phjorth(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*(-f*log(1+s*y)/s-(y/m)^2/2+
log(y/m^2+f/(1+s*y)))}
const <- -wt*log(delta)}},
gamma={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
u <- (m/s)^f
-wt*log(pgamma((y+delta/2)^f,s,scale=u)
-pgamma((y-delta/2)^f,s,scale=u))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
u <- fa1(p)
f <- exp(u)
v <- s*f
-wt*(v*(t-log(m))-(s*y/m)^f+u+(v-1)*log(y)
-lgamma(s))}
const <- -wt*log(delta)}},
Burr={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*log(pburr(y+delta/2,m,s,f)-
pburr(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
y1 <- y/m
-wt*(log(f*s/m)+(s-1)*log(y1)
-(f+1)*log(1+y1^s))}
const <- -wt*log(delta)}},
Weibull={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*log(pgweibull(y+delta/2,s,m,f)
-pgweibull(y-delta/2,s,m,f))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
u <- fa1(p)
f <- exp(u)
y1 <- (y/m)^s
-wt*(t+u+(s-1)*log(y)-s*log(m)+
(f-1)*log(1-exp(-y1))-y1)}
const <- -wt*log(delta)}},
"Student t"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- exp(fa1(p))
-wt*log(pt((y+delta/2-m)/s,f)-
pt((y-delta/2-m)/s,f))}
const <- 0}
else {
fcn <- function(p) {
s <- exp(0.5*sh1(p))
-wt*log(dt((y-mu4(p))/s,exp(fa1(p)))/s)}
const <- -wt*(log(delta))}},
"extreme value"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
y1 <- y^f/f
ey <- exp(y1)
jey <- y^(f-1)*ey*delta/2
norm <- sign(f)*exp(-m^-s)
-wt*(log((pweibull(ey+jey,s,m)
-pweibull(ey-jey,s,m))/
(1-(f>0)+norm)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
f <- fa1(p)
y1 <- y^f/f
norm <- sign(f)*exp(-m^-s)
-wt*(t+s*(y1-log(m))-(exp(y1)/m)^s
+(f-1)*log(y)-log(1-(f>0)+norm))}
const <- -wt*log(delta)}},
"power variance function Poisson"={
fcn <- function(p) {
m <- mu4(p)
-wt*log(dpvfpois(y,m,exp(sh1(p))/m,
fa1(p)))}
const <- 0},
"skew Laplace"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*log(pskewlaplace(y+delta/2,m,s,f)
-pskewlaplace(y-delta/2,m,s,f))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
t <- sh1(p)
u <- fa1(p)
f <- exp(u)
-wt*(u+ifelse(y>m,-f*(y-m),(y-m)/f)/
s-log(1+f^2)-t)}
const <- -wt*log(delta)}})}
else {
ret <- switch(distribution,
normal={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- fa1(p)
yy <- y[,1]^f/f
jy <- y[,1]^(2*f-1)*delta/(2*f)
norm <- sign(f)*pnorm(0,m,s)
					-wt*(cc*log(pnorm(yy+jy,m,s)-
						pnorm(yy-jy,m,s))+log(lc-rc*(pnorm(yy,
						m,s)-(f>0)*norm))-log(1-(f<0)-norm))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(0.5*t)
f <- fa1(p)
norm <- sign(f)*pnorm(0,m,s)
					-wt*(cc*(-(t+((y[,1]^f/f-m)/s)^2)/2+(f-1)*
						log(y[,1]))+log(lc-rc
						*(pnorm(y[,1]^f/f,m,s)
						-(f>0)*norm))-log(1-(f<0)-norm))}
const <- wt*cc*(log(2*pi)/2-log(delta))}},
"power exponential"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
					f <- exp(fa1(p))
-wt*(cc*log(ppowexp(y[,1]+delta/2,m,s,f)-
ppowexp(y[,1]-delta/2,m,s,f))
+log(lc-rc*ppowexp(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- 0.5*sh1(p)
s <- exp(t)
f <- exp(fa1(p))
b <- 1+1/(2*f)
-wt*(cc*(-t-(abs(y[,1]-mu4(p))/s)^(2*f)/2-
lgamma(b)-b*log(2))+log(lc-rc
*ppowexp(y[,1],m,s,f)))}
const <- -wt*cc*(log(delta))}},
"inverse Gauss"={
if(exact){
				fcn <- function(p) {
					m <- mu4(p)
					s <- exp(sh1(p))
					f <- fa1(p)
					-wt*(cc*log(pginvgauss(y[,1]+delta/2,m,s,f)
						-pginvgauss(y[,1]-delta/2,m,s,f))+
						log(lc-rc*pginvgauss(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*(cc*(-f*log(m)+(f-1)*log(y[,1])-
log(2*besselK(1/(s*m),abs(f)))-
(1/y[,1]+y[,1]/m^2)/(2*s))+log(lc-rc
*pginvgauss(y[,1],m,s,f)))}
const <- -wt*cc*(log(delta))}},
logistic={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))*sqrt(3)/pi
f <- exp(fa1(p))
-wt*(cc*log(pglogis(y[,1]+delta/2,m,s,f)-
pglogis(y[,1]-delta/2,m,s,f))
+log(lc-rc*pglogis(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))*sqrt(3)/pi
y1 <- (y[,1]-m)/s
u <- fa1(p)
f <- exp(u)
-wt*(cc*(u-y1-log(s)-(f+1)*log(1+exp(-y1)))
+log(lc-rc*pglogis(y[,1],m,s,f)))}
const <- -wt*cc*log(delta)}},
Hjorth={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*(cc*log(phjorth(y[,1]+delta/2,m,s,f)-
phjorth(y[,1]-delta/2,m,s,f))
+log(lc-rc*phjorth(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
-wt*(cc*(-f*log(1+s*y[,1])/s-(y[,1]/m)^2/2+
log(y[,1]/m^2+f/(1+s*y[,1])))+
log(lc-rc*phjorth(y[,1],m,s,f)))}
const <- -wt*cc*log(delta)}},
gamma={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
u <- (m/s)^f
-wt*(cc*log(pgamma((y[,1]+delta/2)^f,s,
scale=u)-pgamma((y[,1]-delta/2)^f,s,
scale=u))+log(lc-rc*pgamma(y[,1]^f,s,
scale=u)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
u <- fa1(p)
f <- exp(u)
v <- s*f
-wt*(cc*(v*(t-log(m))-(s*y[,1]/m)^f+u+(v-1)
*log(y[,1])-lgamma(s))+log(lc-rc
*pgamma(y[,1]^f,s,scale=(m/s)^f)))}
const <- -wt*cc*log(delta)}},
Burr={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*(cc*log(pburr(y[,1]+delta/2,m,s,f)-
pburr(y[,1]-delta/2,m,s,f))
+log(lc-rc*pburr(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
y1 <- y[,1]/m
-wt*(cc*(log(f*s/m)+(s-1)*log(y1)
-(f+1)*log(1+y1^s))+
log(lc-rc*pburr(y[,1],m,s,f)))}
const <- -wt*cc*log(delta)}},
Weibull={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*(cc*log(pgweibull(y[,1]+delta/2,s,m,f)-
pgweibull(y[,1]-delta/2,s,m,f))
+log(lc-rc*pgweibull(y[,1],s,m,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
u <- fa1(p)
f <- exp(u)
y1 <- (y[,1]/m)^s
-wt*(cc*(t+u+(s-1)*log(y[,1])-s*log(m)+
(f-1)*log(1-exp(-y1))-y1)+log(lc-rc*
pgweibull(y[,1],s,m,f)))}
const <- -wt*cc*log(delta)}},
"Student t"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- exp(fa1(p))
-wt*(cc*log(pt((y[,1]+delta/2-m)/s,f)-
pt((y[,1]-delta/2-m)/s,f))
+log(lc-rc*pt((y[,1]-m)/s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
s <- exp(0.5*sh1(p))
f <- exp(fa1(p))
-wt*(cc*log(dt((y[,1]-m)/s,f)/s)
+log(lc-rc*pt((y[,1]-m)/s,f)))}
const <- -wt*cc*(log(delta))}},
"extreme value"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- fa1(p)
y1 <- y[,1]^f/f
ey <- exp(y1)
jey <- y[,1]^(f-1)*ey*delta/2
norm <- sign(f)*exp(-m^-s)
ind <- f>0
-wt*(cc*log(pweibull(ey+jey,s,m)-
pweibull(ey-jey,s,m))
+log(lc-rc*(pweibull(ey,s,m)-ind+
(f>0)*norm))-log(1-ind+norm))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
f <- fa1(p)
y1 <- y[,1]^f/f
ey <- exp(y1)
norm <- sign(f)*exp(-m^-s)
ind <- f>0
-wt*(cc*(t+s*(y1-log(m))-(ey/m)^s
+(f-1)*log(y[,1]))+log(lc-rc*
(pweibull(ey,s,m)-ind+(f>0)*norm))-
log(1-ind+norm))}
const <- -wt*cc*log(delta)}},
"skew Laplace"={
if(exact){
fcn <- function(p) {
m <- mu4(p)
s <- exp(sh1(p))
f <- exp(fa1(p))
-wt*(cc*log(pskewlaplace(y[,1]+delta/2,m,s,f)
-pskewlaplace(y[,1]-delta/2,m,s,f))
+log(lc-rc*pskewlaplace(y[,1],m,s,f)))}
const <- 0}
else {
fcn <- function(p) {
m <- mu4(p)
t <- sh1(p)
s <- exp(t)
u <- fa1(p)
f <- exp(u)
					-wt*(cc*(u+ifelse(y[,1]>m,-f*(y[,1]-m),(y[,1]-m)/f)/
s-log(1+f^2)-t)+log(lc-rc
*pskewlaplace(y[,1],m,s,f)))}
const <- -wt*cc*log(delta)}})}
fn <- function(p) {
under <- 0
if(od)pr <- -fcn(p)
else {
pr <- NULL
for(i in split(fcn(p),k))pr <- c(pr,-sum(i))}
if(any(is.na(pr)))stop("NAs - unable to calculate probabilities.\n Try other initial values.")
if(max(pr)-min(pr)>1400){
if(print.level==2)cat("Log probabilities:\n",pr,"\n\n")
stop("Product of probabilities is too small to calculate.\n Try fewer points.")}
if(any(pr > 700))under <- 700-max(pr)
else if(any(pr < -700))under <- -700-min(pr)
tmp <- NULL
for(i in split(qw*exp(pr+under),ii))tmp <- c(tmp,sum(i))
-sum(log(tmp)-under)}
if(fscale==1)fscale <- fn(p)
if(is.na(fn(p)))
stop("Likelihood returns NAs: probably invalid initial values")
z0 <- nlm(fn,p=p,hessian=TRUE,print.level=print.level,typsize=typsize,
ndigit=ndigit,gradtol=gradtol,stepmax=stepmax,steptol=steptol,
iterlim=iterlim,fscale=fscale)
z0$minimum <- z0$minimum+sum(const)
fitted.values <- as.vector(mu4(z0$estimate))
residuals <- y-fitted.values
if(np==0)cov <- NULL
else if(np==1){
cov <- 1/z0$hessian
se <- as.vector(sqrt(cov))}
else {
a <- if(any(is.na(z0$hessian))||any(abs(z0$hessian)==Inf))0
else qr(z0$hessian)$rank
if(a==np)cov <- solve(z0$hessian)
else cov <- matrix(NA,ncol=np,nrow=np)
se <- sqrt(diag(cov))}
if(!is.null(mu2))mu1 <- mu2
if(!is.null(sh2))sh1 <- sh2
if(!is.null(fa2))fa1 <- fa2
z1 <- list(
call=call,
delta=delta,
distribution=distribution,
likefn=fcn,
respname=respname,
mu=mu1,
shape=sh1,
family=fa1,
linear=list(lin1,lin2,lin3),
linmodel=list(lin1model,lin2model,lin3model),
common=common,
scale=scale,
points=points,
prior.weights=wt,
censor=censor,
maxlike=z0$minimum,
fitted.values=fitted.values,
residuals=residuals,
aic=z0$minimum+np,
df=sum(wt)-np,
coefficients=z0$estimate,
npl=npl,
npm=0,
nps=nps,
npf=npf,
se=se,
cov=cov,
corr=cov/(se%o%se),
gradient=z0$gradient,
iterations=z0$iterations,
code=z0$code)
class(z1) <- "gnlm"
return(z1)} |
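## nsRFA demo: fit candidate distributions to the annual maximum peak flows
## of one FEH station and compare them with the model selection criteria of
## Laio and colleagues implemented by MSClaio2008(): AIC, AICc, BIC and ADC.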
library(nsRFA)
data(FEH1000)
sitedata <- am[am[,1]==69023, ]
serieplot(sitedata[,4], sitedata[,3], ylim=c(0,200),
xlab="year", ylab="Max annual peak [m3/s]")
MSC <- MSClaio2008(sitedata[,4], crit="AIC")
MSC
summary(MSC)
MSC <- MSClaio2008(sitedata[,4], crit="AICc")
MSC
summary(MSC)
MSC <- MSClaio2008(sitedata[,4], crit="BIC")
MSC
summary(MSC)
MSC <- MSClaio2008(sitedata[,4], crit="ADC")
MSC
summary(MSC)
MSC <- MSClaio2008(sitedata[,4])
MSC
summary(MSC)
plot(MSC) |
quiet <- function(expr, all=TRUE) {
if (Sys.info()["sysname"] == "Windows") {
file <- "NUL"
} else {
file <- "/dev/null"
}
if (all) {
suppressWarnings(suppressMessages(
suppressPackageStartupMessages(capture.output(expr,
file=file))))
} else {
capture.output(expr, file=file)
}
} |
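## Example: evaluate a noisy expression while discarding printed output,
## messages, warnings and package startup messages (the expression is run
## for its side effects; its printed output goes to the null device):
# quiet(library(MASS))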
library(rAltmetric)
library(magrittr)
library(purrr)
ids <- list(c(
"10.1038/nature09210",
"10.1126/science.1187820",
"10.1016/j.tree.2011.01.009",
"10.1086/664183"
))
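## ids is wrapped in list() so that purrr::pmap_df() below calls alm() once
## per DOI and row-binds the returned Altmetric data frames.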
alm <- function(x) altmetrics(doi = x) %>% altmetric_data()
results <- pmap_df(ids, alm)
library(dplyr)
knitr::kable(results %>% select(title, doi, starts_with("cited"))) |
drybrush <- function(raster_dem, aggregation_factor = 10, max_colour_altitude = 30,
                     opacity = 0.5,
                     ## the original default hex codes were truncated at "#";
                     ## this dark-to-light pair is an assumed placeholder
                     elevation_palette = c("#3B2F2F", "#FFFFFF")) {
  ## coarse minimum-elevation base surface at the requested aggregation factor
  rasterBase <- raster::aggregate(raster_dem, fun = min, fact = aggregation_factor)
  rasterBase <- raster::resample(rasterBase, raster_dem)
  ## local height above the smoothed base, clamped to [0, max_colour_altitude]
  drybrush_distance <- raster_dem - rasterBase
  drybrush_distance[is.na(drybrush_distance)] <- 0
  drybrush_distance[drybrush_distance < 0] <- 0
  drybrush_distance_std <- drybrush_distance / max_colour_altitude
  drybrush_distance_std[drybrush_distance_std > 1] <- 1
  elevation_overlay <- elevation_shade(drybrush_distance_std, elevation_palette = elevation_palette)
  elevation_overlay[,,4] <- opacity
  elevation_overlay
}
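## Usage sketch (hedged): raster_dem is a raster-package DEM and
## elevation_shade() is assumed from this same source. The result is an
## RGBA array highlighting local relief above a smoothed minimum surface,
## for compositing over a map at the given opacity.
# overlay <- drybrush(dem, max_colour_altitude = 50, opacity = 0.4)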
gregElasticNet <- function(y, xsample, xpop, pi = NULL, alpha = 1,
                           model = "linear", pi2 = NULL, var_est = FALSE,
                           var_method = "LinHB", datatype = "raw", N = NULL,
                           lambda = "lambda.min", B = 1000, cvfolds = 10){
if(!(typeof(y) %in% c("numeric", "integer", "double"))){
stop("Must supply numeric y. For binary variable, convert to 0/1's.")
}
if(!is.element(var_method, c("LinHB", "LinHH", "LinHTSRS", "LinHT", "bootstrapSRS"))){
message("Variance method input incorrect. It has to be \"LinHB\", \"LinHH\", \"LinHT\", \"LinHTSRS\", or \"bootstrapSRS\".")
return(NULL)
}
if(!is.element(model, c("linear","logistic"))){
message("Method input incorrect, has to be either \"linear\" or \"logistic\"")
return(NULL)
}
if(is.null(N)){
if(datatype=="raw"){
N <- dim(as.matrix(xpop))[1]
}else{
N <- sum(pi^(-1))
message("Assuming N can be approximated by the sum of the inverse inclusion probabilities.")
}
}
xsample.d <- model.matrix(~., data = data.frame(xsample))
xsample <- data.frame(xsample.d[,-1])
xsample.dt <- t(xsample.d)
y <- as.vector(y)
n <- length(y)
  if(is.null(pi)){
    message("Assuming simple random sampling")
    pi <- rep(length(y)/N, length(y))
  }
weight <- as.vector(pi^(-1))
if(model=="linear"){
fam <- "gaussian"
} else{
fam <- "binomial"
}
cv <- cv.glmnet(x = as.matrix(xsample), y = y, alpha = alpha, weights = weight, nfolds = cvfolds,family=fam, standardize=FALSE)
if(lambda=="lambda.min"){
lambda_opt <- cv$lambda.min
}
if(lambda=="lambda.1se"){
lambda_opt <- cv$lambda.1se
}
pred.mod <- glmnet(x = as.matrix(xsample), y = y, alpha = alpha, family=fam, standardize = FALSE, weights=weight)
elasticNet_coef <- predict(pred.mod,type = "coefficients",s = lambda_opt)[1:dim(xsample.d)[2],]
y.hats.s <- predict(cv,newx = as.matrix(xsample), s = lambda_opt, type="response")
if (model == "logistic") {
if (datatype != "raw"){
message("For the Logistic Elastic Net Estimator, user must supply all x values for population. Populations totals or means for x are not enough.")
return(NULL)
}
xpop <- data.frame(model.matrix(~., data = xpop))[,-1]
    xpop <- dplyr::select(xpop, dplyr::all_of(names(xsample)))
xpop_d <- model.matrix(~., data = xpop)
y.hats.U <- predict(cv,newx = xpop_d[,-1], s = lambda_opt, type = "response")
t <- sum(y.hats.U) + t(y-y.hats.s)%*%pi^(-1)
if ( var_est == TRUE){
if (var_method != "bootstrapSRS") {
varEst <- varMase(y = (y - y.hats.s),pi = pi,pi2 = pi2,method = var_method, N = N)
}
if(var_method == "bootstrapSRS"){
dat <- cbind(y,pi, xsample.d)
t_boot <- boot(data = dat, statistic = logisticGregElasticNett, R = B, xpopd = xpop_d, alpha=alpha, lambda=lambda_opt, parallel = "multicore", ncpus = 2)
varEst <- var(t_boot$t)*n/(n-1)*(N-n)/(N-1)
}
}
}
if (model == "linear") {
if (datatype=="raw"){
xpop <- data.frame(model.matrix(~., data = xpop))[,-1]
      xpop <- dplyr::select(xpop, dplyr::all_of(names(xsample)))
xpop_d <- model.matrix(~., data = xpop)
xpop_d <- apply(xpop_d,2,sum)
}
if (datatype=="totals"){
xpop_d <- unlist(c(N,xpop[names(xsample)]))
}
if (datatype=="means"){
xpop_d <- unlist(c(N,xpop[names(xsample)]*N))
}
t <- elasticNet_coef %*% (xpop_d) + t(y-y.hats.s)%*%pi^(-1)
if ( var_est == TRUE ) {
if ( var_method != "bootstrapSRS") {
varEst <- varMase(y = (y-y.hats.s),pi = pi,pi2 = pi2,method = var_method, N = N)
}
if ( var_method == "bootstrapSRS"){
dat <- cbind(y,pi, xsample.d)
t_boot <- boot(data = dat, statistic = gregElasticNett, R = B, xpopd = xpop_d, alpha=alpha, lambda=lambda_opt, parallel = "multicore", ncpus = 2)
varEst <- var(t_boot$t)*n/(n-1)*(N-n)/(N-1)
}
}
}
if(var_est==TRUE){
return(list( pop_total = as.numeric(t),
pop_mean = as.numeric(t)/N,
pop_total_var=varEst,
pop_mean_var=varEst/N^2,
coefficients = elasticNet_coef))
}else{
return(list( pop_total = as.numeric(t),
pop_mean = as.numeric(t)/N,
coefficients = elasticNet_coef))
}
} |
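## Example (hedged sketch): survey-assisted elastic-net GREG estimate of a
## population total; y_s, x_s (sample auxiliaries) and x_U (population
## auxiliaries) are hypothetical data objects.
# est <- gregElasticNet(y = y_s, xsample = x_s, xpop = x_U, model = "linear",
#                       datatype = "raw", var_est = TRUE, var_method = "LinHB")
# est$pop_total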
library(openintro)
library(usethis)
openintro_colors <- IMSCOL[, 1]
openintro_palettes <- list(
main = openintro_cols("blue", "green", "pink", "yellow", "red"),
two = openintro_cols("blue", "green"),
three = openintro_cols("blue", "green", "pink"),
four = openintro_cols("blue", "green", "pink", "yellow"),
five = openintro_cols("blue", "green", "pink", "yellow", "red"),
six = openintro_cols("blue", "green", "pink", "yellow", "red", "gray"),
cool = openintro_cols("blue", "green"),
hot = openintro_cols("pink", "yellow", "red"),
gray = openintro_cols("lgray", "gray", "black")
)
use_data(openintro_colors, overwrite = TRUE)
use_data(openintro_palettes, overwrite = TRUE) |
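
# Sketch of how the saved palette objects are meant to be used downstream
# (assumes the package is loaded so the data objects are available):
# openintro_palettes$hot                     # pink/yellow/red hex codes
# scales::show_col(openintro_palettes$main)  # visual check of the main palette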
'dse15b' |
ORRRR <- function(y, x, z = NULL, mu = TRUE, r = 1,
initial_size = 100, addon = 10,
method = c("SMM", "SAA"),
SAAmethod = c("optim", "MM"),
...,
initial_A = matrix(rnorm(P*r), ncol = r),
initial_B = matrix(rnorm(Q*r), ncol = r),
initial_D = matrix(rnorm(P*R), ncol = R),
initial_mu = matrix(rnorm(P)),
initial_Sigma = diag(P),
ProgressBar = requireNamespace("lazybar"),
return_data = TRUE){
if (ProgressBar && !requireNamespace("lazybar", quietly = TRUE)) {
stop("Package \"lazybar\" needed for progress bar to work. Please install it.",
call. = FALSE)
}
method <- method[[1]]
if(!method %in% c("SMM", "SAA")) stop("Unrecognised method")
if(method == "SAA") SAAmethod <- SAAmethod[[1]] else SAAmethod <- "NULL"
if(method == "SAA" && !SAAmethod %in% c("optim", "MM")) stop("Unrecognised SAAmethod")
if(SAAmethod == "MM"){
RRRR_argument <- list(...)
if(is.null(RRRR_argument$itr)) RRRR_argument$itr <- 10
if(is.null(RRRR_argument$earlystop)) RRRR_argument$earlystop <- 1e-4
}
if(return_data){
returned_data <- list(y=y, x=x, z=z)
} else {
returned_data <- NULL
}
N <- nrow(y)
P <- ncol(y)
Q <- ncol(x)
if(NCOL(initial_A) != r)
stop("Mismatched dimension. The column number of initial_A is not the same as r.")
if(NCOL(initial_B) != r)
stop("Mismatched dimension. The column number of initial_B is not the same as r.")
if(NROW(initial_A) != P)
stop("Mismatched dimension. The row number of initial_A is not the same as P.")
if(NROW(initial_mu) != P)
stop("Mismatched dimension. The row number (length) of initial_mu is not the same as P.")
if(NROW(initial_B) != Q)
stop("Mismatched dimension. The row number of initial_B is not the same as Q.")
if(!is.null(z)){
z <- as.matrix(z)
R <- ncol(z)
if(NCOL(initial_D) != R)
stop("Mismatched dimension. The column number of initial_D is not the same as the column number of variable z.")
if(NROW(initial_D) != P)
stop("Mismatched dimension. The row number of initial_D is not the same as P.")
if(mu){
if(SAAmethod != "MM"){
z <- cbind(z, 1)
initial_D <- cbind(initial_D, initial_mu)
}
}
znull <- FALSE
} else {
R <- 0
znull <- TRUE
if (mu){
if(SAAmethod != "MM")
z <- matrix(rep(1, N))
initial_D <- initial_mu
}
}
muorz <- mu || !znull
if(nrow(y) != nrow(x)){
if(!is.null(z) && nrow(y) != ncol(z))
stop("The numbers of realizations are not consistant in the inputs.")
}
yy <- y
xx <- x
if(SAAmethod != "MM"|| (SAAmethod=="MM" && !znull))
zz <- z
if(method=="SMM" || SAAmethod=="MM"){
A <- list()
B <- list()
Pi <- list()
D <- list()
MM_mu <- list()
Sigma <- list()
A[[1]] <- initial_A
B[[1]] <- initial_B
Pi[[1]] <- A[[1]] %*% t(B[[1]])
if(muorz)
D[[1]] <- initial_D
if(SAAmethod == "MM")
MM_mu[[1]] <- initial_mu
Sigma[[1]] <- initial_Sigma
ybar <- list()
xbar <- list()
zbar <- list()
Mbar <- list()
} else if(method=="SAA"){
if(SAAmethod == "optim"){
make_symm <- function(m) {
m[upper.tri(m)] <- t(m)[upper.tri(m)]
return(m)
}
para <- list()
if(muorz){
para[[1]] <- c(initial_A, initial_B, initial_D,
initial_Sigma[lower.tri(initial_Sigma, diag = TRUE)])
} else {
para[[1]] <- c(initial_A, initial_B,
initial_Sigma[lower.tri(initial_Sigma, diag = TRUE)])
}
}
}
xk <- list()
wk <- list()
itr <- (N-initial_size)/addon +1
obj <- numeric(itr+1)
runtime <- vector("list",itr+1)
if(ProgressBar)
pb <- lazybar::lazyProgressBar(itr, method = "drift")
runtime[[1]] <- Sys.time()
for (k in seq_len(itr)) {
if(k==1){
y <- t(yy[seq(1, k*initial_size), ])
x <- t(xx[seq(1, k*initial_size), ])
if(muorz && (SAAmethod != "MM" || (SAAmethod=="MM" && !znull)))
z <- t(zz[seq(1, k*initial_size), ])
} else {
if(floor(itr) < itr && k==floor(itr)){
y <- t(yy)
x <- t(xx)
if(muorz && (SAAmethod != "MM" || (SAAmethod=="MM" && !znull)))
z <- t(zz)
} else {
y <- t(yy[seq(1, initial_size+(k-1)*addon),])
x <- t(xx[seq(1, initial_size+(k-1)*addon),])
if(muorz && (SAAmethod != "MM" || (SAAmethod=="MM" && !znull)))
z <- t(zz[seq(1, initial_size+(k-1)*addon),])
}
}
N <- ncol(y)
if(method == "SMM"){
if(muorz){
temp <- t(y - Pi[[k]] %*% x - D[[k]] %*% z) %>% split(seq_len(nrow(.)))
} else {
temp <- t(y - Pi[[k]] %*% x) %>% split(seq_len(nrow(.)))
}
xk[[k]] <- sapply(temp, function(tem) t(tem) %*% solve(Sigma[[k]]) %*% tem)
wk[[k]] <- 1/(1 + xk[[k]])
dwk <- diag(sqrt(wk[[k]]))
ybar[[k]] <- y %*% dwk
xbar[[k]] <- x %*% dwk
if(muorz){
zbar[[k]] <- z %*% dwk
Mbar[[k]] <- diag(1, N) - t(zbar[[k]]) %*% solve(zbar[[k]] %*% t(zbar[[k]])) %*% zbar[[k]]
R_0 <- ybar[[k]] %*% Mbar[[k]]
R_1 <- xbar[[k]] %*% Mbar[[k]]
} else {
R_0 <- ybar[[k]]
R_1 <- xbar[[k]]
}
S_01 <- 1/N * R_0 %*% t(R_1)
S_10 <- 1/N * R_1 %*% t(R_0)
S_00 <- 1/N * R_0 %*% t(R_0)
S_11 <- 1/N * R_1 %*% t(R_1)
SSSSS <- solve(expm::sqrtm(S_11)) %*% S_10 %*% solve(S_00) %*% S_01 %*% solve(expm::sqrtm(S_11))
V <- eigen(SSSSS)$vectors[, seq_len(r)]
B[[k + 1]] <- solve(expm::sqrtm(S_11)) %*% V
A[[k + 1]] <- S_01 %*% B[[k + 1]]
Pi[[k + 1]] <- A[[k + 1]] %*% t(B[[k + 1]])
if(muorz){
D[[k + 1]] <- (ybar[[k]] - A[[k + 1]] %*% t(B[[k + 1]]) %*% xbar[[k]]) %*% t(zbar[[k]]) %*% solve(zbar[[k]] %*% t(zbar[[k]]))
Sigma[[k + 1]] <- (P + 1)/(N - 2) *
(ybar[[k]] - A[[k + 1]] %*% t(B[[k + 1]]) %*%
xbar[[k]] - D[[k + 1]] %*% zbar[[k]]) %*%
t(ybar[[k]] - A[[k + 1]] %*% t(B[[k + 1]]) %*%
xbar[[k]] - D[[k + 1]] %*% zbar[[k]])
} else {
Sigma[[k + 1]] <- (P + 1)/(N - 2) *
(ybar[[k]] - A[[k + 1]] %*% t(B[[k + 1]]) %*%
xbar[[k]]) %*%
t(ybar[[k]] - A[[k + 1]] %*% t(B[[k + 1]]) %*%
xbar[[k]])
}
obj[[k]] <- 1/2 * log(det(Sigma[[k]])) +(1+P)/(2*(N)) * sum(log(1+xk[[k]]))
} else if(method=="SAA"){
if(SAAmethod == "optim"){
if(muorz){
        ne_log_likelihood_loss <- function(para){
A <- matrix(para[1:(P*r)], nrow = P)
B <- matrix(para[(P*r+1):(2*P*r)], nrow = P)
D <- matrix(para[(2*P*r+1):(P*r*2+length(initial_D))], nrow = P)
Sigma <- matrix(nrow = P, ncol = P)
Sigma[lower.tri(Sigma,diag=TRUE)] <- para[(P*r*2+length(initial_D)+1):(length(para))]
Sigma <- make_symm(Sigma)
if(!matrixcalc::is.positive.definite(Sigma))
return(Inf)
Pi <- A %*% t(B)
temp <- t(y - Pi %*% x - D %*% z) %>% split(seq_len(nrow(.)))
xk <- sapply(temp, function(tem) t(tem) %*% solve(Sigma) %*% tem)
return(1/2 * log(det(Sigma)) +(1+P)/(2*(N-2)) * sum(log(1+xk)))
}
} else {
        ne_log_likelihood_loss <- function(para){
A <- matrix(para[1:(P*r)], nrow = P)
B <- matrix(para[(P*r+1):(2*P*r)], nrow = P)
Sigma <- matrix(nrow = P, ncol = P)
Sigma[lower.tri(Sigma,diag=TRUE)] <- para[(2*P*r+1):(length(para))]
Sigma <- make_symm(Sigma)
if(!matrixcalc::is.positive.definite(Sigma))
return(Inf)
Pi <- A %*% t(B)
temp <- t(y - Pi %*% x ) %>% split(seq_len(nrow(.)))
xk <- sapply(temp, function(tem) t(tem) %*% solve(Sigma) %*% tem)
return(1/2 * log(det(Sigma)) +(1+P)/(2*(N)) * sum(log(1+xk)))
}
}
      sub_res <- stats::optim(para[[k]], ne_log_likelihood_loss, ...)
para[[k+1]] <- sub_res$par
obj[[k+1]] <- sub_res$value
} else if(SAAmethod == "MM"){
if(!znull){
sub_res <- RRRR(y=t(y), x=t(x), z = t(z), mu = mu, r=r,
initial_A = A[[k]],
initial_B = B[[k]],
initial_D = D[[k]],
initial_mu = MM_mu[[k]],
initial_Sigma = Sigma[[k]],
itr = RRRR_argument$itr,
earlystop = RRRR_argument$earlystop)
D[[k+1]] <- sub_res$D
} else {
sub_res <- RRRR(y=t(y), x=t(x), mu = mu, r=r,
initial_A = A[[k]],
initial_B = B[[k]],
initial_D = NULL,
initial_mu = MM_mu[[k]],
initial_Sigma = Sigma[[k]],
itr = RRRR_argument$itr,
earlystop = RRRR_argument$earlystop)
}
MM_mu[[k+1]] <- sub_res$mu
A[[k+1]] <- sub_res$A
B[[k+1]] <- sub_res$B
Sigma[[k+1]] <- sub_res$Sigma
obj[[k+1]] <- sub_res$obj
}
if(k==1){
if(SAAmethod != "MM"){
initial_Pi <- initial_A %*% t(initial_B)
if(muorz){
temp <- t(y - initial_Pi %*% x - initial_D %*% z) %>% split(seq_len(nrow(.)))
} else {
temp <- t(y - initial_Pi %*% x ) %>% split(seq_len(nrow(.)))
}
xk <- sapply(temp, function(tem) t(tem) %*% solve(initial_Sigma) %*% tem)
obj[[1]] <- 1/2 * log(det(initial_Sigma)) +(1+P)/(2*(N)) * sum(log(1+xk))
} else {
initial_Pi <- initial_A %*% t(initial_B)
if(mu && znull){
temp <- t(y - initial_Pi %*% x - initial_mu %*% matrix(rep(1, ncol(x)), nrow = 1)) %>% split(seq_len(nrow(.)))
} else if(!mu && !znull){
temp <- t(y - initial_Pi %*% x - initial_D %*% z) %>% split(seq_len(nrow(.)))
} else {
temp <- t(y - initial_Pi %*% x ) %>% split(seq_len(nrow(.)))
}
xk <- sapply(temp, function(tem) t(tem) %*% solve(initial_Sigma) %*% tem)
obj[[1]] <- 1/2 * log(det(initial_Sigma)) +(1+P)/(2*(N)) * sum(log(1+xk))
}
}
}
if(ProgressBar)
pb$tick()$print()
runtime[[k+1]] <- Sys.time()
}
if(method == "SMM"){
if(muorz){
temp <- t(y - Pi[[k+1]] %*% x - D[[k+1]] %*% z) %>% split(seq_len(nrow(.)))
} else {
temp <- t(y - Pi[[k+1]] %*% x) %>% split(seq_len(nrow(.)))
}
xkk <- sapply(temp, function(tem) t(tem) %*% solve(Sigma[[k+1]]) %*% tem)
obj[[k+1]] <- 1/2 * log(det(Sigma[[k+1]])) +(1+P)/(2*(N)) * sum(log(1+xkk))
} else if(method == "SAA"){
if(SAAmethod == "optim"){
A <- lapply(para, function(para) matrix(para[1:(P*r)], nrow = P))
B <- lapply(para, function(para) matrix(para[(P*r+1):(2*P*r)], nrow = P))
if(muorz){
D <- lapply(para, function(para) matrix(para[(2*P*r+1):(P*r*2+length(initial_D))], nrow = P))
Sigma <- lapply(para,
function(para){
Sigma <- matrix(nrow = P, ncol = P)
Sigma[lower.tri(Sigma,diag=TRUE)] <- para[(P*r*2+length(initial_D)+1):(length(para))]
Sigma <- make_symm(Sigma)
return(Sigma)
})
} else {
Sigma <- lapply(para,
function(para){
Sigma <- matrix(nrow = P, ncol = P)
Sigma[lower.tri(Sigma,diag=TRUE)] <- para[(2*P*r+1):(length(para))]
Sigma <- make_symm(Sigma)
return(Sigma)
})
}
}
}
if(SAAmethod != "MM"){
if(mu){
mu <- lapply(D[sapply(D, function(x) !is.null(x))], function(x) x[,ncol(x)])
if(!znull)
D <- lapply(D[sapply(D, function(x) !is.null(x))], function(x) x[,seq_len(ncol(x)-1)])
} else {
mu <- NULL
}
} else {
if(mu){
mu <- MM_mu
} else {
mu <- NULL
}
}
if(znull){
D <- NULL
}
history <- list(mu = mu, A = A, B = B, D = D, Sigma = Sigma, obj = obj, runtime = c(0,diff(do.call(base::c,runtime))))
output <- list(method = method,
SAAmethod = SAAmethod,
spec = list(N = N, P = P, R = R, r = r, initial_size = initial_size, addon = addon),
history = history,
mu = mu[[length(mu)]],
A = A[[length(A)]],
B = B[[length(B)]],
D = D[[length(D)]],
Sigma = Sigma[[length(Sigma)]],
obj = obj[[length(obj)]],
data = returned_data)
return(new_ORRRR(output))
} |
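
# Hedged usage sketch for ORRRR(): fit a rank-1 online robust reduced-rank
# regression on simulated matrices. The data-generating step is an assumption
# for illustration only, not part of the function above.
# set.seed(2222)
# y <- matrix(rnorm(1000 * 3), ncol = 3)
# x <- matrix(rnorm(1000 * 4), ncol = 4)
# fit <- ORRRR(y = y, x = x, r = 1, initial_size = 100, addon = 10,
#              method = "SMM", ProgressBar = FALSE)
# fit$A; fit$B; fit$obj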
Best.Index <-
function (tree = tree, distribution = distribution, jtip = jtip,
replicates=replicates, success=c(success) ) {
rank <- Rank.Indices(Calculate.Index(tree = tree,distribution = distribution))
aciertos <- NULL
aciertos$I <- aciertos$Ie <- aciertos$Is <- aciertos$Ise <- aciertos$W <- aciertos$We <- aciertos$Ws <- aciertos$Wse <-0
  for (i in 1:replicates){
    jack <- Rank.Indices(Calculate.Index(tree = tree, distribution = distribution, jtip))
    for (idx in c("I", "Ie", "Is", "Ise", "W", "We", "Ws", "Wse")) {
      ok <- as.integer(all(rank[[idx]][success] == jack[[idx]][success]))
      aciertos[[idx]] <- aciertos[[idx]] + ok
    }
  }
aciertos <- as.data.frame(aciertos)
aciertos <- aciertos/replicates*100
return(aciertos)
} |
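
# Hedged usage sketch: Best.Index() jackknifes the terminals (jtip) and
# reports, per index, the percentage of replicates whose top `success` ranks
# match the full-data ranking. `tree` and `distribution` are assumed inputs
# in whatever format Calculate.Index() expects.
# Best.Index(tree = tree, distribution = distribution, jtip = 1,
#            replicates = 100, success = 1:3)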
rd <- lsa[which(lsa[,"domain"] == "reading"),]
rd15 <- rd[rd$year == 2015, ]
rd15_1 <- rd15[rd15$nest == 1, ]
suppressMessages(txt <- capture.output ( m_withoutCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep",
imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1,
cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old",
engine = "BIFIEsurvey")))
suppressMessages(txt2 <- capture.output ( m_oldCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep",
imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1,
cross.differences = TRUE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old",
engine = "BIFIEsurvey")))
test_that("No cross differences", {
expect_equal(m_withoutCross[["SE_correction"]], NULL)
expect_false("SE_correction" %in% names(m_withoutCross))
})
test_that("Old cross differences", {
expect_equal(class(m_oldCross[["SE_correction"]]), c("old", "list"))
expect_equal(m_oldCross[["SE_correction"]][[1]], NULL)
})
rd15$sex_logic <- as.logical(as.numeric(rd15$sex) - 1)
test_that("error for two logical grouping variables", {
expect_error(capture.output(repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep",
imp="imp", nest="nest", groups = c("sex_logic", "mig"), group.splits = 0:1,
cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old")),
"Factor levels of grouping variables are not disjunct.")
})
test_that("error for string with multiple categories to jk2.mean", {
rd15_2 <- rd15_1
rd15_2$country <- as.character(rd15_2$country)
expect_error(test <- repMean(datL = rd15_2, wgt = "wgt", imp = "imp", dependent = "country", ID = "idstud"),
"Dependent variable 'country' has to be of class 'integer' or 'numeric'.")
})
test_that("PISA runs through", {
expect_silent(suppressWarnings(suppressMessages(txt2 <- capture.output(m_oldCross <- repMean(datL = rd15, ID="idstud",
type = "JK2", PSU = "jkzone", repInd = "jkrep",
imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1,
cross.differences = TRUE, dependent = "score", na.rm=FALSE,
doCheck=TRUE, linkErr = "leScore", crossDiffSE="rep")))))
}) |
checkIndex <- function(
findex=NULL,
mindex=NULL,
gindex=NULL,
excludefp=TRUE,
fast=FALSE,
warn=!quiet,
logfile=localtestdir(".", "misc/ExampleTests/warnings.txt"),
quiet=rdwdquiet()
)
{
alldupli <- function(x) duplicated(x) | duplicated(x, fromLast=TRUE)
out <- paste0("\ncheckIndex results at ", as.character(Sys.time()), " for\n", dwdbase)
itime <- file.mtime("data/fileIndex.rda")
if(!is.na(itime)) out <- paste0(out, "\nFile 'data/fileIndex.rda' was last modified ", itime)
out <- paste0(out, berryFunctions::traceCall(skip=1), "-------")
if(!is.null(findex)){
if(!quiet) message("Checking fileIndex...")
duplifile <- findex[!grepl("minute",findex$res),]
duplifile <- duplifile[alldupli(duplifile[,1:4]),]
duplifile <- duplifile[!is.na(duplifile$id),]
duplifile <- duplifile[duplifile$res!="subdaily" & duplifile$var!="standard_format",]
if(nrow(duplifile)>0)
{
rvp <- paste(duplifile$res,duplifile$var,duplifile$per, sep="/")
per_folder <- lapply(unique(rvp), function(p)
{i <- unique(duplifile$id[rvp==p])
paste0("- ", berryFunctions::round0(length(i), pre=2, flag=" "), " at ", p, "; ",
berryFunctions::truncMessage(i, ntrunc=10, prefix=""))
})
per_folder <- paste(unlist(per_folder), collapse="\n")
out <- c(out, "IDs with duplicate files:", per_folder)
}
duplifile <- findex[findex$ismeta &
grepl("txt$", findex$path) &
findex$res != "multi_annual",]
duplifile$rvp <- paste(duplifile$res, duplifile$var, duplifile$per, sep="/")
duplifile <- duplifile$path[alldupli(duplifile$rvp)]
if(length(duplifile)>0)
out <- c(out, "Duplicate 'Beschreibung' files:", paste("-",duplifile))
}
if(!is.null(mindex)){
if(!quiet) message("Checking metaIndex...")
newout <- function(out,ids,colcomp,column,textvar,unit="")
{
new <- sapply(ids, function(i)
{tt <- sort(table(mindex[colcomp==i,column]), decreasing=TRUE)
unname(paste0("- ", textvar,"=",i, ": ", paste0(tt,"x",names(tt),unit, collapse=", ")))
})
c(out, new)
}
id_uni <- unique(mindex$Stations_id)
eletol <- 2.1
id_ele <- pbapply::pbsapply(id_uni, function(i)
any(abs(diff(mindex[mindex$Stations_id==i,"Stationshoehe"]))>eletol))
if(any(id_ele))
{
out <- c(out,paste0("Elevation differences >",eletol,"m:"))
out <- newout(out, id_uni[id_ele], mindex$Stations_id, "Stationshoehe", "ID", "m")
}
if(!fast){
loctol <- 0.040
id_loc <- pbapply::pbsapply(id_uni, function(i)
maxlldist("geoBreite","geoLaenge", mindex[mindex$Stations_id==i,], each=FALSE)>loctol)
mindex$coord <- paste(mindex$geoBreite, mindex$geoLaenge, sep="_")
if(any(id_loc))
{
out <- c(out, paste0("Location differences >",loctol*1000,"m:"))
out <- newout(out, id_uni[id_loc], mindex$Stations_id, "coord", "ID")
}
}
id_name <- pbapply::pbsapply(id_uni, function(i)
length(unique(mindex[mindex$Stations_id==i,"Stationsname"]))>1)
if(any(id_name))
{
out <- c(out, "Different names per id:")
out <- newout(out, id_uni[id_name], mindex$Stations_id, "Stationsname", "ID")
}
name_uni <- unique(mindex$Stationsname)
name_id <- pbapply::pbsapply(name_uni, function(n)
length(unique(mindex[mindex$Stationsname==n,"Stations_id"]))>1)
if(excludefp) name_id[name_uni=="Suderburg"] <- FALSE
if(any(name_id))
{
out <- c(out, "More than one id per name:")
out <- newout(out, name_uni[name_id], mindex$Stationsname,"Stations_id", "Name")
}
}
  if(!is.null(findex) & !is.null(mindex) & FALSE){ # never runs: '& FALSE' disables this exploratory date-range comparison
if(!quiet) message("Comparing fileIndex and metaIndex date ranges...")
findex$start <- as.Date(findex$start, "%Y%m%d")
findex$end <- as.Date(findex$end, "%Y%m%d")
mindex$von_datum <- as.Date(as.character(mindex$von_datum), "%Y%m%d")
mindex$bis_datum <- as.Date(as.character(mindex$bis_datum), "%Y%m%d")
m2 <- mindex[mindex$res=="annual" & mindex$var=="more_precip" & mindex$per=="historical" & mindex$hasfile,]
f2 <- findex[findex$res=="annual" & findex$var=="more_precip" & findex$per=="historical" & !is.na(findex$id),]
mf <- merge(m2[,c("Stations_id", "von_datum", "bis_datum")],
f2[,c("id", "start", "end")], by.x="Stations_id", by.y="id")
rm(m2, f2)
mf$diff_von <- round(as.integer(mf$start - mf$von_datum)/365,2)
mf$diff_bis <- round(as.integer(mf$end - mf$bis_datum)/365,2)
colnames(mf) <- gsub("_datum", "_meta", colnames(mf))
colnames(mf) <- gsub("start", "von_file", colnames(mf))
colnames(mf) <- gsub("end", "bis_file", colnames(mf))
mf[mf$diff_von > 5,]
mf[mf$diff_bis < -30,]
}
if(!is.null(gindex)){
if(!quiet) message("Checking geoIndex...")
columns <- !colnames(gindex) %in% c("display","col")
fpid <- c(14306,921, 13967,13918, 14317,3024, 2158,7434, 785,787, 15526, 5248,5249, 396,397)
gindex_id <- gindex
if(excludefp) gindex_id <- gindex[!gindex$id %in% fpid,]
coord <- paste(gindex_id$lon, gindex_id$lat, sep="_")
if(anyDuplicated(coord))
{
out <- c(out, "Coordinates used for more than one station:")
new <- sapply(coord[duplicated(coord)], function(c){
g <- gindex_id[coord==c, ]
t <- toString(paste0(g$nfiles+g$nonpublic, "x ID=", g$id, " (", g$name, ")"))
paste0("- ", c, ": ", t)
})
out <- c(out, new)
}
}
logfileprint <- if(!is.null(logfile)) paste0(" openFile('",
normalizePath(logfile,winslash="/", mustWork=FALSE),"')") else ""
if(length(out)>2 & warn) warning("There are issues in the indexes.", logfileprint)
out <- c(out, "\n")
out <- paste(out, collapse="\n")
if(!is.null(logfile)) cat(out, file=logfile, append=TRUE)
return(invisible(out))
} |
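
# Hedged usage sketch (assumes the package's index objects are available,
# e.g. via data("fileIndex"), data("metaIndex"), data("geoIndex")):
# chk <- checkIndex(findex = fileIndex, mindex = metaIndex, gindex = geoIndex,
#                   logfile = NULL, warn = FALSE)
# cat(chk)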
na.omit.data.frame.mvmeta <-
function (object, ...) {
n <- length(object)
omit <- FALSE
omit2 <- TRUE
vars <- seq_len(n)
if(!is.null(y <- model.response(object))) vars <- vars[-1]
for(j in vars) {
x <- object[[j]]
if (!is.atomic(x)) next
x <- is.na(x)
d <- dim(x)
if (is.null(d)||length(d)!=2L)
omit <- omit | x
else for(ii in 1L:d[2L]) omit <- omit|x[,ii]
}
if(!is.null(y)) {
y <- is.na(y)
d <- dim(y)
if (is.null(d)||length(d)!=2L)
omit2 <- omit2 & y
else for(ii in 1L:d[2L]) omit2 <- omit2&y[,ii]
}
omit <- omit|omit2
xx <- object[!omit,,drop=FALSE]
if (any(omit>0L)) {
temp <- seq(omit)[omit]
names(temp) <- attr(object,"row.names")[omit]
attr(temp,"class") <- "omit"
attr(xx,"na.action") <- temp
}
xx
} |
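
# Hedged sketch of the dispatch above: for a model frame carrying a matrix
# response (as in multivariate meta-analysis), rows with any NA in the
# response or in a predictor are dropped and recorded in "na.action".
# mf <- model.frame(cbind(y1, y2) ~ x, data = d, na.action = "na.pass")
# class(mf) <- c("data.frame.mvmeta", class(mf))
# mf <- na.omit(mf)
# attr(mf, "na.action")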
owid_map <- function(data = data.frame(), col = 4, palette = "Reds", mode = "plot", year = NULL) {
.year <- year
if (colnames(data)[3] == "date") {
colnames(data)[3] <- "year"
}
if (is.null(year)) {
data <- data %>%
filter(year == max(year))
} else {
if (!is.numeric(year)) {
stop("year must be numeric")
} else if (!year %in% unique(data$year)) {
stop(paste("There is no data for", year))
} else {
data <- data %>%
filter(year == .year)
}
}
if (is.numeric(col)) {
value <- colnames(data)[col]
colnames(data)[col] <- "value"
} else {
value <- col
colnames(data)[colnames(data) == value] <- "value"
}
title <- attributes(data)$data_info[[1]]$display$name
world <- world_map_data()
map_data <- world %>%
left_join(data, by = c("owid_name" = "entity"))
if (mode == "plot") {
map_data %>%
ggplot2::ggplot(ggplot2::aes(fill = value, id = .data$owid_name)) +
ggplot2::geom_sf(size = 0.05, colour = "black") +
ggplot2::scale_fill_distiller(palette = palette, direction = 1, na.value = "grey80") +
ggplot2::labs(title = title) +
theme_owid() +
ggplot2::theme(axis.line.x = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
axis.text = ggplot2::element_blank(),
legend.position = "bottom",
legend.title = ggplot2::element_blank(),
legend.key.width = ggplot2::unit(2, units = "cm"),
legend.key.height = ggplot2::unit(0.3, units = "cm"),
                     plot.title = ggplot2::element_text(vjust = 1))
} else if (mode == "view") {
pal <- leaflet::colorNumeric(
palette = palette,
domain = map_data$value
)
pal_leg <- leaflet::colorNumeric(
palette = palette,
domain = map_data$value,
na.color = NA
)
labels <- sprintf(
"<strong>%s</strong><br/>%g",
map_data$owid_name, map_data$value
) %>% lapply(htmltools::HTML)
map_data %>%
leaflet::leaflet() %>%
      leaflet::addPolygons(
fillColor = ~pal(value),
weight = 0.2,
opacity = 1,
color = "black",
dashArray = "1",
fillOpacity = 0.7,
highlight = leaflet::highlightOptions(
weight = 2,
color = "
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE
),
label = labels,
labelOptions = leaflet::labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto"
)
) %>%
leaflet::addLegend(pal = pal_leg, values = ~value, opacity = 0.7, title = NULL,
position = "bottomleft", labFormat = leaflet::labelFormat()) %>%
leaflet::addControl(paste0("<b>", title, "<b/>"), position = "topright") %>%
leaflet::addTiles("", attribution = "<a href = 'https://ourworldindata.org/' title = 'Research and data to make progress against the world\u2019s largest problems'>Our World In Data | <a/><a href = 'https://www.naturalearthdata.com/' title = 'Made with Natural Earth. Free vector and raster map data'>Natural Earth Data<a/>")
}
}
world_map_data <- function() {
world <- readRDS(system.file("extdata", "world_map_sf.rds", package = "owidR"))
return(world)
} |
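
# Hedged usage sketch (assumes the package's owid() downloader, which returns
# an entity/code/year data frame with the indicator in column 4):
# le <- owid("life-expectancy")
# owid_map(le, palette = "Blues", mode = "plot")               # static ggplot map
# owid_map(le, palette = "Blues", mode = "view", year = 2015)  # interactive leaflet map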
test_that('parse_phase1_outcomes "" correctly', {
x <- parse_phase1_outcomes('', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 0)
})
test_that('parse_phase1_outcomes parses "" correctly to list', {
x <- parse_phase1_outcomes('', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 0)
expect_equal(x$dose, integer(length = 0))
expect_equal(x$tox, integer(length = 0))
})
test_that('parse_phase1_outcomes parses "1NNN 3NTT" correctly', {
x <- parse_phase1_outcomes('1NNN 3NTT', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 6)
expect_equal(x$dose, c(1, 1, 1, 3, 3, 3))
expect_equal(x$tox, c(0, 0, 0, 0, 1, 1))
})
test_that('parse_phase1_outcomes parses "1NNN 3NTT" correctly to list', {
x <- parse_phase1_outcomes('1NNN 3NTT', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 6)
expect_equal(x$dose, c(1, 1, 1, 3, 3, 3))
expect_equal(x$tox, c(0, 0, 0, 0, 1, 1))
})
test_that('parse_phase1_outcomes parses "1N2T2N2N2N" correctly', {
x <- parse_phase1_outcomes('1N2T2N2N2N', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 5)
expect_equal(x$dose, c(1, 2, 2, 2, 2))
expect_equal(x$tox, c(0, 1, 0, 0, 0))
})
test_that('parse_phase1_outcomes parses "1N2T2N2N2N" correctly to list', {
x <- parse_phase1_outcomes('1N2T2N2N2N', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 5)
expect_equal(x$dose, c(1, 2, 2, 2, 2))
expect_equal(x$tox, c(0, 1, 0, 0, 0))
})
test_that('parse_phase1_outcomes parses "5T" correctly', {
x <- parse_phase1_outcomes('5T', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 1)
expect_equal(x$dose, c(5))
expect_equal(x$tox, c(1))
})
test_that('parse_phase1_outcomes parses "5T" correctly to list', {
x <- parse_phase1_outcomes('5T', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 1)
expect_equal(x$dose, c(5))
expect_equal(x$tox, c(1))
})
test_that('parse_phase1_outcomes parses "1NTT 2T 2NTNNTN 3N" correctly', {
x <- parse_phase1_outcomes('1NTT 2T 2NTNNTN 3N', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 11)
expect_equal(x$dose, c(1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses "1NTT 2T 2NTNNTN 3N" correctly to list', {
x <- parse_phase1_outcomes('1NTT 2T 2NTNNTN 3N', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 11)
expect_equal(x$dose, c(1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses "96NTT 40T 1NTNNTN 174N" correctly', {
x <- parse_phase1_outcomes('96NTT 40T 1NTNNTN 174N', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 11)
expect_equal(x$dose, c(96, 96, 96, 40, 1, 1, 1, 1, 1, 1, 174))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses "96NTT 40T 1NTNNTN 174N" correctly to list', {
x <- parse_phase1_outcomes('96NTT 40T 1NTNNTN 174N', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 11)
expect_equal(x$dose, c(96, 96, 96, 40, 1, 1, 1, 1, 1, 1, 174))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses " 1NTT 2T 2NTNNTN 2N" correctly', {
x <- parse_phase1_outcomes(' 1NTT 2T 2NTNNTN 2N', as_list = FALSE)
expect_true(is.data.frame(x))
expect_equal(nrow(x), 11)
expect_equal(x$dose, c(1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses " 1NTT 2T 2NTNNTN 2N" correctly to list', {
x <- parse_phase1_outcomes(' 1NTT 2T 2NTNNTN 2N', as_list = TRUE)
expect_true(is.list(x))
expect_equal(x$num_patients, 11)
expect_equal(x$dose, c(1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2))
expect_equal(x$tox, c(0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0))
})
test_that('parse_phase1_outcomes parses "12NTT Nigel Farage" with error', {
expect_error(parse_phase1_outcomes('12NTT Nigel Farage', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "12NTT Nigel Farage" to list with error', {
expect_error(parse_phase1_outcomes('12NTT Nigel Farage', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses " 1NTT 2.0T 2NTNNTN 2N" with error', {
expect_error(parse_phase1_outcomes(' 1NTT 2.0T 2NTNNTN 2N', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses " 1NTT 2.0T 2NTNNTN 2N" to list with error', {
expect_error(parse_phase1_outcomes(' 1NTT 2.0T 2NTNNTN 2N', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses ".1NTT 2T 2NTNNTN 2N" with error', {
expect_error(parse_phase1_outcomes('.1NTT 2T 2NTNNTN 2N', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses ".1NTT 2T 2NTNNTN 2N" to list with error', {
expect_error(parse_phase1_outcomes('.1NTT 2T 2NTNNTN 2N', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "12NTT 2T 2NTNNTN -1N" with error', {
expect_error(parse_phase1_outcomes('12NTT 2T 2NTNNTN -1N', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "12ETT 2T 2NTNNTN -1N" to list with error', {
expect_error(parse_phase1_outcomes('12ETT 2T 2NTNNTN -1N', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "-12NTT 2T 2NTNNTN 1N" with error', {
expect_error(parse_phase1_outcomes('-12NTT 2T 2NTNNTN 1N', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "-12NTT 2T 2NTNNTN 1N" to list with error', {
expect_error(parse_phase1_outcomes('-12NTT 2T 2NTNNTN 1N', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "12NTT 2T -2NTNNTN 1N" with error', {
expect_error(parse_phase1_outcomes('12NTT 2T -2NTNNTN 1N', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "12NTT 2T -2NTNNTN 1N" to list with error', {
expect_error(parse_phase1_outcomes('12NTT 2T -2NTNNTN 1N', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "1T 0NN" with error', {
expect_error(parse_phase1_outcomes('1T 0NN', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "1T 0NN" to list with error', {
expect_error(parse_phase1_outcomes('1T 0NN', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "0NNTTNNTT" with error', {
expect_error(parse_phase1_outcomes('0NNTTNNTT', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "0NNTTNNTT" to list with error', {
expect_error(parse_phase1_outcomes('0NNTTNNTT', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses " " with error', {
expect_error(parse_phase1_outcomes(' ', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses " " to list with error', {
expect_error(parse_phase1_outcomes(' ', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "1NT TNT" with error', {
expect_error(parse_phase1_outcomes('1NT TNT', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "1NT TNT" to list with error', {
expect_error(parse_phase1_outcomes('1NT TNT', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "1NT T3NT" with error', {
expect_error(parse_phase1_outcomes('1NT T3NT', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "1NT T3NT" to list with error', {
expect_error(parse_phase1_outcomes('1NT T3NT', as_list = TRUE))
})
test_that('parse_phase1_outcomes parses "1NT 3TNT 4" with error', {
expect_error(parse_phase1_outcomes('1NT 3TNT 4', as_list = FALSE))
})
test_that('parse_phase1_outcomes parses "1NT 3TNT 4" to list with error', {
expect_error(parse_phase1_outcomes('1NT 3TNT 4', as_list = TRUE))
}) |
mix <- function(formula, data, weights, cWeights=FALSE, center_group=NULL,
center_grand=NULL, max_iteration=10, nQuad=13L, run=TRUE,
verbose=FALSE, acc0=120, keepAdapting=FALSE, start=NULL,
fast=FALSE, family=NULL) {
call <- match.call()
if(!inherits(formula, "formula")) stop(paste0("The argument ", sQuote("formula"), " must be a formula."))
if(!inherits(data, "data.frame")) stop(paste0("The argument ", sQuote("data"), " must be a data.frame."))
if(length(class(data)) > 1) {
data <- as.data.frame(data)
}
if(nQuad <= 0) stop(paste0("The argument ", sQuote("nQuad"), " must be a positive integer."))
if(!inherits(run, "logical")) stop(paste0("The argument ", sQuote("run"), " must be a logical."))
if(!inherits(verbose, "logical")) stop(paste0("The argument ", sQuote("verbose"), " must be a logical."))
if(!inherits(weights, "character")) stop(paste0("The argument ", sQuote("weights"), " must be a character vector of weight column names in ", sQuote("data"), "."))
if(any(!weights %in% colnames(data))) stop(paste0("The argument ", sQuote("weights"), " must specify valid columns in ", sQuote("data"), "."))
if(acc0 <= 0) stop(paste0("The argument ", sQuote("acc0"), " must be a positive integer."))
if(!missing(fast)) warning(paste0("The ", sQuote("fast"), " argument is deprecated."))
if(any(grepl("[|].*[.]",attributes(terms(formula))$term.labels))) stop("The formula is not valid for mix. The name of conditioning variables must not contain a dot. Try renaming variables after | in the fomrula so they do not contain a dot.")
if(any(is.na(data[ , c(all.vars(formula), weights)]))) {
cl <- call("model.frame",
formula=formula(paste0("~", paste0(unique(c(all.vars(formula), weights)),collapse=" + "))),
data=data)
dt <- eval(cl, parent.frame(1L))
warning(paste0("There were ", sum(nrow(data) - nrow(dt)), " rows with missing data. These have been removed."))
data <- dt
rm(dt)
}
if(length(weights) == 1) {
stop(paste0("The argument ", sQuote("weights"), " must be a list of column names with length equal to levels."))
}
data[apply(data[ , weights] <= 0, 1, any), weights] <- NA
if(any(is.na(data[ , weights]))) {
warning(paste0("There were ", sum(complete.cases(data)==FALSE), " rows with non-positive weights. These have been removed."))
data <- data[complete.cases(data), ]
}
if(!is.null(family)) {
if(inherits(family, "character")) {
family <- do.call(family, args=list())
}
if(!inherits(family, "family")) {
stop(paste0("The family argument must be of class ", dQuote("family"), "."))
}
family$lnl <- switch(family$family,
binomial = function(y, mu, w, sd) {
w * dbinom(x=y, size=rep(1,length(y)), prob=mu, log=TRUE)
},
poisson = function(y, mu, w, sd) {
w * dpois(x=y, lambda=mu, log=TRUE)
},
gaussian = function(y, mu, w, sd) {
w * dnorm(x=y, mean=mu, sd=sd, log=TRUE)
},
Gamma = function(y, mu, w, sd) {
stop("The gamma family is not implemented.")
},
inverse.gaussian = function(y, mu, w, sd) {
stop("The inverse Gaussian family is not implemented.")
},
function(y, mu, w, sd) {
stop(paste0("Unknown family."))
}
)
}
adapter <- "MAP"
weights0 <- weights
acc0 <- round(acc0)
nQuad <- round(nQuad)
lformula <- lFormula(formula=formula, data=data)
unparsedGroupNames <- names(lformula$reTrms$cnms)
groupParser <- function(groupi) {
all.vars(formula(paste0("~", groupi)))
}
groupNames <- rev(unique(unlist(lapply(unparsedGroupNames, groupParser))))
data <- data[do.call(order, lapply(rev(groupNames), function(colN) data[ , colN])), ]
if(!is.null(center_group)) {
if (any(grep(":|/", names(center_group)))) {
nested_groups <- names(center_group)[grep(":|/", names(center_group))]
for (var in nested_groups){
vars <- unlist(strsplit(var , ":|/"))
data[,var] <- paste0(data[ , vars[1]], ":", data[ , vars[2]])
}
}
if(!all(names(center_group) %in% names(data))){
stop("Not all centering group variables are found in the data set. ")
} else {
for(name in names(center_group)) {
lev <- min(which(groupNames %in% unlist(strsplit(name,":|/"))))
X <- sparse.model.matrix(center_group[[name]],data=data)
vars <- colnames(X)[-1]
X <- cbind(X, data[ , weights0[lev]])
colnames(X)[ncol(X)] <- weights0[lev]
data[ , vars] <- sapply(vars, function(var){
X[ , var] -
ave(X[ , var] * X[ , weights0[lev]], data[ , name])/
(nrow(X)/sum(X[ , weights0[lev]]))
})
rm(X)
}
}
}
if(!is.null(center_grand)){
X <- sparse.model.matrix(center_grand, data=data)
vars <- colnames(X)[-1]
data[ , vars] <- sapply(vars, function(var){X[ , var] - ave(X[ , var])})
rm(X)
}
row.names(data) <- NULL
if(is.null(family)) {
if(verbose) {
cat("Using lmer to get an approximate (unweighted) estimate and model structure.\n")
}
suppressWarnings(lme <- lmer(formula, data, REML=FALSE))
} else {
if(verbose) {
cat("Using glmer to get an approximate (unweighted) estimate and model structure.\n")
}
lme <- glmer(formula, data, family=family)
}
mf <- model.frame(lme)
responseCol <- attributes(attributes(mf)$terms)$response
y <- as.numeric(mf[ , responseCol])
if(!is.null(family) && family$family == "binomial") {
if(length(unique(y)) == 2) {
y <- ifelse(y == min(y), 0, 1)
}
if(any(!y %in% c(0,1))) {
stop("For a binomial model the outcomes must be 0 or 1.")
}
}
model_matrix <- getME(lme,"mmList")
z_groups <- names(model_matrix)
all_groups <- names(summary(lme)$ngrps)
groupNames <- all_groups
wgts0 <- data[ , weights]
if(cWeights) {
for(i in (ncol(wgts0)-1):1) {
wgts0[ , i] <- wgts0[ , i] * wgts0[ , i+1]
}
}
missingGroupVars <- all_groups[!all_groups %in% names(data)]
presentVars <- all_groups[all_groups %in% names(data)]
for(i in seq_along(presentVars)) {
if(inherits(data[, presentVars[i]], "factor")) {
data[, presentVars[i]] <- droplevels(data[, presentVars[i]])
}
}
all_groups_lowest_level <- all_groups
for(mgi in seq_along(missingGroupVars)) {
vars <- rownames(attr(terms.formula(as.formula(paste(". ~", paste(missingGroupVars[mgi], collapse="+"))) ), "factors"))[-1]
for(i in seq_along(vars)) {
if(inherits(data[, vars[i]], "factor")) {
data[, vars[i]] <- droplevels(data[, vars[i]])
}
}
data[ , missingGroupVars[mgi]] <- apply(data[ , vars], 1, function(x) {
paste(x, collapse=":")
})
vtab <- lapply(vars, function(x) {
tab <- table(data[ , x])
sum(tab>0)
})
all_groups_lowest_level[all_groups_lowest_level == all_groups[mgi]] <- vars[which.max(unlist(vtab))]
}
Z <- list(NULL)
ZFull <- list(NULL)
n_rows <- nrow(data)
for (i in 1:length(all_groups)){
z_to_merge <- grepl(paste0("[|]\\W", all_groups[i], "$"), z_groups)
Z_i <- matrix( unlist(model_matrix[z_to_merge], use.names=FALSE), nrow=n_rows)
ZFull <- c(ZFull, list(Z_i))
if(i > 1) {
Z_i <- Z_i[!duplicated(data[ , all_groups[i-1]]), , drop=FALSE]
}
Z <- c(Z, list(Z_i))
}
nz <- list(0)
for(i in 1:length(Z)) {
if(!is.null(Z[[i]])) {
nz[[i]] <- ncol(Z[[i]])
}
}
levels <- length(Z)
if(length(weights) != levels) {
stop(paste0("The argument ", sQuote("weights"), " must be a list of column names with length equal to levels."))
}
weights <- list()
for(i in 1:length(nz)) {
df <- data.frame(w=unname(wgts0[ , i]), stringsAsFactors=FALSE)
if(i < length(nz)) {
df$indexp1 <- data[ , all_groups[i]]
}
if(i > 1) {
df$index <- data[ , all_groups[i-1]]
rvar <- function(x) {
if(length(x) <=1) {
return(0)
} else {
return(var(x))
}
}
agg <- aggregate(w ~ index, data=df, FUN=rvar)
if(any(agg$w > sqrt(.Machine$double.eps))) {
stop(paste0("Some level-", i+1, " weights vary within group."))
}
df <- df[!duplicated(df$index), ]
}
weights[[i]] <- df
}
y_label <- as.character(formula[[2]])
k <- length(lmeb <- getME(lme, "fixef"))
parlme <- c(lmeb)
lmesummary <- summary(lme)
ngrp <- lmesummary$ngrps
if(length(unique(ngrp)) != length(ngrp)) {
stop("This does not appear to be a nested model. Some levels of this model have the same number of subject/groups as the level above them.")
}
ngrpW <- lapply(weights, function(wdf) {
return(list(mean=mean(wdf$w), sum=sum(wdf$w), min=min(wdf$w), max=max(wdf$w)))
})
lmeVarDF <- data.frame(lmesummary$varcor)
parlme <- c(parlme, lmeVarDF$vcov)
if(is.null(start)) {
est0 <- parlme
} else {
if(length(start) != length(parlme)) {
stop(paste0("Expecting argument ", sQuote("start"), " to have ", length(est0), " elements, found ", length (start), " elements."))
}
est0 <- start
names(est0) <- names(parlme)
}
ind <- 1
while(sum(grepl(paste0("\\.", ind, "$"), lmeVarDF$grp)) > 0) {
lmeVarDF$grp <- sub(paste0("\\.", ind, "$"), "", lmeVarDF$grp)
ind <- ind + 1
}
lmeVarDF$sdcor <- NULL
lmeVarDF$ngrp <- NA
lmeVarDF$grp <- gsub(".", ":", lmeVarDF$grp, fixed=TRUE)
for(vari in 1:nrow(lmeVarDF)) {
if(lmeVarDF$grp[vari] == "Residual") {
lmeVarDF$ngrp[vari] <- nrow(data)
} else {
lmeVarDF$ngrp[vari] <- ngrp[names(ngrp) == lmeVarDF$grp[vari]]
}
}
ngrp2 <- rev(sort(unique(lmeVarDF$ngrp)))
for(grpi in 1:length(ngrp2)) {
lmeVarDF$level[ngrp2[grpi] == lmeVarDF$ngrp] <- grpi + ifelse("Residual" %in% lmeVarDF$grp, 0, 1)
}
varCorrect <- is.na(lmeVarDF$var2) & lmeVarDF$vcov < 1
if(any(varCorrect)) {
lmeVarDF$vcov[varCorrect] <- pmax(log(lmeVarDF$vcov[varCorrect]) + 1, -3.59)
}
names(est0)[-(1:k)] <- lmeVarDF$grp
lmeVarDF$fullGroup <- paste0(lmeVarDF$grp, ifelse(!is.na(lmeVarDF$var1), paste0(".", lmeVarDF$var1), ""))
covarianceConstructor <- covMat2Cov(lmeVarDF)
C <- covarianceConstructor(est0[-(1:k)])
X <- getME(lme, "X")
if(is.null(family)){
Z <- getME(lme, "Z")
temp_Z <- getME(lme, "Ztlist")
z_levels <- unique(lmeVarDF[lmeVarDF$fullGroup%in%names(temp_Z),c("fullGroup","level")])
Zlist <- list()
for (i in 2:levels){
z_names <- z_levels[z_levels$level==i,"fullGroup"]
Zlist[[i-1]] <- Matrix::t(do.call(rbind, temp_Z[z_names]))
}
pointers <- getME(lme, "Gp")
grp_level <- lmeVarDF$level
names(grp_level) <- lmeVarDF$grp
ref_comps <- names(getME(lme, "cnms"))
Zlevels <- unique(grp_level[ref_comps])
group_id_list <- lapply(all_groups, FUN=function(x){
res <- data.frame(data[,x], as.numeric(as.factor(data[,x])))
colnames(res) <- c(x, "index")
res
})
group_id <- do.call(cbind, group_id_list)
cn <- c()
names(all_groups) <- make.names(all_groups)
for(i in 1:length(all_groups)) {
cn <- c(cn, all_groups[i], paste0(all_groups[i], "_index"))
}
colnames(group_id) <- cn
group_id <- group_id[ , c(all_groups, paste0(all_groups, "_index"))]
weights_list <- lapply(1:length(weights), FUN=function(wi) {
if(wi == 1) {
return(weights[[wi]]$w)
}
x <- weights[[wi]]
x <- x[order( as.numeric(as.factor(x$index)) ), ]
x$w
})
weights_list_cond <- weights_list
if(levels > 2 ){
cWeights <- cbind(group_id, wgts0)
for (level in 1:(levels-1)){
cWeights[ , weights0[level]] <- cWeights[ , weights0[level]] / cWeights[ , weights0[level + 1]]
}
weights_list_cond[[1]] <- cWeights[ , weights0[1]]
for (level in 2:levels){
weights_list_cond[[level]] <- cWeights[!duplicated(cWeights[,all_groups[level-1]]), weights0[level]]
}
}
theta <- getME(lme, "theta")
theta1 <- theta
for(i in 1:length(theta1)) {
theta1[i] <- 1
}
group_id <- group_id[ , c(paste0(all_groups, "_index"), all_groups)]
bsqG <- devG(y, X, Zlist=Zlist, Zlevels=Zlevels, weights=weights_list, weightsC = weights_list_cond,
groupID = group_id,
lmeVarDF = lmeVarDF,
v0=theta1)
if(verbose) {
message("Fitting weighted model.")
}
opt <- bobyqa(fn=bsqG, par=theta)
names(opt$par) <- names(theta)
bsq <- bsqG(opt$par, getBS=TRUE)
if(verbose) {
message("Estimating covariance.")
}
bhatq <- bsq(opt$par, robustSE=TRUE)
b2 <- function(f, optpar, b, sigma0, inds) {
function(x) {
sigma <- x[length(x)]
x <- x[-length(x)]
xp <- optpar
xp[inds] <- x
names(xp) <- names(optpar)
f(v=xp, sigma=sigma, beta=b)$lnl
}
}
varDF <- lmeVarDF[,c("grp", "var1", "var2", "vcov", "ngrp", "level")]
varVC <- list(Residual=bhatq$sigma^2)
varDF$vcov <- 0
varDF$SEvcov <- NA
j_mat_list <- list()
for(li in 2:levels) {
iDelta <- bhatq$iDelta[[li]]
iDeltai <- bhatq$sigma^2 * (iDelta %*% t(iDelta))
varDFi <- varDF[varDF$level %in% c(li,1),]
thetaNamesi <- ifelse(is.na(varDFi$var2), paste0(varDFi$grp,".", varDFi$var1), paste0(varDFi$grp, ".", varDFi$var2, ".", varDFi$var1))[-nrow(varDFi)]
inds <- names(opt$par) %in% thetaNamesi
ihes <- -1*getHessian(b2(f=bsq, optpar=opt$par, b=bhatq$b, sigma0=bhatq$sigma, inds=inds),
x=c(opt$par[inds], sigma=bhatq$sigma))
eihes <- eigen(ihes)
if(max(eihes$values)/min(eihes$values) >= 1/((.Machine$double.eps)^0.25)) {
warning("Numerical instability in estimating the standard error of variance terms. Consider the variance term standard errors approximate.")
ihes <- nearPD(ihes, posd.tol=400*sqrt(.Machine$double.eps))$mat
}
theta_cov_mat <- solve(ihes)
colnames(theta_cov_mat) <- rownames(theta_cov_mat) <- c(names(opt$par[inds]),"sigma")
J <- bhatq$Jacobian[rownames(theta_cov_mat), colnames(theta_cov_mat)]
preVCi <- theta_cov_mat %*% J %*% theta_cov_mat
preVCi <- preVCi[c(thetaNamesi, "sigma"), c(thetaNamesi, "sigma")]
cn <- colnames(iDeltai)
sigma2 <- bhatq$sigma^2
j_list <- list()
for(ii in 1:nrow(iDeltai)) {
for(jj in ii:ncol(iDeltai)) {
varDFi$grad <- 0
if(ii==jj) {
varDF[varDF$level==li & varDF$var1==cn[ii] & is.na(varDF$var2),"vcov"] <- iDeltai[ii,ii]
varDFi$grad[varDFi$var1 %in% rownames(iDelta)[ii] & is.na(varDFi$var2)] <- sigma2 * 2 * iDelta[ii,ii]
if(ii > 1){
for(iii in 1:(ii-1)) {
varDFi$grad[(varDFi$var1 %in% rownames(iDelta)[ii] | varDFi$var2 %in% rownames(iDelta)[ii]) & (varDFi$var1 %in% rownames(iDelta)[iii] | varDFi$var2 %in% rownames(iDelta)[iii])] <- sigma2 * 2 * iDelta[ii,iii]
}
}
varDFi$grad[nrow(varDFi)] <- 2 * iDeltai[ii,ii]/sqrt(sigma2)
varDF[varDF$level==li & varDF$var1==cn[ii] & is.na(varDF$var2),"SEvcov"] <- sqrt(t(varDFi$grad) %*% preVCi %*% varDFi$grad)
j_list <- c(j_list,list(varDFi$grad))
} else {
varDF[varDF$level %in% li & varDF$var1 %in% cn[ii] & varDF$var2 %in% cn[jj],"vcov"] <- iDeltai[ii,jj]
varDF[varDF$level %in% li & varDF$var1 %in% cn[jj] & varDF$var2 %in% cn[ii],"vcov"] <- iDeltai[ii,jj]
if(any(varDF$level==li & (( varDF$var1==cn[ii] & varDF$var2 %in% cn[jj]) | (varDF$var1==cn[jj] & varDF$var2 %in% cn[ii])))) {
for(iii in 1:min(ii, jj)) {
if(ii == iii) {
varDFi$grad[(varDFi$var1 %in% rownames(iDelta)[ii] | varDFi$var2 %in% rownames(iDelta)[ii]) & is.na(varDFi$var2)] <- sigma2 * iDelta[jj,iii]
} else {
varDFi$grad[(varDFi$var1 %in% rownames(iDelta)[ii] | varDFi$var2 %in% rownames(iDelta)[ii]) & (varDFi$var1 %in% rownames(iDelta)[iii] | varDFi$var2 %in% rownames(iDelta)[iii])] <- sigma2 * iDelta[jj,iii]
}
if(jj == iii) {
varDFi$grad[(varDFi$var1 %in% rownames(iDelta)[jj] | varDFi$var2 %in% rownames(iDelta)[jj]) & is.na(varDFi$var2)] <- sigma2 * iDelta[ii,iii]
} else {
varDFi$grad[(varDFi$var1 %in% rownames(iDelta)[jj] | varDFi$var2 %in% rownames(iDelta)[jj]) & (varDFi$var1 %in% rownames(iDelta)[iii] | varDFi$var2 %in% rownames(iDelta)[iii])] <- sigma2 * iDelta[ii,iii]
}
}
varDFi$grad[nrow(varDFi)] <- 2 * iDeltai[ii,jj]/sqrt(sigma2)
varDF[varDF$level==li & (( varDF$var1==cn[ii] & varDF$var2 %in% cn[jj]) | (varDF$var1==cn[jj] & varDF$var2 %in% cn[ii])),"SEvcov"] <- sqrt(t(varDFi$grad) %*% preVCi %*% varDFi$grad)
j_list <- c(j_list,list(varDFi$grad))
}
}
}
}
      jacobian <- matrix(unlist(j_list), ncol=length(j_list[[1]]), byrow=TRUE)
var_mat_var <- jacobian %*% preVCi %*% t(jacobian)
rownames(var_mat_var) <-colnames(var_mat_var) <- names(theta)[names(theta) %in% rownames(preVCi)]
j_list <- list()
j_mat_list[[li-1]] <- var_mat_var
if(li==2) {
varDF[varDF$level==1,"SEvcov"] <- sqrt((2*sqrt(sigma2))^2*preVCi[nrow(preVCi),ncol(preVCi)])
}
varVC <- c(varVC, list(iDeltai))
names(varVC)[li] <- (varDF$grp[varDF$level %in% li])[1]
}
var_of_var <- bdiag(j_mat_list)
rownames(var_of_var) <- colnames(var_of_var) <- unlist(sapply(j_mat_list,FUN=rownames))
varDF$vcov[varDF$grp=="Residual"] <- bhatq$sigma^2
varDF$fullGroup <- paste0(varDF$grp,ifelse(!is.na(varDF$var1),paste0(".",varDF$var1),""))
vars <- varDF$vcov[is.na(varDF$var2)]
names(vars) <- varDF$fullGroup[is.na(varDF$var2)]
nobs <- nrow(X)
names(nobs) <- "Number of obs"
ngroups <- c(nobs, ngrp)
var_between <- sum(varDF[which(!is.na(varDF$var1) & is.na(varDF$var2)),"vcov"])
var_within <- varDF$vcov[varDF$grp=="Residual"]
ICC <- var_between/(var_between+var_within)
env <- environment(bsq)
covMat <- env$lmeVarDF
cc <- function() {
}
assign("cConstructor", value=cc, envir=env)
res <-list(lnlf=bsq, lnl= bhatq$lnl, coef = bhatq$b, ranefs=bhatq$ranef,
SE = bhatq$seBetaRobust,
vars= vars,
theta=bhatq$theta, call=call,
levels=levels, CMODE=bhatq$ranef,
invHessian=bhatq$cov_mat, ICC=ICC,
is_adaptive=FALSE, sigma=bhatq$sigma, cov_mat=bhatq$varBetaRobust,
ngroups=ngroups, varDF=varDF, varVC=varVC,var_theta=var_of_var,
wgtStats=ngrpW)
class(res) <- "WeMixResults"
return(res)
}
if(verbose) {
cat("Identifying initial integration locations estimates for random effects.\n")
}
MAP0 <- MAP(groups=data[ , all_groups, drop=FALSE], y=y, X=X, levels=levels,
Z=Z, ZFull=ZFull, weights=weights, k=k,
qp=gauss.quad(nQuad, "hermite"),
covariance_constructor=covarianceConstructor, verbose=verbose,
nlmevar=nrow(lmeVarDF)-1, nz=nz, acc=acc0, family=family)
BLUE0 <- BLUE(groups=data[,all_groups,drop=FALSE], y=y, X=X, levels=levels,
Z=Z, ZFull=ZFull, weights=weights, k=k,
qp=gauss.quad(nQuad, "hermite"),
covariance_constructor=covarianceConstructor, verbose=verbose,
nlmevar=nrow(lmeVarDF)-1, nz=nz, acc=acc0, family=family)
bvec <- getME(lme, "b")
bvecCuts <- getME(lme, "Gp")
blist <- vector("list", levels)
startLoc <- 1
comps <- names(getME(lme,"cnms"))
n_rows_z <- list()
for (i in 1:length(comps)){
n_rows_z[i] <- lmeVarDF[lmeVarDF$grp == comps[i],"ngrp"][1]
}
blist <- vector("list", levels)
for(cuti in 2:length(bvecCuts)) {
bmat <- matrix(bvec[startLoc:bvecCuts[cuti]], nrow=n_rows_z[[cuti-1]])
li <- unique(lmeVarDF$level[lmeVarDF$ngrp==nrow(bmat)])
blist[[li]] <- cbind(blist[[li]], bmat)
startLoc <- bvecCuts[cuti] + 1
}
omega0 <- blist
a0 <- MAP0(omega0=omega0, par0=est0)
zScale <- lapply(a0$Qi0, function(Qi0i) {
if(is.null(Qi0i)) {
return(NULL)
}
df <- data.frame(detQ=sapply(Qi0i,det))
for(i in 1:length(groupNames)) {
if(length(unique(data[,groupNames[i]])) == nrow(df)) {
df[,groupNames[i]] <- unique(data[,groupNames[i]])
attr(df,"groups") <- c(attr(df, "groups"), groupNames[i])
}
}
df
})
index <- data.frame(data[,c(groupNames)])
names(index) <- groupNames
for(wi in 2:length(weights)) {
Zgrps <- attr(zScale[[wi]], "groups")
weights[[wi]] <- merge(weights[[wi]],zScale[[wi]][,c(Zgrps, "detQ")],by.x="index", by.y=Zgrps)
}
est <- est0
qp <- gauss.quad(nQuad,"hermite")
fn0 <- param.lnl.quad(y=y,
X=X,
levels=levels,
Z=Z,
ZFull=ZFull,
Qi=a0$Qi,
QiFull=a0$QiFull,
omega=a0$omega,
omegaFull=a0$omegaFull,
W=weights,
k=k,
qp=qp,
cConstructor=covarianceConstructor,
acc0=acc0,
mappedDefault=FALSE,
family=family)
fn0R <- param.lnl.quad(y=y,
X=X,
levels=levels,
Z=Z,
ZFull=ZFull,
Qi=a0$Qi,
QiFull=a0$QiFull,
omega=a0$omega,
omegaFull=a0$omegaFull,
W=weights,
k=k,
qp=qp,
cConstructor=covarianceConstructor,
acc0=acc0,
mappedDefault=TRUE,
family=family)
if(!run) {
return(list(lnlf=fn0R, parlme=parlme, omega0=a0$omega0, lme=lme, adapt=a0, weights=weights))
}
d1 <- rep(Inf, length(est))
oldlnl <- fn0(est, varFloor=-3.59)
a00 <- a0
if(verbose) {
cat("Starting Newton steps.\n")
}
covs_and_vars <- est[-(1:k)]
vars <- covs_and_vars[which(is.na(lmeVarDF$var2))]
not_0_vars <- which(vars>-3)+k
est[-(1:k)] <- ifelse(est[-(1:k)]< -4.6,-4.6,est[-(1:k)])
v <- d1
skipNextHessian <- FALSE
defStepsInds <- list(1:length(est0))
stepIndQueue <- list()
dd1 <- d1
dd2 <- outer(dd1,dd1)
iteration <- 0
varFloorBinding <- FALSE
oldest <- est
while(all(iteration < max_iteration,
any(varFloorBinding & max(est - oldest) > 1e-5 ,
!varFloorBinding & max(abs(dd1[c(1:k, not_0_vars)]/pmax(abs(est[c(1:k, not_0_vars)]), 1e-5))) > 1E-5)
)) {
iteration <- iteration + 1
oldest <- est
if(length(stepIndQueue)==0) {
stepIndQueue <- defStepsInds
}
thisStepInds <- stepIndQueue[[1]]
d1 <- getGrad(fn0, est, thisStepInds)
dd1[thisStepInds] <- d1
if(!skipNextHessian) {
d2 <- getHessian(fn0, est, thisStepInds)
dd2[thisStepInds, thisStepInds] <- d2
}
d2 <- dd2[thisStepInds, thisStepInds]
fact <- 1
v <- rep(0, length(est0))
v[thisStepInds] <- solve(d2) %*% d1
if(verbose) {
cat("step:", iteration, "/", max_iteration, "\n")
cat("lnl:", oldlnl, " max (relative) derivative=", max(abs(dd1[c(1:k,not_0_vars)]/pmax(abs(est[c(1:k,not_0_vars)]), 1e-5))), " ")
cat("\nCurrent solution, gradient, and Newton step:\n")
prnt <- cbind(oldEstimate=est, firstDeriv=dd1, proposedNewtonEstimate=est - v)
rownames(prnt) <- c(names(est0)[1:k], paste0("ln var ", names(est0)[-(1:k)], ""))
colnames(prnt) <- c("previous Est", "firstDeriv", "Newton Step")
print(prnt)
}
newest <- est - fact * v
newlnl <- fn0(newest, varFloor=-3.59)
stp <- 0
while(newlnl < oldlnl) {
stp <- stp + 1
if(verbose) {
cat("Halving step size.\n")
}
fact <- fact/2
if(stp > 5 & fact > 0) {
if(verbose) {
cat("Reversing step direction.\n")
}
fact <- -1
stp <- 0
}
if (stp>10) {
fact <- 0
oldlnl <- oldlnl - 1
}
newest <- est - fact * v
newlnl <- fn0(newest, varFloor=-3.59)
}
if(verbose) {
cat("\n")
}
est <- est - fact * v
oldlnl <- newlnl
if(any(est[-(1:k)] < -3.59)) {
est[-(1:k)] <- ifelse(est[-(1:k)] < -3.59, -3.59, est[-(1:k)])
varFloorBinding <- TRUE
}
if(keepAdapting) {
if(verbose) {
cat("Adapting random effect estimates.\n")
}
if(adapter == "BLUE") {
a0 <- BLUE0(omega0=a0$omega0, par0=est0, Qi0=a0$Qi0)
} else {
a0 <- MAP0(omega0=a0$omega0,
par0=est,
verb=FALSE)
}
zScale <- lapply(a0$Qi0, function(Qi0i) {
if(is.null(Qi0i)) {
return(NULL)
}
df <- data.frame(detQ=sapply(Qi0i,det))
for(i in 1:length(groupNames)) {
if(length(unique(data[,groupNames[i]])) == nrow(df)) {
df[,groupNames[i]] <- unique(data[,groupNames[i]])
attr(df,"groups") <- c(attr(df, "groups"), groupNames[i])
}
}
df
})
for(wi in 2:length(weights)) {
weights[[wi]]$detQ <- NULL
Zgrps <- attr(zScale[[wi]], "groups")
weights[[wi]] <- merge(weights[[wi]],zScale[[wi]][,c(Zgrps, "detQ")],by.x="index", by.y=Zgrps)
}
fn0 <- param.lnl.quad(y=y,
X=X,
levels=levels,
Z=Z,
ZFull=ZFull,
Qi=a0$Qi,
QiFull=a0$QiFull,
omega=a0$omega,
omegaFull=a0$omegaFull,
W=weights,
k=k,
qp=qp,
cConstructor=covarianceConstructor,
acc0=acc0,
mappedDefault=FALSE,
family=family)
fn0R <- param.lnl.quad(y=y,
X=X,
levels=levels,
Z=Z,
ZFull=ZFull,
Qi=a0$Qi,
QiFull=a0$QiFull,
omega=a0$omega,
omegaFull=a0$omegaFull,
W=weights,
k=k,
qp=qp,
cConstructor=covarianceConstructor,
acc0=acc0,
mappedDefault=TRUE,
family=family)
if(max(abs(a00$omega0[[2]] - a0$omega0[[2]])/pmax(abs(a0$omega0[[2]]),1E-10)) < 1E-2) {
if(verbose) {
cat("Done adapting; the mode is not changing sufficiently.\n")
}
keepAdapting <- FALSE
}
if(keepAdapting & max(abs(d1)) <= 1E-3) {
if(verbose) {
cat("Done adapting: close to a solution.\n")
}
keepAdapting <- FALSE
}
a00 <- a0
oldlnl <- fn0(est, varFloor=-3.59)
}
covs_and_vars <- est[-(1:k)]
vars <- covs_and_vars[which(is.na(lmeVarDF$var2))]
not_0_vars <- which(vars > -3) + k
if((!skipNextHessian & max(sum(abs(fact*v)/abs(est))) < (.Machine$double.eps)^0.25) & max(abs(dd2)) < Inf) {
skipNextHessian <- TRUE
} else {
skipNextHessian <- FALSE
}
}
if(verbose) {
message("Itterations complete.")
}
  if (iteration >= max_iteration){
    warning("Maximum number of iterations reached before convergence; estimates may be unreliable.")
  }
hessian <- dd2
MAP <- MAP0(omega0=a0$omega0, par0=est, verb=FALSE)$omega0
BLUE <- BLUE0(omega0=a0$omega0, par0=est, Qi0=a0$Qi0, adapt=FALSE, verb=FALSE)
est <- as.numeric(est)
names(est) <- names(parlme)
covs_and_vars <- est[-(1:k)]
vars <- covs_and_vars[which(is.na(lmeVarDF$var2))]
need_fix_vars <- which(vars < 1)
covs_and_vars[need_fix_vars] <- exp(covs_and_vars[need_fix_vars] - 1)
vars <- covs_and_vars
names(vars) <- gsub(":NA", "", paste(lmeVarDF$grp, lmeVarDF$var1, lmeVarDF$var2, sep=":"))
if (length(need_fix_vars) > 0){
warning(paste0("Group variances too small to estimate accurately. The estimated variance in the group level terms(s) ", paste(dQuote(names(vars)[need_fix_vars]), collapse=", "), " is near zero.",
" Very low variance suggests that the data is not hierarchical and that a model without these levels should be considered.",
" If this removes all groups then a non-hierarchical model, such as logistic regression, should be considered."))
hessian <- getHessian(fn0R, c(est[1:k], covs_and_vars+0.0002*need_fix_vars))
}
var_between <- sum(vars[which(!is.na(lmeVarDF$var1) & is.na(lmeVarDF$var2))])
var_within <- vars[which(lmeVarDF$grp=="Residual")]
ICC <- var_between/(var_between+var_within)
nobs <- nrow(X)
names(nobs) <- "Number of obs"
ngroups <- c(nobs, ngrp)
varDF <- lmeVarDF[,c("grp", "var1", "var2", "vcov", "ngrp", "level")]
varDF$vcov <- 0
varDF$fullGroup <- paste0(varDF$grp,ifelse(!is.na(varDF$var1),paste0(".",varDF$var1),""))
varDF$vcov <- vars
res <- list(lnlf=fn0R, lnl=fn0(est, varFloor=-3.59), coef=est[1:k], vars=vars,
call=call, levels=levels, ICC=ICC, CMODE=BLUE,
invHessian=hessian, is_adaptive=TRUE, ngroups=ngroups, varDF=varDF,
wgtStats=ngrpW)
class(res) <- "WeMixResults"
return(res)
}
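
# Hedged usage sketch: with all weights set to 1, the weighted fit should be
# comparable to an unweighted lmer() fit (lme4's sleepstudy data is assumed):
# library(lme4)
# ss <- sleepstudy
# ss$w1 <- 1  # level-1 (observation) weights
# ss$w2 <- 1  # level-2 (Subject) weights
# mix(Reaction ~ Days + (1 | Subject), data = ss, weights = c("w1", "w2"))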
BLUE <- function(groups, y, X, levels, Z, ZFull, weights0, k, qp,
covariance_constructor, verbose, nlmevar, nz, acc,
family) {
function(omega0, par0, Qi=NULL, Qi0=NULL, verb=verbose, adapt=TRUE) {
weights <- weights0
if(is.null(Qi)) {
Qi <- list(NULL)
QiFull <- list(NULL)
for( oi in 2:length(omega0)) {
map <- groups[,oi-1]
umap <- unique(map)
nzi <- ncol(Z[[oi]])
Qi[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(weights[[oi-1]]))
for(i in 1:nrow(weights[[oi-1]])) {
Qi[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
QiFull[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(X))
for(i in 1:nrow(X)) {
QiFull[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
}
}
zScale <- lapply(Qi0, function(Qi0i) {
if(is.null(Qi0i)) {
return(NULL)
}
df <- data.frame(detQ=sapply(Qi0i,det))
for(i in 1:ncol(groups)) {
if(length(unique(groups[,i])) == nrow(df)) {
df[,colnames(groups)[i]] <- unique(groups[,i])
attr(df,"groups") <- c(attr(df, "groups"), colnames(groups)[i])
}
}
df
})
for(wi in 2:length(weights)) {
weights[[wi]]$detQ <- NULL
Zgrps <- attr(zScale[[wi]], "groups")
weights[[wi]] <- merge(weights[[wi]], zScale[[wi]][,c(Zgrps, "detQ")],by.x="index", by.y=Zgrps)
}
omega <- buildOmega(omega0=omega0, groups=groups, nrowX=nrow(X))
omegaFull <- buildOmega(omega0=omega0, groups=groups, nrowX=nrow(X), full=TRUE)
Qi0_ <- list(NULL)
Qi_ <- list(NULL)
tmpomega <- list(NULL)
for( oi in 2:length(omega0)) {
omg0 <- omega0[[oi]]
omg1 <- 2*omg0
while( max(abs( (omg1 - omg0) / pmax(abs(omg0), 1E-5))) > 1E-3) {
omg1 <- omg0
tmpomega_ <- c(tmpomega, list(omg0))
nzi <- ncol(Z[[oi]])
f <- param.lnl.quad(y, X, oi, Z, ZFull=ZFull, Qi=Qi, QiFull=QiFull,
omega, omegaFull=omegaFull, W=weights, k, qp,
covariance_constructor, bobyqa=FALSE, verbose=TRUE, acc0=acc,
mappedDefault=FALSE, family=family)
for(ici in 1:ncol(omg0)) {
f0 <- f(par0, top=FALSE, integralMultiplierExponent=0, integralZColumn=ici)
f1 <- f(par0, top=FALSE, integralMultiplierExponent=1, integralZColumn=ici)
omg0[ , ici] <- as.numeric(f1/f0)
}
omega0p <- c(tmpomega, list(omg0))
while( length(omega0p) < length(omega0)) {
omega0p[[length(omega0p)+1]] <- omega0[[length(omega0p)+1]]
}
omega <- buildOmega(omega0=omega0p, groups=groups, nrowX=nrow(X))
omegaFull <- buildOmega(omega0=omega0p, groups=groups, nrowX=nrow(X), full=TRUE)
if(!adapt) {
omg1 <- omg0
}
}
if(verb & adapt) {
cat("BLUE estimates:\n")
print(omg0)
}
if(adapt) {
omg0Full <- buildOmega(omega0=tmpomega_, groups=groups, nrowX=nrow(X), full=TRUE)
derivatives <- genD(adapterLnL(y, X, levels, Z, ZFull, weights, k, qp,
covariance_constructor, omega,
omg0Full,
tmpomega_, par0, verb, Qi, QiFull, oi,
acc, family),
rep(0,sum(unlist(nz)[1:oi], na.rm=TRUE)))
d2 <- derivatives$D[,-(1:nzi),drop=FALSE]
drv <- d2
Qi0_[[oi]] <- lapply(1:nrow(drv), function(i) {
scaleQuadPoints(drv[i,], nzi)
})
map <- groups[,oi-1]
umap <- unique(map)
Qi_[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(X))
for(i in 1:nrow(X)) {
Qi_[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0_[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
QiFull[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(X))
for(i in 1:nrow(X)) {
  # use the per-level dimension (nzi) and the freshly adapted points (Qi0_),
  # matching the construction of Qi_ above
  QiFull[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0_[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
}
tmpomega <- c(tmpomega, list(omg0))
omg0Full <- buildOmega(omega0=tmpomega, groups=groups, nrowX=nrow(X), full=TRUE)
}
if(adapt) {
return(list(omega0=tmpomega, omega=omega, omegaFull=omg0Full, Qi0=Qi0_, Qi=Qi_, QiFull=QiFull))
} else {
return(tmpomega)
}
}
}
MAP <- function(groups, y, X, levels, Z, ZFull, weights, k, qp,
covariance_constructor, verbose, nlmevar, nz, acc, family) {
function(omega0, par0, verb=verbose) {
omega <- buildOmega(omega0=omega0, groups=groups, nrowX=nrow(X))
omegaFull <- buildOmega(omega0=omega0, groups=groups, nrowX=nrow(X), full=TRUE)
Qi0 <- list(NULL)
Qi <- list(NULL)
QiFull <- list(NULL)
tmpomega <- list(NULL)
tmpomegaFull <- list(NULL)
u0 <- 0
for(oi in 2:length(omega0)) {
omg0 <- omega0[[oi]]
omg1 <- 1E20*(omg0+1E-15)
nzi <- nz[[oi]]
iter <- 0
while( iter < 25 & max(abs( (omg1 - omg0) / pmax(abs(omg0), 1E-5))) > 1E-3) {
if(iter >= 1) {
u0 <- max(c(u0, as.vector(abs( (omg1 - omg0) ))))
}
iter <- iter + 1
omg1 <- omg0
tmpomega_ <- c(tmpomega, list(omg0))
toF <- buildOmega(omega0=tmpomega_, groups=groups, nrowX=nrow(X), full=TRUE)
ofn <- adapterLnL(y, X, levels, Z, ZFull, weights, k, qp,
covariance_constructor, omega, toF,
tmpomega_, par0, verb, Qi, QiFull, oi, acc,
family)
d1 <- getJacobian(ofn, rep(0, nz[[oi]]), m=nrow(omg0))
d2 <- getHessian(ofn, rep(0, nz[[oi]]))
omg0 <- lapply(1:length(d2),
function(i) {
step <- solve(d2[[i]]) %*% d1[[i]]
if(iter >= 3) {
step <- 1/2 * step
ii <- 1
while(any(abs(step) > 3*u0/iter)) {
ii <- ii + 1
step <- 1/2 * step
if(ii > 20) {
stop("Ridiculous Newton step proposed, MAP not converging.")
}
}
} else {
step <- 1/2 * step
}
omg0[i,] - step
})
omg0 <- t(do.call(cbind, omg0))
omega0p <- c(tmpomega, list(omg0))
while( length(omega0p) < length(omega0)) {
omega0p[[length(omega0p)+1]] <- omega0[[length(omega0p)+1]]
}
omega <- buildOmega(omega0=omega0p, groups=groups, nrowX=nrow(X))
omegaFull <- buildOmega(omega0=omega0p, groups=groups, nrowX=nrow(X), full=TRUE)
}
if(verb) {
cat("Estimates:\n")
print(omg0)
}
tmpomega <- c(tmpomega, list(omg0))
tmpomegaFull <- omegaFull
drv <- d2
Qi0[[oi]] <- lapply(1:length(drv), function(i) {
ss <- scaleQuadPoints(drv[[i]], nzi)
for(j in 1:nrow(ss)) {
if(ss[j,j] > abs(omg0[i,j])) {
ss[j,j] <- sqrt(ss[j,j]^2 + omg0[i,j]^2)
omg0[i,j] <<- 0
}
}
ss
})
map <- groups[,oi-1]
umap <- unique(map)
nzi <- ncol(Z[[oi]])
Qi[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(weights[[oi-1]]))
for(i in 1:nrow(weights[[oi-1]])) {
Qi[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
QiFull[[oi]] <- matrix(0, nrow=nzi, ncol=nzi*nrow(X))
for(i in 1:nrow(X)) {
QiFull[[oi]][1:nzi,(i-1)*nzi+1:nzi] <- Qi0[[oi]][[(1:length(umap))[map[i]==umap] ]]
}
if(oi < length(omega0)) {
df <- data.frame(detQ=sapply(Qi0[[oi]],det))
groupNames <- colnames(groups)[oi-1]
for(i in 1:length(groupNames)) {
df[,groupNames[i]] <- unique(groups[,groupNames[i]])
}
weights[[oi]] <- merge(weights[[oi]],df[,c(groupNames, "detQ")],by.x="index", by.y=groupNames)
}
}
list(omega0=tmpomega, omega=omega, omegaFull=tmpomegaFull, Qi0=Qi0, Qi=Qi, QiFull=QiFull)
}
}
scaleQuadPoints <- function(d2, nz){
solved <- solve(-1*d2)
res <- NULL
tryCatch(res <- chol(solved),
error= function(e) {
tryCatch(solved <- nearPD(solved)$mat,
error=function(e){
solved <<- diag(abs(diag(solved)))
})
res <<- chol(solved)
})
res
}
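# Sketch of what scaleQuadPoints() does: for a negative-definite Hessian d2 it
# returns the Cholesky factor of solve(-d2), falling back to a positive-definite
# repair (nearPD, then a diagonal approximation) when the inverse is not PD.
# scaleQuadPoints(-2 * diag(2), nz = 2)   # sqrt(1/2) * diag(2)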
buildOmega <- function(omega0, groups, nrowX, full=FALSE) {
omega <- list(NULL)
oind <- 1
for(o0i in 2:length(omega0)) {
omega0i <- as.matrix(omega0[[o0i]])
res <- matrix(0, nrow=nrowX, ncol=ncol(omega0i))
noind <- ncol(omega0i)
map <- groups[,o0i-1]
umap <- unique(map)
for(i in 1:length(umap)) {
for(oindi in 1:noind) {
res[which(map==umap[i]),oindi] <- omega0i[i,oindi]
}
}
if(o0i > 2 & !full) {
res <- res[!duplicated(groups[,o0i-2]),]
}
omega <- c(omega, list(res))
oind <- oind + noind
}
omega
}
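# Minimal sketch of buildOmega(): expand one group-level matrix (one row per
# group) to one row per observation, using the first column of 'groups' as the
# observation-to-group map. The leading NULL mirrors how omega0 is stored.
# grp <- matrix(c(1, 1, 2, 2, 3, 3), ncol = 1)
# om0 <- list(NULL, matrix(1:6, nrow = 3, ncol = 2))
# buildOmega(omega0 = om0, groups = grp, nrowX = 6)[[2]]   # rows repeated by group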
adapterLnL <- function(y, X, levels, Z, ZFull, weights, k, qp,
covariance_constructor, omega, omegaFull, omega0, par0,
verbose, Qi, QiFull, olvl, acc, family) {
function(par, long=FALSE) {
yadj <- 0
o0 <- omega0
nzi <- 0
for(i in 1:olvl) {
if(!is.null(Z[[i]])) {
ki <- ncol(Z[[i]])
if(i == olvl) {
nzi <- ki
}
if(ki >= 1) {
zAdjust <- apply(ZFull[[i]] * omegaFull[[i]],1,sum)
if(olvl == i) {
zAdjust <- zAdjust + ZFull[[i]] %*% par[1:ki]
for(kii in 1:ki) {
o0[[i]][,kii] <- o0[[i]][,kii] + par[kii]
}
par <- par[-(1:ki)]
}
yadj <- yadj + zAdjust
}
}
}
beta <- par0[1:k]
parC <- covariance_constructor(par0[-(1:k)])
Qi_ <- matrix(0, nrow=nzi, ncol=nzi*nrow(weights[[olvl-1]]))
Qi__ <- c(Qi, list(Qi_))
QiFull_ <- matrix(0, nrow=nzi, ncol=nzi*nrow(X))
QiFull__ <- c(QiFull, list(QiFull_))  # base on QiFull (per-row), not Qi (per-group)
loglikelihoodByGroup <- calc.lin.lnl.quad(y=y, yhat=X %*% beta + yadj, level=olvl,
Z, Qi=Qi__,
omega=lapply(omega, function(omegai) {0*omegai}),
W=weights, C=parC, qp, top=FALSE,
atPoint=TRUE, verbose=verbose,
acc=acc, ZFull=ZFull,
omegaFull=omegaFull, QiFull=QiFull__,
family=family)
Cl <- parC[[olvl]]
posteriorByGroup <- apply(o0[[olvl]], MARGIN=1, function(p) {
mvnpdfC(as.matrix(p), rep(0, length = length(p)), varcovM=Cl%*%t(Cl), Log=TRUE)
})
if(long) {
return(list(res=loglikelihoodByGroup + posteriorByGroup,
loglikelihoodByGroup=loglikelihoodByGroup,
posteriorByGroup=posteriorByGroup))
}
loglikelihoodByGroup + posteriorByGroup
}
} |
corr_diff <- function (r1, n1, r2, n2, conf.int=0.9, plot=FALSE)
{
if (is.character(r1) == TRUE || is.factor(r1) == TRUE ||
is.character(n1) == TRUE || is.factor(n1) == TRUE) {
error <- "Sorry, data must be numeric or integer values."
stop(error)
}
if (is.character(r2) == TRUE || is.factor(r2) == TRUE ||
is.character(n2) == TRUE || is.factor(n2) == TRUE) {
error <- "Sorry, data must be numeric or integer values."
stop(error)
}
if (length(r1) > 1 || length(n1) > 1 || length(r2) > 1 ||
length(n2) > 1) {
error <- "Please enter only one effect size."
stop(error)
}
diff <- r2 - r1
zcrit <- abs(stats::qnorm((1 - conf.int)/2))
r1.z <- 0.5 * log((1 + r1)/(1 - r1))
r1.sd <- 1/sqrt(n1 - 3)
r1.ll <- r1.z - zcrit * r1.sd
r1.ul <- r1.z + zcrit * r1.sd
r2.z <- 0.5 * log((1 + r2)/(1 - r2))
r2.sd <- 1/sqrt(n2 - 3)
r2.ll <- r2.z - zcrit * r2.sd
r2.ul <- r2.z + zcrit * r2.sd
diff.UL <- diff + sqrt((r2.ul - r2)^2 + (r1 - r1.ll)^2)
diff.LL <- diff - sqrt((r2 - r2.ll)^2 + (r1.ul - r1)^2)
z.diff <- abs(r1.z - r2.z)
z.diff.sd <- sqrt(1/(n1 - 3) + 1/(n2 - 3))
z <- z.diff/z.diff.sd
p <- 2 * (1 - stats::pnorm(z))
dir <- ifelse(r2 > r1, ">", "<")
level <- paste(as.character(100 * conf.int), "%", sep = "")
cat(" Test of Two Correlations:\n")
cat(" diff = ", diff, "\n", sep = "")
cat(" ", level, " CI ", "[", round(diff.LL, digits = 2),
", ", round(diff.UL, digits = 2), "]\n", sep = "")
cat(" p value = ", round(p, digits = 2), "\n\n", sep = "")
inference <- ifelse(diff.LL < 0 && diff.UL > 0,
paste("Inference: Lacking Evidence, r2 = r1, (CI contains 0).", sep = ""),
paste("Inference: Evidence Present, r2 ", dir, " r1, (CI does not contain 0).", sep = ""))
if (plot == TRUE) {
plot(NA, ylim = c(0, 1), xlim = c(diff.LL -
(diff.UL - diff.LL)/10,
(diff.UL) + (diff.UL - diff.LL)/10), bty = "l", yaxt = "n", ylab = "",
xlab = "Difference in Correlations")
graphics::points(x = diff, y = 0.5, pch = 15, cex = 2)
graphics::abline(v = 0, lty = 2, col = "grey")
graphics::segments(diff.LL, 0.5, diff.UL, 0.5, lwd = 3)
graphics::title(main = paste(
"difference = ", round(diff, digits = 2), " \n ",
100 * (conf.int), "% CI [", round(diff.LL, digits = 2),
";", round(diff.UL, digits = 2), "] ", " \n ", inference,
sep = ""), cex.main = 1)
}
rval <- list(diff=diff, diff.LL=diff.LL, diff.UL=diff.UL,
n1=n1, r1=r1, r1.ll=r1.ll, r1.ul=r1.ul,
n2=n2, r2=r2, r2.ll=r2.ll, r2.ul=r2.ul,
p.value=p, inference=inference)
invisible(rval)
} |
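# Example: test whether r = 0.60 (n = 90) differs from r = 0.35 (n = 80), with
# a 95% CI on the difference and the accompanying plot.
# corr_diff(r1 = 0.35, n1 = 80, r2 = 0.60, n2 = 90, conf.int = 0.95, plot = TRUE)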
test_that("novel levels can be ignored", {
dat <- data.frame(
y = 1:4,
f = factor(letters[1:4])
)
new <- data.frame(
y = 1:5,
f = factor(letters[1:5])
)
ptype <- vctrs::vec_ptype(dat)
expect_warning(
x <- scream(new, ptype, allow_novel_levels = TRUE),
NA
)
expect_equal(levels(x$f), letters[1:5])
})
test_that("novel levels in a new character vector can be ignored", {
dat <- data.frame(
y = 1:4,
f = factor(letters[1:4])
)
new <- data.frame(
y = 1:5,
f = letters[1:5],
stringsAsFactors = FALSE
)
ptype <- vctrs::vec_ptype(dat)
expect_warning(
x <- scream(new, ptype, allow_novel_levels = TRUE),
NA
)
expect_equal(levels(x$f), new$f)
})
test_that("ignoring novel levels still passes through incompatible classes", {
dat <- data.frame(f = factor(letters[1:4]))
new <- data.frame(f = 1:5)
ptype <- vctrs::vec_ptype(dat)
expect_error(
scream(new, ptype, allow_novel_levels = TRUE),
class = "vctrs_error_incompatible_type"
)
}) |
`second.extinct` <-
function(web, participant="higher", method="abun", nrep=10, details=FALSE, ext.row=NULL, ext.col=NULL){
if (participant=="both" & method=="external") stop("Sorry, that won't work. When you specify the sequence, you have to choose one of the two levels. 'both' won't work.")
if (!is.null(ext.row) & length(ext.row) != NROW(web)) stop("The length of the external row vector is different from the numbers of rows in the network!")
if (!is.null(ext.col) & length(ext.col) != NCOL(web)) stop("The length of the external col vector is different from the numbers of cols in the network!")
if (participant == "higher" & method=="external" & is.null(ext.col)) stop("You need to provide an external sequence of extinction for the higher trophic level!")
if (participant == "lower" & method=="external" & is.null(ext.row)) stop("You need to provide an external sequence of extinction for the lower trophic level!")
one.second.extinct <- function(web=web, participant=participant, method=method, ext.row=ext.row, ext.col=ext.col){
dead <- matrix(nrow=0, ncol=3)
colnames(dead) <- c("no", "ext.lower", "ext.higher")
m2 <- web
i <- 1
repeat {
n <- extinction(m2, participant=participant, method=method, ext.row=ext.row, ext.col=ext.col)
dead <- rbind(dead, c(i, attributes(m2 <- empty(n, count=TRUE))$empty))
if (participant == "lower" & NROW(m2) < 2) break;
if (participant == "higher" & NCOL(m2) < 2) break;
if (participant == "both" & min(dim(m2)) < 2) break;
if (any(dim(n) == 1)) break;
if (method=="external") {
ext.col[ext.col > ext.col[1]] <- ext.col[ext.col > ext.col[1]] - 1
ext.row[ext.row > ext.row[1]] <- ext.row[ext.row > ext.row[1]] - 1
ext.row <- ext.row[-1]
ext.col <- ext.col[-1]
}
i <- i + 1
}
dead2 <- rbind(dead, c(NROW(dead)+1, NROW(m2), NCOL(m2)))
if (participant == "lower" & method== "degree"){
if (length(table(dead[,2])) > 1) dead2[,2] <- 1
}
if (nrow(dead)+1 != nrow(dead2)) stop("PANIC! Something went wrong with the extinction sequence! Please contact the author to fix this!!")
if (participant == "lower") supposed.length <- NROW(web)
if (participant == "higher") supposed.length <- NCOL(web)
if (participant == "both") supposed.length <- NROW(dead2)
if (NROW(dead2) != supposed.length) {
missing <- supposed.length - NROW(dead2)
addit1 <- (NROW(dead2)+1):(NROW(dead2)+missing)
addit2n3 <- rep(0, times=missing)
dead2 <- rbind(dead2, as.matrix(data.frame(addit1, addit2n3, addit2n3)))
}
return(dead2)
}
if (is.vector(method)) sequence = method
if (pmatch(method, c("abundance", "random", "degree", "external")) %in% c(1,3,4)){
out <- one.second.extinct(web=web, participant=participant, method=method, ext.row=ext.row, ext.col=ext.col)
} else {
o <- replicate(nrep, one.second.extinct(web=web, participant=participant, method=method, ext.row=ext.row, ext.col=ext.col), simplify=FALSE)
if (details){
out <- o
} else {
lengths <- sapply(o, nrow)
z <- o[[which.max(lengths)]]
z[,2:3] <- 0
for (k in 1:length(o)) {
nr <- nrow(o[[k]])
z[1:nr, ] <- z[1:nr, ] + o[[k]]
rm(nr)
}
out <- z/length(o)
out[,1] <- 1:max(lengths)
}
}
class(out) <- "bipartite"
attr(out, "exterminated") <- c("both", "lower", "higher")[pmatch(participant, c("both", "lower", "higher"))]
out
} |
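# Example (assumes the 'bipartite' package and its Safariland web are available):
# data(Safariland, package = "bipartite")
# ex <- second.extinct(Safariland, participant = "lower", method = "random", nrep = 20)
# head(ex)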
do_outline_alpha <- function(rp, alpha)
{
ah = alphahull::ashape(rp,alpha=alpha)
return(ah)
}
do_outline_ball <- function(rp, radius)
{
gb = rgeos::gBuffer(sp::SpatialPoints(rp), quadsegs=2, width=radius)
return(gb)
}
do_outline_raster <- function(pts,res)
{
pts <- as.matrix(pts)
pr <- padded_range(pts,multiply.interval.amount=0.25)
e <- extent(t(pr))
r <- raster::raster(e, ncol=res, nrow=res)
x <- raster::rasterize(pts, r, rep(1, nrow(pts)), fun=mean,background=NA)
w <- raster::rasterToPolygons(x,dissolve=TRUE)
return(w)
}
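# Sketch: the three outline helpers take a 2-column matrix of points and return
# a polygonal outline via alpha shapes, buffered points, or a rasterized
# footprint, respectively. E.g. (assuming alphahull/rgeos are installed):
# rp <- matrix(rnorm(200), ncol = 2)
# do_outline_alpha(rp, alpha = 0.25)
# do_outline_ball(rp, radius = 0.2)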
plot.Hypervolume <- function(x, ...)
{
templist = new("HypervolumeList")
templist@HVList=list(x)
plot.HypervolumeList(templist, ...)
}
extendrange <- function(x,factor=0.5)
{
xmin <- min(x,na.rm=TRUE)
xmax <- max(x,na.rm=TRUE)
xminf <- xmin - (xmax - xmin)*factor
xmaxf <- xmax + (xmax - xmin)*factor
result <- c(xminf, xmaxf)
return(result)
}
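# Note: this masks grDevices::extendrange(), which names its expansion
# argument 'f' rather than 'factor'.
# extendrange(c(0, 10))   # -5 15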
plot.HypervolumeList <- function(x,
show.3d=FALSE,plot.3d.axes.id=NULL,
show.axes=TRUE, show.frame=TRUE,
show.random=TRUE, show.density=TRUE,show.data=TRUE,
names=NULL, show.legend=TRUE, limits=NULL,
show.contour=TRUE, contour.lwd=1.5,
contour.type='kde',
contour.alphahull.alpha=0.25,
contour.ball.radius.factor=1,
contour.kde.level=1e-4,
contour.raster.resolution=100,
show.centroid=TRUE, cex.centroid=2,
colors=rainbow(floor(length(x@HVList)*1.5),alpha=0.8),
point.alpha.min=0.2, point.dark.factor=0.5,
cex.random=0.5,cex.data=0.75,cex.axis=0.75,cex.names=1.0,cex.legend=0.75,
num.points.max.data = 1000, num.points.max.random = 2000, reshuffle=TRUE,
plot.function.additional=NULL,
verbose=FALSE,
...)
{
method_is_occupancy <- FALSE
if (class(x) == "Hypervolume")
{
if(identical(x@Method, "n_occupancy") | identical(x@Method, "n_occupancy_test") | identical(x@Method, "n_occupancy_permute")){
method_is_occupancy <- TRUE
}
}
if (class(x)=="HypervolumeList"){
method_list <- unique(unlist(lapply(x@HVList, function(x) x@Method)))
if(identical(method_list, "n_occupancy") | identical(method_list, "n_occupancy_test") | identical(method_list, "n_occupancy_permute")){
method_is_occupancy <- TRUE
}
}
if(method_is_occupancy){
if(identical(class(x)[1], "HypervolumeList")){
for(i in 1:length(x@HVList)){
hv_temp <- x@HVList[[i]]
x@HVList[[i]]@RandomPoints <- hv_temp@RandomPoints[! is.na(hv_temp@ValueAtRandomPoints), ]
x@HVList[[i]]@ValueAtRandomPoints <- hv_temp@ValueAtRandomPoints[! is.na(hv_temp@ValueAtRandomPoints)]
hv_temp <- x@HVList[[i]]
x@HVList[[i]]@RandomPoints <- hv_temp@RandomPoints[hv_temp@ValueAtRandomPoints != 0, ]
x@HVList[[i]]@ValueAtRandomPoints <- hv_temp@ValueAtRandomPoints[hv_temp@ValueAtRandomPoints != 0]
}
}
}
if(method_is_occupancy){
columns_to_remove <- 3
} else {
columns_to_remove <- 2
}
sapply(x@HVList, function(z)
{
if (verbose==TRUE)
{
cat(sprintf("Showing %d random points of %d for %s\n",min(nrow(z@RandomPoints), num.points.max.random), nrow(z@RandomPoints), z@Name))
}
if (show.data && length(z@Data) > 0)
{
npd <- ifelse(all(is.nan(z@Data)), 0, nrow(z@Data))
if (verbose==TRUE)
{
cat(sprintf("Showing %d data points of %d for %s\n",min(num.points.max.data, npd), npd, z@Name))
}
}
})
if (!requireNamespace("alphahull", quietly = TRUE)) {
warning("The package 'alphahull' is needed for contour plotting with contour.type='alphahull'. Please install it to continue.\n\n *** Temporarily setting contour.type='kde'.", call. = FALSE)
contour.type <- 'kde'
}
alldims = sapply(x@HVList, function(z) { z@Dimensionality })
allnames = sapply(x@HVList, function(z) { z@Name })
stopifnot(all(alldims[1] == alldims))
if(method_is_occupancy){
all <- NULL
alldata <- NULL
for (i in 1:length(x@HVList))
{
ivals = sample(nrow(x@HVList[[i]]@RandomPoints), min(c(num.points.max.random, nrow(x@HVList[[i]]@RandomPoints))))
subsampledpoints = data.frame(x@HVList[[i]]@RandomPoints[ivals,,drop=FALSE])
densityvals = x@HVList[[i]]@ValueAtRandomPoints[ivals]
if (nrow(subsampledpoints) > 0)
{
subsampledpoints = cbind(subsampledpoints, ID=rep(i, nrow(subsampledpoints)), Density=(densityvals-min(densityvals,na.rm=TRUE))/(max(densityvals,na.rm=TRUE)-min(densityvals,na.rm=TRUE)), Occupancy = abs(x@HVList[[i]]@ValueAtRandomPoints[ivals]))
subsampledpoints[is.nan(subsampledpoints[,"Density"]),"Density"] <- 1
all <- rbind(all, subsampledpoints)
}
thisdata=x@HVList[[i]]@Data
alldata <- rbind(alldata, cbind(thisdata, ID=rep(i,nrow(thisdata))))
}
} else {
all <- NULL
alldata <- NULL
for (i in 1:length(x@HVList))
{
ivals = sample(nrow(x@HVList[[i]]@RandomPoints), min(c(num.points.max.random, nrow(x@HVList[[i]]@RandomPoints))))
subsampledpoints = data.frame(x@HVList[[i]]@RandomPoints[ivals,,drop=FALSE])
densityvals = x@HVList[[i]]@ValueAtRandomPoints[ivals]
if (nrow(subsampledpoints) > 0)
{
subsampledpoints = cbind(subsampledpoints, ID=rep(i, nrow(subsampledpoints)), Density=(densityvals-min(densityvals,na.rm=TRUE))/(max(densityvals,na.rm=TRUE)-min(densityvals,na.rm=TRUE)))
subsampledpoints[is.nan(subsampledpoints[,"Density"]),"Density"] <- 1
all <- rbind(all, subsampledpoints)
}
thisdata=x@HVList[[i]]@Data
alldata <- rbind(alldata, cbind(thisdata, ID=rep(i,nrow(thisdata))))
}
}
alldata <- as.data.frame(alldata)
if (num.points.max.data < nrow(alldata) && !is.null(num.points.max.data))
{
alldata <- alldata[sample(nrow(alldata), min(c(num.points.max.data, nrow(alldata)))),]
}
if (is.null(all))
{
warning('No random points to plot.')
if (is.null(dimnames(x@HVList[[1]]@RandomPoints)[[2]]))
{
all <- matrix(0,ncol=2+alldims[1],nrow=1,dimnames=list(NULL,c(paste("X",1:alldims[1],sep=""),"ID","Density")))
}
else
{
all <- matrix(0,ncol=2+alldims[1],nrow=1,dimnames=list(NULL,c(dimnames(x@HVList[[1]]@RandomPoints)[[2]],"ID","Density")))
}
all <- as.data.frame(all)
}
if (reshuffle==TRUE)
{
all <- all[sample(nrow(all),replace=FALSE),,drop=FALSE]
alldata <- alldata[sample(nrow(alldata),replace=FALSE),,drop=FALSE]
}
no_names_supplied = FALSE
if (is.null(names))
{
dn = dimnames(all)[[2]]
names = dn[1:(ncol(all)-columns_to_remove)]
no_names_supplied = TRUE
}
if (!is.null(limits) & !is.list(limits))
{
varlimlist = vector('list',ncol(all)-2)
for (i in 1:length(varlimlist))
{
varlimlist[[i]] <- limits
}
limits = varlimlist
}
colorlist <- colors[all$ID]
alphavals <- (all$Density - quantile(all$Density, 0.025, na.rm=T)) / (quantile(all$Density, 0.975, na.rm=T) - quantile(all$Density,0.025, na.rm=T))
alphavals[is.nan(alphavals)] <- 0.5
alphavals[alphavals < 0] <- 0
alphavals[alphavals > 1] <- 1
alphavals <- point.alpha.min + (1 - point.alpha.min)*alphavals
if (show.density==FALSE)
{
alphavals <- rep(1, length(colorlist))
}
for (i in 1:length(colorlist))
{
colorlist[i] <- rgb_2_rgba(colorlist[i], alphavals[i])
}
colorlistdata = colors[alldata$ID]
for (i in 1:length(colorlistdata))
{
colorlistdata[i] <- rgb_2_set_hsv(colorlistdata[i], v=1-point.dark.factor)
}
if (ncol(all) - columns_to_remove < 2)  # 'all' always carries the ID/Density bookkeeping columns
{
stop('Plotting only available in n>=2 dimensions.')
}
if (show.3d==FALSE)
{
op = par(no.readonly = T)
par(mfrow=c(ncol(all)-columns_to_remove, ncol(all)-columns_to_remove))
par(mar=c(0,0,0,0))
par(oma=c(0.5,0.5,0.5,0.5))
for (i in 1:(ncol(all)-columns_to_remove))
{
for (j in 1:(ncol(all)-columns_to_remove))
{
if (j > i)
{
plot(all[,j], all[,i],type="n",axes=FALSE,xlim=limits[[j]], ylim=limits[[i]],bty='n')
if(show.random==TRUE)
{
if(method_is_occupancy){
cex.occupancy <- all[, "Occupancy"]
points(all[,j], all[,i], col=colorlist, cex= cex.occupancy / max(cex.occupancy) * cex.random, pch = 16)
} else {
points(all[,j], all[,i], col=colorlist,cex=cex.random,pch=16)
}
}
if (show.data & nrow(alldata) > 0)
{
points(alldata[,j], alldata[,i], col=colorlistdata,cex=cex.data,pch=16)
}
if (show.centroid == TRUE)
{
for (whichid in 1:length(unique(all$ID)))
{
allss <- subset(all, all$ID==whichid)
if(method_is_occupancy){
centroid_x <- weighted.mean(allss[,j], cex.occupancy[all$ID==whichid], na.rm=TRUE)
centroid_y <- weighted.mean(allss[,i], cex.occupancy[all$ID==whichid], na.rm=TRUE)
} else{
centroid_x <- mean(allss[,j],na.rm=TRUE)
centroid_y <- mean(allss[,i],na.rm=TRUE)
}
points(centroid_x, centroid_y, col=colors[whichid],cex=cex.centroid,pch=16)
points(centroid_x, centroid_y, col='white',cex=cex.centroid,pch=1,lwd=1.5)
}
}
if (show.contour==TRUE)
{
for (whichid in 1:length(unique(all$ID)))
{
allss <- subset(all, all$ID==whichid)
if (nrow(allss) > 0)
{
contourx <- allss[,j]
contoury <- allss[,i]
rp = cbind(contourx, contoury)
vol_this = x@HVList[[whichid]]@Volume
density_this = nrow(rp) / vol_this
dim_this = x@HVList[[whichid]]@Dimensionality
radius_critical <- density_this^(-1/dim_this) * contour.ball.radius.factor
if (contour.type=='alphahull')
{
poly_outline = do_outline_alpha(rp=rp, alpha=contour.alphahull.alpha)
plot(poly_outline,add=TRUE,wpoints=FALSE,wlines='none',lwd=contour.lwd,col=colors[whichid])
}
else if (contour.type=='ball')
{
poly_outline <- do_outline_ball(rp=rp, radius=radius_critical)
sp::plot(poly_outline, add=TRUE,lwd=contour.lwd,col=colors[whichid])
}
else if (contour.type=='kde')
{
if (nrow(rp) > 1)
{
m_kde = kde2d(rp[,1], rp[,2], n=50, h=radius_critical)
contour(m_kde, add=TRUE, levels=contour.kde.level,drawlabels=FALSE,lwd=contour.lwd,col=colors[whichid])
}
}
else if (contour.type=='raster')
{
poly_raster <- do_outline_raster(as.matrix(rp),res=contour.raster.resolution)
sp::plot(poly_raster, add=TRUE, lwd=contour.lwd,col=colors[whichid])
}
}
}
}
if (!is.null(plot.function.additional))
{
plot.function.additional(j,i)
}
if (show.frame==TRUE)
{
box()
}
}
else if (j == i)
{
plot(0,0,type="n",xlim=c(0,1),ylim=c(0,1),axes=FALSE)
text(0.5, 0.5, names[j],cex=cex.names)
}
else if (j==1 & i == (ncol(all) - columns_to_remove))
{
plot(0,0,type="n",xlim=c(0,1),ylim=c(0,1),axes=FALSE)
if (show.legend == TRUE)
{
legend('topleft',legend=allnames,text.col=colors,bty='n',cex=cex.legend)
}
}
else
{
plot(0,0,type="n",axes=FALSE)
}
if (j==i+1)
{
if (show.axes==TRUE)
{
axis(side=1,cex.axis=cex.axis)
axis(side=2,cex.axis=cex.axis)
}
}
}
}
par(op)
}
else
{
if (is.null(plot.3d.axes.id))
{
plot.3d.axes.id=1:3
}
if (no_names_supplied==TRUE)
{
axesnames <- names[plot.3d.axes.id]
}
else
{
axesnames <- names
}
if(length(plot.3d.axes.id)!=3) { stop('Must specify three axes') }
if (show.density==TRUE)
{
for (i in 1:length(colorlist))
{
colorlist[i] <- rgb_2_set_hsv(colorlist[i], s=(alphavals[i]^2))
}
}
rgl::plot3d(all[,plot.3d.axes.id],col=colorlist,expand=1.05, xlab=axesnames[1], ylab=axesnames[2], zlab=axesnames[3], xlim=limits[[1]],ylim=limits[[2]],zlim=limits[[3]],size=cex.random,type='p',box=show.frame,axes=show.axes)
if (show.legend==TRUE)
{
for (i in 1:length(allnames))
{
rgl::mtext3d(allnames[i],edge='x-+',line=1+i*cex.legend*1.25,color=colors[i],cex=cex.legend)
}
}
if (show.data)
{
if (!any(is.nan(as.matrix(alldata[,plot.3d.axes.id]))))
{
rgl::points3d(x=alldata[,plot.3d.axes.id[1]], y=alldata[,plot.3d.axes.id[2]], z=alldata[,plot.3d.axes.id[3]], col=colorlistdata,cex=cex.data,pch=16)
}
}
if (show.centroid == TRUE)
{
for (whichid in 1:length(unique(all$ID)))
{
allss <- subset(all, all$ID==whichid)
centroid_1 <- mean(allss[,plot.3d.axes.id[1]],na.rm=TRUE)
centroid_2 <- mean(allss[,plot.3d.axes.id[2]],na.rm=TRUE)
centroid_3 <- mean(allss[,plot.3d.axes.id[3]],na.rm=TRUE)
rgl::points3d(x=centroid_1, y=centroid_2, z=centroid_3, col=colors[whichid],cex=cex.centroid,pch=16)
}
}
}
} |
med_se = function(x,B){
B_median = rep(0,B)
n = length(x)
for (i in 1:B) {
id = sample(1:n,n,replace=T)
B_median[i] = median(x[id])
}
return(sd(B_median))  # the bootstrap SE of the median is the SD of the bootstrap replicates
}
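# Example: bootstrap standard error of the median of a standard normal sample.
# set.seed(1)
# med_se(rnorm(200), B = 1000)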
tnorm <- function(x){
s=sum(as.vector(x)*as.vector(x))
return(s)
}
tinner = function(A,B){
s = sum(as.vector(A)*as.vector(B))
return(s)
}
krondet = function(X,log=TRUE){
M = length(X)
dimen = sapply(X, ncol)
p = prod(dimen)
logdet = log(sapply(X, det))
mydet = p*sum(logdet/dimen)
if(log){
return(mydet)
}
else{
return(exp(mydet))
}
}
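# krondet() uses det(A %x% B) = det(A)^ncol(B) * det(B)^ncol(A) on the log
# scale, which avoids forming the Kronecker product. Quick check:
# A <- matrix(c(2, 0, 0, 2), 2); B <- matrix(c(3, 1, 1, 3), 2)
# krondet(list(A, B), log = FALSE)   # 1024
# det(kronecker(A, B))               # 1024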
tensrloglk = function(X, espi, Mu, SIG){
n = length(X)
dimen = dim(X[[1]])
M = length(dimen)
p = prod(dimen)
K = length(Mu)
SIGinv = lapply(SIG, MASS::ginv)
Siginv = mkronecker(SIGinv)
logSIGdet = krondet(SIG,log=TRUE)
B = array(list(),K-1)
for (k in 2:K) {
B[[k-1]] = tensr::atrans(Mu[[k]]-Mu[[1]], SIGinv)
}
loglk = 0
for (i in 1:n){
x_mu1 = matrix(X[[i]]-Mu[[1]],ncol=1)
dis_mu1 = t(x_mu1) %*% Siginv %*% x_mu1
logf1 = -p*log(2*pi)/2 - logSIGdet/2 - dis_mu1/2
temp = espi[1]  # initialize the mixture weight once per observation, then accumulate components
for (k in 2:K){
logfkoverf1 = tinner(B[[k-1]], X[[i]]-(Mu[[k]]+Mu[[1]])/2)
fkoverf1 = exp(logfkoverf1)
temp = temp + espi[k]*fkoverf1
}
loglk = loglk+log(temp)+logf1
}
return(loglk)
}
distortion <- function(x, y, K){
n=length(y)
muall=array(0,dim=dim(x[[1]]))
for (i in 1:n){
muall=muall+x[[i]]
}
muall=muall/n
mu=array(list(),K)
n.fit=rep(0,K)
for (i in 1:K){
mu[[i]]=array(0,dim=dim(x[[1]]))
}
SSb=0
for (i in 1:n){
mu[[y[i]]]=mu[[y[i]]]+x[[i]]
n.fit[y[i]]=n.fit[y[i]]+1
SSb=SSb+tnorm(x[[i]]-muall)
}
for (i in 1:K){
mu[[i]]=mu[[i]]/n.fit[i]
}
SSw=0
for (i in 1:n){
SSw=SSw+tnorm(x[[i]]-mu[[y[i]]])
}
SSb=SSb-SSw
dist <- SSw/SSb
return(dist)
} |
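# Example: distortion (within/between sum-of-squares ratio) for a crude
# two-cluster split of random 2x2 matrices.
# x <- lapply(1:20, function(i) matrix(rnorm(4), 2, 2))
# y <- rep(1:2, each = 10)
# distortion(x, y, K = 2)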
library(lfe)
set.seed(43)
options(lfe.threads=2,digits=5,warn=1)
g1 <- 80
g2 <- 20
g3 <- 12
N <- 1000
clu1 <- sample(g1,N, replace=TRUE)
clu2 <- (clu1 + sample(7,N,replace=TRUE)-1) %% g2
clu3 <- (clu2 + sample(3,N,replace=TRUE)-1) %% g3
clu1 <- factor(clu1)
clu2 <- factor(clu2)
clu3 <- factor(clu3)
ceff1 <- rnorm(nlevels(clu1), sd=0.5)[clu1]
ceff2 <- rnorm(nlevels(clu2), sd=0.4)[clu2]
ceff3 <- rnorm(nlevels(clu3))[clu3]
err1 <- rnorm(nlevels(clu1), sd=0.8)[clu1]
err2 <- rnorm(nlevels(clu2))[clu2]
err3 <- rnorm(nlevels(clu3), sd=0.5)[clu3]
x1 <- ceff1 + 0.3*ceff2 + rnorm(N)
x2 <- ceff2 + 0.2*ceff3 + rnorm(N)
x3e <- ceff3 + 0.2*(ceff2+ceff1) + rnorm(N)
f1 <- factor(sample(8,N,replace=TRUE))
x3 <- as.vector(as(f1,'sparseMatrix') %*% x3e)[f1]/tabulate(f1)[f1]
err <- err1 + err2 + err3 + abs(x1+x2*x3)*rnorm(N)
y <- x1 + x2 + x3 + err
data <- data.frame(y,x1,x2,x3,f1,clu1,clu2,clu3)
clu <- list('clu1', 'clu2', 'clu3')
summary(felm(y ~ x1 + x2 + f1|0|0|clu1+clu2+clu3, data)) |
context("Visualize clinical data with a static scatterplot")
test_that("The axis and color labels are correctly set to the variable names by default in the scatterplot", {
data <- data.frame(
A = c(1, 1, 2, 3),
B = c(2, 4, 1, 3),
C = c("trt1", "trt1", "trt2", "trt2"),
stringsAsFactors = FALSE
)
gPlot <- clinDataReview:::staticScatterplotClinData(
data = data,
xVar = "A", yVar = "B",
aesPointVar = list(color = "C")
)
expect_s3_class(gPlot, "ggplot")
expect_type(gPlot$labels, "list")
expect_equal(object = gPlot$labels$x, expected = "A")
expect_equal(object = gPlot$labels$y, expected = "B")
expect_equal(object = gPlot$labels$colour, expected = "C")
})
test_that("The axis and color labels are correctly extracted from the labels of all variables in the scatterplot", {
data <- data.frame(
A = c(1, 1, 2, 3),
B = c(2, 4, 1, 3),
C = c("trt1", "trt1", "trt2", "trt2"),
stringsAsFactors = FALSE
)
labelVars <- c(A = "var1", B = "var2", C = "colorVar")
gPlot <- clinDataReview:::staticScatterplotClinData(
data = data,
xVar = "A", yVar = "B",
aesPointVar = list(color = "C"),
labelVars = labelVars
)
expect_s3_class(gPlot, "ggplot")
expect_type(gPlot$labels, "list")
expect_equal(object = gPlot$labels$x, expected = "var1")
expect_equal(object = gPlot$labels$y, expected = "var2")
expect_equal(object = gPlot$labels$colour, expected = "colorVar")
})
test_that("A warning is generated if an axis transformation is specified both in the x-axis transformation and general parameters in the scatterplot", {
data <- data.frame(
A = c(1, 1, 2, 3),
B = c(2, 4, 1, 3),
C = c("trt1", "trt1", "trt2", "trt2")
)
expect_warning(
clinDataReview:::staticScatterplotClinData(
data = data,
xVar = "A", yVar = "B",
aesPointVar = list(color = "C"),
xTrans = "log",
xPars = list(trans = "log")
),
"'trans' in parameters for x axis are ignored"
)
}) |
.getReturns4GARCH <- function() {
if ("rugarch" %in% (.packages())) {print("package rugarch is loaded")} else {
eval(parse( text="library(rugarch)"))}
name <- tclvalue(tkgetOpenFile(
filetypes = "{ {RData Files} {.RData} } { {All Files} * }"))
if (name == "")
return(data.frame())
temp=print(load(name))
dataz=eval(parse(text=temp))
dat=na.omit(diff(log(dataz)))
dat=xts::as.xts(dat)
assign("retDF", dat, envir = .JFEEnv)
cat("Returns data is imported sucessfully","\n")
print(tail(dat,2));print(head(dat,2))
cat("\n")
}
.getRawData4GARCH <- function() {
name <- tclvalue(tkgetOpenFile(
filetypes = "{ {RData Files} {.RData} {.rda}} { {All Files} * }"))
if (name == "")
return(data.frame())
temp=print(load(name))
dat=eval(parse(text=temp))
assign("retDF", dat, envir = .JFEEnv)
importedFileName=last(unlist(strsplit(name,"/")))
assign("importedFileName", importedFileName, envir = .JFEEnv)
print(paste("You are loading ",importedFileName,sep=" "))
print(tail(dat,2));print(head(dat,2))
cat("\n")
}
.garch <- function(datx0, home,exoInd,exoGARCH,model,distribution,arch,garch,archm,AR,MA,arfima){
dat=datx0
Y=dat[,home]
archOrder=as.numeric(arch)
garchOrder=as.numeric(garch)
arOrder=as.numeric(AR)
maOrder=as.numeric(MA)
if (archm=="FALSE") {archmTF=eval(parse(text=archm))} else {archmTF=TRUE}
if (arfima=="FALSE") {arfimaTF=eval(parse(text=arfima))} else {arfimaTF=TRUE}
if (exoInd == "None") {
meanSpec=list(armaOrder=c(arOrder,maOrder),include.mean=TRUE,archm=archmTF,archpow = archm, external.regressors = NULL,arfima = arfimaTF)} else {
x_mean=dat[,exoInd]
meanSpec=list(armaOrder=c(arOrder,maOrder),include.mean=TRUE,archm=archmTF,archpow = archm, external.regressors = as.matrix(x_mean),arfima = arfimaTF)
}
if (exoGARCH == "None"){
varSpec=list(model=model,garchOrder=c(archOrder,garchOrder),external.regressors=NULL)} else {
x_garch=dat[,exoGARCH]
varSpec=list(model=model,garchOrder=c(archOrder,garchOrder),external.regressors=as.matrix(x_garch))
}
distSpec=distribution
mySpec=rugarch::ugarchspec(mean.model=meanSpec, variance.model=varSpec, distribution.model=distSpec)
myFit = rugarch::ugarchfit(data= Y, spec=mySpec,solver="hybrid")
cat("\n","Parameter Estimates","\n")
print(round(myFit@fit$matcoef,4))
cat("\n","Nyblom test","\n")
print(rugarch::nyblom(myFit))
cat("\n","Sign Bias Test","\n")
print(rugarch::signbias(myFit))
cat("\n","Goodness-of-Fit Test","\n")
print(rugarch::gof(myFit,c(20,30,40,50)))
cat("\n","Info Criteria","\n")
print(rugarch::infocriteria(myFit))
cat("\n","Likelihood","\n")
print(rugarch::likelihood(myFit))
savedFile=paste0(model,"_",distSpec,"_",archOrder,garchOrder,".RData")
cat("\n","The estimation output is saved as ",savedFile,"\n","at ", getwd())
save(myFit,file=savedFile)
}
.garchMenu <- function(){
retAS=get("retDF",envir = .JFEEnv)
top <- tktoplevel(borderwidth=10)
tkwm.title(top, "Univariate GARCH")
xBox <- .variableListBox(top, colnames(retAS), title="Pick One")
xBoxEXO <- .variableListBox(top, c("None",colnames(retAS)), title="External Xs in MEAN", selectmode = "extended")
xBoxVAREXO <- .variableListBox(top, c("None",colnames(retAS)), title="External Xs in GARCH", selectmode = "extended")
onOK <- function(){
home <- .getSelection(xBox)
exoInd <- .getSelection(xBoxEXO)
exoGARCH <- .getSelection(xBoxVAREXO)
if (ncol(retAS) == 0){
tkmessageBox(message = "You must import a dataset", icon = "error", type = "ok")
return()
}
FREQtype <- tclvalue(freqVariable)
model <- tclvalue(modelVariable)
distribution <- tclvalue(distVariable)
arch <- tclvalue(archVariable)
garch <- tclvalue(garchVariable)
archm <- tclvalue(archmVariable)
AR <- tclvalue(arVariable)
MA <- tclvalue(maVariable)
arfima <- tclvalue(arfimaVariable)
if (FREQtype=="daily"){
x=retAS } else {
transForm=paste0("xts::to.",FREQtype,"(retAS,indexAt='endof',OHLC = FALSE)")
x=eval(parse(text=transForm))
}
.garch(x,home,exoInd,exoGARCH,model,distribution,arch,garch,archm,AR,MA,arfima)
}
tkgrid(.getFrame(xBox),.getFrame(xBoxEXO),.getFrame(xBoxVAREXO), sticky="n")
rightFrame <- tkframe(top)
freqFrame <- tkframe(rightFrame)
.radioButtons(top,name="freq", buttons=c("Daily", "Week", "Month","Quarter"), values=c("daily", "weekly", "monthly", "quarterly"), labels=c("Daily Frequency (Default)", "Weekly Frequency", "Monthly Frequency","Quarterly Frequency"), title="Frequency Conversion")
freqVariable <- freqVariable
tkgrid(freqFrame,rightFrame,sticky="w")
models=c("sGARCH","gjrGARCH","eGARCH","iGARCH","apARCH")
modelFrame <- tkframe(rightFrame)
.radioButtons(top,name="model", buttons=models, values=c("sGARCH","gjrGARCH","eGARCH","iGARCH","apARCH"), labels=c("standard GARCH","gjr GARCH","exponential GARCH","integrated GARCH","asymmetric power GARCH"), title="GARCH models")
modelVariable <- modelVariable
tkgrid(modelFrame,sticky="w")
Dists=c("norm", "snorm", "std", "sstd", "ged","sged", "nig", "jsu")
distFrame <- tkframe(rightFrame)
.radioButtons(top,name="dist", buttons=Dists, values=Dists, labels=c("Normal Distribution", "skewed Normal Distribution", "Student t Distribution", "skewed Student t Distribution", "GED Distribution","skewed GED Distribution", "Negative Inverse Gaussian Distribution", "Johnson's SU-distribution"), title="Distributions")
distVariable <- distVariable
tkgrid(distFrame,sticky="w")
archFrame <- tkframe(rightFrame)
archVariable <- tclVar("1")
archField <- tkentry(archFrame,width="4",textvariable=archVariable)
tkgrid(tklabel(archFrame,text="ARCH term= ", fg="blue"), archField, sticky="w")
tkgrid(archFrame,sticky="w")
garchFrame <- tkframe(rightFrame)
garchVariable <- tclVar("1")
garchField <- tkentry(garchFrame, width="4", textvariable=garchVariable)
tkgrid(tklabel(garchFrame, text="GARCH term = ", fg="blue"), garchField, sticky="w")
tkgrid(garchFrame, sticky="w")
archmFrame <- tkframe(rightFrame)
archmVariable <- tclVar("FALSE")
archmField <- tkentry(archmFrame, width="6", textvariable=archmVariable)
tkgrid(tklabel(archmFrame, text="Garch-in-Mean power. Enter 1 for order", fg="blue"), archmField, sticky="w")
tkgrid(archmFrame, sticky="w")
arFrame <- tkframe(rightFrame)
arVariable <- tclVar("0")
arField <- tkentry(arFrame,width="4",textvariable=arVariable)
tkgrid(tklabel(arFrame,text="AR in mean= ", fg="blue"), arField, sticky="w")
tkgrid(arFrame, sticky="w")
maFrame <- tkframe(rightFrame)
maVariable <- tclVar("0")
maField <- tkentry(maFrame, width="4", textvariable=maVariable)
tkgrid(tklabel(maFrame, text="MA in mean = ", fg="blue"), maField, sticky="w")
tkgrid(maFrame, sticky="w")
arfimaFrame <- tkframe(rightFrame)
arfimaVariable <- tclVar("FALSE")
arfimaField <- tkentry(arfimaFrame, width="6", textvariable=arfimaVariable)
tkgrid(tklabel(arfimaFrame, text="ARFIMA diff. Enter TRUE for yes ", fg="blue"), arfimaField, sticky="w")
tkgrid(arfimaFrame, sticky="w")
buttonsFrame <- tkframe(top,width=250)
tkgrid(buttonsFrame, columnspan=2, sticky="w")
okButton <- tkbutton(buttonsFrame, text = "OK", command = onOK, anchor = "center", relief="ridge", width = "9")
tkbind(top,"Q",function() tcl(okButton,"invoke"))
tkfocus(okButton)
tkconfigure(okButton,foreground="red",font=tkfont.create(size=9,weight="bold"))
tkpack(okButton, side = "left",fill = "x",ipady=2)
quitCMD <- function(){
tkdestroy(top)
}
quitButton<-tkbutton(buttonsFrame, text = "Quit", command = quitCMD, anchor = "center",relief="ridge",width = "9")
tkconfigure(quitButton,foreground="red",font=tkfont.create(size=9,weight="bold"))
tkpack(quitButton, side = "left",fill = "x",ipady=2)
tkfocus(top)
} |
setClass(
Class = "KingOfTheFields"
)
setClass(
Class = "KingOfTheTown"
)
setClass(
Class = "KingOfTheEarth",
contains = c("KingOfTheTown", "KingOfTheFields")
)
setClass(
Class = "KingOfTheSky",
contains = c("KingOfTheEarth")
) |
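# The four classes form a diamond: KingOfTheSky extends KingOfTheEarth, which
# extends both KingOfTheTown and KingOfTheFields, so instances inherit from
# every ancestor.
# is(new("KingOfTheSky"), "KingOfTheFields")   # TRUE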
library(testthat)
library(prodlim)
library(data.table)
context("Prodlim")
test_that("competing risk in case of only one event",{
set.seed(10)
d <- SimSurv(10)
setDT(d)
d[,event:=factor(event,levels=c(0,1),labels=c("0","2"))]
f <- prodlim(Hist(time,event)~X1,data=d)
predict(f,cause="2",times=4,newdata=data.frame(X1=1))
expect_error(predict(f,cause="1",times=4,newdata=data.frame(X1=1)))
set.seed(10)
dd <- SimCompRisk(20)
F <- prodlim(Hist(time,event)~X1,data=dd)
predict(F,cause="1",times=4,newdata=data.frame(X1=0:1))
expect_equal(lapply(predict(F,cause=2,times=4,newdata=data.frame(X1=0:1)),round,4),list(`X1=0`=0.0714,`X1=1`=0))
expect_error(predict(F,cause=3,times=4,newdata=data.frame(X1=0:1)))
expect_error(summary(F,cause=3))
expect_error(plot(F,cause=3))
})
test_that("strata",{
d <- data.frame(time=1:3,status=c(1,0,1),a=c(1,9,9),b=factor(c(0,1,0)))
expect_output(print(prodlim(Hist(time,status)~b+factor(a),data=d)))
})
test_that("prodlim",{
library(lava)
library(riskRegression)
library(etm)
m <- crModel()
addvar(m) <- ~X1+X2+X3+X4+X5+X6
distribution(m,"X3") <- binomial.lvm()
distribution(m,"X4") <- normal.lvm(mean=50,sd=10)
distribution(m,"eventtime1") <- coxWeibull.lvm(scale=1/200)
distribution(m,"censtime") <- coxWeibull.lvm(scale=1/1000)
m <- categorical(m,K=4,eventtime1~X5,beta=c(1,0,0,0),p=c(0.1,0.2,0.3))
m <- categorical(m,K=3,eventtime1~X1,beta=c(2,1,0),p=c(0.3,0.2))
regression(m,to="eventtime1",from=c("X2","X4")) <- c(0.3,0)
regression(m,to="eventtime2",from=c("X2","X4")) <- c(0.6,-0.07)
set.seed(17)
d <- sim(m,200)
d$X1 <- factor(d$X1,levels=c(0,1,2),labels=c("low survival","medium survival","high survival"))
d$X5 <- factor(d$X5,levels=c("0","1","2","3"),labels=c("one","two","three","four"))
d$Event <- factor(d$event,levels=c("0","1","2"),labels=c("0","cause-1","cause-2"))
d$status <- 1*(d$event!=0)
head(d)
s0 <- prodlim(Hist(time,status)~1,data=d)
print(s0)
summary(s0,intervals=TRUE)
stats::predict(s0,times=1:10)
su <- prodlim(Hist(time,status)~1,data=d,subset=d$X1=="medium survival")
print(su)
s1 <- prodlim(Hist(time,status)~X1,data=d)
print(s1)
summary(s1,intervals=TRUE,newdata=data.frame(X1=c("medium survival","high survival","low survival")))
stats::predict(s1,times=0:10,newdata=data.frame(X1=c("medium survival","low survival","high survival")))
s2 <- prodlim(Hist(time,status)~X2,data=d)
print(s2)
summary(s2,intervals=TRUE)
stats::predict(s2,times=0:10,newdata=data.frame(X2=quantile(d$X2)))
s1a <- prodlim(Hist(time,status)~X1+X3,data=d)
print(s1a)
summary(s1a,intervals=TRUE)
stats::predict(s1a,times=0:10,newdata=expand.grid(X1=levels(d$X1),X3=unique(d$X3)))
s3 <- prodlim(Hist(time,status)~X1+X2,data=d)
print(s3)
summary(s3,intervals=TRUE)
stats::predict(s3,times=0:10,newdata=expand.grid(X1=levels(d$X1),X2=c(quantile(d$X2,0.05),median(d$X2))))
f0 <- prodlim(Hist(time,event)~1,data=d)
print(f0)
summary(f0,intervals=TRUE)
stats::predict(f0,times=1:10)
f1 <- prodlim(Hist(time,event)~X1,data=d)
print(f1)
summary(f1,intervals=TRUE,newdata=data.frame(X1=c("medium survival","high survival","low survival")))
stats::predict(f1,times=0:10,newdata=data.frame(X1=c("medium survival","low survival","high survival")))
f2 <- prodlim(Hist(time,event)~X2,data=d)
print(f2)
summary(f2,intervals=TRUE)
stats::predict(f2,times=0:10,newdata=data.frame(X2=quantile(d$X2)))
f1a <- prodlim(Hist(time,event)~X1+X3,data=d)
print(f1a)
summary(f1a,intervals=TRUE)
stats::predict(f1a,times=0:10,newdata=expand.grid(X1=levels(d$X1),X3=unique(d$X3)))
f3 <- prodlim(Hist(time,event)~X1+X2,data=d)
print(f3)
summary(f3,intervals=TRUE)
stats::predict(f3,times=0:10,newdata=expand.grid(X1=levels(d$X1),X2=c(quantile(d$X2,0.05),median(d$X2))))
data(pbc)
prodlim.0 <- prodlim(Hist(time,status!=0)~1,data=pbc)
survfit.0 <- survfit(Surv(time,status!=0)~1,data=pbc)
ttt <- sort(unique(d$time)[d$event==1])
ttt <- ttt[-length(ttt)]
sum0.s <- summary(survfit.0,times=ttt)
testdata <- data.frame(time=c(16.107812,3.657545,1.523978),event=c(0,1,1))
sum0 <- summary(survfit(Surv(time,event)~1,data=testdata),times=sort(testdata$time))
testdata$timeR <- round(testdata$time,1)
sum1 <- summary(survfit(Surv(timeR,event)~1,data=testdata),times=sort(testdata$time))
sum0
sum1
result.survfit <- data.frame(time=sum0.s$time,n.risk=sum0.s$n.risk,n.event=sum0.s$n.event,surv=sum0.s$surv,std.err=sum0.s$std.err,lower=sum0.s$lower,upper=sum0.s$upper)
result.prodlim <- data.frame(summary(prodlim.0,times=ttt)$table[,c("time","n.risk","n.event","n.lost","surv","se.surv","lower","upper")])
cbind(result.survfit[,c("time","n.risk","n.event","surv")],result.prodlim[,c("time","n.risk","n.event","surv")])
a <- round(result.survfit$surv,8)
b <- round(result.prodlim$surv[!is.na(result.prodlim$se.surv)],8)
if (all(a==b)){cat("\nOK\n")}else{cat("\nERROR\n")}
if (all(round(result.survfit$std.err,8)==round(result.prodlim$se.surv[!is.na(result.prodlim$se.surv)],8))){cat("\nOK\n")}else{cat("\nERROR\n")}
pbc <- pbc[order(pbc$time,-pbc$status),]
set.seed(17)
boot <- sample(1:NROW(pbc),size=NROW(pbc),replace=TRUE)
boot.weights <- table(factor(boot,levels=1:NROW(pbc)))
s1 <- prodlim(Hist(time,status>0)~1,data=pbc,caseweights=boot.weights)
s2 <- prodlim(Hist(time,status>0)~1,data=pbc[sort(boot),])
})
test_that("weigths, subset and smoothing",{
d <- SimSurv(100)
f1 <- prodlim(Hist(time,status)~X2,data=d)
f2 <- prodlim(Hist(time,status)~X2,data=d,caseweights=rep(1,100))
expect_equal(f1$surv,f2$surv)
d <- SimSurv(100)
d <- data.frame(d, group = c(rep(1, 70), rep(0,30)))
f1a <- prodlim(Hist(time,status)~X2,data=d, caseweights = rep(1, 100), subset = d$group==1,bandwidth=0.1)
f1b <- prodlim(Hist(time,status)~X2,data=d[d$group==1, ], caseweights = rep(1, 100)[d$group==1], bandwidth=0.1)
f1a$call <- f1b$call
expect_equal(f1a,f1b)
f1 <- prodlim(Hist(time,status)~X1,data=d, subset = d$group==1)
f2 <- prodlim(Hist(time,status)~X1,data=d,caseweights=d$group)
expect_equal(unique(f1$surv),unique(f2$surv))
expect_equal(predict(f1,newdata = d[1, ], times = 5),
predict(f2, newdata = d[1, ], times = 5))
})
test_that("weights and delay",{
library(survival)
library(survey)
library(SmoothHazard)
library(etm)
pbc <- pbc[order(pbc$time,-pbc$status),]
set.seed(17)
pbc$randprob <- abs(rnorm(NROW(pbc)))
dpbc <- svydesign(id=~id, weights=~randprob, strata=NULL, data=pbc)
survey.1<-svykm(Surv(time,status>0)~1, design=dpbc)
prodlim.1 <- prodlim(Hist(time,status>0)~1,data=pbc,caseweights=pbc$randprob)
pbc$entry <- round(pbc$time/5)
survfit.delay <- survfit(Surv(entry,time,status!=0)~1,data=pbc)
prodlim.delay <- prodlim(Hist(time,status!=0,entry=entry)~1,data=pbc)
pbc0 <- pbc
pbc0$entry <- round(pbc0$time/5)
survfit.delay.edema <- survfit(Surv(entry,time,status!=0)~edema,data=pbc0)
prodlim.delay.edema <- prodlim(Hist(time,status!=0,entry=entry)~edema,data=pbc0)
data(abortion)
cif.ab.etm <- etmCIF(Surv(entry, exit, cause != 0) ~ 1,abortion,etype = cause,failcode = 3)
cif.ab.prodlim <- prodlim(Hist(time=exit, event=cause,entry=entry) ~ 1,data=abortion)
plot(cif.ab.etm,lwd=8,col=3)
plot(cif.ab.prodlim,add=TRUE,lwd=4,col=5,cause=3)
data(abortion)
x <- prodlim(Hist(time=exit, event=cause,entry=entry) ~ 1,data=abortion)
x0 <- etmCIF(Surv(entry, exit, cause != 0) ~ 1,abortion,etype = cause)
graphics::par(mfrow=c(2,2))
cif.ab.etm <- etmCIF(Surv(entry, exit, cause != 0) ~ 1,abortion,etype = cause,failcode = 3)
cif.ab.prodlim <- prodlim(Hist(time=exit, event=cause,entry=entry) ~ 1,data=abortion)
data(abortion)
cif.ab.etm <- etmCIF(Surv(entry, exit, cause != 0) ~ group,abortion,etype = cause,failcode = 3)
names(cif.ab.etm[[1]])
head(cbind(cif.ab.etm[[1]]$time,cif.ab.etm[[1]]$n.risk))
cif.ab.prodlim <- prodlim(Hist(time=exit, event=cause,entry=entry) ~ group,data=abortion)
testdata <- data.frame(entry=c(1,5,2,8,5),exit=c(10,6,4,12,33),event=c(0,1,0,1,0))
cif.test.etm <- etmCIF(Surv(entry, exit, event) ~ 1,data=testdata,etype = event,failcode = 1)
cif.test.survival <- survfit(Surv(entry, exit, event) ~ 1,data=testdata)
cif.test.prodlim <- prodlim(Hist(exit,event,entry=entry)~1,data=testdata)
mod <- idmModel(K=10,schedule=0,punctuality=1)
regression(mod,from="X",to="lifetime") <- log(2)
regression(mod,from="X",to="waittime") <- log(2)
regression(mod,from="X",to="illtime") <- log(2)
set.seed(137)
testdata <- round(sim(mod,250),1)
illdata <- testdata[testdata$illstatus==1,]
illdata <- illdata[order(illdata$lifetime,-illdata$seen.exit),]
survfit.delayed.ill <- survfit(Surv(illtime,lifetime,seen.exit)~1,data=illdata)
prodlim.delayed.ill <- prodlim(Hist(lifetime,seen.exit,entry=illtime)~1,data=illdata)
})
test_that("interval censored",{
library(SmoothHazard)
m <- idmModel(scale.illtime=1/70,
shape.illtime=1.8,
scale.lifetime=1/50,
shape.lifetime=0.7,
scale.waittime=1/30,
shape.waittime=0.7)
d <- round(sim(m,6),1)
icens <- prodlim(Hist(time=list(L,R),event=seen.ill)~1,data=d)
})
test_that("left truncation: survival",{
library(prodlim)
library(data.table)
library(survival)
dd <- data.table(entry=c(1,1,56,1,1,225,277,1647,1,1),
time=c(380,46,217,107,223,277,1638,2164,45,40),
status=c(1,0,1,1,0,0,0,1,0,1))
prodlim.delayed <- prodlim(Hist(time,status,entry=entry)~1,data=dd)
data.table(time=prodlim.delayed$time,n.risk=prodlim.delayed$n.risk,n.event=prodlim.delayed$n.event,n.lost=prodlim.delayed$n.lost)
summary(prodlim.delayed,times=c(0,10,56,267,277,1000,2000))
survfit.delayed <- survfit(Surv(entry,time,status)~1,data=dd)
summary(prodlim.delayed,times=c(0,10,40),intervals=TRUE)
summary(survfit.delayed,times=c(0,1,10,40,50))
summary.survfit.delayed <- summary(survfit.delayed,times=c(0,10,56,267,277,1000,2000))
summary.prodlim.delayed <- summary(prodlim.delayed,times=c(0,10,56,267,277,1000,2000),intervals=1)
expect_equal(as.numeric(summary.survfit.delayed$surv),
as.numeric(summary.prodlim.delayed$table[,"surv"]))
}) |
load("EBP/incomedata.RData")
load("EBP/incomedata_woTeruel.RData")
load("EBP/Xoutsamp_AuxVar.RData")
test_that("Does monte_carlo function give benchmark results?", {
suppressWarnings(RNGversion("3.5.0"))
framework <- framework_ebp(income ~ educ1,
Xoutsamp_AuxVar,
"provlab",
incomedata,
"provlab",
4282.081,
custom_indicator = NULL,
na.rm = TRUE,
weights = NULL)
ebp_optpar_bc <- read.csv2("EBP/ebp_optpar_bc.csv", sep = ",",
stringsAsFactors = TRUE)
ebp_shift_bc <- read.csv2("EBP/ebp_shift_bc.csv", sep = ",",
stringsAsFactors = TRUE)
lambda <- as.numeric(as.character(ebp_optpar_bc[,"Optpar"]))
shift <- as.numeric(as.character(ebp_shift_bc))
transformation_par <- data_transformation(fixed = income ~ educ1,
smp_data = framework$smp_data,
transformation = "box.cox",
lambda = lambda
)
mixed_model <- lme(fixed = income~educ1,
data = transformation_par$transformed_data ,
random = as.formula(paste0("~ 1 | as.factor(", framework$smp_domains, ")")),
method = "REML")
est_par <- model_par(mixed_model = mixed_model,
framework = framework
)
gen_par <- gen_model(model_par = est_par,
fixed = income~educ1,
framework = framework
)
set.seed(100)
point <- monte_carlo(transformation = "box.cox",
L = 2,
framework = framework,
lambda = lambda,
shift = shift,
model_par = est_par,
gen_model = gen_par
)
ebp_point_bc <- read.csv2("EBP/ebp_point_bc.csv", sep = ",",
stringsAsFactors = TRUE)
expect_equal(point[,"Quantile_10"],
as.numeric(as.character(ebp_point_bc[,"quant10"])))
expect_equal(point[,"Head_Count"],
as.numeric(as.character(ebp_point_bc[,"hcr"])))
}) |
ICSKATwrapper <- function(left_dmat, right_dmat, initValues, lt, rt, obs_ind, tpos_ind, gMat,
PH=TRUE, nKnots=1, maxIter=3, eps=10^(-6), runOnce = FALSE, returnNull = FALSE) {
xMat <- left_dmat[, 1:(ncol(left_dmat) - nKnots - 2)]
counter <- 0
pass <- FALSE
while (counter < maxIter) {
counter <- counter + 1
if (counter == 1) {
init_beta <- initValues
} else {
init_beta <- stats::runif(n=ncol(left_dmat), min = -1, max = 1)
}
if (PH) {
nullFit <- ICSKAT_fit_null(init_beta=init_beta, lt=lt, rt=rt,
left_dmat=left_dmat, right_dmat=right_dmat,
obs_ind=obs_ind, tpos_ind=tpos_ind, eps=eps, runOnce=runOnce)
} else {
nullFit <- ICSKAT_fit_null_PO(init_beta=init_beta,
lt=lt, rt=rt, left_dmat=left_dmat, right_dmat=right_dmat,
obs_ind=obs_ind, tpos_ind=tpos_ind, eps=eps)
}
if ( (nullFit$err == 1 | nullFit$diff_beta > eps) & runOnce == FALSE) {
next
}
if (PH) {
skatOutput <- ICskat(left_dmat=left_dmat, tpos_ind=tpos_ind, obs_ind=obs_ind,
right_dmat=right_dmat, gMat=gMat, lt=lt, rt=rt,
null_beta=as.numeric(nullFit$beta_fit), Itt=nullFit$Itt)
} else {
skatOutput <- ICskatPO(left_dmat=left_dmat, tpos_ind=tpos_ind, obs_ind=obs_ind,
right_dmat=right_dmat, gMat=gMat, lt=lt, rt=rt,
null_beta=as.numeric(nullFit$beta_fit), Itt=nullFit$Itt)
}
if ( skatOutput$err == 0 | skatOutput$err == 22 | runOnce == TRUE ) {
pass <- TRUE
break
}
}
if (!pass) {
if (nullFit$err == 1 | nullFit$diff_beta > eps) {
skatOutput <- list(p_SKAT=NA, p_burden=NA, complex=NA, err=1, errMsg="Failed null fit")
} else {
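# no-op placeholder: fall through with skatOutput from the final attempt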
a <- 1
}
}
if (returnNull) {
return(list(skatOutput = skatOutput, nullFit = nullFit))
} else {
return(skatOutput)
}
} |
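# Hypothetical usage sketch (argument construction follows the ICSKAT package's
# workflow; the object names here are assumptions, not tested code):
# res <- ICSKATwrapper(left_dmat, right_dmat, initValues = rep(0, ncol(left_dmat)),
#                      lt = lt, rt = rt, obs_ind = obs_ind, tpos_ind = tpos_ind,
#                      gMat = gMat, returnNull = TRUE)
# res$skatOutput$p_SKAT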
BS.uni.nonpar = function(Y, s, e, N, delta, level = 0){
S = NULL
Dval = NULL
Level = NULL
Parent = NULL
if(e-s <= 2*delta){
return(list(S = S, Dval = Dval, Level = Level, Parent = Parent))
}else{
level = level + 1
parent = matrix(c(s, e), nrow = 2)
a = rep(0, e-s-2*delta+1)
for(t in (s+delta):(e-delta)){
a[t-s-delta+1] = CUSUM.KS(Y, s, e, t, N)
}
best_value = max(a)
best_t = which.max(a) + s + delta - 1
temp1 = BS.uni.nonpar(Y, s, best_t-1, N, delta, level)
temp2 = BS.uni.nonpar(Y, best_t, e, N, delta, level)
S = c(temp1$S, best_t, temp2$S)
Dval = c(temp1$Dval, best_value, temp2$Dval)
Level = c(temp1$Level, level, temp2$Level)
Parent = cbind(temp1$Parent, parent, temp2$Parent)
result = list(S = S, Dval = Dval, Level = Level, Parent = Parent)
class(result) = "BS"
return(result)
}
}
CUSUM.KS = function(Y, s, e, t, N, vector = FALSE){
n_st = sum(N[s:t])
n_se = sum(N[s:e])
n_te = sum(N[(t+1):e])
aux = as.vector(Y[,s:t])
aux = aux[which(is.na(aux)==FALSE)]
temp = ecdf(aux)
vec_y = as.vector(Y[,s:e])
vec_y = vec_y[which(is.na(vec_y)==FALSE)]
Fhat_st = temp(vec_y)
aux = as.vector(Y[,(t+1):e])
aux = aux[which(is.na(aux)==FALSE)]
temp = ecdf(aux)
Fhat_te = temp(vec_y)
if(vector == TRUE){
result = sqrt(n_st * n_te / n_se) * abs(Fhat_te - Fhat_st)
}else{
result = sqrt(n_st * n_te / n_se) * max(abs(Fhat_te - Fhat_st))
}
return(result)
}
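# Example: the KS-type CUSUM statistic peaks near a true change point. Y holds
# one column per time point (rows are repeated observations) and N the
# per-column sample sizes.
# set.seed(1)
# Y <- cbind(matrix(rnorm(50), 5, 10), matrix(rnorm(50, mean = 2), 5, 10))
# N <- rep(5, 20)
# CUSUM.KS(Y, s = 1, e = 20, t = 10, N)                  # large near the change
# BS.uni.nonpar(Y, s = 1, e = 20, N = N, delta = 3)$S    # candidate change points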
WBS.uni.nonpar = function(Y, s, e, Alpha, Beta, N, delta, level = 0){
Alpha_new = pmax(Alpha, s)
Beta_new = pmin(Beta, e)
idx = which(Beta_new - Alpha_new > 2*delta)
Alpha_new = Alpha_new[idx]
Beta_new = Beta_new[idx]
M = length(Alpha_new)
S = NULL
Dval = NULL
Level = NULL
Parent = NULL
if(M == 0){
return(list(S = S, Dval = Dval, Level = Level, Parent = Parent))
}else{
level = level + 1
parent = matrix(c(s, e), nrow = 2)
a = rep(0, M)
b = rep(0, M)
for(m in 1:M){
temp = rep(0, Beta_new[m] - Alpha_new[m] - 2*delta + 1)
for(t in (Alpha_new[m]+delta):(Beta_new[m]-delta)){
temp[t-(Alpha_new[m]+delta)+1] = CUSUM.KS(Y, Alpha_new[m], Beta_new[m], t, N)
}
best_value = max(temp)
best_t = which.max(temp) + Alpha_new[m] + delta - 1
a[m] = best_value
b[m] = best_t
}
m_star = which.max(a)
}
temp1 = WBS.uni.nonpar(Y, s, b[m_star]-1, Alpha, Beta, N, delta, level)
temp2 = WBS.uni.nonpar(Y, b[m_star], e, Alpha, Beta, N, delta, level)
S = c(temp1$S, b[m_star], temp2$S)
Dval = c(temp1$Dval, a[m_star], temp2$Dval)
Level = c(temp1$Level, level, temp2$Level)
Parent = cbind(temp1$Parent, parent, temp2$Parent)
result = list(S = S, Dval = Dval, Level = Level, Parent = Parent)
class(result) = "BS"
return(result)
}
tuneBSuninonpar = function(BS_object, Y, N){
UseMethod("tuneBSuninonpar", BS_object)
}
tuneBSuninonpar.BS = function(BS_object, Y, N){
obs_num = ncol(Y)
Dval = BS_object$Dval
aux = sort(Dval, decreasing = TRUE)
len_tau = 30
tau_grid = rev(aux[1:min(len_tau,length(Dval))]) - 10^{-30}
tau_grid = c(tau_grid, 10)
B_list = c()
for(j in 1:length(tau_grid)){
aux = thresholdBS(BS_object, tau_grid[j])$cpt_hat[,1]
if(length(aux) == 0){
break
}
B_list[[j]] = sort(aux)
}
B_list = unique(B_list)
if(length(B_list) == 0){
return(NULL)
}
if(length(B_list[[1]]) == 0){
return(B_list[[1]])
}
lambda = log(sum(N))/1.5
for(j in 1:(length(B_list))){
B2 = B_list[[j]]
if(j < length(B_list)){
B1 = B_list[[j+1]]
}else if(j == length(B_list)){
B1 = NULL
}
temp = setdiff(B2, B1)
st = -10^15
for(l in 1:length(temp)){
eta = temp[l]
if(length(B1) == 0){
eta1 = 1
eta2 = obs_num
}else if(length(B1) > 0){
for(k in 1:length(B1)){
if(B1[k] > eta){
break
}
}
if(B1[k] > eta){
eta2 = B1[k]
if(k == 1)
eta1 = 1
if(k > 1)
eta1 = B1[k-1] + 1
}
if(B1[k] < eta){
eta1 = B1[k] + 1
eta2 = obs_num
}
}
st_aux = CUSUM.KS(Y, eta1, eta2, eta, N)^2
if(st_aux > st){
st = st_aux
}
}
if(st > lambda){
return(B2)
}
}
return(B1)
} |
nonpar_mstep = function(x, wt, K = 5, lambda0 = 0.5){
nstate = ncol(wt)
emission = list(coef = list(), lambda = numeric(nstate))
lambda = numeric(nstate)
d = ncol(x)
n = nrow(x)
tryCatch(
{
a<-matrix(0,nrow=n,ncol=K^d)
if(object.size(a)>1.8e+9)
warning("The dimension of the data or the degree of the spline is large!
This will result in a very slow progress!")
rm(a)
},
error=function(cond) {
stop("The dimension of the data or the degree of the spline is too large!
There is no enough memory for fitting! Try another emission distribution.")
})
basis = btensor(lapply(1:d, function(i) x[, i]),
df = K, bknots = lapply(1:d,
function(i) c(min(x[, i])-0.01,
max(x[, i])+0.01)))
for(j in 1:nstate){
lambda[j] = lambda0
mloglike_lambda0 = function(beta){
dbeta = beta
for(m in 1:2) dbeta = diff(dbeta)
omega = exp(beta) / sum(exp(beta))
loglike = t(wt[, j]) %*% log(basis %*% omega)-
lambda0/2 * sum(dbeta^2)
return(-loglike)
}
start = runif(K^d)
suppressWarnings(fit <- nlm(mloglike_lambda0, start, hessian = T))
H_lambda0 = -fit$hessian
difference = 1; eps = 1e-6
cntr = 1
beta_hat = list(rep(1, K))
while(difference > eps){
mloglike = function(beta){
dbeta = beta
for(m in 1:2) dbeta = diff(dbeta)
omega = exp(beta) / sum(exp(beta))
inf_index = which(is.infinite(log(basis %*% omega)))
loglike = t(wt[, j]) %*% log(basis %*% omega) -
lambda[j]/2 * sum(dbeta^2)
return(-loglike)
}
start = runif(K^d)
suppressWarnings(fit <- nlm(mloglike, start, hessian = T))
H = -fit$hessian
beta_hat[[cntr+1]] = fit$estimate
df_lambda = tr(ginv(H) %*% H_lambda0)
dbeta = beta_hat[[cntr+1]]
for(m in 1:2) dbeta = diff(dbeta)
lambda[j] = (df_lambda - d)/(sum(dbeta^2))
difference = sum(abs(beta_hat[[cntr+1]] - beta_hat[[cntr]]))  # signed sums can cancel; track absolute change
cntr = cntr+1
}
emission$coef[[j]] = exp(beta_hat[[cntr]]) / sum(exp(beta_hat[[cntr]]))
emission$lambda[j] = lambda[j]
}
emission
} |
test_that("illegal initializations are rejected", {
expect_silent(NormalDistribution$new(0, 1))
expect_error(NormalDistribution$new("0",1), class="mu_not_numeric")
expect_error(NormalDistribution$new(0,"1"), class="sigma_not_numeric")
})
test_that("distribution name is correct", {
sn <- NormalDistribution$new(0, 1)
expect_identical(sn$distribution(), "N(0,1)")
n <- NormalDistribution$new(42, 1)
expect_identical(n$distribution(), "N(42,1)")
})
test_that("quantile function checks inputs", {
x <- NormalDistribution$new(0, 1)
probs <- c(0.1, 0.2, 0.5)
expect_silent(x$quantile(probs))
probs <- c(0.1, NA, 0.5)
expect_error(x$quantile(probs), class="probs_not_defined")
probs <- c(0.1, "boo", 0.5)
expect_error(x$quantile(probs), class="probs_not_numeric")
probs <- c(0.1, 0.4, 1.5)
expect_error(x$quantile(probs), class="probs_out_of_range")
probs <- c(0.1, 0.2, 0.5)
expect_length(x$quantile(probs),3)
})
test_that("pe, mean, sd and quantiles are returned correctly", {
sn <- NormalDistribution$new(0, 1)
expect_intol(sn$mean(), 0, 0.01)
expect_intol(sn$SD(), 1, 0.01)
probs <- c(0.025, 0.975)
q <- sn$quantile(probs)
expect_intol(q[1], -1.96, 0.05)
expect_intol(q[2], 1.96, 0.05)
})
test_that("random sampling is from a Normal distribution", {
mu <- 0
sigma <- 1
sn <- NormalDistribution$new(mu, sigma)
sn$sample(TRUE)
expect_equal(sn$r(), 0)
n <- 1000
samp <- sapply(1:n, FUN=function(i) {
sn$sample()
rv <- sn$r()
return(rv)
})
expect_length(samp, n)
skip_on_cran()
ht <- ks.test(samp, rnorm(n,mean=mu,sd=sigma))
expect_true(ht$p.value > 0.001)
}) |
zz_format <- function(origin = NULL, usr = NULL) {
usr <- .zz_get_key(usr = usr)
if (is.null(origin) || origin == "") {
endpoint <- zz_config[['format']][[1]]
} else {
endpoint <- paste0(zz_config[['format']][[1]], "/", origin)
}
response <- httr::GET(endpoint,
config = .zz_authenticate(usr = usr),
.zz_user_agent()
)
content <- .zz_parse_response(response = response)
if (!response[['status_code']] %in% c(200, 201)) {
stop(sprintf("Whoops! Zamzar responded with: %s, and a status code of: %d",
content[['errors']][['message']],
response[['status_code']])
)
}
container <- data.frame(target = content[['data']][['name']],
stringsAsFactors = FALSE)
if(length(content[['data']][['name']]) >= 50) {
container <- .zz_do_paging(content = content,
container = container,
endpoint = endpoint,
usr = usr)
}
if (is.null(origin) || origin == "") {
res <- container
} else {
res <- data.frame(target = content[['targets']][['name']],
cost = content[['targets']][['credit_cost']],
stringsAsFactors = FALSE)
}
  res
} |
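# Hedged usage sketch (added): zz_format() queries the live Zamzar API, so this
# is illustrative only; it assumes an API key has been stored where
# .zz_get_key() can find it. "png" is just an example origin format.
if(0) {
  all_origins <- zz_format()       # all formats Zamzar can convert from
  png_targets <- zz_format("png")  # conversion targets and credit cost for PNG input
  head(png_targets)
}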
suppressWarnings(RNGversion("3.5.0"))
set.seed(1, kind = "Mersenne-Twister", normal.kind = "Inversion")
n <- 1000
p <- 10
X <- matrix(rnorm(n * p), nrow = n)
beta <- c(seq(from = 0.1, to = 1, length.out = 5), rep(0, p-5))
y <- rbinom(n, 1, (1 + exp(-X %*% beta))^(-1))
fit <- gds(X, y, family = "binomial")
test_that("gds returns correct object", {
expect_s3_class(fit, "gds")
expect_equal(fit$family, "binomial")
expect_equal(length(fit$beta), 10)
expect_equal(round(fit$beta[[1]], 7), 0.1230598)
expect_equal(round(fit$beta[[3]], 7), 0.5015788)
expect_equal(round(fit$beta[[10]], 7), -0.0454783)
expect_equal(round(fit$intercept, 7), -0.1187412)
expect_equal(fit$num_non_zero, 8)
})
test_that("gds fails when it should", {
expect_error(gds(X))
expect_error(gds(X, lambda = 1:10))
expect_error(gds(X, y, family = "gamma"))
expect_error(gds(list(X), y))
expect_error(gds(X, y, lambda = -1))
})
test_that("S3 methods for gds work", {
expect_output(coef(fit),
regexp = "Non-zero coefficients:")
expect_output(print(fit),
regexp = "Generalized Dantzig Selector with family binomial, with 10 variables fitted with regularization parameter")
expect_s3_class(plot(fit), "ggplot")
})
suppressWarnings(RNGversion("3.5.0"))
set.seed(1, kind = "Mersenne-Twister", normal.kind = "Inversion")
n <- 1000
p <- 50
X <- matrix(rnorm(n * p), nrow = n)
beta <- c(seq(from = 0.1, to = 1, length.out = 5), rep(0, p-5))
y <- X %*% beta + rnorm(n, sd = 0.5)
set.seed(1, kind = "Mersenne-Twister", normal.kind = "Inversion")
fit <- gds(X, y)
set.seed(1, kind = "Mersenne-Twister", normal.kind = "Inversion")
fit2 <- gds(X, y, family = "gaussian")
test_that("default family of gds works",
expect_equal(fit, fit2))
rm(fit2)
test_that("gds returns correct object", {
expect_s3_class(fit, "gds")
expect_equal(fit$family, "gaussian")
expect_equal(length(fit$beta), 50)
expect_equal(round(fit$beta[[1]], 7), 0.1056266)
expect_equal(round(fit$beta[[30]], 7), 0)
expect_equal(fit$num_non_zero, 13)
})
test_that("S3 methods for gds work", {
expect_output(coef(fit),
regexp = "Non-zero coefficients:")
expect_output(coef(fit, all = TRUE), regexp = "Coefficient estimates:")
expect_output(print(fit),
regexp = "Generalized Dantzig Selector with family gaussian")
expect_s3_class(plot(fit), "ggplot")
})
suppressWarnings(RNGversion("3.5.0"))
set.seed(1, kind = "Mersenne-Twister", normal.kind = "Inversion")
n <- 50
p <- 15
X <- matrix(rnorm(n * p), nrow = n)
beta <- c(rep(.2, 5), rep(0, p-5))
y <- rpois(n, exp(X %*% beta))
fit <- gds(X, y, family = "poisson")
test_that("gds returns correct object", {
expect_s3_class(fit, "gds")
expect_equal(fit$family, "poisson")
expect_equal(length(fit$beta), 15)
expect_equal(round(fit$beta[[1]], 7), 0)
expect_equal(round(fit$beta[[3]], 7), 0.3465382)
expect_equal(fit$num_non_zero, 2)
})
test_that("S3 methods for gds work", {
expect_output(coef(fit),
regexp = "Non-zero coefficients:")
expect_output(coef(fit, all = TRUE), regexp = "Coefficient estimates:")
expect_output(print(fit),
regexp = "Generalized Dantzig Selector with family poisson")
expect_s3_class(plot(fit), "ggplot")
}) |
logRankDecSim <- function(randSeq, bias, endp){
stopifnot(is(randSeq, "randSeq"), randSeq@K == 2,
is(bias, "issue"), is(endp, "endpoint"))
biasM <- 1 / getExpectation(randSeq, bias, endp)
followUp <- endp@cenTime - endp@accrualTime
decision <- sapply(1:dim(randSeq@M)[1], function(i) {
timeVar <- rexp(length(biasM[i,]), rate = biasM[i,])
randCenVar <- rexp(length(biasM[i,]), rate = endp@cenRate)
endCenVar <- runif(length(biasM[i,]), min = followUp, max = endp@cenTime )
randVar <- pmin(timeVar, randCenVar, endCenVar)
status <- (randVar == timeVar)*1
if (sum(randSeq@M[i,]) == 0 || sum(randSeq@M[i,]) == length(biasM[i, ])) {
return(FALSE)
} else {
sdf <- survdiff(Surv(randVar, status) ~ randSeq@M[i, ])
p.value <- 1 - pchisq(sdf$chisq, length(sdf$n) - 1)
return(as.numeric(p.value <= bias@alpha))
}
})
decision
}
logRankRejectionProb <- function(randSeq, bias, endp) {
stopifnot(is(randSeq, "randSeq"), randSeq@K == 2,
is(bias, "issue"), is(endp, "endpoint"))
biasM <- 1 / getExpectation(randSeq, bias, endp)
alpha <- bias@alpha
followUp <- endp@cenTime - endp@accrualTime
rej.prob <- sapply(1:dim(randSeq@M)[1], function(i) {
phi <- function(t){ sum( (1-randSeq@M[i,]) * dexp(t, rate = biasM[i,]) ) / sum( dexp(t, rate = biasM[i,]) ) }
pi <- function(t){ sum( (1-randSeq@M[i,]) * (1-pexp(t, rate = biasM[i,])) ) / sum( (1-pexp(t, rate = biasM[i,])) ) }
V <- function(t){ sum( dexp(t, rate = biasM[i,]) ) / randSeq@N *
( 1-pexp(t, rate = endp@cenRate) ) * ( 1-punif(t, min = followUp, max = endp@cenTime) ) }
f1 <- function(t){(phi(t)-pi(t))*V(t)}
f2 <- function(t){pi(t)*(1-pi(t))*V(t)}
up <- endp@cenTime
int1 <- integrate(Vectorize(f1),0,up)$value
int2 <- integrate(Vectorize(f2),0,up)$value
Exp.approx <- int1/sqrt(1/randSeq@N * int2)
qlow <- qnorm( alpha/2 )
qup <- qnorm( 1 - alpha/2 )
    pnorm( qlow, Exp.approx, 1 ) + (1 - pnorm(qup, Exp.approx, 1) )
})
rej.prob
}
|
processLimeSurveyDropouts <- function(lastpage, pagenames = NULL,
relevantPagenames = NULL) {
if ((!requireNamespace("ggplot2", quietly = TRUE)) ||
(!requireNamespace("ggrepel", quietly = TRUE))) {
stop("To process the LimeSurvey dropouts, you need to have both ",
"the {ggplot2} and {ggrepel} packages installed. You can ",
"install them with:\n\n install.packages(",
"c('ggplot2', 'ggrepel'));\n");
}
if (!is.numeric(lastpage)) {
stop("Argument 'lastpage' is not a numeric vector but has class ",
class(lastpage), ". The first nonmissing values are: ",
         vecTxtQ(utils::head(stats::na.omit(lastpage))), ".");
}
res <- list();
res$specificDropout <- data.frame(lastpage = 0:max(lastpage));
if (is.null(pagenames)) pagenames <-
paste('Dropped out at page', seq(from=1, to=max(lastpage + 1)));
if (is.null(relevantPagenames)) relevantPagenames <-
paste('Page', seq(from=1, to=max(lastpage + 1)));
if (length(pagenames) != nrow(res$specificDropout)) {
stop("The vector 'pagenames' must have the same length as the number of pages ",
"in the 'lastpage' vector - but ", length(pagenames), " pagenames were ",
"provided, for ", nrow(res$specificDropout), " lastpages.");
}
totalParticipants <- length(lastpage);
res$specificDropout <- merge(res$specificDropout,
as.data.frame(table(lastpage),
responseName='frequency'),
by='lastpage',
all=TRUE);
res$specificDropout$frequency[is.na(res$specificDropout$frequency)] <- 0;
res$specificDropout <- res$specificDropout[order(as.numeric(res$specificDropout$lastpage)), ];
res$specificDropout$comments <- pagenames;
res$progressiveDropout <- data.frame(frequency = totalParticipants -
utils::head(c(0, utils::tail(cumsum(res$specificDropout$frequency), -1)), -1));
res$progressiveDropout$percentage <- 100 * res$progressiveDropout$frequency /
totalParticipants;
res$progressiveDropout$page <- 1:nrow(res$progressiveDropout);
res$progressiveDropout$prettyPercentage <- paste0(round(res$progressiveDropout$percentage), "%");
  res$plots <- list();
res$plots$absoluteDropout <-
ggplot2::ggplot(
res$progressiveDropout,
ggplot2::aes_string(x='page', y='frequency')
) +
ggplot2::geom_point(size=4) +
ggplot2::geom_line(size=1) +
ggplot2::ylab('Number of participants') +
ggplot2::xlab('Page in the questionnaire') +
ggplot2::theme_bw() +
ggrepel::geom_text_repel(ggplot2::aes_string(label='frequency'),
point.padding = ggplot2::unit(1, 'lines'),
min.segment.length = ggplot2::unit(0.05, "lines"),
segment.color="
size=5, nudge_x=1) +
ggplot2::scale_x_continuous(breaks=res$progressiveDropout$page);
res$plots$relativeDropout <-
ggplot2::ggplot(
res$progressiveDropout,
ggplot2::aes_string(x="page", y="percentage")
) +
ggplot2::geom_point(size=4) +
ggplot2::geom_line(size=1) +
ggplot2::ylab('Percentage of participants') +
ggplot2::xlab('Page in the questionnaire') +
ggplot2::theme_bw() +
ggrepel::geom_text_repel(ggplot2::aes_string(label='prettyPercentage'),
point.padding = ggplot2::unit(1, 'lines'),
min.segment.length = ggplot2::unit(0.05, "lines"),
segment.color="
size=5, nudge_x=1) +
ggplot2::scale_x_continuous(breaks=res$progressiveDropout$page);
class(res) <- 'limeSurveyDropouts';
return(res);
} |
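# Hedged usage sketch (added): simulate 'lastpage' values for 200 respondents
# on a six-page survey and inspect the dropout summaries. As the function
# itself checks, ggplot2 and ggrepel must be installed.
if(0) {
  set.seed(42)
  lp <- sample(0:5, size = 200, replace = TRUE,
               prob = c(0.05, 0.10, 0.10, 0.15, 0.20, 0.40))
  dropouts <- processLimeSurveyDropouts(lp)
  dropouts$specificDropout      # dropout counts per page
  dropouts$progressiveDropout   # participants remaining at each page
  dropouts$plots$relativeDropout
}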
computeQuickKrigcov2 <- function(model, integration.points, X.new, precalc.data, F.newdata, c.newdata){
  c.xnew.integpoints <- covMat1Mat2(X1 = integration.points, X2 = X.new,
                                    object = model@covariance,
                                    nugget.flag = model@[email protected])
  cov.std <- c.xnew.integpoints - crossprod(precalc.data$Kinv.c.olddata, c.newdata)
  if (is.null(F.newdata)) {
    kn <- cov.std
  } else {
    second.member <- t(F.newdata - crossprod(c.newdata, precalc.data$Kinv.F))
    cov.F <- precalc.data$first.member %*% second.member
    kn <- cov.F + cov.std
  }
  return(kn)
}
library(testit)
assert('move_leftbrace() works', {
(move_leftbrace(c('abc() {', ' }')) %==% c('abc()', '{', ' }'))
(move_leftbrace(c(' a() {', '}')) %==% c(' a()', ' {', '}'))
(move_leftbrace(rep(c(' a() {', '}'), 5)) %==% rep(c(' a()', ' {', '}'), 5))
(move_leftbrace(c('a', '', 'b')) %==% c('a', '', 'b'))
(move_leftbrace(c('if (TRUE) {', ' if (FALSE) {', ' 1', ' }', '}')) %==%
c('if (TRUE)', '{', ' if (FALSE)', ' {', ' 1', ' }', '}'))
(move_leftbrace(c('if (TRUE) {', ' 1', '} else {', ' 2}')) %==%
c('if (TRUE)', '{', ' 1', '} else', '{', ' 2}'))
})
assert('reindent_lines() works', {
(reindent_lines('') %==% '')
(reindent_lines(c('', '')) %==% c('', ''))
(reindent_lines(' ', n = 2) %==% ' ')
(reindent_lines(c('if (TRUE) {', ' 1', '}'), n = 2) %==% c('if (TRUE) {', ' 1', '}'))
}) |
source("functions.R")
for (dir in appdirs()) {
snapshotPath <- file.path(dir, "R.out.save")
if (upToDate(dir, "R.out.save"))
next
cat("Snapshotting", dir, "\n")
res <- executeApp(dir)
writeLines(res, snapshotPath)
}
invisible() |
aseq.Run <- function(bam.files,aseq.path,genotype.dir,out.dir,mbq,mrq,mdc,model.path,cores,bam.chr.encoding)
{
  status = tryCatch(
  {
model = snpgdsOpen(model.path,readonly = F)
snp.list = snpgdsSNPList(model)
vcf = cbind(snp.list$chromosome,pos=snp.list$position,snp.list$snp.id,
as.character(read.gdsn(index.gdsn(model,"snp.ref"))),
as.character(read.gdsn(index.gdsn(model,"snp.alt"))),".",".",".")
colnames(vcf)= c("CHR","POS","ID","REF","ALT","QUAL","FILTER","INFO")
if(bam.chr.encoding)
vcf[,1] = paste("chr",vcf[,1],sep="")
write.table(vcf,file.path(out.dir,"ModelPositions.vcf"),sep="\t",quote=F,row.names=F)
snpgdsClose(model)
if(get.OS()=="linux")
{
aseq.exec = file.path(aseq.path,"ASEQ")
if(!file.exists(aseq.exec))
{
download.file("https://github.com/cibiobcg/EthSEQ_Data/raw/master/ASEQ_binaries/linux64/ASEQ",file.path(aseq.path,"ASEQ"))
Sys.chmod(aseq.exec, mode = "0755", use_umask = TRUE)
}
for (b in bam.files)
{
message.Date(paste("Computing pileup of BAM file ",b,sep=""))
command = paste(aseq.exec," vcf=",file.path(out.dir,"ModelPositions.vcf")," bam=",b," mode=GENOTYPE threads=",cores," htperc=0.2 mbq=",mbq,
" mrq=",mrq," mdc=",mdc," out=",genotype.dir,sep="")
system(command,ignore.stderr = T,ignore.stdout = T)
}
}
if(get.OS()=="osx")
{
aseq.exec = file.path(aseq.path,"ASEQ")
if(!file.exists(aseq.exec))
{
download.file("https://github.com/cibiobcg/EthSEQ_Data/raw/master/ASEQ_binaries/macosx/ASEQ",file.path(aseq.path,"ASEQ"))
Sys.chmod(aseq.exec, mode = "0755", use_umask = TRUE)
}
for (b in bam.files)
{
command = paste(aseq.exec," vcf=",file.path(out.dir,"ModelPositions.vcf")," bam=",b," mode=GENOTYPE threads=",cores," htperc=0.2 mbq=",mbq,
" mrq=",mrq," mdc=",mdc," out=",genotype.dir,sep="")
system(command,ignore.stderr = T,ignore.stdout = T)
}
}
if(get.OS()=="windows")
{
aseq.exec = file.path(aseq.path,"ASEQ.exe")
if(!file.exists(aseq.exec))
{
download.file("https://github.com/cibiobcg/EthSEQ_Data/raw/master/ASEQ_binaries/win32/ASEQ.exe",file.path(aseq.path,"ASEQ.exe"))
}
for (b in bam.files)
{
command = paste(aseq.exec," vcf=",file.path(out.dir,"ModelPositions.vcf")," bam=",b," mode=GENOTYPE threads=",cores," htperc=0.2 mbq=",mbq,
" mrq=",mrq," mdc=",mdc," out=",genotype.dir,sep="")
system(command,ignore.stderr = T,ignore.stdout = T)
}
}
    TRUE
  }, error = function(e) {
    message.Date(e)
    FALSE
  })
  return(status)
}
"print.psych" <-
function(x,digits=2,all=FALSE,cut=NULL,sort=FALSE,short=TRUE,lower=TRUE,signif=NULL,...) {
if(length(class(x)) > 1) { value <- class(x)[2] } else {
  if((!is.null(x$communality.iterations)) | (!is.null(x$uniquenesses)) | (!is.null(x$rotmat)) | (!is.null(x$Th)) ) {value <- "fa" }
}
if(all) value <- "all"
if(value == "score.items") value <- "scores"
if(value =="set.cor") value <- "setCor"
switch(value,
esem = {print.psych.esem(x,digits=digits,short=short,cut=cut,...)},
extension = { print.psych.fa(x,digits=digits,all=all,cut=cut,sort=sort,...)},
extend = {print.psych.fa(x,digits=digits,all=all,cut=cut,sort=sort,...)},
fa = {print.psych.fa(x,digits=digits,all=all,cut=cut,sort=sort,...)},
fa.ci = { print.psych.fa.ci(x,digits=digits,all=all,... )},
iclust= { print.psych.iclust(x,digits=digits,all=all,cut=cut,sort=sort,...)},
omega = { print.psych.omega(x,digits=digits,all=all,cut=cut,sort=sort,...)},
omegaSem= {print.psych.omegaSem(x,digits=digits,all=all,cut=cut,sort=sort,...)},
principal ={print.psych.fa(x,digits=digits,all=all,cut=cut,sort=sort,...)},
schmid = { print.psych.schmid(x,digits=digits,all=all,cut=cut,sort=sort,...)},
stats = { print.psych.stats(x,digits=digits,all=all,cut=cut,sort=sort,...)},
vss= { print.psych.vss(x,digits=digits,all=all,cut=cut,sort=sort,...)},
cta = {print.psych.cta(x,digits=digits,all=all,...)},
mediate = {print.psych.mediate(x,digits=digits,short=short,...)},
multilevel = {print.psych.multilevel(x,digits=digits,short=short,...)},
testRetest = {print.psych.testRetest(x,digits=digits,short=short,...)},
bestScales = {print.psych.bestScales(x,digits=digits,short=short,...)},
all= {class(x) <- "list"
print(x,digits=digits) },
alpha = {
cat("\nReliability analysis ",x$title," \n")
cat("Call: ")
print(x$call)
cat("\n ")
print(x$total,digits=digits)
if(!is.null(x$total$ase)){ cat("\n lower alpha upper 95% confidence boundaries\n")
cat(round(c(x$total$raw_alpha - 1.96* x$total$ase, x$total$raw_alpha,x$total$raw_alpha +1.96* x$total$ase),digits=digits) ,"\n")}
if(!is.null(x$boot.ci)) {cat("\n lower median upper bootstrapped confidence intervals\n",round(x$boot.ci,digits=digits))}
cat("\n Reliability if an item is dropped:\n")
print(x$alpha.drop,digits=digits)
cat("\n Item statistics \n")
print(x$item.stats,digits=digits)
if(!is.null(x$response.freq)) {
cat("\nNon missing response frequency for each item\n")
print(round(x$response.freq,digits=digits))}
},
autoR = {cat("\nAutocorrelations \n")
if(!is.null(x$Call)) {cat("Call: ")
print(x$Call)}
print(round(x$autoR,digits=digits))
},
bassAck = {
cat("\nCall: ")
print(x$Call)
nf <- length(x$bass.ack)-1
for (f in 1:nf) {
cat("\n",f,
x$sumnames[[f]])}
if(!short) {
for (f in 1:nf) {
cat("\nFactor correlations\n ")
print(round(x$bass.ack[[f]],digits=digits))}
} else {cat("\nUse print with the short = FALSE option to see the correlations, or use the summary command.")}
},
auc = {cat('Decision Theory and Area under the Curve\n')
cat('\nThe original data implied the following 2 x 2 table\n')
print(x$probabilities,digits=digits)
cat('\nConditional probabilities of \n')
print(x$conditional,digits=digits)
cat('\nAccuracy = ',round(x$Accuracy,digits=digits),' Sensitivity = ',round(x$Sensitivity,digits=digits), ' Specificity = ',round(x$Specificity,digits=digits), '\nwith Area Under the Curve = ', round(x$AUC,digits=digits) )
cat('\nd.prime = ',round(x$d.prime,digits=digits), ' Criterion = ',round(x$criterion,digits=digits), ' Beta = ', round(x$beta,digits=digits))
cat('\nObserved Phi correlation = ',round(x$phi,digits=digits), '\nInferred latent (tetrachoric) correlation = ',round(x$tetrachoric,digits=digits))
},
bestScales = {if(!is.null(x$first.result)) {
cat("\nCall = ")
print(x$Call)
print(x$summary,digits=digits)
items <- x$items
size <- NCOL(items[[1]])
nvar <- length(items)
for(i in 1:nvar) {
if(NCOL(items[[i]]) > 3) {items[[i]] <- items[[i]][,-1]}
if(length( items[[i]][1]) > 0 ) {
items[[i]][,c("mean.r","sd.r")] <- round(items[[i]][,c("mean.r","sd.r")],digits)
}}
cat("\n Best items on each scale with counts of replications\n")
print(items)} else {
df <- data.frame(correlation=x$r,n.items = x$n.items)
cat("The items most correlated with the criteria yield r's of \n")
print(round(df,digits=digits))
if(length(x$value) > 0) {cat("\nThe best items, their correlations and content are \n")
print(x$value) } else {cat("\nThe best items and their correlations are \n")
for(i in 1:length(x$short.key)) {print(round(x$short.key[[i]],digits=digits))}
}
}
},
bifactor = {
cat("Call: ")
print(x$Call)
cat("Alpha: ",round(x$alpha,digits),"\n")
cat("G.6: ",round(x$G6,digits),"\n")
cat("Omega Hierarchical: " ,round(x$omega_h,digits),"\n")
cat("Omega Total " ,round(x$omega.tot,digits),"\n")
print(x$f,digits=digits,sort=sort)
},
circ = {cat("Tests of circumplex structure \n")
cat("Call:")
print(x$Call)
res <- data.frame(x[1:4])
print(res,digits=2)
},
circadian = {if(!is.null(x$Call)) {cat("Call: ")
print(x$Call)}
cat("\nCircadian Statistics :\n")
if(!is.null(x$F)) {
cat("\nCircadian F test comparing groups :\n")
print(round(x$F,digits))
if(short) cat("\n To see the pooled and group statistics, print with the short=FALSE option")
}
if(!is.null(x$pooled) && !short) { cat("\nThe pooled circadian statistics :\n")
print( x$pooled)}
if(!is.null(x$bygroup) && !short) {cat("\nThe circadian statistics by group:\n")
print(x$bygroup)}
if(!is.null(x$phase.rel)) {
cat("\nSplit half reliabilities are split half correlations adjusted for test length\n")
x.df <- data.frame(phase=x$phase.rel,fits=x$fit.rel)
print(round(x.df,digits)) }
if(is.data.frame(x)) {class(x) <- "data.frame"
print(round(x,digits=digits)) }
},
cluster.cor = {
cat("Call: ")
print(x$Call)
cat("\n(Standardized) Alpha:\n")
print(x$alpha,digits)
cat("\n(Standardized) G6*:\n")
print(x$G6,digits)
cat("\nAverage item correlation:\n")
print(x$av.r,digits)
cat("\nNumber of items:\n")
print(x$size)
cat("\nSignal to Noise ratio based upon average r and n \n")
print(x$sn,digits=digits)
cat("\nScale intercorrelations corrected for attenuation \n raw correlations below the diagonal, alpha on the diagonal \n corrected correlations above the diagonal:\n")
print(x$corrected,digits)
},
cluster.loadings = {
cat("Call: ")
print(x$Call)
cat("\n(Standardized) Alpha:\n")
print(x$alpha,digits)
cat("\n(Standardized) G6*:\n")
print(x$G6,digits)
cat("\nAverage item correlation:\n")
print(x$av.r,digits)
cat("\nNumber of items:\n")
print(x$size)
cat("\nScale intercorrelations corrected for attenuation \n raw correlations below the diagonal, alpha on the diagonal \n corrected correlations above the diagonal:\n")
print(x$corrected,digits)
cat("\nItem by scale intercorrelations\n corrected for item overlap and scale reliability\n")
print(x$loadings,digits)
},
cohen.d = {cat("Call: ")
print(x$Call)
cat("Cohen d statistic of difference between two means\n")
if(NCOL(x$cohen.d) == 3) {print(round(x$cohen.d,digits=digits))} else {print( data.frame(round(x$cohen.d[1:3],digits=digits),x$cohen.d[4:NCOL(x$cohen.d)]))}
cat("\nMultivariate (Mahalanobis) distance between groups\n")
print(x$M.dist,digits=digits)
cat("r equivalent of difference between two means\n")
print(round(x$r,digits=digits))
},
cohen.d.by = {cat("Call: ")
print(x$Call)
ncases <- length(x)
for (i in (1:ncases)) {cat("\n Group levels = ",names(x[i]),"\n")
cat("Cohen d statistic of difference between two means\n")
print(x[[i]]$cohen.d,digits=digits)
cat("\nMultivariate (Mahalanobis) distance between groups\n")
print(x[[i]]$M.dist,digits=digits)
cat("r equivalent of difference between two means\n")
print(x[[i]]$r,digits=digits)
}
cat("\nUse summary for more compact output")
},
comorbid = {cat("Call: ")
print(x$Call)
cat("Comorbidity table \n")
print(x$twobytwo,digits=digits)
cat("\nimplies phi = ",round(x$phi,digits), " with Yule = ", round(x$Yule,digits), " and tetrachoric correlation of ", round(x$tetra$rho,digits))
cat("\nand normal thresholds of ",round(-x$tetra$tau,digits))
},
corCi = {
cat("\n Correlations and normal theory confidence intervals \n")
print(round(x$r.ci,digits=digits))
},
cor.ci = {cat("Call:")
print(x$Call)
cat("\n Coefficients and bootstrapped confidence intervals \n")
lowerMat(x$rho)
phis <- x$rho[lower.tri(x$rho)]
cci <- data.frame(lower.emp =x$ci$low.e, lower.norm=x$ci$lower,estimate =phis ,upper.norm= x$ci$upper, upper.emp=x$ci$up.e,p = x$ci$p)
rownames(cci) <- rownames(x$ci)
cat("\n scale correlations and bootstrapped confidence intervals \n")
print(round(cci,digits=digits))
},
cor.cip = {class(x) <- NULL
cat("\n High and low confidence intervals \n")
print(round(x,digits=digits))
},
corr.test = {cat("Call:")
print(x$Call)
cat("Correlation matrix \n")
print(round(x$r,digits))
cat("Sample Size \n")
print(x$n)
if(x$sym) {cat("Probability values (Entries above the diagonal are adjusted for multiple tests.) \n")} else {
if (x$adjust != "none" ) {cat("These are the unadjusted probability values.\n The probability values adjusted for multiple tests are in the p.adj object. \n")}}
print(round(x$p,digits))
if(short) cat("\n To see confidence intervals of the correlations, print with the short=FALSE option\n")
if(!short) {cat("\n Confidence intervals based upon normal theory. To get bootstrapped values, try cor.ci\n")
if(is.null(x$ci.adj)) { ci.df <- data.frame(raw=x$ci) } else {
ci.df <- data.frame(raw=x$ci,lower.adj = x$ci.adj$lower.adj,upper.adj=x$ci.adj$upper.adj)}
print(round(ci.df,digits)) }
},
corr.p = {cat("Call:")
print(x$Call)
cat("Correlation matrix \n")
print(round(x$r,digits))
cat("Sample Size \n")
print(x$n)
if(x$sym) {cat("Probability values (Entries above the diagonal are adjusted for multiple tests.) \n")} else {
if (x$adjust != "none" ) {cat("These are the unadjusted probability values. \n To see the values adjusted for multiple tests see the p.adj object. \n")}}
print(round(x$p,digits))
if(short) cat("\n To see confidence intervals of the correlations, print with the short=FALSE option\n")
if(!short) {cat("\n Confidence intervals based upon normal theory. To get bootstrapped values, try cor.ci\n")
print(round(x$ci,digits)) }
},
cortest= {cat("Tests of correlation matrices \n")
cat("Call:")
print(x$Call)
cat(" Chi Square value" ,round(x$chi,digits)," with df = ",x$df, " with probability <", signif(x$p,digits),"\n" )
if(!is.null(x$z)) cat("z of differences = ",round(x$z,digits),"\n")
},
cor.wt = {cat("Weighted Correlations \n")
cat("Call:")
print(x$Call)
lowerMat(x$r,digits=digits) },
crossV = {cat("Cross Validation\n")
cat("Call:")
print(x$Call)
cat("\nValidities from raw items and from the correlation matrix\n")
cat("Number of unique predictors used = ",x$nvars,"\n")
print(x$crossV,digits=digits)
cat("\nCorrelations based upon item based regressions \n")
lowerMat(x$item.R)
cat("\nCorrelations based upon correlation matrix based regressions\n")
lowerMat(x$mat.R)
},
describe= {if(!is.null(x$signif)) {
if( missing(signif) ) signif <-x$signif
x$signif <- NULL }
if (length(dim(x))==1) {class(x) <- "list"
attr(x,"call") <- NULL
if(!missing(signif)) x <- signifNum(x,digits=signif)
print(round(x,digits=digits))
} else {class(x) <- "data.frame"
if(!missing(signif)) x <- signifNum(x,digits=signif)
print(round(x,digits=digits)) }
},
describeBy = {cat("\n Descriptive statistics by group \n")
if(!is.null(x$Call)){ cat("Call: " )
print(x$Call) }
class(x) <- "by"
print(x,digits=digits)
},
describeData = {if (length(dim(x))==1) {class(x) <- "list"
attr(x,"call") <- NULL
print(round(x,digits=digits))
} else {
cat('n.obs = ', x$n.obs, "of which ", x$complete.cases," are complete cases. Number of variables = ",x$nvar," of which all are numeric ",x$all.numeric," \n")
print(x$variables) }
},
describeFast = { cat("\n Number of observations = " , x$n.obs, "of which ", x$complete.cases," are complete cases. Number of variables = ",x$nvar," of which ",x$numeric," are numeric and ",x$factors," are factors \n")
if(!short) {print(x$result.df) } else {cat("\n To list the items and their counts, print with short = FALSE") }
},
direct = { cat("Call: ")
print(x$Call)
cat("\nDirect Schmid Leiman = \n")
print(x$direct,cut=cut)
} ,
faBy = { cat("Call: ")
print(x$Call)
cat("\n Factor analysis by Groups\n")
cat("\nAverage standardized loadings (pattern matrix) based upon correlation matrix for all cases as well as each group\n")
cat("\nlow and high ", x$quant,"% quantiles\n")
print(x$faby.sum,digits)
if(!short) {
cat("\n Pooled loadings across groups \n")
print(x$mean.loading,digits=digits)
cat("\n Average factor intercorrelations for all cases and each group\n")
print(x$mean.Phi,digits=2)
cat("\nStandardized loadings (pattern matrix) based upon correlation matrix for all cases as well as each group\n")
print(x$loadings,digits=digits)
cat("\n With factor intercorrelations for all cases and for each group\n")
print(x$Phi,digits=2)
if(!is.null(x$fa)) {
cat("\nFactor analysis results for each group\n")
print(x$fa,digits)
} else {print("For a more informative output, print with short=FALSE")}}
},
faCor = { cat("Call: ")
print(x$Call)
if(!short) { cat("\n Factor Summary for first solution\n")
summary(x$f1)
cat("\n Factor Summary for second solution\n")
summary(x$f2)
}
cat("\n Factor correlations between the two solutions\n")
print(x$r,digits=digits)
cat("\n Factor congruence between the two solutions\n")
print(x$congruence,digits=digits)
},
guttman = {
cat("Call: ")
print(x$Call)
cat("\nAlternative estimates of reliability\n")
cat("\nGuttman bounds \nL1 = ",round(x$lambda.1,digits), "\nL2 = ", round(x$lambda.2,digits), "\nL3 (alpha) = ", round(x$lambda.3,digits),"\nL4 (max) = " ,round(x$lambda.4,digits), "\nL5 = ", round(x$lambda.5,digits), "\nL6 (smc) = " ,round(x$lambda.6,digits), "\n")
cat("TenBerge bounds \nmu0 = ",round(x$tenberge$mu0,digits), "mu1 = ", round(x$tenberge$mu1,digits), "mu2 = " ,round(x$tenberge$mu2,digits), "mu3 = ",round(x$tenberge$mu3,digits) , "\n")
cat("\nalpha of first PC = ",round( x$alpha.pc,digits), "\nestimated greatest lower bound based upon communalities= ", round(x$glb,digits),"\n")
cat("\nbeta found by splitHalf = ", round(x$beta,digits),"\n")
} ,
ICC = {cat("Call: ")
print(x$Call)
cat("\nIntraclass correlation coefficients \n")
print(x$results,digits=digits)
cat("\n Number of subjects =", x$n.obs, " Number of Judges = ",x$n.judge)
cat("\nSee the help file for a discussion of the other 4 McGraw and Wong estimates,")
},
iclust.sort = {
nvar <- ncol(x$sort)
x$sort[4:nvar] <- round(x$sort[4:nvar],digits)
print(x$sort)
},
irt.fa = {
cat("Item Response Analysis using Factor Analysis \n")
cat("\nCall: ")
print(x$Call)
if (!is.null(x$plot)) print(x$plot)
if(!short) {
nf <- length(x$irt$difficulty)
for(i in 1:nf) {temp <- data.frame(discrimination=x$irt$discrimination[,i],location=x$irt$difficulty[[i]])
cat("\nItem discrimination and location for factor ",colnames(x$irt$discrimination)[i],"\n")
print(round(temp,digits))}
cat("\n These parameters were based on the following factor analysis\n")
print(x$fa)
} else {summary(x$fa)}
},
irt.poly = {
cat("Item Response Analysis using Factor Analysis \n")
cat("\nCall: ")
print(x$Call)
if (!is.null(x$plot)) print(x$plot)
if(!short) {
nf <- length(x$irt$difficulty)
for(i in 1:nf) {temp <- data.frame(discrimination=x$irt$discrimination[,i],location=x$irt$difficulty[[i]])
cat("\nItem discrimination and location for factor ",colnames(x$irt$discrimination)[i],"\n")
print(round(temp,digits))}
cat("\n These parameters were based on the following factor analysis\n")
print(x$fa)
} else {summary(x$fa) }
},
kappa = {if(is.null(x$cohen.kappa)) {
cat("Call: ")
print(x$Call)
cat("\nCohen Kappa and Weighted Kappa correlation coefficients and confidence boundaries \n")
print(x$confid,digits=digits)
cat("\n Number of subjects =", x$n.obs,"\n")} else {
cat("\nCohen Kappa (below the diagonal) and Weighted Kappa (above the diagonal) \nFor confidence intervals and detail print with all=TRUE\n")
print(x$cohen.kappa,digits=digits)
if(!is.null(x$av.kappa)) cat("\nAverage Cohen kappa for all raters ", round(x$av.kappa,digits=digits))
if(!is.null(x$av.wt)) cat("\nAverage weighted kappa for all raters ",round(x$av.wt,digits=digits))
}
},
mardia = {
cat("Call: ")
print(x$Call)
cat("\nMardia tests of multivariate skew and kurtosis\n")
cat("Use describe(x) the to get univariate tests")
cat("\nn.obs =",x$n.obs," num.vars = ",x$n.var,"\n")
cat("b1p = ",round(x$b1p,digits)," skew = ",round(x$skew,digits ), " with probability <= ", signif(x$p.skew,digits))
cat("\n small sample skew = ",round(x$small.skew,digits ), " with probability <= ", signif(x$p.small,digits))
cat("\nb2p = ", round(x$b2p,digits)," kurtosis = ",round(x$kurtosis,digits)," with probability <= ",signif(x$p.kurt,digits ))
},
mchoice = {
cat("Call: ")
print(x$Call)
cat("\n(Unstandardized) Alpha:\n")
print(x$alpha,digits=digits)
cat("\nAverage item correlation:\n")
print(x$av.r,digits=digits)
if(!is.null(x$item.stats)) {
cat("\nitem statistics \n")
print(round(x$item.stats,digits=digits))}
},
mixed= { cat("Call: ")
print(x$Call)
if(is.null(x$rho)) {if(lower) {lowerMat(x,digits=digits)} else {print(x,digits)} } else {
if(lower) {if(length(x$rho)>1) { lowerMat (x$rho,digits=digits)} else {print(x$rho,digits)}}
}},
omegaDirect ={ cat("Call: ")
print(x$Call)
cat("\nOmega from direct Schmid Leiman = ", round(x$omega.g,digits=digits),"\n")
print.psych.fa(x)
eigenvalues <- diag(t(x$loadings) %*% x$loadings)
cat("\nWith eigenvalues of:\n")
print(eigenvalues,digits=2)
cat("The degrees of freedom for the model is",x$orth.f$dof," and the fit was ",round(x$orth.f$objective,digits),"\n")
if(!is.na(x$orth.f$n.obs)) {cat("The number of observations was ",x$orth.f$n.obs, " with Chi Square = ",round(x$orth.f$STATISTIC,digits), " with prob < ", round(x$orth.f$PVAL,digits),"\n")}
if(!is.null(x$orth.f$rms)) {cat("\nThe root mean square of the residuals is ", round(x$orth.f$rms,digits),"\n") }
if(!is.null(x$orth.f$crms)) {cat("The df corrected root mean square of the residuals is ", round(x$orth.f$crms,digits),"\n") }
if(!is.null(x$orth.f$RMSEA)) {cat("\nRMSEA and the ",x$orth.f$RMSEA[4] ,"confidence intervals are ",round(x$orth.f$RMSEA[1:3],digits+1)) }
if(!is.null(x$orth.f$BIC)) {cat("\nBIC = ",round(x$orth.f$BIC,digits))}
cat("\n Total, General and Subset omega for each subset\n")
colnames(x$om.group) <- c("Omega total for total scores and subscales","Omega general for total scores and subscales ", "Omega group for total scores and subscales")
print(round(t(x$om.group),digits))},
paired.r = {cat("Call: ")
print(x$Call)
print(x$test)
if(is.null(x$z)) {cat("t =",round(x$t,digits))
} else {cat("z =",round(x$z,digits)) }
cat(" With probability = ",round(x$p,digits))
},
pairwise = {cat("Call: ")
print(x$Call)
cat("\nMean correlations within/between scales\n")
lowerMat(x$av.r)
cat("\nPercentage of complete correlations\n")
lowerMat(x$percent)
cat("\nNumber of complete correlations per scale\n")
lowerMat(x$count)
if(!is.null(x$size)) {cat("\nAverage number of pairwise observations per scale\n")
lowerMat(round(x$size))}
cat("\n Imputed correlations (if found) are in the imputed object")
},
pairwiseCounts = {cat("Call: ")
print(x$Call)
cat("\nOverall descriptive statistics\n")
if(!is.null(x$description)) print(x$description)
cat("\nNumber of item pairs <=", x$cut," = ", dim(x$df)[1])
cat("\nItem numbers with pairs <= ",x$cut, " (row wise)", length(x$rows))
cat("\nItem numbers with pairs <= ",x$cut,"(col wise)", length(x$cols))
cat("\nFor names of the offending items, print with short=FALSE")
if(!short) {cat("\n Items names with pairs < ", x$cut," (row wise)\n", names(x$rows))
cat("\n Items names with pairs <=",x$cut," (col wise)\n", names(x$cols))}
cat("\nFor even more details examine the rows, cols and df report" )
},
parallel= {
cat("Call: ")
print(x$Call)
if(!is.null(x$fa.values) & !is.null(x$pc.values) ) {
parallel.df <- data.frame(fa=x$fa.values,fa.sam =x$fa.simr,fa.sim=x$fa.sim,pc= x$pc.values,pc.sam =x$pc.simr,pc.sim=x$pc.sim)
fa.test <- x$nfact
pc.test <- x$ncomp
cat("Parallel analysis suggests that ")
cat("the number of factors = ",fa.test, " and the number of components = ",pc.test,"\n")
cat("\n Eigen Values of \n")
colnames(parallel.df) <- c("Original factors","Resampled data", "Simulated data","Original components", "Resampled components", "Simulated components")
if(any(is.na(x$fa.sim))) parallel.df <- parallel.df[-c(3,6)]
}
if(is.na(fa.test) ) fa.test <- 0
if(is.na(pc.test)) pc.test <- 0
if(!any(is.na(parallel.df))) {print(round(parallel.df[1:max(fa.test,pc.test),],digits))} else {
if(!is.null(x$fa.values)) {cat("\n eigen values of factors\n")
print(round(x$fa.values,digits))}
if(!is.null(x$fa.sim)){cat("\n eigen values of simulated factors\n")
print(round(x$fa.sim,digits))}
if(!is.null(x$pc.values)){cat("\n eigen values of components \n")
print(round(x$pc.values,digits))}
if(!is.null(x$pc.sim)) {cat("\n eigen values of simulated components\n")
print(round(x$pc.sim,digits=digits))}
}
},
partial.r = {cat("partial correlations \n")
print(round(unclass(x),digits))
},
phi.demo = {print(x$tetrachoric)
cat("\nPearson (phi) below the diagonal, phi2tetras above the diagonal\n")
print(round(x$phis,digits))
cat("\nYule correlations")
print(x$Yule)
},
poly= {cat("Call: ")
print(x$Call)
cat("Polychoric correlations \n")
if(!is.null(x$twobytwo)) {
print(x$twobytwo,digits=digits)
cat("\n implies tetrachoric correlation of ",round(-x$rho,digits))} else {
if(!isSymmetric(x$rho)) lower<- FALSE
if(lower) {lowerMat (x$rho,digits) } else {print(x$rho,digits)}
cat("\n with tau of \n")
print(x$tau,digits)
if(!is.null(x$tauy)) print(x$tauy,digits)
}
},
polydi= {cat("Call: ")
print(x$Call)
cat("Correlations of polytomous with dichotomous\n")
print(x$rho,digits)
cat("\n with tau of \n")
print(x$tau,digits)
},
polyinfo = {cat("Item Response Analysis using Factor Analysis \n")
cat("\n Summary information by factor and item")
names(x$sumInfo ) <- paste("Factor",1:length(x$sumInfo))
for (f in 1:length(x$sumInfo)) {
cat("\n Factor = ",f,"\n")
temp <- x$sumInfo[[f]]
temps <- rowSums(temp)
if(sort) {ord <- order(temps,decreasing=TRUE)
temp <- temp[ord,]
temps <- temps[ord]}
temp <- temp[temps > 0,]
summary <- matrix(c(colSums(temp),sqrt(1/colSums(temp)),1-1/colSums(temp)),nrow=3,byrow=TRUE)
rownames(summary) <-c("Test Info","SEM", "Reliability")
temp <- rbind(temp,summary)
if(ncol(temp) == 61) {print(round(temp[,seq(1,61,10)],digits=digits)) } else {print(round(temp,digits=digits))}
}
if(!short) {
cat("\n Average information (area under the curve) \n")
AUC <-x$AUC
max.info <-x$max.info
if(dim(AUC)[2]==1) {item <- 1:length(AUC) } else {item <- 1:dim(AUC)[1]}
if(sort) {
cluster <- apply(AUC,1,which.max)
ord <- sort(cluster,index.return=TRUE)
AUC <- AUC[ord$ix,,drop=FALSE]
max.info <- max.info[ord$ix,,drop=FALSE]
items <- table(cluster)
first <- 1
for (i in 1:length(items)) {
if(items[i] > 0 ) {
last <- first + items[i]- 1
ord <- sort(abs(AUC[first:last,i]),decreasing=TRUE,index.return=TRUE)
AUC[first:last,] <- AUC[item[ord$ix+first-1],]
max.info[first:last,] <- max.info[item[ord$ix+first-1],]
rownames(AUC)[first:last] <- rownames(max.info)[first:last] <- rownames(AUC)[ord$ix+first-1]
first <- first + items[i] }
}
}
print(AUC,digits=digits)
cat("\nMaximum value is at \n")
print(max.info,digits=digits)
}
},
validity = { cat("Call: ")
print(x$Call)
cat("\nPredicted Asymptotic Scale Validity:\n")
print(x$asymptotic,digits)
cat("\n For predicted scale validities, average item validities, or scale reliabilities, print the separate objects")
},
overlap = {
cat("Call: ")
print(x$Call)
cat("\n(Standardized) Alpha:\n")
print(x$alpha,digits)
cat("\n(Standardized) G6*:\n")
print(x$G6,digits)
cat("\nAverage item correlation:\n")
print(x$av.r,digits)
cat("\nMedian item correlation:\n")
print(x$med.r,digits)
cat("\nNumber of items:\n")
print(x$size)
cat("\nSignal to Noise ratio based upon average r and n \n")
print(x$sn,digits=digits)
cat("\nScale intercorrelations corrected for item overlap and attenuation \n adjusted for overlap correlations below the diagonal, alpha on the diagonal \n corrected correlations above the diagonal:\n")
print(x$corrected,digits)
if(short) {cat("\n In order to see the item by scale loadings and frequency counts of the data\n print with the short option = FALSE") } else {
if(!is.null(x$item.cor) ) {
cat("\nItem by scale correlations:\n corrected for item overlap and scale reliability\n" )
print(round(x$item.cor,digits=digits)) }
}
},
frequency = { cat("Response frequencies (of non-missing items) \n")
print(unclass(x),digits=digits)
},
r.test = {cat("Correlation tests \n")
cat("Call:")
print(x$Call)
cat( x$Test,"\n")
if(!is.null(x$t)) {cat(" t value" ,round(x$t,digits)," with probability <", signif(x$p,digits) )}
if(!is.null(x$z)) {cat(" z value" ,round(x$z,digits)," with probability ", round(x$p,digits) )}
if(!is.null(x$ci)) {cat("\n and confidence interval ",round(x$ci,digits) ) }
},
reliability ={cat("Measures of reliability \n")
if(is.list(x)) {
print(x$Call)
x <- x$result.df}
print(round(unclass(x),digits))
},
residuals = { if(NCOL(x) == NROW(x)) {
if (lower) {lowerMat (x,digits=digits)}} else {print(round(unclass(x),digits))}
},
scree = {
cat("Scree of eigen values \nCall: ")
print(x$Call)
if(!is.null(x$fv)) {cat("Eigen values of factors ")
print(round(x$fv,digits))}
if (!is.null(x$pcv)) {cat("Eigen values of Principal Components")
print(round(x$pcv,digits))}
},
scores = {
cat("Call: ")
print(x$Call)
if(x$raw) {
cat("\n(Unstandardized) Alpha:\n") } else {cat("\n(Standardized) Alpha:\n") }
print(x$alpha,digits=digits)
if(!is.null(x$ase)) {cat("\nStandard errors of unstandardized Alpha:\n")
rownames(x$ase) <- "ASE "
          print(x$ase,digits=digits) }
if(!is.null(x$alpha.ob)) {cat("\nStandardized Alpha of observed scales:\n")
print(x$alpha.ob,digits=digits)}
cat("\nAverage item correlation:\n")
print(x$av.r,digits=digits)
cat("\nMedian item correlation:\n")
print(x$med.r,digits=digits)
cat("\n Guttman 6* reliability: \n")
print(x$G6,digits=digits)
cat("\nSignal/Noise based upon av.r : \n")
print(x$sn,digits=digits)
cat("\nScale intercorrelations corrected for attenuation \n raw correlations below the diagonal, alpha on the diagonal \n corrected correlations above the diagonal:\n")
if(!is.null(x$alpha.ob)) {cat("\nNote that these are the correlations of the complete scales based on the correlation matrix,\n not the observed scales based on the raw items.\n")}
print(x$corrected,digits)
if(short) {cat("\n In order to see the item by scale loadings and frequency counts of the data\n print with the short option = FALSE") } else {
if(!is.null(x$item.cor) ) {
cat("\nItem by scale correlations:\n corrected for item overlap and scale reliability\n" )
print(round(x$item.corrected,digits=digits)) }
if(!is.null(x$response.freq)) {
cat("\nNon missing response frequency for each item\n")
print(round(x$response.freq,digits=digits))}
}
},
setCor= { cat("Call: ")
print(x$Call)
if(x$raw) {cat("\nMultiple Regression from raw data \n")} else {
cat("\nMultiple Regression from matrix input \n")}
if(!is.null(x$z)) cat("The following variables were partialed out:", x$z, "\n and are included in the calculation of df1 and df2\n")
ny <- NCOL(x$coefficients)
for(i in 1:ny) {cat("\n DV = ",colnames(x$coefficients)[i], "\n")
if(!is.null(x$se)) {result.df <- data.frame( round(x$coefficients[,i],digits),round(x$se[,i],digits),round(x$t[,i],digits),signif(x$Probability[,i],digits),round(x$ci[,i],digits), round(x$ci[,(i +ny)],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope","se", "t", "p","lower.ci","upper.ci", "VIF")
print(result.df)
cat("\nResidual Standard Error = ",round(x$SE.resid[i],digits), " with ",x$df[2], " degrees of freedom\n")
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$ruw[i],digits),R2uw = round( x$ruw[i]^2,digits), round(x$shrunkenR2[i],digits),round(x$seR2[i],digits), round(x$F[i],digits),x$df[1],x$df[2], signif(x$probF[i],digits+1))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw","Shrunken R2", "SE of R2", "overall F","df1","df2","p")
cat("\n Multiple Regression\n")
print(result.df)
} else {
result.df <- data.frame( round(x$coefficients[,i],digits),round(x$VIF,digits))
colnames(result.df) <- c("slope", "VIF")
print(result.df)
result.df <- data.frame(R = round(x$R[i],digits), R2 = round(x$R2[i],digits), Ruw = round(x$ruw[i],digits),R2uw = round( x$ruw[i]^2,digits))
colnames(result.df) <- c("R","R2", "Ruw", "R2uw")
cat("\n Multiple Regression\n")
print(result.df)
}
}
if(!is.null(x$cancor)) {
cat("\nVarious estimates of between set correlations\n")
cat("Squared Canonical Correlations \n")
print(x$cancor2,digits=digits)
if(!is.null(x$Chisq)) {cat("Chisq of canonical correlations \n")
print(x$Chisq,digits=digits)}
cat("\n Average squared canonical correlation = ",round(x$T,digits=digits))
cat("\n Cohen's Set Correlation R2 = ",round(x$Rset,digits=digits))
if(!is.null(x$Rset.shrunk)){ cat("\n Shrunken Set Correlation R2 = ",round(x$Rset.shrunk,digits=digits))
cat("\n F and df of Cohen's Set Correlation ",round(c(x$Rset.F,x$Rsetu,x$Rsetv), digits=digits))}
cat("\nUnweighted correlation between the two sets = ",round(x$Ruw,digits))
}
},
sim = { if(is.matrix(x)) {x <-unclass(x)
round(x,digits) } else {
cat("Call: ")
print(x$Call)
cat("\n $model (Population correlation matrix) \n")
print(x$model,digits)
if(!is.null(x$reliability)) { cat("\n$reliability (population reliability) \n")
print(x$reliability,digits) }
if(!is.null(x$N) && !is.null(x$r)) {
cat("\n$r (Sample correlation matrix for sample size = ",x$N,")\n")
print(x$r,digits)}
}
},
smoother = {x <- unclass(x)
print(x)
},
split ={ cat("Split half reliabilities ")
cat("\nCall: ")
print(x$Call)
cat("\nMaximum split half reliability (lambda 4) = ",round(x$maxrb,digits=digits))
cat("\nGuttman lambda 6 = ",round(x$lambda6,digits=digits))
cat("\nAverage split half reliability = ",round(x$meanr,digits=digits))
cat("\nGuttman lambda 3 (alpha) = ",round(x$alpha,digits=digits))
cat("\nGuttman lambda 2 = ", round(x$lambda2,digits=digits))
cat("\nMinimum split half reliability (beta) = ",round(x$minrb,digits=digits))
if(x$covar) { cat("\nAverage interitem covariance = ",round(x$av.r,digits=digits)," with median = ", round(x$med.r,digits=digits))} else { cat("\nAverage interitem r = ",round(x$av.r,digits=digits)," with median = ", round(x$med.r,digits=digits))}
if(!is.na(x$ci[1])) {cat("\n ",names(x$ci))
cat("\n Quantiles of split half reliability = ",round(x$ci,digits=digits))}
},
statsBy ={
cat("Statistics within and between groups ")
cat("\nCall: ")
print(x$Call)
cat("Intraclass Correlation 1 (Percentage of variance due to groups) \n")
print(round(x$ICC1,digits))
cat("Intraclass Correlation 2 (Reliability of group differences) \n")
print(round(x$ICC2,digits))
cat("eta^2 between groups \n")
print(round(x$etabg^2,digits))
if(short) { cat("\nTo see the correlations between and within groups, use the short=FALSE option in your print statement.")}
if(!short) {cat("Correlation between groups \n")
lowerMat(x$rbg)
cat("Correlation within groups \n")
lowerMat(x$rwg)
}
cat("\nMany results are not shown directly. To see specific objects select from the following list:\n",names(x))
},
tau = {cat("Tau values from dichotomous or polytomous data \n")
class(x) <- NULL
print(x,digits)
},
tetra = {cat("Call: ")
print(x$Call)
cat("tetrachoric correlation \n")
if(!is.null(x$twobytwo)) {
print(x$twobytwo,digits=digits)
cat("\n implies tetrachoric correlation of ",round(x$rho,digits))} else {if(length(x$rho)>1) {
if(!isSymmetric(x$rho)) lower <- FALSE} else {lower<- FALSE}
if(is.matrix(x$rho) && lower) {lowerMat (x$rho,digits)} else { print(x$rho,digits)}
cat("\n with tau of \n")
print(x$tau,digits)
if(!is.null(x$tauy)) print(x$tauy,digits)
}
},
thurstone = {
cat("Thurstonian scale (case 5) scale values ")
cat("\nCall: ")
print(x$Call)
print(x$scale)
cat("\n Goodness of fit of model ", round(x$GF,digits))
},
KMO = {cat("Kaiser-Meyer-Olkin factor adequacy")
cat("\nCall: ")
print(x$Call)
cat("Overall MSA = ",round(x$MSA,digits))
cat("\nMSA for each item = \n")
print(round(x$MSAi,digits))
},
unidim= {
cat("\nA measure of unidimensionality \n Call: ")
print(x$Call)
cat("\nUnidimensionality index = \n" )
print(round(x$uni,digits=digits))
cat("\nunidim adjusted index reverses negatively scored items.")
cat("\nalpha "," Based upon reverse scoring some items.")
cat ("\naverage and median correlations are based upon reversed scored items")
},
yule = {cat("Yule and Generalized Yule coefficients")
cat("\nCall: ")
print(x$Call)
cat("\nYule coefficient \n")
print(round(x$rho,digits))
cat("\nUpper and Lower Confidence Intervals = \n")
print(round(x$ci,digits))
},
Yule = {cat("Yule and Generalized Yule coefficients")
cat("\nLower CI Yule coefficient Upper CI \n")
print(round(c(x$lower,x$rho,x$upper),digits))
}
)
} |
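# Hedged usage note (added): print.psych() dispatches on the second element of
# the object's class vector, so any psych result routes to the matching branch
# above. Assuming the psych package is attached, for example:
if(0) {
  print(psych::describe(attitude), digits = 3)  # "describe" branch
  print(psych::alpha(attitude), digits = 2)     # "alpha" branch
}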
.check_cpp_func_error <- function(obj, func_name) {
if (obj[["errmsg"]] != "") {
stop(paste0("Internal cpp function (", func_name, "()) failed: ",
obj[["errmsg"]]), call. = FALSE)
}
}
.get_obj <- function(obj, obj_name) {
if (is.null(obj_name) || is.null(obj) || methods::is(obj, obj_name)) {
obj
} else {
.get_obj(attr(obj, "src"), obj_name)
}
}
.get_obj_arg <- function(obj, obj_name, arg_name) {
if (!is.null(obj_name) && !is.na(obj_name)) {
obj <- .get_obj(obj, obj_name)
}
obj_args <- attr(obj, "args")
if (is.null(obj_args)) {
NULL
} else {
obj_args[[arg_name]]
}
}
.create_src_obj <- function(obj, obj_name, func, scores, labels,
...) {
if (missing(obj)) {
if (!is.null(scores) && !is.null(labels)) {
obj <- func(scores = scores, labels = labels, ...)
} else {
stop("The first argument must be specified.", call. = FALSE)
}
}
obj
}
.get_metric_names <- function(mode) {
if (mode == "rocprc" || mode == "prcroc") {
mnames <- c("ROC", "PRC")
} else if (mode == "basic") {
mnames <- c("score", "label", "error", "accuracy", "specificity",
"sensitivity", "precision", "mcc", "fscore")
}
mnames
}
.load_data_table <- function() {
loaded <- TRUE
if (!requireNamespace("data.table", quietly = TRUE)) {
loaded <- FALSE
}
loaded
}
.get_pn_info <- function(object) {
nps <- attr(object, "data_info")[["np"]]
nns <- attr(object, "data_info")[["nn"]]
is_consistant <- TRUE
prev_np <- NA
prev_nn <- NA
np_tot <- 0
nn_tot <- 0
n <- 0
for (i in seq_along(nps)) {
np <- nps[i]
nn <- nns[i]
if ((!is.na(prev_np) && np != prev_np)
|| (!is.na(prev_nn) && nn != prev_nn)) {
is_consistant <- FALSE
}
np_tot <- np_tot + np
nn_tot <- nn_tot + nn
prev_np <- np
prev_nn <- nn
n <- n + 1
}
avg_np <- np_tot / n
avg_nn <- nn_tot / n
prc_base <- avg_np / (avg_np + avg_nn)
list(avg_np = avg_np, avg_nn = avg_nn, is_consistant = is_consistant,
prc_base = prc_base)
} |
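# Hedged illustration (added): .get_obj() walks the chain of "src" attributes
# until it reaches an object of the requested class, and .get_obj_arg() then
# looks up a stored argument on it. The class and argument names below are
# invented purely for this demo.
if(0) {
  base_obj <- structure(list(), class = "demo_src", args = list(threshold = 0.5))
  wrapped  <- structure(list(), class = "demo_wrap", src = base_obj)
  class(.get_obj(wrapped, "demo_src"))            # "demo_src"
  .get_obj_arg(wrapped, "demo_src", "threshold")  # 0.5
}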
summary.cv.clogitL1 = function(object, ...){
minInd = which(object$lambda == object$minCV_lambda)
minCVBeta = object$beta[minInd,]
minCVNZbeta = object$nz_beta[minInd]
minInd = which(object$lambda == object$minCV1se_lambda)
minCV1seBeta = object$beta[minInd,]
minCV1seNZbeta = object$nz_beta[minInd]
	list(lambda_minCV = exp(object$minCV_lambda),
	     beta_minCV = minCVBeta,
	     nz_beta_minCV = minCVNZbeta,
	     lambda_minCV1se = exp(object$minCV1se_lambda),
	     beta_minCV1se = minCV1seBeta,
	     nz_beta_minCV1se = minCV1seNZbeta)
} |
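# Hedged usage sketch (added): the typical workflow around this method,
# assuming the clogitL1 package and a matched case-control design described
# by 'X', 'y' and 'strata'.
if(0) {
  fit <- clogitL1::clogitL1(X, y, strata)
  cv  <- clogitL1::cv.clogitL1(fit)
  summary(cv)  # coefficients at the CV-optimal and 1-SE lambda values
}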
"WeatherTask" |
NULL
stored_account <- R6::R6Class("stored_account", inherit=stored_object,
public=list(
type="storage",
id=NULL,
resourceId=NULL,
activeKeyName=NULL,
autoRegenerateKey=NULL,
regenerationPeriod=NULL,
delete=NULL,
remove=function(confirm=TRUE)
{
if(delete_confirmed(confirm, self$name, "storage"))
invisible(self$do_operation(version=NULL, http_verb="DELETE"))
},
regenerate_key=function(key_name)
{
self$do_operation("regeneratekey", body=list(keyName=key_name), http_verb="POST")
},
create_sas_definition=function(sas_name, sas_template, validity_period, sas_type="account",
enabled=TRUE, recovery_level=NULL, ...)
{
attribs <- list(
enabled=enabled,
recoveryLevel=recovery_level
)
attribs <- attribs[!sapply(attribs, is_empty)]
body <- list(
sasType=sas_type,
templateUri=sas_template,
validityPeriod=validity_period,
attributes=attribs,
tags=list(...)
)
op <- construct_path("sas", sas_name)
self$do_operation(op, body=body, encode="json", http_verb="PUT")
},
delete_sas_definition=function(sas_name, confirm=TRUE)
{
if(delete_confirmed(confirm, sas_name, "SAS definition"))
{
op <- construct_path("sas", sas_name)
invisible(self$do_operation(op, http_verb="DELETE"))
}
},
get_sas_definition=function(sas_name)
{
op <- construct_path("sas", sas_name)
self$do_operation(op)
},
list_sas_definitions=function()
{
get_vault_paged_list(self$do_operation("sas"), self$token)
},
show_sas=function(sas_name)
{
secret_url <- self$get_sas_definition(sas_name)$sid
call_vault_url(self$token, secret_url)$value
},
print=function(...)
{
cat("Key Vault managed storage account '", self$name, "'\n", sep="")
cat(" Account:", basename(self$resourceId), "\n")
invisible(self)
}
)) |
fit_and_compare_bm_models = function( trees1,
tip_states1,
trees2,
tip_states2,
Nbootstraps = 0,
Nsignificance = 0,
check_input = TRUE,
verbose = FALSE,
verbose_prefix = ""){
if(verbose) cat(sprintf("%sFitting BM to first tree set..\n",verbose_prefix))
fit1 = fit_bm_model(trees=trees1, tip_states=tip_states1, Nbootstraps=Nbootstraps, check_input=check_input)
if(!fit1$success) return(list(success=FALSE, error=sprintf("Failed to fit BM to tree set 1: %s",fit1$error)))
if(verbose) cat(sprintf("%sFitting BM to second tree set..\n",verbose_prefix))
fit2 = fit_bm_model(trees=trees2, tip_states=tip_states2, Nbootstraps=Nbootstraps, check_input=check_input)
if(!fit2$success) return(list(success=FALSE, error=sprintf("Failed to fit BM to tree set 2: %s",fit2$error)))
log_difference = abs(log(fit1$diffusivity) - log(fit2$diffusivity))
if(Nsignificance>0){
if(verbose) cat(sprintf("%sCalculating statistical significance of ratio D1/D2..\n",verbose_prefix))
if("phylo" %in% class(trees1)) trees1 = list(trees1)
if("phylo" %in% class(trees2)) trees2 = list(trees2)
Ntrees1 = length(trees1)
Ntrees2 = length(trees2)
if(!("list" %in% class(tip_states1))) tip_states1 = list(tip_states1)
if(!("list" %in% class(tip_states2))) tip_states2 = list(tip_states2)
if(verbose) cat(sprintf("%s Fitting common BM model to both tree sets..\n",verbose_prefix))
fit_common = fit_bm_model(trees=c(trees1,trees2), tip_states=c(tip_states1,tip_states2), Nbootstraps=0, check_input=FALSE)
if(verbose) cat(sprintf("%s Assessing significance over %d BM simulations..\n",verbose_prefix,Nsignificance))
random_tip_states1 = vector(mode="list", Ntrees1)
random_tip_states2 = vector(mode="list", Ntrees2)
Ngreater = 0
Nsuccess = 0
for(r in 1:Nsignificance){
for(tr in 1:Ntrees1){
random_tip_states1[[tr]] = simulate_bm_model(trees1[[tr]], diffusivity=fit_common$diffusivity, include_tips=TRUE, include_nodes=FALSE, drop_dims=TRUE)$tip_states
}
for(tr in 1:Ntrees2){
random_tip_states2[[tr]] = simulate_bm_model(trees2[[tr]], diffusivity=fit_common$diffusivity, include_tips=TRUE, include_nodes=FALSE, drop_dims=TRUE)$tip_states
}
random_fit1 = fit_bm_model(trees=trees1, tip_states=random_tip_states1, Nbootstraps=0, check_input=FALSE)
if(!random_fit1$success){
if(verbose) cat(sprintf("%s WARNING: BM fitting failed for random simulation
next;
}
random_fit2 = fit_bm_model(trees=trees2, tip_states=random_tip_states2, Nbootstraps=0, check_input=FALSE)
if(!random_fit2$success){
if(verbose) cat(sprintf("%s WARNING: BM fitting failed for random simulation
next;
}
Nsuccess = Nsuccess + 1
random_log_difference = abs(log(random_fit1$diffusivity) - log(random_fit2$diffusivity))
Ngreater = Ngreater + (random_log_difference>=log_difference)
}
significance = Ngreater / Nsuccess
}
return(list(success = TRUE,
fit1 = fit1,
fit2 = fit2,
log_difference = log_difference,
significance = (if(Nsignificance>0) significance else NULL),
fit_common = (if(Nsignificance>0) fit_common else NULL)))
} |
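# Hedged usage sketch (added): compare BM diffusivities fitted to two tree
# sets. It assumes the surrounding package provides fit_bm_model() and
# simulate_bm_model() (as used above) and that ape is available for random
# trees; the diffusivities 1 and 2 are arbitrary demo values.
if(0) {
  tr1 <- ape::rtree(100); tr2 <- ape::rtree(100)
  s1  <- simulate_bm_model(tr1, diffusivity = 1)$tip_states
  s2  <- simulate_bm_model(tr2, diffusivity = 2)$tip_states
  cmp <- fit_and_compare_bm_models(tr1, s1, tr2, s2, Nsignificance = 100)
  cmp$log_difference  # |log D1 - log D2|
  cmp$significance    # fraction of null simulations at least as extreme
}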
"confidenceIntervalsPlot" <-
function(){
initializeDialog(title=gettextRcmdr("Confidence Intervals in Simple Linear Regression"))
variablesFrame <- tkframe(top)
.numeric <- Numeric()
xBox <- variableListBox(variablesFrame, .numeric,
title=gettextRcmdr("Explanatory variables (pick one)"))
yBox <- variableListBox(variablesFrame, .numeric, title=gettextRcmdr("Response variable (pick one)"))
UpdateModelNumber()
modelName <- tclVar(paste("RegModel.", getRcmdr("modelNumber"), sep=""))
modelFrame <- tkframe(top)
model <- tkentry(modelFrame, width="20", textvariable=modelName)
subsetBox()
onOK <- function(){
x <- getSelection(xBox)
y <- getSelection(yBox)
closeDialog()
if (0 == length(y)) {
UpdateModelNumber(-1)
errorCondition(recall=confidenceIntervalsPlot, message=gettextRcmdr("You must select a response variable."))
return()
}
if (0 == length(x)) {
UpdateModelNumber(-1)
errorCondition(recall=confidenceIntervalsPlot, message=gettextRcmdr("No explanatory variables selected."))
return()
}
if (is.element(y, x)) {
UpdateModelNumber(-1)
errorCondition(recall=confidenceIntervalsPlot, message=gettextRcmdr("Response and explanatory variables must be different."))
return()
}
subset <- tclvalue(subsetVariable)
if (trim.blanks(subset) == gettextRcmdr("<all valid cases>") || trim.blanks(subset) == ""){
subset <- ""
putRcmdr("modelWithSubset", FALSE)
}
else{
subset <- paste(", subset=", subset, sep="")
putRcmdr("modelWithSubset", TRUE)
}
modelValue <- trim.blanks(tclvalue(modelName))
if (!is.valid.name(modelValue)){
UpdateModelNumber(-1)
errorCondition(recall=confidenceIntervalsPlot, message=sprintf(gettextRcmdr('"%s" is not a valid name.'), modelValue))
return()
}
if (is.element(modelValue, listLinearModels())) {
if ("no" == tclvalue(checkReplace(modelValue, type=gettextRcmdr("Model")))){
UpdateModelNumber(-1)
confidenceIntervalsPlot()
return()
}
}
command <- paste("lm(", y, "~", paste(x, collapse="+"),
", data=", ActiveDataSet(), subset, ")", sep="")
justDoIt(paste(modelValue, " <- ", command, sep=""))
doItAndPrint(paste("summary(", modelValue, ")", sep=""))
activeModel(modelValue)
doItAndPrint(paste("ci.plot(", modelValue, ")", sep=""))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ci.plot", model=TRUE)
tkgrid(tklabel(modelFrame, text=gettextRcmdr("Enter name for model:")), model, sticky="w")
tkgrid(modelFrame, sticky="w")
tkgrid(getFrame(yBox), tklabel(variablesFrame, text=" "), getFrame(xBox), sticky="nw")
tkgrid(variablesFrame, sticky="w")
tkgrid(subsetFrame, sticky="w")
tkgrid(buttonsFrame, stick="w")
tkgrid.configure(helpButton, sticky="e")
dialogSuffix(rows=4, columns=1)
} |
chisq.loglog <- function(st, ot.sq)
{
  # Chi-squared test for equality of the survival estimates 'st' (with
  # variances 'ot.sq') on the log(-log) scale, contrasting each estimate
  # against the first.
  A <- matrix(log(-log(st[1])) - log(-log(st[2:length(st)])), 1, (length(st) - 1))
  SIGMA <- array(ot.sq[1]/((log(st[1]))^2), c(length(st) - 1, length(st) - 1))
  diag(SIGMA) <- (ot.sq[1]/((log(st[1]))^2)) + (ot.sq[2:length(st)]/((log(st[2:length(st)]))^2))
  chisq <- A %*% solve(SIGMA) %*% t(A)
  chisq
}
knitr::opts_chunk$set(
collapse = TRUE,
echo = FALSE,
comment = "
)
use_dt <- FALSE
if(requireNamespace("DT", quietly = TRUE)) use_dt <- TRUE
DT::datatable(nflreadr::dictionary_rosters,
options = list(scrollX = TRUE, pageLength = 25),
filter = "top",
rownames = FALSE
) |
error <-
  function(a, b)
  {
    # percentage difference between var(a) and var(b), relative to var(a)
    d <- (var(a) - var(b)) * 100 / var(a)
    d <- as.numeric(d)
    return(d)
  }
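# Tiny worked example (added): error() returns the percentage reduction in
# variance of 'b' relative to 'a'.
if(0) {
  a <- rnorm(1000, sd = 1)
  b <- rnorm(1000, sd = 0.5)
  error(a, b)  # roughly 75, since var(b) is about a quarter of var(a)
}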
.scapa.uv.class<-setClass("scapa.uv.class",contains="capa.class",representation())
scapa.uv.class<-function(data,beta,beta_tilde,min_seg_len,max_seg_len,max_lag,type,
transform,anomaly_types,anomaly_positions,components,start_lags,end_lags,...)
{
.scapa.uv.class(capa.class(data=data,beta=beta,beta_tilde=beta_tilde,min_seg_len=min_seg_len,max_seg_len=max_seg_len,max_lag=max_lag,type=type,
transform=transform,anomaly_types=anomaly_types,anomaly_positions=anomaly_positions,components=components,start_lags=start_lags,end_lags=end_lags)
,...)
}
setMethod("point_anomalies",signature=list("scapa.uv.class"),
function(object,epoch=nrow(object@data))
{
return(callNextMethod(object,epoch=epoch)[,c(1,3)])
})
setMethod("collective_anomalies",signature=list("scapa.uv.class"),
function(object,epoch=nrow(object@data))
{
return(callNextMethod(object,epoch=epoch)[,c(1:2,6:7)])
})
setMethod("plot",signature=list("scapa.uv.class"),function(x,epoch,variate_name=FALSE)
{
if(missing(epoch))
{
epoch<-nrow(x@data)
}
return(plot(as(x,"capa.class"),epoch=epoch,variate_names=variate_name))
})
scapa.uv<-function(x,beta=NULL,beta_tilde=NULL,type="meanvar",min_seg_len=10,max_seg_len=Inf,transform=tierney)
{
x<-to_array(x)
if(dim(x)[2] > 1)
{
stop("data for univariate analysis must have 1 variate. Use capa or capa.mv for multivariate data.")
}
res<-capa(x=x,beta=beta,beta_tilde=beta_tilde,type=type,min_seg_len=min_seg_len,max_seg_len=max_seg_len,transform=transform)
return(
scapa.uv.class(data=res@data,
beta=res@beta,
beta_tilde=res@beta_tilde,
min_seg_len=res@min_seg_len,
max_seg_len=res@max_seg_len,
max_lag=res@max_lag,
type=res@type,
transform=res@transform,
anomaly_types=res@anomaly_types,
anomaly_positions=res@anomaly_positions,
components=res@components,
start_lags=res@start_lags,
end_lags=res@end_lags)
)
} |
callWithoutSumt <- function(theta, fName, ...) {
return( callWithoutArgs( theta, fName = fName,
args = names(formals(sumt)), ... ) )
} |
if (FALSE) {  # interactive/dev-only examples, intentionally not run
m = matrix(rnorm(100), 10)
rownames(m) = 1:10
colnames(m) = 1:10
ht = Heatmap(m)
ht = draw(ht)
selectArea(ht)
ht = Heatmap(m, row_km = 2, column_km = 2)
ht = draw(ht)
selectArea(ht)
ht = Heatmap(m, row_km = 2, column_km = 2) + Heatmap(m, row_km = 2, column_km = 2)
ht = draw(ht)
selectArea(ht)
pdf("~/test.pdf")
ht = Heatmap(m)
ht = draw(ht)
selectArea(ht, pos1 = unit(c(1, 1), "cm"), pos2 = unit(c(4, 4), "cm"), verbose = TRUE)
set.seed(123)
ht = Heatmap(m, row_km = 2, column_km = 2)
ht = draw(ht)
selectArea(ht, pos1 = unit(c(1, 1), "cm"), pos2 = unit(c(8, 8), "cm"), verbose = TRUE)
dev.off()
png("~/test-1.png")
ht = Heatmap(m)
ht = draw(ht)
selectArea(ht, pos1 = unit(c(1, 1), "cm"), pos2 = unit(c(4, 4), "cm"), verbose = TRUE)
dev.off()
png("~/test-2.png")
set.seed(123)
ht = Heatmap(m, row_km = 2, column_km = 2)
ht = draw(ht)
selectArea(ht, pos1 = unit(c(1, 1), "cm"), pos2 = unit(c(8, 8), "cm"), verbose = TRUE)
dev.off()
} |
getauthorrecordraw <- function(id, code = NA) {
repec_api_with_id(method = 'getauthorrecordraw', id = id, code = code)
}
get_author_record_raw <- getauthorrecordraw |
overfit_demo <-
function(DF,y=NA,seed=NA,aic=TRUE) {
    if(is.na(y)) { stop("Need to specify y variable in quotes\n") }
if(!is.na(seed)) { set.seed(seed) }
n <- nrow(DF)
selected <- sample(n,n/2,replace=TRUE)
training <- DF[selected,]
holdout <- DF[-selected,]
form1 <- formula( paste(y,"~1") )
form2 <- formula( paste(y,"~.^2") )
null.model <- lm(form1,data=training)
full.model <- lm(form2,data=training)
best.model <- step(null.model,scope=list(lower=null.model,upper=full.model),direction="forward",trace=0)
M <- step(null.model,scope=list(lower=null.model,upper=full.model),direction="forward",trace=0,steps=1)
y.pos <- which(names(holdout)==y)
y.holdout <- holdout[,y.pos]
pred.holdout <- predict(M,newdata=holdout)
RMSE.holdout <- sqrt(mean( (y.holdout-pred.holdout)^2 ))
aic.train <- AIC(M)
RMSE.train <- summary(M)$sigma
for (i in 2:30) {
M <- step(M,scope=list(lower=null.model,upper=full.model),direction="both",steps=1,trace=0,k=.001)
pred.holdout <- predict(M,newdata=holdout)
RMSE.holdout[i] <- sqrt(mean( (y.holdout-pred.holdout)^2 ))
aic.train[i] <- AIC(M)
RMSE.train[i] <- summary(M)$sigma
}
RMSE.holdout <- (RMSE.holdout-min(RMSE.holdout))/(max(RMSE.holdout)-min(RMSE.holdout))
aic.train <- (aic.train-min(aic.train))/(max(aic.train)-min(aic.train))
RMSE.train <- (RMSE.train-min(RMSE.train))/(max(RMSE.train)-min(RMSE.train))
if(aic==TRUE) {
      # Axis labels below were truncated at a "#" during extraction; generic
      # labels are substituted.
      plot( 1:length(aic.train), RMSE.holdout, xlab="step",
            ylab="scaled criterion (0-1)", type="l", lwd=2 )
      lines( 1:length(aic.train), aic.train, lwd=2, lty=2 )
      legend("top", c("RMSE(holdout)","AIC(training)"), lwd=2, lty=1:2)
}
if(aic==FALSE) {
      plot( 1:length(RMSE.train), RMSE.holdout, xlab="step",
            ylab="scaled criterion (0-1)", type="l", lwd=2 )
      lines( 1:length(RMSE.train), RMSE.train, lwd=2, lty=2 )
      legend("top", c("RMSE(holdout)","RMSE(training)"), lwd=2, lty=1:2)
}
} |
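# Usage sketch for overfit_demo() above (hypothetical call, shown but not
# run): holdout RMSE eventually rises while the training criterion falls.
# overfit_demo(mtcars, y = "mpg", seed = 42)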
.cc_core <- function(qx,qy,numb_cc){
if(!is(qx,"qr")) qx=qr(qx)
if(!is(qy,"qr")) qy=qr(qy)
res <- svd(qr.qty(qx, qr.Q(qy))[1L:qx$rank, ,drop = FALSE],
numb_cc, numb_cc)
names(res)[1]="cor"
return(res)
}
.svd <- function(...){
sv=svd(...)
np=sv$d>1E-12
if(!all(np)){
sv$v=sv$v[,np]
sv$u=sv$u[,np]
sv$d=sv$d[np]
}
sv
}
convert2dummies <- function(Y){
Y=model.matrix(~.+0,data=data.frame(Y))
Y
}
fillnas <- function(Y){
nas=which(is.na(Y),arr.ind = TRUE)
if(nrow(nas)==0) return(Y)
Y[nas]=colMeans(Y[,nas[,2],drop=FALSE],na.rm=TRUE)
Y
}
as_named_matrix <- function(Y,root_name="V"){
Y=as.matrix(Y)
if(is.null(colnames(Y)))
colnames(Y)=paste0(root_name,1:ncol(Y))
Y
}
.get_explained_variance_proportion <- function(Y, score){
expl_var=sapply(1:ncol(score),function(i){
sc=score[,i,drop=FALSE]
sc=sc/sqrt(sum(sc^2))
    proj = sc %*% t(sc)
    sum(diag(t(Y) %*% proj %*% Y))  # variation of Y captured along this score
  })
res=expl_var/sum(colSums(Y^2))
names(res)=colnames(score)
res
}
.is_svd <- function(X){
if(!is.list(X)) return(FALSE)
setequal(x = names(X),y = c("d","u","v"))
}
.compute_stats <- function (res,svx,svy)
{
xscores = res$data$X %*% res$xcoef
yscores = res$data$Y %*% res$ycoef
if(!is.null(svx)) res$data$X=res$data$X%*%diag(svx$d[1:ncol(res$data$X)])%*%t(svx$v)
if(!is.null(svy)) res$data$Y=res$data$Y%*%diag(svy$d[1:ncol(res$data$Y)])%*%t(svy$v)
corr.X.xscores = cor(res$data$X, xscores, use = "pairwise")
corr.Y.xscores = cor(res$data$Y, xscores, use = "pairwise")
corr.X.yscores = cor(res$data$X, yscores, use = "pairwise")
corr.Y.yscores = cor(res$data$Y, yscores, use = "pairwise")
res$scores=list(xscores = xscores, yscores = yscores)
res$corr= list( corr.X.xscores = corr.X.xscores,
corr.Y.xscores = corr.Y.xscores,
corr.X.yscores = corr.X.yscores,
corr.Y.yscores = corr.Y.yscores)
  res$prop_expl_var=
    list(X = .get_explained_variance_proportion(res$data$X, res$scores$xscores),
         Y = .get_explained_variance_proportion(res$data$Y, res$scores$yscores))
res
}
residualize <- function(Y,Z){
HY=Z%*%solve(t(Z)%*%Z)%*%t(Z)%*%Y
Y-HY
}
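# Quick check for residualize() above (hypothetical data, kept as a comment
# so it does not execute in package code): residualized columns are
# orthogonal to Z, so crossprod(Z, R) is numerically zero.
# Z <- cbind(1, rnorm(20)); Y <- matrix(rnorm(40), 20, 2)
# round(crossprod(Z, residualize(Y, Z)), 10)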
residualizing_matrix <- function(Z,return_Q=TRUE)
{
res <- list(IH = diag(nrow(Z)) - Z %*% solve(t(Z)%*%Z) %*% t(Z))
res$IH <- (res$IH + t(res$IH))/2
if(return_Q){
ei = eigen(res$IH)
if (any(is.complex(ei$values))) {
      warning("Data cannot be orthogonalized")
return(NA)
}
ei$vectors <- ei$vectors[, (ei$values > 0.1)]
res$Q=t(ei$vectors)
}
return(res)
} |
rstack <- function() {
s <- new.env(parent = emptyenv())
s$head <- NULL
s$tail <- NULL
s$len <- 0
class(s) <- "rstack"
return(s)
} |
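# Usage sketch for rstack() above: the constructor only builds the empty
# container; push/pop helpers are assumed to be defined elsewhere.
s <- rstack()
s$len      # 0
class(s)   # "rstack"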
covRob <- function(data, corr = FALSE, distance = TRUE, na.action = na.fail,
estim = "auto", control = covRob.control(estim, ...), ...)
{
data <- na.action(data)
if(is.data.frame(data))
data <- data.matrix(data)
n <- nrow(data)
p <- ncol(data)
rowNames <- dimnames(data)[[1]]
colNames <- dimnames(data)[[2]]
dimnames(data) <- NULL
if(is.null(colNames))
colNames <- paste("V", 1:p, sep = "")
if(p < 2)
stop(sQuote("data"), " must have at least two columns to compute ",
"a covariance matrix")
if(n < p)
stop("not enough observations")
estim <- casefold(estim)
if(estim == "auto") {
if((n < 1000 && p < 10) || (n < 5000 && p < 5))
estim <- "donostah"
else if(n < 50000 && p < 20)
estim <- "mcd"
else
estim <- "pairwiseqc"
control <- covRob.control(estim)
}
else {
dots <- list(...)
dots.names <- names(dots)
if(any(dots.names == "quan") && all(dots.names != "alpha")) {
dots.names[dots.names == "quan"] <- "alpha"
names(dots) <- dots.names
}
if(any(dots.names == "ntrial") && all(dots.names != "nsamp")) {
dots.names[dots.names == "ntrial"] <- "nsamp"
names(dots) <- dots.names
}
control.names <- names(control)
if(any(control.names == "init.control"))
control.names <- c(control.names, names(control$init.control))
if(any(!is.element(dots.names, control.names))) {
bad.args <- sQuote(setdiff(dots.names, control.names))
if(length(bad.args) == 1)
stop(sQuote(bad.args), " is not a control argument for the ",
dQuote(estim), " estimator")
else
stop(paste(sQuote(bad.args), collapse = ", "), " are not control ",
"arguments for the ", dQuote(estim), " estimator")
}
}
ans <- switch(estim,
donostah = {
args <- list(x = data)
if(control$nresamp != "auto") args$nsamp <- control$nresamp
if(control$maxres != "auto") args$maxres <- control$maxres
if(!control$random.sample) set.seed(21)
args$tune <- control$tune
args$prob <- control$prob
args$eps <- control$eps
ds <- do.call("CovSde", args)
list(cov = getCov(ds), center = getCenter(ds), dist = getDistance(ds))
},
pairwiseqc = {
x <- CovOgk(data, control = CovControlOgk(smrob = "s_mad", svrob = "qc"))
      list(center = getCenter(x), cov = getCov(x), dist = getDistance(x),
           # slot names reconstructed: the originals were mangled into
           # "[email protected]" by e-mail obfuscation; rrcov's CovOgk slots assumed
           raw.center = x@raw.center, raw.cov = x@raw.cov, raw.dist = x@raw.mah)
},
pairwisegk = {
x <- CovOgk(data)
      list(center = getCenter(x), cov = getCov(x), dist = getDistance(x),
           raw.center = x@raw.center, raw.cov = x@raw.cov, raw.dist = x@raw.mah)  # reconstructed as above
},
m = {
mcd.control <- control$init.control
control$init.control <- NULL
if(mcd.control$alpha > 1)
mcd.control$alpha <- mcd.control$alpha / n
init <- covMcd(data, cor = FALSE, control = mcd.control)
ans <- covMest(data, cor = FALSE, r = control$r, arp = control$arp,
eps = control$eps, maxiter = control$maxiter,
t0 = init$raw.center, S0 = init$raw.cov)
ans$dist <- ans$mah
ans$raw.center <- init$raw.center
ans$raw.cov <- init$raw.cov
ans$raw.dist <- init$raw.mah
ans
},
mcd = {
if(control$alpha > 1)
control$alpha <- control$alpha / n
ans <- covMcd(data, cor = FALSE, control = control)
ans$center <- ans$raw.center
ans$cov <- ans$raw.cov
ans$dist <- ans$raw.mah
ans$raw.cov <- ans$raw.cov / prod(ans$raw.cnp2)
ans$raw.dist <- ans$raw.mah * prod(ans$raw.cnp2)
ans
},
weighted = {
if(control$alpha > 1)
control$alpha <- control$alpha / n
ans <- covMcd(data, cor = FALSE, control = control)
ans$dist <- ans$mah
ans$raw.cov <- ans$raw.cov / prod(ans$raw.cnp2)
ans$raw.dist <- ans$raw.mah * prod(ans$raw.cnp2)
ans
},
default = stop("Invalid choice of estimator.")
)
dimnames(ans$cov) <- list(colNames, colNames)
names(ans$center) <- colNames
if(is.null(ans$raw.cov)) {
ans$raw.cov <- NA
ans$raw.center <- NA
}
else {
dimnames(ans$raw.cov) <- list(colNames, colNames)
names(ans$raw.center) <- colNames
}
if(distance) {
if(is.null(ans$dist))
ans$dist <- mahalanobis(data, ans$center, ans$cov)
if(!is.na(ans$raw.cov[1])) {
if(is.null(ans$raw.dist))
ans$raw.dist <- mahalanobis(data, ans$raw.center, ans$raw.cov)
}
else
ans$raw.dist <- NA
}
else {
ans$dist <- NA
ans$raw.dist <- NA
}
if(!is.na(ans$dist[1]) && !is.null(rowNames))
names(ans$dist) <- rowNames
if(!is.na(ans$raw.dist[1]) && !is.null(rowNames))
names(ans$raw.dist) <- rowNames
if(corr) {
std <- sqrt(diag(ans$cov))
ans$cov <- ans$cov / (std %o% std)
if(!is.na(ans$raw.cov[1])) {
std <- sqrt(diag(ans$raw.cov))
ans$raw.cov <- ans$raw.cov / (std %o% std)
}
}
ans$corr <- corr
ans$estim <- estim
ans$control <- control
ans$call <- match.call()
ans <- ans[c("call", "cov", "center", "dist", "raw.cov", "raw.center",
"raw.dist", "corr", "estim", "control")]
oldClass(ans) <- "covRob"
ans
} |
sample_draws = function(data, ndraws, draw = ".draw", seed = NULL) {
.draw = as.name(draw)
draw_full = data[[draw]]
if (!is.null(seed)) set.seed(seed)
draw_sample = sample(unique(draw_full), ndraws)
filter(data, !!.draw %in% !!draw_sample)
} |
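# Usage sketch for sample_draws() above (assumes dplyr is attached, since the
# function calls filter() unqualified; hypothetical draws data frame).
library(dplyr)
draws <- data.frame(.draw = rep(1:100, each = 2), value = rnorm(200))
nrow(sample_draws(draws, ndraws = 10, seed = 1))  # 20 rows: 10 draws x 2 each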
context("sf linestring")
test_that("various objects converted to sf_linestring",{
m <- matrix(1:4, ncol = 2)
m <- cbind(c(1L,1L), m)
res <- sfheaders:::rcpp_sf_linestring(m, c(1L,2L), 0L, "", FALSE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
res <- sfheaders:::rcpp_sf_linestring(m, c(0L, 1L), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:9, ncol = 3)
res <- sfheaders:::rcpp_sf_linestring(m, c(0L, 1L), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c(0L,1L), NULL, "", TRUE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,3,4,5), ncol = 2)
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1L,1L,2L,2L))
res <- sfheaders:::rcpp_sf_linestring(m, c(0L,1L), 2L, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
res <- sfheaders:::rcpp_sf_linestring(m, c(0L,1L), 2L, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
res <- sfheaders:::rcpp_sf_linestring(m, c(0L,1L), 2L, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c(0L,1L), 2L, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1L,1L,2L,2L) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(m, c("V1","V2"), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(m, c("V1","V2"), NULL, "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,2:8), ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), NULL, "", TRUE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1L:4L, ncol = 2)
m <- cbind(c(1L,1L), m)
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(m, c("V1","V2"), "V3", "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_linestring(m, c("V1","V2"), "V3", "", TRUE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,2:8), ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), "V3", "", TRUE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), c("V3"), "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c("V1","V2"), c("V3"), "", TRUE)
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_linestring(df, c(0L,1L), 2L, "", TRUE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
})
test_that("ineger column indexing works (issue
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
res <- sfheaders:::rcpp_to_sf(obj = m, geometry_columns = c(0L,1L), NULL, 1L, NULL, NULL, NULL, NULL, FALSE, TRUE, "", "LINESTRING")
res2 <- sfheaders::sf_linestring(m, x = 1, y = 2, linestring_id = 2, keep = T)
expect_equal( res, res2 )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
expect_equal( res$V3, c(1,1,2,2) )
expect_true( ncol(res) == 3 )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
res <- sfheaders:::rcpp_to_sf(obj = m, geometry_columns = c(0L,1L), NULL, 2L, NULL, NULL, NULL, NULL, FALSE, TRUE, "", "LINESTRING")
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
expect_equal( res$V3, 1:2 )
}) |
context("plot Words in Topics relative to Words")
test_that("plotTopicWord", {
suppressWarnings(RNGversion("3.5.0"))
set.seed(123)
x1 <- matrix(sample(c(rep(0, 20), 1:20), 10000, replace = TRUE), 10, 1000)
ldaID <- paste("ID", 1:200)
x2 <- list(document_sums = x1)
text <- matrix(sample(paste("word", 1:100), 10000, replace = TRUE), 200, 50)
text <- lapply(apply(text, 1, list), unlist)
names(text) <- paste("ID", 1:200)
words <- makeWordlist(text)$words
LDAdoc <- LDAprep(text, words)
lda <- LDAgen(documents = LDAdoc, K = 3L, vocab = words,
num.iterations = 20L, burnin = 70L, seed = 123)
meta1 <- as.Date(sample(1:730, 1200, replace = TRUE), origin = "1990-10-03")
names(meta1) <- paste("ID", 1:1200)
meta <- data.frame(id = paste("ID", 1:1200), date = meta1,
title = as.character(NA), stringsAsFactors = FALSE)
obj <- textmeta(text = text, meta = meta)
res1 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID)
expect_true(all(res1$date == seq(min(res1$date), max(res1$date), "month")))
res2 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID, unit = "week")
expect_true(all(res2$date == seq(min(res2$date), max(res2$date), "week")))
res3 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID, pages = TRUE)
expect_equal(res1, res3)
res4 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID,
mark = FALSE, curves = "both", legend = "none", natozero = FALSE)
expect_equal(res1, res4)
res5 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID, rel = TRUE, link = "or")
expect_true(all(res5$date == res1$date), all(colnames(res1) == colnames(res5)),
all(res5[, -1] <= 1))
res6 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID, file = file.path(tempdir(),"abc.pdf"))
expect_equal(res1, res6)
res7 <- plotTopicWord(object = obj, docs = LDAdoc, ldaresult = lda,
ldaID = ldaID, curves = "smooth")
expect_equal(res1, res7)
}) |
library(EnvStats)
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "beta", param.list = list(shape1=2, shape2=4),
curve.fill.col = "cyan")
pdfPlot(dist = "beta", param.list = list(shape1=1, shape2=1, ncp=1),
curve.fill.col = "cyan")
pdfPlot(dist = "binom", param.list = list(size=10, prob=0.5),
hist.col = "cyan")
pdfPlot(dist = "cauchy", param.list = list(location=0, scale=1),
left.tail.cutoff = 0.01, right.tail.cutoff = 0.01,
curve.fill.col = "cyan")
pdfPlot(dist = "chi", param.list = list(df=4),
curve.fill.col = "cyan")
pdfPlot(dist = "chisq", param.list = list(df=4),
curve.fill.col = "cyan")
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "chisq", param.list = list(df=5, ncp=1),
curve.fill.col = "cyan")
set.seed(21)
epdfPlot(rgamma(100, shape=4, scale=5), curve.fill = TRUE, curve.fill.col = "cyan",
xlab = "Observations",
main = "Empirical Density Based On 100\nGamma(shape=4, scale=5) Random Numbers",
cex.main = 1)
pdfPlot(dist = "exp", param.list = list(rate=2),
curve.fill.col = "cyan")
pdfPlot(dist = "evd", param.list = list(location=0, scale=1),
curve.fill.col = "cyan")
pdfPlot(dist = "gevd", param.list = list(location=0, scale=1, shape = 0.5),
curve.fill.col = "cyan", cex.main = 1)
pdfPlot(dist = "f", param.list = list(df1=5, df2=10),
curve.fill.col = "cyan")
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "f", param.list = list(df1=5, df2=10, ncp=1),
curve.fill.col = "cyan")
pdfPlot(dist = "gamma", param.list = list(shape=2, scale=1),
curve.fill.col = "cyan")
pdfPlot(dist = "gammaAlt", param.list = list(mean=10, cv=0.5),
curve.fill.col = "cyan")
pdfPlot(dist = "geom", param.list = list(prob=0.5),
hist.col = "cyan")
pdfPlot(dist = "hyper", param.list = list(m=20, n=15, k=7),
hist.col = "cyan")
pdfPlot(dist = "logis", param.list = list(location=0, scale=1),
curve.fill.col = "cyan")
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "lnorm", param.list = list(meanlog=0, sdlog=1),
curve.fill.col = "cyan")
pdfPlot(dist = "lnormAlt", param.list = list(mean=10, cv=0.5),
curve.fill.col = "cyan")
pdfPlot(dist = "lnormMix", param.list =
list(meanlog1=0, sdlog1=1, meanlog2=3, sdlog2=0.5, p.mix=0.5),
right.tail.cutoff = 0.02,
curve.fill.col = "cyan", cex.main = 1,
main = paste("Lognormal Mixture Density", "(meanlog1=0, sdlog1=1,",
"meanlog2=3, sdlog2=0.5, p.mix=0.5)", sep="\n"))
pdfPlot(dist = "lnormMixAlt", param.list =
list(mean1=5, cv1=1, mean2=20, cv2=0.5, p.mix=0.5),
right.tail.cutoff = 0.01,
curve.fill.col = "cyan", cex.main=1,
main = paste("Lognormal Mixture Density", "(mean1=5, cv1=1,",
"mean2=20, cv2=0.5, p.mix=0.5)", sep="\n"))
pdfPlot(dist = "lnorm3", param.list = list(meanlog=0, sdlog=1, threshold=5),
right.tail.cutoff = 0.01, curve.fill.col = "cyan", cex.main = 1)
pdfPlot(dist = "lnormTrunc", param.list =
list(meanlog=0, sdlog=1, min=0, max=2),
curve.fill.col = "cyan", cex.main = 1)
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "lnormTruncAlt", param.list =
list(mean=2, cv=1, min=0, max=3),
curve.fill.col = "cyan")
pdfPlot(dist = "nbinom", param.list = list(size=4, prob=0.5),
hist.col = "cyan")
pdfPlot(dist = "norm", param.list = list(mean=0, sd=1),
curve.fill.col = "cyan")
pdfPlot(dist = "normMix", param.list =
list(mean1=0, sd1=1, mean2=4, sd2=2, p.mix=0.5),
curve.fill.col = "cyan", cex.main=1,
main = paste("Normal Mixture Density", "(mean1=0, sd1=1,",
"mean2=4, sd2=2, p.mix=0.5)", sep="\n"))
pdfPlot(dist = "normTrunc", param.list =
list(mean=10, sd=2, min=8, max=13),
curve.fill.col = "cyan", cex.main = 1)
pdfPlot(dist = "pareto", param.list = list(location=1, shape=2),
curve.fill.col = "cyan", right.tail.cutoff = 0.01)
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "pois", param.list = list(lambda=5),
hist.col = "cyan")
pdfPlot(dist = "t", param.list = list(df=5),
curve.fill.col = "cyan")
pdfPlot(dist = "t", param.list = list(df=5, ncp=1),
curve.fill.col = "cyan")
pdfPlot(dist = "tri", param.list = list(min=0, max=1, mode=0.7),
curve.fill.col = "cyan")
pdfPlot(dist = "unif", param.list = list(min=0, max=1),
curve.fill.col = "cyan")
pdfPlot(dist = "weibul", param.list = list(shape=2, scale=1),
curve.fill.col = "cyan")
windows()
par(mfrow = c(3, 2), mar = c(3, 3, 3, 1), mgp = c(1.5, 0.5, 0))
pdfPlot(dist = "wilcox", param.list = list(m=4, n=3),
hist.col = "cyan")
pdfPlot(dist = "zmlnorm", param.list = list(meanlog=0, sdlog=1, p.zero=0.5),
right.tail.cutoff = 0.01, curve.fill.col = "cyan", cex.main = 1)
pdfPlot(dist = "zmlnormAlt", param.list = list(mean=2, cv=1, p.zero=0.4),
right.tail.cutoff = 0.01, curve.fill.col = "cyan", cex.main = 1)
pdfPlot(dist = "zmnorm", param.list = list(mean=5, sd=1, p.zero=0.3),
curve.fill.col = "cyan")
frame()
frame()
windows()
with(EPA.94b.tccb.df,
hist(TcCB[Area == "Reference"], freq = FALSE,
xlim = c(0, 2), xlab = "TcCB (ppb)", col = "cyan",
main = "Density Histogram of Reference Area TcCB Data"))
windows()
pdfPlot(distribution = "lnormAlt", param.list = list(mean = 0.6, cv = 0.5),
curve.fill.col = "cyan")
round(dlnormAlt(seq(0, 2, by = 0.5), mean = 0.6, cv = 0.5), 3)
windows()
pdfPlot(distribution = "gammaAlt", param.list = list(mean = 0.6, cv = 0.5))
round(dgammaAlt(seq(0, 2, by = 0.5), mean = 0.6, cv = 0.5), 3)
windows()
cdfPlot(distribution = "lnormAlt", param.list = list(mean = 0.6, cv = 0.5))
round(plnormAlt(seq(0, 2, by = 0.5), mean = 0.6, cv = 0.5), 2)
qlnormAlt(c(0.5, 0.95), mean = 0.6, cv = 0.5)
set.seed(23)
rlnormAlt(5, mean = 0.6, cv = 0.5)
library(MASS)
set.seed(47)
sd.vec <- c(1, 3)
cor.mat <- matrix(c(1, 0.5, 0.5, 1), ncol = 2)
cov.mat <- diag(sd.vec) %*% cor.mat %*% diag(sd.vec)
mvrnorm(n = 3, mu = c(5, 10), Sigma = cov.mat)
rm(sd.vec, cor.mat, cov.mat)
simulateMvMatrix(n = 3,
distributions = c(X1 = "norm", X2 = "lnormAlt"),
param.list = list(X1 = list(mean = 5, sd = 1),
X2 = list(mean = 10, cv = 2)),
cor.mat = matrix(c(1, 0.5, 0.5, 1), ncol=2), seed = 105) |
"charity"
|
context("Test of ODEmorris.default() (and plotting)")
FHNmod <- function(Time, State, Pars) {
with(as.list(c(State, Pars)), {
dVoltage <- s * (Voltage - Voltage^3 / 3 + Current)
dCurrent <- - 1 / s *(Voltage - a + b * Current)
return(list(c(dVoltage, dCurrent)))
})
}
FHNstate <- c(Voltage = -1, Current = 1)
FHNtimes1 <- seq(0.1, 20, by = 5)
FHNtimes2 <- 10
set.seed(2015)
FHNres1 <- ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes1,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 4,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
set.seed(2015)
FHNres2 <- ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes2,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 4,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
FHNmod3 <- function(Time, State, Pars) {
with(as.list(c(State, Pars)), {
dVoltage <- 3 * (Voltage - Voltage^3 / 3 + Current)
dCurrent <- - 1 / 3 *(Voltage - a + 0.3 * Current)
return(list(c(dVoltage, dCurrent)))
})
}
set.seed(2015)
FHNres3 <- ODEmorris(mod = FHNmod3,
pars = "a",
state_init = FHNstate,
times = FHNtimes2,
binf = 0.18,
bsup = 0.22,
r = 4,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
set.seed(2015)
FHNres_parallel <- ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes1,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 4,
design = list(type = "oat", levels = 100,
grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = TRUE,
parallel_eval_ncores = 2)
set.seed(2015)
FHNres_simplex <- ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes1,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 4,
design =
list(type = "simplex", scale.factor = 0.01),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA)
test_that("Result type is correct", {
expect_true(is.list(FHNres1))
expect_equal(class(FHNres1), "ODEmorris")
expect_equal(length(FHNres1), length(FHNstate))
expect_equal(names(FHNres1), names(FHNstate))
expect_true(is.matrix(FHNres1$Voltage))
expect_true(is.matrix(FHNres1$Current))
expect_equal(dim(FHNres1$Voltage),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes1)))
expect_equal(dim(FHNres1$Current),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes1)))
expect_true(is.list(FHNres2))
expect_equal(class(FHNres2), "ODEmorris")
expect_equal(length(FHNres2), length(FHNstate))
expect_equal(names(FHNres2), names(FHNstate))
expect_true(is.matrix(FHNres2$Voltage))
expect_true(is.matrix(FHNres2$Current))
expect_equal(dim(FHNres2$Voltage),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes2)))
expect_equal(dim(FHNres2$Current),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes2)))
expect_true(is.list(FHNres3))
expect_equal(class(FHNres3), "ODEmorris")
expect_equal(length(FHNres3), length(FHNstate))
expect_equal(names(FHNres3), names(FHNstate))
expect_true(is.matrix(FHNres3$Voltage))
expect_true(is.matrix(FHNres3$Current))
expect_equal(dim(FHNres3$Voltage),
c(1 + 3*length(c("a")), length(FHNtimes2)))
expect_equal(dim(FHNres3$Current),
c(1 + 3*length(c("a")), length(FHNtimes2)))
expect_equal(FHNres_parallel, FHNres1)
expect_true(is.list(FHNres_simplex))
expect_equal(class(FHNres_simplex), "ODEmorris")
expect_equal(length(FHNres_simplex), length(FHNstate))
expect_equal(names(FHNres_simplex), names(FHNstate))
expect_true(is.matrix(FHNres_simplex$Voltage))
expect_true(is.matrix(FHNres_simplex$Current))
expect_equal(dim(FHNres_simplex$Voltage),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes1)))
expect_equal(dim(FHNres_simplex$Current),
c(1 + 3*length(c("a", "b", "s")), length(FHNtimes1)))
})
test_that("Errors and warnings are thrown", {
set.seed(2015)
expect_warning(FHNres_binf_bsup <-
ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes1,
binf = c(0.22, 0.18, 2.8),
bsup = c(0.18, 0.22, 3.2),
r = 4,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA),
paste("At least one element of \"bsup\" was lower than the",
"corresponding element of \"binf\".",
"Elements were swapped."))
expect_equal(FHNres1, FHNres_binf_bsup)
set.seed(2015)
expect_warning(ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes2,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 1,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA),
"Calculation of sigma requires r >= 2.")
set.seed(2015)
expect_error(ODEmorris(mod = FHNmod,
pars = c("a", "b", "s"),
state_init = FHNstate,
times = FHNtimes2,
binf = c(0.18, 0.18, 2.8),
bsup = c(0.22, 0.22, 3.2),
r = 0,
design =
list(type = "oat", levels = 100, grid.jump = 1),
scale = TRUE,
ode_method = "adams",
parallel_eval = FALSE,
parallel_eval_ncores = NA))
})
test_that("Plots are generated", {
expect_true(plot(FHNres1))
expect_true(plot(FHNres2))
expect_true(plot(FHNres3))
expect_true(plot(FHNres_parallel))
expect_true(plot(FHNres_simplex))
expect_true(plot(FHNres1, kind = "trajec"))
expect_true(plot(FHNres2, kind = "trajec"))
expect_true(plot(FHNres3, kind = "trajec"))
expect_true(plot(FHNres_parallel, kind = "trajec"))
expect_true(plot(FHNres_simplex, kind = "trajec"))
expect_true(plot(FHNres1, state_plot = "Current", main_title = "Hi!",
legendPos = "topleft", type = "b"))
my_cols <- c("firebrick", "chartreuse3", "dodgerblue")
expect_true(plot(FHNres1, state_plot = "Current", colors_pars = my_cols))
expect_true(plot(FHNres1, state_plot = "Current", cex.axis = 2, cex = 4,
main = "Small Title", cex.main = 0.5))
}) |
test_that("dendrogram plots", {
require(ggplot2)
hc <- hclust(dist(USArrests), "ave")
hcdata <- dendro_data(hc, type = "rectangle")
  p <- ggplot() +
    geom_segment(data = segment(hcdata), aes(x = x, y = y, xend = xend, yend = yend)) +
    geom_text(data = label(hcdata), aes(x = x, y = y, label = label)) +
coord_flip() +
scale_y_reverse(expand = c(0.2, 0))
expect_s3_class(p, "ggplot")
}) |
acontext("mixtureKNN data set")
data(mixtureKNN)
mixtureKNN$Bayes.error$text.V1.prop <- 0
mixtureKNN$Bayes.error$text.V2.bottom <- -2
mixtureKNN$other.error$text.V1.prop <- 0
mixtureKNN$Bayes.error$text.V1.error <- -2.6
mixtureKNN$other.error$text.V1.error <- -2.6
classifier.linetypes <- c(
Bayes="dashed",
KNN="solid")
label.colors <- c(
  "0"="#377EB8",   # hex strings were truncated at "#" during extraction;
  "1"="#FF7F00")   # placeholder hues assumed, not the original palette
set.colors <-
  c(test="#E41A1C",
    validation="#4DAF4A",
    Bayes="#984EA3",
    train="black")
errorPlot <- ggplot()+
ggtitle("Select number of neighbors")+
theme_bw()+
theme_animint(height=500)+
geom_text(aes(min.neighbors, error.prop,
color=set, label="Bayes",
showSelected=classifier),
hjust=1,
data=mixtureKNN$Bayes.segment)+
geom_segment(aes(min.neighbors, error.prop,
xend=max.neighbors, yend=error.prop,
color=set,
showSelected=classifier, linetype=classifier),
data=mixtureKNN$Bayes.segment)+
scale_color_manual(values=set.colors, breaks=names(set.colors))+
scale_fill_manual(values=set.colors)+
guides(fill="none", linetype="none")+
scale_linetype_manual(values=classifier.linetypes)+
ylab("Misclassification Errors")+
scale_x_continuous(
"Number of Neighbors",
limits=c(-1, 30),
breaks=c(1, 10, 20, 29))+
geom_ribbon(aes(neighbors, ymin=mean-sd, ymax=mean+sd,
fill=set,
showSelected=classifier,
showSelected2=set),
alpha=0.5,
data=mixtureKNN$validation.error)+
geom_line(aes(neighbors, mean, color=set,
showSelected=classifier, linetype=classifier),
data=mixtureKNN$validation.error)+
geom_line(aes(neighbors, error.prop, group=set, color=set,
showSelected=classifier, linetype=classifier),
data=mixtureKNN$other.error)+
geom_tallrect(aes(xmin=neighbors-1, xmax=neighbors+1,
clickSelects=neighbors),
alpha=0.5,
data=mixtureKNN$validation.error)
errorPlot
scatterPlot <- ggplot()+
ggtitle("Mis-classification errors in train set")+
theme_bw()+
theme_animint(width=500, height=500)+
xlab("Input feature 1")+
ylab("Input feature 2")+
coord_equal()+
scale_color_manual(values=label.colors)+
scale_linetype_manual(values=classifier.linetypes)+
geom_point(aes(V1, V2, color=label,
showSelected=neighbors),
size=0.2,
data=mixtureKNN$show.grid)+
geom_path(aes(V1, V2, group=path.i, linetype=classifier,
showSelected=neighbors),
size=1,
data=mixtureKNN$pred.boundary)+
geom_path(aes(V1, V2, group=path.i, linetype=classifier),
color=set.colors[["test"]],
size=1,
data=mixtureKNN$Bayes.boundary)+
geom_point(aes(V1, V2, color=label,
fill=prediction,
showSelected=neighbors),
size=3,
shape=21,
data=mixtureKNN$show.points)+
scale_fill_manual(values=c(error="black", correct="transparent"))+
geom_text(aes(text.V1.error, text.V2.bottom, label=paste(set, "Error:")),
data=mixtureKNN$Bayes.error,
hjust=0)+
geom_text(aes(text.V1.prop, text.V2.bottom, label=sprintf("%.3f", error.prop)),
data=mixtureKNN$Bayes.error,
hjust=1)+
geom_text(aes(text.V1.error, V2.bottom, label=paste(set, "Error:"),
showSelected=neighbors),
data=mixtureKNN$other.error,
hjust=0)+
geom_text(aes(text.V1.prop, V2.bottom, label=sprintf("%.3f", error.prop),
showSelected=neighbors),
data=mixtureKNN$other.error,
hjust=1)+
geom_text(aes(V1, V2,
showSelected=neighbors,
label=paste0(
neighbors,
" nearest neighbor",
ifelse(neighbors==1, "", "s"),
" classifier")),
data=mixtureKNN$show.text)
scatterPlot+
facet_wrap("neighbors")+
theme(panel.margin=grid::unit(0, "lines"))
viz.neighbors <- list(
error=errorPlot,
data=scatterPlot,
first=list(neighbors=7)
)
info <- animint2HTML(viz.neighbors)
get_nodes <- function(html=getHTML()){
line.list <- getNodeSet(html, "//g[@class='geom2_segment_error']//line")
rect.list <- getNodeSet(
html, "//svg[@id='plot_error']//rect[@class='border_rect']")
rect.attr.mat <- sapply(rect.list, xmlAttrs)
rect.x <- as.numeric(rect.attr.mat["x",])
rect.width <- as.numeric(rect.attr.mat["width",])
rect.right <- rect.x + rect.width
line.attr.mat <- sapply(line.list, xmlAttrs)
list(
ribbon=getNodeSet(html, "//g[@class='geom3_ribbon_error']//path"),
validation=getNodeSet(html, "//g[@class='geom4_line_error']//path"),
train.test=getNodeSet(html, "//g[@class='geom5_line_error']//path"),
Bayes=line.list,
Bayes.x2=if(is.matrix(line.attr.mat))as.numeric(line.attr.mat["x2",]),
border.right=rect.right,
boundary.KNN=getNodeSet(html, "//g[@class='geom8_path_data']//path"),
boundary.Bayes=getNodeSet(html, "//g[@class='geom9_path_data']//path")
)
}
before <- get_nodes(info$html)
test_that("1 <path> rendered for validation error band", {
expect_equal(length(before$ribbon), 1)
})
test_that("1 <path> rendered for validation error mean", {
expect_equal(length(before$validation), 1)
})
test_that("2 <path> rendered for train/test error", {
expect_equal(length(before$train.test), 2)
})
test_that("1 <line> rendered for Bayes error", {
expect_equal(length(before$Bayes), 1)
})
test_that("Bayes error <line> inside of border_rect", {
expect_less_than(before$Bayes.x2, before$border.right)
})
test_that("6 <path> rendered for KNN boundary", {
expect_equal(length(before$boundary.KNN), 6)
})
test_that("2 <path> rendered for Bayes boundary", {
expect_equal(length(before$boundary.Bayes), 2)
})
clickID("plot_data_classifier_variable_Bayes")
click1 <- get_nodes()
test_that("first click, 1 <path> rendered for validation error band", {
expect_equal(length(click1$ribbon), 1)
})
test_that("first click, 1 <path> rendered for validation error mean", {
expect_equal(length(click1$validation), 1)
})
test_that("first click, 2 <path> rendered for train/test error", {
expect_equal(length(click1$train.test), 2)
})
test_that("first click, Bayes error disappears", {
expect_equal(length(click1$Bayes), 0)
})
test_that("first click, 6 <path> rendered for KNN boundary", {
expect_equal(length(click1$boundary.KNN), 6)
})
test_that("first click, Bayes boundary disappears", {
expect_equal(length(click1$boundary.Bayes), 0)
})
clickID("plot_data_classifier_variable_KNN")
click2 <- get_nodes()
test_that("second click, validation error band disappears", {
expect_equal(length(click2$ribbon), 0)
})
test_that("second click, validation error mean disappears", {
expect_equal(length(click2$validation), 0)
})
test_that("second click, train/test error disappears", {
expect_equal(length(click2$train.test), 0)
})
test_that("second click, Bayes error still gone", {
expect_equal(length(click2$Bayes), 0)
})
test_that("second click, KNN boundary disappears", {
expect_equal(length(click2$boundary.KNN), 0)
})
test_that("second click, Bayes boundary still gone", {
expect_equal(length(click2$boundary.Bayes), 0)
}) |
fitted.FPCA <-function (object, K = NULL, derOptns = list(p=0), ciOptns = list(alpha=NULL, cvgMethod=NULL), ...) {
ddd <- list(...)
if (!is.null(ddd[['k']])) {
K <- ddd[['k']]
warning("specifying 'k' is deprecated. Use 'K' instead!")
}
derOptns <- SetDerOptions(fpcaObject = object, derOptns)
p <- derOptns[['p']]
method <- derOptns[['method']]
bw <- derOptns[['bw']]
kernelType <- derOptns[['kernelType']]
alpha <- ciOptns[['alpha']]
  if (!is.null(alpha)) {
if (alpha <= 0 || alpha >= 1) {
stop("'fitted.FPCA()' is requested to use a significant level between 0 and 1.")
}
}
cvgMethod <- ciOptns[['cvgMethod']]
  if (is.null(cvgMethod)) {
cvgMethod <- 'band'
}
fpcaObj <- object
if( is.null(K) ){
K = length( fpcaObj$lambda )
} else {
if( ( round(K)>=0) && ( round(K) <= length( fpcaObj$lambda ) ) ){
K = round(K);
} else {
stop("'fitted.FPCA()' is requested to use more components than it currently has available. (or 'K' is smaller than 0)")
}
}
if( ! (p %in% c(0,1,2))){
stop("'fitted.FPCA()' is requested to use a derivative order other than p = {0,1,2}!")
}
if( p < 1 ){
ZMFV = fpcaObj$xiEst[, seq_len(K), drop = FALSE] %*% t(fpcaObj$phi[, seq_len(K), drop = FALSE]);
IM = fpcaObj$mu
    if (is.null(alpha) || fpcaObj$optns$dataType=='Dense') {
return( t(apply( ZMFV, 1, function(x) x + IM)))
} else {
bwMu <- fpcaObj$bwMu
mu = fpcaObj$mu
phi = fpcaObj$phi
obsGrid = fpcaObj$obsGrid
workGrid = fpcaObj$workGrid
lambda = fpcaObj$lambda
cvgUpper <- cvgLower <- matrix(nrow=nrow(fpcaObj$xiEst), ncol=length(workGrid))
for (i in 1:nrow(fpcaObj$xiEst)) {
xHat <- mu + ZMFV[i,]
muObs <- Lwls1D(bw = bwMu, kernelType, win = rep(1,length(workGrid)),
xin = workGrid, yin = mu, xout = (fpcaObj$inputData)$Lt[[i]])
phiObs <- apply(phi, 2, function(phiI) Lwls1D(bw = bwMu, kernelType, win = rep(1, length(workGrid)),
xin = workGrid, yin = phiI, xout = (fpcaObj$inputData)$Lt[[i]]))
omegaI <- fpcaObj$xiVar[[i]]
tmp <- eigen(omegaI)
tmpA <- Re(tmp$vectors)
tmpB <- Re(tmp$values)
tmpB[which(tmpB<0)] <- 0
if (length(tmpB)==1) {
omegaI <- tmpA*tmpB*t(tmpA)
} else {
omegaI <- tmpA%*%diag(tmpB)%*%t(tmpA)
}
if (cvgMethod=='interval') {
cvgUpper[i,] <- xHat + stats::qnorm(1-alpha/2)*sqrt(diag(phi%*%omegaI%*%t(phi)))
cvgLower[i,] <- xHat + stats::qnorm(alpha/2)*sqrt(diag(phi%*%omegaI%*%t(phi)))
} else {
cvgUpper[i,] <- xHat + sqrt(stats::qchisq(1-alpha,K)*diag(phi%*%omegaI%*%t(phi)))
cvgLower[i,] <- xHat - sqrt(stats::qchisq(1-alpha,K)*diag(phi%*%omegaI%*%t(phi)))
}
}
return(list(
workGrid = workGrid,
fitted = t(apply( ZMFV, 1, function(x) x + IM)),
cvgUpper = cvgUpper,
cvgLower = cvgLower
)
)
}
} else {
if( K > SelectK( fpcaObj, FVEthreshold=0.95, criterion='FVE')$K ){
warning("Potentially you use too many components to estimate derivatives. \n Consider using SelectK() to find a more informed estimate for 'K'.");
}
if( is.null(method) ){
method = 'FPC'
}
mu = fpcaObj$mu
phi = fpcaObj$phi
obsGrid = fpcaObj$obsGrid
workGrid = fpcaObj$workGrid
if ( method == 'FPC'){
phi = apply(phi, 2, function(phiI) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
xin = workGrid, yin = phiI, xout = workGrid, npoly = p, nder = p))
mu = Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)), xin = workGrid, yin = mu, xout = workGrid, npoly = p, nder = p)
ZMFV = fpcaObj$xiEst[, seq_len(K), drop = FALSE] %*% t(phi[, seq_len(K), drop = FALSE]);
IM = mu
return( t(apply( ZMFV, 1, function(x) x + IM) ))
}
if( method == 'QUO'){
impSample <- fitted(fpcaObj, K = K);
return( t(apply(impSample, 1, function(curve) Lwls1D(bw = bw, kernelType, win = rep(1, length(workGrid)),
xin = workGrid, yin = curve, xout = workGrid, npoly = p, nder = p))))
} else if (method == 'DPC') {
if (K > ncol(fpcaObj[['xiDer']])) {
stop('fpcaObj does not contain K columns!')
}
return(tcrossprod(fpcaObj[['xiDer']][, seq_len(K), drop=FALSE],
fpcaObj[['phiDer']][, seq_len(K), drop=FALSE]))
}else {
      stop('You asked for a derivative estimation scheme that is not implemented.')
}
}
}
getEnlargedGrid <- function(x){
N <- length(x)
return ( c( x[1] - 0.1 * diff(x[1:2]), x, x[N] + 0.1 * diff(x[(N-1):N])) )
}
getDerivative <- function(y, t, ord=1){
if( length(y) != length(t) ){
stop("getDerivative y/t lengths are unequal.")
}
newt = getEnlargedGrid(t)
newy = Hmisc::approxExtrap(x=t, y=y, xout= newt)$y
if (ord == 1) {
der <- numDeriv::grad( stats::splinefun(newt, newy) , x = t )
} else if (ord == 2) {
der <- sapply(t, function(t0)
numDeriv::hessian( stats::splinefun(newt, newy) , x = t0 )
)
}
return(der)
}
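# Usage sketch for getDerivative() above (hypothetical, kept as a comment in
# package code; assumes Hmisc and numDeriv are installed): the first
# derivative of sin should track cos.
# tt <- seq(0, 1, length.out = 51)
# max(abs(getDerivative(y = sin(tt), t = tt, ord = 1) - cos(tt)))  # small error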
getSmoothCurve <- function(t, ft, GCV = FALSE, kernelType = 'epan', mult = 1){
myBw = ifelse( GCV, GCVLwls1D1( yy= ft, tt =t, npoly=1, nder=0, dataType='Sparse', kernel=kernelType)[['bOpt']] ,
CVLwls1D( y= ft, t = t, npoly=1, nder=0, dataType='Sparse', kernel=kernelType, kFolds = 10))
myBw <- myBw * mult
smoothCurve = Lwls1D(bw = myBw, kernel_type= kernelType, win = rep(1, length(t)), yin = ft, xout = t, xin= t)
return(smoothCurve)
} |
.summary_uncertainty <- function(x, sort=TRUE)
{
uct <- x$uctab
if (sort)
uct <- uct[order(uct$split, -uct$R, -uct$I),]
uct
} |
geom_exec <- function (geomfunc = NULL, data = NULL,
position = NULL, ...) {
params <- list(...)
mapping <-
list()
option <- list()
allowed_options <- c(
"x", "y", "color", "colour", "linetype", "fill", "size", "shape", "width",
"alpha", "na.rm", "lwd", "pch", "cex", "position", "stat", "geom",
"show.legend", "inherit.aes", "fun.args", "fontface",
"stroke",
"outlier.colour", "outlier.shape", "outlier.size",
"outlier.stroke", "notch", "notchwidth", "varwidth",
"binwidth", "binaxis", "method", "binpositions",
"stackdir", "stackratio", "dotsize",
"trim", "draw_quantiles", "scale",
"ymin", "ymax", "xmin", "xmax",
"label", "hjust", "vjust", "fontface", "angle", "family", "parse",
"segment.size", "force",
"se", "level", "fullrange",
"conf.int.level",
"xintercept", "yintercept",
"bins", "weight",
"sides",
"arrow", "xend", "yend",
"fun.data", "fun.y", "fun.ymin", "fun.ymax",
"y.position", "tip.length", "label.size", "step.increase",
"bracket.nudge.y", "bracket.shorten", "coord.flip"
)
columns <- colnames(data)
for (key in names(params)) {
value <- params[[key]]
    if (is.null(value)) {
      next  # skip unset arguments
    }
else if (unlist(value)[1] %in% columns & key %in% allowed_options) {
mapping[[key]] <- value
}
else if (key %in% allowed_options) {
option[[key]] <- value
}
else if (key =="group") {
mapping[[key]] <- value
}
else if(key == "step.group.by"){
option[[key]] <- value
}
}
if (!is.null(position))
option[["position"]] <- position
option[["data"]] <- data
if(is.null(geomfunc)){
res <- list(option = option, mapping = mapping)
}
else{
option[["mapping"]] <- create_aes(mapping)
res <- do.call(geomfunc, option)
}
res
} |
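# Usage sketch for geom_exec() above: quoted column names become aesthetics,
# anything else a fixed parameter. Shown but not run, since create_aes()
# (called internally) is defined elsewhere in the package.
# ggplot2::ggplot() +
#   geom_exec(ggplot2::geom_point, data = iris,
#             x = "Sepal.Length", y = "Sepal.Width",
#             color = "Species", alpha = 0.7)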