context("Expected input arguments and output in modelPop")
test_that("Incorrect/missing input arguments yields errors in modelPop", {
expect_error(modelPop(nPop="a", numVar=6, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2)),
"Argument nPop should be positive numeric, e.g., 50.")
expect_error(modelPop(nPop=25, numVar="a", longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2)),
"Argument numVar should be positive numeric, e.g., 6.")
expect_error(modelPop(nPop=25, numVar=NULL, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2)),
"Argument numVar cannot be missing.")
expect_error(modelPop(nPop=25, numVar=6, longitudinal=2,
consMatrix=matrix(c(1, 2), 1, 2)),
"Argument longitudinal should be either logical TRUE or FALSE.")
expect_error(modelPop(nPop=25, numVar=6, longitudinal=NULL,
consMatrix=matrix(c(1, 2), 1, 2)),
"Argument longitudinal cannot be missing.")
expect_error(modelPop(nPop=25, numVar=6, longitudinal=FALSE,
consMatrix=1),
"The constraints should be formed in a matrix.")
expect_error(modelPop(nPop=25, numVar=6, longitudinal=FALSE,
consMatrix=NULL),
"Argument consMatrix cannot be missing.")
})
test_that("Correct input arguments yield expected output in modelPop", {
expect_true(is.matrix(modelPop(nPop=25, numVar=6, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2))))
expect_equal(nrow(modelPop(nPop=25, numVar=6, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2))), 25)
expect_equal(nrow(modelPop(nPop=1, numVar=6, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2))), 16)
expect_equal(nrow(modelPop(nPop=16, numVar=6, longitudinal=FALSE,
consMatrix=matrix(c(1, 2), 1, 2))), 16)
expect_equal(nrow(modelPop(nPop=1, numVar=6, longitudinal=TRUE,
consMatrix=matrix(c(1, 2), 1, 2))), 52)
expect_equal(nrow(modelPop(nPop=52, numVar=6, longitudinal=TRUE,
consMatrix=matrix(c(1, 2), 1, 2))), 52)
expect_equal(nrow(modelPop(nPop=60, numVar=6, longitudinal=TRUE,
consMatrix=matrix(c(1, 2), 1, 2))), 60)
})
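## Usage sketch (not part of the test file above): a direct call mirroring
## the arguments the tests exercise. Assumes modelPop is loaded.
if (FALSE) {
  pop <- modelPop(nPop = 25, numVar = 6, longitudinal = FALSE,
                  consMatrix = matrix(c(1, 2), 1, 2))
  dim(pop)  # 25 rows, one candidate model per row
}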
WFSCapabilities <- R6Class("WFSCapabilities",
inherit = OWSCapabilities,
private = list(
xmlElement = "Capabilities",
xmlNamespacePrefix = "WFS",
featureTypes = NA,
fetchFeatureTypes = function(xmlObj, version){
wfsNs <- NULL
if(all(class(xmlObj) == c("XMLInternalDocument","XMLAbstractDocument"))){
namespaces <- OWSUtils$getNamespaces(xmlObj)
wfsNs <- OWSUtils$findNamespace(namespaces, id = "wfs")
}
featureTypesXML <- getNodeSet(xmlObj, "//ns:FeatureType", wfsNs)
featureTypesList <- lapply(featureTypesXML, function(x){
WFSFeatureType$new(x, self, version, logger = self$loggerType)
})
return(featureTypesList)
}
),
public = list(
initialize = function(url, version, logger = NULL, ...) {
super$initialize(
      element = private$xmlElement, namespacePrefix = private$xmlNamespacePrefix,
url, service = "WFS", owsVersion = "1.1", serviceVersion = version, logger = logger,
...)
xmlObj <- self$getRequest()$getResponse()
private$featureTypes = private$fetchFeatureTypes(xmlObj, version)
},
getFeatureTypes = function(pretty = FALSE){
fts <- private$featureTypes
if(pretty){
fts <- do.call("rbind", lapply(fts, function(x){
return(data.frame(
name = x$getName(),
title = x$getTitle(),
stringsAsFactors = FALSE
))
}))
}
return(fts)
},
    findFeatureTypeByName = function(expr, exact = TRUE){
      result <- lapply(private$featureTypes, function(x){
        ft <- NULL
        # Honor 'exact': compare the full name, otherwise treat 'expr' as a regex.
        matched <- if(exact) x$getName() == expr else grepl(expr, x$getName())
        if(matched){
          ft <- x
        }
        return(ft)
      })
result <- result[!sapply(result, is.null)]
if(length(result) == 1) result <- result[[1]]
return(result)
}
)
)
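## Usage sketch (endpoint URL is illustrative; assumes an ows4R-style stack
## providing OWSCapabilities, OWSUtils and WFSFeatureType):
if (FALSE) {
  caps <- WFSCapabilities$new("https://example.com/geoserver/wfs", version = "2.0.0")
  caps$getFeatureTypes(pretty = TRUE)          # data.frame of name/title
  caps$findFeatureTypeByName("topp:states")    # exact-name lookup
}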
context("Testing haplo.cc with and without covariates output")
tmp <- Sys.setlocale("LC_ALL", "C")
tmp <- Sys.getlocale()
options(stringsAsFactors=FALSE)
data(hla.demo)
label <- c("DQB","DRB","B")
y.bin <- 1*(hla.demo$resp.cat=="low")
geno <- as.matrix(hla.demo[,c(17,18,21:24)])
seed <- c(17, 53, 1, 40, 37, 0, 62, 56, 5, 52, 12, 1)
set.seed(seed)
cc.hla.adj <- haplo.cc(y.bin, geno, x.adj=hla.demo[,c("male","age")],
miss.val=0,locus.label=label,
control=haplo.glm.control(haplo.min.count=8,
em.c=haplo.em.control()))
set.seed(seed)
ntest <- 200
geno.test <- cbind(sample(1:2, size=ntest, replace=TRUE),
sample(1:2, size=100, replace=TRUE),
sample(2:3,size=ntest, replace=TRUE),
sample(2:3, size=100, replace=TRUE),
sample(2:4,size=ntest, replace=TRUE, prob=c(.5,.35,.15)),
sample(2:4, size=100, replace=TRUE, prob=c(.5,.35,.15)))
y.test <- sample(1:2,size=ntest, replace=TRUE,prob=c(.6, .4)) - 1
x.test <- cbind(rbinom(nrow(geno.test), 1, prob=.3), round(rnorm(nrow(geno.test), mean=50, sd=4)))
locus.label <- c("A", "B", "C")
set.seed(seed)
cc.test <- haplo.cc(y.test, geno.test, locus.label=locus.label,
ci.prob=.95, control=haplo.glm.control(haplo.min.count=4))
if (FALSE) {
saveRDS(cc.test, file="cc.test.rds")
saveRDS(cc.hla.adj, file="cc.hla.adj.rds")
}
cc.hla.adj.save <- readRDS("cc.hla.adj.rds")
cc.test.save <- readRDS("cc.test.rds")
test_that("Data.frames from haplo.cc", {
expect_equal(cc.hla.adj$cc.df, expected=cc.hla.adj.save$cc.df, tolerance=1e-3)
expect_equal(cc.test$cc.df, expected=cc.test.save$cc.df, tolerance=1e-3)
})
linear_reg <-
function(mode = "regression",
engine = "lm",
penalty = NULL,
mixture = NULL) {
args <- list(
penalty = enquo(penalty),
mixture = enquo(mixture)
)
new_model_spec(
"linear_reg",
args = args,
eng_args = NULL,
mode = mode,
method = NULL,
engine = engine
)
}
print.linear_reg <- function(x, ...) {
cat("Linear Regression Model Specification (", x$mode, ")\n\n", sep = "")
model_printer(x, ...)
if (!is.null(x$method$fit$args)) {
cat("Model fit template:\n")
print(show_call(x))
}
invisible(x)
}
translate.linear_reg <- function(x, engine = x$engine, ...) {
x <- translate.default(x, engine, ...)
if (engine == "glmnet") {
.check_glmnet_penalty_fit(x)
if (any(names(x$eng_args) == "path_values")) {
x$method$fit$args$lambda <- x$eng_args$path_values
x$eng_args$path_values <- NULL
x$method$fit$args$path_values <- NULL
} else {
x$method$fit$args$lambda <- NULL
}
x$args$penalty <- rlang::eval_tidy(x$args$penalty)
}
x
}
update.linear_reg <-
function(object,
parameters = NULL,
penalty = NULL, mixture = NULL,
fresh = FALSE, ...) {
eng_args <- update_engine_parameters(object$eng_args, ...)
if (!is.null(parameters)) {
parameters <- check_final_param(parameters)
}
args <- list(
penalty = enquo(penalty),
mixture = enquo(mixture)
)
args <- update_main_parameters(args, parameters)
if (fresh) {
object$args <- args
object$eng_args <- eng_args
} else {
null_args <- map_lgl(args, null_value)
if (any(null_args))
args <- args[!null_args]
if (length(args) > 0)
object$args[names(args)] <- args
if (length(eng_args) > 0)
object$eng_args[names(eng_args)] <- eng_args
}
new_model_spec(
"linear_reg",
args = object$args,
eng_args = object$eng_args,
mode = object$mode,
method = NULL,
engine = object$engine
)
}
check_args.linear_reg <- function(object) {
args <- lapply(object$args, rlang::eval_tidy)
if (all(is.numeric(args$penalty)) && any(args$penalty < 0))
rlang::abort("The amount of regularization should be >= 0.")
if (is.numeric(args$mixture) && (args$mixture < 0 | args$mixture > 1))
rlang::abort("The mixture proportion should be within [0,1].")
if (is.numeric(args$mixture) && length(args$mixture) > 1)
rlang::abort("Only one value of `mixture` is allowed.")
invisible(object)
}
.organize_glmnet_pred <- function(x, object) {
if (ncol(x) == 1) {
res <- x[, 1]
res <- unname(res)
} else {
n <- nrow(x)
res <- utils::stack(as.data.frame(x))
if (!is.null(object$spec$args$penalty))
res$lambda <- rep(object$spec$args$penalty, each = n) else
res$lambda <- rep(object$fit$lambda, each = n)
res <- res[, colnames(res) %in% c("values", "lambda")]
}
res
}
predict._elnet <-
function(object, new_data, type = NULL, opts = list(), penalty = NULL, multi = FALSE, ...) {
if (any(names(enquos(...)) == "newdata"))
rlang::abort("Did you mean to use `new_data` instead of `newdata`?")
if (is.null(penalty) & !is.null(object$spec$args$penalty)) {
penalty <- object$spec$args$penalty
}
object$spec$args$penalty <- .check_glmnet_penalty_predict(penalty, object, multi)
object$spec <- eval_args(object$spec)
predict.model_fit(object, new_data = new_data, type = type, opts = opts, ...)
}
predict_numeric._elnet <- function(object, new_data, ...) {
if (any(names(enquos(...)) == "newdata"))
rlang::abort("Did you mean to use `new_data` instead of `newdata`?")
object$spec <- eval_args(object$spec)
predict_numeric.model_fit(object, new_data = new_data, ...)
}
predict_raw._elnet <- function(object, new_data, opts = list(), ...) {
if (any(names(enquos(...)) == "newdata"))
rlang::abort("Did you mean to use `new_data` instead of `newdata`?")
object$spec <- eval_args(object$spec)
opts$s <- object$spec$args$penalty
predict_raw.model_fit(object, new_data = new_data, opts = opts, ...)
}
multi_predict._elnet <-
function(object, new_data, type = NULL, penalty = NULL, ...) {
if (any(names(enquos(...)) == "newdata"))
rlang::abort("Did you mean to use `new_data` instead of `newdata`?")
dots <- list(...)
object$spec <- eval_args(object$spec)
if (is.null(penalty)) {
if (!is.null(object$spec$args$penalty)) {
penalty <- object$spec$args$penalty
} else {
penalty <- object$fit$lambda
}
}
pred <- predict._elnet(object, new_data = new_data, type = "raw",
opts = dots, penalty = penalty, multi = TRUE)
param_key <- tibble(group = colnames(pred), penalty = penalty)
pred <- as_tibble(pred)
pred$.row <- 1:nrow(pred)
pred <- gather(pred, group, .pred, -.row)
pred <- full_join(param_key, pred, by = "group")
pred$group <- NULL
pred <- arrange(pred, .row, penalty)
.row <- pred$.row
pred$.row <- NULL
pred <- split(pred, .row)
names(pred) <- NULL
tibble(.pred = pred)
}
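## Usage sketch (not from the package source): the public API defined above,
## fit with the glmnet engine. Assumes parsnip and glmnet are installed;
## mtcars is used purely for illustration.
if (FALSE) {
  library(parsnip)
  spec <- linear_reg(penalty = 0.1, mixture = 1)
  spec <- set_engine(spec, "glmnet")
  fitted <- fit(spec, mpg ~ ., data = mtcars)
  predict(fitted, new_data = mtcars[1:3, ])
  multi_predict(fitted, new_data = mtcars[1:3, ], penalty = c(0.01, 0.1))
}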
copy_vignettes <- function(pkg, keep_md) {
pkg <- as.package(pkg)
usethis_use_directory(pkg, "doc", ignore = TRUE)
usethis_use_git_ignore(pkg, "/doc/")
doc_dir <- path(pkg$path, "doc")
vignettes <- tools::pkgVignettes(dir = pkg$path, output = TRUE, source = TRUE)
if (length(vignettes$docs) == 0) {
return(invisible())
}
md_outputs <- character()
if (isTRUE(keep_md)) {
md_outputs <- dir_ls(path = vignettes$dir, regexp = "[.]md$")
}
out_mv <- unique(c(
md_outputs,
vignettes$outputs,
unlist(vignettes$sources, use.names = FALSE)
))
out_cp <- vignettes$docs
cli::cli_alert_info("Moving {.file {path_file(out_mv)}} to {.path doc/}")
file_copy(out_mv, doc_dir, overwrite = TRUE)
file_delete(out_mv)
cli::cli_alert_info("Copying {.file {path_file(out_cp)}} to {.path doc/}")
file_copy(out_cp, doc_dir, overwrite = TRUE)
extra_files <- find_vignette_extras(pkg)
if (length(extra_files) == 0) {
return(invisible())
}
cli::cli_alert_info("Copying extra files {.file {path_file(extra_files)}} to {.path doc/}")
file_copy(extra_files, doc_dir)
invisible()
}
find_vignette_extras <- function(pkg = ".") {
pkg <- as.package(pkg)
vig_path <- path(pkg$path, "vignettes")
extras_file <- path(vig_path, ".install_extras")
if (!file_exists(extras_file)) {
return(character())
}
extras <- readLines(extras_file, warn = FALSE)
if (length(extras) == 0) {
return(character())
}
all_files <- path_rel(dir_ls(vig_path, all = TRUE), vig_path)
re <- paste0(extras, collapse = "|")
files <- grep(re, all_files, perl = TRUE, ignore.case = TRUE, value = TRUE)
path_real(path(vig_path, files))
}
create.group <- function(pathway, rs){
pd <- pathway[pathway$SNP %in% rs, ]
GeneInGroup <- unique(pd$Gene)
ngene <- length(GeneInGroup)
GeneIdx <- list()
vGeneIdx <- NULL
GeneStartEnd <- matrix(NA, ngene, 2)
rownames(GeneStartEnd) <- GeneInGroup
colnames(GeneStartEnd) <- c("Start", "End")
N.SNP <- NULL
for(g in 1:ngene){
gene <- GeneInGroup[g]
snps <- pd$SNP[pd$Gene == gene]
N.SNP <- c(N.SNP, length(snps))
GeneIdx[[g]] <- rep(NA, length(snps))
for(i in 1:length(snps)){
GeneIdx[[g]][i] <- which(rs == snps[i])
}
GeneStartEnd[gene, "Start"] <- length(vGeneIdx) + 1
vGeneIdx <- c(vGeneIdx, GeneIdx[[g]])
GeneStartEnd[gene, "End"] <- length(vGeneIdx)
}
list(GeneInGroup = GeneInGroup, GeneIdx = GeneIdx,
vGeneIdx = vGeneIdx, GeneStartEnd = GeneStartEnd,
N.SNP = N.SNP)
}
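## Usage sketch (illustrative data, not from the package): a toy pathway
## table mapping SNPs to genes, plus the SNP ordering `rs`.
if (FALSE) {
  pathway <- data.frame(SNP  = c("rs1", "rs2", "rs3", "rs4"),
                        Gene = c("G1", "G1", "G2", "G2"))
  rs <- c("rs1", "rs2", "rs3", "rs4")
  grp <- create.group(pathway, rs)
  grp$GeneStartEnd  # per-gene start/end positions within vGeneIdx
}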
library(openintro)
library(tidyverse)
library(usethis)
unemploy_pres_orig <- unemploy_pres
unemploy_pres <- unemploy_pres_orig %>%
mutate(
party = if_else(party == "Democrate", "Democratic", "Republican"),
party = as.factor(party)
)
use_data(unemploy_pres, overwrite = TRUE)
library(PortfolioAnalytics)
library(ROI)
library(ROI.plugin.quadprog)
library(ROI.plugin.glpk)
library(ROI.plugin.symphony)
data(edhec)
R <- edhec[, 1:4]
funds <- colnames(R)
init.portf <- portfolio.spec(assets=funds)
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")
init.portf <- add.constraint(portfolio=init.portf, type="long_only")
maxret.portf <- add.objective(portfolio=init.portf, type="return", name="mean")
mines.portf <- add.objective(portfolio=init.portf, type="risk", name="ES")
minsd.portf <- add.objective(portfolio=init.portf, type="risk", name="StdDev")
qu.portf <- add.objective(portfolio=init.portf, type="risk", name="StdDev",
risk_aversion=0.25)
qu.portf <- add.objective(portfolio=qu.portf, type="return", name="mean")
opt.maxret.roi <- optimize.portfolio(R, maxret.portf, optimize_method="ROI")
opt.maxret.glpk <- optimize.portfolio(R, maxret.portf, optimize_method="glpk")
opt.maxret.symphony <- optimize.portfolio(R, maxret.portf, optimize_method="symphony")
all.equal(extractStats(opt.maxret.roi), extractStats(opt.maxret.glpk))
all.equal(extractStats(opt.maxret.roi), extractStats(opt.maxret.symphony))
opt.mines.roi <- optimize.portfolio(R, mines.portf, optimize_method="ROI")
opt.mines.glpk <- optimize.portfolio(R, mines.portf, optimize_method="glpk")
opt.mines.symphony <- optimize.portfolio(R, mines.portf, optimize_method="symphony")
all.equal(extractStats(opt.mines.roi), extractStats(opt.mines.glpk))
all.equal(extractStats(opt.mines.roi), extractStats(opt.mines.symphony))
opt.minsd.roi <- optimize.portfolio(R, minsd.portf, optimize_method="ROI")
opt.minsd.qp <- optimize.portfolio(R, minsd.portf, optimize_method="quadprog")
all.equal(extractStats(opt.minsd.roi), extractStats(opt.minsd.qp))
opt.qu.roi <- optimize.portfolio(R, qu.portf, optimize_method="ROI")
opt.qu.qp <- optimize.portfolio(R, qu.portf, optimize_method="quadprog")
all.equal(extractStats(opt.qu.roi), extractStats(opt.qu.qp))
library(lattice)
data(volcano)
foo <-
data.frame(z = as.vector(volcano),
x = rep(1:87, 61),
y = rep(1:61, each = 87))
wireframe(z ~ x * y, foo)
wireframe(z ~ x * y, foo, subset = z > 150)
if (FALSE)
{
wireframe(z + I(z + 100) ~ x * y, foo,
subset = z > 150,
scales = list(arrows = FALSE))
}
wireframe(z + I(z + 100) ~ x * y, foo)
bar <- foo
bar$z[bar$z < 150] <- NA
wireframe(z + I(z + 100) ~ x * y, bar,
scales = list(arrows = FALSE))
if (FALSE)
{
wireframe(z + I(z + 100) ~ x * y,
subset(bar, !is.na(z)),
scales = list(arrows = FALSE))
}
library(lattice)
n <- 20
psteps <- 50
binomtable <- function(n, psteps)
{
x <- (0:(10*n))/10
p <- (0:psteps)/psteps
dd <- expand.grid(x=x,p=p)
dd$F <- pbinom(dd$x,n,dd$p)
dd$x0 <- trunc(dd$x)
dd
}
bt <- binomtable(n = 5, psteps = 100)
bt[bt$x - bt$x0 >= 0.9, ]$F <- NA
if (FALSE)
{
wireframe(F ~ x * p, bt,
groups = bt$x0, shade = TRUE,
scales = list(arrows = FALSE))
}
wireframe(F ~ x * p, bt, shade = TRUE,
scales = list(arrows = FALSE))
wireframe(F ~ x * p | factor(x0), bt,
shade = TRUE,
scales = list(arrows = FALSE))
context("Test: getGFF()")
test_that("The getGFF() interface works properly for NCBI RefSeq (repeating command)..",{
skip_on_cran()
skip_on_travis()
getGFF( db = "refseq",
organism = "Saccharomyces cerevisiae",
path = tempdir())
getGFF( db = "refseq",
organism = "Saccharomyces cerevisiae",
path = tempdir())
})
test_that("The getGFF() interface works properly for NCBI RefSeq using taxid (repeating command)..",{
skip_on_cran()
skip_on_travis()
getGFF( db = "refseq",
organism = "559292",
path = tempdir())
getGFF( db = "refseq",
organism = "559292",
path = tempdir())
})
test_that("The getGFF() interface works properly for NCBI Genbank (repeating command)..",{
skip_on_travis()
skip_on_cran()
getGFF( db = "genbank",
organism = "Saccharomyces cerevisiae",
path = tempdir())
getGFF( db = "genbank",
organism = "Saccharomyces cerevisiae",
path = tempdir())
})
test_that("The getGFF() interface works properly for Ensembl (repeating command)",{
skip_on_cran()
skip_on_travis()
getGFF( db = "ensembl",
organism = "Saccharomyces cerevisiae",
path = tempdir())
getGFF( db = "ensembl",
organism = "Saccharomyces cerevisiae",
path = tempdir())
})
context("LRU cache")
`%is%` <- expect_equal
test_that("cache stores values", {
store <- lru_cache()
store("foo", 1)
store("bar", 2)
store("baz", 3)
expect_equal(1, store("foo", stop("should not be evaluated")))
expect_equal(2, store("bar", stop("should not be evaluated")))
expect_equal(3, store("baz", stop("should not be evaluated")))
expect_equal(2, store("bar", 4))
})
test_that("cache incrementally expires old values", {
store <- lru_cache(3)
store("foo", 1)
store("bar", 2)
store("baz", 3)
store("qux", 4)
expect_equal(4, store("qux", stop("should not be evaluated")))
expect_equal(3, store("baz", stop("should not be evaluated")))
expect_equal(2, store("bar", stop("should not be evaluated")))
expect_equal(100, store("foo", 100))
})
test_that("cache expires least recently accessed values", {
store <- lru_cache(3)
store("foo", 1)
store("bar", 2)
store("baz", 3)
expect_equal(1, store("foo", stop("should not be evaluated")))
expect_equal(4, store("qux", 4))
expect_equal(100, store("bar", 100))
expect_equal(4, store("qux", stop("should not be evaluated")))
expect_equal(200, store("baz", 200))
})
test_that("cache_stats extracts stats", {
fib <- function(x) if (x <= 1) 1 else fib(x-1) + fib(x-2)
fib <- memo(fib, key=pointer_key)
fib(30)
cache_stats(fib) %is%
list(size = 5000, used = 31, hits = 28, misses = 31, expired = 0)
})
ppc.plot <- function(llratio.s,llratio.r){
if(all(is.na(llratio.s))){stop("No valid llratios were computed for the simulated data")}
Mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]}
h <- hist(llratio.s[-which(llratio.s==Mode(llratio.s))],breaks=20,plot=FALSE)
if(sum(llratio.s==Mode(llratio.s))/length(llratio.s)>=.10){
hist(llratio.s[-which(llratio.s==Mode(llratio.s))],ylim=c(0,max(sum(llratio.s==Mode(llratio.s)),h$counts[1])),
xlim=c(0,max(max(llratio.s),max(llratio.r))),
xlab=expression(italic(D)),ylab="Frequency",main="",breaks=20)
segments(x0=Mode(llratio.s),y0=0,x1=Mode(llratio.s),y1=sum(llratio.s==Mode(llratio.s)),col="black",lwd=5)
abline(v=llratio.r,col="red")
}else{
hist(llratio.s,main="",freq=TRUE,breaks=seq(0,max(llratio.s),length.out=40),
xlim=c(0,max(max(llratio.s),llratio.r)),xlab=expression(italic(D)))
abline(v=llratio.r,col="red")}
}
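## Usage sketch (simulated log-likelihood ratios, purely illustrative):
if (FALSE) {
  set.seed(1)
  llratio.s <- c(rep(0, 30), rchisq(170, df = 2))  # D values for simulated data
  llratio.r <- 5                                   # D value for the observed data
  ppc.plot(llratio.s, llratio.r)
}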
test_that("as.term.nlist", {
rlang::scoped_options(lifecycle_verbosity = "quiet")
expect_identical(
as.term(nlist()),
term()
)
expect_identical(
as.term(nlist(x = 1)),
term("x")
)
expect_identical(
as.term(nlist(x = 1:2)),
term("x[1]", "x[2]")
)
expect_identical(
as.term(nlist(x = 1, y = 2:3)),
term(c("x", "y[1]", "y[2]"))
)
expect_identical(
as.term(nlist(x = 1, y = matrix(1:3, c(1, 3)))),
term(c("x", "y[1,1]", "y[1,2]", "y[1,3]"))
)
})
test_that("as.term.nlists", {
rlang::scoped_options(lifecycle_verbosity = "quiet")
expect_identical(
as.term(nlists()),
term(x = 0)
)
expect_identical(
as.term(nlists(nlist())),
term(x = 0)
)
expect_identical(
as.term(nlists(nlist(x = 1))),
term("x")
)
expect_identical(
as.term(nlists(nlist(x = 1:2))),
term(x = 2)
)
expect_identical(
as.term(nlists(
nlist(x = 1, y = matrix(1:3, c(1, 3))),
nlist(x = 1, y = matrix(1:3, c(1, 3)))
)),
term("x", y = c(1, 3))
)
})
test_that("as_term.nlist", {
expect_identical(
as_term(nlist()),
term()
)
expect_identical(
as_term(nlist(x = 1)),
term("x")
)
expect_identical(
as_term(nlist(x = 1:2)),
term("x[1]", "x[2]")
)
expect_identical(
as_term(nlist(x = 1, y = 2:3)),
term(c("x", "y[1]", "y[2]"))
)
expect_identical(
as_term(nlist(x = 1, y = matrix(1:3, c(1, 3)))),
term(c("x", "y[1,1]", "y[1,2]", "y[1,3]"))
)
})
test_that("as_term.nlists", {
expect_identical(
as_term(nlists()),
term(x = 0)
)
expect_identical(
as_term(nlists(nlist())),
term(x = 0)
)
expect_identical(
as_term(nlists(nlist(x = 1))),
term("x")
)
expect_identical(
as_term(nlists(nlist(x = 1:2))),
term(x = 2)
)
expect_identical(
as_term(nlists(
nlist(x = 1, y = matrix(1:3, c(1, 3))),
nlist(x = 1, y = matrix(1:3, c(1, 3)))
)),
term("x", y = c(1, 3))
)
})
g.part5.definedays = function(nightsi, wi, indjump, nightsi_bu,
ws3new, qqq_backup=c(), ts, Nts, timewindowi, Nwindows) {
qqq = rep(0,2)
if (timewindowi == "MM") {
if (nightsi[1] == 1) wi = wi + 1
if (length(nightsi) >= wi) {
if (wi==1) {
qqq[1] = 1
qqq[2] = nightsi[wi]
} else if (wi<=length(nightsi)) {
qqq[1] = nightsi[wi-1] + 1
qqq[2] = nightsi[wi]
qqq_backup = qqq
} else if (wi>length(nightsi)) {
qqq[1] = qqq_backup[2] + 1
if (wi <= length(nightsi)) {
qqq[2] = nightsi[wi]
} else {
if (wi-indjump <= length(nightsi)) {
tmp1 = which(nightsi_bu == nightsi[wi-indjump])
qqq[2] = nightsi_bu[tmp1 + indjump]
indjump = indjump + 1
if (is.na(qqq[2])) {
index_lastmidn = which(nightsi_bu == nightsi[wi-(indjump-1)]) + (indjump-1)
if (length(index_lastmidn) > 0) {
qqq[2] = nightsi_bu[index_lastmidn] + (24*(60/ws3new) * 60) -1
} else {
qqq[2] = NA
}
}
} else {
qqq[2] = NA
}
if (is.na(qqq[2])) {
qqq[2] = qqq_backup[2] + (24*(60/ws3new) * 60) -1
}
if (qqq[1] == qqq[2]) qqq[2] = qqq[2] + (24*(60/ws3new) * 60) - 1
}
if(is.na(qqq[2])==TRUE | Nts < qqq[2]) {
qqq[2] = Nts
}
}
} else {
qqq = c(NA,NA)
}
} else if(timewindowi == "WW") {
if (wi <=(Nwindows-1)) {
qqq[1] = which(diff(ts$diur) == -1)[wi] + 1
qqq[2] = which(diff(ts$diur) == -1)[wi+1]
} else {
qqq = c(NA, NA)
}
}
return(invisible(list(qqq=qqq,qqq_backup=qqq_backup)))
}
library(stationery)
options <- commandArgs(trailingOnly = TRUE)
supported_extensions <- c(".Rmd")
files <- c()
base_command <- "rmd2pdf"
arguments <- c()
debug_level = 0
debug <- function(output, level=1) {
if ( debug_level >= level ){
cat("DEBUG: ")
print(output)
}
}
for ( option in options ){
if ( startsWith(option, "--") ){
arguments <- c(arguments, option)
} else {
files <- c(files, option)
}
}
debug("Input files: ")
debug(files)
debug("Input arguments: ")
debug(arguments)
if ( "--help" %in% arguments ){
print(help(base_command, help_type="html"))
cat("Usage information requested. No files were modified.")
Sys.sleep(10)
} else {
if ( is.null(files) ){
cat("No input files specified. Enter the supported files to compile (all): ")
in_files <- readLines("stdin", n=1)
in_files <- strsplit(in_files, " ")
debug("Files entered:")
debug(in_files)
if ( in_files[[1]][1] == "all" ){
for ( extension in supported_extensions ){
files <- c( files, Sys.glob( paste(sep="", "*", extension) ) )
}
} else {
files <- in_files[[1]]
}
}
for ( file in files ){
if ( !any( endsWith(file, supported_extensions) ) ){
cat(paste("WARN: Skipping compilation of file with unsupported extension:", file, "\n"))
} else {
arg_string <- ""
arg_names <- c()
for ( argument in arguments ){
argument <- strsplit(argument, "=")
if ( !( argument[[1]][1] %in% arg_names ) ){
arg_names <- c(arg_names, argument[[1]][1])
arg_string <- paste(arg_string, substring(argument[[1]][1], 3), "=\"", argument[[1]][2], "\"", ",", sep="")
}
}
arg_string <- substring(arg_string, 1, nchar(arg_string)-1)
debug("Argument string:")
debug(arg_string)
function_call <- paste(sep="", base_command, '("', file, '"')
if ( arg_string != "" ){
function_call <- paste(sep="", function_call, ", ", arg_string)
}
function_call <- paste(sep="", function_call, ")")
debug("Function call:")
debug(function_call)
eval(parse(text=function_call))
}
}
}
library(checkmate)
library(testthat)
library(raster)
context("getGroups")
test_that("getGroups of a 'geom'", {
output <- getGroups(gtGeoms$grid$categorical)
expect_data_frame(output, any.missing = FALSE, nrows = 9, ncols = 2)
expect_names(x = names(output), permutation.of = c("gid", "cover"))
output <- getGroups(gtGeoms$grid$continuous)
expect_data_frame(output, any.missing = FALSE, nrows = 91, ncols = 1)
expect_names(x = names(output), permutation.of = c("gid"))
})
test_that("getGroups of a Raster* object", {
input <- gtRasters$continuous
output <- getGroups(input)
expect_data_frame(output, any.missing = FALSE, nrows = 0, ncols = 1)
expect_names(x = names(output), permutation.of = c("gid"))
input <- gtRasters$categorical
output <- getGroups(input)
expect_tibble(output, any.missing = FALSE, nrows = 9, ncols = 2)
expect_names(names(output), permutation.of = c("gid", "cover"))
})
test_that("getGroups returns a given raster attribute table", {
input <- gtRasters$categorical
output <- getGroups(input)
expect_data_frame(output, any.missing = FALSE, nrows = 9, ncols = 2)
expect_names(names(output), identical.to = c("gid", "cover"))
})
test_that("getGroups of any other object", {
output <- getGroups("bla")
expect_null(object = output)
})
library(xts)
library(qrmdata)
library(qrmtools)
data("SP500")
data("FTSE")
data("SMI")
SP500.X <- returns(SP500)
FTSE.X <- returns(FTSE)
SMI.X <- returns(SMI)
SP500.X <- SP500.X[SP500.X != 0]
SMI.X <- SMI.X[SMI.X != 0]
FTSE.X <- FTSE.X[FTSE.X != 0]
X <- merge(SP500 = SP500.X, FTSE = FTSE.X, SMI = SMI.X, all = FALSE)
X.w <- apply.weekly(X, FUN = colSums)
acf(X)
acf(abs(X))
acf(X.w)
acf(abs(X.w))
plot.zoo(X, xlab = "Time", main = "Log-returns")
X.vols <- apply.monthly(X, FUN = function(x) apply(x, 2, sd))
plot.zoo(X.vols, xlab = "Time", main = "Volatility estimates")
L <- -SP500.X
r <- 0.01
u <- quantile(L, probs = 1 - r)
xtr.L <- L[L > u]
plot(as.numeric(xtr.L), type = "h", xlab = "Time",
ylab = substitute("Largest"~r.*"% of losses ("*n.~"losses)",
list(r. = 100 * r, n. = length(xtr.L))))
spcs <- as.numeric(diff(time(xtr.L)))
qq_plot(spcs, FUN = function(p) qexp(p, rate = r))
set.seed(271)
L. <- rt(length(L), df = 3)
u. <- quantile(L., probs = 1 - r)
xtr.L. <- L.[L. > u.]
plot(xtr.L., type = "h", xlab = "Time",
ylab = substitute("Largest"~r.*"% of losses ("*n.~"losses)",
list(r. = 100 * r, n. = length(xtr.L.))))
spcs. <- diff(which(L. > u.))
qq_plot(spcs., FUN = function(p) qexp(p, rate = r))
layout(t(1:3))
for (i in 1:3)
qq_plot(X.w[,i], FUN = qnorm, method = "empirical",
main = names(X.w)[i])
layout(1)
descendants <- function(node, G, topo) {
de.ind <- unique(unlist(igraph::neighborhood(G, order = igraph::vcount(G), nodes = node, mode = "out")))
de <- igraph::V(G)[de.ind]$name
de <- de %ts% topo
return(de)
}
context("test_posterior_common_cov.R")
test_that("posterior calculations with common cov match regular version",{
Bhat = rbind(c(1,2,3),c(2,4,6))
Shat = rbind(c(1,1,1),c(1,1,1))
data = mash_set_data(Bhat,Shat)
Ulist = cov_canonical(data)
posterior_weights = matrix(1/length(Ulist),nrow = 2, ncol=length(Ulist))
out1 = compute_posterior_matrices_general_R(data,A=diag(3),Ulist,posterior_weights)
out2 = compute_posterior_matrices_common_cov_R(data,A=diag(3),Ulist,posterior_weights)
expect_equal(out1,out2)
out1 = calc_post_rcpp(t(data$Bhat),t(data$Shat), matrix(0,0,0), matrix(0,0,0), data$V, matrix(0,0,0), diag(ncol(data$Bhat)), simplify2array(Ulist),t(posterior_weights), TRUE, FALSE)
out2 = calc_post_rcpp(t(data$Bhat),t(data$Shat), matrix(0,0,0), matrix(0,0,0), data$V, matrix(0,0,0), diag(ncol(data$Bhat)), simplify2array(Ulist),t(posterior_weights), FALSE, FALSE)
expect_equal(out1,out2)
}
)
knitr::opts_chunk$set(
echo = TRUE,
collapse = TRUE,
comment = "
);
preregExample <-
preregr::prereg_initialize(
"inclSysRev_v0_92"
);
preregr::prereg_next_item(
preregExample,
nrOfItems = 4
);
preregExample <-
preregExample |>
preregr::prereg_specify(
title = "Example Study",
authors = "Littlebottom, C., Dibbler, C., & Aching, T."
);
preregExample <-
preregExample |>
preregr::prereg_specify(
nonExistent_item = "This can't be stored anywhere"
);
preregExample <-
preregExample |>
preregr::prereg_specify(
start_date = "2021-9-01"
);
preregExample <-
preregExample |>
preregr::prereg_specify(
start_date = "2021-09-01"
);
preregExample |>
preregr::prereg_show_item_content(
section="metadata"
);
preregExample |>
preregr::prereg_show_item_completion();
lightenColor <- function(x,
amount = .4) {
x <- 255 - col2rgb(x);
x <- amount * x;
x <- 255 - x;
x <- rgb(t(x), maxColorValue=255);
return(x);
}
Okabe_Ito <- c("
"
"
orange <- Okabe_Ito[1];
lightBlue <- Okabe_Ito[2];
green <- Okabe_Ito[3];
yellow <- Okabe_Ito[4];
darkBlue <- Okabe_Ito[5];
red <- Okabe_Ito[6];
pink <- Okabe_Ito[7];
orange_l <- lightenColor(orange);
lightBlue_l <- lightenColor(lightBlue);
green_l <- lightenColor(green);
yellow_l <- lightenColor(yellow);
darkBlue_l <- lightenColor(darkBlue);
red_l <- lightenColor(red);
pink_l <- lightenColor(pink);
orangeBg <- lightenColor(orange, amount=.05);
greenBg <- lightenColor(green, amount=.05);
oldKableViewOption <- getOption("kableExtra_view_html", NULL);
options(kableExtra_view_html = FALSE);
oldSilentOption <- preregr::opts$get("silent");
preregr::opts$set(silent = TRUE);
knitr::opts_chunk$set(echo = FALSE, comment="");
if (!exists('headingLevel') || !is.numeric(headingLevel) || (length(headingLevel) != 1)) {
headingLevel <- 0;
}
if (is.null(section)) {
sectionsToShow <- x$form$sections$section_id;
} else {
sectionsToShow <-
intersect(
x$form$sections$section_id,
section
);
}
preregr::heading(
x$form$metadata[x$form$metadata$field == "title", "content"],
idSlug("preregr-prereg-spec"),
headingLevel=headingLevel
);
for (section in sectionsToShow) {
preregr::heading(
"Section: ",
x$form$sections[
x$form$sections$section_id==section,
"section_label"
],
idSlug("preregr-prereg-spec"),
headingLevel=headingLevel + 1
);
item_ids <- x$form$items$item_id[x$form$items$section_id == section];
item_labels <- x$form$items$item_label[x$form$items$section_id == section];
names(item_labels) <- item_ids;
for (currentItemId in item_ids) {
cat0("<div class=\"preregr preregr-item-spec ");
if (x$specs[[currentItemId]] == x$config$initialText) {
cat0("preregr-unspecified\">\n");
} else {
cat0("preregr-specified\">\n");
}
cat0("<div class=\"preregr-item-heading\">\n");
cat0("<div class=\"preregr-item-label\">",
item_labels[currentItemId],
"</div>\n");
cat0("<div class=\"preregr-item-id\">",
currentItemId,
"</div>\n");
cat0("</div>\n");
cat0("<div class=\"preregr-item-spec-text\">",
ifelse(!is.null(x$specs[[currentItemId]]$text) &&
!is.na(x$specs[[currentItemId]]$text) &&
nchar(x$specs[[currentItemId]]$text) > 0,
x$specs[[currentItemId]]$text,
" "),
"</div>\n");
cat0("</div>\n");
}
}
preregr::opts$set(silent = oldSilentOption);
if (!is.null(oldKableViewOption)) {
options(kableExtra_view_html = oldKableViewOption);
}
preregrJSON <-
preregr::prereg_spec_to_json(x);
preregrJSON <-
  gsub("'",
       "&#39;",
       preregrJSON
  );
slug <- paste0("preregr-data-", preregr::randomSlug());
preregr::prereg_knit_item_content(
preregExample,
section="metadata"
);
preregExample <-
preregExample |>
preregr::prereg_justify(
item = "start_date",
decision = "We decided to start on the first, rather than the second, of September 2021.",
justification = "It's a bit weird to start on the second day of a month."
);
setScreenSize <-
function(size=c("normal", "small", "large"),
height, width)
{
if(!missing(height) && !is.null(height) && !is.na(height) && height > 0 &&
!missing(width) && !is.null(width) && !is.na(width) && width > 0)
screensize <- list(height=height, width=width)
else {
size <- match.arg(size)
screensize <- switch(size,
small= list(height= 600, width= 900),
normal= list(height= 700, width=1000),
large= list(height=1200, width=1600))
}
message("Set screen size to height=", screensize$height,
" x width=", screensize$width)
options(qtlchartsScreenSize=screensize)
}
getScreenSize <-
function()
{
screensize <- getOption("qtlchartsScreenSize")
if(is.null(screensize)) {
setScreenSize()
screensize <- getOption("qtlchartsScreenSize")
}
screensize
}
getPlotSize <-
function(aspectRatio)
{
screensize <- getScreenSize()
if(screensize$height*aspectRatio <= screensize$width)
return( list(height=screensize$height, width=screensize$height*aspectRatio) )
list(height=screensize$width/aspectRatio, width=screensize$width)
}
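## Usage sketch:
if (FALSE) {
  setScreenSize("large")            # stores height/width in options()
  getPlotSize(aspectRatio = 4 / 3)  # largest height/width pair with that ratio that fits
}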
tidy.orcutt <- function(x, ...) {
s <- summary(x)
co <- stats::coef(s)
ret <- as_tibble(co, rownames = "term")
names(ret) <- c("term", "estimate", "std.error", "statistic", "p.value")
ret
}
glance.orcutt <- function(x, ...) {
as_glance_tibble(
r.squared = unname(x$r.squared),
adj.r.squared = unname(x$adj.r.squared),
rho = unname(x$rho),
number.interaction = unname(x$number.interaction),
dw.original = unname(x$DW[1]),
p.value.original = unname(x$DW[2]),
dw.transformed = unname(x$DW[3]),
p.value.transformed = unname(x$DW[4]),
nobs = stats::nobs(x),
na_types = "rrrirrrri"
)
}
makeRLearner.classif.fdausc.np = function() {
makeRLearnerClassif(
cl = "classif.fdausc.np",
package = "fda.usc",
par.set = makeParamSet(
makeIntegerVectorLearnerParam(id = "h", default = NULL, special.vals = list(NULL)),
makeDiscreteLearnerParam(id = "Ker", default = "AKer.norm", values = list("AKer.norm", "AKer.cos", "AKer.epa", "AKer.tri", "AKer.quar", "AKer.unif")),
makeDiscreteLearnerParam(id = "metric", default = "metric.lp", values = list("metric.lp", "metric.kl", "metric.hausdorff", "metric.dist")),
makeDiscreteLearnerParam(id = "type.CV", default = "GCV.S", values = c("GCV.S", "CV.S", "GCCV.S")),
makeDiscreteLearnerParam(id = "type.S", default = "S.NW", values = list("S.NW", "S.LLR", "S.KNN")),
makeNumericLearnerParam(id = "trim", lower = 0L, upper = 1L, default = 0L),
makeLogicalLearnerParam(id = "draw", default = TRUE, tunable = FALSE)
),
par.vals = list(draw = FALSE),
properties = c("twoclass", "multiclass", "prob", "single.functional"),
name = "Nonparametric classification on FDA",
short.name = "fdausc.np",
note = "Argument draw=FALSE is used as default. Additionally, mod$C[[1]] is set to quote(classif.np)"
)
}
trainLearner.classif.fdausc.np = function(.learner, .task, .subset, .weights = NULL, trim, draw, metric, Ker, ...) {
d = getTaskData(.task, subset = .subset, target.extra = TRUE, functionals.as = "matrix")
fd = getFunctionalFeatures(d$data)
data.fdclass = fda.usc::fdata(mdata = as.matrix(fd))
par.cv = learnerArgsToControl(list, trim, draw)
par.funs = learnerArgsToControl(list, metric, Ker)
par.funs = lapply(par.funs, function(x) getFromNamespace(x, "fda.usc"))
trainfun = getFromNamespace("classif.np", "fda.usc")
mod = do.call("trainfun",
c(list(group = d$target, fdataobj = data.fdclass, par.CV = par.cv, par.S = list(w = .weights)),
list(metric = par.funs$metric)[which(names(par.funs) == "metric")],
list(Ker = par.funs$Ker)[which(names(par.funs) == "Ker")],
...))
mod$C[[1]] = quote(classif.np)
return(mod)
}
predictLearner.classif.fdausc.np = function(.learner, .model, .newdata, ...) {
fd = getFunctionalFeatures(.newdata)
nd = fda.usc::fdata(mdata = as.matrix(fd))
type = ifelse(.learner$predict.type == "prob", "probs", "class")
if (type == "probs") {
predict(.model$learner.model, nd, type = type)$prob.group
} else {
predict(.model$learner.model, nd, type = type)
}
}
test_that("relist_nlist", {
expect_identical(relist_nlist(
structure(numeric(0), .Names = character(0)),
nlist()
), nlist())
expect_identical(
relist_nlist(c(a = 5), nlist(a = NA_real_)),
nlist(a = 5)
)
expect_identical(
relist_nlist(c(a = 5), nlist(a = NA_integer_)),
nlist(a = 5L)
)
expect_identical(
relist_nlist(c(`a[2]` = 5), nlist(a = c(1, 2, 3))),
nlist(a = c(NA, 5, NA))
)
expect_identical(
relist_nlist(c(`a[2]` = 5), nlist(a = 1:3)),
nlist(a = c(NA, 5L, NA))
)
})
"kroyeri" |
read_from_folder <- function(path, type) {
possible_types <-
c(
"duration",
"audacity",
"eaf",
"exb",
"flextext",
"formant",
"intensity",
"picth",
"srt",
"textgrid"
)
match.arg(type, possible_types, several.ok = FALSE)
ext <- switch(
type,
duration = "(.wave?$)|(.WAVE?$)",
audacity = ".txt$",
eaf = ".eaf$",
exb = ".exb$",
flextext = ".flextext$",
formant = ".Formant$",
intensity = ".Intensity$",
picth = ".Pitch$",
srt = ".srt$",
textgrid = ".TextGrid$"
)
FUN <- switch(
type,
duration = "get_sound_duration(x)",
audacity = "audacity_to_df(x)",
eaf = "eaf_to_df(x)",
exb = "exb_to_df(x)",
flextext = "flextext_to_df(x)",
formant = "formant_to_df(x)",
intensity = "intensity_to_df(x)",
picth = "pitch_to_df(x)",
srt = "srt_to_df(x)",
textgrid = "textgrid_to_df(x)"
)
path <- normalizePath(path)
files <- list.files(path, pattern = ext, full.names = TRUE)
do.call(rbind, lapply(
files,
FUN = function(x) {
tryCatch(
eval(parse(text = FUN)),
error = function(e) {
warning("Error while reading from ", x, ":\n", e$message, "\n")
return(NA)
}
)
}
))
}
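## Usage sketch (the folder path is illustrative):
if (FALSE) {
  df <- read_from_folder("recordings", type = "textgrid")
  head(df)
}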
print.summary.optbdmaeAT<-function(x,...)
{
cat("\n --------------------------------------- \n")
cat("Title: ",x$Optcrit,"-optimal or near-optimal block design ","Date: ", format(Sys.time(), "%a %b %d %Y %H:%M:%S"),"\n",sep="")
cat(" --------------------------------------- \n")
cat("Call:\n")
print(x$call)
cat("\nparametric combintaions:\n")
cat("\nNumber of treamtments: ", x$v, "\n")
cat("Number of blocks: ", x$b, "\n")
cat("Theta value: ", x$theta, "\n")
cat("Number of replications: ", x$nrep, "\n")
cat("Number of exchange iteration: ", x$itr.cvrgval, "\n")
cat("Algorithm used: ", x$Alg, "\n")
cat("OPtimality criterion used: ", x$Optcrit, "-optimality criteria\n",sep="")
cat("\nResultant ",x$Optcrit,"-optimal or near-optimal block design:\n",sep="")
cat("\n")
print(data.frame(x$OptdesF))
cat("\n")
cat(x$Optcrit,"-Score value: ", x$Optcrtsv, "\n",sep="")
plot(x$grphlt,edge.arrow.size=5, vertex.size=20, margin=0.5,
layout=layout.kamada.kawai,vertex.color="cyan",edge.color="black")
title(paste("Graphical layout of", paste(x$Optcrit,"-optimal or near-optimal block design",sep=""),sep=" "),
sub = NULL,cex.main = 1, font.main= 1, col.main= "black")
mtext(paste("using"," ",x$Alg,"-algorithm for:",sep=""), line = 0.5, col = "black", font = 1)
mtext(paste("(v, b, theta) =", " (",paste(x$v, x$b, x$theta, sep=", "),")",sep=""), line = -0.50, col = "blue", font = 1)
cat("\n", x$file_loc2,"\n", x$file_loc,"\n")
cat("\n")
}
graph_topo_compar <- function(obs_graph, pred_graph,
mode = "mcc", directed = FALSE){
if(!inherits(obs_graph, "igraph")){
stop("'obs_graph' must be a graph object of class 'igraph'.")
} else if (!inherits(pred_graph, "igraph")){
stop("'pred_graph' must be a graph object of class 'igraph'.")
}
if(length(igraph::V(obs_graph)) != length(igraph::V(pred_graph))){
stop("Both graphs must have the same node number.")
}
if(is.null(igraph::V(obs_graph)$name)){
stop("The nodes of 'obs_graph' must have names.")
} else if(is.null(igraph::V(pred_graph)$name)){
stop("The nodes of 'pred_graph' must have names.")
}
if(!all(igraph::V(obs_graph)$name == igraph::V(pred_graph)$name)){
stop("Both graphs must have the same node names and the nodes
ranked in the same order.")
}
nb_nodes <- length(igraph::V(obs_graph)$name)
if(directed == TRUE){
K <- (nb_nodes - 1) * nb_nodes
adj1 <- igraph::as_adjacency_matrix(obs_graph,
names = TRUE , sparse = FALSE)
adj2 <- igraph::as_adjacency_matrix(pred_graph,
names = TRUE , sparse = FALSE)
diag(adj1) <- diag(adj2) <- 0
tp <- length(which((adj1 & adj2)))
tn <- length(which(((1 - adj1) & (1 - adj2)))) - nb_nodes
fp <- length(which(((1 - adj1) & adj2)))
fn <- length(which((adj1 & (1 - adj2))))
} else if (directed == FALSE){
K <- (nb_nodes - 1) * nb_nodes/2
adj1 <- igraph::as_adjacency_matrix(obs_graph, type = "both",
names = TRUE , sparse = FALSE)
adj2 <- igraph::as_adjacency_matrix(pred_graph, type = "both",
names = TRUE , sparse = FALSE)
diag(adj1) <- diag(adj2) <- 0
tp <- length(which((adj1 & adj2)))/2
tn <- length(which(((1 - adj1) & (1 - adj2))))/2 - (nb_nodes / 2)
fp <- length(which(((1 - adj1) & adj2)))/2
fn <- length(which((adj1 & (1 - adj2))))/2
} else {
stop("'directed' must be TRUE or FALSE.")
}
pp <- tp + fp
pn <- fn + tn
op <- tp + fn
on <- tn + fp
pp_op <- pp * op
pn_on <- pn * on
mcc <- (tp * tn - fp * fn) / ( sqrt(pp_op) * sqrt(pn_on) )
kappa <- ( K * (tp + tn) - (on * pn)-(op * pp))/(K^2 - (on * pn) - (op * pp))
fdr <- fp / (tp + fp)
acc <- (tp + tn) / K
sens <- tp / (tp + fn)
spec <- tn / (tn + fp)
prec <- tp / (tp + fp)
if(mode == "mcc"){
message(paste("Matthews Correlation Coefficient : ", mcc, sep = ""))
return(mcc)
} else if(mode == "kappa"){
message(paste("Kappa Index : ", kappa, sep = ""))
return(kappa)
} else if(mode == "fdr"){
message(paste("False Discovery Rate : ", fdr, sep = ""))
return(fdr)
} else if(mode == "acc"){
message(paste("Accuracy : ", acc, sep = ""))
return(acc)
} else if(mode == "sens"){
message(paste("Sensitivity : ", sens, sep = ""))
return(sens)
} else if(mode == "spec"){
message(paste("Specificity : ", spec, sep = ""))
return(spec)
} else if(mode == "prec"){
message(paste("Precision : ", prec, sep = ""))
return(prec)
}
}
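## Usage sketch (two toy graphs on the same named nodes):
if (FALSE) {
  library(igraph)
  obs  <- graph_from_literal(A - B, B - C, C - D)
  pred <- graph_from_literal(A - B, B - C, B - D)
  graph_topo_compar(obs, pred, mode = "mcc")
}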
'dse06l'
qrnn.rbf <-
function(x, x.basis, sigma)
{
kern <- matrix(0, nrow=nrow(x), ncol=nrow(x.basis))
for (k in seq(nrow(x.basis))){
x.basis.test <- matrix(x.basis[k,], nrow=nrow(x),
ncol=ncol(x.basis), byrow=TRUE)
kern[,k] <- exp(-apply(((x-x.basis.test)^2)/(2*sigma^2), 1, sum))
}
kern
}
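## Usage sketch: Gaussian kernel matrix between 5 points and 2 basis centres.
if (FALSE) {
  set.seed(1)
  x <- matrix(rnorm(10), nrow = 5, ncol = 2)
  qrnn.rbf(x, x.basis = x[1:2, , drop = FALSE], sigma = 1)  # 5 x 2 kernel
}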
print.reg <- function(x, ...)
{
cat("Mean coefficients: ")
c <- colMeans(x$coeff.,na.rm=TRUE)
cat("\n")
print(round(c,digits=4),quote=FALSE)
cat("\n")
e <- (mean((as.vector(x$y)-as.vector(x$y.hat))^2,na.rm=TRUE))^0.5
e <- round(e,digits=4)
cat("RMSE: ",e)
cat("\n")
e <- mean(abs(as.vector(x$y)-as.vector(x$y.hat)),na.rm=TRUE)
e <- round(e,digits=4)
cat("MAE: ",e)
cat("\n")
if (! is.null(x$window)) { cat("rolling window: ",x$window) }
cat("\n")
}
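## Usage sketch: a minimal hand-built object with the fields print.reg reads
## (field values are illustrative only).
if (FALSE) {
  obj <- structure(
    list(coeff. = matrix(c(1, 1.2, 2, 2.1), nrow = 2,
                         dimnames = list(NULL, c("a", "b"))),
         y = c(1, 2), y.hat = c(1.1, 1.9), window = NULL),
    class = "reg")
  print(obj)
}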
with_mock_api({
test_that("ff_starter_positions returns a tibble of starter positions", {
skippy()
dlf <- mfl_connect(2020, 37920)
dlf_starter_positions <- ff_starter_positions(dlf)
expect_tibble(dlf_starter_positions, min.rows = 4)
jml_conn <- sleeper_connect(league_id = "522458773317046272", season = 2020)
jml_starter_positions <- ff_starter_positions(jml_conn)
expect_tibble(jml_starter_positions, min.rows = 4)
got_conn <- fleaflicker_connect(season = 2020, league_id = 206154)
got_starter_positions <- ff_starter_positions(got_conn)
expect_tibble(got_starter_positions, min.rows = 10)
tony_conn <- espn_connect(season = 2020, league_id = 899513)
tony_starter_positions <- ff_starter_positions(tony_conn)
expect_tibble(tony_starter_positions, min.rows = 5)
})
})
test_that("ff_scoring for templates return tibbles", {
oneqb <- ff_template(roster_type = "1qb") %>% ff_starter_positions()
sf <- ff_template(roster_type = "superflex") %>% ff_starter_positions()
sfb11 <- ff_template(roster_type = "sfb11") %>% ff_starter_positions()
idp <- ff_template(roster_type = "idp") %>% ff_starter_positions()
expect_tibble(oneqb)
expect_tibble(sf)
expect_tibble(sfb11)
expect_tibble(idp)
})
commons_divisions <- function(division_id = NULL, division_uin = NULL,
summary = FALSE,
start_date = "1900-01-01",
end_date = Sys.Date(), extra_args = NULL,
tidy = TRUE, tidy_style = "snake",
verbose = TRUE) {
dates <- paste0(
"&_properties=date&max-date=",
as.Date(end_date), "&min-date=",
as.Date(start_date)
)
if (is.null(division_id) & is.null(division_uin)) {
baseurl <- paste0(url_util, "commonsdivisions")
if (verbose == TRUE) {
message("Connecting to API")
}
divis <- jsonlite::fromJSON(paste0(
baseurl, ".json?", dates,
extra_args, "&_pageSize=1"
),
flatten = TRUE
)
jpage <- floor(divis$result$totalResults / 100)
query <- paste0(baseurl, ".json?", dates, extra_args)
df <- loop_query(query, jpage, verbose)
} else if (!is.null(division_id)) {
baseurl <- paste0(url_util, "commonsdivisions/id/")
if (verbose == TRUE) {
message("Connecting to API")
}
divis <- jsonlite::fromJSON(paste0(
baseurl, division_id,
".json?", dates, extra_args
),
flatten = TRUE
)
df <- tibble::as_tibble(divis$result$primaryTopic$vote)
if (summary == TRUE) {
df <- dplyr::summarise(dplyr::group_by_at(df, "type"), count = dplyr::n())
}
df$date <- as.POSIXct(divis$result$primaryTopic$date$`_value`)
} else if (!is.null(division_uin)) {
baseurl <- paste0(url_util, "commonsdivisions.json?uin=")
if (verbose == TRUE) {
message("Connecting to API")
}
divis <- jsonlite::fromJSON(paste0(
baseurl, division_uin,
dates, extra_args
),
flatten = TRUE
)
df <- tibble::as_tibble(divis$result$items[["vote"]][[1]])
if (summary == TRUE) {
df <- dplyr::summarise(dplyr::group_by_at(df, "type"), count = dplyr::n())
}
df$date <- as.POSIXct(divis$result$items$date._value)
}
if (nrow(df) == 0) {
message("The request did not return any data.
Please check your parameters.")
} else {
if (tidy == TRUE) {
if (is.null(division_id) & is.null(division_uin)) {
df <- hansard_tidy(df, tidy_style)
} else {
df <- cd_tidy(df, tidy_style, summary)
}
}
df
}
}
hansard_commons_divisions <- commons_divisions
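## Usage sketch (the division_id value is illustrative only; assumes the
## hansard package internals referenced above are loaded):
if (FALSE) {
  all_divs <- commons_divisions(start_date = "2019-01-01",
                                end_date = "2019-01-31")
  one_div <- commons_divisions(division_id = 1234567, summary = TRUE)
}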
uiReliability <-
navbarMenu(
"Reliability",
tabPanel(
"Spearman-Brown formula",
h3("Spearman-Brown formula"),
h4("Equation"),
p("Let \\(\\text{rel}(X)\\) be the reliability of the test composed of \\(I\\) equally precise
items measuring the same construct, \\(X = X_1 + ... + X_I\\).
Then for a test consisting of
\\(I^*\\) such items, that is for a test which is
\\(m = \\frac{I^*}{I}\\) times longer/shorter, the reliability would be"),
("$$\\text{rel}(X^*) = \\frac{m\\cdot \\text{rel}(X)}{1 + (m - 1)\\cdot\\text{rel}(X)}.$$"),
p("The Spearman-Brown formula can be used to determine reliability of a test with with a
different number of equally precise items measuring the same construct.
It can also be used to determine the necessary number of items to achieve
desired reliability."),
p(
"In the calculations below, ", strong("reliability of original data"), "is by
default set to the value of Cronbach's \\(\\alpha\\) for the dataset currently in use. The ",
strong("number of items in the original data"), "is
by default set to the number of items in the dataset currently in use. "
),
fluidRow(
column(
3,
numericInput(
inputId = "reliability_SBformula_reliability_original",
label = "Reliability of original data",
max = 1,
min = 0,
value = 0.7
)
),
column(
3,
numericInput(
inputId = "reliability_SBformula_items_original",
label = "Number of items in original data",
min = 1,
step = 1,
value = 20
)
)
),
h4("Estimate of reliability with different number of items"),
p("Here you can calculate an estimate of reliability for a test consisting of a different number of
items. "),
fluidRow(column(
3,
numericInput(
inputId = "reliability_SBformula_items_new",
label = "Number of items in new data",
min = 1,
step = 1,
value = 30
)
)),
uiOutput("reliability_SBformula_reliability_text"),
h4("Necessary number of items for required level of reliability"),
p("Here you can calculate the necessary number of items
to gain the required level of reliability. "),
fluidRow(column(
3,
numericInput(
inputId = "reliability_SBformula_reliability_new",
label = "Reliability of new data",
max = 1,
min = 0,
value = 0.8
)
)),
uiOutput("reliability_SBformula_items_text"),
br(),
h4("Selected R code"),
code(includeText("sc/reliability/sb.R"))
),
tabPanel(
"Split-half method",
h3("Split-half method"),
p("The split-half method uses the correlation between two subscores for an estimation of reliability.
The underlying assumption is that the two halves of the test (or even all items on the test) are
equally precise and measure the same underlying construct. The Spearman-Brown formula is then used to
correct the estimate for the number of items."),
h4("Equation"),
p("For a test with \\(I\\) items total score is calculated as \\(X = X_1 + ... + X_I\\).
Let \\(X^*_1\\) and \\(X^*_2\\) be total scores calculated from items found only in the first
and second subsets. The estimate of reliability is then given by the Spearman-Brown formula (Spearman, 1910; Brown, 1910)
with \\(m = 2\\)."),
("$$\\text{rel}(X) = \\frac{m\\cdot \\text{cor}(X^*_1, X^*_2)}{1 + (m - 1)\\cdot\\text{cor}(X^*_1, X^*_2)} =
\\frac{2\\cdot \\text{cor}(X^*_1, X^*_2)}{1 + \\text{cor}(X^*_1, X^*_2)}$$"),
p(
"You can choose below from different split-half approaches. The ",
strong("First-last"), "method uses a correlation between the first half of items and the second
half of items. The ", strong("Even-odd"), "method places even numbered items into the first subset and odd numbered items
into the second one. The ", strong("Random"), "method performs a random split of items, thus the
resulting estimate may be different for each call. Out of a specified number of random splits (10,000 by default),
the ", strong("Worst"), " method selects the lowest estimate and the ", strong("Average"), "method calculates the
average. In the case of an odd number of items, the first subset contains one more item than the second one."
),
uiOutput("reliability_splithalf_allpossible_text"),
br(),
fluidRow(
column(
3,
selectInput(
inputId = "reliability_splithalf_method",
label = "Split half method",
choices = c(
"First-last" = "firstlast",
"Even-odd" = "evenodd",
"Random" = "random",
"Worst" = "worst",
"Average" = "average"
),
selected = "First_last"
)
),
column(
4,
numericInput(
inputId = "reliability_splithalf_number",
label = textOutput("reliability_splithalf_number_label"),
value = 10000,
min = 1,
step = 1
)
)
),
conditionalPanel(
condition = "input.reliability_splithalf_method != 'average'",
uiOutput("reliability_splithalf_text"),
br()
),
h4("Reliability estimate with confidence interval"),
p(
"The estimate of reliability for ", strong("First-last"), ", ", strong("Even-odd"), ", ", strong("Random"), "and",
strong("Worst"), "is calculated using the Spearman-Brown formula. The confidence interval is based on a
confidence interval of correlation using the delta method. The estimate of reliability for the ", strong("Average"),
"method is a mean value of sampled reliabilities and the confidence interval is the confidence interval of this mean. "
),
uiOutput("reliability_splithalf_table"),
br(),
h4("Histogram of reliability estimates"),
p("A histogram is based on a selected number of split halves estimates (10,000 by default).
The current estimate is highlighted by a red colour."),
plotlyOutput("reliability_splithalf_histogram"),
downloadButton("DB_reliability_splithalf_histogram"),
br(),
h4("Selected R code"),
code(includeText("sc/reliability/sh.R"))
),
tabPanel("Cronbach's \\(\\alpha\\)",
value = "cronbach",
h3("Cronbach's \\(\\alpha\\)"),
p("Cronbach's \\(\\alpha\\) is an estimate of the internal consistency of a psychometric test.
It is a function of the number of items in a test, the average covariance
between item-pairs, and the variance of the total score (Cronbach, 1951)."),
h4("Equation"),
p("For a test with \\(I\\) items where \\(X = X_1 + ... + X_I\\) is a total score,
\\(\\sigma^2_X\\) its variance and \\(\\sigma^2_{X_i}\\) variances of items,
Cronbach's \\(\\alpha\\) is given by following equation"),
("$$\\alpha = \\frac{I}{I-1}\\left(1 - \\frac{\\sum_{i = 1}^I \\sigma^2_{X_i}}{\\sigma^2_X}\\right)$$"),
h4("Estimate with confidence interval"),
p("A confidence interval is based on F distribution as proposed by Feldt et al. (1987)."),
tableOutput("reliability_cronbachalpha_table"),
h4("Selected R code"),
code(includeText("sc/reliability/cronbach.R"))
),
"---",
"Modules",
tabPanel(tags$a("Restricted-range Reliability",
href = "https://shiny.cs.cas.cz/ShinyItemAnalysis-module-IRRrestricted/",
target = "_blank", .noWS = "outside"
))
)
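## A minimal sketch of the Spearman-Brown computations described in the UI
## text above (helper names are illustrative, not part of the app):
if (FALSE) {
  # Reliability of a test m times longer/shorter than the original.
  sb_reliability <- function(rel, m) (m * rel) / (1 + (m - 1) * rel)
  sb_reliability(rel = 0.7, m = 30 / 20)      # reliability of a 1.5x longer test
  # Items needed to reach reliability rel1 from rel0 with n0 items.
  sb_items <- function(rel0, rel1, n0) {
    ceiling(n0 * rel1 * (1 - rel0) / (rel0 * (1 - rel1)))
  }
  sb_items(rel0 = 0.7, rel1 = 0.8, n0 = 20)   # items needed for reliability 0.8
}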
Gene_del<-function(query_genes=NULL,fba_object,return_reactions=FALSE)
{
require(abcdeFBA)
genes_NA_model<-setdiff(query_genes,fba_object$all_genes)
if(length(genes_NA_model)>0)
{
message("These genes are not present in the model")
message(genes_NA_model)
}
query_genes<-setdiff(query_genes,genes_NA_model)
if(length(query_genes)>0)
{
gpr_ix=vector()
Effect=vector()
for(i in 1:length(query_genes))
{
gpr_ix<-c(gpr_ix,grep(query_genes[i],fba_object$gpr))
}
gpr_ix<-unique(gpr_ix)
enlisted_gprs<-fba_object$gpr[gpr_ix]
for(i in 1:length(enlisted_gprs))
{
enlisted_gprs[i]<-gsub("\\(","( ",enlisted_gprs[i])
enlisted_gprs[i]<-gsub("\\)"," )",enlisted_gprs[i])
enlisted_gprs[i]<-gsub("and","&",enlisted_gprs[i])
enlisted_gprs[i]<-gsub("or","|",enlisted_gprs[i])
split_gpr<-strsplit(enlisted_gprs[i]," ")[[1]]
gpr_genes<-split_gpr[grep("[0-9,A-Z,a-z]",split_gpr)]
non_query_genes<-setdiff(gpr_genes,query_genes)
for(j in 1:length(query_genes))
{split_gpr[which(split_gpr==query_genes[j])]="0"}
for(j in 1:length(non_query_genes))
{split_gpr[which(split_gpr==non_query_genes[j])]="1"}
enlisted_gprs[i]=paste(split_gpr,collapse=" ")
Effect<-c(Effect,eval(parse(text=enlisted_gprs[i])))
	}
	# Evaluate the kill switch once, after all GPR rules have been processed,
	# so Effect is complete and the function does not return on iteration 1.
	KillSwitch<-rep(TRUE,length(fba_object$gpr))
	KillSwitch[gpr_ix]<-Effect
	Switch_off<-which(KillSwitch==0)
	if(return_reactions==TRUE)
	{return(Switch_off)}
	if(return_reactions==FALSE)
	{return(CHANGE_RXN_BOUNDS(Switch_off,fba_object,0,0))}
	}else(message("the query list is empty"))
}
snps_permutation <-
function (ordered_alldata = "", pers_ids = "", ntraits = "",
nper = 100, threshold = 0.05, seed=10, saveto = "workspace",gs_locs="",envir = "")
{
print("Arguments")
print(paste("Ordered dataset: ", substitute(ordered_alldata),
sep = ""))
print(paste("Indexes of SNP Annotations: ", substitute(pers_ids)))
print(paste("Indexes of Traits to Analyse:", as.numeric(ntraits)))
print(paste("Traits:", colnames(ordered_alldata)[as.numeric(ntraits)]))
print(paste("Number of permutations: ", nper))
print(paste("Threshold: ", threshold))
print(paste("Permutation Results save to: ", substitute(saveto)))
if (saveto != "workspace"){
if(saveto != "directory") {
stop("Define where are the results to be saved: \"saveto\"=\"workspace\" OR \"directory\"")
}
}
ntraits <- as.numeric(ntraits)
nper <- as.numeric(nper)
threshold <- as.numeric(threshold)
set.seed(as.numeric(seed), kind = "Mersenne-Twister")
temp <- ordered_alldata[, c(1:6, ntraits)]
ns <- which(pers_ids != "NULL")
if (length(ns) == 0) {
stop("No SNPs mapped to the gene-sets")
}
pers_ids <- pers_ids[ns]
paths_list <- names(pers_ids)
mx_rs <- dim(temp)[1]
sd <- round(runif(nper, 1, mx_rs))
rowsf <- dim(gs_locs)[1]
tname <- NULL
lab <- NULL
i <- NULL
ids <- NULL
j <- NULL
k <- NULL
date()
all_ts <- NULL
listf <- as.numeric(as.character(gs_locs[, 4]))
for (i in 1:length(paths_list)) {
per_mat <- matrix(data = NA, nrow = length(sd) + 3, ncol = length(temp) -
6)
colnames(per_mat) <- colnames(temp)[7:length(temp)]
path_name <- strsplit(paths_list[i], split = "[_]")[[1]][3]
print(path_name)
indxs <- pers_ids[[i]]
for (j in 7:length(temp)) {
temp2 <- temp[, j]
big_count <- 0
sig_snps_real <- 0
for (k in 1:length(sd)) {
if (k == 1) {
per_mat[1, j - 6] <- sig_snps_real <- length(which(temp[indxs,
j] <= threshold))
}
count <- 0
fkindxs <- sapply(indxs, sum, sd[k])
mayores <- which(fkindxs > mx_rs)
menores <- which(fkindxs <= mx_rs)
count <- length(which(temp2[fkindxs[menores]] <=
threshold))
if (length(mayores) != 0) {
for (m in 1:length(mayores)) {
fkindxs[mayores[m]] <- fkindxs[mayores[m]] -
mx_rs
}
count <- count + length(which(temp2[fkindxs[mayores]] <=
threshold))
}
per_mat[k + 1, j - 6] <- count
if (count > sig_snps_real) {
big_count <- big_count + 1
}
}
per_mat[k + 2, j - 6] <- big_count
per_mat[k + 3, j - 6] <- big_count/length(sd)
}
rownames(per_mat) <- c("Real_Count", 1:length(sd), "All_Count",
"Score")
if (saveto == "directory") {
write.table(per_mat, file = paste("Permus_", path_name,
".txt", sep = ""), sep = "\t", row.names = T,
col.names = T, quote = F)
}
if (saveto == "workspace") {
assign(paste("Permus_", path_name, sep = ""), per_mat,
envir = envir)
}
}
}
source(test_path('test-fns.R'))
old_path <- getwd()
temp_path <- tempfile()
dir_empty(temp_path)
setwd(temp_path)
git2r::init(temp_path)
config <- glue::glue(
"
[core]
bare = false
repositoryformatversion = 0
filemode = true
ignorecase = true
precomposeunicode = true
logallrefupdates = true
[remote \"origin\"]
url = [email protected]:user/repo.git
fetch = +refs/heads/*:refs/remotes/origin/*
[branch \"master\"]
remote = origin
merge = refs/heads/master
"
)
fileConn <- file(glue::glue("{temp_path}/.git/config"))
writeLines(config, fileConn)
close(fileConn)
cat(
"```{r}\nlibrary(dplyr)\nrequire(ggplot2)\nglue::glue_collapse(glue::glue('{1:10}'))\n```\n",
file = paste0(temp_path, "/test.Rmd")
)
on.exit(setwd(old_path))
library(openintro)
library(tidyverse)
library(usethis)
gender_discrimination <- openintro::gender_discrimination %>%
mutate(
decision = as.character(decision),
decision = if_else(decision == "not", "not promoted", decision),
decision = fct_relevel(decision, "promoted", "not promoted"),
gender = fct_relevel(gender, "male", "female")
)
use_data(gender_discrimination, overwrite = TRUE)
set.seed(1820)
n_rows = 1000
n_cols = 4
X = matrix(sample(-9:9, n_rows * n_cols, replace = TRUE), nrow = n_rows, ncol = n_cols)
column_names = sapply(1:n_cols, function(i_column){paste("column", i_column, sep = "_")})
colnames(X) = column_names
beta = sample(c(-1,-0.5, 0.5, 1), n_cols + 1, replace = TRUE)
mu = X %*% beta[-1] + beta[1]
y = rnorm( n_rows, mean = mu, sd = 2.5)
X = model.matrix(~ . + 0 + column_1 * ., data = as.data.frame(X))
colnames(X)
fit = lm(y ~ ., as.data.frame(X), x = TRUE, y = TRUE)
# Store each selection result under its own name so the result does not
# shadow the fwbw() function itself.
fwbw_bic = fwbw(fit, BIC, control = list(monitor = TRUE))
names(coef(fwbw_bic$object))
fwbw_aic = fwbw(fit, AIC, control = list(plot = TRUE))
names(coef(fwbw_aic$object))
fwbw_fw = fwbw(fit, BIC, fw = TRUE)
names(coef(fwbw_fw$object))
library(RUnit)
library(VarianceGamma)
data(vgParam)
testParam <- vgSmallShape
n <- 10000
nqp <- 100
N <- 100
Nfit <- 100
errorThresholdM <- 0.1
errorThresholdV <- 0.001
errorThresholdS <- 0.001
errorThresholdK <- 0.001
errorThresholdMom <- 0.01
errorThresholddpqrI <- 0.001
thresholddpqrR <- 0.001
errorThresholdFit <- 0.5
TestSuitevgL3 <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/tests",
testFileRegexp = "runit.vgLevel3Tests.R")
testResultL3 <- runTestSuite(TestSuitevgL3)
printTextProtocol(testResultL3, showDetails = TRUE)
TestSuitevgL3dpqrInversionqp <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrInversionqp")
testResultL3dpqrInversionqp <- runTestSuite(TestSuitevgL3dpqrInversionqp)
printTextProtocol(testResultL3dpqrInversionqp, showDetails = TRUE)
TestSuitevgL3dpqrInversionpq <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrInversionpq")
testResultL3dpqrInversionpq <- runTestSuite(TestSuitevgL3dpqrInversionpq)
printTextProtocol(testResultL3dpqrInversionpq, showDetails = TRUE)
TestSuitevgL3dpqrRandom <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrRandom")
testResultL3dpqrRandom <- runTestSuite(TestSuitevgL3dpqrRandom)
printTextProtocol(testResultL3dpqrRandom, showDetails = TRUE)
TestSuitevgL3mean <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Mean")
testResultL3mean <- runTestSuite(TestSuitevgL3mean)
printTextProtocol(testResultL3mean, showDetails = TRUE)
TestSuitevgL3var <- defineTestSuite(name = "VG Level 3 RUnit Tests",
  dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Var")
testResultL3var <- runTestSuite(TestSuitevgL3var)
printTextProtocol(testResultL3var, showDetails = TRUE)
TestSuitevgL3skew <- defineTestSuite(name = "VG Level 3 RUnit Tests",
  dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Skew")
testResultL3skew <- runTestSuite(TestSuitevgL3skew)
printTextProtocol(testResultL3skew, showDetails = TRUE)
TestSuitevgL3kurt <- defineTestSuite(name = "VG Level 3 RUnit Tests",
  dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Kurt")
testResultL3kurt <- runTestSuite(TestSuitevgL3kurt)
printTextProtocol(testResultL3kurt, showDetails = TRUE)
TestSuitevgL3mom <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Mom")
testResultL3mom <- runTestSuite(TestSuitevgL3mom)
printTextProtocol(testResultL3mom, showDetails = TRUE)
TestSuitevgL3fittingNM <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitNM")
testResultL3fittingNM <- runTestSuite(TestSuitevgL3fittingNM)
printTextProtocol(testResultL3fittingNM, showDetails = TRUE)
TestSuitevgL3fittingnlm <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitnlm")
testResultL3fittingnlm <- runTestSuite(TestSuitevgL3fittingnlm)
printTextProtocol(testResultL3fittingnlm, showDetails = TRUE)
TestSuitevgL3fittingBFGS <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/testFunction/runit.vgLevel3Tests",
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitBFGS")
testResultL3fittingBFGS <- runTestSuite(TestSuitevgL3fittingBFGS)
printTextProtocol(testResultL3fittingBFGS, showDetails = TRUE)
library(RUnit)
library(VarianceGamma)
data(vgParam)
testParam <- vgSmallShape
n <- 10000
nqp <- 100
N <- 100
Nfit <- 100
errorThresholdM <- 0.1
errorThresholdV <- 0.001
errorThresholdS <- 0.001
errorThresholdK <- 0.001
errorThresholdMom <- 0.01
errorThresholddpqrI <- 0.001
thresholddpqrR <- 0.001
errorThresholdFit <- 0.5
TestSuitevgL3 <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = "G:/vg/vg0.2-2/unitTests/tests",
testFileRegexp = "runit.vgLevel3Tests.R")
testResultL3 <- runTestSuite(TestSuitevgL3)
printTextProtocol(testResultL3, showDetails = TRUE)
TestSuitevgL3dpqrInversionqp <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrInversionqp")
testResultL3dpqrInversionqp <- runTestSuite(TestSuitevgL3dpqrInversionqp)
printTextProtocol(testResultL3dpqrInversionqp, showDetails = TRUE)
TestSuitevgL3dpqrInversionpq <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrInversionpq")
testResultL3dpqrInversionpq <- runTestSuite(TestSuitevgL3dpqrInversionpq)
printTextProtocol(testResultL3dpqrInversionpq, showDetails = TRUE)
TestSuitevgL3dpqrRandom <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3dpqr.R", testFuncRegexp = "test.vgL3dpqrRandom")
testResultL3dpqrRandom <- runTestSuite(TestSuitevgL3dpqrRandom)
printTextProtocol(testResultL3dpqrRandom, showDetails = TRUE)
TestSuitevgL3mean <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Mean")
testResultL3mean <- runTestSuite(TestSuitevgL3mean)
printTextProtocol(testResultL3mean, showDetails = TRUE)
TestSuitevgL3var <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Var")
testResultL3var <- runTestSuite(TestSuitevgL3var)
printTextProtocol(testResultL3var, showDetails = TRUE)
TestSuitevgL3skew <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Skew")
testResultL3skew <- runTestSuite(TestSuitevgL3skew)
printTextProtocol(testResultL3skew, showDetails = TRUE)
TestSuitevgL3kurt <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Kurt")
testResultL3kurt <- runTestSuite(TestSuitevgL3kurt)
printTextProtocol(testResultL3kurt, showDetails = TRUE)
TestSuitevgL3mom <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3moments.R", testFuncRegexp = "test.vgL3Mom")
testResultL3mom <- runTestSuite(TestSuitevgL3mom)
printTextProtocol(testResultL3mom, showDetails = TRUE)
TestSuitevgL3fittingNM <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitNM")
testResultL3fittingNM <- runTestSuite(TestSuitevgL3fittingNM)
printTextProtocol(testResultL3fittingNM, showDetails = TRUE)
TestSuitevgL3fittingnlm <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitnlm")
testResultL3fittingnlm <- runTestSuite(TestSuitevgL3fittingnlm)
printTextProtocol(testResultL3fittingnlm, showDetails = TRUE)
TestSuitevgL3fittingBFGS <- defineTestSuite(name = "VG Level 3 RUnit Tests",
dirs = system.file("testFunction", package = "VarianceGamma"),
testFileRegexp = "runit.vgLevel3fit.R", testFuncRegexp = "test.vgL3fitBFGS")
testResultL3fittingBFGS <- runTestSuite(TestSuitevgL3fittingBFGS)
printTextProtocol(testResultL3fittingBFGS, showDetails = TRUE) |
pvaft<-function(m,n,STime,Event,p=1,data)
{
data<-na.omit(data)
nr<-nrow(data)
if(STime!="os"){
names(data)[names(data) == STime] <- "os"
}
if(Event!="death"){
names(data)[names(data) == Event] <- "death"
}
pnt<-NULL
for(i in m:n)
{
if(sum(data[,i])==0) {
pnt<-c(pnt,i)
}
}
if(!is.null(pnt)){
  data<-data[,-pnt]
}
count<-length(pnt)
n<-n-count
ht<-colnames(data)[m:n]
le<-length(ht)
covariates<-NULL
mtx<-matrix(nrow=le,ncol = 4)
colnames(mtx)<-c("Estimate","std.Error","z value","p_value")
rownames(mtx)<-ht
for(i in 1:le)
{
ftt<-aft(Surv(os,death==1)~get(ht[i]),data=data)
q1<-coef(summary(ftt))[1,]
mtx[i,]<-q1
}
mtx<-data.frame(mtx)
mtx<-mtx[order(mtx$p_value),]
fmtx<-subset(mtx,p_value<=p)
return(fmtx)
}
utils::globalVariables(c("na.omit","death","p_value")) |
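## Usage sketch (hypothetical inputs): assumes an aft() fitting function is
## available, e.g. from an AFT survival package, and a data frame `expr_df`
## whose columns 4:10 hold covariates plus survival time/status columns.
# res <- pvaft(m = 4, n = 10, STime = "surv_time", Event = "status",
#              p = 0.05, data = expr_df)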
encoded_text_to_latex <-
function(x, encoding = c("latin1", "latin2", "latin9", "UTF-8", "utf8"))
{
encoding <- match.arg(encoding)
do_latin1 <- function(x) {
xx <- charToRaw(x)
paste(latin1table[as.integer(xx)], collapse="")}
do_latin2 <- function(x) {
xx <- charToRaw(x)
paste(latin2table[as.integer(xx)], collapse="")}
do_latin9 <- function(x) {
xx <- charToRaw(x)
paste(latin9table[as.integer(xx)], collapse="")}
do_utf8 <- function(x) {
xx <- utf8ToInt(x)
        y <- rep.int("?", length(xx))
        y[xx < 512] <- utf8table[xx[xx < 512]]
y[xx == 0x02C6] <- "{\\textasciicircum}"
y[xx == 0x02C7] <- "{\\textasciicaron}"
        y[xx == 0x02DC] <- "{\\textasciitilde}"
y[xx == 0x02D8] <- "{\\textasciibreve}"
y[xx == 0x02D9] <- "{\\textperiodcentered}"
y[xx == 0x02DD] <- "{\\textacutedbl}"
y[xx == 0x200C] <- "{\\textcompwordmark}"
y[xx == 0x2018] <- "{\\textquoteleft}"
y[xx == 0x2019] <- "{\\textquoteright}"
y[xx == 0x201C] <- "{\\textquotedblleft}"
y[xx == 0x201D] <- "{\\textquotedblright}"
y[xx == 0x2020] <- "{\\textdagger}"
y[xx == 0x2022] <- "{\\textbullet}"
y[xx == 0x2026] <- "{\\textellipsis}"
y[xx == 0x20AC] <- "{\\texteuro}"
paste(y, collapse="")
}
as.vector(switch(encoding,
"latin1" = sapply(x, do_latin1),
"latin2" = sapply(x, do_latin2),
"latin9" = sapply(x, do_latin9),
"UTF-8" = sapply(x, do_utf8),
"utf8" = sapply(x, do_utf8),
stop("unimplemented encoding")
))
}
latin1table <- c(
rep.int("?", 31),
rawToChar(as.raw(seq.int(32, 126)), multiple=TRUE), "?",
rep.int("?", 32),
"{\\nobreakspace}", "{\\textexclamdown}", "{\\textcent}", "{\\textsterling}", "{\\textcurrency}", "{\\textyen}", "{\\textbrokenbar}", "{\\S}",
'\\"{}', "{\\textcopyright}", "{\\textordfeminine}", "{\\guillemotleft}", "{\\textlnot}", "\\-", "{\\textregistered}", "{\\a={}}",
"{\\textdegree}", "{\\textpm}", "{\\mathtwosuperior}", "{\\maththreesuperior}", "{\\a'{}}", "{\\textmu}", "{\\P}", "{\\textperiodcentered}",
"{\\c\\ }", "{\\mathonesuperior}", "{\\textordmasculine}", "{\\guillemotright}", "{\\textonequarter}", "{\\textonehalf}", "{\\textthreequarters}", "{\\textquestiondown}",
"{\\a`A}", "{\\a'A}", "{\\^A}", "{\\~A}", '{\\"A}', "{\\r A}", "{\\AE}", "{\\c C}",
"{\\a`E}", "{\\a'E}", "{\\^E}", "{\\a`I}", "{\\a'I}", "{\\^I}", "{\\~I}", '{\\"I}',
"{\\DH}", "{\\~N}", "{\\a`O}", "{\\a'O}", "{\\^O}", "{\\~O}", '{\\"O}', "{\\texttimes}",
"{\\O}", "{\\a`U}", "{\\a'U}", "{\\^U}", '{\\"U}', "{\\a`Y}", "{\\TH}", "{\\ss}",
"{\\a`a}", "{\\a'a}", "{\\^a}", "{\\~a}", '{\\"a}', "{\\r a}", "{\\ae}", "{\\c c}",
"{\\a`e}", "{\\a'e}", "{\\^e}", '{\\"e}',"{\\a`\\i}", "{\\a'\\i}", "{\\^\\i}", '{\\"\\i}',
"{\\dh}", "{\\~n}", "{\\a`o}", "{\\a'o}", "{\\^o}", "{\\~o}", '{\\"o}', "{\\textdiv}",
"{\\o}", "{\\a`u}", "{\\a'u}", "{\\^u}", '{\\"u}', "{\\a`y}", "{\\th}", '{\\"y}'
)
latin2table <- c(
    rep.int("?", 31),
    rawToChar(as.raw(seq.int(32, 126)), multiple=TRUE), "?",
    rep.int("?", 32),
    "{\\nobreakspace}", "{\\k A}", "{\\u{}}", "{\\L}", "{\\textcurrency}", "{\\v L}", "{\\a'S}", "{\\S}",
    '\\"{}', "{\\v S}", "{\\c S}", "{\\v T}", "{\\\'Z}", "\\-", "{\\v Z}", "{\\.Z}",
    "{\\textdegree}", "{\\k a}", "{\\k\\ }", "{\\l}", "{\\a'{}}", "{\\v l}", "{\\a's}", "{\\v{}}",
    "{\\c\\ }", "{\\v s}", "{\\c s}", "{\\v t}", "{\\'z}", "{\\H{}}", "{\\v z}", "{\\.z}",
    "{\\a'R}", "{\\a'A}", "{\\^A}", "{\\u A}", '{\\"A}', "{\\'L}", "{\\a'C}", "{\\c C}",
    "{\\v C}", "{\\a'E}", "{\\k E}", '{\\"E}', "{\\v E}", "{\\'I}", "{\\^I}", '{\\v D}',
    "{\\DJ}", "{\\a'N}", "{\\v N}", "{\\a'O}", "{\\^O}", "{\\H O}", '{\\"O}', "{\\texttimes}",
    "{\\v R}", "{\\r U}", "{\\a'U}", "{\\H U}", '{\\"U}', "{\\a`Y}", "{\\c T}", "{\\ss}",
    "{\\a'r}", "{\\a'a}", "{\\^a}", "{\\u a}", '{\\"a}', "{\\'l}", "{\\a'c}", "{\\c c}",
    "{\\v c}", "{\\a'e}", "{\\k e}", '{\\"e}', "{\\v e}", "{\\'\\i}", "{\\^\\i}", '{\\v d}',
    "{\\dj}", "{\\a'n}", "{\\v n}", "{\\a'o}", "{\\^o}", "{\\H o}", '{\\"o}', "{\\textdiv}",
    "{\\v r}", "{\\r u}", "{\\a'u}", "{\\H u}", '{\\"u}', "{\\a`y}", "{\\c t}", '{\\.{}}'
)
latin9table <- c(
    rep.int("?", 31),
    rawToChar(as.raw(seq.int(32, 126)), multiple=TRUE), "?",
    rep.int("?", 32),
    "{\\nobreakspace}", "{\\textexclamdown}", "{\\textcent}", "{\\textsterling}", "{\\texteuro}", "{\\textyen}", "{\\v S}", "{\\S}",
    '{\\v s}', "{\\copyright}", "{\\textordfeminine}", "{\\guillemotleft}", "{\\textlnot}", "\\-", "{\\textregistered}", "{\\a={}}",
    "{\\textdegree}", "{\\textpm}", "{\\mathtwosuperior}", "{\\maththreesuperior}", "{\\v Z}", "{\\textmu}", "{\\P}", "{\\textperiodcentered}",
    "{\\v z}", "{\\mathonesuperior}", "{\\textordmasculine}", "{\\guillemotright}", "{\\OE}", "{\\oe}", '{\\"Y}', "{\\textquestiondown}",
    "{\\a`A}", "{\\a'A}", "{\\^A}", "{\\~A}", '{\\"A}', "{\\r A}", "{\\AE}", "{\\c C}",
    "{\\a`E}", "{\\a'E}", "{\\^E}", "{\\a`I}", "{\\a'I}", "{\\^I}", "{\\~I}", '{\\"I}',
    "{\\DH}", "{\\~N}", "{\\a`O}", "{\\a'O}", "{\\^O}", "{\\~O}", '{\\"O}', "{\\texttimes}",
    "{\\O}", "{\\a`U}", "{\\a'U}", "{\\^U}", '{\\"U}', "{\\a`Y}", "{\\TH}", "{\\ss}",
    "{\\a`a}", "{\\a'a}", "{\\^a}", "{\\~a}", '{\\"a}', "{\\r a}", "{\\ae}", "{\\c c}",
    "{\\a`e}", "{\\a'e}", "{\\^e}", '{\\"e}',"{\\a`\\i}", "{\\a'\\i}", "{\\^\\i}", '{\\"\\i}',
    "{\\dh}", "{\\~n}", "{\\a`o}", "{\\a'o}", "{\\^o}", "{\\~o}", '{\\"o}', "{\\textdiv}",
    "{\\o}", "{\\a`u}", "{\\a'u}", "{\\^u}", '{\\"u}', "{\\a`y}", "{\\th}", '{\\"y}'
)
utf8table <- c(latin1table, rep.int("?", 256))
utf8table[0x0102:0x107] <-
c("{\\u A}","{\\u a}", "{\\k A}", "{\\k a}", "{\\a'C}", "{\\a'c}")
utf8table[0x010C:0x111] <-
c( "{\\v C}","{\\v c}","{\\v D}","{\\v d}","{\\DJ}","{\\dj}")
utf8table[0x0118:0x11B] <- c("{\\k E}","{\\k e}", "{\\v E}","{\\v e}")
utf8table[0x011E:0x11F] <- c("{\\u G}","{\\u g}")
utf8table[0x0130:0x131] <- c("{\\.I}","{\\i}")
utf8table[0x0139:0x13A] <- c("{\\a'L}","{\\a'l}")
utf8table[0x013D:0x13E] <- c("{\\v L}","{\\v l}")
utf8table[0x0141L:0x144] <- c("{\\L}","{\\l}","{\\a'N}","{\\a'n}")
utf8table[0x0147:0x14B] <- c("{\\v N}","{\\v n}","?","{\\NG}","{\\ng}")
utf8table[0x0150:0x155] <- c("{\\H O}","{\\H o}","{\\OE}","{\\oe}","{\\a'R}","{\\a'r}")
utf8table[0x0158:0x15B] <- c("{\\v R}","{\\v r}","{\\a'S}","{\\a's}")
utf8table[0x015E:0x165] <- c("{\\c S}","{\\c s}","{\\v S}","{\\v s}",
"{\\c T}","{\\c t}","{\\v T}","{\\v t}")
utf8table[0x016E:0x171] <- c("{\\r U}","{\\r u}","{\\H U}","{\\H u}")
utf8table[0x0178:0x17E] <- c('{\\"Y}',"{\\a'Z}","{\\a'z}","{\\.Z}", "{\\.z}","{\\v Z}","{\\v z}")
utf8table[0x0192] <- "{\\textflorin}" |
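## Quick check: a Latin-1 byte string is mapped to its LaTeX escapes; the
## 0xE7 byte (c with cedilla) becomes {\c c}.
encoded_text_to_latex("fa\xe7ade", encoding = "latin1")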
epsAg <- function(wavelength, epsilon.inf = 4,
lambda.p = 282, mu.p = 17000){
data.frame(wavelength=wavelength, epsilon=
epsilon.inf*(1 - 1 / (lambda.p^2*(1/wavelength^2 + 1i / (mu.p*wavelength)))))
}
epsAu <- function(wavelength, epsilon.infty = 1.54,
lambda.p = 177.5, mu.p = 14500,
A1 = 1.27, phi1 = -pi/4, lambda1 = 470, mu1 = 1900,
A2 = 1.1, phi2 = -pi/4, lambda2 = 325, mu2 = 1060){
eps.drude <-
epsilon.infty*(1 - 1 / (lambda.p^2*(1/wavelength^2 + 1i / (mu.p*wavelength))))
data.frame(wavelength=wavelength, epsilon=
eps.drude + A1 / lambda1 * (exp(1i*phi1) / (1/lambda1 - 1/wavelength - 1i/mu1) +
exp(-1i*phi1) / (1/lambda1 + 1/wavelength + 1i/mu1)) +
A2 / lambda2 * (exp(1i*phi2) / (1/lambda2 - 1/wavelength - 1i/mu2) +
exp(-1i*phi2) / (1/lambda2 + 1/wavelength + 1i/mu2))
)
} |
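## Quick sanity check: Drude-model permittivity of silver over the visible
## range (wavelengths in nm, matching the default parameters above).
epsAg(seq(400, 800, by = 100))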
Mean.sigma.test <- function(x, sigma, n = length(x), mu0, alternative = "two.sided",
                            alpha = 0.05, plot = TRUE, lwd = 1) {
if (length(x) != 1L) {
bar_x <- mean(x)
} else {
bar_x <- x
}
  DNAME <- paste0(deparse(substitute(x)), ", null mean ", deparse(substitute(mu0)))
NVAL <- mu0
STATISTIC <- (bar_x - mu0) / (sigma / sqrt(n))
if (alternative == "two.sided")
PVALUE <- 2 * pnorm(abs(STATISTIC), lower.tail = FALSE)
else
PVALUE <- pnorm(STATISTIC, lower.tail = (alternative == "less"))
RR <- paste0("RR = ", switch(alternative,
two.sided = paste0("(-\U221E, ", round(qnorm(alpha/2), 5), "] U [",
round(qnorm(1 - alpha/2), 5), ", +\U221E)"),
greater = paste0("[", round(qnorm(1 - alpha), 5), ", +\U221E)"),
less = paste0("(-\U221E, ", round(qnorm(alpha), 5), "]")))
if (plot) {
curve(dnorm(x), from = min(-3, -abs(STATISTIC) - 0.1), to = max(3, abs(STATISTIC) + 0.1),
main = 'T follows N(0,1)', axes = FALSE, xlab = "", ylab = "", lwd = lwd)
u <- par("usr")
rect(u[1], 0, u[2], u[4])
axis(2)
legend("topright", c("p-value", "RR"), bty = "n", pch = c(22,NA), lty = c(NA,1), lwd = c(1,2),
col = c("blue", "red"), pt.bg = adjustcolor('blue', alpha.f = 0.25), pt.cex = 2, seg.len = 1, cex = 1)
if (alternative == "two.sided") {
abline(h = 0, lwd = lwd + 1)
lines(c(u[1], qnorm(alpha / 2)), c(0,0), col = "red", lwd = lwd + 1)
lines(c(qnorm(1 - alpha / 2), u[2]), c(0,0), col = "red", lwd = lwd + 1)
axis(1, pos = 0, col = NA, col.ticks = 1,
at = c(0, STATISTIC, -1*STATISTIC),
labels = c(0, expression('T'[obs]), expression(-'T'[obs])))
segments(x0 = c(qnorm(alpha / 2), qnorm(1 - alpha / 2)), y0 = -u[4]*0.015, y1 = u[4]*0.015, col = "red", lwd = lwd + 1)
segments(x0 = c(qnorm(alpha / 2), qnorm(1 - alpha / 2)), y0 = rep(c(-u[4]*0.015, u[4]*0.015), each = 2),
x1 = c(qnorm(alpha / 2) - u[2]*0.015, qnorm(1 - alpha / 2) + u[2]*0.015),
rep(c(-u[4]*0.015, u[4]*0.015), each = 2), col = "red", lwd = lwd + 1)
if ((abs(STATISTIC) - qnorm(1 - alpha / 2)) > 0.3) {
axis(1, pos = 0, col = NA, col.ticks = NA,
at = c(qnorm(alpha / 2), qnorm(1 - alpha / 2)),
labels = c(expression(-'z'[1-alpha/2]), expression('z'[1-alpha/2])))
mtext("=", side = 1, line = 1.6, at = c(qnorm(alpha / 2), qnorm(1 - alpha / 2)), las = 2)
mtext(round(qnorm(alpha / 2), 2), side = 1, line = 2.5, at = qnorm(alpha / 2))
mtext(round(qnorm(1 - alpha / 2), 2), side = 1, line = 2.5, at = qnorm(1 - alpha / 2))
}
if (PVALUE > .Machine$double.eps) {
segments(x0 = qnorm(1 - PVALUE / 2), y0 = 0,
x1 = qnorm(1 - PVALUE / 2), y1 = dnorm(qnorm(1 - PVALUE / 2)),
col = 'blue', lwd = 1)
x_vector <- seq(qnorm(1 - PVALUE / 2), 4, length = 100)
y_vector <- dnorm(x_vector)
polygon(c(x_vector, rev(x_vector)), c(y_vector, rep(0, length(y_vector))),
col = adjustcolor('blue', alpha.f = 0.25), border = NA)
mtext("=", side = 1, line = 1.6, at = qnorm(1 - PVALUE / 2), las = 2)
mtext(round(qnorm(1 - PVALUE / 2), 2), side = 1, line = 2.5, at = qnorm(1 - PVALUE / 2))
segments(x0 = qnorm(PVALUE / 2), y0 = 0,
x1 = qnorm(PVALUE / 2), y1 = dnorm(qnorm(PVALUE / 2)),
col = 'blue', lwd = 1)
x_vector <- seq(-4, qnorm(PVALUE / 2), length = 100)
y_vector <- dnorm(x_vector)
polygon(c(x_vector, rev(x_vector)), c(y_vector, rep(0, length(y_vector))),
col = adjustcolor('blue', alpha.f = 0.25), border = NA)
mtext("=", side = 1, line = 1.6, at = qnorm(PVALUE / 2), las = 2)
mtext(round(qnorm(PVALUE / 2), 2), side = 1, line = 2.5, at = qnorm(PVALUE / 2))
}
} else {
if (alternative == "less") {
abline(h = 0, lwd = lwd + 1)
lines(c(u[1], qnorm(alpha)), c(0,0), col = "red", lwd = lwd + 1)
axis(1, pos = 0, col = NA, col.ticks = 1,
at = c(0, STATISTIC),
labels = c(0, expression('T'[obs])))
segments(x0 = qnorm(alpha), y0 = -u[4]*0.015, y1 = u[4]*0.015, col = "red", lwd = lwd + 1)
segments(x0 = qnorm(alpha), y0 = c(-u[4]*0.015, u[4]*0.015), x1 = qnorm(alpha) - u[2]*0.015,
c(-u[4]*0.015, u[4]*0.015), col = "red", lwd = lwd + 1)
if (abs(STATISTIC - qnorm(alpha)) > 0.3) {
axis(1, pos = 0, col = NA, col.ticks = NA, at = qnorm(alpha), labels = expression(-'z'[1-alpha]))
mtext("=", side = 1, line = 1.6, at = qnorm(alpha), las = 2)
mtext(round(qnorm(alpha), 2), side = 1, line = 2.5, at = qnorm(alpha))
}
} else {
abline(h = 0, lwd = lwd + 1)
lines(c(qnorm(1 - alpha), u[2]), c(0,0), col = "red", lwd = lwd + 1)
axis(1, pos = 0, col = NA, col.ticks = 1,
at = c(0, STATISTIC),
labels = c(0, expression('T'[obs])))
segments(x0 = qnorm(1 - alpha), y0 = -u[4]*0.015, y1 = u[4]*0.015, col = "red", lwd = lwd + 1)
segments(x0 = qnorm(1 - alpha ), y0 = c(-u[4]*0.015, u[4]*0.015), x1 = qnorm(1 - alpha) + u[2]*0.015,
c(-u[4]*0.015, u[4]*0.015), col = "red", lwd = lwd + 1)
if (abs(STATISTIC - qnorm(1 - alpha)) > 0.3) {
axis(1, pos = 0, col = NA, col.ticks = NA, at = qnorm(1 - alpha), labels = expression('z'[1-alpha]))
mtext("=", side = 1, line = 1.6, at = qnorm(1 - alpha), las = 2)
mtext(round(qnorm(1 - alpha), 2), side = 1, line = 2.5, at = qnorm(1 - alpha))
}
}
if (PVALUE > .Machine$double.eps) {
segments(x0 = STATISTIC, y0 = 0, x1 = STATISTIC, y1 = dnorm(STATISTIC),
col = 'blue', lwd = 1)
if (alternative == "less") {
x_vector <- seq(-4, STATISTIC, length = 100)
} else {
x_vector <- seq(STATISTIC, 4, length = 100)
}
y_vector <- dnorm(x_vector)
polygon(c(x_vector, rev(x_vector)), c(y_vector, rep(0, length(y_vector))),
col = adjustcolor('blue', alpha.f = 0.25), border = NA)
mtext("=", side = 1, line = 1.6, at = STATISTIC, las = 2)
mtext(round(STATISTIC, 2), side = 1, line = 2.5, at = STATISTIC)
}
}
}
METHOD <- "Test for the mean of a Normal population with known variance"
DISTNAME <- "\U2208 N(0,1)"
STATFORMULA <- "(bar.x - \U03BC\U2080) / (\U03C3 / \U221An)"
ESTIMATE <- setNames(bar_x, "\U03BC")
PARAMETER <- NULL
names(NVAL) <- names(ESTIMATE)
names(STATISTIC) <- "T"
unit <- "units"
RVAL <- list(
statistic = STATISTIC,
parameter = PARAMETER,
p.value = as.numeric(PVALUE),
estimate = ESTIMATE,
null.value = NVAL,
alternative = alternative,
method = METHOD,
data.name = DNAME,
alpha = alpha,
dist.name = DISTNAME,
statformula = STATFORMULA,
reject.region = RR,
unit = unit
)
class(RVAL) <- c("lstest", "htest")
return(RVAL)
} |
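## Usage sketch (simulated data): two-sided z-test of H0: mu = 0 with known
## sigma = 2; plot = FALSE keeps the example non-graphical.
set.seed(1)
zx <- rnorm(30, mean = 0.5, sd = 2)
Mean.sigma.test(zx, sigma = 2, mu0 = 0, alternative = "two.sided", plot = FALSE)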
finterp <- function (.z, ...)
UseMethod("finterp")
finterp.default <- function (.z, .envir = parent.frame(), .formula = FALSE,
.vector = TRUE, .args = NULL, .start = 1,
.name = NULL, .expand = TRUE, .intercept = TRUE,
.old = NULL, .response = FALSE, ...)
{
if (!inherits(.z, "formula"))
return(NULL)
if (is.name(.envir)) {
if (is.null(.name))
.name <- as.character(.envir)
.envir <- eval(.envir)
}
if (!is.environment(.envir)) {
if (is.null(.name))
.name <- paste(deparse(substitute(.envir)))
if (inherits(.envir, "data.frame"))
return(finterp.data.frame(.z, .envir, .formula, .vector,
.args, .start, .name, .expand, .intercept, .old))
}
.pars <- .range <- NULL
if (!is.null(.old)) {
if (!is.list(.old))
.old <- list(.old)
for (.j in .old) {
if (!inherits(.j, "formulafn"))
                stop("objects in .old must have class 'formulafn'")
.pars <- c(.pars, attr(.j, "parameters"))
.range <- c(.range, attr(.j, "range")[1]:attr(.j,
"range")[2])
}
if (.start <= max(.range))
warning("possible conflict in vector indexing - check .start")
}
if (!is.null(.args) && !is.character(.args))
stop(".args must be a character string")
.zz <- fmobj(.z)
.ch <- .zz$formula
.mem <- .zz$objects
.fcn <- .zz$functions
if ("$" %in% .fcn)
stop("sublists not allowed (attach dataframes and use variable names)")
.ex <- .zz$covariates
.fac <- .zz$factors
.local <- .zz$local
rm(.zz)
if (length(.mem) > 0) {
.un <- unique(.mem[!.ex & !.fac & !.local])
if (length(unique(.mem[.ex | .fac | .local])) == 0 &&
length(.un) == 0)
warning("finterp.default: no variables found")
}
if (length(.mem) == 0 || all(.ex | .fac | .local)) {
if (.formula)
return(.z)
else {
if (any("offset" %in% .fcn))
stop("offset not allowed")
.mt <- terms(.z)
if (is.numeric(.mt[[2]])) {
.dm <- matrix(1)
colnames(.dm) <- "(Intercept)"
}
else {
.dm <- model.matrix(.mt, model.frame(.mt, .envir))
if (!.intercept)
.dm <- .dm[, -1, drop = FALSE]
}
.fna <- function(.p) as.vector(.dm %*% .p[attr(.fna,
"range")[1]:attr(.fna, "range")[2]])
attributes(.fna) <- list(formula = .z, model = colnames(.dm),
covariates = if (length(.mem) > 0) unique(.mem[.ex |
.fac]) else NULL, parameters = paste("p[",
1:dim(.dm)[2], "]", sep = ""), range = c(.start,
.start + dim(.dm)[2] - 1), class = "formulafn")
.obj <- ls(all.names = TRUE)
rm(list = .obj[.obj != ".fna" & .obj != ".dm"])
rm(.obj)
return(.fna)
}
}
if (!is.null(.fac) && any(.fac))
stop(paste("covariates in formulae with unknowns must not be factors\ncheck",
.mem[.fac]))
.fna <- function(.p) eval(attr(.fna, "model"))
if (.vector) {
if (!is.null(.args)) {
.tmp <- match(.args, .un)
if (all(!is.na(.tmp)))
.un <- .un[-.tmp]
.par <- "alist(.p="
for (.j in 1:length(.args)) {
.par <- paste(.par, ",", collapse = "")
.par <- paste(.par, .args[.j], "=", collapse = "")
}
.par <- paste(.par, ")", collapse = "")
formals(.fna) <- eval(parse(text = .par))
}
if (!is.null(.old)) {
.j <- match(.pars, .un)
.un <- .un[-.j]
.pars <- .pars[!is.na(.j)]
.range <- .range[!is.na(.j)]
for (.j in 1:length(.pars)) .ch <- gsub(paste(" ",
.pars[.j], " ", sep = ""), paste(" .p[", .range[.j],
"] ", sep = ""), .ch)
}
if (length(.un) > 0)
for (.j in 1:length(.un)) .ch <- gsub(paste(" ",
.un[.j], " ", sep = ""), paste(" .p[", .start +
.j - 1, "] ", sep = ""), .ch)
}
else {
.par <- "alist("
for (.j in 1:length(.un)) {
if (.j > 1)
.par <- paste(.par, ",", collapse = "")
.par <- paste(.par, .un[.j], "=", collapse = "")
}
.par <- paste(.par, ")", collapse = "")
formals(.fna) <- eval(parse(text = .par))
}
attributes(.fna) <- list(formula = .z, model = parse(text = .ch),
parameters = .un, common = .pars, covariates = unique(.mem[.ex]),
range = c(.start, .start + length(.un) - 1), class = "formulafn")
.obj <- ls(all.names = TRUE)
rm(list = .obj[.obj != ".fna"])
rm(.obj)
return(.fna)
}
finterp.data.frame <- function (.z, .envir = NULL, .formula = FALSE, .vector = TRUE,
.args = NULL, .start = 1, .name = NULL, .expand = NULL, .intercept = TRUE,
.old = NULL, ...)
{
if (!inherits(.z, "formula"))
return(NULL)
.pars <- .range <- NULL
if (!is.null(.old)) {
if (!is.list(.old))
.old <- list(.old)
for (.j in .old) {
if (!inherits(.j, "formulafn"))
                stop("objects in .old must have class 'formulafn'")
.pars <- c(.pars, attr(.j, "parameters"))
.range <- c(.range, attr(.j, "range")[1]:attr(.j,
"range")[2])
}
if (.start <= max(.range))
warning("possible conflict in vector indexing - check .start")
}
if (!is.null(.args) && !is.character(.args))
stop(".args must be a character string")
if (is.name(.envir)) {
if (is.null(.name))
.name <- as.character(.envir)
.envir <- eval(.envir)
}
.ndata <- if (is.null(.name))
paste(deparse(substitute(.envir)))
else .name
.cn <- colnames(.envir)
.ex1 <- NULL
.zz <- fmobj(.z)
.ch <- .zz$formula
.mem <- .zz$objects
.fcn <- .zz$functions
.local <- .zz$local
rm(.zz)
if (length(.mem) > 0) {
.ex1 <- match(.mem, .cn)
.un <- unique(.mem[is.na(.ex1) & !.local])
if (length(unique(.mem[!is.na(.ex1)])) == 0 && length(.un) ==
0)
warning("finterp.data.frame: no variables found")
}
.ex1a <- if (is.null(.ex1))
NULL
else .ex1[!is.na(.ex1)]
if (length(.ex1a) > 0)
for (.j in 1:length(.ex1a)) .ch <- gsub(paste(" ", .cn[.ex1a[.j]],
" ", sep = ""), paste(" ", .ndata, "$", .cn[.ex1a[.j]],
sep = ""), .ch)
if (is.null(.ex1) || all(!is.na(.ex1))) {
if (.formula)
return(.z)
else {
if (any("offset" %in% .fcn))
stop("offset not allowed")
.ch <- as.formula(paste("~", .ch))
.mt <- terms(.ch)
if (is.numeric(.mt[[2]])) {
if (!.intercept)
return(NULL)
.n <- dim(.envir)[1]
.dm <- matrix(1)
colnames(.dm) <- "(Intercept)"
.fna <- function(.p) rep(.p[attr(.fna, "range")[1]],
.n)
}
else {
.dm <- model.matrix(.mt, model.frame(.mt, data = .envir))
if (!.intercept)
.dm <- .dm[, -1, drop = FALSE]
.fna <- function(.p) as.vector(.dm %*% .p[attr(.fna,
"range")[1]:attr(.fna, "range")[2]])
}
attributes(.fna) <- list(formula = .z, model = colnames(.dm),
covariates = if (length(.mem) > 0) unique(.mem[!is.na(.ex1)]) else NULL,
parameters = paste("p[", 1:dim(.dm)[2], "]",
sep = ""), range = c(.start, .start + dim(.dm)[2] -
1), class = "formulafn")
.obj <- ls(all.names = TRUE)
rm(list = .obj[.obj != ".i" & .obj != ".fna" & .obj !=
".dm" & .obj != ".n"])
rm(.obj)
return(.fna)
}
}
if (length(.ex1a) > 0)
for (.j in 1:length(.ex1a)) if (is.factor(.envir[, .ex1a[.j]]))
stop(paste(colnames(.envir)[.ex1a[.j]], "is a factor variable"))
.fna <- function(.p) eval(attr(.fna, "model"))
if (!is.null(.args)) {
.tmp <- match(.args, .un)
if (all(!is.na(.tmp)))
.un <- .un[-.tmp]
.par <- "alist(.p="
for (.j in 1:length(.args)) {
.par <- paste(.par, ",", collapse = "")
.par <- paste(.par, .args[.j], "=", collapse = "")
}
.par <- paste(.par, ")", collapse = "")
formals(.fna) <- eval(parse(text = .par))
}
if (!is.null(.old)) {
.j <- match(.pars, .un)
.un <- .un[-.j]
.pars <- .pars[!is.na(.j)]
.range <- .range[!is.na(.j)]
for (.j in 1:length(.pars)) .ch <- gsub(paste(" ", .pars[.j],
" ", sep = ""), paste(" .p[", .range[.j], "] ", sep = ""),
.ch)
}
if (length(.un) > 0)
for (.j in 1:length(.un)) .ch <- gsub(paste(" ", .un[.j],
" ", sep = ""), paste(" .p[", .start + .j - 1, "] ",
sep = ""), .ch)
attributes(.fna) <- list(formula = .z, model = parse(text = .ch),
parameters = .un, common = .pars, covariates = unique(.mem[!is.na(.ex1)]),
range = c(.start, .start + length(.un) - 1), class = "formulafn")
.obj <- ls(all.names = TRUE)
rm(list = .obj[.obj != ".fna"])
rm(.obj)
return(.fna)
} |
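## Usage sketch (assumes the companion fmobj() helper used above is
## available): unknown parameters a and b are mapped to .p[1] and .p[2].
# tt <- 1:5
# fn <- finterp(~ a + b * tt)
# fn(c(2, 0.5))  # evaluates 2 + 0.5 * tt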
"multi_data" |
"AdaptPred" <-
function(pointsin,X,coeff,nbrs,remove,intercept,neighbours){
details<-NULL
results<-list()
w<-list()
intercept<-FALSE
out1<-LinearPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[1]]<-w1
details[1]<-coeff[remove]-pred1
out1<-QuadPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[2]]<-w1
details[2]<-coeff[remove]-pred1
out1<-CubicPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[3]]<-w1
details[3]<-coeff[remove]-pred1
intercept<-TRUE
out1<-LinearPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[4]]<-w1
details[4]<-coeff[remove]-pred1
out1<-QuadPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[5]]<-w1
details[5]<-coeff[remove]-pred1
out1<-CubicPred(pointsin,X,coeff,nbrs,remove,intercept)
pred1<-out1$pred
w1<-out1$weights
w[[6]]<-w1
details[6]<-coeff[remove]-pred1
minindex<-order(abs(details))[1]
pred<-coeff[remove]-details[minindex]
coeff[remove]<-details[minindex]
int<-TRUE
scheme<-NULL
if(minindex<=3){
int<-FALSE
}
if((minindex==1)|(minindex==4)){
scheme<-"Linear"
}
if((minindex==2)|(minindex==5)){
scheme<-"Quad"
}
if((minindex==3)|(minindex==6)){
scheme<-"Cubic"
}
weights<-w[[minindex]]
return(list(weights=weights,pred=pred,coeff=coeff,int=int,scheme=scheme,details=details,minindex=minindex))
} |
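## Usage sketch (assumes the LinearPred/QuadPred/CubicPred helpers called
## above, as in an adlift-style lifting transform):
# out <- AdaptPred(pointsin, X, coeff, nbrs, remove, intercept, neighbours)
# out$scheme  # the prediction scheme with the smallest detail coefficient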
array_branch <- function(array, margin = NULL) {
dims <- dim(array) %||% length(array)
margin <- margin %||% seq_along(dims)
if (length(margin) == 0) {
list(array)
} else if (is.null(dim(array))) {
if (!identical(as.integer(margin), 1L)) {
abort(sprintf(
"`margin` must be `NULL` or `1` with 1D arrays, not `%s`",
toString(margin)
))
}
as.list(array)
} else {
flatten(apply(array, margin, list))
}
}
array_tree <- function(array, margin = NULL) {
dims <- dim(array) %||% length(array)
margin <- margin %||% seq_along(dims)
if (length(margin) > 1) {
new_margin <- ifelse(margin[-1] > margin[[1]], margin[-1] - 1, margin[-1])
apply(array, margin[[1]], array_tree, new_margin)
} else {
array_branch(array, margin)
}
} |
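## Quick demonstration (assumes the rlang/purrr helpers `%||%`, abort() and
## flatten() used above are available):
m <- matrix(1:6, nrow = 2)
array_branch(m, 1)  # list of the two row vectors
array_tree(m)       # nested list: rows first, then individual elements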
bv_dummy <- function(
mode = 1, sd = 1,
min = 0.0001, max = 5,
fun) {
sd <- num_check(sd, min = 0 + 1e-16, max = Inf,
msg = "Parameter sd misspecified.")
fun <- match.fun(fun)
return(
dummy(mode = mode, min = min, max = max, sd = sd, fun = fun,
coef = gamma_coef(mode, sd))
)
}
dummy <- function(
mode = 1,
min = 0.0001, max = 5,
...) {
mode <- num_check(mode, min = 0, max = Inf,
msg = "Invalid value for mode (outside of [0, Inf]).")
min <- num_check(min, min = 0, max = max - 1e-16,
msg = "Invalid value for min (outside of [0, max)).")
max <- num_check(max, min = min + 1e-16, max = Inf,
msg = "Invalid value for max (outside of (min, Inf]).")
out <- structure(list(
"mode" = mode, "min" = min, "max" = max, ...), class = "bv_dummy")
return(out)
} |
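## Usage sketch (assumes the package-internal helpers num_check() and
## gamma_coef() are available; the dummy-observation function is illustrative):
# bv_dummy(mode = 1, sd = 1, fun = function(Y, lags, par) Y)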
ggm.DerivOPT1 <- function(params, s, n, idx, lambda, pen, VC, w.alasso, gamma, a){
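  ## Penalised Gaussian graphical model objective: `params` fills the lower
  ## triangle of the precision matrix omega, which is symmetrised and, if
  ## necessary, projected to positive definiteness before evaluating
  ## -(n/2) * (log det(omega) - tr(omega %*% s)) plus a quadratic penalty.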
params[idx] <- esp.tr(params[idx], "N")$vrb
p <- ncol(s)
p1 <- length(params)
omega <- matrix(0, p, p)
omega[lower.tri(omega, diag = TRUE)] <- params
omega <- t(omega) + omega - diag(diag(omega))
res.omega <- PDef(omega)
omega <- res.omega$res
sigma <- res.omega$res.inv
countPD <- VC$my.env$countPD
if(res.omega$check.eigen == TRUE){ params <- omega[lower.tri(omega, diag = TRUE)]; countPD <- countPD + 1; VC$my.env$countPD <- countPD}
sc.f <- n*0.5
ll <- ( determinant(omega)$modulus[1]-sum(omega*s) )*sc.f
res <- -ll
S <- sc.f*Dpens(params, type = pen, lambda, w.alasso, gamma, a)
diag(S)[idx] <- 0
S1 <- 0.5*crossprod(params, S)%*%params
Sres <- res
res <- Sres + S1
res
} |
new_directed_factor_model <- function(
X, S, Y,
...,
subclass = character()) {
ellipsis::check_dots_unnamed()
n <- nrow(X)
k1 <- ncol(X)
d <- nrow(Y)
k2 <- ncol(Y)
model <- list(
X = X,
S = S,
Y = Y,
n = n,
k1 = k1,
d = d,
k2 = k2
)
class(model) <- c(subclass, "directed_factor_model")
model
}
validate_directed_factor_model <- function(x) {
values <- unclass(x)
if (any(values$X < 0) || any(values$S < 0) || any(values$Y < 0)) {
stop(
"`X`, `S` and `Y` can only contain non-negative elements.",
call. = FALSE
)
}
if (values$k1 != nrow(values$S)) {
stop("`k1` must equal the number of rows in `S`", call. = FALSE)
}
if (values$k2 != ncol(values$S)) {
stop("`k2` must equal the number of columns in `S`", call. = FALSE)
}
x
}
directed_factor_model <- function(
X, S, Y,
...,
expected_in_degree = NULL,
expected_out_degree = NULL,
expected_density = NULL) {
X <- Matrix(X)
S <- Matrix(S)
Y <- Matrix(Y)
degree_controls <- c(
!is.null(expected_in_degree),
!is.null(expected_out_degree),
!is.null(expected_density)
)
if (sum(degree_controls) > 1) {
stop(
"Must specify only one of `expected_in_degree` ",
"`expected_out_degree`, and `expected_density`.",
call. = FALSE
)
}
fm <- new_directed_factor_model(X, S, Y, ...)
if (!is.null(expected_in_degree)) {
if (expected_in_degree <= 0) {
stop(
"`expected_in_degree` must be strictly greater than zero.",
call. = FALSE
)
}
S <- S * expected_in_degree / expected_in_degree(fm)
}
if (!is.null(expected_out_degree)) {
if (expected_out_degree <= 0) {
stop(
"`expected_out_degree` must be strictly greater than zero.",
call. = FALSE
)
}
S <- S * expected_out_degree / expected_out_degree(fm)
}
if (!is.null(expected_density)) {
if (expected_density <= 0 || 1 <= expected_density) {
stop(
"`expected_density` must be strictly between zero and one.",
call. = FALSE
)
}
S <- S * expected_density / expected_density(fm)
}
fm$S <- S
validate_directed_factor_model(fm)
}
dim_and_class <- function(x, ...) {
if (is.matrix(x) || inherits(x, "Matrix"))
paste0(nrow(x), " x ", ncol(x), " [", class(x)[1], "]")
else
paste0(length(x), " [", class(x)[1], "]")
}
print.directed_factor_model <- function(x, ...) {
cat(glue("Directed Factor Model\n", .trim = FALSE))
cat(glue("---------------------\n\n", .trim = FALSE))
cat(glue("Incoming Nodes (n): {x$n}\n", .trim = FALSE))
cat(glue("Incoming Rank (k1): {x$k1}\n", .trim = FALSE))
cat(glue("Outgoing Rank (k2): {x$k2}\n", .trim = FALSE))
cat(glue("Outgoing Nodes (d): {x$d}\n\n", .trim = FALSE))
cat("X:", dim_and_class(x$X), "\n")
cat("S:", dim_and_class(x$S), "\n")
cat("Y:", dim_and_class(x$Y), "\n\n")
cat(
glue("Expected edges: {round(expected_edges(x))}"),
glue("Expected density: {round(expected_density(x), 5)}"),
glue("Expected in degree: {round(expected_in_degree(x), 1)}"),
glue("Expected out degree: {round(expected_out_degree(x), 1)}"),
sep = "\n"
)
} |
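## Usage sketch (assumes the Matrix and glue packages plus the companion
## expected_edges()/expected_density()/expected_*_degree() helpers):
# set.seed(1)
# X <- matrix(runif(100 * 5), 100, 5)
# S <- matrix(runif(5 * 3), 5, 3)
# Y <- matrix(runif(50 * 3), 50, 3)
# fm <- directed_factor_model(X, S, Y, expected_density = 0.1)
# fm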
sirt_permutations <- function(r,v)
{
NL <- length(v)
NC <- NL^r
mat <- matrix(0, nrow=NC, ncol=r)
hh <- 1
for (dd in seq(r,1,by=-1)){
m1 <- rep(v, each=NL^(hh-1) )
m1 <- rep(m1, NC/length(m1) )
mat[,dd] <- m1
hh <- hh + 1
}
return(mat)
} |
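## Quick check: sirt_permutations() enumerates all length-r tuples over v,
## here the 2^3 = 8 binary triples.
sirt_permutations(r = 3, v = c(0, 1))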
context("dual syt")
test_that("dual syt", {
syt <- list(c(1,2,6), c(3,5), 4)
dsyt <- dualsyt(syt)
expect_identical(dsyt, list(c(1L,3L,4L),c(2L,5L),6L))
}) |
optimize_png <- function(filename) {
  optimized <- sub(".png", "-fs8.png", filename, fixed = TRUE)
command <- sprintf(
"pngquant --speed=1 --quality=0-2 -f %s && mv -f %s %s",
filename, optimized, filename
)
system(command)
}
library(ggrepel)
set.seed(42)
p <- ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) +
geom_text_repel(
size = 1.75,
segment.size = 0.2,
box.padding = 0.13, max.iter = 1e4
) +
geom_point(color = 'red', size = 0.5) +
theme_classic(base_size = 7) +
theme(
axis.line = element_line(size = 0.2),
axis.ticks = element_line(size = 0.2)
)
ggsave(filename = "fig.png", plot = p, width = 4, height = 2.5)
optimize_png("fig.png")
file.info("fig.png")$size |
validateSurvEndp <- function(object) {
errors <- character()
if(!(length(object@cenRate) == 1)) {
msg <- paste("Censoring rate should have length 1. Has length ", length(object@cenRate),
".", sep = "", collapse = ",")
errors <- c(errors, msg)
}
if (!(all(object@cenRate > 0))) {
msg <- ("The common exponential censoring rate must be positive.")
errors <- c(errors, msg)
}
if(!(length(object@accrualTime) == 1)) {
msg <- paste("Accrual time should have length 1. Has length ", length(object@accrualTime),
".", sep = "", collapse = ",")
errors <- c(errors, msg)
}
if (!(all(object@accrualTime >= 0))) {
    msg <- ("The accrual time must be non-negative.")
errors <- c(errors, msg)
}
if(!(length(object@cenTime) == 1)) {
msg <- paste("Censoring time should have length 1. Has length ", length(object@cenTime),
".", sep = "", collapse = ",")
errors <- c(errors, msg)
}
if (!(all(object@cenTime >= 0)) ){
    msg <- ("The censoring time must be non-negative.")
errors <- c(errors, msg)
}
if(length(object@cenTime) == 1 && length(object@accrualTime) == 1){
if (!(object@cenTime > object@accrualTime)) {
msg <- ("The censoring time must be greater than the accrual time.")
errors <- c(errors, msg)
}
}
if (length(errors) == 0) TRUE else errors
}
setClass("survEndp",
slots = c(cenRate="numeric", accrualTime="numeric", cenTime="numeric"),
validity = validateSurvEndp)
survEndp <- function(cenRate, accrualTime, cenTime) {
new("survEndp", cenRate = cenRate, accrualTime = accrualTime, cenTime = cenTime)
} |
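## Usage sketch: a survival endpoint with exponential censoring rate 0.1,
## two years of accrual and administrative censoring at five years.
survEndp(cenRate = 0.1, accrualTime = 2, cenTime = 5)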
get_acceleration <- function(bucket, ...){
r <- s3HTTP(verb = "GET",
bucket = bucket,
query = list(accelerate = ""),
...)
attributes(r) <- NULL
if (identical(r, list())) {
return(NULL)
} else {
return(r)
}
}
put_acceleration <- function(bucket, status = c("Enabled", "Suspended"), ...){
b <- paste0(
'<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>',match.arg(status),'</Status>
</AccelerateConfiguration>')
r <- s3HTTP(verb = "PUT",
bucket = bucket,
query = list(accelerate = ""),
request_body = b,
...)
attributes(r) <- NULL
if (identical(r, list())) {
return(NULL)
} else {
return(r)
}
} |
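## Usage sketch (requires the aws.s3-style s3HTTP() helper above plus AWS
## credentials; the bucket name is hypothetical):
# put_acceleration("my-bucket", status = "Enabled")
# get_acceleration("my-bucket")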
opt13 <- eventReactive(input$RunOpt13, {
Gprint(MODE_DEBUG, "Opt13\n")
dem = input$Dememo13
nbatchs = input$Nbatches13
niters = input$Niters13
ficout = tempfile()
ficin = GenepopFile()$datapath
out = TRUE
if (is.null(ficin)) {
out = FALSE
} else {
Gprint(MODE_DEBUG, ficin)
setRandomSeed(getSeed(input$randomSeed))
show("spinner")
tryCatch(test_HW(ficin, which = "Proba", outputFile = ficout, enumeration = TRUE, dememorization = dem, batches = nbatchs,
iterations = niters), error = function(e) {
file.create(ficout)
        write(paste("Exception : ", e$message), file = ficout)
}, finally = hide("spinner"))
file.rename("cmdline.txt", "cmdline.old")
}
data.frame(file = ficout, output = out)
})
output$Opt13outLoc <- renderText({
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
if (file.size(filePath) > 300) {
fileText <- readLines(filePath)
nbli = grep("Results by locus", fileText)
nblig = grep("Results by population", fileText)
fileText <- paste(fileText[nbli:(nblig - 1)], collapse = "\n")
shinyjs::enable("downloadOpt13All")
} else {
fileText <- readLines(filePath)
}
} else {
        fileText <- "No genepop file found! Please upload a file"
}
fileText
})
output$Opt13outPop <- renderText({
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
if (file.size(filePath) > 300) {
fileText <- readLines(filePath)
nbli = grep("All locus, all populations", fileText)
nblig = grep("Results by population", fileText)
fileText <- paste(fileText[nblig:(nbli - 1)], collapse = "\n")
} else {
fileText <- readLines(filePath)
}
} else {
        fileText <- "No genepop file found! Please upload a file"
}
fileText
})
output$Opt13outLocPop <- renderText({
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
if (file.size(filePath) > 300) {
fileText <- readLines(filePath)
nbli = grep("All locus, all populations", fileText)
fileText <- paste(fileText[nbli:length(fileText)], collapse = "\n")
} else {
fileText <- readLines(filePath)
}
} else {
        fileText <- "No genepop file found! Please upload a file"
}
fileText
})
output$downloadOpt13Loc <- downloadHandler(filename = function() {
paste("result_opt13_Loc_", Sys.Date(), ".txt", sep = "")
}, content = function(con) {
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
fileText <- readLines(filePath)
nbli = grep("Results by locus", fileText)
nblig = grep("Results by population", fileText)
fileText <- paste(fileText[nbli:(nblig - 1)], collapse = "\n")
} else {
        fileText <- "No genepop file found! Please upload a file"
}
write(fileText, con)
})
output$downloadOpt13Pop <- downloadHandler(filename = function() {
paste("result_opt13_Pop_", Sys.Date(), ".txt", sep = "")
}, content = function(con) {
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
fileText <- readLines(filePath)
nbli = grep("All locus, all populations", fileText)
nblig = grep("Results by population", fileText)
fileText <- paste(fileText[nblig:(nbli - 1)], collapse = "\n")
} else {
        fileText <- "No genepop file found! Please upload a file"
}
write(fileText, con)
})
output$downloadOpt13All <- downloadHandler(filename = function() {
paste("result_opt13_", Sys.Date(), ".txt", sep = "")
}, content = function(con) {
opt <- opt13()
if (opt$output) {
filePath <- toString(opt$file)
fileText <- readLines(filePath)
} else {
        fileText <- "No genepop file found! Please upload a file"
}
write(fileText, con)
}) |
quickPlot= function(soln, numR, numStrains, microbeNames, yLabel, xLabel, sumOverStrains,
saveFig = FALSE, figType = "eps", figName = "microPopFig") {
wlen = 7
hlen = 7
numMFG = length(microbeNames)
numM = numStrains * numMFG
time = soln[, 1]
if (numM == 1) {
X = as.matrix(soln[, 2:(numM + 1)])
colnames(X) = colnames(soln)[2]
} else {
X = soln[, 2:(numM + 1)]
}
R = soln[, (numM + 2):(numM + numR + 1), drop = FALSE]
if (numStrains > 1 & sumOverStrains) {
gmat = matrix(NA, nrow = length(time), ncol = numMFG)
for (g in 1:numMFG) {
st = (g - 1) * numStrains + 1
fin = g * numStrains
gmat[, g] = rowSums(X[, st:fin])
}
Xmax = max(gmat, na.rm = TRUE)
} else {
Xmax = max(X, na.rm = TRUE)
}
if (figType == "png") {
dev.new(bg = "white", horizontal = FALSE, onefile = FALSE, paper = "special",
width = wlen, height = hlen)
} else {
dev.new()
}
par(mar = c(5, 5, 5, 2))
cols = rainbow(numMFG)
plot(range(time), c(min(0, min(X, na.rm = TRUE)), 1.1 * Xmax), xlab = xLabel,
main = "Microbes", ylab = yLabel, cex.lab = 1.5, cex.axis = 1.3, cex.main = 1.5,
type = "n")
for (g in 1:numMFG) {
if (numStrains > 1 & sumOverStrains) {
lines(time, gmat[, g], lwd = 2, col = cols[g])
} else {
for (i in 1:numStrains) {
j = (g - 1) * numStrains + i
lines(time, X[, j], lwd = 2, col = cols[g])
}
}
}
legend("topleft", bg = "transparent", legend = microbeNames, col = cols, lty = 1,
lwd = 2)
if (saveFig) {
if (figType == "pdf") {
dev.copy2pdf(file = paste(figName, "Microbes.pdf", sep = ""))
}
if (figType == "eps") {
dev.copy2eps(file = paste(figName, "Microbes.eps", sep = ""))
}
if (figType == "png") {
dev.print(png, filename = paste(figName, "Microbes.png", sep = ""), res = 100,
width = wlen, height = hlen, units = "in")
}
if (figType == "tiff") {
dev.print(tiff, filename = paste(figName, "Microbes.tiff", sep = ""),
res = 100, width = wlen, height = hlen, units = "in")
}
}
if (figType == "png") {
wlen = 7
hlen = 7
dev.new(bg = "white", horizontal = FALSE, onefile = FALSE, paper = "special",
width = wlen, height = hlen)
} else {
dev.new()
}
par(mar = c(5, 5, 5, 2))
cols = rainbow(numR)
plot(range(time), c(min(0, min(R, na.rm = TRUE)), 1.1 * max(R, na.rm = TRUE)),
xlab = xLabel, main = "Resources", ylab = yLabel, cex.lab = 1.5, cex.axis = 1.3,
type = "n", cex.main = 1.5)
for (i in 1:numR) {
lines(time, R[, i], lwd = 2, col = cols[i])
}
legend("topleft", bg = "transparent", colnames(R), col = cols, lty = 1, lwd = 2)
if (saveFig) {
if (figType == "pdf") {
dev.copy2pdf(file = paste(figName, "Resources.pdf", sep = ""))
}
if (figType == "eps") {
dev.copy2eps(file = paste(figName, "Resources.eps", sep = ""))
}
if (figType == "png") {
dev.print(png, filename = paste(figName, "Resources.png", sep = ""),
res = 100, width = wlen, height = hlen, units = "in")
}
if (figType == "tiff") {
dev.print(tiff, filename = paste(figName, "Resources.tiff", sep = ""),
res = 100, width = wlen, height = hlen, units = "in")
}
}
} |
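## Usage sketch (hypothetical solver output): `soln` is a matrix with time in
## column 1, then one column per microbe strain and one per resource.
# quickPlot(soln, numR = 2, numStrains = 1, microbeNames = c("GroupA", "GroupB"),
#           yLabel = "Concentration", xLabel = "Time", sumOverStrains = FALSE)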
context('funfem')
skip_if_not_installed('funFEM')
rngReset()
library(funFEM)
data(CanadianWeather)
femData = CanadianWeather$dailyAv[,,'Temperature.C'] %>% t()
lcMethodTestFunFEM = function(...) {
lcMethodFunFEM(response = 'Value', ...)
}
test_that('default', {
suppressWarnings({
model = latrend(lcMethodTestFunFEM(), femData)
expect_valid_lcModel(model)
})
})
test_that('many clusters', {
suppressWarnings({
model = latrend(lcMethodTestFunFEM(nClusters = 4), femData)
expect_valid_lcModel(model)
})
})
test_that('testLongData', {
suppressWarnings({
model = latrend(lcMethodTestFunFEM(), testLongData)
expect_valid_lcModel(model)
})
}) |
`sr.sphere.test` <-
function(X, score=c("sign","symmsign"), shape=NULL, na.action=na.fail)
{
X<-na.action(X)
DNAME=deparse(substitute(X))
score=match.arg(score)
X<-as.matrix(X)
p<-dim(X)[2]
if (p<2) stop("'X' must be at least bivariate")
n<-dim(X)[1]
if(!is.null(shape))
{
if(!is.matrix(shape)) stop("'shape' must be a matrix")
if(!all(dim(shape)==c(p,p))) stop("dimensions of 'shape' and 'X' do not match")
X<-X%*%solve(mat.sqrt(shape))
}
Cp<-Cpp(p)
STATISTIC<-switch(score,
"sign"=
{
METHOD="Test of sphericity using spatial signs"
S<-spatial.signs(X,F,F)
S1<-as.vector(t(S)%*%S/n)
Q1<-(sum((Cp%*%S1)^2))
gamma<-2/(p*(p+2))
n*Q1/gamma
},
"symmsign"=
{
METHOD="Test of sphericity using spatial symmetrized signs"
tmp<-Q2internal(X)
S2<-tmp[1:p^2]
covmat<-4*(matrix(tmp[-(1:p^2)],ncol=p^2)-S2%*%t(S2))/n
as.vector(t(Cp%*%S2)%*%gen.inv(covmat)%*%(Cp%*%S2))
})
names(STATISTIC)<-"Q.2"
NVAL<-paste("diag(",paste(p),")",sep="")
names(NVAL)<-"shape"
PVAL<-1-pchisq(STATISTIC,(df<-(p+2)*(p-1)/2))
PARAMETER<-df
names(PARAMETER)<-"df"
ALTERNATIVE<-"two.sided"
res<-list(statistic=STATISTIC,parameter=PARAMETER,p.value=PVAL,null.value=NVAL,alternative=ALTERNATIVE,method=METHOD,data.name=DNAME)
class(res)<-"htest"
res
} |
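## Usage sketch (assumes the companion helpers spatial.signs(), Cpp(),
## Q2internal(), mat.sqrt() and gen.inv() from the same package):
# X <- matrix(rnorm(200), ncol = 2)
# sr.sphere.test(X, score = "sign")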
data("colonDC")
set.seed(2)
colonDC <- colonDC[sample(1:nrow(colonDC), 1000), ]
bhaz1 <- general.haz(time = "FU", age = "agedays", sex = "sex", year = "dx",
data = colonDC, ratetable = survexp.dk)
bhaz2 <- general.haz(time = colonDC$FU, age = colonDC$agedays, sex = colonDC$sex,
year = colonDC$dx, ratetable = survexp.dk)
all(bhaz2 == bhaz1) |
`dsexpect` <-
function(x){
    ## Weighted sums of columns 1 and 2, weighted by column 3.
    dsexpect <- c(0, 0)
    dsexpect[1] <- sum(x[, 1] * x[, 3], na.rm = TRUE)
    dsexpect[2] <- sum(x[, 2] * x[, 3], na.rm = TRUE)
    dsexpect
}
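## Quick check: expectation of the points (0, 1) and (1, 2) with weights
## 0.25 and 0.75 -- returns c(0.75, 1.75).
dsexpect(cbind(c(0, 1), c(1, 2), c(0.25, 0.75)))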
check_handle <- function(handle, data_product, what, component) {
if (missing(component)) {
if (!is.null(handle[[what]])) {
section <- handle[[what]]
if (data_product %in% section$data_product) {
index <- which(data_product %in% section$data_product)
path <- section$path[index]
return(path)
} else {
return(NULL)
}
}
} else {
if (!is.null(handle[[what]])) {
section <- handle[[what]]
if (data_product %in% section$data_product) {
        index <- which(section$data_product == data_product)
this_dataproduct <- section[index, ]
if (component %in% this_dataproduct$use_component) {
          index <- which(this_dataproduct$use_component == component)
handle_index <- this_dataproduct$index[index]
return(handle_index)
} else {
return(NULL)
}
}
}
}
} |
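## Usage sketch (the handle structure and section name are hypothetical):
## look up the path recorded for a data product in the handle's outputs.
# check_handle(handle, data_product = "human/population", what = "outputs")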
expected <- eval(parse(text="1L"));
test(id=0, code={
argv <- eval(parse(text="list(structure(\" \\\"Le français, c'est façile: Règles, Liberté, Egalité, Fraternité...\\\")\\n\", Rd_tag = \"RCODE\"))"));
do.call(`length`, argv);
}, o=expected); |
balloonplot <- function(x,...)
UseMethod("balloonplot",x)
balloonplot.table <- function(x, xlab, ylab, zlab, show.zeros = FALSE,
show.margins = TRUE, ... )
{
obj <- x
tmp <- as.data.frame(x)
x <- tmp[,1]
y <- tmp[,2]
z <- tmp[,3]
tableflag <- TRUE
if(missing(xlab)) xlab <- names(dimnames(obj))[1]
if(missing(ylab)) ylab <- names(dimnames(obj))[2]
if(missing(zlab)) zlab <- "Freq"
balloonplot.default(x, y, z, xlab=xlab, ylab=ylab, zlab=zlab,
show.zeros = show.zeros,
show.margins = show.margins, ...)
}
balloonplot.default <- function(x,y,z,
xlab,
ylab,
zlab=deparse(substitute(z)),
dotsize=2/max(strwidth(19),strheight(19)),
dotchar=19,
dotcolor="skyblue",
text.size=1,
text.color=par("fg"),
main,
label=TRUE,
label.digits=2,
label.size=1,
label.color=par("fg"),
scale.method=c("volume","diameter"),
scale.range=c("absolute","relative"),
colsrt=par("srt"),
rowsrt=par("srt"),
colmar=1,
rowmar=2,
show.zeros=FALSE,
show.margins=TRUE,
cum.margins=TRUE,
sorted=TRUE,
label.lines=TRUE,
                                fun=function(x)sum(x,na.rm=TRUE),
hide.duplicates=TRUE,
... )
{
if(is.null(names(x)))
{
xnames <- as.character(substitute(x))
if(length(xnames)>1) xnames <- xnames[-1]
}
else
xnames <- names(x)
if(is.null(names(y)))
{
ynames <- as.character(substitute(y))
if(length(ynames)>1) ynames <- ynames[-1]
}
else
ynames <- names(y)
scale.method <- match.arg(scale.method)
scale.range <- match.arg(scale.range)
if( scale.method=="absolute" && any(z < 0, na.rm=TRUE ) )
warning("z value(s) below zero detected.",
" No balloons will be displayed for these cells.")
if(missing(main))
{
if(scale.method=="volume")
main <- paste("Balloon Plot for ",
paste(xnames, collapse=", "),
" by ",
paste(ynames, collapse=", "),
".\nArea is proportional to ", zlab, ".", sep='')
else
main <- paste("Balloon Plot for ",
                    paste(xnames, collapse=", "),
" by ",
paste(ynames, collapse=", "),
".\nDiameter is proportional to ", zlab, ".", sep='')
}
if(length(dotcolor)<length(z))
dotcolor <- rep(dotcolor, length=length(z))
if(is.list(x))
{
xlabs <- x
x$sep=":"
x <- do.call(paste, x)
}
else
xlabs <- list(x)
if(is.list(y))
{
ylabs <- y
y$sep=":"
y <- do.call(paste, y)
ylab <- paste( names(y) )
}
else
ylabs <- list(y)
if(sorted)
{
ord.x <- do.call(order, xlabs)
ord.y <- do.call(order, ylabs)
}
else
ord.x <- ord.y <- 1:length(x)
forceOrder <- function(X, sord, lord)
factor(X[sord], levels=unique(X[lord]))
x <- forceOrder(x, ord.y, ord.y)
y <- forceOrder(y, ord.y, ord.y)
z <- as.numeric(z[ord.y])
dotcolor <- dotcolor[ord.y]
xlabs <- unique(data.frame(lapply(xlabs, forceOrder,
sord=ord.y, lord=ord.y)))
ylabs <- unique(data.frame(lapply(ylabs, forceOrder,
sord=ord.y, lord=ord.y)))
myscale <- function(X, min=0, max=16, scale.method, scale.range)
{
if(scale.method=="volume")
{
X[X<0] <- 0
X <- sqrt(X)
}
if(scale.range=="relative")
X <- (X-min(X, na.rm=TRUE))
X <- X / max(X, na.rm=TRUE )
X <- min + X * (max - min)
cin.x <- par("cin")[1]
cin.y <- par("cin")[2]
if(cin.x < cin.y) X <- X * cin.x/cin.y
X
}
nlabels.y <- length(ylabs)
nlabels.x <- length(xlabs)
tab1 <- split( data.frame(z,dotcolor,x,y), f=list(x,y) )
ztab <- do.call(rbind,
lapply(
tab1,
FUN=function(X) cbind(z=fun(X[,1]),X[1,-1])
)
)
oldpar <- par("xpd","mar")
on.exit( par(oldpar) )
if(!show.margins)
{
xlim=c(-0.5,nlevels(x)+nlabels.y*rowmar-0.25)
ylim=c(0.50,nlevels(y)+nlabels.x*colmar+1)
}
else
{
xlim=c(-0.5,nlevels(x)+nlabels.y*rowmar+1)
ylim=c(0,nlevels(y)+nlabels.x*colmar+1)
}
plot(x=nlabels.y*rowmar+0.25 + as.numeric(ztab$x) - 1,
y=nlevels(y) - as.numeric(ztab$y) + 1,
cex=myscale(ztab$z, max=dotsize, scale.method=scale.method, scale.range=scale.range),
pch=dotchar,
col=as.character(ztab$dotcolor),
xlab="",
ylab="",
xaxt="n",
yaxt="n",
bty="n",
xaxs = "i",
yaxs = "i",
xlim=xlim,
ylim=ylim,
...
)
ny <- nlevels(ztab$y)
nx <- nlevels(ztab$x)
sumz <- sum(ztab$z, na.rm=TRUE)
colsumz <- sapply(split( ztab$z, ztab$y), sum, na.rm=TRUE)
rowsumz <- sapply(split( ztab$z, ztab$x), sum, na.rm=TRUE)
if(show.margins)
{
text(
x=(1:nx) + nlabels.y*rowmar + 0.25 -1,
y=0.25,
labels=format(c(sumz, rowsumz), digits=label.digits)[-1],
font=1,
adj=c(0.5,0.0),
col=text.color,
cex=text.size
)
rowlabs <- format(c(sumz, colsumz), digits=label.digits)[-1]
width <- max(strwidth(rowlabs),na.rm=TRUE)
text(
x=nx + nlabels.y*rowmar-0.25+width,
y= (ny:1),
labels=rowlabs,
font=1,
adj=c(1.0,0.5),
col=text.color,
cex=text.size
)
text(
x=nx + nlabels.y*rowmar-0.25+width,
y=0.25,
labels=sumz,
font=1,
adj=c(1.0,0.0),
col=text.color,
cex=text.size
)
}
if(cum.margins)
{
cx <- c(0, cumsum(rowsumz) / sumz)
rect(xleft = nlabels.y*rowmar - 1 - 0.25 + 1:nx,
xright = nlabels.y*rowmar - 1 + 0.75 + 1:nx,
ybottom = ny+0.75+cx[1:nx]*colmar*nlabels.x,
ytop = ny+0.75+cx[2:(nx+1)]*colmar*nlabels.x,
col = "lightgray",
border = NA)
cy <- c(0, cumsum(colsumz) / sumz)
rect(xleft = -0.5 +rowmar*cy[ny:1]*nlabels.y,
xright = -0.5 +rowmar*cy[(ny+1):2]*nlabels.y,
ybottom = 1:ny-0.5,
ytop = 1:ny+0.5,
col = "lightgray",
border = NA)
tx <- paste(levels(x),"\n[",rowsumz,"]")
ty <- paste(levels(y),"\n[",colsumz,"]")
}
segments(
x0=nlabels.y*rowmar-0.25,
x1=nx+nlabels.y*rowmar-0.25,
y0=(0:ny)+0.5,
y1=(0:ny)+0.5
)
segments(
x0=(0:nx)+nlabels.y*rowmar-0.25,
x1=(0:nx)+nlabels.y*rowmar-0.25,
y0= 0.5,
    y1=ny+0.5
)
if(hide.duplicates)
undupe <- function(X)
{
X <- as.character(X)
c(X[1], ifelse(X[-1] == X[-length(X)], "", X[-1]))
}
else
undupe <- function(X) X
for(i in 1:nlabels.x)
{
y <- ny + 0.75 + (nlabels.x - i + .5)*colmar
text(
x= (1:nx) + nlabels.y*rowmar + 0.25 - 1,
y= y,
labels=undupe(xlabs[,i]),
srt=colsrt,
font=1,
col=text.color,
cex=text.size
)
}
for(i in 1:length(ylabs))
{
text(
y=ny:1,
x= (i-0.5)*rowmar-0.5,
labels=undupe(ylabs[,i]),
srt=rowsrt,
font=1,
col=text.color,
cex=text.size
)
}
if(missing(ylab) || length(ylab)==0)
text(
x=((1:length(ylabs))-0.5)*rowmar-0.5,
y=ny+0.5,
labels=ynames,
srt=colsrt,
font=2,
adj=c(0.5,0.0),
col=text.color,
cex=text.size
)
else
text(
x=((1:length(ylab))-0.5)*rowmar-0.5,
y=ny+0.5,
labels=ylab,
srt=colsrt,
font=2,
adj=c(0.5,0.0),
col=text.color,
cex=text.size
)
if(missing(xlab) || length(xlab)==0)
text(
x= nlabels.y*rowmar - 0.25 - strwidth(','),
y= ny + 0.75 + ((nlabels.x:1) - 1 + .5)*colmar,
labels=xnames,
srt=colsrt,
font=2,
adj=c(1,0.5),
col=text.color,
cex=text.size
)
else
text(
x= nlabels.y*rowmar - 0.25 - strwidth(','),
y= ny + 0.75 + ((length(xlab):1) - 1 + .5)*colmar,
labels=xlab,
srt=colsrt,
font=2,
adj=c(1,0.5),
col=text.color,
cex=text.size
)
if(label.lines)
{
segments(
x0=(0:nlabels.y)*rowmar-0.5,
x1=(0:nlabels.y)*rowmar-0.5,
y0=0.5,
y1=ny+0.5
)
segments(
x0=nlabels.y*rowmar-0.25,
x1=nlabels.y*rowmar + nx - 0.25,
y0=(0:nlabels.x)*colmar +ny+0.75,
y1=(0:nlabels.x)*colmar +ny+0.75
)
}
if(label){
if(show.zeros)
indiv <- 1:length(ztab$y)
else
indiv <- which(ztab$z != 0)
text(x=as.numeric(ztab$x[indiv])+ nlabels.y*rowmar - 0.75,
y=ny - as.numeric(ztab$y[indiv]) + 1,
labels=format(ztab$z[indiv], digits=label.digits),
font=2,
adj=c(0.5,0.5),
col=label.color,
cex=label.size
)
}
title(main=main)
} |
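## Quick demonstration: counts of mtcars cylinders by gear; balloon area is
## proportional to the cell frequency.
balloonplot(with(mtcars, table(cyl, gear)))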
context("PL 3: Create confusion matrices")
test_that("create_confmats() returns a 'cmats' object", {
cmats1 <- create_confmats(scores = c(0.1, 0.2, 0), labels = c(1, 0, 1))
data(P10N10)
fmdat <- reformat_data(P10N10$scores, P10N10$labels)
cmats2 <- create_confmats(fmdat)
cmats3 <- create_confmats(scores = P10N10$scores, labels = P10N10$labels)
expect_true(is(cmats1, "cmats"))
expect_true(is(cmats2, "cmats"))
expect_true(is(cmats3, "cmats"))
})
test_that("'fmdat' must be a 'fmdat' object", {
expect_err_msg <- function(fmdat) {
err_msg <- "Unrecognized class for .validate()"
eval(bquote(expect_error(create_confmats(fmdat), err_msg)))
}
expect_err_msg(list())
expect_err_msg(data.frame())
})
test_that("create_confmats() can directly take scores and labels", {
fmdat <- reformat_data(c(0.1, 0.2, 0.2, 0), c(1, 0, 1, 1))
cmats1 <- create_confmats(fmdat)
cmats2 <- create_confmats(scores = c(0.1, 0.2, 0.2, 0),
labels = c(1, 0, 1, 1))
expect_equal(cmats1, cmats2)
})
test_that("create_confmats() accepts arguments for reformat_data()", {
err_msg <- "Invalid arguments: na.rm"
expect_error(create_confmats(scores = c(0.1, 0.2, 0.2, 0),
labels = c(1, 0, 1, 1), na.rm = TRUE),
err_msg)
cmats <- create_confmats(scores = c(0.1, 0.2, 0),
labels = c(1, 0, 1),
na_worst = TRUE,
ties_method = "first",
keep_fmdat = TRUE)
expect_equal(.get_obj_arg(cmats, "fmdat", "na_worst"), TRUE)
expect_equal(.get_obj_arg(cmats, "fmdat", "ties_method"), "first")
})
test_that("create_confmats() accepts na_worst argument", {
expect_equal_ranks <- function(scores, na_worst, ranks) {
cmats <- create_confmats(scores = scores,
labels = c(1, 0, 1),
na_worst = na_worst,
keep_fmdat = TRUE)
fmdat <- .get_obj(cmats, "fmdat")
eval(bquote(expect_equal(.get_obj_arg(cmats, NULL, "na_worst"), na_worst)))
eval(bquote(expect_equal(.get_obj_arg(fmdat, NULL, "na_worst"), na_worst)))
eval(bquote(expect_equal(fmdat[["ranks"]], ranks)))
sranks <- .rank_scores(scores, na_worst = na_worst)
eval(bquote(expect_equal(sranks[["ranks"]], ranks)))
}
na1_scores <- c(NA, 0.2, 0.1)
na2_scores <- c(0.2, NA, 0.1)
na3_scores <- c(0.2, 0.1, NA)
expect_equal_ranks(na1_scores, TRUE, c(3, 1, 2))
expect_equal_ranks(na1_scores, FALSE, c(1, 2, 3))
expect_equal_ranks(na2_scores, TRUE, c(1, 3, 2))
expect_equal_ranks(na2_scores, FALSE, c(2, 1, 3))
expect_equal_ranks(na3_scores, TRUE, c(1, 2, 3))
expect_equal_ranks(na3_scores, FALSE, c(2, 3, 1))
})
test_that("create_confmats() accepts ties_method argument", {
expect_equal_ranks <- function(ties_method, ranks) {
cmats <- create_confmats(scores = c(0.1, 0.2, 0.2, 0.2, 0.3),
labels = c(1, 0, 1, 1, 1),
ties_method = ties_method,
keep_fmdat = TRUE)
fmdat <- .get_obj(cmats, "fmdat")
eval(bquote(expect_equal(.get_obj_arg(cmats, NULL, "ties_method"),
ties_method)))
eval(bquote(expect_equal(.get_obj_arg(fmdat, NULL, "ties_method"),
ties_method)))
eval(bquote(expect_equal(fmdat[["ranks"]], ranks)))
}
expect_equal_ranks("equiv", c(5, 2, 2, 2, 1))
expect_equal_ranks("first", c(5, 2, 3, 4, 1))
})
test_that("'cmats' contains a list with 7 items", {
cmats <- create_confmats(scores = c(0.1, 0.2, 0), labels = c(1, 0, 1))
expect_true(is.list(cmats))
expect_equal(length(cmats), 7)
})
test_that("TPs, FNs, FPs, and TNs must be the same length", {
cmats <- create_confmats(scores = c(0.1, 0.2, 0), labels = c(1, 0, 1))
vec_size <- length(cmats[["ranks"]])
expect_true(vec_size != 0)
expect_equal(vec_size, length(cmats[["tp"]]))
expect_equal(vec_size, length(cmats[["fn"]]))
expect_equal(vec_size, length(cmats[["fp"]]))
expect_equal(vec_size, length(cmats[["tn"]]))
})
test_that("'cmats' contains correct items", {
cmats <- create_confmats(scores = c(0.1, 0.2, 0), labels = c(1, 0, 1))
np <- cmats[["pos_num"]]
nn <- cmats[["neg_num"]]
vec_size <- length(cmats[["ranks"]])
expect_equal(cmats[["tp"]][1], 0)
expect_equal(cmats[["fn"]][1], np)
expect_equal(cmats[["fp"]][1], 0)
expect_equal(cmats[["tn"]][1], nn)
expect_equal(cmats[["tp"]][vec_size], np)
expect_equal(cmats[["fn"]][vec_size], 0)
expect_equal(cmats[["fp"]][vec_size], nn)
expect_equal(cmats[["tn"]][vec_size], 0)
})
test_that("create_confmats() reterns correct matrices", {
cmats <- create_confmats(scores = c(0.1, 0.2, 0, 0.3),
labels = c(1, 0, 0, 1))
expect_equal(cmats[["pos_num"]], 2)
expect_equal(cmats[["neg_num"]], 2)
expect_equal(cmats[["tp"]], c(0, 1, 1, 2, 2))
expect_equal(cmats[["fn"]], c(2, 1, 1, 0, 0))
expect_equal(cmats[["fp"]], c(0, 0, 1, 1, 2))
expect_equal(cmats[["tn"]], c(2, 2, 1, 1, 0))
})
test_that("create_confmats() handles tied scores 1", {
cmats <- create_confmats(scores = c(0.3, 0.2, 0.2, 0.2, 0.2, 0.1),
labels = c(0, 1, 0, 1, 0, 1))
expect_equal(cmats[["tp"]], c(0, 0, 0.5, 1, 1.5, 2, 3))
expect_equal(cmats[["fp"]], c(0, 1, 1.5, 2, 2.5, 3, 3))
})
test_that("create_confmats() handles tied scores 2", {
cmats <- create_confmats(scores = c(0.3, 0.2, 0.2, 0.2, 0.2),
labels = c(0, 1, 0, 1, 0))
expect_equal(cmats[["tp"]], c(0, 0, 0.5, 1, 1.5, 2))
expect_equal(cmats[["fp"]], c(0, 1, 1.5, 2, 2.5, 3))
})
pl3_create_ms_dat <- function() {
s1 <- c(1, 2, 3, 4)
s2 <- c(5, 6, 7, 8)
s3 <- c(2, 4, 6, 8)
scores <- join_scores(s1, s2, s3)
l1 <- c(1, 0, 1, 1)
l2 <- c(0, 1, 1, 1)
l3 <- c(1, 1, 0, 1)
labels <- join_labels(l1, l2, l3)
list(scores = scores, labels = labels)
}
pl3_create_sm_dat <- function() {
s1 <- c(1, 2, 3, 4)
s2 <- c(5, 6, 7, 8)
s3 <- c(2, 4, 6, 8)
scores <- join_scores(s1, s2, s3)
l1 <- c(1, 0, 1, 1)
l2 <- c(0, 1, 1, 1)
l3 <- c(1, 1, 0, 1)
labels <- join_labels(l1, l2, l3)
list(scores = scores, labels = labels)
}
pl3_create_mm_dat <- function() {
s1 <- c(1, 2, 3, 4)
s2 <- c(5, 6, 7, 8)
s3 <- c(2, 4, 6, 8)
s4 <- c(2, 4, 6, 8)
scores <- join_scores(s1, s2, s3, s4)
l1 <- c(1, 0, 1, 1)
l2 <- c(0, 1, 1, 1)
l3 <- c(1, 1, 0, 1)
l4 <- c(1, 1, 0, 1)
labels <- join_labels(l1, l2, l3, l4)
list(scores = scores, labels = labels)
}
test_that("ss test data", {
cmats <- create_confmats(scores = c(1, 2, 3, 4),
labels = c(1, 0, 1, 0))
expect_equal(cmats[["pos_num"]], 2)
expect_equal(cmats[["neg_num"]], 2)
expect_equal(cmats[["tp"]], c(0, 0, 1, 1, 2))
expect_equal(cmats[["fn"]], c(2, 2, 1, 1, 0))
expect_equal(cmats[["fp"]], c(0, 1, 1, 2, 2))
expect_equal(cmats[["tn"]], c(2, 1, 1, 0, 0))
})
test_that("ms test data", {
msdat <- pl3_create_ms_dat()
cmats1 <- create_confmats(scores = msdat[["scores"]][[1]],
labels = msdat[["labels"]][[1]])
expect_equal(cmats1[["pos_num"]], 3)
expect_equal(cmats1[["neg_num"]], 1)
expect_equal(cmats1[["tp"]], c(0, 1, 2, 2, 3))
expect_equal(cmats1[["fn"]], c(3, 2, 1, 1, 0))
expect_equal(cmats1[["fp"]], c(0, 0, 0, 1, 1))
expect_equal(cmats1[["tn"]], c(1, 1, 1, 0, 0))
cmats2 <- create_confmats(scores = msdat[["scores"]][[2]],
labels = msdat[["labels"]][[2]])
expect_equal(cmats2[["pos_num"]], 3)
expect_equal(cmats2[["neg_num"]], 1)
expect_equal(cmats2[["tp"]], c(0, 1, 2, 3, 3))
expect_equal(cmats2[["fn"]], c(3, 2, 1, 0, 0))
expect_equal(cmats2[["fp"]], c(0, 0, 0, 0, 1))
expect_equal(cmats2[["tn"]], c(1, 1, 1, 1, 0))
cmats3 <- create_confmats(scores = msdat[["scores"]][[3]],
labels = msdat[["labels"]][[3]])
expect_equal(cmats3[["pos_num"]], 3)
expect_equal(cmats3[["neg_num"]], 1)
expect_equal(cmats3[["tp"]], c(0, 1, 1, 2, 3))
expect_equal(cmats3[["fn"]], c(3, 2, 2, 1, 0))
expect_equal(cmats3[["fp"]], c(0, 0, 1, 1, 1))
expect_equal(cmats3[["tn"]], c(1, 1, 0, 0, 0))
})
test_that("sm test data", {
smdat <- pl3_create_sm_dat()
cmats1 <- create_confmats(scores = smdat[["scores"]][[1]],
labels = smdat[["labels"]][[1]])
expect_equal(cmats1[["pos_num"]], 3)
expect_equal(cmats1[["neg_num"]], 1)
expect_equal(cmats1[["tp"]], c(0, 1, 2, 2, 3))
expect_equal(cmats1[["fn"]], c(3, 2, 1, 1, 0))
expect_equal(cmats1[["fp"]], c(0, 0, 0, 1, 1))
expect_equal(cmats1[["tn"]], c(1, 1, 1, 0, 0))
cmats2 <- create_confmats(scores = smdat[["scores"]][[2]],
labels = smdat[["labels"]][[2]])
expect_equal(cmats2[["pos_num"]], 3)
expect_equal(cmats2[["neg_num"]], 1)
expect_equal(cmats2[["tp"]], c(0, 1, 2, 3, 3))
expect_equal(cmats2[["fn"]], c(3, 2, 1, 0, 0))
expect_equal(cmats2[["fp"]], c(0, 0, 0, 0, 1))
expect_equal(cmats2[["tn"]], c(1, 1, 1, 1, 0))
cmats3 <- create_confmats(scores = smdat[["scores"]][[3]],
labels = smdat[["labels"]][[3]])
expect_equal(cmats3[["pos_num"]], 3)
expect_equal(cmats3[["neg_num"]], 1)
expect_equal(cmats3[["tp"]], c(0, 1, 1, 2, 3))
expect_equal(cmats3[["fn"]], c(3, 2, 2, 1, 0))
expect_equal(cmats3[["fp"]], c(0, 0, 1, 1, 1))
expect_equal(cmats3[["tn"]], c(1, 1, 0, 0, 0))
})
test_that("mm test data", {
mmdat <- pl3_create_mm_dat()
cmats1 <- create_confmats(scores = mmdat[["scores"]][[1]],
labels = mmdat[["labels"]][[1]])
expect_equal(cmats1[["pos_num"]], 3)
expect_equal(cmats1[["neg_num"]], 1)
expect_equal(cmats1[["tp"]], c(0, 1, 2, 2, 3))
expect_equal(cmats1[["fn"]], c(3, 2, 1, 1, 0))
expect_equal(cmats1[["fp"]], c(0, 0, 0, 1, 1))
expect_equal(cmats1[["tn"]], c(1, 1, 1, 0, 0))
cmats2 <- create_confmats(scores = mmdat[["scores"]][[2]],
labels = mmdat[["labels"]][[2]])
expect_equal(cmats2[["pos_num"]], 3)
expect_equal(cmats2[["neg_num"]], 1)
expect_equal(cmats2[["tp"]], c(0, 1, 2, 3, 3))
expect_equal(cmats2[["fn"]], c(3, 2, 1, 0, 0))
expect_equal(cmats2[["fp"]], c(0, 0, 0, 0, 1))
expect_equal(cmats2[["tn"]], c(1, 1, 1, 1, 0))
cmats3 <- create_confmats(scores = mmdat[["scores"]][[3]],
labels = mmdat[["labels"]][[3]])
expect_equal(cmats3[["pos_num"]], 3)
expect_equal(cmats3[["neg_num"]], 1)
expect_equal(cmats3[["tp"]], c(0, 1, 1, 2, 3))
expect_equal(cmats3[["fn"]], c(3, 2, 2, 1, 0))
expect_equal(cmats3[["fp"]], c(0, 0, 1, 1, 1))
expect_equal(cmats3[["tn"]], c(1, 1, 0, 0, 0))
cmats4 <- create_confmats(scores = mmdat[["scores"]][[4]],
labels = mmdat[["labels"]][[4]])
expect_equal(cmats4[["pos_num"]], 3)
expect_equal(cmats4[["neg_num"]], 1)
expect_equal(cmats4[["tp"]], c(0, 1, 1, 2, 3))
expect_equal(cmats4[["fn"]], c(3, 2, 2, 1, 0))
expect_equal(cmats4[["fp"]], c(0, 0, 1, 1, 1))
expect_equal(cmats4[["tn"]], c(1, 1, 0, 0, 0))
}) |
key2binary <- function (fulldata, key, score_missing = FALSE){
if(missing(fulldata)) missingMsg('fulldata')
if(missing(key)) missingMsg('key')
if(is.vector(key)) key <- matrix(key)
if (ncol(fulldata) != nrow(key)) stop("Key is not the correct length.\n", call.=FALSE)
colname <- colnames(fulldata)
X <- matrix(0L, nrow(fulldata), ncol(fulldata))
colnames(X) <- colname
for(i in 1L:ncol(X)){
if(all(is.na(key[i,]))) next
X[,i] <- fulldata[,i] %in% key[i,] + 0L
}
if(!score_missing)
X[is.na(fulldata)] <- NA
X
} |
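## Usage sketch for key2binary() above (the response matrix and key below
## are invented for illustration): each response is scored 1 when it
## matches a keyed value for that item and 0 otherwise, and NA responses
## stay NA unless score_missing = TRUE.
## resp <- matrix(c(1, 2, 3, 4,
##                  1, 3, 3, NA), nrow = 2, byrow = TRUE)
## key2binary(resp, key = c(1, 2, 3, 4))
## row 1 scores 1 1 1 1; row 2 scores 1 0 1 NA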
gdal_rasterize <-
function(src_datasource, dst_filename, ..., b, i, at, burn, a,
threeD, add, l, where, sql, dialect, of, a_srs, to, co,
a_nodata, init, te, tr, tap, ts, ot, optim, q,
dryrun = FALSE) {
args <- mget(names(match.call())[-1])
args[c("src_datasource", "dst_filename", "dryrun")] <- NULL
formalsTable <- getFormalsTable("gdal_rasterize")
opts <- process_args(args, formalsTable)
if(dryrun) {
x <- CLI_call("gdal_rasterize", src_datasource, dst_filename, opts=opts)
return(x)
}
gdal_utils("rasterize", src_datasource, dst_filename, opts)
invisible(dst_filename)
} |
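## Usage sketch for gdal_rasterize() above; the file names are hypothetical.
## dryrun = TRUE builds and returns the equivalent command-line call via
## CLI_call() instead of invoking gdal_utils(), so nothing is written:
## gdal_rasterize("parcels.shp", "parcels.tif",
##                burn = 1, tr = c(30, 30), dryrun = TRUE)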
odds_ratios <- function(x){
this_call <- match.call()
if(is.table(x)){
if(!all(dim(x) == c(2L, 2L))) stop('if "x" is supplied as a table, it must be a 2 x 2 table (i.e., 2 variables with 2 categories each).')
tt <- list(x)
} else if(!is.data.frame(x) || (ncol(x) < 2L)){
stop('"x" must be either a 2x2 table or a data.frame with at least 2 variables.')
} else if(any(unlist(lapply(tt <- combn(seq_len(ncol(x)), 2L, function(i){
table(x[, i])
}, simplify=FALSE), function(tbl){
!all(dim(tbl) == c(2L, 2L))
})))){
stop('all variables in the data frame "x" must have two categories.')
}
res <- list("call"=this_call, "x"=x, "tables"=tt, "comps"=combn(colnames(x), 2, c, simplify=FALSE))
no_zero_warning <- TRUE
res$ORs <- lapply(tt, function(tbl){
if(any(tbl == 0) && no_zero_warning){
warning('one or more frequencies equal to 0 encountered. there will be no results for at least one table.', call.=FALSE, immediate.=TRUE)
no_zero_warning <<- FALSE # assign in the enclosing frame so the warning is raised only once
return(list("or"=NA, "lor"=NA, "se"=NA, "z.value"=NA, "p.value"=NA))
}
or <- (tbl[1L, 1L] * tbl[2L, 2L]) / (tbl[1L, 2L] * tbl[2L, 1L])
lor <- log(or)
se <- sqrt(sum(1/(tbl)))
z <- lor/se
p <- 2*pnorm(-abs(z))
return(list("or"=or, "lor"=lor, "se"=se, "z.value"=z, "p.value"=p))
})
class(res) <- "REdaS_ORs"
return(res)
} |
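## Worked check of the formulas used in odds_ratios() above on a made-up
## 2 x 2 table: OR = (n11 * n22) / (n12 * n21), the standard error of
## log(OR) is sqrt(1/n11 + 1/n12 + 1/n21 + 1/n22), and z = log(OR) / SE.
## tbl <- as.table(matrix(c(20, 10, 5, 15), 2, 2))
## (20 * 15) / (5 * 10)             # OR = 6
## sqrt(1/20 + 1/10 + 1/5 + 1/15)   # SE of log(OR), about 0.65
## odds_ratios(tbl)                 # should reproduce both values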
library(ggplot2)
load('output/result-model7-2.RData')
ms <- rstan::extract(fit)
qua <- apply(10^ms$y_pred, 2, quantile, probs=c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X=d$Y, t(qua), check.names=FALSE)
p <- ggplot(data=d_est, aes(x=X, y=`50%`)) +
theme_bw(base_size=18) +
coord_fixed(ratio=1, xlim=c(-50, 1900), ylim=c(-50, 1900)) +
geom_pointrange(aes(ymin=`10%`, ymax=`90%`), color='grey5', fill='grey95', shape=21) +
geom_abline(aes(slope=1, intercept=0), color='black', alpha=3/5, linetype='dashed') +
labs(x='Observed', y='Predicted')
ggsave(p, file='output/fig7-3-right.png', dpi=300, w=4.2, h=4) |
get_jagscode = function(prior, ST, formula_str, arma_order, family, sample) {
mm = paste0("model {")
cps = prior[stringr::str_detect(names(prior), "^cp_[1-9]+$")]
is_dirichlet = stringr::str_detect(cps, "^dirichlet\\([1-9]+\\)$")
if (any(is_dirichlet)) {
if (!all(is_dirichlet))
stop("All or none of the change point priors can be 'dirichlet(N)' and all N > 0.")
mm = paste0(mm, "
cp_betas ~ ddirch(c(", paste0(stringr::str_extract(cps, "[0-9]+"), collapse = ", "), ", 1))")
for (i in seq_along(cps)) {
mm = paste0(mm, "
cp_", i, " = MINX + sum(cp_betas[1:", i, "]) * (MAXX - MINX)")
}
is_dirichlet2 = stringr::str_detect(prior, "^dirichlet\\([1-9]+\\)$")
prior[is_dirichlet2] = NULL
}
prior_pop = prior[!names(prior) %in% ST$cp_group]
prior_varying = prior[names(prior) %in% ST$cp_group]
mm = paste0(mm, "
mm = paste0(mm, " cp_0 = MINX
mm = paste0(mm, " cp_", nrow(ST), " = MAXX
for (i in 1:length(prior_pop)) {
mm = paste0(mm, get_prior_str(prior_pop, i))
}
if (length(prior_varying) > 0) {
mm = paste0(mm, "\n
for (i in 1:length(prior_varying)) {
mm = paste0(mm, get_prior_str(
prior = prior_varying,
i = i,
varying_group = stats::na.omit(ST$cp_group_col[ST$cp_group == names(prior_varying[i])])
))
}
}
has_ar = !all(is.na(unlist(ST$ar_code))) || !all(is.na(unlist(ST$ar_int)))
if (has_ar)
mm = paste0(mm, get_ar_code(arma_order, family, is_R = FALSE, xvar = ST$x[1]))
formula_jags = gsub("PAR_X", paste0(ST$x[1], "[i_]"), formula_str)
for (i in seq_len(nrow(ST))) {
formula_jags = gsub(paste0("CP_", i, "_INDEX"), paste0("[", ST$cp_group_col[i], "[i_]]"), formula_jags)
}
mm = paste0(mm, "
for (i_ in 1:length(", ST$x[1], ")) {")
mm = paste0(mm, gsub("\n", "\n ", formula_jags))
y_code = "y_[i_]"
if (has_ar)
y_code = paste0(y_code, " + resid_arma_[i_]")
y_code = paste0(family$linkinv_str, "(", y_code, ")")
has_weights = !all(is.na(ST$weights))
weights = ifelse(has_weights, yes = paste0(ST$weights[1], "[i_]"), no = "1")
mm = paste0(mm, "\n\n
")
if (family$family == "gaussian") {
mm = paste0(mm, ST$y[1], "[i_] ~ dnorm(", y_code, ", ", weights, " / sigma_[i_]^2)
loglik_[i_] = logdensity.norm(", ST$y[1], "[i_], ", y_code, ", ", weights, " / sigma_[i_]^2)
} else if (family$family == "binomial") {
mm = paste0(mm, ST$y[1], "[i_] ~ dbin(", y_code, ", ", ST$trials[1], "[i_])
loglik_[i_] = logdensity.bin(", ST$y[1], "[i_], ", y_code, ", ", ST$trials[1], "[i_])")
} else if (family$family == "bernoulli") {
mm = paste0(mm, ST$y[1], "[i_] ~ dbern(", y_code, ")
loglik_[i_] = logdensity.bern(", ST$y[1], "[i_], ", y_code, ")")
} else if (family$family == "poisson") {
mm = paste0(mm, ST$y[1], "[i_] ~ dpois(", y_code, ")
loglik_[i_] = logdensity.pois(", ST$y[1], "[i_], ", y_code, ")")
} else if (family$family == "exponential") {
mm = paste0(mm, ST$y[1], "[i_] ~ dexp(", y_code, ")
loglik_[i_] = logdensity.exp(", ST$y[1], "[i_], ", y_code, ")")
}
if (has_ar) {
if (family$family == "binomial") {
mm = paste0(mm, "\n resid_abs_[i_] = ", family$linkfun_str, "(", ST$y[1], "[i_] / ", ST$trials[1], "[i_]) - y_[i_]
} else {
mm = paste0(mm, "\n resid_abs_[i_] = ", family$linkfun_str, "(", ST$y[1], "[i_]) - y_[i_]
}
}
if (sample == "prior")
mm = gsub("loglik.*?$","", mm)
mm = paste0(mm, "
}
}")
mm
}
get_prior_str = function(prior, i, varying_group = NULL) {
value = prior[[i]]
name = names(prior[i])
all_d = "dunif|dbern|dbeta|dbin|dchisqr|ddexp|dexp|df|dgamma|dgen.gamma|dhyper|dlogis|dlnorm|dnegbin|dnchisqr|dnorm|dpar|dpois|dt|dweib|dirichlet"
is_fixed = stringr::str_detect(value, "^[-0-9.]+$") |
value %in% names(prior)
if (!is_fixed && !stringr::str_detect(value, all_d))
stop("The prior '", name, " = ", value, "' is not a known distribution, a number, nor a model parameter.")
if (!is_fixed) {
value = sd_to_prec(value)
if (is.null(varying_group)) {
return(paste0(" ", name, " ~ ", value, "\n"))
} else {
return(paste0(" for (", varying_group, "_ in 1:n_unique_", varying_group, ") {
", name, "_uncentered[", varying_group, "_] ~ ", value, "
}
", name, " = ", name, "_uncentered - mean(", name, "_uncentered)
}
} else {
return(paste0(" ", name, " = ", value, "
}
}
sd_to_prec = function(prior_str) {
if (stringr::str_detect(prior_str, "dnorm|dt|dcauchy|ddexp|dlogis|dlnorm")) {
trunc_start = gregexpr("T\\(", prior_str)[[1]]
if (trunc_start != -1) {
trunc = substr(prior_str, trunc_start, 1000)
prior_str = substr(prior_str, 0, trunc_start-1)
} else trunc = ""
pieces = stringr::str_trim(strsplit(prior_str, ",")[[1]])
pieces = gsub(" ", "", pieces)
is_dt = stringr::str_starts(pieces[1], "dt\\(")
if (!is_dt) {
pieces[2] = substr(pieces[2], 1, nchar(pieces[2])-1)
}
pieces[2] = paste0("1/(", pieces[2], ")^2")
new_prior = paste0(paste0(pieces, collapse = ", "), ifelse(!is_dt, ") ", " "), trunc)
return(new_prior)
}
else return(prior_str)
} |
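## Quick check of sd_to_prec() above: JAGS parameterizes dnorm() and
## related families by precision, so a standard deviation of 2 is
## rewritten as 1/(2)^2, while other priors pass through unchanged.
## sd_to_prec("dnorm(0, 2)") # "dnorm(0, 1/(2)^2) "
## sd_to_prec("dunif(0, 1)") # unchanged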
multipoint <- function(..., fmt = 16, third = "z") {
UseMethod("multipoint")
}
multipoint.character <- function(..., fmt = 16, third = "z") {
pts <- list(...)
if (grepl("empty", pts[[1]], ignore.case = TRUE)) {
return('MULTIPOINT EMPTY')
} else {
check_str(pts)
}
}
multipoint.numeric <- function(..., fmt = 16, third = "z") {
pts <- list(...)
fmtcheck(fmt)
invisible(lapply(pts, checker, type = 'MULTIPOINT', len = 2:4))
str <- paste0(lapply(pts, function(z){
sprintf("(%s)", paste0(str_trim_(format(z, nsmall = fmt, trim = TRUE)),
collapse = " "))
}), collapse = ", ")
len <- unique(vapply(pts, length, numeric(1)))
make_multi(str, len, third)
}
multipoint.data.frame <- function(..., fmt = 16, third = "z") {
pts <- list(...)
fmtcheck(fmt)
invisible(lapply(pts, dfchecker, type = 'MULTIPOINT', len = 2:4))
str <- paste0(apply(pts[[1]], 1, function(z){
sprintf("(%s)", paste0(str_trim_(format(z, nsmall = fmt, trim = TRUE)),
collapse = " "))
}), collapse = ", ")
len <- unique(vapply(pts, NCOL, numeric(1)))
make_multi(str, len, third)
}
multipoint.matrix <- function(..., fmt = 16, third = "z") {
pts <- list(...)
fmtcheck(fmt)
invisible(lapply(pts, dfchecker, type = 'MULTIPOINT', len = 2:4))
str <- paste0(apply(pts[[1]], 1, function(z){
sprintf("(%s)", paste0(str_trim_(format(z, nsmall = fmt, trim = TRUE)),
collapse = " "))
}), collapse = ", ")
len <- unique(vapply(pts, NCOL, numeric(1)))
make_multi(str, len, third)
}
multipoint.list <- function(..., fmt = 16, third = "z") {
pts <- list(...)[[1]]
fmtcheck(fmt)
invisible(lapply(pts, checker, type = 'MULTIPOINT', len = 2:4))
str <- paste0(lapply(pts, function(z) {
sprintf("(%s)", paste0(str_trim_(format(z, nsmall = fmt, trim = TRUE)),
collapse = " "))
}), collapse = ", ")
len <- unique(vapply(pts, length, numeric(1)))
make_multi(str, len, third)
}
make_multi <- function(str, len, third) {
if (len == 3) {
sprintf('MULTIPOINT %s(%s)', pick3(third), str)
} else if (len == 4) {
sprintf('MULTIPOINT ZM(%s)', str)
} else {
sprintf('MULTIPOINT (%s)', str)
}
} |
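## Usage sketch for the multipoint() generic above (coordinates invented):
## multipoint(c(100, 3), c(101, 10), fmt = 0)
## yields "MULTIPOINT ((100 3), (101 10))"; with three values per point
## the Z/M flag is chosen from the 'third' argument via pick3(), and four
## values per point produce a ZM geometry.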
qrInitMatrix <- function(version) {
.Deprecated("qr_code")
size <- 21 + (version - 1) * 4
data <- matrix(0, size, size)
markerMat <- matrix(
c(
rep(100, 7), 50, 100, rep(50, 5), 100, 50, 100, 50, 100, 100, 100, 50,
100, 50, 100, 50, 100, 100, 100, 50, 100, 50, 100, 50, 100, 100, 100, 50,
100, 50, 100, rep(50, 5), 100, 50, rep(100, 7), 50, rep(50, 8)
),
nrow = 8, ncol = 8, byrow = TRUE
)
data[1:8, 1:8] <- markerMat
markerMat <- matrix(
c(
rep(50, 8), rep(100, 7), 50, 100, rep(50, 5), 100, 50, 100, 50, 100, 100,
100, 50, 100, 50, 100, 50, 100, 100, 100, 50, 100, 50, 100, 50, 100, 100,
100, 50, 100, 50, 100, rep(50, 5), 100, 50, rep(100, 7), 50
),
nrow = 8, ncol = 8, byrow = TRUE
)
data[(size - 7):size, 1:8] <- markerMat
markerMat <- matrix(
c(
rep(50, 8), rep(100, 7), 50, 100, rep(50, 5), 100, 50, 100, 50, 100, 100,
100, 50, 100, 50, 100, 50, 100, 100, 100, 50, 100, 50, 100, 50, 100, 100,
100, 50, 100, 50, 100, rep(50, 5), 100, 50, rep(100, 7), 50
),
nrow = 8, ncol = 8
)
data[1:8, (size - 7):size] <- markerMat
data[size - 7, 9] <- 95
data[seq(9, size - 8, 2), 7] <- 95
data[7, seq(9, size - 8, 2)] <- 95
data[seq(10, size - 8, 2), 7] <- 55
data[7, seq(10, size - 8, 2)] <- 55
data[c(1:6, 8:9, (size - 6):size), 9] <- 20
data[9, c(1:6, 8, (size - 7):size)] <- 20
if (version >= 7) {
data[(size - 8):(size - 10), 1:6] <- 40
data[1:6, (size - 8):(size - 10)] <- 40
}
if (version > 1) {
markerMat <- matrix(
c(
rep(100, 5), 100, 50, 50, 50, 100, 100, 50, 100, 50, 100, 100, 50, 50,
50, 100, rep(100, 5)
),
nrow = 5, ncol = 5, byrow = TRUE
)
pMarkerCount <- (version %/% 7) + 1
index <- size - 6
gap <- (index - 7) / pMarkerCount
pMarkerList <- c(7, seq(1, pMarkerCount, 1) * gap + 7)
lastIndex <- pMarkerList[length(pMarkerList)]
for (i in seq_along(pMarkerList)) {
for (j in seq_along(pMarkerList)) {
if (
(pMarkerList[i] == 7 & pMarkerList[j] == 7) |
(pMarkerList[i] == 7 & pMarkerList[j] == lastIndex) |
(pMarkerList[j] == 7 & pMarkerList[i] == lastIndex)
) {
} else {
if (pMarkerList[i] > 15 | pMarkerList[j] > 15) {
data[
(pMarkerList[i] - 2):(pMarkerList[i] + 2),
(pMarkerList[j] - 2):(pMarkerList[j] + 2)
] <- markerMat
}
}
}
}
}
return(data)
} |
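## A QR symbol gains 4 modules per version: size = 21 + 4 * (version - 1),
## so version 1 is 21 x 21, version 2 is 25 x 25, and version 7 (the first
## with version-information blocks, handled above) is 45 x 45.
## dim(qrInitMatrix(2)) # 25 25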
plot.vlstarpred <- function(x, type = c('single', 'multiple'), names = NULL,
main = NULL, xlab = NULL, ylab = NULL,
lty.obs = 2,lty.pred = 1, lty.ci = 3, lty.vline = 1, lwd.obs = 1, lwd.pred = 1,
lwd.ci = 1, lwd.vline = 1, col.obs = NULL, col.pred = NULL, col.ci = NULL,
col.vline = NULL, ylim = NULL, mar = par("mar"), oma = par("oma"),...){
op <- par(no.readonly = TRUE)
on.exit(par(op))
type <- match.arg(type)
ifelse(is.null(names), ynames <- colnames(x$y), ynames <- names)
nrowy <- nrow(x$y)
if(is.null(col.obs)){
col.obs = 'black'
}
if(is.null(col.ci)){
col.ci = 'red'
}
if(length(col.ci) != 1){
stop('Please provide a unique color for the confidence interval')
}
col.ci = rep(col.ci, 2)
if(is.null(col.pred)){
col.pred = 'blue'
}
if(is.null(col.vline)){
col.vline = 'gray'
}
col = c(col.pred, col.obs, col.ci, col.vline)
lty = c(lty.pred, lty.obs, rep(lty.ci,2), lty.vline)
lwd = c(lwd.pred, lwd.obs, rep(lwd.ci,2), lwd.vline)
ncoly <- length(ynames)
nc <- ifelse(ncoly > 4, 2, 1)
ifelse(is.null(main), main <- paste("Forecast of series", ynames), main <- rep(main, ncoly)[1:ncoly])
ifelse(is.null(ylab), ylab <- rep("", ncoly), ylab <- rep(ylab, ncoly)[1:ncoly])
ifelse(is.null(xlab), xlab <- rep("", ncoly), xlab <- rep(xlab, ncoly)[1:ncoly])
plotprediction <- function(x, main, name, ylab, xlab, col, lty, lwd, ...){
predy <- c(rep(NA, nrowy - 1), x$y[nrowy, name], x$forecasts[[name]][, 1])
predl <- c(rep(NA, nrowy - 1), x$y[nrowy, name], x$forecasts[[name]][, 2])
predu <- c(rep(NA, nrowy - 1), x$y[nrowy, name], x$forecasts[[name]][, 3])
yobs <- c(x$y[, name], rep(NA, length(x$forecasts[[name]][, 1])))
if(is.null(ylim)){
min.y <- min(na.omit(c(predy, predl, predu, yobs)))
max.y <- max(na.omit(c(predy, predl, predu, yobs)))
ylim <- c(min.y, max.y)
}
plot.ts(predy, main = main, ylim = ylim, col = col[1], lwd=lwd[1], lty = lty[1], xlab = xlab, ylab = ylab, ...)
lines(yobs, col = col[2], lty = lty[2], lwd = lwd[2])
lines(predl, col = col[3], lty = lty[3], lwd = lwd[3])
lines(predu, col = col[4], lty = lty[4], lwd = lwd[4])
abline(v = nrowy, col = col[5], lty = lty[5], lwd = lwd[5])
}
if(type == "single"){
par(mar = mar, oma = oma)
if(ncoly > 1) par(ask = TRUE)
for(i in 1:ncoly){
plotprediction(x = x, name = ynames[i], main = main[i], col = col, lty = lty, lwd = lwd, ylab = ylab[i], xlab = xlab[i], ...)
}
} else if(type == "multiple"){
nr <- ceiling(ncoly / nc)
par(mfcol = c(nr, nc), mar = mar, oma = oma)
for(i in 1:ncoly){
plotprediction(x = x, name = ynames[i], main = main[i], col = col, lty = lty, lwd = lwd, ylab = ylab[i], xlab = xlab[i], ...)
}
}
} |
ND <- function(web, normalised=TRUE){
web <- (web > 0) * 1
k <- sum(web)
dlower <- rowSums(web)
dhigher <- colSums(web)
Nlow <- Nhigh <- 2
if (normalised){
Nlow <- length(dhigher)
Nhigh <- length(dlower)
}
low <- dlower/Nlow; names(low) <- rownames(web)
high <- dhigher/Nhigh; names(high) <- colnames(web)
list("lower"=low, "higher"=high)
}
CC <- function(web, cmode="suminvundir", rescale=TRUE, ...){
wh <- as.one.mode(web, project="higher")
cch <- closeness(wh, cmode=cmode, rescale=rescale, ...)
if (rescale) cch <- cch/sum(cch, na.rm=TRUE)
wl <- as.one.mode(web, project="lower")
ccl <- closeness(wl, cmode=cmode, rescale=rescale, ...)
if (rescale) ccl <- ccl/sum(ccl, na.rm=TRUE)
list("lower"=ccl, "higher"=cch)
}
BC <- function(web, rescale=TRUE, cmode="undirected", weighted=TRUE, ...){
wh <- as.one.mode(web, project="higher", weighted=weighted)
bch <- betweenness(wh, rescale=FALSE, cmode=cmode, ...)
if (rescale & sum(bch != 0)) bch <- bch/sum(bch, na.rm=TRUE)
wl <- as.one.mode(web, project="lower", weighted=weighted)
bcl <- betweenness(wl, rescale=FALSE, cmode=cmode, ...)
if (rescale & sum(bcl != 0)) bcl <- bcl/sum(bcl, na.rm=TRUE)
list("lower"=bcl, "higher"=bch)
} |
evalText <- function(text, where = .GlobalEnv) {
for(expr in parse(text = text))
value <- eval(expr, envir = where)
value
} |
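## evalText() parses 'text' into one or more expressions, evaluates them
## in order in 'where', and returns the value of the last one:
## evalText("x <- 2; x^2") # returns 4 and assigns x in .GlobalEnv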
expected <- eval(parse(text="\"head\""));
test(id=0, code={
argv <- eval(parse(text="list(\"head\", TRUE)"));
.Internal(`make.names`(argv[[1]], argv[[2]]));
}, o=expected); |
"estimateResidual" <-
function(Y,h = 10) {
n <- length(Y)
YBar <- rep(0,n)
for (i in 1:n) {
a <- min(n,i+h)
b <- max(1,i-h)
YBar[i] <- mean(Y[b:a])
}
return(Y-YBar)
} |
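## estimateResidual() detrends Y against a centered moving average of
## half-width h (truncated at the series ends), so a constant series
## should return all zeros:
## all(estimateResidual(rep(5, 20), h = 3) == 0) # TRUE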
summary.gssanova <- function(object,diagnostics=FALSE,...)
{
if (object$family=="polr") {
y <- model.response(object$mf)
if (!is.factor(y))
stop("gss error in gssanova1: need factor response for polr family")
lvls <- levels(y)
if ((nlvl <- length(lvls)) < 3)
stop("gss error in gssanova1: need at least 3 levels to fit polr family")
y <- outer(y,lvls,"==")
}
else y <- model.response(object$mf,"numeric")
wt <- model.weights(object$mf)
offset <- model.offset(object$mf)
if ((object$family=="nbinomial")&(!is.null(object$nu))) y <- cbind(y,object$nu)
dev.null <- switch(object$family,
binomial=dev.null.binomial(y,wt,offset),
nbinomial=dev.null.nbinomial(y,wt,offset),
polr=dev.null.polr(y,wt,offset),
poisson=dev.null.poisson(y,wt,offset),
Gamma=dev.null.Gamma(y,wt,offset),
weibull=dev.null.weibull(y,wt,offset,object$nu),
lognorm=dev.null.lognorm(y,wt,offset,object$nu),
loglogis=dev.null.loglogis(y,wt,offset,object$nu))
w <- object$w
if (is.null(offset)) offset <- rep(0,length(object$eta))
res <- residuals(object)*sqrt(w)
dev.resid <- residuals(object,"deviance")
fitted <- fitted(object)
sigma2 <- object$varht
rss <- sum(res^2)
dev <- sum(dev.resid^2)
obj.wk <- object
obj.wk$d[] <- 0
if (!is.null(model.offset(obj.wk$mf))) obj.wk$mf[,"(offset)"] <- 0
penalty <- sum(obj.wk$c*predict(obj.wk,obj.wk$mf[object$id.basis,]))
penalty <- as.vector(10^object$nlambda*penalty)
if (!is.null(object$random)) {
p.ran <- t(object$b)%*%object$random$sigma$fun(object$zeta,object$random$sigma$env)%*%object$b
penalty <- penalty + p.ran
}
if (diagnostics) {
comp <- NULL
p.dec <- NULL
for (label in object$terms$labels) {
if (label=="1") next
if (label=="offset") next
comp <- cbind(comp,predict(object,object$mf,inc=label))
jk <- sum(obj.wk$c*predict(obj.wk,obj.wk$mf[object$id.basis,],inc=label))
p.dec <- c(p.dec,10^object$nlambda*jk)
}
term.label <- object$terms$labels[object$terms$labels!="1"]
term.label <- term.label[term.label!="offset"]
if (!is.null(object$random)) {
mf <- object$mf
mf$random <- object$random$z
comp <- cbind(comp,predict(object,mf,inc=NULL))
p.dec <- c(p.dec,p.ran)
term.label <- c(term.label,"random")
}
fitted.off <- fitted-offset
comp <- cbind(comp,yhat=fitted.off,y=fitted.off+res/sqrt(w),e=res/sqrt(w))
if (any(outer(term.label,c("yhat","y","e"),"==")))
warning("gss warning in summary.gssanova: avoid using yhat, y, or e as variable names")
colnames(comp) <- c(term.label,"yhat","y","e")
comp <- sqrt(w)*comp - outer(sqrt(w),apply(w*comp,2,sum))/sum(w)
comp1 <- comp[,c(term.label,"yhat")]
decom <- t(comp1) %*% comp1[,"yhat"]
names(decom) <- c(term.label,"yhat")
decom <- decom[term.label]/decom["yhat"]
corr <- t(comp)%*%comp
corr <- t(corr/sqrt(diag(corr)))/sqrt(diag(corr))
norm <- apply(comp,2,function(x){sqrt(sum(x^2))})
cosines <- rbind(corr[c("y","e"),],norm)
rownames(cosines) <- c("cos.y","cos.e","norm")
corr <- corr[term.label,term.label,drop=FALSE]
if (qr(corr)$rank<dim(corr)[2])
kappa <- rep(Inf,len=dim(corr)[2])
else kappa <- as.numeric(sqrt(diag(solve(corr))))
rough <- p.dec / penalty
names(kappa) <- names(rough) <- term.label
}
else decom <- kappa <- cosines <- rough <- NULL
z <- list(call=object$call,family=object$family,alpha=object$alpha,
fitted=fitted,dispersion=sigma2,residuals=res/sqrt(w),rss=rss,
deviance=dev,dev.resid=dev.resid,nu=object$nu,
dev.null=dev.null,penalty=penalty,
pi=decom,kappa=kappa,cosines=cosines,roughness=rough)
class(z) <- "summary.gssanova"
z
} |
get.uppdwn <-
function (time, event, objs, flag.mdl, flag.prd, flag.rsk, t0, updown, cut, get.risk, msg=FALSE) {
mdl.std = objs[[1]]
mdl.new = objs[[2]]
z.std = objs[[3]]
z.new = objs[[4]]
p.std = objs[[5]]
p.new = objs[[6]]
if (flag.mdl || flag.prd) {
p.std = get.risk(update(mdl.std), t0)
p.new = get.risk(update(mdl.new), t0)
}
rtab = rtab.case = rtab.ctrl = NULL
if (updown == 'category') {
p.stdc = categorize(p.std, threshold = cut)
p.newc = categorize(p.new, threshold = cut)
upp = p.newc - p.stdc > 0
dwn = p.newc - p.stdc < 0
} else if (updown == 'diff') {
upp = ifelse(p.new - p.std > cut, TRUE, FALSE)
dwn = ifelse(p.std - p.new > cut, TRUE, FALSE)
}
if (msg) {
case = time <= t0 & event == 1
ctrl = time > t0
message("\nUP and DOWN calculation:")
message(paste("
if (updown == 'category') {
p.stdc = factor(p.stdc, levels=1:(length(cut)+1), labels=c(paste('<', cut), paste('>=', cut[length(cut)])))
p.newc = factor(p.newc, levels=1:(length(cut)+1), labels=c(paste('<', cut), paste('>=', cut[length(cut)])))
rtab = table(Standard = p.stdc, New = p.newc)
rtab.case = table(Standard = p.stdc[case], New = p.newc[case])
rtab.ctrl = table(Standard = p.stdc[ctrl], New = p.newc[ctrl])
message("\n Reclassification Table for all subjects:")
print(rtab)
message("\n Reclassification Table for case:")
print(rtab.case)
message("\n Reclassification Table for control:")
print(rtab.ctrl)
} else if (updown == 'diff') {
message(paste("
message(paste("
}
plot(p.std[case], p.new[case], xlab='Standard model', ylab='New model', main='', xlim=c(0,1), ylim=c(0,1), col=2)
par(new=T)
plot(p.std[ctrl], p.new[ctrl], xlab='', ylab='', axes=F, xlim=c(0,1), ylim=c(0,1))
par(new=T)
plot(p.std[!case&!ctrl], p.new[!case&!ctrl], xlab='', ylab='', axes=F, xlim=c(0,1), ylim=c(0,1), col=4)
legend('topleft', c('Case','Control','Censored'), col=c(2,1,4), bty='n', pch=1)
if (updown == 'diff') {
abline(0,1)
abline(-cut, 1, lty=2)
abline(cut, 1, lty=2)
}
if (updown == 'category') {
abline(h=cut, lty=2)
abline(v=cut, lty=2)
}
}
return(list(upp, dwn, p.std, p.new, rtab, rtab.case, rtab.ctrl))
} |
string_widths_dev <- function(strings, family = '', face = 1, size = 12, cex = 1, unit = 'cm') {
unit <- match.arg(unit, possible_units)
unit <- match(unit, possible_units) - 1L
n_total <- length(strings)
if (length(family) != 1) family <- rep_len(family, n_total)
if (any(c(length(face), length(size), length(cex)) != 1)) {
face <- rep_len(face, n_total)
size <- rep_len(size, n_total)
cex <- rep_len(cex, n_total)
}
dev_string_widths_c(as.character(strings), as.character(family),
as.integer(face), as.numeric(size), as.numeric(cex), unit)
}
string_metrics_dev <- function(strings, family = '', face = 1, size = 12, cex = 1, unit = 'cm') {
unit <- match.arg(unit, possible_units)
unit <- match(unit, possible_units) - 1L
n_total <- length(strings)
if (length(family) != 1) family <- rep_len(family, n_total)
if (any(c(length(face), length(size), length(cex)) != 1)) {
face <- rep_len(face, n_total)
size <- rep_len(size, n_total)
cex <- rep_len(cex, n_total)
}
dev_string_metrics_c(as.character(strings), as.character(family),
as.integer(face), as.numeric(size), as.numeric(cex), unit)
}
possible_units <- c('cm', 'inches', 'device', 'relative') |
make_call <- function(f, ..., .args = list()) {
if (is.character(f)) f <- as.name(f)
as.call(c(f, ..., .args))
}
do_call <- function(f, ..., .args = list(), .env = parent.frame()) {
f <- substitute(f)
call <- make_call(f, ..., .args)
eval(call, .env)
}
add_class <- function(x, class) {
if (!is(x, class)) {
class(x) <- c(class, class(x))
}
x
}
`%||%` <- function (lhs, rhs) {
lres <- withVisible(eval(lhs, envir = parent.frame()))
if (is.null(lres$value)) {
eval(rhs, envir = parent.frame())
} else {
if (lres$visible) {
lres$value
} else {
invisible(lres$value)
}
}
}
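## `%||%` evaluates its right-hand side only when the left-hand side is
## NULL, preserving the visibility of the result; `%&&%` below is the
## complement (run the right-hand side only when the left is not NULL).
## NULL %||% "fallback"    # "fallback"
## "value" %||% "fallback" # "value"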
`%&&%` <- function(lhs, rhs) {
lres <- withVisible(eval(lhs, envir = parent.frame()))
if (!is.null(lres$value)) {
eval(rhs, envir = parent.frame())
} else {
if (lres$visible) {
lres$value
} else {
invisible(lres$value)
}
}
}
grab_args <- function() {
envir <- parent.frame()
func <- sys.function(-1)
call <- sys.call(-1)
dots <- match.call(func, call, expand.dots=FALSE)$...
c(as.list(envir), dots)
}
capitalize <- function(x) {
x <- tolower(x)
substr(x, 1, 1) <- toupper(substr(x, 1, 1))
x
}
address <- function(x) {
.Call(C_R_igraph_address, x)
}
`%+%` <- function(x, y) {
stopifnot(is.character(x), is.character(y))
paste0(x, y)
}
chr <- as.character
drop_null <- function(x) {
x [!sapply(x, is.null)]
} |
context("Manual run")
test_that("simple run", {
cleanup()
m <- remake("remake.yml")
expect_false(remake_is_current(m, "data.csv"))
expect_false(remake_is_current(m, "processed"))
expect_false(remake_is_current(m, "plot.pdf"))
cmp <- list(version=m$store$version,
name="data.csv",
type="file",
depends=empty_named_list(),
fixed=hash_object(list("data.csv")),
code=list(
functions=list(download_data=hash_function(
m$store$env$env$download_data))))
x <- dependency_status(m$targets[["data.csv"]], m$store, TRUE)
expect_equal(x[names(cmp)], cmp)
expect_equal(sort(setdiff(names(x), names(cmp))),
sort(c("hash", "time")))
cmp <- list(version=m$store$version,
name="processed",
type="object",
depends=list("data.csv"=NA_character_),
fixed=NULL,
code=list(
functions=list(process_data=hash_function(
m$store$env$env$process_data))))
x <- dependency_status(m$targets[["processed"]], m$store, TRUE)
expect_equal(x[names(cmp)], cmp)
expect_equal(sort(setdiff(names(x), names(cmp))),
sort(c("hash", "time")))
res <- dependency_status(m$targets[["plot.pdf"]], m$store, TRUE)
pkgs <- c("grDevices", "graphics")
cmp <- list(version=m$store$version,
name="plot.pdf",
type="file",
depends=list(processed=NA_character_),
fixed=hash_object(list("plot.pdf")),
code=list(
functions=list(
do_plot=hash_function(m$store$env$env$do_plot),
myplot=hash_function(m$store$env$env$myplot))))
expect_equal(res[names(cmp)], cmp)
expect_equal(sort(setdiff(names(res), names(cmp))),
sort(c("hash", "time")))
remake_update2(m, "data.csv")
remake_update2(m, "processed")
remake_update2(m, "plot.pdf")
expect_true(remake_is_current(m, "data.csv"))
expect_true(remake_is_current(m, "processed"))
expect_true(remake_is_current(m, "plot.pdf"))
cleanup()
})
test_that("Depending on a file we don't make", {
cleanup()
e <- new.env()
source("code.R", e)
e$download_data("data.csv")
expect_true(file.exists("data.csv"))
m <- remake("remake2.yml")
expect_message(remake_update2(m, "data.csv"), NA)
remake_update2(m, "processed")
remake_update2(m, "plot.pdf")
expect_true(file.exists("plot.pdf"))
remake_make(m, "clean")
expect_false(file.exists("plot.pdf"))
expect_true(file.exists("data.csv"))
cleanup()
}) |
'
Authors
Torsten Pook, [email protected]
Copyright (C) 2017 -- 2020 Torsten Pook
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'
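## breeding.intern() simulates a single meiosis for one parent: it draws
## crossover positions (optionally weighted by recom.f.indicator), new
## mutations and re-mutations, optional duplication events, and optional
## gene edits, and returns a list of recombination points, mutated SNPs,
## origin segments, parent info, duplications, crossover positions, and
## the genome share inherited from the first haplotype.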
breeding.intern <- function(info.parent, parent, population , mutation.rate = 10^-5, remutation.rate = 10^-5, recombination.rate=1,
recom.f.indicator=NULL, duplication.rate=0, duplication.length=0.01,
duplication.recombination=1, delete.same.origin=FALSE,
gene.editing=FALSE, nr.edits= 0,
gen.architecture=0,
decodeOriginsU=MoBPS::decodeOriginsR){
n_snps <- sum(population$info$snp)
if(gen.architecture==0){
length.total <- population$info$length.total
} else{
length.total <- population$info$gen.architecture[[gen.architecture]]$length.total
for(haplo in 1:2){
for(index in 1:length(parent[[haplo]])){
before <- find.snpbefore(parent[[haplo]][index], population$info$snp.position)
if(before>0){
p_before <- population$info$snp.position[before]
new_p_before <- population$info$gen.architecture[[gen.architecture]]$snp.position[before]
} else{
p_before <- population$info$length.total[1]
new_p_before <-length.total[1]
}
if(before<n_snps){
p_after <- population$info$snp.position[before+1]
new_p_after <- population$info$gen.architecture[[gen.architecture]]$snp.position[before+1]
} else{
p_after <- population$info$length.total[length(population$info$length.total)]
new_p_after <- length.total[length(length.total)]
}
share <- (parent[[haplo]][index]-p_before) / (p_after-p_before)
parent[[haplo]][index] <- new_p_before + share * (new_p_after-new_p_before)
}
}
}
n.chromosome <- length(length.total)-1
if(length(recom.f.indicator)!=0){
recom.f.indicator <- rbind(recom.f.indicator, c(length.total[n.chromosome+1],0))
indicator.vol <- sum((recom.f.indicator[-1,1] -recom.f.indicator[-nrow(recom.f.indicator),1])*recom.f.indicator[-nrow(recom.f.indicator),2])
recom.vol <- indicator.vol
} else{
recom.vol <- length.total[n.chromosome+1]*recombination.rate
}
noc <- stats::rpois(1, recom.vol)
if(length(recom.f.indicator)!=0){
porc <- stats::runif(noc,0,recom.vol)
if(length(porc)>0){
cums <- cumsum((recom.f.indicator[-1,1] -recom.f.indicator[-nrow(recom.f.indicator),1])*recom.f.indicator[-nrow(recom.f.indicator),2])
for(index in 1:length(porc)){
actuel <- porc[index]
before <- sum(actuel < cums)
prev1 <- recom.f.indicator[nrow(recom.f.indicator)-before,]
next1 <- recom.f.indicator[nrow(recom.f.indicator)-before+1,]
porc[index] <- prev1[1] + (actuel-c(0,cums)[nrow(recom.f.indicator)-before]) /prev1[2]
}
}
} else{
porc <- stats::runif(noc,0,length.total[n.chromosome+1])
}
start.point <- c(stats::rbinom(n.chromosome,1,0.5),0) * (1:(n.chromosome+1)) # random starting haplotype per chromosome; used in the duplication block below and when assembling porc
if(duplication.rate>0){
porc.d <- sortd(c(porc, 0, length.total[n.chromosome+1]))
rpod <- c(0, (stats::rbinom(noc,1,duplication.rate) * 2:(noc+1)),0)
pod <- porc.d[rpod]
pod2 <- rep(0,length(pod))
count <- 1
add.one <- rep(0,length(pod))
for(index in unique(c(0,rpod))[-1]){
activ.chromosome <- sum(pod[count] > length.total)
length.d <- stats::rexp(1, rate=(1/duplication.length)) * (-1)^(stats::rbinom(1,1,0.5))
pod2[count] <- max(min(pod[count]+ length.d, porc.d[index+1], length.total[activ.chromosome+1]),porc.d[index-1], length.total[activ.chromosome])
if(pod2[count]==(porc.d[index])|| sum(length.total[start.point]==porc.d[index])){
add.one[count] <- 1
}
count <- count+1
}
pod.start <- pod
pod.start[pod.start>pod2] <- pod2[pod.start>pod2]
pod.end <- pod
pod.end[pod.end<pod2] <- pod2[pod.end<pod2]
} else{
pod <- pod2 <- rpod <- pod.start <- pod.end <- numeric(0)
}
porc <- sortd((c(length.total[start.point],porc)))
porc <- c(-1,porc,length.total[n.chromosome+1]+1)
dup <- list()
dup[[1]] <- (parent[[11]])
dup[[2]] <- (parent[[12]])
dup[[3]] <- "test"
for(abc in 1:2){
counter <- 0
if(length(dup[[abc]])>0){
posi <- 1:nrow(dup[[abc]])
for(index in 1:nrow(dup[[abc]])){
ndup <- stats::rpois(1, (dup[[abc]][index,3]- dup[[abc]][index,2])* duplication.recombination)
if(ndup>0){
pdup <- sort(stats::runif(ndup, min=dup[[abc]][index,2], max= dup[[abc]][index,3]))
} else{
pdup <- numeric(0)
}
if(counter==1 && dup[[abc]][index,1] == dup[[abc]][(index-1),1]){
pdup <- c(dup[[abc]][index,2], pdup)
}
counter <- 0
if(ndup%%2 && (sum(dup[[abc]][index,1]>=porc)%%2)==(abc%%2)){
porc <- sort(c(porc, dup[[abc]][index,1]))
}
if(ndup>0){
if(ndup%%2==0){
pdup <- c(pdup, dup[[abc]][index,3])
counter <- 1
}
dup[[abc]][index,3] <- pdup[1]
if(ndup>1){
for(index2 in seq(2,ndup,by=2)){
dup[[abc]] <- rbind(dup[[abc]], dup[[abc]][index,])
dup[[abc]][nrow(dup[[abc]]),2:3] <- pdup[index2:(index2+1)]
posi <- c(posi,index)
}
}
}
}
posi <- sort(posi,index.return=TRUE)$ix
dup[[abc]] <- matrix(dup[[abc]][posi,], ncol=8)
remove0 <- (dup[[abc]][,2] == dup[[abc]][,3])
if(sum(remove0)>0){
remove0 <- remove0 * 1:length(remove0)
dup[[abc]] <- dup[[abc]][-remove0,]
}
}
}
new.poc <- NULL
new.mut <- NULL
new.origin <- NULL
new.dup <- NULL
activ <- 1
store_mut <- list(population$info$snp.position[parent[[3]]], population$info$snp.position[parent[[4]]])
for(index in 1:(length(porc)-1)){
activ.porc <- (parent[[activ]]<porc[index+1]) & (parent[[activ]]>porc[index])
if(length(store_mut[[activ]])==0){
activ.mut <- logical(0)
} else{
activ.mut <- (store_mut[[activ]]<porc[index+1]) & (store_mut[[activ]]>porc[index])
}
if(length(dup[[activ]])>0){
activ.dup1 <- (dup[[activ]][,1] <= porc[index+1]) * (dup[[activ]][,1] >= porc[index]) * ((dup[[activ]][,8] == 1) + (dup[[activ]][,8] == 3))
activ.dup2 <- (dup[[activ]][,1] < porc[index+1]) * (dup[[activ]][,1] > porc[index]) * (dup[[activ]][,8] == 2)
leftb <- sum(porc[index]==porc) - sum(porc[index] == length.total[start.point])
rightb <- sum(porc[index+1]==porc) - sum(porc[index+1] == length.total[start.point])
activ.dup3 <- (1-leftb) * (dup[[activ]][,1] <= porc[index+1]) * (dup[[activ]][,1] == porc[index]) * ((dup[[activ]][,8] == 1) + (dup[[activ]][,8] == 3))
activ.dup4 <- (1-rightb) * (dup[[activ]][,1] == porc[index+1]) * (dup[[activ]][,1] >= porc[index]) * ((dup[[activ]][,8] == 1) + (dup[[activ]][,8] == 3))
activ.dup <- activ.dup1 + activ.dup2 - ((activ.dup3+ activ.dup4)>0)
new.dup <- rbind(new.dup, dup[[activ]][activ.dup * (1:length(activ.dup)),])
} else{
activ.dup <- integer(0)
new.dup <- NULL
}
save1 <- max(new.poc[length(new.poc)],-2)!=porc[index+1]
if(save1){
new.poc <- c(new.poc, parent[[activ]][activ.porc], porc[index+1])
}
if(length(activ.mut)>0){
new.mut <- c(new.mut, parent[[activ+2]][activ.mut])
}
start <- max(sum(parent[[activ]]<=porc[index]),0)
activ.origin <- start:(start+sum(activ.porc))
if(start==0){
activ.origin <- activ.origin[-1]
}
if(length(activ.origin)>0){
while(max(activ.origin,-1)>length(parent[[activ+4]])){
activ.origin <- activ.origin[-length(activ.origin)]
}
}
if(porc[index+1]>0 && save1){
new.origin <- c(new.origin, parent[[activ+4]][activ.origin])
}
activ <- 3 - activ
}
new.poc <- new.poc[-length(new.poc)]
n_mut <- stats::rbinom(1,n_snps, mutation.rate)
if(length(new.mut)==0){
n_remut <- 0
} else{
n_remut <- stats::rbinom(1, length(new.mut), remutation.rate)
}
if(n_mut==0){
mutationen <- integer(0)
} else{
mutationen <- sample(1:n_snps, n_mut)
}
if(length(new.mut)>0){
checker <- duplicated(c(new.mut, mutationen))
mutationen <- mutationen[(1:length(checker))[-(1:length(new.mut))]-length(new.mut)]
}
if(n_remut==0){
remutationen <- integer(0)
} else{
remutationen <- sample(1:length(new.mut), n_remut)
}
if(length(remutationen)==0){
new.mut <- sort(unique(c(mutationen, new.mut)))
} else{
new.mut <- c(mutationen, new.mut[-remutationen])
if(length(new.mut)>0){
new.mut <- sort(new.mut)
new.mut <- new.mut[!duplicated(new.mut)]
}
}
new.dups <- NULL
if(length(pod)>0){
new.dups <- cbind(0,pod.start, pod.end, info.parent[1],info.parent[2],info.parent[3], 0, 0)
for(index in 1:length(pod)){
position.options <- c(length.total[sum(length.total<pod.start[index])], pod.start[index], length.total[sum(length.total<pod.start[index])+ 1])
samp<- sample(1:3,1)
new.dups[index,1] <- position.options[samp]
activ.chromo <- (sum(porc <= pod.start[index])+1+ add.one[index])%%2 +1
new.dups[index,7] <- activ.chromo
new.dups[index,8] <- samp
}
}
new.dup <- rbind(new.dup, new.dups)
if(length(new.dup)>0){
order <- sort(new.dup[,1],index.return=TRUE)$ix
new.dup <- new.dup[order,]
}
new.origin_old <- new.origin
new.poc_old <- new.poc
if(delete.same.origin==TRUE && length(new.origin)>1){
for(index in length(new.origin):2){
check <- prod(new.origin[index] == new.origin[index-1])
if(check==TRUE){
new.origin <- new.origin[-index]
new.poc <- new.poc[-index]
}
}
}
if(gen.architecture!=0){
for(index in 1:length(new.poc)){
before <- find.snpbefore(new.poc[index], population$info$gen.architecture[[gen.architecture]]$snp.position)
if(before>0){
new_p_before <- population$info$snp.position[before]
p_before <- population$info$gen.architecture[[gen.architecture]]$snp.position[before]
} else{
new_p_before <- population$info$length.total[1]
p_before <-length.total[1]
}
if(before<n_snps){
new_p_after <- population$info$snp.position[before+1]
p_after <- population$info$gen.architecture[[gen.architecture]]$snp.position[before+1]
} else{
new_p_after <- population$info$length.total[length(population$info$length.total)]
p_after <- length.total[length(length.total)]
}
share <- (new.poc[index]-p_before) / (p_after-p_before)
new.poc[index] <- new_p_before + share * (new_p_after-new_p_before)
}
}
if(length(new.poc)!=(length(new.origin)+1)){
stop("recombination inconsistency!")
}
if(gene.editing==TRUE){
hap_sequence <- compute.snps_single(population, new.poc, new.mut, new.origin, decodeOriginsU=decodeOriginsU)
ed_info <- population$info$editing_info[[length(population$info$editing_info)]]
edits_p <- numeric(nr.edits)
edits <- 0
current_p <- 1
max_p <- sum(population$info$snp)
while(edits < nr.edits && current_p <= max_p){
if(hap_sequence[ed_info[current_p,1]] == ed_info[current_p,2]){
edits <- edits + 1
edits_p[edits] <- ed_info[current_p,1]
}
current_p <- current_p +1
}
changes <- edits_p
new.mut <- sort(c(new.mut, edits_p))
}
maxl <- max(length.total)
segment_length <- diff(c(0,porc[-unique(c(1,length(porc)))], maxl))
share_a <- sum(segment_length[(1:length(segment_length)%%2)==1])/maxl
return(list(new.poc, new.mut, new.origin, info.parent, new.dup, porc, share_a))
} |
make_mob_in = function(comm, plot_attr, coord_names = NULL, binary = FALSE,
latlong = FALSE) {
out = list(tests = list(N = TRUE, SAD = TRUE, agg = TRUE))
if (nrow(comm) < 5) {
warning("Number of plots in community is less than five therefore only individual rarefaction will be computed")
out$tests$N = FALSE
out$tests$agg = FALSE
}
if (nrow(comm) != nrow(plot_attr))
stop("Number of plots in community does not equal number of plots in plot attribute table")
if (is.null(coord_names) == FALSE) {
spat_cols = sapply(coord_names, function(x) which(x == names(plot_attr)))
if (length(spat_cols) == 1 & latlong == TRUE)
stop("Both latitude and longitude have to be specified")
}
if (any(row.names(comm) != row.names(plot_attr)))
warning("Row names of community and plot attributes tables do not match
which may indicate different identities or orderings of samples")
if (binary) {
warning("Only spatially-explicit sampled based forms of rarefaction can be computed on binary data")
out$tests$SAD = FALSE
out$tests$N = FALSE
}
else {
if (max(comm) == 1)
warning("Maximum abundance is 1 which suggests data is binary, change the binary argument to TRUE")
}
if (any(colSums(comm) == 0)) {
warning("Some species have zero occurrences and will be dropped from the community table")
comm = comm[ , colSums(comm) != 0]
}
out$comm = data.frame(comm)
if (is.null(coord_names) == FALSE) {
if (length(spat_cols) > 0) {
out$env = data.frame(plot_attr[ , -spat_cols])
colnames(out$env) = colnames(plot_attr)[-spat_cols]
out$spat = data.frame(plot_attr[ , spat_cols])
}
}
else {
warning("Note: 'coord_names' was not supplied and therefore spatial aggregation will not be examined in downstream analyses")
out$tests$agg = FALSE
out$env = data.frame(plot_attr)
out$spat = NULL
}
out$latlong = latlong
class(out) = 'mob_in'
return(out)
}
subset.mob_in = function(x, subset, type = 'string', drop_levels = FALSE, ...) {
if (missing(subset))
r <- rep_len(TRUE, nrow(x$comm))
if (type == 'integer')
r <- 1:nrow(x$comm) %in% subset
if (type == 'logical')
r <- subset
if (type == 'string') {
e <- substitute(subset)
r <- eval(e, x$env)
if (!is.logical(r))
stop("'subset' must be logical when type = 'string'")
}
x$comm = base::subset(x$comm, r)
x$env = base::subset(x$env, r)
if (drop_levels)
x$env = droplevels(x$env)
if (!is.null(x$spat))
x$spat = x$spat[r, ]
return(x)
}
print.mob_in = function(x, nrows = 6, nsp = 5, ...) {
if (nrow(x$comm) > nrows)
cat(paste('Only the first', nrows, 'rows of any matrices are printed\n'))
else
nrows = nrow(x$comm)
cat('\n$tests\n')
print(x$tests)
if (ncol(x$comm) > nsp)
cat(paste('\n$comm (Only first', nsp, 'species columns are printed)\n'))
else
nsp = ncol(x$comm)
print(x$comm[1:nrows, 1:nsp])
cat('\n$env\n')
print(utils::head(x$env, nrows))
cat('\n$spat\n')
print(head(x$spat, nrows))
cat('\n$latlong\n')
print(x$latlong)
}
sphere_dist = function(coords, r = 6378.137){
coslat1 <- cos((coords[ , 2] * pi) / 180)
sinlat1 <- sin((coords[ , 2] * pi) / 180)
coslon1 <- cos((coords[ , 1] * pi) / 180)
sinlon1 <- sin((coords[ , 1] * pi) / 180)
pp <- cbind(coslat1 * coslon1, coslat1 * sinlon1, sinlat1) %*%
t(cbind(coslat1 * coslon1, coslat1 * sinlon1, sinlat1))
return(r * acos(ifelse(abs(pp) > 1, 1 * sign(pp), pp)))
}
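## sphere_dist() returns the matrix of great-circle distances (in km,
## using the equatorial radius 6378.137 km) between rows of a
## longitude/latitude coordinate matrix:
## coords <- cbind(lon = c(0, 0), lat = c(0, 1))
## sphere_dist(coords) # off-diagonals ~ 111.3 km for one degree of latitude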
rarefaction = function(x, method, effort=NULL, coords=NULL, latlong=NULL,
dens_ratio=1, extrapolate=FALSE, return_NA=FALSE,
quiet_mode=FALSE, spat_algo=NULL) {
if (method == 'indiv') {
warning('method == "indiv" is depreciated and should be set to "IBR" for individual-based rarefaction')
method = 'IBR'
} else if (method == 'samp') {
warning('method == "samp" is depreciated and should be set to "SBR" for sample-based rarefaction')
method = 'SBR'
} else if (method == 'spat') {
warning('method == "spat" is depreciated and should be set to "sSBR" for spatial, sample-based rarefaction')
method = 'sSBR'
} else if (!any(method %in% c('IBR', 'SBR', 'nsSBR', 'sSBR')))
stop('The argument "method" must be set to either "IBR", "SBR", "nsSBR",',
' or "sSBR" for random individual, random sample, non-spatial,',
' sample-based (nsSBR), and spatial, sample-based rarefaction (sSBR)',
' respectively.')
if (method == 'nsSBR' & dens_ratio == 1)
warning('The nonspatial, sample-based rarefaction (nsSBR) curve only differs from the IBR when compared with a reference density by setting "dens_ratio" not equal to 1')
if ('mob_in' %in% class(x)) {
x_mob_in = x
x = x_mob_in$comm
if (is.null(latlong))
latlong = x_mob_in$latlong
else if (latlong != x_mob_in$latlong)
stop(paste('The "latlong" argument is set to', latlong,
'but the value of x$latlong is', x_mob_in$latlong))
if (method == 'sSBR') {
if (is.null(coords)) {
if (is.null(x_mob_in$spat)) {
stop('Coordinate name value(s) must be supplied in the make_mob_in object in order to plot using sample spatially explicit based (spat) rarefaction')
}
coords = x_mob_in$spat
}
}
}
if (method == 'SBR' | method == 'sSBR') {
if (is.null(dim(x)))
stop('For random or spatially explicit sample based rarefaction "x" must be a site x species matrix as the input')
else {
x = (x > 0) * 1
n = nrow(x)
if (method == 'SBR')
x = colSums(x)
}
} else if (!is.null(spat_algo))
warning("Setting spat_algo to a non-NULL value only has consequences when method = sSBR")
if (method == 'IBR' | method == 'nsSBR') {
if (!is.null(dim(x)))
x = colSums(x)
n = sum(x)
}
if (is.null(effort))
if (n == 0)
effort = 0
else
effort = 1:n
if (any(effort > n)) {
if (extrapolate & return_NA)
stop('It does not make sense to set "extrapolate" and "return_NA" to both be TRUE, see documentation')
if (!quiet_mode) {
warning_mess = paste('"effort" larger than total number of',
ifelse(method == 'IBR', 'individuals', 'samples'),
'returning')
if (extrapolate)
warning(paste(warning_mess, 'extrapolated S using Chao1'))
else if (return_NA)
warning(paste(warning_mess, 'NA'))
else
warning(paste(warning_mess, 'S'))
}
} else if (extrapolate)
if (!quiet_mode)
message('Richness was not extrapolated because effort is less than or equal to the number of samples')
if (method == 'sSBR') {
if (is.null(spat_algo))
spat_algo = 'kNN'
if (spat_algo == 'kNN') {
explicit_loop = matrix(0, n, n)
if (is.null(latlong))
stop('For spatial rarefaction the argument "latlong" must be set TRUE or FALSE')
if (latlong) {
pair_dist = sphere_dist(coords)
} else {
pair_dist = as.matrix(dist(coords))
}
for (i in 1:n) {
dist_to_site = pair_dist[i, ]
new_order = sample(1:n)
dist_new = dist_to_site[new_order]
new_order = new_order[order(dist_new)]
new_order = c(i, new_order[new_order != i])
comm_ordered = x[new_order, ]
comm_bool = as.data.frame((comm_ordered == 0) * 1)
rich = cumprod(comm_bool)
explicit_loop[ , i] = as.numeric(ncol(x) - rowSums(rich))
}
out = apply(explicit_loop, 1, mean)[effort]
}
else if (spat_algo == "kNCN")
out = kNCN_average(x=x, coords=coords, latlong=latlong)[effort]
}
else {
x = x[x > 0]
S = length(x)
if (dens_ratio == 1) {
ldiv = lchoose(n, effort)
} else {
effort = effort[effort / dens_ratio <= n]
ldiv = lgamma(n - effort / dens_ratio + 1) - lgamma(n + 1)
}
p = matrix(0, sum(effort <= n), S)
out = rep(NA, length(effort))
S_ext = NULL
for (i in seq_along(effort)) {
if (effort[i] <= n) {
if (dens_ratio == 1) {
p[i, ] = ifelse(n - x < effort[i], 0,
exp(lchoose(n - x, effort[i]) - ldiv[i]))
} else {
p[i, ] = ifelse(n - x < effort[i] / dens_ratio, 0,
exp(suppressWarnings(lgamma(n - x + 1)) -
suppressWarnings(lgamma(n - x - effort[i] /
dens_ratio + 1)) +
ldiv[i]))
}
} else if (extrapolate) {
f1 = sum(x == 1)
f2 = sum(x == 2)
f0_hat <- ifelse(f2 == 0,
(n - 1) / n * f1 * (f1 - 1) / 2,
(n - 1) / n * f1^2 / 2 / f2)
A = n * f0_hat / (n * f0_hat + f1)
S_ext = c(S_ext, ifelse(f1 == 0, S,
S + f0_hat * (1 - A ^ (effort[i] - n))))
}
else if (return_NA)
S_ext = c(S_ext, NA)
else
S_ext = c(S_ext, S)
}
out = rep(NA, length(effort))
out[effort <= n] = rowSums(1 - p)
out[effort > n] = S_ext
}
names(out) = effort
return(out)
}
ind_rare_perm = function(abu, n_perm = 100, n_indiv = NULL) {
if (!is.vector(abu)) {
stop('abu must be a vector of abundances')
}
calc_S = function(splist, n_indiv) {
sapply(n_indiv, function(n) length(unique(splist[1:n])))
}
rand_splist = function(abu, S) {
sample(unlist(mapply(rep, 1:S, abu)), replace = FALSE)
}
S = length(abu)
N = sum(abu)
if (is.null(n_indiv))
n_indiv = c(2^(seq(0, log2(N))), N)
S_rand = replicate(n_perm, calc_S(rand_splist(abu, S), n_indiv))
S_avg = apply(S_rand, 1, mean)
S_qt = apply(S_rand, 1, quantile, c(0.025, 0.975))
return(data.frame(n_indiv, S_avg, S_lo = S_qt[1, ], S_hi = S_qt[2, ]))
}
avg_nn_dist = function(coords) {
pair_dist = as.matrix(stats::dist(coords))
sort_dist = apply(pair_dist, 1, sort)
avg_dist = apply(sort_dist, 1, mean)
return(avg_dist)
}
get_delta_curves = function(x, tests=c('SAD', 'N', 'agg'), spat_algo=NULL,
inds=NULL, ind_dens=NULL, n_plots=NULL) {
if (is.null(inds) & any(c('SAD', 'N') %in% tests))
stop('If SAD or N effect to be calculated inds must be specified')
if (is.null(ind_dens) & 'N' %in% tests)
stop('If N effect to be calculated ind_dens must be specified')
if (any(c('N', 'agg') %in% tests) & !('mob_in' %in% class(x)))
stop('If N or agg effects to be computed x must be a mob_in object')
out = list()
if ('SAD' %in% tests) {
S_SAD = rarefaction(x, 'IBR', inds)
out$SAD = data.frame(test = 'SAD', sample = 'indiv',
effort = inds, S = S_SAD, effect = S_SAD,
stringsAsFactors = FALSE)
}
if ('N' %in% tests) {
comm_dens = sum(x$comm) / nrow(x$comm)
dens_ratio = ind_dens / comm_dens
S_N = rarefaction(x, 'IBR', inds, dens_ratio = dens_ratio)
if (!('SAD' %in% tests))
S_SAD = rarefaction(x, 'IBR', inds)
effect = S_N - S_SAD
out$N = data.frame(test = 'N', sample = 'indiv',
effort = inds, S = S_N, effect,
stringsAsFactors = FALSE)
}
if ('agg' %in% tests) {
if (is.null(n_plots))
n_plots = nrow(x$comm)
S_agg = rarefaction(x, 'sSBR', 1:n_plots, spat_algo = spat_algo)
ind_density = sum(x$comm) / nrow(x$comm)
samp_effort = round(1:n_plots * ind_density)
S_N = rarefaction(x, 'IBR', samp_effort)
effect = S_agg - S_N
out$agg = data.frame(test = 'agg', sample = 'plot',
effort = as.numeric(names(S_agg)),
S = S_agg, effect,
stringsAsFactors = FALSE)
}
return(flatten_dfr(tibble(out)))
}
get_rand_sad = function(rad, N) {
rand_samp = sample(1:length(rad), N, replace = T, prob = rad)
rand_sad = table(factor(rand_samp, levels = 1:length(rad)))
return(as.numeric(rand_sad))
}
get_null_comm = function(comm, null_model, groups = NULL) {
if (!(is.matrix(comm) | is.data.frame(comm)))
stop('comm must be a matrix or data.frame')
if (is.null(groups))
groups = rep(1, nrow(comm))
N_plots = rowSums(comm)
if (null_model == "rand_SAD") {
rad_pool = colSums(comm) / sum(comm)
null_sads = map(N_plots, ~ get_rand_sad(rad_pool, .x))
names(null_sads) = 1:length(null_sads)
} else if (null_model == "rand_N" | null_model == "rand_agg") {
if (null_model == "rand_N")
N_plots = sample(N_plots)
.x <- NULL
rad_groups = data.frame(comm, groups) %>%
group_by(groups) %>%
summarize_all(sum) %>%
select(-one_of("groups")) %>% t %>%
as_tibble(.x, .name_repair = ~ vctrs::vec_as_names(..., quiet = TRUE)) %>%
map(~ .x / sum(.x))
rad_plots = rep(rad_groups, table(groups))
names(rad_plots) = 1:length(rad_plots)
null_sads = map2(rad_plots, N_plots, get_rand_sad)
}
null_comm = null_sads %>% tibble %>% flatten_dfr %>% t
return(null_comm)
}
get_inds = function(N_max, inds = NULL, log_scale = FALSE) {
if (is.null(inds)) {
if (log_scale)
ind_sample_size = unique(c(2^seq(0, floor(log2(N_max))), N_max))
else
ind_sample_size = seq(N_max)
}
if (length(inds) == 1) {
if (log_scale)
      ind_sample_size = floor(2^(seq(inds) * log2(N_max) / inds))
else
ind_sample_size = floor(seq(1, N_max, length.out = inds))
}
if (length(inds) > 1) {
if (max(inds) > N_max)
warning(paste('Sample size is higher than abundance of at least one group, only n up to',
N_max, 'will be used'))
ind_sample_size = inds
}
ind_sample_size = unique(c(ind_sample_size[ind_sample_size < N_max], N_max))
ind_sample_size = unique(c(1, ind_sample_size))
return(ind_sample_size)
}
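# Example (assumed usage): default log2-spaced individual sample sizes up
# to a maximum abundance of 1000.
get_inds(N_max = 1000, log_scale = TRUE)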
get_ind_dens = function(comm, density_stat){
if (density_stat == 'mean') {
ind_dens = sum(comm) / nrow(comm)
} else if (density_stat == 'max') {
ind_dens = max(rowSums(comm))
} else {
ind_dens = min(rowSums(comm))
}
return(ind_dens)
}
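# Example (assumed usage): mean number of individuals per plot for a
# 2-plot x 3-species community matrix.
comm_demo <- matrix(c(3, 1, 0,
                      2, 4, 1), nrow = 2, byrow = TRUE)
get_ind_dens(comm_demo, density_stat = 'mean')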
get_overall_p = function(effort, perm, value){
delta_effort = c(effort[1], diff(effort))[perm == 0]
Hbarbar = tapply(value, effort, mean)
m = max(as.numeric(perm))
a = ((m + 1) / m)^2
u = tapply(value, perm, function(x)
a * sum((x - Hbarbar)^2 * delta_effort))
overall_p = sum(u >= u[1]) / (m + 1)
return(overall_p)
}
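# Example (assumed usage): overall p-value comparing an observed curve
# (perm == 0) against two permuted curves at three sampling efforts; all
# numbers are illustrative.
effort_demo <- rep(c(10, 20, 40), times = 3)
perm_demo <- rep(0:2, each = 3)
value_demo <- c(5, 8, 12, 4, 7, 10, 6, 9, 13)
get_overall_p(effort_demo, perm_demo, value_demo)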
mod_sum = function(x, stats = c('betas', 'r', 'r2', 'r2adj', 'f', 'p')) {
summary_lm = summary(x)
out = list()
if ('betas' %in% stats)
out$betas = coef(x)
if ('r' %in% stats) {
betas = coef(x)
out$r = sqrt(summary_lm$r.squared) *
ifelse(betas[2] < 0, -1, 1)
}
if ('r2' %in% stats)
out$r2 = summary_lm$r.squared
if ('r2adj' %in% stats)
out$r2adj = summary_lm$adj.r.squared
if ('f' %in% stats)
out$f = summary_lm$fstatistic[1]
if ('p' %in% stats) {
f = summary_lm$fstatistic
out$p = unname(stats::pf(f[1], f[2], f[3], lower.tail = FALSE))
}
if ('betas' %in% stats)
coef_type = c(paste0('b', 0:(length(out$betas) - 1)),
stats[stats != 'betas'])
else
coef_type = stats
out = data.frame(coef_type, unlist(out))
names(out) = c('index', 'value')
row.names(out) = NULL
out
}
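# Example (assumed usage): coefficient and fit statistics for a simple
# linear model; mod_sum returns a long data frame of index/value pairs.
fit_demo <- lm(mpg ~ wt, data = mtcars)
mod_sum(fit_demo, stats = c('betas', 'r2', 'p'))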
get_results = function(mob_in, env, groups, tests, inds, ind_dens, n_plots, type,
stats=NULL, spat_algo=NULL) {
group_levels = unique(groups)
group_rows = map(group_levels, ~ which(groups == .x))
mob_in_groups = map(group_rows, ~ subset(mob_in, .x, type = 'integer'))
names(mob_in_groups) = group_levels
S_df = map_dfr(mob_in_groups, get_delta_curves, tests, spat_algo,
inds, ind_dens, n_plots, .id = "group")
  S_df = mutate_if(S_df, is.factor, as.character)
S_df = data.frame(env = env[match(S_df$group, groups)],
S_df)
S_df = tibble::as_tibble(S_df, .name_repair = 'minimal')
delta_mod = function(df) {
stats::lm(effect ~ env, data = df)
}
if (is.null(stats)) {
if (type == 'discrete')
stats = 'betas'
else
stats = c('betas', 'r', 'r2', 'r2adj', 'f')
}
mod_df = S_df %>%
group_by(.data$test, sample, .data$effort) %>%
nest() %>%
mutate(fit = map(.data$data, delta_mod)) %>%
mutate(sum = map(.data$fit, mod_sum, stats)) %>%
select(.data$test, sample, .data$effort, sum) %>%
unnest(sum) %>%
ungroup() %>%
    mutate_if(is.factor, as.character)
return(list(S_df = S_df, mod_df = mod_df))
}
run_null_models = function(mob_in, env, groups, tests, inds, ind_dens, n_plots, type,
stats, spat_algo, n_perm, overall_p) {
if (overall_p)
p_val = vector('list', length(tests))
for (k in seq_along(tests)) {
null_results = vector('list', length = n_perm)
cat(paste('\nComputing null model for', tests[k], 'effect\n'))
pb <- txtProgressBar(min = 0, max = n_perm, style = 3)
for (i in 1:n_perm) {
null_mob_in = mob_in
null_mob_in$comm = get_null_comm(mob_in$comm, paste0('rand_', tests[k]),
groups)
null_results[[i]] = get_results(null_mob_in, env, groups, tests[k], inds,
ind_dens, n_plots, type, stats, spat_algo)
setTxtProgressBar(pb, i)
}
close(pb)
null_results = transpose(null_results)
null_df = map(null_results, function(x)
flatten_dfr(tibble(x), .id = "perm"))
null_qt = list()
null_qt$S_df = null_df$S_df %>%
group_by(env, .data$test, sample, .data$effort) %>%
summarize(low_effect = quantile(.data$effect, 0.025, na.rm = TRUE),
med_effect = quantile(.data$effect, 0.5, na.rm = TRUE),
high_effect = quantile(.data$effect, 0.975, na.rm = TRUE))
null_qt$mod_df = null_df$mod_df %>%
group_by(.data$test, sample, .data$effort, .data$index) %>%
summarize(low_value = quantile(.data$value, 0.025, na.rm = TRUE),
med_value = quantile(.data$value, 0.5, na.rm = TRUE),
high_value = quantile(.data$value, 0.975, na.rm = TRUE))
if (k == 1)
out = null_qt
else
out = map2(out, null_qt, rbind)
if (overall_p) {
obs_df = get_results(mob_in, env, groups, tests[k], inds, ind_dens,
n_plots, type, stats, spat_algo)
obs_df = map(obs_df, function(x) data.frame(perm = 0, x))
null_df = map2(obs_df, null_df, rbind)
p_val[[k]] = list(effect_p = null_df$S_df %>%
group_by(.data$test, .data$group) %>%
summarize(p = get_overall_p(.data$effort, .data$perm, .data$effect)),
mod_p = null_df$mod_df %>%
subset(!is.na(.data$value)) %>%
group_by(.data$test, .data$index) %>%
summarize(p = get_overall_p(.data$effort, .data$perm, .data$value)))
}
}
if (overall_p)
attr(out, "p") = map(transpose(p_val), bind_rows)
return(out)
}
get_delta_stats = function(mob_in, env_var, group_var=NULL, ref_level = NULL,
tests = c('SAD', 'N', 'agg'), spat_algo = NULL,
type = c('continuous', 'discrete'),
stats = NULL, inds = NULL,
log_scale = FALSE, min_plots = NULL,
density_stat = c('mean', 'max', 'min'),
n_perm=1000, overall_p = FALSE) {
  if (!inherits(mob_in, "mob_in"))
    stop('mob_in must be output of function make_mob_in (i.e., of class mob_in)')
if (!(env_var %in% names(mob_in$env)))
stop(paste(env_var, ' is not one of the columns in mob_in$env.'))
if (!is.null(group_var))
if (!(group_var %in% names(mob_in$env)))
stop(paste(group_var, ' is not one of the columns in mob_in$env.'))
tests = match.arg(tests, several.ok = TRUE)
test_status = tests %in% names(unlist(mob_in$tests))
approved_tests = tests[test_status]
if (length(approved_tests) < length(tests)) {
tests_string = paste(approved_tests, collapse = ' and ')
warning(paste('Based upon the attributes of the community object only the following tests will be performed:',
tests_string))
tests = approved_tests
}
type = match.arg(type)
density_stat = match.arg(density_stat)
env = mob_in$env[ , env_var]
if (is.null(group_var))
groups = env
else {
groups = mob_in$env[ , group_var]
    if (any(tapply(env, groups, stats::sd) > 0, na.rm = TRUE)) {
message("Computed average environmental value for each group")
env = tapply(env, groups, mean)
}
}
if (type == 'discrete') {
    if (!is.factor(env)) {
warning(paste("Converting", env_var, "to a factor with the default contrasts because the argument type = 'discrete'."))
env = as.factor(env)
}
if (!is.null(ref_level)) {
env_levels = levels(env)
if (ref_level %in% env_levels) {
if (env_levels[1] != ref_level)
env = factor(env, levels = c(ref_level, env_levels[env_levels != ref_level]))
} else
stop(paste(ref_level, "is not in", env_var))
}
} else if (type == 'continuous') {
if (!is.numeric(env)) {
warning(paste("Converting", env_var, "to numeric because the argument type = 'continuous'"))
env = as.numeric(as.character(env))
}
if (!is.null(ref_level))
stop('Defining a reference level (i.e., ref_level) only makes sense when doing a discrete analysis (i.e., type = "discrete")')
}
N_max = min(tapply(rowSums(mob_in$comm), groups, sum))
inds = get_inds(N_max, inds, log_scale)
ind_dens = get_ind_dens(mob_in$comm, density_stat)
n_plots = min(tapply(mob_in$comm[ , 1], groups, length))
out = list()
out$env_var = env_var
if (!is.null(group_var))
out$group_var = group_var
out$type = type
out$tests = tests
out$log_scale = log_scale
out$density_stat = list(density_stat = density_stat,
ind_dens = ind_dens)
out = append(out,
get_results(mob_in, env, groups, tests, inds, ind_dens, n_plots,
type, stats, spat_algo))
null_results = run_null_models(mob_in, env, groups, tests, inds, ind_dens,
n_plots, type, stats, spat_algo,
n_perm, overall_p)
out$S_df = left_join(out$S_df, null_results$S_df,
by = c("env", "test", "sample", "effort"))
out$mod_df = left_join(out$mod_df, null_results$mod_df,
by = c("test", "sample", "effort", "index"))
if (overall_p)
out$p = attr(null_results, "p")
class(out) = 'mob_out'
return(out)
}
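# Example (assumed usage, sketched after the mobr package documentation;
# the data sets and column names below come from mobr and are not defined
# in this file):
# library(mobr)
# data(inv_comm)
# data(inv_plot_attr)
# inv_mob_in <- make_mob_in(inv_comm, inv_plot_attr)
# deltas <- get_delta_stats(inv_mob_in, env_var = 'group',
#                           type = 'discrete', log_scale = TRUE,
#                           n_perm = 19)
# plot(deltas, stat = 'b1')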
plot_abu = function(mob_in, group_var, ref_level = NULL, type=c('sad', 'rad'),
pooled=FALSE, col=NULL, lwd=3, log='', leg_loc = 'topleft') {
groups = factor(mob_in$env[ , group_var])
group_levels = levels(groups)
if (!is.null(ref_level)) {
if (ref_level %in% group_levels) {
if (group_levels[1] != ref_level)
groups = factor(groups, levels = c(ref_level, group_levels[group_levels != ref_level]))
group_levels = levels(groups)
} else
stop(paste(ref_level, "is not in", group_var))
}
  # assumed default palette: the original hex strings were truncated
  # during comment stripping, so RColorBrewer 'Set1' colours are used
  if (is.null(col))
    col = c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
            "#FF7F00", "#FFFF33", "#A65628")
else if (length(col) != length(group_levels))
stop('Length of col vector must match the number of unique groups')
title = ifelse(pooled, 'Group Scale', 'Sample Scale')
if ('sad' == type) {
plot(1, type = "n", xlab = "% abundance", ylab = "% species",
xlim = c(0.01, 1), ylim = c(0.01, 1), log = log, main = title)
for (i in 1:length(group_levels)) {
col_grp = col[i]
comm_grp = mob_in$comm[groups == group_levels[i], ]
comm_grp = comm_grp[rowSums(comm_grp) > 0, ]
if (pooled) {
sad_grp = colSums(comm_grp)
sad_sort = sort(sad_grp[sad_grp != 0])
s_cul = 1:length(sad_sort) / length(sad_sort)
n_cul = sapply(1:length(sad_sort), function(x)
sum(sad_sort[1:x]) / sum(sad_sort))
lines(n_cul, s_cul, col = col_grp, lwd = lwd, type = "l")
} else {
for (j in 1:nrow(comm_grp)) {
sad_sort = sort(as.numeric(comm_grp[j, comm_grp[j, ] != 0]))
s_cul = 1:length(sad_sort) / length(sad_sort)
n_cul = sapply(1:length(sad_sort), function(x)
sum(sad_sort[1:x]) / sum(sad_sort))
lines(n_cul, s_cul, col = scales::alpha(col_grp, 0.5),
lwd = lwd, type = "l")
}
}
}
}
if ('rad' == type) {
plot(1:10, 1:10, type = 'n', xlab = 'rank', ylab = 'abundance',
log = log, xlim = c(1, ncol(mob_in$comm)),
ylim = range(0.01, 1), cex.lab = 1.5, cex.axis = 1.5,
main = title)
for (i in 1:length(group_levels)) {
col_grp = col[i]
comm_grp = mob_in$comm[groups == group_levels[i], ]
comm_grp = comm_grp[rowSums(comm_grp) > 0, ]
if (pooled) {
sad_grp = colSums(comm_grp)
sad_sort = sort(sad_grp[sad_grp != 0], decreasing = TRUE)
lines(sad_sort / sum(sad_sort), col = col_grp, lwd = lwd,
type = "l")
} else {
for (j in 1:nrow(comm_grp)) {
          sad_sort = sort(as.numeric(comm_grp[j, comm_grp[j, ] != 0]), decreasing = TRUE)
lines(1:length(sad_sort), sad_sort / sum(sad_sort),
col = scales::alpha(col_grp, 0.5),
lwd = lwd, type = "l")
}
}
}
}
if (!is.na(leg_loc))
legend(leg_loc, legend = group_levels, col = col, lwd = lwd, bty = 'n')
}
plot_rarefaction = function(mob_in, group_var, ref_level = NULL,
method, dens_ratio = 1, pooled = TRUE,
spat_algo = NULL, col = NULL, lwd = 3, log = '',
leg_loc = 'topleft', ...) {
if (pooled == FALSE & method != 'IBR')
stop('Samples can only not be pooled at the treatment level when individual-based rarefaction is used (i.e., method="IBR")')
groups = factor(mob_in$env[ , group_var])
group_levels = levels(groups)
if (!is.null(ref_level)) {
if (ref_level %in% group_levels) {
if (group_levels[1] != ref_level)
groups = factor(groups, levels = c(ref_level, group_levels[group_levels != ref_level]))
group_levels = levels(groups)
} else
stop(paste(ref_level, "is not in", group_var))
}
  # assumed default palette: the original hex strings were truncated
  # during comment stripping, so RColorBrewer 'Set1' colours are used
  if (is.null(col))
    col = c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
            "#FF7F00", "#FFFF33", "#A65628")
else if (length(col) != length(group_levels))
stop('Length of col vector must match the number of unique groups')
  if (method == 'IBR')
xlab = 'Number of individuals'
else
xlab = 'Number of samples'
if (pooled) {
Srare = lapply(group_levels, function(x)
rarefaction(subset(mob_in, groups == x, 'logical'),
method, spat_algo = spat_algo, ...))
xlim = c(1, max(unlist(sapply(Srare, function(x) as.numeric(names(x))))))
ylim = c(1, max(unlist(Srare)))
n = as.numeric(names(Srare[[1]]))
plot(n, Srare[[1]], type = "n", main = "Group scale",
xlab = xlab, ylab = "Species richness",
xlim = xlim, ylim = ylim, log = log)
for (i in seq_along(group_levels)) {
col_grp = col[i]
n = as.numeric(names(Srare[[i]]))
lines(n, Srare[[i]], col = col_grp, lwd = lwd, type = "l")
}
} else {
Srare = lapply(group_levels, function(x)
apply(mob_in$comm[groups == x, ], 1,
function(y) rarefaction(y, method, ...)))
xlim = c(1, max(unlist(lapply(Srare, function(x)
lapply(x, function(y)
as.numeric(names(y)))))))
ylim = c(1, max(unlist(Srare)))
n = as.numeric(names(Srare[[1]][[1]]))
plot(n, Srare[[1]][[1]], type = "n", main = "Sample scale",
xlab = xlab, ylab = "Species richness",
xlim = xlim, ylim = ylim, log = log)
for (i in seq_along(group_levels)) {
col_grp = col[i]
for (j in seq_along(Srare[[i]])) {
n = as.numeric(names(Srare[[i]][[j]]))
if (n[1] > 0)
lines(n, Srare[[i]][[j]], col = scales::alpha(col_grp, 0.5),
lwd = lwd, type = 'l')
}
}
}
if (!is.na(leg_loc))
legend(leg_loc, legend = group_levels, col = col, lwd = lwd, bty = 'n')
}
plot.mob_out = function(x, stat = 'b1', log2 = '', scale_by = NULL,
display = c('S ~ effort', 'effect ~ grad', 'stat ~ effort'),
eff_sub_effort = TRUE, eff_log_base = 2,
eff_disp_pts = TRUE, eff_disp_smooth = FALSE, ...) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
if (x$type == 'discrete') {
if (stat != 'b1')
warning('The only statistic that has a reasonable interpretation for a discrete explanatory variable is the difference in the group means from the reference group (i.e., set stat = "b1")')
}
if (!is.null(scale_by)) {
if (scale_by == 'indiv') {
x$S_df = mutate(x$S_df,
effort = ifelse(sample == 'plot',
round(.data$effort * x$density_stat$ind_dens),
.data$effort))
x$mod_df = mutate(x$mod_df,
effort = ifelse(sample == 'plot',
round(.data$effort * x$density_stat$ind_dens),
.data$effort))
}
if (scale_by == 'plot') {
x$S_df = mutate(x$S_df,
effort = ifelse(sample == 'indiv',
round(.data$effort / x$density_stat$ind_dens),
.data$effort))
x$mod_df = mutate(x$mod_df,
effort = ifelse(sample == 'indiv',
round(.data$effort / x$density_stat$ind_dens),
.data$effort))
}
}
p_list = vector('list', 4)
if ('S ~ grad' %in% display) {
facet_labs = c(`agg` = 'sSBR',
`N` = 'nsSBR',
`SAD` = 'IBR')
p_list[[1]] = ggplot(x$S_df, aes(.data$env, .data$S)) +
geom_smooth(aes(group = .data$effort, color = .data$effort),
method = 'lm', se = FALSE) +
labs(x = x$env_var) +
facet_wrap(. ~ test, scales = "free",
labeller = as_labeller(facet_labs))
}
if ('S ~ effort' %in% display) {
facet_labs = c(`agg` = 'sSBR',
`N` = 'nsSBR',
`SAD` = 'IBR')
p_list[[2]] = ggplot(x$S_df, aes(.data$effort, .data$S)) +
geom_line(aes(group = .data$group, color = .data$env)) +
facet_wrap(. ~ test, scales = "free",
labeller = as_labeller(facet_labs)) +
labs(y = expression("richness (" *
italic(S) * ")"),
color = x$env_var)
}
if ('effect ~ grad' %in% display) {
efforts = sort(unique(x$S_df$effort))
if (is.logical(eff_sub_effort)) {
if (eff_sub_effort) {
effort_r = floor(log(range(efforts), eff_log_base))
effort_2 = eff_log_base^(effort_r[1]:effort_r[2])
effort_2 = effort_2[effort_2 > 1]
eff_d = as.matrix(stats::dist(c(efforts, effort_2)))
eff_d = eff_d[-((length(efforts) + 1):ncol(eff_d)),
-(1:length(efforts))]
min_index = apply(eff_d, 2, function(x) which(x == min(x))[1])
sub_effort = efforts[min_index]
message(paste("Effect size shown at the following efforts:",
paste(sub_effort, collapse = ', ')))
}
else
sub_effort = efforts
} else if (!is.null(eff_sub_effort))
sub_effort = eff_sub_effort
if (x$type == "continuous")
x$S_df = x$S_df %>%
group_by(.data$test, .data$effort) %>%
mutate(low_effect = predict(loess(low_effect ~ .data$env), .data$env)) %>%
mutate(high_effect = predict(loess(high_effect ~ .data$env), .data$env))
p_list[[3]] = ggplot(subset(x$S_df, x$S_df$effort %in% sub_effort),
aes(.data$env, .data$effect)) +
geom_hline(yintercept = 0, linetype = 'dashed') +
labs(x = x$env_var) +
facet_wrap(. ~ test, scales = "free_y") +
labs(y = expression('effect (' * italic(S) * ')')) +
scale_fill_manual(name = element_blank(),
values = c(null = 'grey40')) +
scale_colour_gradient2(trans=scales::log2_trans(),
low = rgb(248, 203, 173, maxColorValue = 255),
mid = rgb(237,127, 52, maxColorValue = 255),
high = rgb(165, 0 , 33, maxColorValue = 255),
midpoint = 4)
if (eff_disp_pts)
p_list[[3]] = p_list[[3]] + geom_point(aes(group = .data$effort,
color = .data$effort))
if (eff_disp_smooth)
p_list[[3]] = p_list[[3]] + geom_smooth(aes(group = .data$effort,
color = .data$effort),
method = lm, se = FALSE)
}
if ('stat ~ effort' %in% display) {
if (stat == 'b0')
ylab = expression('intercept (' * italic(beta)[0] * ')')
if (stat == 'b1')
ylab = expression('slope (' * italic(beta)[1] * ')')
if (stat == 'r2')
ylab = expression(italic(R^2))
if (stat == 'r')
ylab = expression(italic(r))
if (stat == 'f')
ylab = expression(italic(F))
p_list[[4]] = ggplot(subset(x$mod_df, x$mod_df$index == stat),
aes(.data$effort, .data$value)) +
geom_ribbon(aes(ymin = .data$low_value,
ymax = .data$high_value, fill = 'null'),
alpha = 0.25) +
geom_line(aes(group = .data$index, color = 'observed')) +
geom_hline(yintercept = 0, linetype = 'dashed') +
facet_wrap(. ~ test, scales = "free_x") +
labs(y = ylab) +
scale_color_manual(name = element_blank(),
values = c(observed = 'red')) +
scale_fill_manual(name = element_blank(),
values = c(null = 'grey40'))
}
if (!is.null(scale_by)) {
    # assumed labels: the originals began with '#' and were truncated
    # during comment stripping
    scale_by = ifelse(scale_by == 'indiv', 'Number of individuals',
                      'Number of plots')
if (!is.null(p_list[[1]]))
p_list[[1]] = p_list[[1]] + labs(color = scale_by)
if (!is.null(p_list[[3]]))
p_list[[3]] = p_list[[3]] + labs(color = scale_by)
if (!is.null(p_list[[2]]))
p_list[[2]] = p_list[[2]] + labs(x = scale_by)
if (!is.null(p_list[[4]]))
p_list[[4]] = p_list[[4]] + labs(x = scale_by)
}
if (grepl('x', log2)) {
if (!is.null(p_list[[2]]))
p_list[[2]] = p_list[[2]] + scale_x_continuous(trans = 'log2')
if (!is.null(p_list[[4]]))
p_list[[4]] = p_list[[4]] + scale_x_continuous(trans = 'log2')
}
if (grepl('y', log2)) {
if (!is.null(p_list[[2]]))
p_list[[2]] = p_list[[2]] + scale_y_continuous(trans = 'log2')
}
p_list = Filter(Negate(is.null), p_list)
egg::ggarrange(plots = p_list)
}
plot_N = function(comm, n_perm=1000) {
N = rowSums(comm)
ind_dens = mean(N)
N_sum = apply(replicate(n_perm, cumsum(sample(N))), 1, mean)
plot(N_sum, xlab = 'Number of plots', ylab = 'Number of Individuals')
abline(a = 0, b = ind_dens, col = 'red')
legend('topleft', 'Expected line', lty = 1, bty = 'n', col = 'red')
}
plotStacked <- function(
x, y,
order.method="as.is",
ylab="", xlab="",
border = NULL, lwd=1,
col=rainbow(length(y[1,])),
ylim=NULL,
...
){
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
if (sum(y < 0) > 0) stop("y cannot contain negative numbers")
if (is.null(border)) border <- par("fg")
border <- as.vector(matrix(border, nrow = ncol(y), ncol = 1))
col <- as.vector(matrix(col, nrow = ncol(y), ncol = 1))
lwd <- as.vector(matrix(lwd, nrow = ncol(y), ncol = 1))
if (is.null(ylim)) ylim = c(0, 1.2 * max(apply(y, 1, sum)))
if (order.method == "max") {
ord <- order(apply(y, 2, which.max))
y <- y[, ord]
col <- col[ord]
border <- border[ord]
}
if (order.method == "first") {
ord <- order(apply(y, 2, function(x) min(which(x > 0))))
y <- y[ , ord]
col <- col[ord]
border <- border[ord]
}
top.old <- x*0
polys <- vector(mode = "list", ncol(y))
for (i in seq(polys)) {
top.new <- top.old + y[,i]
polys[[i]] <- list(x = c(x, rev(x)), y = c(top.old, rev(top.new)))
top.old <- top.new
}
if (is.null(ylim))
ylim <- range(sapply(polys, function(x) range(x$y, na.rm = TRUE)), na.rm = TRUE)
plot(x, y[ , 1], ylab = ylab, xlab = xlab, ylim = ylim, t = "n", ...)
for (i in seq(polys)) {
polygon(polys[[i]], border = border[i], col = col[i], lwd = lwd[i])
}
} |
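# Example (assumed usage): stacked polygon chart for three random
# non-negative series; colours and ordering are illustrative.
set.seed(1)
x_demo <- 1:20
y_demo <- cbind(a = runif(20), b = runif(20), c = runif(20))
plotStacked(x_demo, y_demo, order.method = "max",
            xlab = "x", ylab = "cumulative y")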
require(LiblineaR)
require(SwarmSVM)
context("clusterSVM")
data(svmguide1)
svmguide1.t = svmguide1[[2]]
svmguide1 = svmguide1[[1]]
data(iris)
test_that("Error Trigger",{
expect_error({csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 0,
centers = 8, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
expect_error({csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0, type = 11,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
expect_error({csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0, cost = -1,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
expect_error({csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0, epsilon = -1,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
expect_error({csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0, verbose = 10,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
expect_error({pred = predict(csvm.obj$sparse, svmguide1.t[,-1])})
expect_warning({
csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 1, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
})
test_that("Switch Clustering function",{
csvm.obj.1 = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1],
cluster.method = "kmeans")
csvm.obj.2 = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1],
cluster.method = "mlKmeans")
csvm.obj.3 = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 2, seed = 1024, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1],
cluster.method = "kernkmeans")
expect_true(csvm.obj.3$time$total.time>csvm.obj.2$time$total.time)
expect_true(csvm.obj.3$time$total.time>csvm.obj.1$time$total.time)
})
test_that("Performance",{
liblinear.obj = LiblineaR::LiblineaR(data = svmguide1[,-1], target = svmguide1[,1],
type = 1, verbose = FALSE)
liblinear.pred = predict(liblinear.obj, svmguide1.t[,-1])$prediction
liblinear.score = sum(liblinear.pred==svmguide1.t[,1])/length(liblinear.pred)
csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 8, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
csvm.score = csvm.obj$valid.score
expect_true(csvm.score>liblinear.score)
expect_warning({
csvm.obj = clusterSVM(x = svmguide1[,-1], y = svmguide1[,1], lambda = 1,
centers = 1, seed = 512, verbose = 0,
valid.x = svmguide1.t[,-1],valid.y = svmguide1.t[,1])
})
csvm.score = csvm.obj$valid.score
expect_equal(csvm.score, liblinear.score)
csvm.obj = clusterSVM(x = as.matrix(iris[,-5]), y = iris[,5], sparse = FALSE,
centers = 2, seed = 512, verbose = 0,
valid.x = as.matrix(iris[,-5]),valid.y = iris[,5])
expect_true(csvm.obj$valid.score>0.97)
})
set.seed(512)
xorx = rbind(cbind(runif(100,1,2),runif(100,1,2)),
cbind(runif(100,-2,-1),runif(100,1,2)),
cbind(runif(100,1,2),runif(100,-2,-1)),
cbind(runif(100,-2,-1),runif(100,-2,-1)))
xory = c(rep(1,100),
rep(0,200),
rep(1,100))
test_that("XOR Toy Data",{
liblinear.obj = LiblineaR::LiblineaR(data = xorx, target = xory,
type = 1, verbose = FALSE)
liblinear.pred = predict(liblinear.obj, xorx)$prediction
liblinear.score = sum(liblinear.pred==xory)/length(liblinear.pred)
expect_true(liblinear.score<0.6)
csvm.obj = clusterSVM(x = xorx, y = xory, lambda = 1,
centers = 2, seed = 512, verbose = 0,
valid.x = xorx, valid.y = xory,
cluster.method = 'kmeans')
expect_equal(csvm.obj$valid.score,1)
csvm.obj = clusterSVM(x = xorx, y = xory, lambda = 1,
centers = 2, seed = 512, verbose = 0,
valid.x = xorx, valid.y = xory,
cluster.method = 'mlKmeans')
expect_equal(csvm.obj$valid.score,1)
csvm.obj = clusterSVM(x = xorx, y = xory, lambda = 1,
centers = 2, seed = 1024, verbose = 0,
valid.x = xorx, valid.y = xory,
cluster.method = 'kernkmeans')
expect_equal(csvm.obj$valid.score,1)
}) |
"summary.4thcorner" <- function(object,...){
cat("Fourth-corner Statistics\n")
cat("------------------------\n")
cat("Permutation method ",object$model," (",object$npermut," permutations)\n")
if(inherits(object, "4thcorner.rlq")){
cat("trRLQ statistic","\n\n")
cat("---\n\n")
print(object$trRLQ)
} else {
cat("\nAdjustment method for multiple comparisons: ", object$tabG$adj.method, "\n")
xrand <- object$tabG
sumry <- list(Test = xrand$names, Stat= xrand$statnames, Obs = xrand$obs, Std.Obs = xrand$expvar[, 1], Alter = xrand$alter)
sumry <- as.matrix(as.data.frame(sumry))
if (any(xrand$rep[1] != xrand$rep)) {
sumry <- cbind(sumry[, 1:4], N.perm = xrand$rep)
}
sumry <- cbind(sumry, Pvalue = format.pval(xrand$pvalue))
if (xrand$adj.method != "none") {
sumry <- cbind(sumry, Pvalue.adj = format.pval(xrand$adj.pvalue))
}
signifpval <- symnum(xrand$adj.pvalue, corr = FALSE, na = FALSE, cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".", " "))
sumry <- cbind(sumry,signifpval)
colnames(sumry)[ncol(sumry)] <- " "
rownames(sumry) <- 1:nrow(sumry)
print(sumry, quote = FALSE, right = TRUE)
cat("\n---\nSignif. codes: ", attr(signifpval, "legend"), "\n")
invisible(sumry)
}
} |
Sampler <- function(tree, n) {
  # these slot accesses were mangled into '[email protected]' by e-mail
  # obfuscation; the slot names below are reconstructed from usage
  # (absorbed evidence variables plus a logical discrete-node flag)
  # and should be checked against the original class definition
  abd <- tree@absorbed.variables
  discrete.nodes <- names(tree@node.class)[tree@node.class]
  continuous.nodes <- names(tree@node.class)[!tree@node.class]
disc.v <- setdiff(discrete.nodes, abd)
cont.v <- setdiff(continuous.nodes, abd)
if (length(disc.v) == 0) {
cont.g <- data.frame()
for (i in 1:n){
vec.g <- continuous.single.sampler.special(tree, cont.v)
cont.g <- rbind(cont.g, vec.g)
}
colnames(cont.g) <- cont.v
rownames(cont.g) <- NULL
return(cont.g)
}
disc.jd <- FactorQuery(tree, vars = disc.v, mode = "joint")
cnts <- rmultinom(n = 1, size = n, prob = disc.jd$prob)
config.tab <- disc.jd[, 1:(ncol(disc.jd)-1)]
config.tab <- data.frame(lapply(config.tab, as.character), stringsAsFactors=FALSE)
cont.g <- data.frame()
for (i in 1:nrow(config.tab)) {
if (cnts[i] == 0) {
next
}
this.config <- unlist(config.tab[i, , drop = TRUE])
for (j in 1:cnts[i]) {
vec.g <- continuous.single.sampler(tree, cont.v, this.config)
cont.g <- rbind(cont.g, vec.g)
}
}
colnames(cont.g) <- cont.v
disc.g <- config.tab[rep(1:nrow(config.tab), cnts), ]
colnames(disc.g) <- colnames(config.tab)
generated <- cbind(disc.g, cont.g)
rownames(generated) <- NULL
return(generated)
}
compatible <- function(config.1, config.2) {
var.1 <- names(config.1)
var.2 <- names(config.2)
var.b <- intersect(var.1, var.2)
if (length(var.b)==0) {
return(TRUE)
}
config.sub.1 <- config.1[var.b]
config.sub.2 <- config.2[var.b]
return(identical(config.sub.1, config.sub.2))
}
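# Example (assumed usage): two configurations that agree on their shared
# variable "A" are compatible; disagreeing ones are not.
compatible(c(A = "1", B = "0"), c(A = "1", C = "2"))
compatible(c(A = "1"), c(A = "2"))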
continuous.single.sampler <- function(tree, cont.v, this.config) {
x.cont <- rep(NA, length(cont.v))
names(x.cont) <- cont.v
x.gen <- c()
for (nd in rev(cont.v)) {
this.pot <- tree@lppotential[[nd]][[1]]
if(ncol(this.pot@config) == 0) {
selectedConfig <- 1
} else {
same_named_values <- this.config[intersect(colnames(this.pot@config), names(this.config))]
selectedConfig <- which(apply(this.pot@config, 1, function(x) identical(x, same_named_values)))
}
if(length(selectedConfig) > 1){
warning("More than one configuration selected!")
}
if(ncol(this.pot@beta) == 0) {
mu <- this.pot@const[selectedConfig]
} else {
this.beta <- this.pot@beta
beta.var <- colnames(this.beta)
var.g <- intersect(x.gen, beta.var)
betas <- this.beta[selectedConfig, var.g]
mu <- this.pot@const[selectedConfig] + sum(betas * x.cont[var.g])
}
sd <- sqrt(this.pot@variance[selectedConfig])
x.cont[nd] <- rnorm(1, mean = mu, sd = sd)
x.gen <- c(x.gen, nd)
}
return(x.cont)
}
continuous.single.sampler.special <- function(tree, cont.v) {
x.cont <- rep(NA, length(cont.v))
names(x.cont) <- cont.v
x.gen <- c()
for (nd in rev(cont.v)) {
this.pot <- tree@lppotential[[nd]][[1]]
if(ncol(this.pot@beta) == 0) {
mu <- this.pot@const[1]
} else {
this.beta <- this.pot@beta
beta.var <- colnames(this.beta)
var.g <- intersect(x.gen, beta.var)
betas <- this.beta[1, var.g]
mu <- this.pot@const[1] + sum(betas * x.cont[var.g])
}
sd <- sqrt(this.pot@variance[1])
x.cont[nd] <- rnorm(1, mean = mu, sd = sd)
x.gen <- c(x.gen, nd)
}
return(x.cont)
} |
tNN <- function(threshold = 0.2, measure = "euclidean",
centroids = identical(tolower(measure), "euclidean"), lambda=0) {
new("tNN", threshold=threshold, measure=measure, centroids=centroids,
lambda=lambda)
}
setMethod("show", signature(object = "tNN"),
function(object) {
cat("tNN with", nclusters(object), "clusters.\n",
"Measure:", object@measure, "\n",
"Threshold:", object@threshold, "\n",
"Centroid:", object@centroids, "\n",
"Lambda:", object@lambda, "\n"
)
invisible(NULL)
})
setMethod("copy", signature(x = "tNN"),
function(x) {
r <- new("tNN",
threshold = x@threshold,
measure = x@measure,
distFun = x@distFun,
centroids = x@centroids,
lambda = x@lambda,
lambda_factor = x@lambda_factor)
r@tnn_d <- as.environment(as.list(x@tnn_d))
r
})
setMethod("cluster_counts", signature(x = "tNN"),
function(x) x@tnn_d$counts)
setMethod("cluster_centers", signature(x = "tNN"),
function(x) x@tnn_d$centers)
setMethod("nclusters", signature(x = "tNN"),
function(x) nrow(x@tnn_d$centers))
setMethod("clusters", signature(x = "tNN"),
function(x) rownames(x@tnn_d$centers))
setMethod("last_clustering", signature(x = "tNN"),
function(x, remove = FALSE) {
lc <- x@tnn_d$last
if(remove) x@tnn_d$last <- as.character(NA)
lc
})
setMethod("rare_clusters", signature(x = "tNN"),
function(x, count_threshold)
names(which(x@tnn_d$counts <= count_threshold))
)
setMethod("find_clusters", signature(x = "tNN", newdata = "numeric"),
function(x, newdata, match_cluster=c("exact", "nn"), dist=FALSE)
find_clusters(x, as.matrix(rbind(newdata)), match_cluster, dist))
setMethod("find_clusters", signature(x = "tNN", newdata = "data.frame"),
function(x, newdata, match_cluster=c("exact", "nn"), dist=FALSE)
find_clusters(x, as.matrix(newdata), match_cluster, dist))
setMethod("find_clusters", signature(x = "tNN", newdata = "matrix"),
function(x, newdata, match_cluster=c("exact", "nn"), dist=FALSE) {
if(is.numeric(match_cluster)) {
multiplier <- match_cluster
match_cluster <- "exact"
}else multiplier <-1
match_cluster <- match.arg(match_cluster)
if(nclusters(x)==0) return(rep(NA, nrow(newdata)))
maxmem <- 128L
blocksize <- as.integer(floor(maxmem * 1024 * 1024
/ nclusters(x) / 8 / 5))
if(nrow(newdata)>1 && nrow(newdata)>blocksize) {
states <- character(nrow(newdata))
if(dist) d_state <- numeric(nrow(newdata))
blockStart <- 1L
while(blockStart < nrow(newdata)) {
blockEnd <- min(blockStart+blocksize-1L, nrow(newdata))
if(dist) {
tmp <- find_clusters(x,
newdata[blockStart:blockEnd,],
match_cluster, dist)
states[blockStart:blockEnd] <- as.character(tmp[,1])
d_state[blockStart:blockEnd] <- tmp[,2]
}else states[blockStart:blockEnd] <- find_clusters(x,
newdata[blockStart:blockEnd,],
match_cluster, dist)
blockStart <- blockEnd+1L
}
if(dist) return(data.frame(state = states, dist = d_state))
else return(states)
}
d <- dist(newdata, cluster_centers(x), method=x@distFun)
.which.min_NA <- function(x) {
m <- which.min(x)
if(length(m)==0) m <- NA
m
}
if(match_cluster=="nn") {
min <- apply(d, MARGIN=1, .which.min_NA)
closest <- clusters(x)[min]
if(dist) {
d_state <- sapply(1:nrow(newdata),
FUN = function(i) d[i,min[i]])
return(data.frame(state = closest, dist = d_state))
}else return(closest)
}
d2 <- d - matrix(x@tnn_d$var_thresholds*multiplier,
ncol=length(x@tnn_d$var_thresholds),
nrow=nrow(d), byrow=TRUE)
min <- apply(d2, MARGIN=1, .which.min_NA)
closest <- clusters(x)[min]
closest_val <- sapply(1:nrow(newdata),
FUN = function(i) d2[i,min[i]])
closest[closest_val>0] <- NA
if(dist) {
d_state <- sapply(1:nrow(newdata),
FUN = function(i) d[i,min[i]])
return(data.frame(state=closest, dist = d_state))
}else return(closest)
}
) |
context("Centering helper functions")
library(jointMeanCov)
test_that("centerDataTwoGroupsByIndices works correctly", {
X <- matrix(1:12, nrow=4, ncol=3)
X.cen <- matrix(c(-0.5, -0.5, -0.5,
0.5, 0.5, 0.5,
-0.5, -0.5, -0.5,
0.5, 0.5, 0.5),
nrow=4, ncol=3,
byrow = TRUE)
expect_equal(
centerDataTwoGroupsByIndices(
X, group.one.indices=1:2, group.two.indices=3:4),
X.cen)
})
test_that("centerDataTwoGroupsByModelSelection works correctly", {
X <- matrix(1:12, nrow=4, ncol=3)
X.cen <- matrix(c(-0.5, -0.5, -1.5,
0.5, 0.5, -0.5,
-0.5, -0.5, 0.5,
0.5, 0.5, 1.5),
nrow=4, ncol=3,
byrow = TRUE)
expect_equal(
centerDataTwoGroupsByModelSelection(
X, group.one.indices=1:2, group.two.indices=3:4,
within.group.indices=1:2),
X.cen)
})
test_that("centerDataGLSModelSelection works", {
n <- 4
m <- 3
X <- matrix(1:12, nrow=n, ncol=m)
out <- centerDataGLSModelSelection(
X, B.inv=diag(n), group.one.indices=1:2,
group.two.indices=3:4,
group.cen.indices=1:2)
expect_equal(out$X.cen,
matrix(c(-0.5, -0.5, -1.5,
0.5, 0.5, -0.5,
-0.5, -0.5, 0.5,
0.5, 0.5, 1.5),
byrow=TRUE, 4, 3))
expect_equal(out$global.means.gls, matrix(10.5, 1, 1))
expect_equal(out$group.means.gls,
matrix(c(1.5, 5.5,
3.5, 7.5),
byrow=TRUE, 2, 2))
}) |
computeHV = function(x, ref.point = NULL) {
assertMatrix(x, mode = "numeric", any.missing = FALSE, all.missing = FALSE)
if (is.null(ref.point)) {
ref.point = apply(x, 1L, max)
}
if (any(is.infinite(x))) {
warningf("Set of points contains %i infinite values.", sum(is.infinite(x)))
return(NaN)
}
if (length(ref.point) != nrow(x)) {
stopf("Set of points and reference point need to have the same dimension, but
set of points has dimension %i and reference point has dimension %i.", nrow(x), length(ref.point))
}
if (any(is.infinite(ref.point))) {
warningf("Reference point contains %i infinite values.", sum(is.infinite(ref.point)))
return(NaN)
}
return(.Call("computeHVC", x, ref.point, PACKAGE = "ecr"))
}
computeHVContr = function(x, ref.point = NULL, offset = 1) {
if (is.null(ref.point)) {
ref.point = approximateNadirPoint(x) + offset
}
assertMatrix(x, mode = "numeric", any.missing = FALSE)
assertNumeric(ref.point, any.missing = FALSE)
assertNumber(offset, finite = TRUE, lower = 0)
return(.Call("computeHVContributionC", x, ref.point, PACKAGE = "ecr"))
} |
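# Example (assumed usage; the .Call interface above needs the compiled
# ecr backend, so this sketch is left unevaluated). Points are stored
# column-wise so that nrow(x) is the objective-space dimension:
# pts <- matrix(c(1, 10, 5, 5, 10, 1), nrow = 2L)
# computeHV(pts, ref.point = c(11, 11))
# computeHVContr(pts, ref.point = c(11, 11))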
partition_div <- function(dataset,
units, time,
cond, out,
n_cut, incl_cut) {
quiet <- function(x)
{
sink(tempfile())
on.exit(sink())
invisible(force(x))
}
x <- dataset
if (missing(units)) {
colnames(x)[which(names(x) == time)] <- "time"
xB <- x
xxx <- 1
BE_list <- split(xB, xB[, "time"])
} else if (missing(time)) {
colnames(x)[which(names(x) == units)] <- "units"
xW <- x
xxx <- 2
WI_list <- split(xW, xW[, "units"])
} else {
xxx <- 3
colnames(x)[which(names(x) == time)] <- "time"
colnames(x)[which(names(x) == units)] <- "units"
xB <- x
xW <- x
BE_list <- split(xB, xB[, "time"])
WI_list <- split(xW, xW[, "units"])
}
PO_list <- list(x)
paster <- function(x) {
x <- paste(x, collapse = "+")
x
}
pqmcc <- function(x) {
if (xxx == 1) {
part <- as.character(x$time[1])
type <- "between"
} else if (xxx == 2) {
part <- as.character(x$units[1])
type <- "within"
} else {
partition <- unlist(x$time)
if (partition[1] == partition[2]) {
part <- as.character(x$time[1])
type <- "between"
} else {
part <- as.character(x$units[1])
type <- "within"
}
}
check <- x[cond]
check[check < 0.5] <- 0
check[check > 0.5] <- 1
check2 <- as.data.frame(colMeans(check))
check2[check2 == 1] <- 0
check3 <- as.numeric(colMeans(check2))
if (check3 == 0) {
zz <- as.data.frame(part)
zz$type <- type
      zz$diversity <- "No variation in all conditions"
zz$diversity_per <- "-"
zz$diversity_1 <- "-"
zz$diversity_0 <- "-"
zz <- zz[!duplicated(zz), ]
colnames(zz)[1] <- "partition"
} else {
s <- testit::has_error(susu <- try(suppressWarnings(QCA::truthTable(x, outcome = out, conditions = cond, incl.cut1 = incl_cut, n.cut = n_cut)), silent = TRUE))
    if (!s) {
x1 <- try(suppressWarnings(QCA::truthTable(x, outcome = out, conditions = cond, incl.cut1 = incl_cut, n.cut = n_cut)), silent = TRUE)
zz <- as.data.frame(part)
zz$type <- type
zz$diversity <- as.numeric(length(x1$indexes))
zz$diversity_1 <- as.numeric(sum(x1$tt$OUT == 1))
zz$diversity_0 <- as.numeric(sum(x1$tt$OUT == 0))
zz$diversity_per <- "???"
zz <- zz[!duplicated(zz), ]
colnames(zz)[1] <- "partition"
} else {
zz <- as.data.frame(part)
zz$type <- type
zz$diversity <- "no combinations at this frequency cutoff"
zz$diversity_per <- "-"
zz$diversity_1 <- "-"
zz$diversity_0 <- "-"
zz <- zz[!duplicated(zz), ]
colnames(zz)[1] <- "partition"
}
}
zz
}
if (missing(time)) {
WI_list1 <- quiet(lapply(WI_list, pqmcc))
PO_list1 <- quiet(lapply(PO_list, pqmcc))
dff2 <- plyr::ldply(WI_list1)[, -1]
dff3 <- plyr::ldply(PO_list1)[, ]
dff3$type <- "pooled"
dff3$partition <- "-"
total <- rbind(dff3, dff2)
} else if (missing(units)) {
BE_list1 <- quiet(lapply(BE_list, pqmcc))
PO_list1 <- quiet(lapply(PO_list, pqmcc))
dff1 <- plyr::ldply(BE_list1)[, -1]
dff3 <- plyr::ldply(PO_list1)[, ]
dff3$type <- "pooled"
dff3$partition <- "-"
total <- rbind(dff3, dff1)
} else {
BE_list1 <- quiet(lapply(BE_list, pqmcc))
WI_list1 <- quiet(lapply(WI_list, pqmcc))
PO_list1 <- quiet(lapply(PO_list, pqmcc))
dff1 <- plyr::ldply(BE_list1)[, -1]
dff2 <- plyr::ldply(WI_list1)[, -1]
dff3 <- plyr::ldply(PO_list1)[, ]
dff3$type <- "pooled"
dff3$partition <- "-"
total <- rbind(dff3, dff1, dff2)
}
total$diversity_old <- total$diversity
  total$diversity[total$diversity == "No variation in all conditions"] <- NA
total$diversity[total$diversity == "no combinations at this frequency cutoff"] <- NA
total$diversity_1[total$diversity_1 == "-"] <- NA
total$diversity_0[total$diversity_0 == "-"] <- NA
total$diversity <- as.numeric(total$diversity)
total$diversity_1 <- as.numeric(total$diversity_1)
total$diversity_0 <- as.numeric(total$diversity_0)
if(length(unique(total$diversity)) == 1){
total$diversity_per <- "-"
total$diversity_per_1 <- "-"
total$diversity_per_0 <- "-"
total$diversity <- "-"
total$diversity <- total$diversity_old
total$diversity_old <- NULL
}else{
    y <- as.numeric(max(total$diversity, na.rm = TRUE))
total$diversity_per <- ifelse(is.na(total$diversity), NA, total$diversity/y)
total$diversity_per_1 <- ifelse(is.na(total$diversity_1), NA, total$diversity_1/y)
total$diversity_per_0 <- ifelse(is.na(total$diversity_0), NA, total$diversity_0/y)
total$diversity <- total$diversity_old
total$diversity_old <- NULL}
total <- total[, c(2, 1, 3, 4, 5, 6, 7, 8)]
return(total)} |
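# Example (assumed usage; requires the QCA, plyr, and testit packages).
# The data set and column names here are hypothetical placeholders:
# div <- partition_div(panel_fs, units = "country", time = "year",
#                      cond = c("A", "B", "C"), out = "OUT",
#                      n_cut = 1, incl_cut = 0.8)
# head(div)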
data()
library(ISLR)
?Default
data("Default")
str(Default)
class(Default)
head(Default)
names(Default)
balance
attach(Default)
balance
str(Default)
Default
dim(Default)
head(Default)
str(Default)
summary(Default)
names(Default)
tmp = table(default)
tmp
333/9667
(tmp[[2]]/tmp[[1]])*100
tmp[[2]]/dim(Default)[1] * 100
library(ggplot2); library(gridExtra)
x = qplot(x=balance, y=income, color=default, shape=default, geom='point')+scale_shape(solid=FALSE)
y = qplot(x=default, y=balance, fill=default, geom='boxplot')+guides(fill=FALSE)
z = qplot(x=default, y=income, fill=default, geom='boxplot')+guides(fill=FALSE)
x
grid.arrange(y, z, nrow=1)
logitb = glm(default ~ balance, data=Default, family='binomial')
summary(logitb)
coef(logitb)
exp(coef(logitb))
range(Default$balance)
b1= logitb$coefficients[2]
exp(b1)^1
exp(b1)^100
exp(b1)^1000
exp(confint(logitb))
predict(logitb, newdata = data.frame(balance=c(0,100,500,1000,1500,1800,2500)), type=c('response'))
head(Default)
top_n(Default, 5, balance)
Default %>% arrange(balance) %>% slice(1:5)
Default %>% arrange(balance) %>% slice(seq(1,n(),1000))
str(Default)
logits = glm(default ~ student, data=Default, family='binomial')
summary(logits)
logits <- glm(default ~ student, data=Default, family='binomial')
summary(logits)
predict(logits, newdata = data.frame(student=c('Yes')), type=c('response'))
predict(logits, newdata = data.frame(student=c('No')), type=c('response'))
class(Default)
logit1 = glm(default ~ income + balance + student, family='binomial', data=Default)
summary(logit1)
exp(coef(logit1))
logit2 = glm(default ~ balance + student, family='binomial', data=Default)
summary(logit2)
exp(coef(logit2))
Default %>% group_by(student) %>% arrange(student, balance)
head(Default)
seq(1, 10000,500)
Default[c(1,501),]
Default[seq(1, 10000,500),]
library(dplyr)
(ndata = (slice(Default, seq(1,n(),500))))
ndata
slice(Default, seq(1,n(),1000))
head(ndata)
addmargins(prop.table(table(Default$default,Default$student)))
0.2817/0.9667; 0.0127/0.0333
options(digits=10)
fitted.results = predict(logit2, newdata=ndata,type='response')
fitted.results
head(fitted.results)
fitted.results
cbind(ndata, fitted.results)
ndata
ndata %>% mutate(predict = ifelse(fitted.results < 0.5, 'No','Yes'))
fitted.results
ifelse(fitted.results < 0.05, 0,1)
(ndata2 = data.frame(student=c('Yes','No'), balance=mean(Default$balance), income=mean(Default$income)))
(fitted.results2 <- predict(logit1, newdata=ndata2, type='response'))
library(caret)
set.seed(3456)
str(Default)
trainIndex <- createDataPartition(Default$default, p = .67,
list = FALSE, times = 1)
Train <- Default[ trainIndex,]
Test <- Default[-trainIndex,]
head(Train)
head(Test)
model = glm(default ~ student, data=Default, family='binomial')
Test$model_prob <- predict(model, Test, type = "response")
head(Test)
Test <- Test %>% mutate(default_pred = ifelse(model_prob > .5,'Yes','No'))
head(Test)
Test <- Test %>% mutate(accurate = 1*(default == default_pred))
sum(Test$accurate)/nrow(Test)
?createDataPartition
Default
head(Default)
logr1 = glm(default ~ student + balance + income, data=Default, family='binomial')
summary(logr1)
logr2 = glm(default ~ student + balance , data=Default, family='binomial')
summary(logr2)
ndata3 = Default[seq(1,nrow(Default), 1000), ]
ndata3
(p1 = predict(logr2, newdata = ndata3, type='response'))
cbind(ndata3, p1, p2= ifelse(p1 < 0.5, 'No', 'Yes')) |
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(genero) |
normnormexch=function(theta,data){
y=data[,1]
sigma2=data[,2]
mu=theta[1]
tau=exp(theta[2])
logf=function(mu,tau,y,sigma2)
dnorm(y,mu,sqrt(sigma2+tau^2),log=TRUE)
sum(logf(mu,tau,y,sigma2))+log(tau)
} |
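# Example (assumed usage): log posterior of theta = (mu, log tau) for
# four groups with known sampling variances; the numbers are illustrative.
schools_demo <- cbind(y = c(28, 8, -3, 7), sigma2 = c(15, 10, 16, 11)^2)
normnormexch(theta = c(8, log(5)), data = schools_demo)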
ranger_class_pred <-
function(results, object) {
if (results$treetype == "Probability estimation") {
res <- colnames(results$predictions)[apply(results$predictions, 1, which.max)]
} else {
res <- results$predictions
}
res
}
ranger_num_confint <- function(object, new_data, ...) {
hf_lvl <- (1 - object$spec$method$pred$conf_int$extras$level)/2
const <- qnorm(hf_lvl, lower.tail = FALSE)
res <-
tibble(
.pred = predict(object$fit, data = new_data, type = "response", ...)$predictions
)
std_error <- predict(object$fit, data = new_data, type = "se", ...)$se
res$.pred_lower <- res$.pred - const * std_error
res$.pred_upper <- res$.pred + const * std_error
res$.pred <- NULL
if (object$spec$method$pred$conf_int$extras$std_error)
res$.std_error <- std_error
res
}
ranger_class_confint <- function(object, new_data, ...) {
hf_lvl <- (1 - object$spec$method$pred$conf_int$extras$level)/2
const <- qnorm(hf_lvl, lower.tail = FALSE)
pred <- predict(object$fit, data = new_data, type = "response", ...)$predictions
pred <- as_tibble(pred)
std_error <- predict(object$fit, data = new_data, type = "se", ...)$se
colnames(std_error) <- colnames(pred)
std_error <- as_tibble(std_error)
names(std_error) <- paste0(".std_error_", names(std_error))
lowers <- pred - const * std_error
names(lowers) <- paste0(".pred_lower_", names(lowers))
uppers <- pred + const * std_error
names(uppers) <- paste0(".pred_upper_", names(uppers))
res <- cbind(lowers, uppers)
res[res < 0] <- 0
res[res > 1] <- 1
res <- as_tibble(res)
lvl <- rep(object$fit$forest$levels, each = 2)
col_names <- paste0(c(".pred_lower_", ".pred_upper_"), lvl)
res <- res[, col_names]
if (object$spec$method$pred$conf_int$extras$std_error)
res <- bind_cols(res, std_error)
res
}
ranger_confint <- function(object, new_data, ...) {
if (object$fit$forest$treetype == "Regression") {
res <- ranger_num_confint(object, new_data, ...)
} else {
if (object$fit$forest$treetype == "Probability estimation") {
res <- ranger_class_confint(object, new_data, ...)
} else {
rlang::abort(
glue::glue(
"Cannot compute confidence intervals for a ranger forest ",
"of type {object$fit$forest$treetype}."
)
)
}
}
res
}
set_new_model("rand_forest")
set_model_mode("rand_forest", "classification")
set_model_mode("rand_forest", "regression")
set_model_engine("rand_forest", "classification", "ranger")
set_model_engine("rand_forest", "regression", "ranger")
set_dependency("rand_forest", "ranger", "ranger")
set_model_arg(
model = "rand_forest",
eng = "ranger",
parsnip = "mtry",
original = "mtry",
func = list(pkg = "dials", fun = "mtry"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "ranger",
parsnip = "trees",
original = "num.trees",
func = list(pkg = "dials", fun = "trees"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "ranger",
parsnip = "min_n",
original = "min.node.size",
func = list(pkg = "dials", fun = "min_n"),
has_submodel = FALSE
)
set_fit(
model = "rand_forest",
eng = "ranger",
mode = "classification",
value = list(
interface = "data.frame",
protect = c("x", "y", "case.weights"),
func = c(pkg = "ranger", fun = "ranger"),
defaults =
list(
num.threads = 1,
verbose = FALSE,
seed = expr(sample.int(10 ^ 5, 1))
)
)
)
set_encoding(
model = "rand_forest",
eng = "ranger",
mode = "classification",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = TRUE
)
)
set_fit(
model = "rand_forest",
eng = "ranger",
mode = "regression",
value = list(
interface = "data.frame",
protect = c("x", "y", "case.weights"),
func = c(pkg = "ranger", fun = "ranger"),
defaults =
list(
num.threads = 1,
verbose = FALSE,
seed = expr(sample.int(10 ^ 5, 1))
)
)
)
set_encoding(
model = "rand_forest",
eng = "ranger",
mode = "regression",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = TRUE
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "classification",
type = "class",
value = list(
pre = NULL,
post = ranger_class_pred,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
data = quote(new_data),
type = "response",
seed = expr(sample.int(10 ^ 5, 1)),
verbose = FALSE
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "classification",
type = "prob",
value = list(
pre = function(x, object) {
if (object$fit$forest$treetype != "Probability estimation")
rlang::abort(
glue::glue(
"`ranger` model does not appear to use class probabilities. Was ",
"the model fit with `probability = TRUE`?"
)
)
x
},
post = function(x, object) {
x <- x$prediction
as_tibble(x)
},
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
data = quote(new_data),
seed = expr(sample.int(10 ^ 5, 1)),
verbose = FALSE
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "classification",
type = "conf_int",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "ranger_confint"),
args =
list(
object = quote(object),
new_data = quote(new_data),
seed = expr(sample.int(10^5, 1))
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "classification",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
data = quote(new_data),
seed = expr(sample.int(10 ^ 5, 1))
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "regression",
type = "numeric",
value = list(
pre = NULL,
post = function(results, object)
results$predictions,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
data = quote(new_data),
type = "response",
seed = expr(sample.int(10 ^ 5, 1)),
verbose = FALSE
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "regression",
type = "conf_int",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "ranger_confint"),
args =
list(
object = quote(object),
new_data = quote(new_data),
seed = expr(sample.int(10^5, 1))
)
)
)
set_pred(
model = "rand_forest",
eng = "ranger",
mode = "regression",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
data = quote(new_data),
seed = expr(sample.int(10 ^ 5, 1))
)
)
)
set_model_engine("rand_forest", "classification", "randomForest")
set_model_engine("rand_forest", "regression", "randomForest")
set_dependency("rand_forest", "randomForest", "randomForest")
set_model_arg(
model = "rand_forest",
eng = "randomForest",
parsnip = "mtry",
original = "mtry",
func = list(pkg = "dials", fun = "mtry"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "randomForest",
parsnip = "trees",
original = "ntree",
func = list(pkg = "dials", fun = "trees"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "randomForest",
parsnip = "min_n",
original = "nodesize",
func = list(pkg = "dials", fun = "min_n"),
has_submodel = FALSE
)
set_fit(
model = "rand_forest",
eng = "randomForest",
mode = "classification",
value = list(
interface = "data.frame",
protect = c("x", "y"),
func = c(pkg = "randomForest", fun = "randomForest"),
defaults =
list()
)
)
set_encoding(
model = "rand_forest",
eng = "randomForest",
mode = "classification",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
set_fit(
model = "rand_forest",
eng = "randomForest",
mode = "regression",
value = list(
interface = "data.frame",
protect = c("x", "y"),
func = c(pkg = "randomForest", fun = "randomForest"),
defaults =
list()
)
)
set_encoding(
model = "rand_forest",
eng = "randomForest",
mode = "regression",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
set_pred(
model = "rand_forest",
eng = "randomForest",
mode = "regression",
type = "numeric",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(object = quote(object$fit),
newdata = quote(new_data))
)
)
set_pred(
model = "rand_forest",
eng = "randomForest",
mode = "regression",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(object = quote(object$fit),
newdata = quote(new_data))
)
)
set_pred(
model = "rand_forest",
eng = "randomForest",
mode = "classification",
type = "class",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args = list(object = quote(object$fit), newdata = quote(new_data))
)
)
set_pred(
model = "rand_forest",
eng = "randomForest",
mode = "classification",
type = "prob",
value = list(
pre = NULL,
post = function(x, object) {
as_tibble(as.data.frame(x))
},
func = c(fun = "predict"),
args =
list(
object = quote(object$fit),
newdata = quote(new_data),
type = "prob"
)
)
)
set_pred(
model = "rand_forest",
eng = "randomForest",
mode = "classification",
type = "raw",
value = list(
pre = NULL,
post = NULL,
func = c(fun = "predict"),
args =
list(object = quote(object$fit),
newdata = quote(new_data))
)
)
set_model_engine("rand_forest", "classification", "spark")
set_model_engine("rand_forest", "regression", "spark")
set_dependency("rand_forest", "spark", "sparklyr")
set_model_arg(
model = "rand_forest",
eng = "spark",
parsnip = "mtry",
original = "feature_subset_strategy",
func = list(pkg = "dials", fun = "mtry"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "spark",
parsnip = "trees",
original = "num_trees",
func = list(pkg = "dials", fun = "trees"),
has_submodel = FALSE
)
set_model_arg(
model = "rand_forest",
eng = "spark",
parsnip = "min_n",
original = "min_instances_per_node",
func = list(pkg = "dials", fun = "min_n"),
has_submodel = FALSE
)
set_fit(
model = "rand_forest",
eng = "spark",
mode = "classification",
value = list(
interface = "formula",
data = c(formula = "formula", data = "x"),
protect = c("x", "formula", "type"),
func = c(pkg = "sparklyr", fun = "ml_random_forest"),
defaults = list(seed = expr(sample.int(10 ^ 5, 1)))
)
)
set_encoding(
model = "rand_forest",
eng = "spark",
mode = "classification",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
set_fit(
model = "rand_forest",
eng = "spark",
mode = "regression",
value = list(
interface = "formula",
data = c(formula = "formula", data = "x"),
protect = c("x", "formula", "type"),
func = c(pkg = "sparklyr", fun = "ml_random_forest"),
defaults = list(seed = expr(sample.int(10 ^ 5, 1)))
)
)
set_encoding(
model = "rand_forest",
eng = "spark",
mode = "regression",
options = list(
predictor_indicators = "none",
compute_intercept = FALSE,
remove_intercept = FALSE,
allow_sparse_x = FALSE
)
)
set_pred(
model = "rand_forest",
eng = "spark",
mode = "regression",
type = "numeric",
value = list(
pre = NULL,
post = format_spark_num,
func = c(pkg = "sparklyr", fun = "ml_predict"),
args =
list(x = quote(object$fit),
dataset = quote(new_data))
)
)
set_pred(
model = "rand_forest",
eng = "spark",
mode = "classification",
type = "class",
value = list(
pre = NULL,
post = format_spark_class,
func = c(pkg = "sparklyr", fun = "ml_predict"),
args =
list(x = quote(object$fit),
dataset = quote(new_data))
)
)
set_pred(
model = "rand_forest",
eng = "spark",
mode = "classification",
type = "prob",
value = list(
pre = NULL,
post = format_spark_probs,
func = c(pkg = "sparklyr", fun = "ml_predict"),
args =
list(x = quote(object$fit),
dataset = quote(new_data))
)
) |
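# Example (assumed usage, standard parsnip API; left unevaluated because
# it needs the registrations above loaded in a parsnip session):
# library(parsnip)
# rf_spec <- rand_forest(mode = "regression", trees = 500) %>%
#   set_engine("ranger")
# rf_fit <- fit(rf_spec, mpg ~ ., data = mtcars)
# predict(rf_fit, new_data = mtcars[1:3, ])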
kissmig <- function(O, S=NULL, it, type='FOC', signed=FALSE, pext=1.0, pcor=0.2, seed=NULL) {
  if (!inherits(O, 'RasterLayer')) stop("origin 'O' must be a RasterLayer")
ans <- O
ov <- values(O)
type <- toupper(type)
  if (type %in% c('DIS', 'FOC', 'LOC', 'NOC')) {
    ty <- which(c('DIS', 'FOC', 'LOC', 'NOC') == type)
  } else {
    stop("'type' must be 'DIS', 'FOC', 'LOC', or 'NOC'", call. = FALSE)
  }
  if (is.null(S)) {
    sv <- rep(1.0, ncell(O))
    dh <- dim(O)
    warning('no suitability data found - globally set to 1.0', call. = FALSE)
  } else {
    if (!inherits(S, c('RasterLayer', 'RasterStack', 'RasterBrick'))) {
      stop("suitability 'S' must be a RasterLayer, RasterStack, or RasterBrick")
    }
    compareRaster(O, S)
    sv <- as.vector(values(S))
    dh <- dim(S)
  }
if (!is.null(seed)) set.seed(seed)
si <- ifelse(signed, 1, 0)
v <- .Call('kissmig_c',
as.double(ov),
as.double(sv),
as.integer(dh),
as.integer(it),
as.double(pext),
as.double(pcor),
as.integer(ty),
as.integer(si),
PACKAGE='kissmig'
)
values(ans) <- v
return(ans)
} |
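# Example (assumed usage; needs the raster package and the compiled
# kissmig_c backend, so the sketch is left unevaluated):
# library(raster)
# O <- raster(nrows = 50, ncols = 50)
# values(O) <- 0
# O[1250] <- 1
# ans <- kissmig(O, it = 10, type = 'FOC', seed = 123)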
tabPanel('Inference', value = 'tab_infer_home',
fluidPage(
fluidRow(
column(12),
br(),
column(12, align = 'center',
h5('What do you want to do?')
),
br(),
br(),
br(),
column(3),
column(4, align = 'left',
h5('Comparison of one group to a hypothetical value')
),
column(2, align = 'left',
actionButton(inputId = 'button_infer_home_1', label = 'Click Here', width = '120px')
),
column(3),
br(),
br(),
br(),
column(3),
column(4, align = 'left',
h5('Comparison of two groups')
),
column(2, align = 'left',
actionButton(inputId = 'button_infer_home_2', label = 'Click Here', width = '120px')
),
column(3),
br(),
br(),
br(),
column(3),
column(4, align = 'left',
h5('Comparison of three or more groups')
),
column(2, align = 'left',
actionButton(inputId = 'button_infer_home_3', label = 'Click Here', width = '120px')
),
column(3)
)
)
) |
library("dplyr")
library("ggplot2")
library("car")
library("boot")
library("tree")
library("randomForest")
library(pROC)
library(AER)
data("Affairs")
Affairs.data <- Affairs
head(Affairs.data)
summary(Affairs.data)
str(Affairs.data)
Affairs.data %>%
group_by(gender) %>%
summarise(total_participants = n()) %>%
ungroup() %>%
mutate(prop_gender = total_participants/sum(total_participants))
ggplot(Affairs.data, aes(gender)) + geom_bar(width = 0.2) +
xlab("Gender") + ylab("Frequency") +
ggtitle("Number of participants based on gender")
Affairs.data %>%
summarise(avg_age = mean(age, na.rm = TRUE))
ggplot(Affairs.data, aes(occupation)) + geom_bar(width = 0.2) +
xlab("Occupation") + ylab("Frequency") +
ggtitle("Number of participants based on occupation") +
scale_x_continuous(breaks=c(1, 2, 3, 4, 5, 6, 7),
labels=c("Class 1", "Class 2", "Class 3", "Class 4",
"Class 5", "Class 6", "Class 7"))
ggplot(Affairs.data, aes(education)) + geom_bar(width = 0.2) +
xlab("Education") + ylab("Frequency") +
ggtitle("Number of participants based on education") +
scale_x_continuous(breaks=c(9, 12, 14, 16, 17, 18, 20),
labels=c("Grade School", "High School Graduate",
"Some college", "College graduate",
"Some graduate work", "Masters degree",
"Advanced Degree")) + coord_flip()
Affairs.data$haveaffair[Affairs.data$affairs > 0] <- 1
Affairs.data$haveaffair[Affairs.data$affairs == 0] <- 0
Affairs$haveaffair <- factor(Affairs.data$haveaffair,
levels=c(0,1),
labels=c("No","Yes"))
table(Affairs$haveaffair)
fit.allpredictors <- glm(haveaffair ~ gender + age + yearsmarried + children +
religiousness + education + occupation +rating,
data=Affairs.data,family=binomial())
library("bestglm")
Affairs.data$y <- Affairs.data$haveaffair
Affairs.for.bestglm <- Affairs.data[,c("gender","age","yearsmarried","children",
"religiousness", "education",
"occupation", "rating", "y")]
set.seed(1)
fit.reduced <- bestglm(Affairs.for.bestglm, family = binomial,
method = "exhaustive")
fit.reduced$BestModel
summary(fit.allpredictors)
testdata <- data.frame(yearsmarried=mean(Affairs.data$yearsmarried),
religiousness=mean(Affairs.data$religiousness),
rating=c(1, 2, 3, 4, 5))
testdata$prob <- predict(fit.reduced$BestModel, testdata, type="response")
testdata
ggplot(testdata, aes(rating, prob)) + geom_point() + geom_line() +
xlab("Rating") + ylab("Probabilty of having an affair") +
ggtitle("Rating of marriage vs probability of having an affair") +
scale_x_continuous(breaks=c(1, 2, 3, 4, 5),
labels=c("Very unhappy", "Somewhat unhappy",
"Average", "Happier than avaerage", "Very happy")) |
\dontrun{
library("shiny")
library("rbokeh")
ui <- fluidPage(
rbokehOutput("rbokeh")
)
server <- function(input, output, session) {
output$rbokeh <- renderRbokeh({
invalidateLater(1000, session)
figure() %>%
ly_points(jitter(cars$speed), jitter(cars$dist))
})
}
shinyApp(ui, server)
library("shiny")
library("rbokeh")
ui <- fluidPage(
rbokehOutput("rbokeh", width = 500, height = 540),
textOutput("x_range_text")
)
server <- function(input, output, session) {
output$rbokeh <- renderRbokeh({
figure() %>% ly_points(1:10) %>%
x_range(callback = shiny_callback("x_range"))
})
output$x_range_text <- reactive({
xrng <- input$x_range
if(!is.null(xrng)) {
paste0("factors: ", xrng$factors, ", start: ", xrng$start,
", end: ", xrng$end)
} else {
"waiting for axis event..."
}
})
}
shinyApp(ui, server)
} |