knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
N <- 1e7
piR <- function(n, rng = runif) {
  x <- rng(n)
  y <- rng(n)
  4 * sum(sqrt(x^2 + y^2) < 1.0) / n
}
set.seed(42)
system.time(cat("pi ~= ", piR(N), "\n"))
library(dqrng)
dqRNGkind("Xoroshiro128+")
dqset.seed(42)
system.time(cat("pi ~= ", piR(N, rng = dqrunif), "\n"))
system.time(runif(N))
system.time(dqrunif(N))
system.time(rexp(N))
system.time(dqrexp(N))
system.time(rnorm(N))
system.time(dqrnorm(N))
system.time(for (i in 1:100) sample.int(N, N/100, replace = TRUE))
system.time(for (i in 1:100) dqsample.int(N, N/100, replace = TRUE))
system.time(for (i in 1:100) sample.int(N, N/100))
system.time(for (i in 1:100) dqsample.int(N, N/100))
"reyes" |
.get_raw_data_path <- function(file_name) {
message("Using package raw data path, not resource directory.")
dr <- system.file("data-raw", package = "icd")
if (missing(file_name)) {
dr
} else {
file.path(dr, file_name)
}
}
.get_versioned_raw_file_name <- function(base_name, ver) {
paste0("ver", ver, "_", base_name)
}
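# Illustrative use of the helper above (the file name is hypothetical):
# .get_versioned_raw_file_name("icd10_codes.csv", ver = "2023")
# #> [1] "ver2023_icd10_codes.csv"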
.save_in_data_dir <- function(var_name,
x = NULL,
compress = "xz",
envir = parent.frame()) {
if (!is.character(var_name)) {
var_name <- as.character(substitute(var_name))
}
message("Asked to save ", var_name, " to PACKAGE data dir")
stopifnot(is.character(var_name))
if (is.null(x)) {
stopifnot(exists(var_name, envir = envir))
x <- get(var_name, envir = envir)
}
package_dir <- getwd()
data_path <- "data"
assign(x = var_name, value = x)
.assign(var_name = var_name, value = x)
out_file <- file.path(
package_dir,
data_path,
paste0(var_name, ".rda")
)
oldx <- NULL
if (file.exists(out_file)) {
load(file = out_file, envir = (tenv <- new.env(parent = emptyenv())))
oldx <- tenv[[var_name]]
}
if (!identical(x, oldx)) {
warning("Data for ", sQuote(var_name), " is being updated.")
old_file <- tempfile(paste0(var_name, ".old"), fileext = ".rds")
new_file <- tempfile(paste0(var_name, ".new"), fileext = ".rds")
message(
"Saving old to: ", old_file,
" and new to: ", new_file
)
saveRDS(oldx, old_file, version = 2)
saveRDS(x, new_file, version = 2)
if (!askYesNo("Proceed?", default = FALSE)) {
message(
"Examine differences. Consider:",
"testthat::compare(x, oldx), ",
"daff::diff_data, ",
"compareDF (slow), compare::compare (may need lapply), ",
"arsenal::comparedf may have details but summary superficial, ",
"base::setdiff not so helpful"
)
if (!askYesNo("Continue saving?", default = FALSE)) {
stop("Not saved in package data.")
}
}
save(
list = var_name,
envir = envir,
file = out_file,
compress = compress,
version = 2
)
message("Now reload package to enable updated/new data: ", var_name)
} else {
message("No change in data, so not re-saving.")
}
invisible(x)
}
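# Typical use from a data-raw script (a minimal sketch; `my_lookup` is a
# hypothetical dataset name):
# my_lookup <- data.frame(code = c("A", "B"), value = 1:2)
# .save_in_data_dir("my_lookup")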
.save_in_cache <- function(var_name,
x = NULL,
envir = parent.frame()) {
if (!is.character(var_name)) {
var_name <- as.character(substitute(var_name))
}
stopifnot(is.character(var_name))
if (is.null(x)) {
stopifnot(exists(var_name, envir = envir))
x <- get(var_name, envir = envir)
}
.assign(var_name, x)
saveRDS(x,
.rds_path(var_name),
compress = "gzip"
)
invisible(.get(var_name))
}
.unzip_single <- function(url,
file_name,
save_path,
insecure = TRUE,
dl_msg = NULL,
...) {
stopifnot(is.character(url))
stopifnot(is.character(file_name))
stopifnot(is.character(save_path))
if (file.exists(save_path)) {
return(TRUE)
}
if (!.confirm_download()) {
return(FALSE)
}
zipfile <- tempfile(fileext = ".zip")
on.exit(unlink(zipfile), add = TRUE)
extra <- if (insecure) "--insecure --silent" else NULL
if (.verbose() && !is.null(dl_msg)) message(dl_msg)
dl_code <- utils::download.file(
url = url,
destfile = zipfile,
quiet = !.verbose(),
method = "curl",
extra = extra,
...
)
stopifnot(dl_code == 0)
zipdir <- tempfile()
on.exit(unlink(zipdir), add = TRUE)
dir.create(zipdir)
optwarn <- options(warn = 10)
on.exit(options(optwarn), add = TRUE)
file_paths <- utils::unzip(zipfile, exdir = zipdir)
options(optwarn)
if (length(file_paths) == 0) {
stop("No files found in zip: ", zipfile)
}
files <- list.files(zipdir)
if (length(files) == 0) stop("No files in unzipped directory")
if (missing(file_name)) {
if (length(files) == 1) {
file_name <- files
} else {
stop(
"multiple files in zip, but no file name specified: ",
paste(files, collapse = ", ")
)
}
} else {
if (!file_name %in% files) {
message("files")
print(files)
message("file_name")
print(file_name)
stop(file_name, " not found in ", paste(files, collapse = ", "))
}
}
ret <- file.copy(file.path(zipdir, file_name), save_path, overwrite = TRUE)
unlink(zipdir, recursive = TRUE)
ret
}
.dir_writable <- function(path) {
dir.exists(path) && file.access(path, 2) == 0
}
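# Quick checks (illustrative): the session temp directory is normally
# writable; a non-existent path is not.
# .dir_writable(tempdir())        # TRUE on most systems
# .dir_writable("/no/such/path")  # FALSE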
require(geometa, quietly = TRUE)
require(testthat)
context("ISOFormat")
test_that("encoding",{
testthat::skip_on_cran()
md <- ISOFormat$new()
md$setName("name")
md$setVersion("1.0")
md$setAmendmentNumber("2")
md$setSpecification("specification")
xml <- md$encode()
expect_is(xml, "XMLInternalNode")
md2 <- ISOFormat$new(xml = xml)
xml2 <- md2$encode()
expect_true(ISOAbstractObject$compare(md, md2))
})
test_that("encoding -i18n",{
testthat::skip_on_cran()
md <- ISOFormat$new()
md$setName(
"someone",
locales = list(
EN = "name in english",
FR = "nom en français",
ES = "Nombre en español",
AR = "الاسم باللغة العربية",
RU = "имя на русском",
ZH = "中文名"
))
md$setVersion("1.0")
md$setAmendmentNumber("2")
md$setSpecification(
"specification title",
locales = list(
EN="specification title",
FR="Titre de la spécification",
ES="Título de la especificación",
AR="عنوان المواصفات",
RU="название спецификации",
ZH="规范的标题"
))
xml <- md$encode()
expect_is(xml, "XMLInternalNode")
md2 <- ISOFormat$new(xml = xml)
xml2 <- md2$encode()
expect_true(ISOAbstractObject$compare(md, md2))
})
test_that("srt_to_df", {
t <- srt_to_df(system.file("extdata", "test.srt", package = "phonfieldwork"))
expect_true(nrow(t) == 4)
})
upsilon_cumuls <- function(df,t,order.max=3) {
kappa <- norm_cumuls(0,1,order.max) +
sumchisqpow_cumuls(wts=t/sqrt(df),df=df,ncp=0,pow=0.5,order.max=order.max)
return(kappa)
}
dupsilon <- function(x, df, t, log = FALSE, order.max=6) {
kappa <- upsilon_cumuls(df,t,order.max=order.max)
retval <- PDQutils::dapx_edgeworth(x,kappa,log=log)
return(retval)
}
pupsilon <- function(q, df, t, lower.tail = TRUE, log.p = FALSE, order.max=6) {
kappa <- upsilon_cumuls(df,t,order.max=order.max)
retval <- PDQutils::papx_edgeworth(q,kappa,lower.tail=lower.tail,log.p=log.p)
return(retval)
}
qupsilon <- function(p, df, t, lower.tail = TRUE, log.p = FALSE, order.max=6) {
kappa <- upsilon_cumuls(df,t,order.max=order.max)
retval <- PDQutils::qapx_cf(p,kappa,lower.tail=lower.tail,log.p=log.p)
return(retval)
}
rupsilon <- function(n, df, t) {
X <- rnorm(n) + rsumchisqpow(n, wts=t/sqrt(df), df=df, ncp=0, pow=0.5)
return(X)
}
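# Minimal sanity check (a sketch; requires PDQutils and the sumchisqpow
# helpers from the same package): the Edgeworth CDF and Cornish-Fisher
# quantile approximations should roughly invert each other.
# p <- pupsilon(1.5, df = 10, t = 2)
# qupsilon(p, df = 10, t = 2)  # approximately 1.5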
estimate <- EstMLECorrBin(Chromosome_data$No.of.Asso, Chromosome_data$fre, 0.1, 0.0001)
context("Checking outputs")
test_that("estimate method",{
expect_identical(estimate@method,
"BFGS")
})
test_that("estimate method",{
expect_identical(estimate@optimizer,
"optim")
})
test_that("minimized negative ll value",{
expect_identical(round(estimate@min,4),
436.743)
})
test_that("Checking class of output",{
expect_that(estimate,
is_a("mle2"))
})
basic_config <- function(
file = NULL,
fmt = "%L [%t] %m",
timestamp_fmt = "%Y-%m-%d %H:%M:%OS3",
threshold = "info",
appenders = NULL,
console = if (is.null(appenders)) "all" else FALSE,
console_fmt = "%L [%t] %m %f",
console_timestamp_fmt = "%H:%M:%OS3",
memory = FALSE
){
default_fmt = "%L [%t] %m"
stopifnot(
is.null(file) || is_scalar_character(file),
is.null(fmt) || is_scalar_character(fmt),
is_scalar_character(console_fmt),
is_scalar_character(timestamp_fmt),
is_threshold(threshold),
is_scalar_bool(console) || is_threshold(console),
is_scalar_bool(memory) || is_threshold(memory),
is.null(appenders) || is.list(appenders) || inherits(appenders, "Appender")
)
l <-
get_logger()$
config(NULL)$
set_threshold(threshold)
if (length(appenders)){
assert(
is.null(file) || !"file" %in% names(appenders),
"If `appenders` contains an appender named `file`, the `file` argument to basic_config() must be `NULL`"
)
assert(
isFALSE(console) || !"console" %in% names(appenders),
"If `appenders` contains an appender named `console`, the `console` argument to basic_config() must be `FALSE`"
)
assert(
isFALSE(memory) || !"memory" %in% names(appenders),
"If `appenders` contains an appender named `memory`, the `memory` argument to basic_config() must be `FALSE`"
)
l$set_appenders(appenders)
}
if (!is.null(file)){
ext <- tools::file_ext(file)
if (identical(tolower(ext), "json")){
stop(
"Please use `.jsonl` and not `.json` as the file extension for JSON log ",
"files. The reason is that JSON files created ",
"by lgr are not true JSON files but JSON Lines files. ",
"See https://jsonlines.org/ for more info."
)
} else if (identical(tolower(ext), "jsonl")){
if (!is.null(fmt) && !identical(fmt, default_fmt))
warning("`fmt` is ignored if `file` is a '.jsonl' file")
l$add_appender(
name = "file",
AppenderJson$new(file = file, threshold = NA)
)
} else {
l$add_appender(
name = "file",
AppenderFile$new(
file = file,
threshold = NA,
layout = LayoutFormat$new(
fmt = fmt,
timestamp_fmt = timestamp_fmt
)
)
)
}
}
if (!isFALSE(console)){
if (isTRUE(console)) console <- 400
l$add_appender(
name = "console",
AppenderConsole$new(
threshold = console,
layout = LayoutFormat$new(
colors = getOption("lgr.colors"),
fmt = console_fmt,
timestamp_fmt = console_timestamp_fmt
)
)
)
}
if (!isFALSE(memory)){
if (isTRUE(memory)) memory <- NA
l$add_appender(name = "memory", AppenderBuffer$new(
threshold = memory,
should_flush = NULL
))
}
lgr
}
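# Illustrative configuration (a sketch assuming the lgr package): log to the
# console at "info" and to a JSON Lines file at all levels.
# basic_config(file = tempfile(fileext = ".jsonl"), threshold = "debug")
# lgr::lgr$info("hello")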
library("reticulate")
library("igraph")
library("leiden")
library("bipartite")
set.seed(9000)
context("running Leiden on a bipartite igraph object")
suppressWarnings(suppressMessages({
bipartite_graph <- structure(list(32, FALSE,
c(18, 19, 20, 21, 22, 23, 25, 26, 18,
19, 20, 22, 23, 24, 25, 19, 20, 21, 22, 23, 24, 25, 26, 18, 20,
21, 22, 23, 24, 25, 20, 21, 22, 24, 20, 22, 23, 25, 22, 23, 24,
25, 23, 25, 26, 22, 24, 25, 26, 24, 25, 26, 29, 25, 26, 27, 29,
25, 26, 27, 29, 30, 31, 24, 25, 26, 27, 29, 30, 31, 23, 24, 26,
27, 28, 29, 30, 31, 24, 25, 27, 28, 29, 30, 31, 25, 26, 27, 29,
26, 28, 26, 28),
c(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10,
10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 13,
13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 15, 15,
15, 15, 16, 16, 17, 17),
c(0, 8, 23, 1, 9, 15, 2, 10, 16, 24,
30, 34, 3, 17, 25, 31, 4, 11, 18, 26, 32, 35, 38, 45, 5, 12,
19, 27, 36, 39, 42, 70, 13, 20, 28, 33, 40, 46, 49, 63, 71, 78,
6, 14, 21, 29, 37, 41, 43, 47, 50, 53, 57, 64, 79, 85, 7, 22,
44, 48, 51, 54, 58, 65, 72, 86, 89, 91, 55, 59, 66, 73, 80, 87,
74, 81, 90, 92, 52, 56, 60, 67, 75, 82, 88, 61, 68, 76, 83, 62,
69, 77, 84),
c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92),
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 6, 12, 16, 24, 32, 42, 56, 68, 74, 78, 85, 89, 93),
c(0, 8, 15, 23, 30, 34, 38, 42, 45, 49, 53, 57, 63, 70, 78, 85,
89, 91, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93,
93, 93),
list(c(1, 0, 1), structure(list(), .Names = character(0)),
list(type = c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE,
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE),
name = c("EVELYN", "LAURA", "THERESA", "BRENDA",
"CHARLOTTE", "FRANCES", "ELEANOR", "PEARL", "RUTH",
"VERNE", "MYRA", "KATHERINE", "SYLVIA", "NORA", "HELEN",
"DOROTHY", "OLIVIA", "FLORA", "E1", "E2", "E3", "E4",
"E5", "E6", "E7", "E8", "E9", "E10", "E11", "E12",
"E13", "E14")), list())), class = "igraph")
bipartite_graph <- upgrade_graph(bipartite_graph)
}))
bipartite_adj_mat <- igraph::as_adjacency_matrix(bipartite_graph)
modules <- reticulate::py_module_available("leidenalg") && reticulate::py_module_available("igraph")
skip_if_no_python <- function() {
if (!modules)
testthat::skip("leidenalg not available for testing")
}
test_that("run with CPMVertexPartition.Bipartite", {
skip_if_no_python()
partition <- leiden(bipartite_graph,
partition_type = "CPMVertexPartition.Bipartite",
resolution_parameter = 0.1,
seed = 9001)
expect_length(partition, length(V(bipartite_graph)))
expect_equal(sort(unique(partition)), c(1, 2))
expect_equal(partition,
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1))
bipartite_graph
})
test_that("run with CPMVertexPartition.Bipartite and degree as node size", {
skip_if_no_python()
partition <- leiden(bipartite_graph,
partition_type = "CPMVertexPartition.Bipartite",
resolution_parameter = 0.01,
degree_as_node_size = TRUE,
seed = 9001)
expect_length(partition, length(V(bipartite_graph)))
expect_equal(sort(unique(partition)), c(1, 2))
expect_equal(partition,
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2))
})
test_that("run with ModularityVertexPartition.Bipartite", {
skip_if_no_python()
partition <- leiden(bipartite_graph,
partition_type = "ModularityVertexPartition.Bipartite",
resolution_parameter = 0.01,
seed = 9001)
expect_length(partition, length(V(bipartite_graph)))
expect_equal(sort(unique(partition)), c(1, 2))
expect_equal(partition,
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2))
})
kalseries <- function(response=NULL, times=NULL, intensity="exponential",
depend="independence", mu=NULL, shape=NULL, density=FALSE, ccov=NULL,
tvcov=NULL, torder=0, interaction=NULL, preg=NULL, ptvc=NULL,
pintercept=NULL, pshape=NULL, pinitial=1, pdepend=NULL,
pfamily=NULL, delta=NULL, transform="identity", link="identity",
envir=parent.frame(), print.level=0, ndigit=10, gradtol=0.00001,
steptol=0.00001, fscale=1, iterlim=100, typsize=abs(p),
stepmax=10*sqrt(p%*%p)){
series <- function(p){
if(rf)b <- mu1(p)
if(sf)v <- sh1(p[nps1:np])
z <- .C("kserie_c",
p=as.double(p),
y=as.double(y),
t=as.double(times),
x=as.double(resp$ccov$ccov),
nind=as.integer(nind),
nobs=as.integer(nobs),
nbs=as.integer(n),
nccov=as.integer(nccov),
npv=as.integer(npv),
model=as.integer(mdl),
link=as.integer(lnk),
density=as.integer(density),
pfamily=as.integer(!is.null(pfamily)),
dep=as.integer(dep),
torder=as.integer(torder),
inter=as.integer(interaction),
tvc=as.integer(tvc),
tvcov=as.double(resp$tvcov$tvcov),
fit=as.integer(0),
pred=double(n),
rpred=double(n),
rf=as.integer(rf),
bbb=as.double(b),
sf=as.integer(sf),
vv=as.double(v),
like=double(1),
PACKAGE="repeated")
z$like}
serief <- function(p){
if(rf)b <- mu1(p)
if(sf)v <- sh1(p[nps1:np])
z <- .C("krand_c",
p=as.double(p),
y=as.double(y),
t=as.double(times),
x=as.double(resp$ccov$ccov),
nind=as.integer(nind),
nobs=as.integer(nobs),
nbs=as.integer(n),
nccov=as.integer(nccov),
npv=as.integer(npv),
model=as.integer(mdl),
link=as.integer(lnk),
density=as.integer(density),
torder=as.integer(torder),
inter=as.integer(interaction),
tvc=as.integer(tvc),
tvcov=as.double(resp$tvcov$tvcov),
fit=as.integer(0),
pred=double(n),
rpred=double(n),
rf=as.integer(rf),
bbb=as.double(b),
sf=as.integer(sf),
vv=as.double(v),
frser=as.integer(frser),
like=double(1),
PACKAGE="repeated")
z$like}
call <- sys.call()
tmp <- c("exponential","Weibull","gamma","gen logistic","normal",
"logistic","Cauchy","Laplace","log normal","log logistic",
"log Cauchy","log Laplace","inverse Gauss")
mdl <- match(intensity <- match.arg(intensity,tmp),tmp)
tmp <- c("independence","Markov","serial","frailty")
dep <- match(depend <- match.arg(depend,tmp),tmp)-1
transform <- match.arg(transform,c("identity","exp","square","sqrt","log"))
tmp <- c("identity","exp","square","sqrt","log")
lnk <- match(link <- match.arg(link,tmp),tmp)
rf <- !is.null(mu)
sf <- !is.null(shape)
type <- "unknown"
respenv <- exists(deparse(substitute(response)),envir=parent.frame())&&
inherits(response,"repeated")&&!inherits(envir,"repeated")
if(respenv){
if(dim(response$response$y)[2]>1)
stop("kalseries only handles univariate responses")
if(!is.null(response$NAs)&&any(response$NAs))
stop("kalseries does not handle data with NAs")
type <- response$response$type}
envname <- if(respenv)deparse(substitute(response))
else if(!is.null(class(envir)))deparse(substitute(envir))
else NULL
if(!respenv&&inherits(envir,"repeated")){
if(!is.null(envir$NAs)&&any(envir$NAs))
stop("kalseries does not handle data with NAs")
cn <- deparse(substitute(response))
if(length(grep("\"",cn))>0)cn <- response
if(length(cn)>1)stop("only one response variable allowed")
response <- envir
col <- match(cn,colnames(response$response$y))
if(is.na(col))stop(paste("response variable",cn,"not found"))
type <- response$response$type[col]
if(dim(response$response$y)[2]>1){
response$response$y <- response$response$y[,col,drop=FALSE]
if(!is.null(response$response$delta)){
response$response$delta <- response$response$delta[,col,drop=FALSE]
if(all(response$response$delta==1)||all(is.na(response$response$delta)))response$response$delta <- NULL}}}
if(respenv||inherits(envir,"repeated")){
if(rf)resp <- rmna(response$response)
else {
resp <- response
if(is.null(ccov))resp$ccov <- NULL
else if(inherits(ccov,"formula")){
if(any(is.na(match(rownames(attr(terms(ccov,data=response),"factors")),colnames(response$ccov$ccov)))))
stop("ccov covariate(s) not found")
tmp <- wr(ccov,data=response,expand=FALSE)$design
resp$ccov$ccov <- tmp[,-1,drop=FALSE]
rm(tmp)}
else stop("ccov must be a W&R formula")
if(is.null(tvcov))resp$tvcov <- NULL
else if(inherits(tvcov,"formula")){
if(any(is.na(match(rownames(attr(terms(tvcov,data=response),"factors")),colnames(response$tvcov$tvcov)))))
stop("tvcov covariate(s) not found")
tmp <- wr(tvcov,data=response)$design
resp$tvcov$tvcov <- tmp[,-1,drop=FALSE]
rm(tmp)}
else stop("tvcov must be a W&R formula")}
nccov <- if(rf||is.null(resp$ccov$ccov)) 0
else dim(resp$ccov$ccov)[2]
ttvc <- if(rf||is.null(resp$tvcov$tvcov)) 0
else dim(resp$tvcov$tvcov)[2]}
else {
if(inherits(response,"response")){
resp <- response
type <- response$type}
else resp <- restovec(response,times,delta=delta)
if(is.null(ccov))nccov <- 0
else {
if(!inherits(ccov,"tccov")){
ccname <- deparse(substitute(ccov))
if((is.matrix(ccov)&&is.null(colnames(ccov)))){
ccname <- deparse(substitute(ccov))
if(dim(ccov)[2]>1){
tmp <- NULL
for(i in 1:dim(ccov)[2])
tmp <- c(tmp,paste(ccname,i,sep=""))
ccname <- tmp}}
ccov <- tcctomat(ccov,names=ccname)}
nccov <- if(rf) 0 else dim(ccov$ccov)[2]}
if(is.null(tvcov))ttvc <- 0
else {
if(!inherits(tvcov,"tvcov")){
tvcname <- deparse(substitute(tvcov))
if(is.list(tvcov)&&dim(tvcov[[1]])[2]>1){
if(is.null(colnames(tvcov[[1]]))){
tvcname <- deparse(substitute(tvcov))
tmp <- NULL
for(i in 1:dim(tvcov[[1]])[2])
tmp <- c(tmp,paste(tvcname,i,sep=""))
tvcname <- tmp}
else tvcname <- colnames(tvcov[[1]])}
tvcov <- tvctomat(tvcov,names=tvcname)}
ttvc <- if(rf) 0 else dim(tvcov$tvcov)[2]}
resp <- rmna(response=resp, tvcov=tvcov, ccov=ccov)
if(!is.null(ccov))rm(ccov)
if(!is.null(tvcov))rm(tvcov)}
n <- dim(resp$response$y)[1]
nobs <- nobs(resp)
nind <- length(nobs)
if((inherits(envir,"repeated")&&(length(nobs)!=length(nobs(envir))||
any(nobs!=nobs(envir))))||(inherits(envir,"tvcov")&&
(length(nobs)!=length(envir$tvcov$nobs)||any(nobs!=envir$tvcov$nobs))))
stop("response and envir objects are incompatible")
if((intensity=="exponential"||intensity=="Weibull"||intensity=="gamma"
||intensity=="log normal"||intensity=="log logistic"
||intensity=="log Cauchy"||intensity=="log Laplace"
||intensity=="inverse Gauss")&&type!="unknown"&&type!="duration"
&&type!="continuous")stop("duration data required")
if((intensity=="gen logistic"||intensity=="normal"||intensity=="logistic"
||intensity=="Cauchy"||intensity=="Laplace")&&type!="unknown"
&&type!="continuous"&&type!="duration")stop("continuous data required")
if(!is.null(resp$response$censor))stop("kalseries does not handle censoring")
mu3 <- sh3 <- NULL
if(respenv||inherits(envir,"repeated")||inherits(envir,"tccov")||inherits(envir,"tvcov")){
if(is.null(envname))envname <- deparse(substitute(envir))
if(inherits(mu,"formula")){
mu3 <- if(respenv)finterp(mu,.envir=response,.name=envname)
else finterp(mu,.envir=envir,.name=envname)}
else if(is.function(mu)){
if(is.null(attr(mu,"model"))){
tmp <- parse(text=deparse(mu)[-1])
mu <- if(respenv)fnenvir(mu,.envir=response,.name=envname)
else fnenvir(mu,.envir=envir,.name=envname)
mu3 <- mu
attr(mu3,"model") <- tmp}
else mu3 <- mu}
if(inherits(shape,"formula")){
sh3 <- if(respenv)finterp(shape,.envir=response,.name=envname)
else finterp(shape,.envir=envir,.name=envname)}
else if(is.function(shape)){
if(is.null(attr(shape,"model"))){
tmp <- parse(text=deparse(shape)[-1])
shape <- if(respenv)fnenvir(shape,.envir=response,.name=envname)
else fnenvir(shape,.envir=envir,.name=envname)
sh3 <- shape
attr(sh3,"model") <- tmp}
else sh3 <- shape}}
npreg <- length(preg)
mu1 <- sh1 <- v <- b <- NULL
if(inherits(mu,"formula")){
pr <- if(npreg>0)preg else ptvc
npr <- length(pr)
mu2 <- if(respenv)
finterp(mu,.envir=response,.name=envname,.expand=is.null(preg))
else finterp(mu,.envir=envir,.name=envname,.expand=is.null(preg))
npt1 <- length(attr(mu2,"parameters"))
if(is.character(attr(mu2,"model"))){
if(length(attr(mu2,"model"))==1){
mu1 <- function(p) p[1]*rep(1,n)
attributes(mu1) <- attributes(mu2)
mu2 <- NULL}}
else {
if(npr!=npt1){
cat("\nParameters are ")
cat(attr(mu2,"parameters"),"\n")
stop(paste("preg or ptvc should have",npt1,"estimates"))}
if(is.list(pr)){
if(!is.null(names(pr))){
o <- match(attr(mu2,"parameters"),names(pr))
pr <- unlist(pr)[o]
if(sum(!is.na(o))!=length(pr))
stop("invalid estimates for mu - probably wrong names")}
else pr <- unlist(pr)
if(npreg>0)preg <- pr else ptvc <- pr}}
if(!is.null(mu2)){
if(inherits(envir,"tccov")){
cv <- covind(response)
mu1 <- function(p) mu2(p)[cv]
attributes(mu1) <- attributes(mu2)}
else {
mu1 <- mu2
rm(mu2)}}}
else if(is.function(mu))mu1 <- mu
if(!is.null(mu1)&&is.null(attr(mu1,"parameters"))){
attributes(mu1) <- if(is.function(mu)){
if(!inherits(mu,"formulafn")){
if(respenv)attributes(fnenvir(mu,.envir=response))
else attributes(fnenvir(mu,.envir=envir))}
else attributes(mu)}
else {
if(respenv)attributes(fnenvir(mu1,.envir=response))
else attributes(fnenvir(mu1,.envir=envir))}}
nlp <- if(is.function(mu1))length(attr(mu1,"parameters"))
else if(is.null(mu1))NULL
else npt1
if(!is.null(nlp)){
if(is.null(ptvc)&&nlp!=npreg)
stop(paste("preg should have",nlp,"initial estimates"))
else if(!is.null(ptvc)&&length(ptvc)!=nlp)
stop(paste("ptvc should have",nlp,"initial estimates"))}
nps <- length(pshape)
if(inherits(shape,"formula")){
sh2 <- if(respenv)finterp(shape,.envir=response,.name=envname)
else finterp(shape,.envir=envir,.name=envname)
npt2 <- length(attr(sh2,"parameters"))
common <- FALSE
npl1 <- if(common&&!inherits(shape,"formula")) 1 else nlp+1
if(is.character(attr(sh2,"model"))){
if(length(attr(sh2,"model"))==1){
sh1 <- function(p) p[npl1]*rep(1,n)
attributes(sh1) <- attributes(sh2)
sh2 <- NULL}}
else {
if(nps!=npt2){
cat("\nParameters are ")
cat(attr(sh2,"parameters"),"\n")
stop(paste("pshape should have",npt2,"estimates"))}
if(is.list(pshape)){
if(!is.null(names(pshape))){
o <- match(attr(sh2,"parameters"),names(pshape))
pshape <- unlist(pshape)[o]
if(sum(!is.na(o))!=length(pshape))
stop("invalid estimates for shape - probably wrong names")}
else pshape <- unlist(pshape)}}
if(!is.null(sh2)){
if(inherits(envir,"tccov")){
cv <- covind(response)
sh1 <- function(p) sh2(p)[cv]
attributes(sh1) <- attributes(sh2)}
else {
sh1 <- sh2
rm(sh2)}}}
else if(is.function(shape))sh1 <- shape
if(!is.null(sh1)&&is.null(attr(sh1,"parameters")))
attributes(sh1) <- if(is.function(shape)){
if(!inherits(shape,"formulafn")){
if(respenv)attributes(fnenvir(shape,.envir=response))
else attributes(fnenvir(shape,.envir=envir))}
else attributes(shape)}
else {
if(respenv)attributes(fnenvir(sh1,.envir=response))
else attributes(fnenvir(sh1,.envir=envir))}
nlp <- if(is.function(shape))length(attr(sh1,"parameters"))
else if(is.null(shape))NULL
else npt2
if(!is.null(nlp)&&nlp!=nps)
stop(paste("pshape should have",nlp,"initial estimates"))
if(rf&&!is.function(mu1))stop("mu must be a formula or function")
if(sf&&!is.function(sh1))stop("shape must be a formula or function")
tvc <- length(ptvc)
if(!rf&&(ttvc>0&&tvc!=ttvc||ttvc==0&&tvc>0))
stop(paste(ttvc,"initial estimates of coefficients for time-varying covariates must be supplied"))
if(intensity=="exponential"){
sf <- FALSE
pshape <- NULL}
else {
if(is.null(pshape))
stop("Initial value of the shape parameter must be supplied")
if(!sf){
if(pshape<=0)stop("Shape must be positive")
else pshape <- log(pshape)}}
if(intensity=="gen logistic"){
if(is.null(pintercept))
stop("Initial value of the intercept parameter must be supplied")}
else pintercept <- NULL
if(pinitial<=0)stop("Estimate of initial parameter must be greater than 0")
else pinitial <- log(pinitial)
frser <- FALSE
if(depend=="independence")pdepend <- NULL
else if(depend=="frailty"){
frser <- !is.null(pdepend)
if(frser){
if(pdepend<=0)stop("Dependence parameter must be positive")
pdepend <- log(pdepend)}}
else {
if(is.null(pdepend))
stop("An estimate of the dependence parameter must be supplied")
else if(pdepend<=0||pdepend>=1)
stop("Dependence parameter must be between 0 and 1")
else pdepend <- log(pdepend/(1-pdepend))}
if(is.null(resp$response$times)){
if(depend=="serial"||depend=="Markov")
stop("No times. Serial and Markov dependence cannot be fitted.")
ave <- times <- 0}
else {
if(torder>length(unique(resp$response$times)))
stop("torder is too large for the number of distinct times")
ave <- mean(resp$response$times)
times <- resp$response$times-ave}
if(!is.null(interaction)){
if(length(interaction)!=nccov)
stop(paste(nccov,"interactions with time must be specified"))
else if(any(interaction>torder))
stop(paste("Interactions can be at most of order ",torder))}
else interaction <- rep(0,nccov)
if(!is.null(pfamily)&&depend=="frailty")
stop("pfamily cannot be used with frailty dependence")
npv <- torder+sum(interaction)
if(rf&&npreg>0)nccov <- npreg-1
if(!rf&&1+nccov+npv!=npreg)
stop(paste(1+nccov+npv,"regression estimates must be supplied"))
y <- resp$response$y
if(!is.null(resp$response$delta))jacob <- if(length(resp$response$delta)==1)
-n*log(resp$response$delta)
else -sum(log(resp$response$delta))
else jacob <- 0
if((mdl<=3||mdl>=8)&&any(y<=0))stop("All responses must be positive")
if(transform=="exp"){
jacob <- jacob-sum(y)
y <- exp(y)}
else if(transform!="identity"){
if(any(y==0))stop("Zero response values: invalid transformation")
if(transform=="square"){
jacob <- jacob-sum(log(abs(y)))-n*log(2)
y <- y^2}
if(any(y<0))stop("Nonpositive response values: invalid transformation")
if(transform=="sqrt"){
jacob <- jacob+sum(log(y))/2+n*log(2)
y <- sqrt(y)}
else if(transform=="log"){
jacob <- jacob+sum(log(y))
y <- log(y)}}
if(rf){
if(tvc>0&&nccov>0)
stop("With a mean function or formula, initial estimates must be supplied either in preg or in ptvc")
if(tvc>0){
if(length(mu1(ptvc))!=n)
stop("The mu function or formula must provide an estimate for each observation")
tvc <- tvc-1}
else {
lp <- length(mu1(preg))
if(lp==1){
if(nccov==0)mu1 <- function(p) rep(p[1],nind)
else stop("Number of estimates does not correspond to mu function")}
else if(lp!=nind)
stop("The mu function or formula must provide an estimate for each individual")}}
if(sf&&length(sh1(pshape))!=n)
stop("The shape function must provide an estimate for each observation")
np <- 1+nccov+npv+tvc+1+(depend=="serial"||depend=="Markov")+
nps+(!is.null(pintercept))+frser+(!is.null(pfamily))
nps1 <- np-nps+1
p <- c(preg,ptvc,pinitial,pdepend,pfamily,pintercept,pshape)
if(dep==3)serie <- serief
else serie <- series
if(fscale==1)fscale <- serie(p)
if(is.na(serie(p)))
stop("Likelihood returns NAs: probably invalid initial values")
z0 <- nlm(serie, p=p, hessian=TRUE, print.level=print.level,
typsize=typsize, ndigit=ndigit, gradtol=gradtol, stepmax=stepmax,
steptol=steptol, iterlim=iterlim, fscale=fscale)
p <- z0$estimate
like <- z0$minimum+jacob
a <- if(any(is.na(z0$hessian))||any(abs(z0$hessian)==Inf))0
else qr(z0$hessian)$rank
if(a==np)cov <- solve(z0$hessian)
else cov <- matrix(NA,ncol=np,nrow=np)
se <- sqrt(diag(cov))
corr <- cov/(se%o%se)
dimnames(corr) <- list(1:np,1:np)
if(mdl==4)z <- list()
else {
z <- if(depend=="frailty"){
if(rf)b <- mu1(p)
if(sf)v <- sh1(p[nps1:np])
.C("krand_c",
p=as.double(p),
y=as.double(y),
t=as.double(times),
x=as.double(resp$ccov$ccov),
nind=as.integer(nind),
nobs=as.integer(nobs),
nbs=as.integer(n),
nccov=as.integer(nccov),
npv=as.integer(npv),
model=as.integer(mdl),
link=as.integer(lnk),
density=as.integer(density),
torder=as.integer(torder),
inter=as.integer(interaction),
tvc=as.integer(tvc),
tvcov=as.double(resp$tvcov$tvcov),
fit=as.integer(1),
pred=double(n),
rpred=double(n),
rf=as.integer(rf),
bbb=as.double(b),
sf=as.integer(sf),
vv=as.double(v),
frser=as.integer(frser),
like=double(1),
PACKAGE="repeated")}
else {
if(rf)b <- mu1(p)
if(sf)v <- sh1(p[nps1:np])
z <- .C("kserie_c",
p=as.double(p),
y=as.double(y),
t=as.double(times),
x=as.double(resp$ccov$ccov),
nind=as.integer(nind),
nobs=as.integer(nobs),
nbs=as.integer(n),
nccov=as.integer(nccov),
npv=as.integer(npv),
model=as.integer(mdl),
link=as.integer(lnk),
density=as.integer(density),
pfamily=as.integer(!is.null(pfamily)),
dep=as.integer(dep),
torder=as.integer(torder),
inter=as.integer(interaction),
tvc=as.integer(tvc),
tvcov=as.double(resp$tvcov$tvcov),
fit=as.integer(1),
pred=double(n),
rpred=double(n),
rf=as.integer(rf),
bbb=as.double(b),
sf=as.integer(sf),
vv=as.double(v),
like=double(1),
PACKAGE="repeated")}}
if(rf&&tvc>0){
nccov <- tvc
tvc <- 0}
if(!is.null(mu3))mu1 <- mu3
if(!is.null(sh3))sh1 <- sh3
z <- list(
call=call,
intensity=intensity,
pfamily=!is.null(pfamily),
mdl=mdl,
frser=frser,
mu=mu1,
npr=1+nccov+tvc+torder+sum(interaction),
shape=sh1,
nps=nps,
density=density,
depend=depend,
torder=torder,
interaction=interaction,
response=resp$response,
pred=z$pred,
rpred=z$rpred,
transform=transform,
ccov=resp$ccov,
tvcov=resp$tvcov,
link=link,
maxlike=like,
aic=like+np,
df=n-np,
npt=np,
npv=npv,
coefficients=p,
se=se,
cov=cov,
corr=corr,
grad=z0$gradient,
iterations=z0$iterations,
code=z0$code)
class(z) <- if(mdl==4)"kalseries" else c("kalseries","recursive")
return(z)}
deviance.kalseries <- function(object, ...) 2*object$maxlike
fitted.kalseries <- function(object, recursive=TRUE, ...)
if(recursive) object$rpred else object$pred
residuals.kalseries <- function(object, recursive=TRUE, ...){
if(object$transform=="exp")object$response$y <- exp(object$response$y)
else if(object$transform=="square")object$response$y <- object$response$y^2
else if(object$transform=="sqrt")object$response$y <- sqrt(object$response$y)
else if(object$transform=="log")object$response$y <- log(object$response$y)
res <- if(recursive)object$response$y-object$rpred else object$response$y-object$pred
class(res) <- "residuals"
res}
print.kalseries <- function(x,digits=max(3,.Options$digits-3),correlation=TRUE, ...){
z<-x
if(!is.null(z$ccov))nccov <- dim(z$ccov$ccov)[2]
else nccov <- 0
expm <- z$intensity!="exponential"&&!is.function(z$shape)
glm <- z$intensity=="gen logistic"
npt <- if(is.function(z$shape)) z$npt-z$nps else z$npt
deppar <- (z$depend=="serial"||z$depend=="Markov")
cat("\nCall:",deparse(z$call),sep="\n")
cat("\n")
if(z$code>2)cat("Warning: no convergence - error",z$code,"\n\n")
cat("Number of subjects ",length(nobs(z)),"\n")
cat("Number of observations",length(z$response$y),"\n")
if(!is.null(z$response$time))
cat("Times centred at ",mean(z$response$time),"\n")
cat("Transformation ",z$trans,"\n")
cat("Link function ",z$link,"\n\n")
if(z$density)cat(z$intensity," density",sep="")
else cat(z$intensity," intensity",sep="")
if(z$depend=="independence")cat(" with independence\n")
else cat(" with",z$depend,"dependence",if(z$frser)"and AR","\n")
cat("\n-Log likelihood ",z$maxlike,"\n")
cat("Degrees of freedom",z$df,"\n")
cat("AIC ",z$aic,"\n")
cat("Iterations ",z$iterations,"\n\n")
cat("Location parameters\n")
if(!is.null(attr(z$mu,"formula")))
cat(deparse(attr(z$mu,"formula")),sep="\n")
else if(!is.null(attr(z$mu,"model"))){
t <- deparse(attr(z$mu,"model"))
t[1] <- sub("expression\\(","",t[1])
t[length(t)] <- sub("\\)$","",t[length(t)])
cat(t,sep="\n")}
coef.table <- cbind(z$coef[1:z$npr],z$se[1:z$npr])
if(inherits(z$mu,"formulafn"))
cname <- if(is.character(attr(z$mu,"model")))attr(z$mu,"model")
else attr(z$mu,"parameters")
else {
cname <- "(Intercept)"
if(nccov)cname <- c(cname,colnames(z$ccov$ccov))
if(z$torder){
cname <- c(cname,paste("t^",1:z$torder,sep=""))
if(length(z$interaction)>0){
for(i in 1:nccov)if(z$interaction[i]>0){
cname <- c(cname,paste(colnames(z$ccov$ccov)[i],".t^",1:z$interaction[i],sep=""))}}}
if(!is.null(z$tvcov))cname <- c(cname,colnames(z$tvcov$tvcov))}
dimnames(coef.table) <- list(cname, c("estimate","se"))
print.default(coef.table, digits=digits, print.gap=2)
if(is.function(z$shape))cat("\nDependence parameters\n")
else cat("\nNonlinear parameters\n")
coef <- exp(z$coef[(npt-deppar-z$frser-z$pfamily-glm-expm):npt])
cname <- "initial"
if(deppar){
cname <- c(cname,"depend")
coef[2] <- coef[2]/(1+coef[2])}
if(z$frser)cname <- c(cname,"AR")
if(z$pfamily){
cname <- c(cname,"family")
coef[length(coef)-glm-expm] <- z$coef[npt-glm-expm]}
if(glm){
cname <- c(cname,"intercept")
coef[length(coef)-expm] <- z$coef[npt-expm]
if(expm){
cname <- c(cname,"asymptote")
coef[length(coef)] <- 1/coef[length(coef)]}}
else if(expm)cname <- c(cname,"shape")
coef.table <- cbind(z$coef[(npt-deppar-z$frser-z$pfamily-glm-expm):npt],
z$se[(npt-deppar-z$frser-z$pfamily-glm-expm):npt],coef)
dimnames(coef.table) <- list(cname, c("estimate","se","parameter"))
print.default(coef.table, digits=digits, print.gap=2)
if(z$depend=="frailty"){
tmp <- trigamma(exp(-z$coef[npt-deppar-expm-z$frser]))
cat("Correlation =",tmp/(tmp+trigamma(1)),"\n")}
if(inherits(z$shape,"formulafn")){
cat("\nShape parameters\n")
if(!is.null(attr(z$shape,"formula")))
cat(deparse(attr(z$shape,"formula")),sep="\n")
else if(!is.null(attr(z$shape,"model"))){
t <- deparse(attr(z$shape,"model"))
t[1] <- sub("expression\\(","",t[1])
t[length(t)] <- sub("\\)$","",t[length(t)])
cat(t,sep="\n")}
cname <- if(is.character(attr(z$shape,"model")))
attr(z$shape,"model")
else attr(z$shape,"parameters")
coef.table <- cbind(z$coef[(z$npt-z$nps+1):z$npt],
z$se[(z$npt-z$nps+1):z$npt])
dimnames(coef.table) <- list(cname, c("estimate","se"))
print.default(coef.table, digits=digits, print.gap=2)}
if(correlation){
cat("\nCorrelation matrix\n")
print.default(z$corr, digits=digits)}}
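# Illustrative fit (a sketch; assumes the `repeated` package and a response
# object `y` with observation times `times`):
# fit <- kalseries(y, times = times, intensity = "Weibull", depend = "Markov",
#                  preg = 1, pshape = 1, pdepend = 0.5)
# deviance(fit); fitted(fit)[1:5]; residuals(fit)[1:5]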
pred.jplm.cumhaz <- function(object, at=NULL, CI=FALSE){
digits = max(4, getOption("digits") - 1)
testpt = ifelse(matrix(object$wt, length(at), length(object$wt), byrow = TRUE) <=
matrix(at, length(at), length(object$wt)), 1, 0)
coef = c(testpt %*% object$lambdas)
coefTab = cbind(At=at, Value=coef)
out <- as.data.frame(round(coefTab, digits))
if (CI==TRUE) {
where= length(object$coef.fixed.surv)+length(object$coef.frailty.surv) + (1:length(object$wt))
cov.lambdas = object$covt[where, where]
se=sqrt(diag((as.matrix(testpt%*%cov.lambdas%*%t(testpt)))))
lower = coef - 1.96*se
upper = coef + 1.96*se
coefTab = cbind(coefTab, Std.Err=se, Lower=lower,Upper=upper, deparse.level = 0)
out <- as.data.frame(coefTab)
ind <- out$Lower < 0
if(sum(ind)>0) out$Lower[ind] <- sprintf("<0")
ind <- out$Upper < 0
if(sum(ind)>0) out$Upper[ind] <- sprintf("<0")
}
cat("\n<< Event Process >>\n")
transf.par=object$K
if (transf.par==0) cat("Transformation function: H(x) = x\n") else if (transf.par>0)
cat(sprintf("\nTransformation function: H(x) = log(1+%s)/%s\n",transf.par,transf.par))
cat("\nBaseline Cumulative Hazard:\n")
print(out)
}
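# Illustrative use (a sketch; `fit` stands for a fitted joint model carrying
# the components accessed above): baseline cumulative hazard at chosen times,
# with 95% pointwise intervals.
# pred.jplm.cumhaz(fit, at = c(1, 2, 5), CI = TRUE)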
null.model <- function(predictors, xy = NULL, type = c(1, 2), algorithm = c("brt", "maxent"),
nb, unique.data = TRUE, same = TRUE, background.nb = nb, nb.rep = 10,
tc = 2, lr = 0.001, bf = 0.75, n.trees = 50, step.size = n.trees) {
if (unique.data) {
xy <- unique(xy)
}
eval <- data.frame(AUC.value = NA, MaxSSS = NA, preferential.area = NA, omission.rate = NA, nb.omission = NA,
SD.value = NA)
stack.pred <- raster::subset(predictors, 1)
raster::values(stack.pred) <- NA
p.vect0 <- raster::reclassify(raster::subset(predictors, 1), base::cbind(NA, -99999))
tableau.cor <- data.frame(raster::rasterToPoints(p.vect0)[, 3])
tableau.cor <- base::replace(tableau.cor, tableau.cor == -99999, NA)
colnames(tableau.cor) <- "initial"
if (algorithm == "maxent")
{
if (type == 1)
{
for (i in 1:nb.rep) {
samples <- xy[sample(nrow(xy), nb), ]
SDMtab.object <- SDMtab(xydata = samples, predictors = predictors, unique.data = unique.data, same = same,background.nb=background.nb)
model <- compute.maxent(x = SDMtab.object, proj.predictors = predictors)
eval <- base::rbind(eval, SDMeval(model))
stack.pred <- raster::stack(stack.pred, model$raster.prediction)
prediction.wNA <- raster::reclassify(model$raster.prediction, base::cbind(NA, -99999))
p.vect <- data.frame(raster::rasterToPoints(prediction.wNA)[, 3])
p.vect <- replace(p.vect, p.vect == -99999, NA)
tableau.cor <- base::cbind(tableau.cor, p.vect)
}
}
if (type == 2)
{
for (boot in 1:nb.rep) {
vect.r <- raster::reclassify(raster::subset(predictors, 1), base::cbind(NA, -99999))
vect <- data.frame(raster::rasterToPoints(vect.r)[, c(1, 2)])
vect <- base::replace(vect, vect[, 1] == -99999, NA)
vect <- base::replace(vect, vect[, 2] == -99999, NA)
samples <- vect[sample(nrow(vect), nb), ]
SDMtab.object <- SDMtab(xydata = samples, predictors = predictors, unique.data = unique.data,
same = same, background.nb = background.nb)
model <- compute.maxent(x = SDMtab.object, proj.predictors = predictors)
eval <- base::rbind(eval, SDMeval(model))
stack.pred <- raster::stack(stack.pred, model$raster.prediction)
prediction.wNA <- raster::reclassify(model$raster.prediction, base::cbind(NA, -99999))
p.vect <- data.frame(raster::rasterToPoints(prediction.wNA)[, 3])
p.vect <- replace(p.vect, p.vect == -99999, NA)
tableau.cor <- base::cbind(tableau.cor, p.vect)
}
}
}
if (algorithm == "brt")
{
if (type == 1)
{
for (i in 1:nb.rep) {
samples <- xy[sample(nrow(xy), nb), ]
SDMtab.object <- SDMtab(xydata = samples, predictors = predictors, unique.data = unique.data,
same = same, background.nb = background.nb)
model <- compute.brt(x = SDMtab.object, proj.predictors = predictors,
tc=tc,lr=lr,bf=bf,n.trees=n.trees,step.size=step.size)
eval <- base::rbind(eval, SDMeval(model))
stack.pred <- raster::stack(stack.pred, model$raster.prediction)
prediction.wNA <- raster::reclassify(model$raster.prediction, base::cbind(NA, -99999))
p.vect <- data.frame(raster::rasterToPoints(prediction.wNA)[, 3])
p.vect <- replace(p.vect, p.vect == -99999, NA)
tableau.cor <- base::cbind(tableau.cor, p.vect)
}
}
if (type == 2)
{
for (boot in 1:nb.rep) {
vect.r <- raster::reclassify(raster::subset(predictors, 1), base::cbind(NA, -99999))
vect <- data.frame(raster::rasterToPoints(vect.r)[, c(1, 2)])
vect <- base::replace(vect, vect[, 1] == -99999, NA)
vect <- base::replace(vect, vect[, 2] == -99999, NA)
samples <- vect[sample(nrow(vect), nb), ]
SDMtab.object <- SDMtab(xydata = samples, predictors = predictors, unique.data = unique.data,
same = same, background.nb = background.nb)
model <- compute.brt(x = SDMtab.object, proj.predictors = predictors,
tc=tc,lr=lr,bf=bf,n.trees=n.trees,step.size=step.size)
eval <- base::rbind(eval, SDMeval(model))
stack.pred <- raster::stack(stack.pred, model$raster.prediction)
prediction.wNA <- raster::reclassify(model$raster.prediction, base::cbind(NA, -99999))
p.vect <- data.frame(raster::rasterToPoints(prediction.wNA)[, 3])
p.vect <- replace(p.vect, p.vect == -99999, NA)
tableau.cor <- base::cbind(tableau.cor, p.vect)
}
}
}
arguments <- list(xy = xy, predictors = predictors, type = type, algorithm = algorithm, nb = nb,
same = same, background.nb = background.nb, nb.rep = nb.rep)
eval <- eval[-1, ]
rownames(eval) <- seq(from = 1, to = nb.rep, by = 1)
eval.mean <- apply(eval, FUN = mean, MARGIN = 2, na.rm = T)
pred.stack <- raster::dropLayer(stack.pred, i = 1)
pred.mean <- raster::calc(pred.stack, fun = mean, na.rm = T)
tableau.cor <- tableau.cor[, -1]
colnames(tableau.cor) <- seq(from = 1, to = nb.rep, by = 1)
matrice.cor <- stats::cor(tableau.cor, method = "spearman", use = "pairwise.complete.obs")
lt <- lower.tri(matrice.cor)
data.cor <- matrice.cor[lt]
return(list(input = arguments, eval = eval, eval.null = eval.mean, pred.stack = pred.stack, pred.mean = pred.mean,
correlation = data.cor))
}
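# Illustrative call (a sketch; assumes SDMtab/compute.brt/SDMeval from the
# same package, a RasterStack `preds`, and presence coordinates `xy`):
# nm <- null.model(predictors = preds, xy = xy, type = 1, algorithm = "brt",
#                  nb = 100, nb.rep = 10)
# nm$eval.null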
q.mle.norm.estimate <- function(complete.lifespans, censored.lifespans)
{
mle(function(mean, sd, Q)
{
qsurvival.nllik("norm", complete.lifespans, censored.lifespans, Q, mean, sd)
},
method='L-BFGS-B',
lower=list(mean=min(complete.lifespans), sd=0.01, Q=0.000001),
upper=list(mean=max(complete.lifespans),
sd=(max(complete.lifespans) - min(complete.lifespans)),
Q=0.999999),
start=list(mean=mean(complete.lifespans),
sd=sd(complete.lifespans),
Q = 0.5))
}
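# Illustrative fit on simulated lifespans (a sketch; requires stats4::mle and
# qsurvival.nllik from the same package):
# set.seed(1)
# complete <- rnorm(100, mean = 20, sd = 4)
# censored <- rnorm(20, mean = 15, sd = 4)
# q.mle.norm.estimate(complete, censored)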
leapYear <- function( year )
{
return ( year %% 400 == 0 | ( year %% 4 == 0 & year %% 100 != 0 ) )
}
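# The Gregorian rule, vectorised: a year is a leap year when divisible by 400,
# or by 4 but not by 100.
# leapYear(c(1900, 2000, 2004, 2100))
# #> [1] FALSE  TRUE  TRUE FALSE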
s_hs <- function(..., char = NULL, fields = c("alias", "concept", "title"),
apropos, keyword, whatis, ignore.case = TRUE,
package = NULL, agrep = NULL, use_UTF8 = FALSE) {
pattern <- if (is.null(char)) cnscinfun() else char
if (is.list(pattern)) stop("... (or char) cannot be a list.")
utils::help.search(pattern, fields,
apropos, keyword, whatis, ignore.case = ignore.case,
package = package, agrep = agrep, use_UTF8 = use_UTF8)
}
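# Illustrative call via the `char` escape hatch (bypasses the non-standard
# evaluation of `...` handled by cnscinfun()):
# s_hs(char = "linear model", package = "stats")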
CDORemap <- function(data_array = NULL, lons, lats, grid, method,
avoid_writes = TRUE, crop = TRUE,
force_remap = FALSE, write_dir = tempdir()) {
.isRegularVector <- function(x, tol = 0.1) {
if (length(x) < 2) {
TRUE
} else {
spaces <- x[2:length(x)] - x[1:(length(x) - 1)]
(sum(abs(spaces - mean(spaces)) > mean(spaces) * tol) < 2)
}
}
known_lon_names <- .KnownLonNames()
known_lat_names <- .KnownLatNames()
if (!is.numeric(lons) || !is.numeric(lats)) {
stop("Expected numeric 'lons' and 'lats'.")
}
if (any(is.na(lons))) {
stop("Found invalid values in 'lons'.")
}
if (any(is.na(lats))) {
stop("Found invalid values in 'lats'.")
}
if (is.null(dim(lons))) {
dim(lons) <- length(lons)
}
if (is.null(dim(lats))) {
dim(lats) <- length(lats)
}
if (length(dim(lons)) > 2 || length(dim(lats)) > 2) {
stop("'lons' and 'lats' can only have up to 2 dimensions.")
}
if (length(dim(lons)) != length(dim(lats))) {
stop("'lons' and 'lats' must have the same number of dimensions.")
}
if (length(dim(lons)) == 2 && !all(dim(lons) == dim(lats))) {
stop("'lons' and 'lats' must have the same dimension sizes.")
}
return_array <- TRUE
if (is.null(data_array)) {
return_array <- FALSE
if (length(dim(lons)) == 1) {
array_dims <- c(length(lats), length(lons))
new_lon_dim_name <- 'lon'
new_lat_dim_name <- 'lat'
} else {
array_dims <- dim(lons)
new_lon_dim_name <- 'i'
new_lat_dim_name <- 'j'
}
if (!is.null(names(dim(lons)))) {
if (any(known_lon_names %in% names(dim(lons)))) {
new_lon_dim_name <- known_lon_names[which(known_lon_names %in% names(dim(lons)))[1]]
}
}
if (!is.null(names(dim(lats)))) {
if (any(known_lat_names %in% names(dim(lats)))) {
new_lat_dim_name <- known_lat_names[which(known_lat_names %in% names(dim(lats)))[1]]
}
}
names(array_dims) <- c(new_lat_dim_name, new_lon_dim_name)
data_array <- array(as.numeric(NA), array_dims)
}
if (!(is.logical(data_array) || is.numeric(data_array)) || !is.array(data_array)) {
stop("Parameter 'data_array' must be a numeric array.")
}
if (is.null(names(dim(data_array)))) {
stop("Parameter 'data_array' must have named dimensions.")
}
lon_dim <- which(known_lon_names %in% names(dim(data_array)))
if (length(lon_dim) < 1) {
stop("Could not find a known longitude dimension name in the provided 'data_array'.")
}
if (length(lon_dim) > 1) {
stop("Found more than one known longitude dimension names in the provided 'data_array'.")
}
lon_dim <- known_lon_names[lon_dim]
lat_dim <- which(known_lat_names %in% names(dim(data_array)))
if (length(lat_dim) < 1) {
stop("Could not find a known latitude dimension name in the provided 'data_array'.")
}
if (length(lat_dim) > 1) {
stop("Found more than one known latitude dimension name in the provided 'data_array'.")
}
lat_dim <- known_lat_names[lat_dim]
if (is.null(names(dim(lons)))) {
if (length(dim(lons)) == 1) {
names(dim(lons)) <- lon_dim
} else {
stop("Parameter 'lons' must be provided with dimension names.")
}
} else {
if (!(lon_dim %in% names(dim(lons)))) {
stop("Parameter 'lon' must have the same longitude dimension name as the 'data_array'.")
}
if (length(dim(lons)) > 1 && !(lat_dim %in% names(dim(lons)))) {
stop("Parameter 'lon' must have the same latitude dimension name as the 'data_array'.")
}
}
if (is.null(names(dim(lats)))) {
if (length(dim(lats)) == 1) {
names(dim(lats)) <- lat_dim
} else {
stop("Parameter 'lats' must be provided with dimension names.")
}
} else {
if (!(lat_dim %in% names(dim(lats)))) {
stop("Parameter 'lat' must have the same latitude dimension name as the 'data_array'.")
}
if (length(dim(lats)) > 1 && !(lon_dim %in% names(dim(lats)))) {
stop("Parameter 'lat' must have the same longitude dimension name as the 'data_array'.")
}
}
lons_attr_bk <- attributes(lons)
if (is.null(lons_attr_bk)) {
lons_attr_bk <- list()
}
lats_attr_bk <- attributes(lats)
if (is.null(lats_attr_bk)) {
lats_attr_bk <- list()
}
if (length(attr(lons, 'variables')) == 0) {
new_metadata <- list(list())
if (length(dim(lons)) == 1) {
names(new_metadata) <- lon_dim
} else {
names(new_metadata) <- paste0(lon_dim, '_var')
}
attr(lons, 'variables') <- new_metadata
}
if (!('units' %in% names(attr(lons, 'variables')[[1]]))) {
new_metadata <- attr(lons, 'variables')
new_metadata[[1]][['units']] <- 'degrees_east'
attr(lons, 'variables') <- new_metadata
}
if (length(attr(lats, 'variables')) == 0) {
new_metadata <- list(list())
if (length(dim(lats)) == 1) {
names(new_metadata) <- lat_dim
} else {
names(new_metadata) <- paste0(lat_dim, '_var')
}
attr(lats, 'variables') <- new_metadata
}
if (!('units' %in% names(attr(lats, 'variables')[[1]]))) {
new_metadata <- attr(lats, 'variables')
new_metadata[[1]][['units']] <- 'degrees_north'
attr(lats, 'variables') <- new_metadata
}
if (!is.character(grid)) {
stop("Parameter 'grid' must be a character string specifying a ",
"target CDO grid, 'rXxY' or 'tRESgrid', or a path to another ",
"NetCDF file.")
}
if (grepl('^r[0-9]{1,}x[0-9]{1,}$', grid)) {
grid_type <- 'regular'
grid_lons <- as.numeric(strsplit(strsplit(grid, 'x')[[1]][1], 'r')[[1]][2])
grid_lats <- as.numeric(strsplit(grid, 'x')[[1]][2])
} else if (grepl('^t[0-9]{1,}grid$', grid)) {
grid_type <- 'gaussian'
grid_t <- as.numeric(strsplit(strsplit(grid, 'grid')[[1]][1], 't')[[1]][2])
grid_size <- .t2nlatlon(grid_t)
grid_lons <- grid_size[2]
grid_lats <- grid_size[1]
} else {
grid_type <- 'custom'
}
if (method %in% c('bil', 'bilinear')) {
method <- 'bil'
} else if (method %in% c('bic', 'bicubic')) {
method <- 'bic'
} else if (method %in% c('con', 'conservative')) {
method <- 'con'
} else if (method %in% c('dis', 'distance-weighted')) {
method <- 'dis'
} else {
stop("Unsupported CDO remap method. 'bilinear', 'bicubic', 'conservative' or 'distance-weighted' supported only.")
}
if (!is.logical(avoid_writes)) {
stop("Parameter 'avoid_writes' must be a logical value.")
}
crop_tight <- FALSE
if (is.character(crop)) {
if (crop == 'tight') {
crop_tight <- TRUE
} else if (crop != 'preserve') {
stop("Parameter 'crop' can only take the values 'tight' or 'preserve' if specified as a character string.")
}
crop <- TRUE
}
if (is.logical(crop)) {
if (crop) {
warning("Parameter 'crop' = 'TRUE'. The output grid range will follow the input lons and lats.")
if (length(lons) == 1 || length(lats) == 1) {
stop("CDORemap cannot remap if crop = TRUE and values for only one ",
"longitude or one latitude are provided. Either a) provide ",
"values for more than one longitude/latitude, b) explicitly ",
"specify the crop limits in the parameter crop, or c) set ",
"crop = FALSE.")
}
if (crop_tight) {
lon_extremes <- c(min(lons), max(lons))
lat_extremes <- c(min(lats), max(lats))
} else {
if (length(dim(lons)) == 1) {
tmp_lon <- lons
} else {
min_pos <- which(lons == min(lons), arr.ind = TRUE)[1, ]
tmp_lon <- Subset(lons, lat_dim, min_pos[which(names(dim(lons)) == lat_dim)], drop = 'selected')
}
i <- 1:length(tmp_lon)
degree <- min(3, length(i) - 1)
lon_model <- lm(tmp_lon ~ poly(i, degree))
lon_extremes <- c(NA, NA)
left_is_min <- FALSE
right_is_max <- FALSE
if (which.min(tmp_lon) == 1) {
left_is_min <- TRUE
prev_lon <- predict(lon_model, data.frame(i = 0))
first_lon_cell_width <- (tmp_lon[1] - prev_lon)
lon_extremes[1] <- tmp_lon[1] - first_lon_cell_width / 2
} else {
lon_extremes[1] <- min(tmp_lon)
}
if (which.max(tmp_lon) == length(tmp_lon)) {
right_is_max <- TRUE
next_lon <- predict(lon_model, data.frame(i = length(tmp_lon) + 1))
last_lon_cell_width <- (next_lon - tmp_lon[length(tmp_lon)])
lon_extremes[2] <- tmp_lon[length(tmp_lon)] + last_lon_cell_width / 2
} else {
lon_extremes[2] <- max(tmp_lon)
}
if (right_is_max) {
if (lon_extremes[1] < -180) {
if (!((lon_extremes[2] < 180) && !((180 - lon_extremes[2]) <= last_lon_cell_width / 2))) {
lon_extremes[1] <- -180
lon_extremes[2] <- 180
}
} else if (lon_extremes[1] < 0) {
if (!((lon_extremes[2] < 360) && !((360 - lon_extremes[2]) <= last_lon_cell_width / 2))) {
lon_extremes[1] <- 0
lon_extremes[2] <- 360
}
}
}
if (left_is_min) {
if (lon_extremes[2] > 360) {
if (!((lon_extremes[1] > 0) && !(lon_extremes[1] <= first_lon_cell_width / 2))) {
lon_extremes[1] <- 0
lon_extremes[2] <- 360
}
} else if (lon_extremes[2] > 180) {
if (!((lon_extremes[1] > -180) && !((180 + lon_extremes[1]) <= first_lon_cell_width / 2))) {
lon_extremes[1] <- -180
lon_extremes[2] <- 180
}
}
}
if (length(dim(lats)) == 1) {
tmp_lat <- lats
} else {
min_pos <- which(lats == min(lats), arr.ind = TRUE)[1, ]
tmp_lat <- Subset(lats, lon_dim, min_pos[which(names(dim(lats)) == lon_dim)], drop = 'selected')
}
i <- 1:length(tmp_lat)
degree <- min(3, length(i) - 1)
lat_model <- lm(tmp_lat ~ poly(i, degree))
lat_extremes <- c(NA, NA)
if (which.min(tmp_lat) == 1) {
prev_lat <- predict(lat_model, data.frame(i = 0))
lat_extremes[1] <- tmp_lat[1] - (tmp_lat[1] - prev_lat) / 2
} else {
lat_extremes[1] <- min(tmp_lat)
}
if (which.max(tmp_lat) == length(tmp_lat)) {
next_lat <- predict(lat_model, data.frame(i = length(tmp_lat) + 1))
lat_extremes[2] <- tmp_lat[length(tmp_lat)] + (next_lat - tmp_lat[length(tmp_lat)]) / 2
} else {
lat_extremes[2] <- max(tmp_lat)
}
if (lat_extremes[1] < -90) {
lat_extremes[1] <- -90
} else if (lat_extremes[1] > 90) {
lat_extremes[1] <- 90
}
if (lat_extremes[2] < -90) {
lat_extremes[2] <- -90
} else if (lat_extremes[2] > 90) {
lat_extremes[2] <- 90
}
}
} else if (crop == FALSE) {
warning("Parameter 'crop' = 'FALSE'. The output grid range will follow parameter 'grid'.")
}
} else if (is.numeric(crop)) {
if (length(crop) != 4) {
stop("Paramrter 'crop' must be a logical value or a numeric vector of length 4: c(western border, eastern border, southern border, northern border.")
} else {
lon_extremes <- crop[1:2]
lat_extremes <- crop[3:4]
crop <- TRUE
}
} else {
stop("Parameter 'crop' must be a logical value or a numeric vector.")
}
if (!is.logical(force_remap)) {
stop("Parameter 'force_remap' must be a logical value.")
}
if (!is.character(write_dir)) {
stop("Parameter 'write_dir' must be a character string.")
}
if (!dir.exists(write_dir)) {
stop("Parameter 'write_dir' must point to an existing directory.")
}
interpolation_needed <- TRUE
if (!force_remap) {
if (!(grid_type == 'custom')) {
if (length(lons) == grid_lons && length(lats) == grid_lats) {
if (grid_type == 'regular') {
if (.isRegularVector(lons) && .isRegularVector(lats)) {
interpolation_needed <- FALSE
}
} else if (grid_type == 'gaussian') {
if (.isRegularVector(lons) && !.isRegularVector(lats)) {
interpolation_needed <- FALSE
}
}
}
}
}
found_lons <- lons
found_lats <- lats
if (interpolation_needed) {
if (nchar(Sys.which('cdo')[1]) < 1) {
stop("CDO must be installed in order to use the .CDORemap.")
}
cdo_version <- as.numeric_version(
strsplit(suppressWarnings(system2("cdo", args = '-V', stderr = TRUE))[[1]], ' ')[[1]][5]
)
warning("CDORemap: Using CDO version ", cdo_version, ".")
if ((cdo_version >= as.numeric_version('1.7.0')) && (method == 'con')) {
method <- 'ycon'
}
is_irregular <- FALSE
if (length(dim(lats)) > 1 && length(dim(lons)) > 1) {
is_irregular <- TRUE
}
attribute_backup <- attributes(data_array)
other_dims <- which(!(names(dim(data_array)) %in% c(lon_dim, lat_dim)))
permutation <- NULL
unlimited_dim <- NULL
dims_to_iterate <- NULL
total_slices <- 1
other_dims_per_chunk <- ifelse(is_irregular, 1, 2)
if (length(other_dims) > 1 || (length(other_dims) > 0 && (is_irregular))) {
if (!(length(dim(data_array)) %in% other_dims)) {
if (avoid_writes || is_irregular) {
dims_mod <- dim(data_array)
dims_mod[which(names(dim(data_array)) %in%
c(lon_dim, lat_dim))] <- 0
dim_to_move <- which.max(dims_mod)
permutation <- (1:length(dim(data_array)))[-dim_to_move]
permutation <- c(permutation, dim_to_move)
permutation_back <- sort(permutation, index.return = TRUE)$ix
dim_backup <- dim(data_array)
data_array <- aperm(data_array, permutation)
dim(data_array) <- dim_backup[permutation]
other_dims <- which(!(names(dim(data_array)) %in% c(lon_dim, lat_dim)))
} else {
other_dims_per_chunk <- 1
}
}
other_dims_ordered_by_size <- other_dims[sort(dim(data_array)[other_dims], index.return = TRUE)$ix]
dims_to_iterate <- sort(head(other_dims_ordered_by_size, length(other_dims) - other_dims_per_chunk))
if (length(dims_to_iterate) == 0) {
dims_to_iterate <- NULL
} else {
slices_to_iterate <- array(1:prod(dim(data_array)[dims_to_iterate]),
dim(data_array)[dims_to_iterate])
total_slices <- prod(dim(slices_to_iterate))
}
if ((other_dims_per_chunk > 1) || (other_dims_per_chunk > 0 && is_irregular)) {
unlimited_dim <- tail(sort(tail(other_dims_ordered_by_size, other_dims_per_chunk)), 1)
}
}
result_array <- NULL
lon_pos <- which(names(dim(data_array)) == lon_dim)
lat_pos <- which(names(dim(data_array)) == lat_dim)
dim_backup <- dim(data_array)
attributes(data_array) <- NULL
dim(data_array) <- dim_backup
names(dim(data_array)) <- paste0('dim', 1:length(dim(data_array)))
names(dim(data_array))[c(lon_pos, lat_pos)] <- c(lon_dim, lat_dim)
if (!is.null(unlimited_dim)) {
names(dim(data_array))[unlimited_dim] <- 'time'
}
if (length(dim(lons)) == 1) {
names(dim(lons)) <- lon_dim
}
if (length(dim(lats)) == 1) {
names(dim(lats)) <- lat_dim
}
if (length(dim(lons)) > 1) {
lon_var_name <- paste0(lon_dim, '_var')
} else {
lon_var_name <- lon_dim
}
if (length(dim(lats)) > 1) {
lat_var_name <- paste0(lat_dim, '_var')
} else {
lat_var_name <- lat_dim
}
if (is_irregular) {
metadata <- list(list(coordinates = paste(lon_var_name, lat_var_name)))
names(metadata) <- 'var'
attr(data_array, 'variables') <- metadata
}
names(attr(lons, 'variables')) <- lon_var_name
names(attr(lats, 'variables')) <- lat_var_name
if (!is.null(attr(lons, 'variables')[[1]][['dim']])) {
attr(lons, 'variables')[[1]][['dim']] <- NULL
}
if (!is.null(attr(lats, 'variables')[[1]][['dim']])) {
attr(lats, 'variables')[[1]][['dim']] <- NULL
}
lons_lats_taken <- FALSE
for (i in 1:total_slices) {
tmp_file <- tempfile('R_CDORemap_', write_dir, fileext = '.nc')
tmp_file2 <- tempfile('R_CDORemap_', write_dir, fileext = '.nc')
if (!is.null(dims_to_iterate)) {
slice_indices <- which(slices_to_iterate == i, arr.ind = TRUE)
subset <- Subset(data_array, dims_to_iterate, as.list(slice_indices), drop = 'selected')
if (is_irregular) {
pos_lon <- which(names(dim(subset)) == lon_dim)
pos_lat <- which(names(dim(subset)) == lat_dim)
pos_lon_dim_in_lons <- which(names(dim(lons)) == lon_dim)
pos_lat_dim_in_lons <- which(names(dim(lons)) == lat_dim)
if ((pos_lon > pos_lat && pos_lon_dim_in_lons < pos_lat_dim_in_lons) ||
(pos_lon < pos_lat && pos_lon_dim_in_lons > pos_lat_dim_in_lons)) {
new_pos <- 1:length(dim(subset))
new_pos[pos_lon] <- pos_lat
new_pos[pos_lat] <- pos_lon
subset <- .aperm2(subset, new_pos)
}
if ('time' %in% names(dim(subset)) &&
which(names(dim(subset)) == 'time') != length(dim(subset))) {
new_pos <- 2:length(dim(subset))
new_pos[length(dim(subset))] <- 1
subset <- .aperm2(subset, new_pos)
}
}
ArrayToNetCDF(setNames(list(subset, lons, lats), c('var', lon_var_name, lat_var_name)), tmp_file)
} else {
if (is_irregular) {
pos_lon <- which(names(dim(data_array)) == lon_dim)
pos_lat <- which(names(dim(data_array)) == lat_dim)
pos_lon_dim_in_lons <- which(names(dim(lons)) == lon_dim)
pos_lat_dim_in_lons <- which(names(dim(lons)) == lat_dim)
if ((pos_lon > pos_lat && pos_lon_dim_in_lons < pos_lat_dim_in_lons) ||
(pos_lon < pos_lat && pos_lon_dim_in_lons > pos_lat_dim_in_lons)) {
new_pos <- 1:length(dim(data_array))
new_pos[pos_lon] <- pos_lat
new_pos[pos_lat] <- pos_lon
data_array <- .aperm2(data_array, new_pos)
}
}
ArrayToNetCDF(setNames(list(data_array, lons, lats), c('var', lon_var_name, lat_var_name)), tmp_file)
}
sellonlatbox <- ''
if (crop) {
sellonlatbox <- paste0('sellonlatbox,', format(lon_extremes[1], scientific = FALSE),
',', format(lon_extremes[2], scientific = FALSE),
',', format(lat_extremes[1], scientific = FALSE),
',', format(lat_extremes[2], scientific = FALSE), ' -')
}
err <- try({
system(paste0("cdo -s ", sellonlatbox, "remap", method, ",", grid, " ", tmp_file, " ", tmp_file2))
})
file.remove(tmp_file)
    if (inherits(err, "try-error") || err > 0) {
stop("CDO remap failed.")
}
ncdf_remapped <- nc_open(tmp_file2)
if (!lons_lats_taken) {
found_dim_names <- sapply(ncdf_remapped$var$var$dim, '[[', 'name')
found_lon_dim <- found_dim_names[which(found_dim_names %in% .KnownLonNames())[1]]
found_lat_dim <- found_dim_names[which(found_dim_names %in% .KnownLatNames())[1]]
found_lon_dim_size <- length(ncdf_remapped$dim[[found_lon_dim]]$vals)
found_lat_dim_size <- length(ncdf_remapped$dim[[found_lat_dim]]$vals)
found_var_names <- names(ncdf_remapped$var)
found_lon_var_name <- which(found_var_names %in% .KnownLonNames())
found_lat_var_name <- which(found_var_names %in% .KnownLatNames())
if (length(found_lon_var_name) > 0) {
found_lon_var_name <- found_var_names[found_lon_var_name[1]]
} else {
found_lon_var_name <- NULL
}
if (length(found_lat_var_name) > 0) {
found_lat_var_name <- found_var_names[found_lat_var_name[1]]
} else {
found_lat_var_name <- NULL
}
if (length(found_lon_var_name) > 0) {
found_lons <- ncvar_get(ncdf_remapped, found_lon_var_name,
collapse_degen = FALSE)
} else {
found_lons <- ncdf_remapped$dim[[found_lon_dim]]$vals
dim(found_lons) <- found_lon_dim_size
}
if (length(found_lat_var_name) > 0) {
found_lats <- ncvar_get(ncdf_remapped, found_lat_var_name,
collapse_degen = FALSE)
} else {
found_lats <- ncdf_remapped$dim[[found_lat_dim]]$vals
dim(found_lats) <- found_lat_dim_size
}
if (length(dim(lons)) == length(dim(found_lons))) {
new_lon_name <- lon_dim
} else {
new_lon_name <- found_lon_dim
}
if (length(dim(lats)) == length(dim(found_lats))) {
new_lat_name <- lat_dim
} else {
new_lat_name <- found_lat_dim
}
if (length(dim(found_lons)) > 1) {
if (which(sapply(ncdf_remapped$var$lon$dim, '[[', 'name') == found_lon_dim) <
which(sapply(ncdf_remapped$var$lon$dim, '[[', 'name') == found_lat_dim)) {
names(dim(found_lons)) <- c(new_lon_name, new_lat_name)
} else {
names(dim(found_lons)) <- c(new_lat_name, new_lon_name)
}
} else {
names(dim(found_lons)) <- new_lon_name
}
if (length(dim(found_lats)) > 1) {
if (which(sapply(ncdf_remapped$var$lat$dim, '[[', 'name') == found_lon_dim) <
which(sapply(ncdf_remapped$var$lat$dim, '[[', 'name') == found_lat_dim)) {
names(dim(found_lats)) <- c(new_lon_name, new_lat_name)
} else {
names(dim(found_lats)) <- c(new_lat_name, new_lon_name)
}
} else {
names(dim(found_lats)) <- new_lat_name
}
lons_lats_taken <- TRUE
}
if (!is.null(dims_to_iterate)) {
if (is.null(result_array)) {
if (return_array) {
new_dims <- dim(data_array)
new_dims[c(lon_dim, lat_dim)] <- c(found_lon_dim_size, found_lat_dim_size)
lon_pos <- which(names(new_dims) == lon_dim)
lat_pos <- which(names(new_dims) == lat_dim)
if (is_irregular) {
if (lon_pos > lat_pos) {
new_pos <- 1:length(new_dims)
new_pos[lon_pos] <- lat_pos
new_pos[lat_pos] <- lon_pos
new_dims <- new_dims[new_pos]
}
}
result_array <- array(dim = new_dims)
store_indices <- as.list(rep(TRUE, length(dim(result_array))))
}
}
if (return_array) {
store_indices[dims_to_iterate] <- as.list(slice_indices)
if (is_irregular) {
test_dims <- dim(ncvar_get(ncdf_remapped, 'var',
collapse_degen = FALSE))
test_dims <- test_dims[which(test_dims > 1)]
pos_test_dims <- match(dim(result_array), test_dims)
if (is.unsorted(pos_test_dims, na.rm = TRUE)) {
pos_new_dims <- 1:length(dim(result_array))
pos_new_dims[which(!is.na(pos_test_dims))] <-
match(test_dims, dim(result_array))
backup_result_array_dims <- dim(result_array)
dim(result_array) <- dim(result_array)[pos_new_dims]
}
}
result_array <- do.call('[<-', c(list(x = result_array), store_indices,
list(value = ncvar_get(ncdf_remapped, 'var', collapse_degen = FALSE))))
}
} else {
new_dims <- dim(data_array)
new_dims[c(lon_dim, lat_dim)] <- c(found_lon_dim_size, found_lat_dim_size)
result_array <- ncvar_get(ncdf_remapped, 'var', collapse_degen = FALSE)
dim(result_array) <- new_dims
}
nc_close(ncdf_remapped)
file.remove(tmp_file2)
}
  if (is_irregular && !is.null(dims_to_iterate)) {
if (exists('pos_new_dims')) {
pos_new_dims <- 1:length(dim(result_array))
dims_to_change <- match(backup_result_array_dims, dim(result_array))
pos_new_dims[which(dims_to_change != 1)] <-
dims_to_change[which(dims_to_change != 1)]
result_array <- .aperm2(result_array, pos_new_dims)
}
}
if (!is.null(permutation)) {
dim_backup <- dim(result_array)
result_array <- aperm(result_array, permutation_back)
dim(result_array) <- dim_backup[permutation_back]
}
result_is_irregular <- FALSE
if (length(dim(found_lats)) > 1 && length(dim(found_lons)) > 1) {
result_is_irregular <- TRUE
}
attribute_backup[['dim']][which(names(dim(result_array)) == lon_dim)] <- dim(result_array)[lon_dim]
attribute_backup[['dim']][which(names(dim(result_array)) == lat_dim)] <- dim(result_array)[lat_dim]
names(attribute_backup[['dim']])[which(names(dim(result_array)) == lon_dim)] <- new_lon_name
names(attribute_backup[['dim']])[which(names(dim(result_array)) == lat_dim)] <- new_lat_name
if (!is.null(attribute_backup[['variables']]) && (length(attribute_backup[['variables']]) > 0)) {
for (var in 1:length(attribute_backup[['variables']])) {
if (length(attribute_backup[['variables']][[var]][['dim']]) > 0) {
for (dim in 1:length(attribute_backup[['variables']][[var]][['dim']])) {
dim_name <- NULL
if ('name' %in% names(attribute_backup[['variables']][[var]][['dim']][[dim]])) {
dim_name <- attribute_backup[['variables']][[var]][['dim']][[dim]][['name']]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
attribute_backup[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name
} else {
attribute_backup[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name
}
}
} else if (!is.null(names(attribute_backup[['variables']][[var]][['dim']]))) {
dim_name <- names(attribute_backup[['variables']][[var]][['dim']])[dim]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
names(attribute_backup[['variables']][[var]][['dim']])[which(names(attribute_backup[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name
} else {
names(attribute_backup[['variables']][[var]][['dim']])[which(names(attribute_backup[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name
}
}
}
if (!is.null(dim_name)) {
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
new_vals <- found_lons[TRUE]
} else if (dim_name == lat_dim) {
new_vals <- found_lats[TRUE]
}
if (!is.null(attribute_backup[['variables']][[var]][['dim']][[dim]][['len']])) {
attribute_backup[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals)
}
if (!is.null(attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']])) {
if (!result_is_irregular) {
attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals
} else {
attribute_backup[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals)
}
}
}
}
}
}
if (!is_irregular && result_is_irregular) {
attribute_backup[['coordinates']] <- paste(lon_var_name, lat_var_name)
} else if (is_irregular && !result_is_irregular) {
attribute_backup[['coordinates']] <- NULL
}
}
}
attributes(result_array) <- attribute_backup
lons_attr_bk[['dim']] <- dim(found_lons)
if (!is.null(lons_attr_bk[['variables']]) && (length(lons_attr_bk[['variables']]) > 0)) {
for (var in 1:length(lons_attr_bk[['variables']])) {
if (length(lons_attr_bk[['variables']][[var]][['dim']]) > 0) {
dims_to_remove <- NULL
for (dim in 1:length(lons_attr_bk[['variables']][[var]][['dim']])) {
dim_name <- NULL
if ('name' %in% names(lons_attr_bk[['variables']][[var]][['dim']][[dim]])) {
dim_name <- lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name
} else {
lons_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name
}
}
} else if (!is.null(names(lons_attr_bk[['variables']][[var]][['dim']]))) {
dim_name <- names(lons_attr_bk[['variables']][[var]][['dim']])[dim]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
names(lons_attr_bk[['variables']][[var]][['dim']])[which(names(lons_attr_bk[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name
} else {
names(lons_attr_bk[['variables']][[var]][['dim']])[which(names(lons_attr_bk[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name
}
}
}
if (!is.null(dim_name)) {
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
new_vals <- found_lons[TRUE]
} else if (dim_name == lat_dim) {
new_vals <- found_lats[TRUE]
if (!result_is_irregular) {
dims_to_remove <- c(dims_to_remove, dim)
}
}
if (!is.null(lons_attr_bk[['variables']][[var]][['dim']][[dim]][['len']])) {
lons_attr_bk[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals)
}
if (!is.null(lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']])) {
if (!result_is_irregular) {
lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals
} else {
lons_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals)
}
}
}
}
}
      if (length(dims_to_remove) > 0) {
        lons_attr_bk[['variables']][[var]][['dim']] <- lons_attr_bk[['variables']][[var]][['dim']][-dims_to_remove]
      }
}
}
names(lons_attr_bk[['variables']])[1] <- lon_var_name
lons_attr_bk[['variables']][[1]][['units']] <- 'degrees_east'
}
attributes(found_lons) <- lons_attr_bk
lats_attr_bk[['dim']] <- dim(found_lats)
if (!is.null(lats_attr_bk[['variables']]) && (length(lats_attr_bk[['variables']]) > 0)) {
for (var in 1:length(lats_attr_bk[['variables']])) {
if (length(lats_attr_bk[['variables']][[var]][['dim']]) > 0) {
dims_to_remove <- NULL
for (dim in 1:length(lats_attr_bk[['variables']][[var]][['dim']])) {
dim_name <- NULL
if ('name' %in% names(lats_attr_bk[['variables']][[var]][['dim']][[dim]])) {
dim_name <- lats_attr_bk[['variables']][[var]][['dim']][[dim]][['name']]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
            lats_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lon_name
          } else {
            lats_attr_bk[['variables']][[var]][['dim']][[dim]][['name']] <- new_lat_name
}
}
} else if (!is.null(names(lats_attr_bk[['variables']][[var]][['dim']]))) {
dim_name <- names(lats_attr_bk[['variables']][[var]][['dim']])[dim]
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
names(lats_attr_bk[['variables']][[var]][['dim']])[which(names(lats_attr_bk[['variables']][[var]][['dim']]) == lon_dim)] <- new_lon_name
} else {
names(lats_attr_bk[['variables']][[var]][['dim']])[which(names(lats_attr_bk[['variables']][[var]][['dim']]) == lat_dim)] <- new_lat_name
}
}
}
if (!is.null(dim_name)) {
if (dim_name %in% c(lon_dim, lat_dim)) {
if (dim_name == lon_dim) {
new_vals <- found_lons[TRUE]
if (!result_is_irregular) {
dims_to_remove <- c(dims_to_remove, dim)
}
} else if (dim_name == lat_dim) {
new_vals <- found_lats[TRUE]
}
if (!is.null(lats_attr_bk[['variables']][[var]][['dim']][[dim]][['len']])) {
lats_attr_bk[['variables']][[var]][['dim']][[dim]][['len']] <- length(new_vals)
}
if (!is.null(lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']])) {
if (!result_is_irregular) {
lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- new_vals
} else {
lats_attr_bk[['variables']][[var]][['dim']][[dim]][['vals']] <- 1:length(new_vals)
}
}
}
}
}
      if (length(dims_to_remove) > 0) {
        lats_attr_bk[['variables']][[var]][['dim']] <- lats_attr_bk[['variables']][[var]][['dim']][-dims_to_remove]
      }
}
}
names(lats_attr_bk[['variables']])[1] <- lat_var_name
lats_attr_bk[['variables']][[1]][['units']] <- 'degrees_north'
}
attributes(found_lats) <- lats_attr_bk
}
list(data_array = if (return_array) {
if (interpolation_needed) {
result_array
} else {
data_array
}
} else {
NULL
},
lons = found_lons, lats = found_lats)
}
micro_rocky_mtn <- function(modsum, ..., main = NULL,
ylab = NULL, subtitle = NULL,
pval_lines = TRUE, pval_text = TRUE, sig_text = TRUE,
facet_labels = NULL, alpha = 0.05, lwd = 2, lty = 1){
if('Model_Type' %nin% names(modsum)){
stop("'modsum' must be output from either nb_mods or bb_mods")
}
if(modsum$Model_Type %nin% c('nb_mod', 'bb_mod') ){
stop("'modsum' must be output from either nb_mods or bb_mods")
}
if(missing(...)) stop("NB_RockMtn requires a model coefficient")
if(is.null(ylab)) ylab <- expression("log"[10]*" p-value")
CC <- modsum$Convergent_Summary %>%
dplyr::filter(.data$Coef != "(Intercept)", stringr::str_detect(.data$Coef, cov_str(...))) %>%
dplyr::mutate(FDR_Pval = ifelse(.data$FDR_Pval == 0.0000, 0.0005, .data$FDR_Pval),
phyl = sapply(.data$Taxa, phy_fun)) %>%
phyl_ord()
CC %<>%
dplyr::mutate(Coef = factor(.data$Coef, levels = unique(.data$Coef)),
rnum = seq(1,nrow(CC)),
ys = rep(0,nrow(CC)),
ye = ifelse(.data$Beta < 0, log(.data$FDR_Pval, base = 10), -log(.data$FDR_Pval, base = 10)))
pline <- c(-1,1)*sapply(rep(c(0.01, 0.05, 0.1), 2), log, base = 10)
gg <- ggplot2::ggplot(CC, ggplot2::aes(x = .data$rnum, y = .data$ye))
if(pval_lines){
gg <- gg + ggplot2::geom_hline(yintercept = pline, linetype = "dotdash")
}
if(pval_text){
gg <- gg +
ggplot2::scale_y_continuous(breaks = pline,
labels = c("0.01","0.05","0.10", "0.01","0.05","0.10"))
} else{
gg <- gg +
ggplot2::theme(axis.text.y = ggplot2::element_blank(),
axis.ticks.y = ggplot2::element_blank())
}
gg <- gg +
ggplot2::geom_segment(ggplot2::aes(x = .data$rnum, xend = .data$rnum,
y = .data$ys, yend = .data$ye, colour = .data$phyl),
size = lwd) +
ggplot2::theme_bw() +
ggplot2::labs(colour = "Phylum", y = ylab, title = main, subtitle = subtitle,
x = NULL) +
ggplot2::theme(axis.text.x=ggplot2::element_blank(),
axis.ticks.x=ggplot2::element_blank())
if(length(unique(CC$Coef)) > 1L){
    if(!is.null(facet_labels) && is.null(names(facet_labels))){
names(facet_labels) <- unique(CC$Coef)
}
gg <- gg + ggplot2::facet_wrap( ~ Coef, labeller = ggplot2::labeller(Coef = facet_labels))
}
if(sig_text){
gg <- gg + ggrepel::geom_label_repel(data = CC %>% dplyr::filter(.data$FDR_Pval < alpha),
ggplot2::aes(label = .data$Taxa), show.legend = F)
}
gg +
ggplot2::geom_hline(yintercept = 0)
}
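# Usage sketch (all names hypothetical): 'nb_out' would be the list returned by
# nb_mods() from this package, and 'Group' a covariate fit in those models.
# micro_rocky_mtn(nb_out, Group, main = "Rocky mountain plot", alpha = 0.05)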
"absval.dipred.vs.cov.model.comp" <-
function(object,
object.ref = NULL,
type = NULL,
ylb=expression(paste("|", Delta, "IPRED|")),
main="Default",
...) {
if (is.null(object.ref)) {
ref.list <- get.refrunno()
if(exists(".ref.db")){
object.ref <- eval(parse(text=".ref.db"))
} else {
return()
}
    if(is.null(ref.list))
return()
}
if(dim(object@Data)[1] != dim(object.ref@Data)[1]) {
cat("The current database and the reference database do not have\n")
cat("the same number of lines!\n")
return()
}
if ((is.null(xvardef("idlab",object))) || (is.null(xvardef("ipred",object)))) {
cat("The required variables (ID label, IPRED) are not set in the database!\n")
return()
}
if(any(is.null(xvardef("covariates",object)))) {
return(cat("No covariates found in the current database!\n"))
}
object@Data$dIPRED <- abs(object@Data[,xvardef("ipred", object)] -
object.ref@Data[,xvardef("ipred", object.ref)])
number.of.plots <- 0
for (i in xvardef("covariates", object)) {
number.of.plots <- number.of.plots + 1
}
plotList <- vector("list",number.of.plots)
plot.num <- 0
for (i in xvardef("covariates", object)) {
if (is.null(type)) {
if (!is.factor(object@Data[,i])) {
type <- "p"
} else {
        type <- object@[email protected]$type
}
}
xplot <- xpose.plot.default(i,
"dIPRED",
object,
ylb = ylb,
type = type,
                               pass.plot.list=TRUE,
main=NULL,
...)
plot.num <- plot.num+1
plotList[[plot.num]] <- xplot
}
default.plot.title <- paste("|IPRED_(Run", object@Runno,
") - IPRED_(Run",object.ref@Runno,
")| vs. Covariates",sep="")
plotTitle <- xpose.multiple.plot.title(object=object,
plot.text = default.plot.title,
no.runno=TRUE,
main=main,
...)
obj <- xpose.multiple.plot(plotList,plotTitle,...)
return(obj)
}
finiterange <- function (x)
{
x <- x[! is.na(x)]
x <- x[is.finite(x)]
if (length(x)==0) {
return(c(NA, NA))
}
range(x)
}
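# Worked examples of finiterange():
# finiterange(c(1, NA, Inf, -3))   # drops NA/Inf, returns c(-3, 1)
# finiterange(c(NA, Inf, -Inf))    # nothing finite left, returns c(NA, NA)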
simulate.gp <- function(object, nsim = 1L, seed = NULL,
newdata = NULL,
cond = TRUE,
trendKnown = FALSE,
newVarNoise = NULL,
nuggetSim = 1e-8,
checkNames = TRUE,
output = c("list", "matrix"),
label = "y", unit = "",
...) {
mc <- match.call()
output <- match.arg(output)
noise <- object$noise
if (noise && object$varNoise == 0.0) noise <- FALSE
newNoise <- FALSE
if (is.null(newVarNoise)) {
if (noise) {
newVarNoise <- object$varNoise
newNoise <- TRUE
}
} else {
if (newVarNoise < 0.0) stop("'varNoise' must be >= 0.0")
newNoise <- (newVarNoise > 0.0)
}
N <- nsim
n <- nrow(object$F)
p <- ncol(object$F)
d <- ncol(object$X)
nms <- object$inputNames
if (missing(newdata)) {
XNew <- object$X
FNew <- object$F
} else {
        XNew <- newdata[ , nms, drop = FALSE]
tt <- delete.response(terms(object))
mf <- model.frame(tt, data = data.frame(newdata))
FNew <- model.matrix(tt, data = mf)
}
nNew <- nrow(XNew)
if (!cond) {
if (!trendKnown) {
Znorm <- array(rnorm(p * N), dim = c(p, N))
dBetaSim <- backsolve(object$RStar, Znorm)
trendNewSim <- FNew %*% sweep(x = dBetaSim, MARGIN = 1,
STATS = object$betaHat, FUN = "+")
} else {
trend <- FNew %*% object$betaHat
trendNewSim <- array(trend, dim = c(nNew, N))
}
zetaNewSim <- simulate(object = object$covariance,
nsim = nsim,
seed = seed,
X = XNew, checkNames = checkNames, ...)
yNewSim <- trendNewSim + zetaNewSim
} else {
kNew <- covMat(object$covariance, X = object$X, Xnew = XNew,
compGrad = FALSE)
KNew <- covMat(object$covariance, X = XNew, Xnew = XNew,
compGrad = FALSE)
kNewStar <- forwardsolve(object$L, kNew)
KNewCond <- KNew - crossprod(kNewStar)
diag(KNewCond) <- diag(KNewCond) + nuggetSim
LNewCond <- t(chol(KNewCond))
if (noise) {
smallNoise <- (object$varNoise < 1e-8)
K <- covMat(object$covariance, X = object$X, checkNames = checkNames,
compGrad = FALSE)
KStar <- forwardsolve(object$L, K)
if (!smallNoise) {
KCond <- K - crossprod(KStar)
diag(KCond) <- diag(KCond) + nuggetSim
LCond <- t(chol(KCond))
}
if (!trendKnown) {
Znorm <- array(rnorm(p * N), dim = c(p, N))
dBetaSim <- backsolve(object$RStar, Znorm)
trendNewSim <- FNew %*% sweep(x = dBetaSim, MARGIN = 1,
STATS = object$betaHat, FUN = "+")
E <- -object$FStar %*% dBetaSim
E <- sweep(x = E, MARGIN = 1, STATS = object$eStar, FUN = "+")
E <- t(KStar) %*% E
ZSim <- array(rnorm(n * N), dim = c(n, N))
if (smallNoise) {
ZSim <- E + sqrt(object$varNoise) * ZSim
} else {
ZSim <- E + LCond %*% ZSim
}
} else {
trend <- FNew %*% object$betaHat
trendNewSim <- array(trend, dim = c(nNew, N))
ZSim <- array(rnorm(n * N), dim = c(n, N))
if (smallNoise) {
ZSim <- sqrt(object$varNoise) * ZSim
} else {
ZSim <- LCond %*% ZSim
}
eStarMod <- t(KStar) %*% object$eStar
ZSim <- sweep(x = ZSim, MARGIN = 1, STATS = eStarMod, FUN = "+")
}
ZNewSim <- array(rnorm(nNew * N), dim = c(nNew, N))
ZNewSim <- LNewCond %*% ZNewSim
ZNewSim <- ZNewSim + t(kNewStar) %*% forwardsolve(object$L, ZSim)
yNewSim <- trendNewSim + ZNewSim
} else {
if (!trendKnown) {
Znorm <- array(rnorm(p * N), dim = c(p, N))
BetaSim <- backsolve(object$RStar, Znorm)
BetaSim <- sweep(x = BetaSim, MARGIN = 1, STATS = object$betaHat,
FUN = "+")
trendSim <- object$F %*% BetaSim
ZSim <- sweep(x = -trendSim, MARGIN = 1, STATS = object$y,
FUN = "+")
trendNewSim <- FNew %*% BetaSim
ZNewSim <- array(rnorm(nNew * N), dim = c(nNew, N))
ZNewSim <- LNewCond %*% ZNewSim
ZNewSim <- ZNewSim + t(kNewStar) %*% forwardsolve(object$L, ZSim)
yNewSim <- trendNewSim + ZNewSim
} else {
trendNew <- FNew %*% object$betaHat
trendNewSim <- array(trendNew, dim = c(nNew, N))
shift <- trendNew + t(kNewStar) %*% object$eStar
ZNewSim <- array(rnorm(nNew * N), dim = c(nNew, N))
ZNewSim <- LNewCond %*% ZNewSim
yNewSim <- sweep(ZNewSim, MARGIN = 1, STATS = shift, FUN = "+")
}
}
}
if (newNoise) {
yNewSim <- yNewSim + sqrt(newVarNoise) *
array(rnorm(nNew * N), dim = c(nNew, N))
}
if (output == "list") {
yNewSim <- list(X = object$X, F = object$F, y = object$y,
XNew = XNew, FNew = FNew,
sim = yNewSim,
trend = trendNewSim,
trendKnown = trendKnown,
noise = noise,
newVarNoise = newVarNoise,
label = label,
unit = unit,
Call = mc)
class(yNewSim) <- "simulate.gp"
return(yNewSim)
} else {
attr(yNewSim, "trendKnown") <- trendKnown
return(yNewSim)
}
}
plot.simulate.gp <- function(x, y,
col = list("sim" = "SpringGreen3", "trend" = "orangered"),
show = c("sim" = TRUE, "trend" = TRUE, "y" = TRUE),
...) {
if (ncol(x$X) > 1L) {
stop("For now, this function only for the one-dimensional case")
}
X <- drop(as.matrix(x$X))
XNew <- drop(as.matrix(x$XNew))
ylab <- x$label
if (nchar(x$unit)) ylab <- sprintf("%s (%s)", ylab, x$unit)
plot(X, x$y, type = "n",
xlim = range(X, XNew),
ylim = range(x$sim, x$trend, x$y),
ylab = ylab,
...)
nSim <- ncol(x$sim)
alpha <- exp(-sqrt(((nSim - 1)/ 200)))
if (show["trend"]) {
matlines(XNew, x$trend, pch = 16,
col = translude(col[["trend"]], alpha = alpha))
}
if (show["sim"]) {
matlines(XNew, x$sim, pch = 16,
col = translude(col[["sim"]], alpha = alpha))
}
if (show["y"]) {
points(X, x$y, pch = 21, bg = "white", lwd = 2)
}
}
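# Usage sketch (hedged; assumes a one-dimensional fitted 'gp' object from the
# kergp package, here called 'fit', with a single input named 'x'):
# xNew <- data.frame(x = seq(0, 1, length.out = 200))
# sims <- simulate(fit, nsim = 30, newdata = xNew, cond = TRUE, output = "list")
# plot(sims)   # dispatches to plot.simulate.gp() defined above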
edge_bundle_hammer <- function(object,xy,bw=0.05,decay=0.7){
if (!requireNamespace('reticulate', quietly = TRUE)) {
stop('The `reticulate` package is required for this functionality')
}
if(any(class(object)=="igraph")){
if (!requireNamespace('igraph', quietly = TRUE)) {
stop('The `igraph` package is required for this functionality')
}
nodes <- data.frame(name=paste0("node",0:(igraph::vcount(object)-1)),x=xy[,1],y=xy[,2])
el <- igraph::get.edgelist(object,names = FALSE)
el1 <- data.frame(source=el[,1]-1,target=el[,2]-1)
} else if(any(class(object)=="tbl_graph")){
if (!requireNamespace('tidygraph', quietly = TRUE)) {
stop('The `tidygraph` package is required for this functionality')
}
object <- tidygraph::as.igraph(object)
nodes <- data.frame(name=paste0("node",0:(igraph::vcount(object)-1)),x=xy[,1],y=xy[,2])
el <- igraph::get.edgelist(object,names = FALSE)
el1 <- data.frame(source=el[,1]-1,target=el[,2]-1)
} else if(any(class(object)=="network")){
nodes <- data.frame(name=paste0("node",0:(network::get.network.attribute(object,"n")-1)),x=xy[,1],y=xy[,2])
el <- network::as.edgelist(object)
el1 <- data.frame(source=el[,1]-1,target=el[,2]-1)
} else{
stop("only `igraph`, `network` or `tbl_graph` objects supported.")
}
data_bundle <- shader_env$datashader_bundling$hammer_bundle(nodes,el1,initial_bandwidth = bw,decay = decay)
data_bundle$group <- is.na(data_bundle$y)+0
data_bundle$group <- cumsum(data_bundle$group)+1
data_bundle <- data_bundle[!is.na(data_bundle$y),]
data_bundle$index <- unlist(sapply(table(data_bundle$group),function(x) seq(0,1,length.out=x)))
data_bundle[,c("x","y","index","group")]
}
install_bundle_py <- function(method = "auto", conda = "auto") {
if (!requireNamespace('reticulate', quietly = TRUE)) {
stop('The `reticulate` package is required for this functionality')
}
reticulate::py_install("datashader", method = method, conda = conda, pip = TRUE)
reticulate::py_install("scikit-image", method = method, conda = conda, pip = TRUE)
}
shader_env <- new.env(parent = emptyenv())
.onLoad <- function(libname, pkgname) {
reticulate::configure_environment(pkgname)
assign("datashader_bundling", reticulate::import("datashader.bundling", delay_load = TRUE), shader_env)
}
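# Usage sketch (hedged): needs the Python 'datashader' backend, installable via
# install_bundle_py(), plus the igraph package.
# g  <- igraph::sample_gnp(100, 0.05)
# xy <- igraph::layout_with_fr(g)
# bundled <- edge_bundle_hammer(g, xy, bw = 0.05, decay = 0.7)
# head(bundled)   # columns x, y, index, group: one polyline per bundled edge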
pmml.nnet <- function(model,
model_name = "NeuralNet_model",
app_name = "SoftwareAG PMML Generator",
description = "Neural Network Model",
copyright = NULL,
model_version = NULL,
transforms = NULL,
missing_value_replacement = NULL,
...) {
if (!inherits(model, "nnet")) stop("Not a legitimate nnet object")
number.of.neural.layers <- length(model$n) - 1
field <- NULL
numerical <- NULL
if (model$call[[1]] != "nnet.formula") {
if (is.null(attributes(model$fitted.values)$dimnames[[2]][1])) {
numerical <- TRUE
} else {
numerical <- FALSE
tmp <- c()
for (i in 1:length(attributes(model$fitted.values)$dimnames[[2]]))
{
tmp <- c(tmp, attributes(model$fitted.values)$dimnames[[2]][i])
}
levels <- list(tmp)
names(levels) <- "lev"
model <- c(model, levels)
trms <- list("terms", name = "Variable Information")
names(trms) <- "terms"
number.of.inputs <- model$n[1]
allnames <- c("y")
input.names <- c()
if (numerical) {
classes <- c("numeric")
} else {
classes <- c("factor")
}
for (i in 1:number.of.inputs)
{
tmp <- paste("x", i, sep = "")
allnames <- c(allnames, tmp)
input.names <- c(input.names, tmp)
classes <- c(classes, "numeric")
}
trms <- list(name = "variable information")
names(trms) <- "terms"
attr(trms$terms, "term.labels") <- input.names
attr(trms$terms, "dataClasses") <- classes
names(attributes(trms$terms)$dataClasses) <- allnames
model$call[[1]] <- "nnet.formula"
model <- c(model, trms)
attr(model, "class") <- c("nnet.formula", "nnet")
}
}
if (model$call[[1]] == "nnet.formula") {
terms <- attributes(model$terms)
field$name <- names(terms$dataClasses)
field$class <- terms$dataClasses
target <- field$name[1]
number.of.fields <- length(terms$term.labels) + 1
number.of.inputs <- length(terms$term.labels)
} else {
number.of.fields <- model$n[1] + 1
number.of.inputs <- model$n[1]
target <- "y"
field$name[1] <- target
field$class[[field$name[1]]] <- "numeric"
for (i in 1:number.of.inputs)
{
tmp <- paste("x", i, sep = "")
field$name[i + 1] <- tmp
field$class[[field$name[i + 1]]] <- "numeric"
}
}
if (length(grep("^as.factor\\(", field$name[1]))) {
field$name[1] <- sub("^as.factor\\((.*)\\)", "\\1", field$name[1])
names(field$class)[1] <- sub("^as.factor\\((.*)\\)", "\\1", names(field$class)[1])
names(field$levels)[1] <- sub("^as.factor\\((.*)\\)", "\\1", names(field$levels)[1])
}
target <- field$name[1]
number.of.fields <- length(field$name)
normalization.method <- "none"
skipLayers <- FALSE
linearOutputUnits <- FALSE
if (length(model$call$skip) && model$call$skip) {
skipLayers <- TRUE
}
if (model$nunits > model$nsunits) {
linearOutputUnits <- TRUE
}
if (model$softmax) {
normalization.method <- "softmax"
}
if (model$censored) {
stop("PMML does not support the censored variant of softmax!")
}
if (field$class[[field$name[1]]] == "factor") {
field$levels[[field$name[1]]] <- model$lev
}
for (i in seq_len(number.of.inputs)) {
if (field$class[[field$name[i + 1]]] == "factor") {
field$levels[[field$name[i + 1]]] <- model$xlevels[[field$name[i + 1]]]
}
}
pmml <- .pmmlRootNode()
pmml <- append.XMLNode(pmml, .pmmlHeader(
description, copyright, model_version,
app_name
))
pmml <- append.XMLNode(pmml, .pmmlDataDictionary(field, transformed = transforms))
if (model$n[length(model$n)] == 1 && field$class[[field$name[1]]] == "factor") {
temp <- number.of.neural.layers + 1
} else {
temp <- number.of.neural.layers
}
if (field$class[[field$name[1]]] == "factor") {
the.model <- xmlNode("NeuralNetwork",
attrs = c(
modelName = model_name,
functionName = "classification",
numberOfLayers = temp,
activationFunction = "logistic"
)
)
} else {
the.model <- xmlNode("NeuralNetwork",
attrs = c(
modelName = model_name,
functionName = "regression",
numberOfLayers = temp,
activationFunction = "logistic"
)
)
}
the.model <- append.XMLNode(the.model, .pmmlMiningSchema(field, target, transformed = transforms, missing_value_replacement = missing_value_replacement))
the.model <- append.XMLNode(the.model, .pmmlOutput(field, target))
if (!is.null(transforms)) {
the.model <- append.XMLNode(the.model, .pmmlLocalTransformations(field, transforms, NULL))
}
neuralInputs <- xmlNode("NeuralInputs",
attrs = c(numberOfInputs = as.numeric(model$n[1]))
)
input_count <- 1
factor_count <- 1
for (i in seq_len(number.of.inputs))
{
if (terms$dataClasses[[terms$term.labels[i]]] == "factor") {
number.of.values <- length(model$xlevels[[factor_count]])
usedValues <- model$xlevels[[factor_count]]
factor_count <- factor_count + 1
for (j in 1:number.of.values)
{
if (j > 1)
{
neuralInputNode <- xmlNode("NeuralInput",
attrs = c(id = as.numeric(input_count))
)
input_count <- input_count + 1
fieldName <- paste("derivedNI_", terms$term.labels[i], sep = "")
fieldName <- paste(fieldName, usedValues[j], sep = "")
derivedFieldNode <- xmlNode("DerivedField",
attrs = c(
name = fieldName,
optype = "continuous",
dataType = "double"
)
)
normDiscreteNode <- xmlNode("NormDiscrete",
attrs = c(
field = terms$term.labels[i],
value = usedValues[j]
)
)
derivedFieldNode <- append.XMLNode(derivedFieldNode, normDiscreteNode)
neuralInputNode <- append.XMLNode(neuralInputNode, derivedFieldNode)
neuralInputs <- append.XMLNode(neuralInputs, neuralInputNode)
}
}
} else {
neuralInputNode <- xmlNode("NeuralInput",
attrs = c(id = as.numeric(input_count))
)
input_count <- input_count + 1
name <- terms$term.labels[i]
fieldName <- paste("derivedNI_", name, sep = "")
derivedFieldNode <- xmlNode("DerivedField",
attrs = c(
name = fieldName,
optype = "continuous",
dataType = "double"
)
)
fieldRefNode <- xmlNode("FieldRef",
attrs = c(field = terms$term.labels[i])
)
derivedFieldNode <- append.XMLNode(derivedFieldNode, fieldRefNode)
neuralInputNode <- append.XMLNode(neuralInputNode, derivedFieldNode)
neuralInputs <- append.XMLNode(neuralInputs, neuralInputNode)
}
}
the.model <- append.XMLNode(the.model, neuralInputs)
number.of.inputs <- model$n[1]
wtsID <- 1
neuronID <- number.of.inputs
previous.number.of.neurons <- number.of.inputs
for (i in 1:number.of.neural.layers)
{
number.of.neurons <- model$n[i + 1]
if (i == number.of.neural.layers)
{
if (number.of.neurons == 1 && field$class[[field$name[1]]] == "factor") {
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(numberOfNeurons = as.numeric(number.of.neurons))
)
} else if (model$softmax) {
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(
numberOfNeurons = as.numeric(number.of.neurons),
activationFunction = "identity",
normalizationMethod = "softmax"
)
)
} else if (linearOutputUnits) {
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(
numberOfNeurons = as.numeric(number.of.neurons),
activationFunction = "identity"
)
)
} else {
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(numberOfNeurons = as.numeric(number.of.neurons))
)
}
} else
{
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(numberOfNeurons = as.numeric(number.of.neurons))
)
}
for (j in 1:number.of.neurons)
{
neuronID <- neuronID + 1
neuronNode <- xmlNode("Neuron",
attrs = c(
id = as.numeric(neuronID),
bias = model$wts[wtsID]
)
)
wtsID <- wtsID + 1
if (i == number.of.neural.layers && j == 1)
{
first.outputNeuronID <- neuronID
}
if (i == number.of.neural.layers && skipLayers) {
previous.number.of.neurons <- previous.number.of.neurons + number.of.inputs
}
for (k in 1:previous.number.of.neurons)
{
number.of.connections <- model$n[i + 1]
connectionNode <- xmlNode("Con",
attrs = c(
from = model$conn[wtsID],
weight = model$wts[wtsID]
)
)
wtsID <- wtsID + 1
neuronNode <- append.XMLNode(neuronNode, connectionNode)
}
neuralLayerNode <- append.XMLNode(neuralLayerNode, neuronNode)
}
previous.number.of.neurons <- number.of.neurons
the.model <- append.XMLNode(the.model, neuralLayerNode)
}
if (number.of.neurons == 1 && field$class[[field$name[1]]] == "factor") {
neuralLayerNode <- xmlNode("NeuralLayer",
attrs = c(
numberOfNeurons = "2",
activationFunction = "threshold", threshold = "0.5"
)
)
neuronID <- neuronID + 1
first.outputNeuronID <- neuronID
neuronNode <- xmlNode("Neuron",
attrs = c(
id = as.numeric(neuronID),
bias = "1.0"
)
)
connectionNode <- xmlNode("Con",
attrs = c(
from = neuronID - 1,
weight = "-1.0"
)
)
neuronNode <- append.XMLNode(neuronNode, connectionNode)
neuralLayerNode <- append.XMLNode(neuralLayerNode, neuronNode)
neuronID <- neuronID + 1
neuronNode <- xmlNode("Neuron",
attrs = c(
id = as.numeric(neuronID),
bias = "0.0"
)
)
connectionNode <- xmlNode("Con",
attrs = c(
from = neuronID - 2,
weight = "1.0"
)
)
neuronNode <- append.XMLNode(neuronNode, connectionNode)
neuralLayerNode <- append.XMLNode(neuralLayerNode, neuronNode)
the.model <- append.XMLNode(the.model, neuralLayerNode)
number.of.neurons <- number.of.neurons + 1
previous.number.of.neurons <- number.of.neurons
}
neuralOutputs <- xmlNode("NeuralOutputs",
attrs = c(numberOfOutputs = previous.number.of.neurons)
)
for (i in 1:number.of.neurons)
{
neuralOutputNode <- xmlNode("NeuralOutput",
attrs = c(outputNeuron = first.outputNeuronID)
)
first.outputNeuronID <- first.outputNeuronID + 1
if (field$class[[field$name[1]]] == "factor") {
targetName <- target
temp <- grep("as.factor", field$name[1], value = TRUE, fixed = TRUE)
if (length(temp) > 0) {
target <- field$name[1]
tempName <- strsplit(field$name[1], "")
endPos <- (length(tempName[[1]]) - 1)
targetName <- substring(target, 11, endPos)
}
fieldName <- paste("derivedNO_", targetName, sep = "")
derivedFieldNode <- xmlNode("DerivedField",
attrs = c(
name = fieldName,
optype = "continuous",
dataType = "double"
)
)
normDiscreteNode <- xmlNode("NormDiscrete",
attrs = c(
field = targetName,
value = model$lev[i]
)
)
derivedFieldNode <- append.XMLNode(derivedFieldNode, normDiscreteNode)
} else
{
name <- field$name[1]
fieldName <- paste("derivedNO_", name, sep = "")
derivedFieldNode <- xmlNode("DerivedField",
attrs = c(
name = fieldName,
optype = "continuous",
dataType = "double"
)
)
fieldRefNode <- xmlNode("FieldRef",
attrs = c(field = field$name[1])
)
derivedFieldNode <- append.XMLNode(derivedFieldNode, fieldRefNode)
}
neuralOutputNode <- append.XMLNode(neuralOutputNode, derivedFieldNode)
neuralOutputs <- append.XMLNode(neuralOutputs, neuralOutputNode)
}
the.model <- append.XMLNode(the.model, neuralOutputs)
pmml <- append.XMLNode(pmml, the.model)
return(pmml)
}
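# Usage sketch (hedged; assumes the nnet and XML packages):
# fit <- nnet::nnet(Species ~ ., data = iris, size = 2, trace = FALSE)
# doc <- pmml.nnet(fit, model_name = "iris_net")
# XML::saveXML(doc, file = "iris_net.pmml")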
enlarge_plot <- function(plot,
large_plot = plot,
plot_name = NULL,
width_small = 2,
height_small = 2,
width_large = 7,
height_large = 7,
...) {
if (is.null(plot_name)) {
plot_name <- digest::digest(stats::runif(1))
}
partial(require_file("_enlarge_plot.Rmd"), name = plot_name, ...,
!!!list(
plot = plot,
large_plot = large_plot,
width_small = width_small,
height_small = height_small,
width_large = width_large,
height_large = height_large))
}
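# Usage sketch (hedged): partial() and require_file() are package-internal
# helpers, so this only runs inside the package's own R Markdown documents.
# p <- ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) + ggplot2::geom_point()
# enlarge_plot(p, plot_name = "mtcars-demo")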
source("ESEUR_config.r")
plot_layout(2, 1, max_height=12)
par(mar=MAR_default-c(0.8, -2, 0.5, 0))
cpu2006=read.csv(paste0(ESEUR_dir, "benchmark/cpu2006-results-20140206.csv.xz"), as.is=TRUE)
start_date=as.Date("01-Jan-2006", format="%d-%B-%Y")
cpu2006$Test.Date=as.Date(paste0("01-", cpu2006$Test.Date), format="%d-%B-%Y")
cpu2006=subset(cpu2006, Test.Date >= start_date)
cint=subset(cpu2006, Benchmark == "CINT2006")
cint$Benchmark=NULL
cint=subset(cint, Result > 0)
hist(cint$Result, main="", col=point_col,
cex.axis=1.4, cex.lab=1.4,
xlab="SPECint result", ylab="Number of computers\n")
plot(density(cint$Result), col=point_col, main="",
cex.axis=1.4, cex.lab=1.4,
xaxs="i", yaxs="i",
xlab="SPECint result", ylab="Density (of number of computers)\n\n") |
library("mboost")
data("bodyfat", package = "mboost")
bffm <- DEXfat ~ age + waistcirc + hipcirc + elbowbreadth + kneebreadth +
anthro3a + anthro3b + anthro3c + anthro4 - 1
indep <- names(bodyfat)[names(bodyfat) != "DEXfat"]
bodyfat[indep] <- lapply(bodyfat[indep], function(x) x - mean(x))
n <- nrow(bodyfat)
set.seed(290875)
bs <- rmultinom(100, n, rep(1, n)/n)
mod1 <- glmboost(bffm, data = bodyfat)
grid <- seq(from = 2, to = 100, by = 2)
boob <- cvrisk(mod1, folds = bs, grid = grid)
mod2 <- glmboost(bffm, data = bodyfat, control = boost_control(nu = 0.2))
boobfull <- cvrisk(mod2, folds = bs, grid = 5000)
bffm <- DEXfat ~ hipcirc + kneebreadth + anthro3a
mod3 <- glmboost(bffm, data = bodyfat, control = boost_control(nu = 0.2))
boobrest <- cvrisk(mod3, folds = bs, grid = 5000)
save(boob, boobfull, boobrest, grid, file = "bodyfat_benchmarks.rda")
dgp <- function(n = 100) {
x <- sort(runif(n) - 0.5)
ytrue <- 0.8 * x + sin(6 * x)
data.frame(x = x, y = ytrue + rnorm(n, sd = sqrt(2)), ytrue = ytrue)
}
nsim <- 100
dfree <- seq(from = 2, to = 40, by = 2)
mstops <- seq(from = 5, to = 1005, by = 10)
mseSS <- matrix(0, nrow = nsim, ncol = length(dfree))
mseB <- matrix(0, nrow = nsim, ncol = length(mstops))
se <- GaussReg()@risk
for (i in 1:nsim) {
print(i)
learn <- dgp()
mseSS[i,] <- sapply(dfree, function(d) {
se(learn$ytrue, predict(smooth.spline(x = learn$x, y = learn$y, df = d),
x = learn$x)$y)/nrow(learn)
})
mod <- mboost(y ~ bbs(x, df = 2.5), data = learn,
control = boost_control(mstop = max(mstops)))
fm <- predict(mod, agg = "cumsum")[,mstops]
mseB[i,] <- apply(fm, 2, function(f) se(learn$ytrue, f)) / nrow(learn)
}
save(dfree, mstops, mseSS, mseB, file = "curve_estimation.rda")
data("wpbc")
wpbc <- wpbc[complete.cases(wpbc),]
indep <- names(wpbc)[!(names(wpbc) %in% c("time", "status"))]
wpbc[indep] <- lapply(wpbc[indep], function(x) x - mean(x))
n <- nrow(wpbc)
set.seed(290875)
bs <- rmultinom(100, n, rep(1, n)/n)
mod <- glmboost(status ~ ., data = wpbc[,-2], family = Binomial())
boob <- c()
grid <- seq(from = 5, to = 500, by = 5)
boob <- cvrisk(mod, folds = bs, grid = grid)
goob <- c()
flink <- function(x)
pmin(abs(x), 18) * sign(x)
for (j in 1:ncol(bs)) {
b <- bs[,j]
print(j)
y <- c(-1, 1)[as.integer(wpbc$status)]
foo <- function(f)
Binomial()@risk(y, f, as.numeric(b == 0)) / sum(b == 0)
object <- glm(status ~ ., data = wpbc[,-2], subset = b > 0,
family = binomial(), weights = b)
p <- predict(object, newdata = wpbc[,-2], type = "link")
goob <- c(goob, foo(flink(p)))
}
soob <- c()
flink <- function(x)
pmin(abs(x), 18) * sign(x)
for (j in 1:ncol(bs)) {
b <- bs[,j]
print(j)
foo <- function(f)
Binomial()@risk(y, f, as.numeric(b == 0)) / sum(b == 0)
object <- step(glm(status ~ ., data = wpbc[,-2], subset = b > 0,
family = binomial(), weights = b),
trace = 0)
p <- predict(object, newdata = wpbc[,-2], type = "link")
soob <- c(soob, foo(flink(p)))
}
save(bs, goob, soob, boob, file = "wpbc_benchmarks.rda")
data("wpbc")
wpbc <- wpbc[complete.cases(wpbc),]
iw <- IPCweights(Surv(wpbc$time, wpbc$status == "R"))
wpbc3 <- wpbc[,colnames(wpbc) != "status"]
wpbc3 <- wpbc3[iw > 0,]
iw <- iw[iw > 0]
n <- nrow(wpbc3)
set.seed(290875)
bs <- rmultinom(10, n, rep(1, n)/n)
mod <- glmboost(log(time) ~ ., data = wpbc3)
grid <- seq(from = 5, to = 1000, by = 10)
boob <- cvrisk(mod, folds = bs, grid = grid)
save(boob, grid, file = "wpbc_survivalbenchmarks.rda")
mra.wt <-
function(x.wt)
{
wf<-attr(x.wt,"wavelet")
J<-length(x.wt)-1
method<-attr(x.wt,"class")
boundary<-attr(x.wt,"boundary")
if(method=="modwt") n<-length(x.wt[[1]])
else n<-2*length(x.wt[[1]])
x.mra <- vector("list", J + 1)
zero <- vector("list", J + 1)
names(zero) <- c(paste("d", 1:J, sep = ""), paste("s", J,
sep = ""))
class(zero) <- method
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[J + 1]] <- x.wt[[J + 1]]
if (method == "modwt") {
for (k in 1:J) zero[[k]] <- numeric(n)
x.mra[[J + 1]] <- imodwt(zero)
}
else {
for (k in 1:J) zero[[k]] <- numeric(n/2^k)
x.mra[[J + 1]] <- idwt(zero)
}
for (j in J:1) {
zero <- vector("list", j + 1)
names(zero) <- c(paste("d", 1:j, sep = ""), paste("s",
j, sep = ""))
class(zero) <- method
attr(zero, "wavelet") <- wf
attr(zero, "boundary") <- boundary
zero[[j]] <- x.wt[[j]]
if (method == "modwt") {
if (j != 1) {
for (k in c(j + 1, (j - 1):1)) zero[[k]] <- numeric(n)
}
else {
zero[[j + 1]] <- numeric(n)
}
x.mra[[j]] <- imodwt(zero)
}
else {
zero[[j + 1]] <- numeric(n/2^j)
if (j != 1) {
for (k in (j - 1):1) zero[[k]] <- numeric(n/2^k)
}
x.mra[[j]] <- idwt(zero)
}
}
names(x.mra) <- c(paste("D", 1:J, sep = ""), paste("S", J,
sep = ""))
if (boundary == "reflection") {
for (j in (J + 1):1) x.mra[[j]] <- x.mra[[j]][1:(n/2)]
return(x.mra)
}
else {
return(x.mra)
}
}
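# Usage sketch (hedged; assumes the waveslim package, whose modwt()/imodwt()
# and dwt()/idwt() are the transforms called above):
# x <- rnorm(128)
# x.wt <- waveslim::modwt(x, wf = "la8", n.levels = 4)
# x.mra <- mra.wt(x.wt)
# all.equal(Reduce(`+`, x.mra), x)   # the details and smooth add back to x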
knitr::opts_chunk$set(echo=TRUE, collapse=TRUE, error=TRUE, comment = "#>")
library("TITAN2")
data(glades.taxa)
str(glades.taxa, list.len = 5)
data(glades.env)
str(glades.env)
data(glades.titan)
str(glades.titan, 1)
message("100% occurrence detected 1 times (0.8% of taxa), use of TITAN less than ideal for this data type")
message("Taxa frequency screen complete")
message("Determining partitions along gradient")
message("Calculating observed IndVal maxima and class values")
message("Calculating IndVals using mean relative abundance")
message("Permuting IndVal scores")
message("IndVal $z$ score calculation complete")
message("Summarizing Observed Results")
message("Estimating taxa change points using z-score maxima")
message("Bootstrap resampling in sequence...")
message(1*1)
message(2*1)
message(3*1)
message("Bootstrap resampling in parallel using 2 CPUs...no index will be printed to screen")
glades.titan$sumz.cp
head(glades.titan$sppmax)
str(glades.titan, max.level = 1, give.attr = FALSE)
plot_sumz_density(glades.titan)
plot_sumz_density(glades.titan, ribbon = FALSE, points = TRUE)
plot_sumz_density(glades.titan,
ribbon = TRUE, points = FALSE, sumz1 = FALSE, change_points = FALSE,
xlabel = expression(paste("Surface Water Total Phosphorus ("*mu*"g/l)"))
)
plot_sumz(glades.titan, filter = TRUE)
plot_taxa_ridges(glades.titan, axis.text.y = 8)
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
n_ytaxa = 50
)
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
z2 = FALSE
)
plot_taxa_ridges(glades.titan,
xlabel = expression(paste("Surface water total phosphorus ("*mu*"g/l)")),
z2 = FALSE, grid = FALSE
)
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)")
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)", z.med = TRUE)
plot_taxa(glades.titan, xlabel = "Surface Water TP (ug/l)", z.med = FALSE, prob95 = TRUE)
plot_cps(glades.titan)
plot_cps(glades.titan, taxaID = "ENALCIVI", xlabel = "Surface Water TP (ug/l)")
plot_cps(glades.titan, taxaID = "ENALCIVI", cp.trace = TRUE, xlabel = "Surface Water TP (ug/l)")
plot_cps(glades.titan, taxaID = "OSTRASP5", cp.trace = TRUE, xlabel = "Surface Water TP (ug/l)")
plot_cps(glades.titan, taxa.dist = FALSE, xlabel = "Surface Water TP (ug/l)")
plot_cps(glades.titan, taxa.dist = FALSE, xlabel = "Surface Water TP (ug/l)", stacked = TRUE)
addLine=function(x,...){
if(x=="") x<-paste0(...)
else x<-paste0(x,"\n",...)
x
}
addPlus=function(x,...){
if(x=="") x<-paste0(...)
else x<-paste0(x,"+",...)
x
}
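# Worked examples of the two string accumulators:
# addPlus("", "a", 1, "*", "X1")        # "a1*X1"
# addPlus("a1*X1", "a", 2, "*", "X2")   # "a1*X1+a2*X2"
# addLine("M~a1*X1", "Y~b1*M")          # "M~a1*X1\nY~b1*M"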
makeEquation1=function(X,M,stage=1,start=0,add2ndMediation=TRUE){
countX=length(X)
countM=length(M)
equation=""
for(i in 1:countM){
sub=""
for(j in 1:countX){
sub=addPlus(sub,letters[stage],start+(i-1)*countX+j,"*",X[j])
}
        if(add2ndMediation && (countM>1) && (i>1)){
sub=addPlus(sub,"d",i-1,"*",M[i-1])
}
temp=paste0(M[i],"~",sub)
equation=addLine(equation,temp)
}
equation
}
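# Worked example: one predictor and two serial mediators give lavaan-style lines
# makeEquation1("X", c("M1", "M2"))
# # "M1~a1*X\nM2~a2*X+d1*M1"  (the d1 term is the second-order mediation path)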
makeEquation2=function(X,M,Y){
countX=length(X)
countM=length(M)
countY=length(Y)
equation=""
for(i in 1:countY){
temp=makeEquation1(M,Y[i],stage=2,start=(i-1)*countM)
temp
sub=""
for(j in 1:countX){
sub=addPlus(sub,letters[3],(i-1)*countX+j,"*",X[j])
}
temp=addPlus(temp,sub)
equation=addLine(equation,temp)
}
equation
}
makeEquation3=function(X,M,Y,add2ndMediation=TRUE){
(countX=length(X))
(countM=length(M))
(countY=length(Y))
equation=""
ind=c()
for(k in 1:countY){
for(i in 1:countX) {
for(j in 1:countM) {
noA=(i-1)*countM+j
no=noA+(k-1)*countX*countM
b=((noA-1)%/%countX)+1+(k-1)*countM
ind=c(ind,paste0("a",noA,"*b",b))
temp=paste0("ind",length(ind),":=",ind[length(ind)])
equation=addLine(equation,temp)
}
}
}
equation
secondInd=c()
    if(add2ndMediation && (countM>1)){
for(k in 1:countY){
for(j in 2:countM){
equationa=""
for(i in 1:countX){
start=(j-2)*countX
tempa=paste0("a",start+i,"*d",j-1,"*b",j+(k-1)*countM)
equationa=addPlus(equationa,tempa)
}
secondInd=c(secondInd,equationa)
temp=paste0("secondInd",length(secondInd),":=",secondInd[length(secondInd)])
equation=addLine(equation,temp)
}
}
}
equation
thirdInd=c()
    if(add2ndMediation && (countM>2)){
for(k in 1:countY){
for(j in 3:countM){
equationa=""
for(i in 1:countX){
start=(j-3)*countX
tempa=paste0("a",start+i,"*d",j-2,"*d",j-1,"*b",j+(k-1)*countM)
equationa=addPlus(equationa,tempa)
}
thirdInd=c(thirdInd,equationa)
temp=paste0("thirdInd",length(thirdInd),":=",thirdInd[length(thirdInd)])
equation=addLine(equation,temp)
}
}
}
equation
total=c()
for(k in 1:countY){
direct=paste0("c",seq(1:countX)+(k-1)*countX)
Effect=Reduce(addPlus,direct)
if(countM>=1){
start=1+(k-1)*length(ind)/countY
end=start+length(ind)/countY-1
indirectEffect=Reduce(addPlus,ind[start:end])
Effect=addPlus(Effect,indirectEffect)
}
if(countM>=2){
start=1+(k-1)*length(secondInd)/countY
end=start+length(secondInd)/countY-1
secondIndEffect=Reduce(addPlus,secondInd[start:end])
if(add2ndMediation) Effect=addPlus(Effect,secondIndEffect)
}
if(countM>=3){
start=1+(k-1)*length(thirdInd)/countY
end=start+length(thirdInd)/countY-1
thirdIndEffect=Reduce(addPlus,thirdInd[start:end])
if(add2ndMediation) Effect=addPlus(Effect,thirdIndEffect)
}
temp=paste0("total",k,":=",Effect)
equation=addLine(equation,temp)
}
equation
}
makeEquation=function(X,M,Y,add2ndMediation=TRUE,covar=list()){
(countX=length(X))
(countM=length(M))
(countY=length(Y))
    if(countX*countM*countY==0) {
        # string literal truncated during extraction; an empty equation is assumed
        equation=""
    } else{
        temp=makeEquation2(X,M,Y)
        temp
        temp=addCovarEquation(temp,covar,prefix="g")
        # string literal truncated during extraction; a lavaan-style "# regression" header is assumed
        equation=paste0("# regression\n",temp)
temp=makeEquation1(X,M,add2ndMediation=add2ndMediation)
temp=addCovarEquation(temp,covar,prefix="h")
equation=addLine(equation,temp)
equation=addLine(equation,makeEquation3(X,M,Y,add2ndMediation=add2ndMediation))
}
equation
}
addCovarEquation=function(equation,covar=list(),prefix="f",grouplabels=NULL,multipleMediator=FALSE){
temp1=unlist(strsplit(equation,"\n"))
temp1
temp2=strsplit(temp1,"~")
temp2
result=list()
start=1
i=1
for(i in 1:length(temp2)){
var=temp2[[i]][1]
var=str_trim(var,side="both")
var
covar
if(multipleMediator){
suffix=i
} else{
suffix<-NULL
}
temp3=seekVar(covar=covar,var=var,prefix=prefix,start=start,grouplabels=grouplabels,suffix=suffix)
temp3
if(is.null(temp3)){
result[[i]]=paste(var,"~",temp2[[i]][2])
} else {
temp4=paste(temp2[[i]][2],"+",paste(temp3,collapse=" + "))
result[[i]]=paste(var,"~",temp4)
start=start+length(temp3)
}
}
paste(unlist(result),collapse="\n")
}
seekVar=function(covar=list(),var,prefix="h",start=1,grouplabels=NULL,suffix=NULL){
temp=c()
if(length(covar$name)>0){
j=start
for(i in 1:length(covar$name)){
if(var %in% covar$site[[i]]) temp=c(temp,covar$name[i])
}
res=c()
for(i in seq_along(temp)){
if(temp[i] %in% names(grouplabels)){
count=attr(grouplabels[[temp[i]]],"length")
temp1=paste0(grouplabels[[temp[i]]],1:count)
res=c(res,temp1)
} else{
res=c(res,temp[i])
}
}
temp=c()
j=1
for(i in seq_along(res)){
if(!is.null(prefix)) {
temp=c(temp,paste0(prefix,j,suffix,"*",res[i]))
} else{
temp=c(temp,res[i])
}
j=j+1
}
}
temp
}
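# Worked example (hedged): appending a covariate to an existing lavaan line
# addCovarEquation("M~a1*X", covar = list(name = "age", site = list("M")),
#                  prefix = "h")
# # "M ~ a1*X + h1*age"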
`deshrink` <- function(env, wa.env,
type = c("inverse", "classical",
"expanded", "none","monotonic")) {
`inverse` <- function(env, wa.env) {
X <- cbind(rep(1, length(wa.env)), wa.env)
QR <- qr(X)
coef <- qr.coef(QR, env)
pred <- qr.fitted(QR, env)
return(list(coefficients = coef, env = pred))
}
`classical` <- function(env, wa.env) {
X <- cbind(rep(1, length(env)), env)
QR <- qr(X)
coef <- drop(qr.coef(QR, wa.env))
pred <- (wa.env - coef[1]) / coef[2]
return(list(coefficients = coef, env = pred))
}
`expanded` <- function(env, wa.env) {
b1 <- sd(env)/sd(wa.env)
b0 <- mean(env) - b1 * mean(wa.env)
pred <- b0 + b1 * wa.env
return(list(coefficients = c(b0, b1), env = pred))
}
`none` <- function(env, wa.env) {
return(list(coefficients = c(0, 1), env = wa.env))
}
`monotonic` <- function(env, wa.env) {
df <- data.frame(env = env, wa.env = drop(wa.env))
mod <- gam(env ~ s(wa.env, k = 10, bs = "cr"), data = df)
sm <- smoothCon(s(wa.env, k = 10, bs = "cr"), data = df,
knots = NULL)[[1]]
Fm <- mono.con(sm$xp)
G <- list(X = sm$X, C = matrix(0,0,0), sp = mod$sp, p = sm$xp,
y = env, w = env*0+1, Ain = Fm$A, bin = Fm$b, S = sm$S,
off = 0)
p <- pcls(G)
pred <- Predict.matrix(sm, data = data.frame(wa.env = wa.env)) %*% p
pred <- drop(pred)
list(coefficients = list(sm = sm, p = p), env = pred)
}
if(missing(type))
type <- "inverse"
type <- match.arg(type)
res <- switch(type,
inverse = inverse(env, wa.env),
classical = classical(env, wa.env),
expanded = expanded(env, wa.env),
none = none(env, wa.env),
monotonic = monotonic(env, wa.env))
class(res) <- c("deshrink","list")
attr(res, "type") <- type
return(res)
}
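# Usage sketch (hedged; the 'monotonic' type additionally needs the mgcv package):
# env    <- seq(4, 8, length.out = 50)                 # observed gradient
# wa.env <- 6 + 0.5 * (env - 6) + rnorm(50, sd = 0.1)  # shrunken WA estimates
# d <- deshrink(env, wa.env, type = "inverse")
# d$env            # deshrunk predictions
# d$coefficients   # intercept and slope of the inverse regression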
positive.negative <- function(x,method = "min"){
.Call(Rfast_positive_negative,x,method)
}
required <- c("tidyverse")
if (!all(sapply(required,
function(pkg) requireNamespace(pkg, quietly = TRUE)))) {
message(paste("This vignette needs the followig packages:\n\t",
paste(required, collapse = " "),
"\nSince not all are installed, code will not be executed: "))
knitr::opts_chunk$set(eval = FALSE)
}
knitr::opts_chunk$set(collapse = TRUE, comment = "
options(tidyverse.quiet = TRUE)
library("MortalityTables")
mortalityTables.list()
mortalityTables.list("Austria_*")
mortalityTables.load("Germany_Annuities_DAV2004R")
mortalityTables.load("Austria_Annuities*")
mortalityTables.load("Austria_Census")
plot(mort.AT.census.1951.male, mort.AT.census.1991.male,
mort.AT.census.2001.male, mort.AT.census.2011.male,
legend.position = c(1,0))
plot(mort.AT.census.1951.male, mort.AT.census.1991.male,
mort.AT.census.2001.male,
reference = mort.AT.census.2011.male, legend.position = c(1,0.75), ylim = c(0,4))
plot(AVOe1996R.male, AVOe2005R.male, YOB = 1977, title = "Comparison for YOB=1977")
plot(AVOe1996R.male, AVOe2005R.male, Period = 2020, title = "Comparison for observation year 2020")
mortalityTables.load("Austria_Annuities")
qx.coh1977 = deathProbabilities(AVOe2005R.male, YOB = 1977)
qx.per2020 = periodDeathProbabilities(AVOe2005R.male, Period = 2020)
table.coh1977 = getCohortTable(AVOe2005R.male, YOB = 1977)
table.per2020 = getPeriodTable(AVOe2005R.male, Period = 2020)
plot(table.coh1977, table.per2020, title = "Comparison of cohort 1977 with Period 2020", legend.position = c(1,0))
plotMortalityTables(
mort.AT.census[c("m", "w"), c("1951", "1991", "2001", "2011")]) +
aes(color = as.factor(year), linetype = sex) + labs(color = "Period", linetype = "Sex")
mort.AT.census.2011.male@data$dim
lt = mortalityTable.period(name = "Sample period lifetable", ages = 1:99, deathProbs = exp(-(99:1)/10))
plot(lt, title = "Simple log-linear period mortality table")
deathProbabilities(lt)
atPlus2 = mortalityTable.trendProjection(
name = "Austrian Census Males 2011, 2% yearly trend",
baseYear = 2011,
deathProbs = deathProbabilities(mort.AT.census.2011.male),
ages = ages(mort.AT.census.2011.male),
trend = rep(0.02, length(ages(mort.AT.census.2011.male)))
)
atPlus2.damp = mortalityTable.trendProjection(
name = "Austrian M '11, 2% yearly, damping until 2111",
baseYear = 2011,
deathProbs = deathProbabilities(mort.AT.census.2011.male),
ages = ages(mort.AT.census.2011.male),
trend = rep(0.02, length(ages(mort.AT.census.2011.male))),
dampingFunction = function(n) { n - n * (n + 1) / 2 / 100 }
)
plot(mort.AT.census.2011.male, atPlus2, atPlus2.damp, YOB = 2011, legend.position = c(0.8,0.75))
atPlus2.damp2 = mortalityTable.trendProjection(
name = "Austrian M '11, 2% yearly, 1% long-term",
baseYear = 2011,
deathProbs = deathProbabilities(mort.AT.census.2011.male),
ages = ages(mort.AT.census.2011.male),
trend = rep(0.02, length(ages(mort.AT.census.2011.male))),
trend2 = rep(0.01, length(ages(mort.AT.census.2011.male))),
dampingFunction = function(year) {
if (year <= 2021) 1
else if (year > 2031) 14.5/(year - 2011)
else 1 - (year - 2021)*(year - 2021 + 1) / 20 / (year - 2011)
}
)
plot(mort.AT.census.2011.male, atPlus2, atPlus2.damp, atPlus2.damp2, YOB = 2011, legend.position = c(0.02, 0.98), legend.justification = c(0, 1))
baseTableShift = getCohortTable(atPlus2, YOB = 2011);
baseTableShift@name = "Base table of the shift (YOB 2011)"
atShifted = mortalityTable.ageShift(
name = "Approximation with age shift",
baseYear = 2011,
deathProbs = deathProbabilities(baseTableShift),
ages = ages(baseTableShift),
ageShifts = data.frame(
shifts = c(
rep( 0, 3),
rep(-1, 3),
rep(-2, 3),
rep(-3, 3),
rep(-4, 3),
rep(-5, 3),
rep(-6, 3)
),
row.names = 2011:2031
)
)
ageShift(atShifted, YOB = 2021)
plot(baseTableShift, atPlus2, atShifted, YOB = 2021, legend.position = c(0.8,0.75))
b = AVOe2005R.female
b@name = "Modified Copy"
b@modification = function(qx) pmax(qx, 0.01)
plot(AVOe2005R.female, b, YOB = 2000)
AVOe2005R.female.sec = setLoading(AVOe2005R.female, loading = 0.1);
AVOe2005R.female.sec@name = "Table with 10% loading"
plot(AVOe2005R.female, AVOe2005R.female.sec, title = "Original and modified table")
AVOe2005R.female.mod = setModification(AVOe2005R.female, modification = function(qx) pmax(0.03, qx));
AVOe2005R.female.mod@name = "Modified table (lower bound of 3%)"
plot(AVOe2005R.female, AVOe2005R.female.mod, title = "Original and modified table")
library(tidyverse)
data("PopulationData.AT2017", package = "MortalityTables")
PopulationData.AT2017.raw = PopulationData.AT2017 %>%
select(age, exposure.total, deaths.total) %>%
mutate(qraw = deaths.total / (exposure.total + deaths.total/2))
PopulationTable.AT2017 = mortalityTable.period(
name = "Austrian Population Mortality 2017 (raw)",
baseYear = 2017,
deathProbs = PopulationData.AT2017.raw$qraw,
ages = PopulationData.AT2017.raw$age,
exposures = PopulationData.AT2017.raw$exposure.total,
data = list(
deaths = PopulationData.AT2017.raw$deaths.total,
dim = list(sex = "u", collar = "Population", type = "raw", year = "2017")
)
)
plotMortalityTables(PopulationTable.AT2017, title = "Austrian population mortality (raw), 2017")
PopulationTable.AT2017.smooth = PopulationTable.AT2017 %>%
whittaker.mortalityTable(lambda = 1/10, d = 2, name.postfix = ", Whittaker") %>%
mT.setDimInfo(type = "smoothed")
plotMortalityTables(PopulationTable.AT2017, PopulationTable.AT2017.smooth, title = "Austrian population mortality (raw and smoothed), 2017") +
aes(colour = type)
PopulationData.AT2017.raw %>% filter(age > 90)
PopulationTable.AT2017.cut = PopulationTable.AT2017.smooth %>%
mT.fillAges(0:99) %>%
mT.setName("Austrian Population Mortality 2017, Whittaker-smoothed and cut at age 99")
PopulationTable.AT2017.ex = PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "HP2", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "smoothed and extrapolated")
plotMortalityTables(PopulationTable.AT2017, PopulationTable.AT2017.smooth, PopulationTable.AT2017.ex, title = "Austrian population mortality (raw and smoothed), 2017") +
aes(colour = type)
plotMortalityTables(
PopulationTable.AT2017,
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "HP2", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "Extrapolation: HP2, Fit 75--99"),
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "HP2", fit = 75:85, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "Extrapolation: HP, Fit 75--85"),
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "HP2", fit = 90:110, extrapolate = 80:120, fadeIn = 90:100) %>%
mT.setDimInfo(type = "Extrapolation: HP2, Fit 90--110"),
title = "Examples of different fitting ranges for extrapolation") +
aes(colour = type)
plotMortalityTables(
PopulationTable.AT2017,
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "HP2", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "HP2"),
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "thiele", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "thiele"),
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "ggompertz", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "ggompertz"),
PopulationTable.AT2017.smooth %>%
mT.fitExtrapolationLaw(law = "carriere1", fit = 75:99, extrapolate = 80:120, fadeIn = 80:95) %>%
mT.setDimInfo(type = "carriere1"),
title = "Examples of different fitting functions for extrapolation (fit 75--99)",
ages = 75:120, legend.position = "bottom", legend.key.width = unit(15, "mm")) +
aes(colour = type) + labs(colour = "Mortality Law")
mortalityTables.load("Austria_PopulationForecast")
plotMortalityTrend(mort.AT.forecast, title = "Forecast trend (medium scenario) by Statistik Austria")
PopulationTable.AT2017.trend = PopulationTable.AT2017.ex %>%
mT.addTrend(mort.AT.forecast$m@trend, trendages = ages(mort.AT.forecast$m)) %>%
mT.setDimInfo(type = "smoothed, extrapolated, trend")
PopulationTable.AT2017.trend.ex = PopulationTable.AT2017.trend %>%
mT.extrapolateTrendExp(95) %>%
mT.setDimInfo(type = "smoothed, extrapolated, trend extrapolated")
plotMortalityTrend(PopulationTable.AT2017.trend, PopulationTable.AT2017.trend.ex,
title = "Extrapolating the trend via Exponential function") +
aes(color = type)
plotMortalityTables(PopulationTable.AT2017, PopulationTable.AT2017.smooth, PopulationTable.AT2017.ex, PopulationTable.AT2017.trend.ex, YOB = 1980, title = "Austrian population mortality (Period 2017 vs. Generation 1980)", legend.position = c(0.01, 0.99), legend.justification = c(0,1)) +
aes(colour = type)
PopulationTable.AT2017.trend.ex@data$whittaker
PopulationTable.AT2017.Cohort.FINAL = PopulationTable.AT2017.trend.ex %>%
mT.cleanup() %>%
mT.round(digits = 6) %>%
mT.setName("Austrian Population Mortality, Period 2017 with trend projection")
TableForProduct = PopulationTable.AT2017.Cohort.FINAL %>%
  mT.scaleProbs(factor = 1.25, name.postfix = "25% security added")
plotMortalityTables(TableForProduct, PopulationTable.AT2017.Cohort.FINAL,
title = "Adding a security loading of 25%", Period = 2017, legend.position = "bottom") |
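# S3 summary method for "est_multi_poly_clust" fits: prints the call, the
# log-likelihood, AIC and BIC, the regression parameters at cluster and
# individual level, and the conditional response probabilities.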
summary.est_multi_poly_clust <- function(object, ...){
out = object
cat("\nCall:\n")
print(out$call)
cat("\nLog-likelihood:\n")
print(round(out$lk,2))
cat("\nAIC:\n")
print(round(out$aic,2))
cat("\nBIC:\n")
print(round(out$bic,2))
cat("\nRegression parameters at cluster level:\n")
print(round(out$DeU,4))
cat("\nRegression parameters at individual level:\n")
print(round(out$DeV,4))
cat("\nConditional response probabilities:\n")
print(round(out$Phi,4))
cat("\n")
} |
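# Diagnostic plots for robust linear models of class "lmrob". Up to five
# panels are selected via 'which'; panel 1 plots standardized residuals
# against robust Mahalanobis distances, recomputing (and optionally caching)
# them when compute.MD = TRUE.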
plot.lmrob <-
function (x, which = 1:5,
caption = c("Standardized residuals vs. Robust Distances",
"Normal Q-Q vs. Residuals", "Response vs. Fitted Values",
"Residuals vs. Fitted Values" ,
"Sqrt of abs(Residuals) vs. Fitted Values"),
panel = if(add.smooth) panel.smooth else points,
sub.caption = deparse(x$call), main = "",
compute.MD = TRUE,
ask = prod(par("mfcol")) < length(which) && dev.interactive(),
id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75,
label.pos = c(4,2), qqline = TRUE, add.smooth = getOption("add.smooth"),
..., p = 0.025)
{
if (!inherits(x, "lmrob"))
stop("Use only with 'lmrob' objects")
if (!is.numeric(which) || any(which < 1) || any(which > 5))
stop("'which' must be in 1:5")
show <- rep(FALSE, 5)
show[which] <- TRUE
r <- residuals(x)
n <- length(r)
sr <- r/x$scale
yh <- fitted(x)
if (is.null(id.n))
id.n <- 0
else {
id.n <- as.integer(id.n)
if(id.n < 0L || id.n > n)
stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA)
}
if(id.n > 0L) {
if(is.null(labels.id))
labels.id <- paste(1L:n)
iid <- 1L:id.n
show.r <- sort.list(abs(r), decreasing = TRUE)[iid]
text.id <- function(x, y, ind, adj.x = TRUE) {
labpos <-
if(adj.x) label.pos[1+as.numeric(x > mean(range(x)))] else 3
text(x, y, labels.id[ind], cex = cex.id, xpd = TRUE,
pos = labpos, offset = 0.25)
}
}
one.fig <- prod(par("mfcol")) == 1
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op))
}
if (show[1]) {
if(is.null(x[['MD']]) && compute.MD) {
message("recomputing robust Mahalanobis distances")
x$MD <-
robMD(x = if(!is.null(x[['x']])) x$x else
if(!is.null(x[['model']])) model.matrix(x, x$model)
else stop("need 'model' or 'x' component for robust Mahalanobis distances"),
intercept = attr(x$terms,"intercept"),
wqr = x$qr)
.ge <- .GlobalEnv
if(identical(parent.frame(), .ge) &&
exists((cnx <- as.character(match.call()[["x"]])), .ge)) {
assign(cnx, x, envir = .ge)
message("saving the robust distances 'MD' as part of ", sQuote(cnx))
}
}
if(!is.null(xD <- x[['MD']])) {
if (p < 0 || p > 1)
stop ("Tolerance range must be between 0% to 100%")
else chi <- sqrt( qchisq(p = 1-p, df = x$rank) )
ylim <- range(sr, na.rm=TRUE)
if(id.n > 0) ylim <- extendrange(r = ylim, f = 0.08)
plot(xD, xlab = "Robust Distances",
sr, ylab = "Robust Standardized residuals", ylim=ylim,
main = main, type = "n", ...)
panel(xD, sr, ...)
mtext(caption[1], 3, 0.25)
if (one.fig)
title(sub = sub.caption, ...)
if(id.n > 0) {
y.id <- sr[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id(xD[show.r], y.id, show.r)
}
abline(h = c(2.5,-2.5), lty = 3)
abline(v = chi, lty = 3)
}
}
if (show[2L]) {
qq <- qqnorm(r, ylab = "Residuals", main = main,...)
if(qqline) qqline(r, lty = 3, col = "gray50")
mtext(caption[2], 3, 0.25)
if (one.fig)
title(sub = sub.caption, ...)
if(id.n > 0)
text.id(qq$x[show.r], qq$y[show.r], show.r)
}
if (show[3]) {
y <- if(!is.null(x[['model']])) model.response(x$model) else yh + r
m1 <- min(yh,y)
m2 <- max(yh,y)
plot(yh, y, xlab = "Fitted Values", ylab = "Response",
xlim = c(m1,m2), ylim = c(m1,m2), main = main, type = "n", ...)
panel(yh, y, ...)
mtext(caption[3], 3, 0.25)
if (one.fig)
title(sub = sub.caption, ...)
if(id.n > 0)
text.id(yh[show.r], y[show.r], show.r)
abline(a = 0,b = 1)
}
if (show[4]) {
plot(yh, r, xlab = "Fitted Values", ylab = "Residuals",
main = main, type = "n", ...)
panel(yh, r, ...)
mtext(caption[4], 3, 0.25)
if (one.fig)
title(sub = sub.caption, ...)
if(id.n > 0) {
y.id <- r[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id(yh[show.r], y.id, show.r)
}
abline(h = c(2.5*x$scale,0,-2.5*x$scale), lty = 3)
}
if (show[5]) {
sqrtabsr <- sqrt(abs(r))
plot(yh, sqrtabsr, xlab = "Fitted Values", ylab = "Sqrt of abs(Residuals)",
main = main, type = "n", ...)
panel(yh, sqrtabsr, ...)
mtext(caption[5], 3, 0.25)
if (one.fig)
title(sub = sub.caption, ...)
if(id.n > 0)
text.id(yh[show.r], sqrtabsr[show.r], show.r)
}
invisible()
} |
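# Renames one field of a wrapped-data object. 'xform_info' has the form
# "oldName->newName" (or "column<n>->newName"); without a "->" target a
# generated "derived_" name is used. Returns the updated box data.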
rename_wrap_var <- function(wrap_object,
xform_info = NA, ...) {
i <- NULL
j <- NULL
colnm <- NULL
boxData <- .init_wrap_params(wrap_object)
if (is.na(xform_info)) {
warning("No field name to rename found")
return(wrap_object)
} else {
coln <- as.character(xform_info)
if (grepl("[^-]->", coln)) {
st <- strsplit(coln, "->")
} else {
st <- strsplit(coln, "-->")
}
if (!is.na(st[[1]][2])) {
derivedFieldName <- st[[1]][2]
}
colnm <- st[[1]][1]
if (grepl("column", colnm, ignore.case = TRUE)) {
colnm <- gsub("column", "", colnm, ignore.case = TRUE)
}
if (grepl("^[-,_]", colnm)) {
colnm <- gsub("^[-,_]*", "", colnm)
}
    if (is.na(st[[1]][2])) {
      # no "->" target supplied: fall back to a generated "derived_" name
      derivedFieldName <- paste("derived_", colnm, sep = "")
    }
if (suppressWarnings(!is.na(as.numeric(colnm)))) {
coln2 <- as.numeric(colnm)
dataType <- boxData$field_data[names(boxData$data)[coln2], "dataType"]
if (dataType == "numeric") {
row.names(boxData$field_data)[coln2] <- derivedFieldName
names(boxData$data)[coln2] <- derivedFieldName
if (!is.null(boxData$matrixData)) {
names(boxData$matrixData)[coln2] <- derivedFieldName
}
}
    } else {
      i <- which(names(boxData$data) == colnm)
      if (length(i) == 0) {
        # which() returns integer(0), never NULL, so test the length;
        # fall back to matching the field metadata row names
        j <- which(row.names(boxData$field_data) == colnm)
      }
      if (length(i) == 0 && length(j) == 0) {
        stop("field name not found.")
      }
      if (length(i) > 0) {
row.names(boxData$field_data)[i] <- derivedFieldName
names(boxData$data)[i] <- derivedFieldName
if (!is.null(boxData$matrixData)) {
names(boxData$matrixData)[i] <- derivedFieldName
}
} else {
row.names(boxData$field_data)[j] <- derivedFieldName
names(boxData$data)[j] <- derivedFieldName
if (!is.null(boxData$matrixData)) {
names(boxData$matrixData)[j] <- derivedFieldName
}
}
}
}
return(boxData)
} |
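# Queries the UniProt REST API for Gene Ontology annotations (GO ids plus
# biological process / molecular function / cellular component terms), one
# request per accession, and returns the combined data frame; optionally
# writes it as CSV into 'directorypath'.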
GetProteinGOInfo <- function(ProteinAccList , directorypath = NULL)
{
if(!has_internet())
{
message("Please connect to the internet as the package requires internect connection.")
return()
}
ProteinInfoParsed_total = data.frame()
baseUrl <- "http://www.uniprot.org/uniprot/"
message("Please wait we are processing your accessions ...")
pb <- progress::progress_bar$new(total = length(ProteinAccList))
Colnames = "go-id,go,go(biological process),go(molecular function),go(cellular component)"
for (ProteinAcc in ProteinAccList)
{
Request <- tryCatch(
{
GET(paste0(baseUrl , ProteinAcc,".xml"))
},error = function(cond)
{
message("Internet connection problem occurs and the function will return the original error")
message(cond)
}
)
ProteinName_url <- paste0("?query=accession:",ProteinAcc,"&format=tab&columns=",Colnames)
RequestUrl <- paste0(baseUrl , ProteinName_url)
RequestUrl <- URLencode(RequestUrl)
if (length(Request) == 0)
{
message("nternet connection problem occurs")
return()
}
if (Request$status_code == 200){
ProteinDataTable <- tryCatch(read.csv(RequestUrl, header = TRUE, sep = '\t'), error=function(e) NULL)
if (!is.null(ProteinDataTable))
{
ProteinDataTable <- ProteinDataTable[1,]
ProteinInfoParsed <- as.data.frame(ProteinDataTable,row.names = ProteinAcc)
ProteinInfoParsed_total <- rbind(ProteinInfoParsed_total, ProteinInfoParsed)
}
}else {
HandleBadRequests(Request$status_code)
}
pb$tick()
}
if(!is.null(directorypath))
{
write.csv(ProteinInfoParsed_total , paste0(directorypath , "/","Protein GO Info.csv"))
}
return(ProteinInfoParsed_total)
} |
rm(list = ls())
if(FALSE){
library(testthat)
library(lavaSearch2)
}
lava.options(symbols = c("~","~~"))
context("lavaSearch2")
n <- 100
m.sim <- lvm(Y~E+0*X1)
m <- lvm(Y~E)
addvar(m) <- ~X1
set.seed(12)
df.sim <- lava::sim(m.sim, n=100, latent = FALSE)
e.base <- estimate(m, data = df.sim)
test_that("Score 1 link",{
GS.score <- modelsearch(e.base, silent = TRUE)
index.coef <- which(GS.score$res[,"Index"]=="Y~X1")
search.holm <- modelsearch2(e.base, method.p.adjust = "holm", trace = 0)
expect_equal(as.double(GS.score$test[index.coef,"Test Statistic"]),
as.double(search.holm$sequenceTest[[1]][1,"statistic"]), tol = 1e-9)
expect_equal(as.double(GS.score$test[index.coef,"P-value"]),
as.double(search.holm$sequenceTest[[1]][1,"p.value"]), tol = 1e-9)
search.approx <- modelsearch2(e.base, method.p.adjust = "fastmax", trace = 0,
method.maxdist = "approximate")
search.resampling <- modelsearch2(e.base, method.p.adjust = "fastmax", trace = 0,
method.maxdist = "resampling")
search.bootstrap <- modelsearch2(e.base, method.p.adjust = "fastmax", trace = 0,
method.maxdist = "bootstrap")
expect_equal(search.approx$sequenceTest[[1]][1,"statistic"],
search.holm$sequenceTest[[1]][1,"statistic"])
expect_equal(search.approx$sequenceTest[[1]][1,"statistic"],
search.resampling$sequenceTest[[1]][1,"statistic"], tol = 1e-3)
expect_equal(search.approx$sequenceTest[[1]][1,"statistic"],
search.bootstrap$sequenceTest[[1]][1,"statistic"], tol = 1e-3)
expect_equal(round(search.resampling$sequenceTest[[1]][1,"p.value"],2),
0.24)
expect_equal(round(search.bootstrap$sequenceTest[[1]][1,"p.value"],2),
0.24)
}) |
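# Test fixtures: a copy of mtcars with several columns recoded as factors,
# and four lm() fits that differ in how many factor predictors they involve.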
my_mtcars <- mtcars
my_mtcars$am <- factor(my_mtcars$am, labels = c("automatic", "manual"))
my_mtcars$vs <- factor(my_mtcars$vs, labels = c("v", "s"))
my_mtcars$cyl <- factor(my_mtcars$cyl)
my_mtcars$gear <- factor(my_mtcars$gear)
my_mtcars$carb <- factor(my_mtcars$carb)
model_nofactors <- lm(mpg ~ cyl*wt*hp, data = mtcars)
model_onefactor <- lm(mpg ~ cyl*wt*hp, data = my_mtcars)
model_twofactors <- lm(mpg ~ cyl*wt*hp + vs, data = my_mtcars)
model_allfactors <- lm(mpg ~ cyl*vs, data = my_mtcars) |
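# Completes the columns of x to a full orthogonal basis via the QR
# decomposition, optionally prepending (and then dropping) an intercept
# column; normalize = "abs2" rescales each column so that its absolute sum
# over 'abs2.rows' equals 2.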
orthog.complete <- function(x, normalize=TRUE, abs2.rows=1:nrow(x),
Int=TRUE, drop.Int=Int) {
if (length(dimnames(x)[[2]])==0) dimnames(x)[[2]] <- paste("Col",1:ncol(x), sep=".")
if (Int) x <- cbind("(I)"=1, x)
qr.x <- qr(x)
Q <- qr.Q(qr.x, complete=TRUE)
dimnames(Q) <- list(paste("Row",1:nrow(Q), sep="."),
paste("Col",1:ncol(Q), sep="."))
if (length(dimnames(x)[[1]])) dimnames(Q)[[1]] <- dimnames(x)[[1]]
mincolx <- min(ncol(x), ncol(Q))
if (length(dimnames(x)[[2]])) dimnames(Q)[[2]][1:mincolx] <- dimnames(x)[[2]][1:mincolx]
if (drop.Int) Q <- Q[,-1, drop=FALSE]
if (normalize=="abs2")
sweep(Q, 2, apply(abs(Q[abs2.rows,,drop=FALSE]), 2, sum)/2, "/")
else Q
}
orthog.construct <- function(y, x, x.rows, normalize=FALSE) {
y.x.lm <- lm(y ~ x[x.rows,, drop=FALSE] - 1, singular.ok=TRUE, qr=TRUE)
assign <- y.x.lm$qr$pivot[seq(y.x.lm$qr$rank)]
beta.tmp <- coef(y.x.lm)
beta <-
if.R(s={
array(beta.tmp, dim(beta.tmp), dimnames(beta.tmp))
},r={
beta.tmp[assign,, drop=FALSE]
})
result <- x[,assign, drop=FALSE] %*% beta
if (normalize=="abs2")
sweep(result, 2, apply(abs(result[x.rows,,drop=FALSE]), 2, sum)/2, "/")
else result
} |
CreateGeoObject_BioGeoBEARS<-function(full.phylo,trimmed.phylo=NULL,ana.events,clado.events,stratified=FALSE){
if(stratified){
if(is.null(trimmed.phylo)){
clado_events_tables<-list()
clado_events_tables[[1]]<-clado.events
smap<-.stratified_BGB_to_tables(full.phylo,clado_events_tables,1)
x<-.CreateBioGeoB_Object_subclade(anc.phylo=full.phylo,subclade.phylo=full.phylo,ana.events=smap$ana.int,clado.events=smap$clado.int,nat.only=FALSE)
return(x)
}else{
clado_events_tables<-list()
clado_events_tables[[1]]<-clado.events
smap<-.stratified_BGB_to_tables(full.phylo,clado_events_tables,1)
x<-.CreateBioGeoB_Object_subclade(anc.phylo=full.phylo,subclade.phylo=trimmed.phylo,ana.events=smap$ana.int,clado.events=smap$clado.int,nat.only=FALSE)
return(x)
}
} else{
if(is.null(trimmed.phylo)){
x<-.CreateBioGeoB_Object_subclade(anc.phylo=full.phylo,subclade.phylo=full.phylo,ana.events=ana.events,clado.events=clado.events,nat.only=FALSE)
return(x)
} else{
x<-.CreateBioGeoB_Object_subclade(anc.phylo=full.phylo,subclade.phylo=trimmed.phylo,ana.events=ana.events,clado.events=clado.events,nat.only=FALSE)
return(x)
}
}
} |
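# Declare variable names used in non-standard evaluation (ggplot2
# aesthetics) so R CMD check does not flag them as undefined globals.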
utils::globalVariables(c("x", "y", "text", "fill", "lab", "ticks", "zero")) |
library(otvPlots)
library(proto)
library(data.table)  # for setDT(), melt() and data.table()
context("Plot Quantiles over Time")
load("../testthat/testData.rda")
setDT(testData)
testData[, weeks := round(date, "weeks")]
testDT = testData[, {
tmp1 = quantile(balance, p = c(.01, .5, .99));
list("p1" = tmp1[1] ,
"p50" = tmp1[2] ,
"p99" = tmp1[3]
)}, by = "weeks"]
testMT = melt(testDT, id.vars = "weeks",
measure.vars = c("p99", "p50","p1"))
globalPct = testData[ , quantile(balance, p = c(.01, .5, .99) ) ]
globalDT = data.table("weeks" = rep(testMT[variable == "p99", "weeks",
with = FALSE][[1]], 3))
globalDT[, c("variable", "value") := list(rep(c("p1_g", "p50_g", "p99_g"),
each = .N/3),
rep(globalPct, each = .N/3))]
testMT = rbindlist(list( testMT, globalDT))
test_that("Plot layers match expectations",{
p <- PlotQuantiles(testMT, myVar = "balance", dateGp = "weeks")
expect_is(p$layers[[1]], "ggproto")
expect_is(p$layers[[1]]$geom, "GeomLine")
expect_is(p$layers[[1]]$stat, "StatIdentity")
})
test_that("Mapping layer contains expected elements", {
p <- PlotQuantiles(testMT, myVar = "balance", dateGp = "weeks")
expect_true( "colour" %in% names(p$mapping))
expect_true( "linetype" %in% names(p$mapping))
expect_true( "group" %in% names(p$mapping))
expect_true( "x" %in% names(p$mapping))
expect_true( "y" %in% names(p$mapping))
expect_length(setdiff(c("colour", "linetype", "group", "x", "y"), names(p$mapping)), 0)
}) |
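# Kernel density estimate using a plug-in bandwidth selector (Eva Herrmann's
# method), computed by the compiled 'plugin' routine; returns an object of
# class c("densityEHpi", "density") with its own print method below.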
plugin.density <- function(x, nout = 201, xout = NULL, na.rm = FALSE)
{
if (!is.numeric(x))
stop("argument must be numeric")
name <- deparse(substitute(x))
x <- as.vector(x)
if (any(x.na <- is.na(x))) {
if (na.rm)
x <- x[!x.na]
else stop("x contains missing values")
}
n <- length(x <- sort(x))
if(is.null(xout)) {
dx <- diff(rx <- range(x))
if(dx < sqrt(.Machine$double.eps)) dx <- mean(abs(rx))/1000
m <- as.integer(nout)
xout <- seq(from=rx[1] - dx/10, to=rx[2] + dx/10, length= m)
} else {
m <- length(xout)
if(is.unsorted(xout)) xout <- sort(xout)
}
r <- .C(plugin,
x = as.double(x), n=n,
z = xout, m=m,
f = double(m),
h = double(1))
structure(list(x= r$z, y= r$f, bw = r$h, n=n,
call = match.call(), data.name = name),
class=c("densityEHpi", "density"))
}
print.densityEHpi <- function(x, digits = getOption("digits"), ...)
{
cat("EvaHerrmann plugin density estimate\n call :",
deparse(x$call),"\n n = ", x$n,
" ; estimated (Gaussian) bandwidth h = ",
format(x$bw, digits = digits),"\n")
str(x[1:2], digits = digits, ...)
invisible(x)
} |
expected <- eval(parse(text="c(13L, 6L, 1L)"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(-3.14159265358979e-05, 3.14159265358979e-05, -0.000314159265358979, 0.000314159265358979, -0.00314159265358979, 0.00314159265358979, -0.0314159265358979, 0.0314159265358979, -0.314159265358979, 0.314159265358979, -3.14159265358979, 3.14159265358979, -31.4159265358979, 31.4159265358979, -314.159265358979, 314.159265358979, -3141.59265358979, 3141.59265358979, -31415.9265358979, 31415.9265358979, -314159.265358979, 314159.265358979, -1e-05, 1e-05, -1e-04, 1e-04, -0.001, 0.001, -0.01, 0.01, -0.1, 0.1), .Dim = c(2L, 16L)), NULL, 0L)"));
.Internal(format.info(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected); |
library(miRetrieve)
library(testthat)
df_merged <- rbind(df_crc, df_panc)
compare_terms <- compare_mir_terms(df_merged,
"miR-21",
title = "Test_title")
compare_terms_top <- compare_mir_terms(df_merged,
"miR-21",
top = 3)
compare_terms_n <- compare_mir_terms(df_merged,
"miR-21",
token = "ngrams",
n = 2)
compare_terms_normalize <- compare_mir_terms(df_merged,
"miR-21",
normalize = FALSE)
test_that("Tests that terms are compared between dataframes", {
expect_s3_class(compare_terms, "ggplot")
expect_equal(compare_terms$labels$title, "Test_title")
expect_lte(length(compare_terms_top$data$word),
length(compare_terms$data$word))
expect_gte(lengths(strsplit(as.character(compare_terms_n$data$word[1]), " ")),
lengths(strsplit(as.character(compare_terms$data$word[1]), " ")))
expect_type(compare_terms$data$no_of_abstract, "double")
expect_type(compare_terms_normalize$data$no_of_abstract, "integer")
})
compare_terms_log2 <- compare_mir_terms_log2(df_merged,
"miR-21",
title = "Test_title")
compare_terms_top_log2 <- compare_mir_terms_log2(df_merged,
"miR-21",
top = 3)
compare_terms_n_log2 <- compare_mir_terms_log2(df_merged,
"miR-21",
token = "ngrams",
n = 2)
test_that("Tests that terms are compared between dataframes on a log2-scale", {
expect_s3_class(compare_terms_log2$plot, "ggplot")
expect_equal(compare_terms_log2$plot$labels$title, "Test_title")
expect_lte(length(compare_terms_top_log2$plot$data$word),
length(compare_terms_log2$plot$data$word))
expect_gte(lengths(strsplit(as.character(compare_terms_n_log2$plot$data$word[1]), " ")),
lengths(strsplit(as.character(compare_terms_log2$plot$data$word[1]), " ")))
})
compare_terms_scatter <- compare_mir_terms_scatter(df_merged,
"miR-21",
title = "Test_title")
compare_terms_top_scatter <- compare_mir_terms_scatter(df_merged,
"miR-21",
top = 3)
test_that("Tests that terms are compared between dataframes in a scatter plot", {
expect_s3_class(compare_terms_scatter, "plotly")
expect_equal(compare_terms_scatter$x$layout$title$text,
"Test_title")
})
compare_terms_unique <- compare_mir_terms_unique(df_merged,
"miR-21",
title = "Test_title")
compare_terms_top_unique <- compare_mir_terms_unique(df_merged,
"miR-21",
top = 3)
compare_terms_n_unique <- compare_mir_terms_unique(df_merged,
"miR-21",
token = "ngrams",
n = 2)
compare_terms_normalize_unique <- compare_mir_terms_unique(df_merged,
"miR-21",
normalize = FALSE)
test_that("Tests that unique terms are compared between dataframes", {
expect_s3_class(compare_terms_unique, "ggplot")
expect_equal(compare_terms_unique$labels$title, "Test_title")
expect_lte(length(compare_terms_top_unique$data$word),
length(compare_terms_unique$data$word))
expect_gte(lengths(strsplit(as.character(compare_terms_n_unique$data$word[1]), " ")),
lengths(strsplit(as.character(compare_terms_unique$data$word[1]), " ")))
expect_type(compare_terms_unique$data$no_per_topic, "double")
expect_type(compare_terms_normalize_unique$data$no_per_topic, "integer")
}) |
library(testthat)
library(recipes)
n <- 20
set.seed(752)
as_fact <- data.frame(
numbers = rnorm(n),
fact = factor(sample(letters[1:3], n, replace = TRUE)),
ord = factor(sample(LETTERS[22:26], n, replace = TRUE),
ordered = TRUE)
)
as_str <- as_fact
as_str$fact <- as.character(as_str$fact)
as_str$ord <- as.character(as_str$ord)
test_that('strings_as_factors = FALSE', {
rec1 <- recipe(~ ., data = as_fact) %>%
step_center(numbers)
rec1 <- prep(rec1, training = as_fact,
strings_as_factors = FALSE, verbose = FALSE)
rec1_as_fact <- bake(rec1, new_data = as_fact)
expect_warning(rec1_as_str <- bake(rec1, new_data = as_str))
expect_equal(as_fact$fact, rec1_as_fact$fact)
expect_equal(as_fact$ord, rec1_as_fact$ord)
expect_equal(as_str$fact, rec1_as_str$fact)
expect_equal(as_str$ord, rec1_as_str$ord)
})
test_that('strings_as_factors = TRUE', {
rec2 <- recipe(~ ., data = as_fact) %>%
step_center(numbers)
rec2 <- prep(rec2, training = as_fact,
strings_as_factors = TRUE, verbose = FALSE)
rec2_as_fact <- bake(rec2, new_data = as_fact)
expect_warning(rec2_as_str <- bake(rec2, new_data = as_str))
expect_equal(as_fact$fact, rec2_as_fact$fact)
expect_equal(as_fact$ord, rec2_as_fact$ord)
expect_equal(as_fact$fact, rec2_as_str$fact)
expect_equal(as_fact$ord, rec2_as_str$ord)
}) |
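# Draws a smooth spline through the supplied points (via GEOmap::getspline)
# and finishes it with either plain arrowheads (arrow == 1; both ends when
# code == 3) or a filled fancy arrowhead (arrow == 2).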
spline.arrow<-function(x,y=0,kdiv=20, arrow=1, length=.2, col="black",
thick = 0.01, headlength = 0.2, headthick = 0.1, code=2, ...)
{
if(is.list(x))
{
y = x$y
x = x$x
}
G = GEOmap::getspline(x, y, kdiv=kdiv)
lines(G, col = col, ...)
n = length(G$x)
if(arrow==1)
{
arrows(G$x[n-1], G$y[n-1], G$x[n], G$y[n], length=length, col = col)
if(code==3)
{
arrows(G$x[2], G$y[2], G$x[1], G$y[1], length=length, col = col)
}
}
if(arrow==2)
{
fancyarrows(G$x[n-1], G$y[n-1], G$x[n], G$y[n],
thick =thick ,
headlength = headlength,
headthick =headthick,
col = col , border = col )
}
invisible(G)
} |
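# Normalizes column names of data returned by the Members' Names data
# platform: strips XML prefixes and separators, lower-cases to snake_case,
# and optionally re-cases to camelCase or period.case via 'tidy_style'.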
parlitools_tidy <- function(df, tidy_style) {
names(df) <- gsub("\\.", "_", names(df))
names(df) <- gsub("([[:lower:]])([[:upper:]])", "\\1_\\2", names(df))
names(df) <- gsub("__", "_", names(df))
names(df) <- gsub("^_", "", names(df))
names(df) <- tolower(names(df))
names(df)[names(df) == "df_about"] <- "about"
names(df)[names(df) == "df_value"] <- "value"
names(df) <- gsub("@", "", names(df))
names(df) <- gsub("
names(df) <- gsub("\\.\\.", "\\.", names(df))
names(df) <- gsub("^Members\\.Member\\.", "", names(df))
names(df) <- gsub("^BasicDetails\\.", "", names(df))
names(df) <- gsub("^BiographyEntries\\.", "", names(df))
names(df) <- gsub("^Committees\\.", "", names(df))
names(df) <- gsub("^Addresses\\.", "", names(df))
names(df) <- gsub("^Constituencies\\.", "", names(df))
names(df) <- gsub("^ElectionsContested\\.", "", names(df))
names(df) <- gsub("^Edfperiences\\.", "", names(df))
names(df) <- gsub("^GovernmentPosts\\.", "", names(df))
names(df) <- gsub("^Honours\\.", "", names(df))
names(df) <- gsub("^HouseMemberships\\.", "", names(df))
names(df) <- gsub("^Statuses\\.", "", names(df))
names(df) <- gsub("^Staff\\.", "", names(df))
names(df) <- gsub(
"^Interests\\.Category\\.Interest\\.",
"Interest\\.", names(df)
)
names(df) <- gsub("^Interests\\.Category\\.", "Interest\\.", names(df))
names(df) <- gsub("^MaidenSpeeches\\.", "", names(df))
names(df) <- gsub("^OppositionPosts\\.", "", names(df))
names(df) <- gsub("^Parties\\.", "", names(df))
names(df) <- gsub("^PreferredNames\\.", "", names(df))
names(df) <- gsub("^ParliamentaryPosts\\.", "", names(df))
names(df) <- gsub("^OtherParliaments\\.", "", names(df))
names(df) <- gsub("^ParliamentaryPosts\\.", "", names(df))
names(df) <- gsub(
"^Post.PostHolders.PostHolder.Member",
"PostHolder", names(df)
)
names(df) <- gsub("^Post\\.PostHolders\\.", "", names(df))
names(df) <- gsub("xsi:nil", "nil", names(df))
names(df) <- gsub("xmlns:xsi", "label", names(df))
names(df) <- gsub("\\.", "_", names(df))
names(df) <- gsub("([[:lower:]])([[:upper:]])", "\\1_\\2", names(df))
names(df) <- gsub("__", "_", names(df))
names(df) <- gsub(" ", "_", names(df))
names(df) <- tolower(names(df))
names(df)[names(df) == "df_house"] <- "house"
if (tidy_style == "camelCase") {
names(df) <- gsub("(^|[^[:alnum:]])([[:alnum:]])", "\\U\\2",
names(df),
perl = TRUE
)
substr(names(df), 1, 1) <- tolower(substr(names(df), 1, 1))
} else if (tidy_style == "period.case") {
names(df) <- gsub("_", ".", names(df))
}
df
} |
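# eseq() and eseqa() build Eulerian tours on the complete graph K_n,
# returned as vertex sequences; kntour_drop() removes the highest-numbered
# vertex from a tour on an odd number of vertices, and kntour_add() extends
# such a tour by one vertex.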
eseq <- function(n) {
  if (n %% 2 != 0) {
    e <- 1
    for (i in seq(3, to = n, by = 2)) {
      alt <- rep((i - 1):i, length.out = i - 2)
      new <- as.vector(rbind(1:(i - 2), alt))
      new <- c(new, i, 1)
      e <- c(e, new[-1])
    }
  } else {
    e <- 1:2
    if (n >= 4) {
      for (i in seq(4, to = n, by = 2)) {
        alt <- rep((i - 1):i, length.out = i - 2)
        new <- as.vector(rbind(alt, 1:(i - 2)))
        new <- c(new, i - 1, i)
        e <- c(e, new)
      }
    }
  }
  return(e)
}
eseqa <-
function(n){
if (n %% 2 != 0) {
m <- (n-1)/2
f <- m*(m+1)/2
e <- cumsum(0:(m-1)) + rep(seq(0, by=f, length.out=n),each=m)
e <- c(e%% n +1 ,1)
}
else {
e <- eseqa(n+1)
e <- e[e!=(n+1)]
e <- e[-length(e)]
}
return(e)
}
kntour_drop <- function(e) {
  n <- max(e)
  if (n %% 2 != 0) {
    m <- length(e)
    if (n == e[1]) {
      x <- e[e != n]
    } else if (n == e[m - 1]) {
      x <- e[-m]
      x <- x[x != n]
    } else if (n == e[2]) {
      x <- e[-1]
      x <- x[x != n]
    } else {
      x <- e[-m]
      m <- length(x)
      i <- match(n, x)
      x <- x[c(i:m, 1:(i - 1))]
      x <- x[x != n]
    }
    return(x)
  } else {
    stop("Argument must be an euler tour on 1..n for odd n")
  }
}
kntour_add <- function(e) {
  n <- max(e)
  if (n %% 2 != 0) {
    new <- 1:n
    new <- new[new != e[1]]
    enew <- n + 1
    for (j in seq(2, n - 1, 2)) enew <- c(enew, new[(j - 1):j], n + 1)
    return(c(e, enew))
  } else {
    stop("Argument must be an euler tour on 1..n for odd n")
  }
} |
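# EPD-based extreme value estimation under random right censoring:
# .p_hat_fun estimates the proportion of uncensored observations among the
# k largest, .ES/.cEs compute (censored) moment statistics, and cEPD,
# cProbEPD and cReturnEPD estimate the EVI, small exceedance probabilities
# and large return periods.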
.p_hat_fun <- function(delta.n) {
n <- length(delta.n)
K <- 1:(n-1)
p.hat <- cumsum(delta.n[n-K+1])/K
return(p.hat)
}
.ES <- function(s, Z.n) {
n <- length(Z.n)
output <- numeric(n-1)
for (k in 1:(n-1)) {
v <- (Z.n[n-(1:k)+1]/Z.n[n-k])^(s[k])
output[k] <- mean(v)
}
return(output)
}
.cEs <- function(s, Z.n, delta.n) {
n <- length(Z.n)
output <- numeric(n-1)
for (k in 1:(n-1)) {
v <- delta.n[n-(1:k)+1] * (Z.n[n-(1:k)+1]/Z.n[n-k])^(s[k])
output[k] <- mean(v)
}
return(output)
}
cEPD <- function(data, censored, rho = -1, beta = NULL, logk = FALSE, plot = FALSE, add = FALSE,
main = "EPD estimates of the EVI", ...) {
.checkInput(data)
censored <- .checkCensored(censored, length(data))
n <- length(data)
k <- n-1
s <- sort(data, index.return = TRUE)
Z.n <- s$x
delta.n <- !(censored[s$ix])
HillZ <- Hill(Z.n)$gamma
phat <- .p_hat_fun(delta.n)
nrho <- length(rho)
if (is.null(beta)) {
beta <- matrix(0, n-1, nrho)
    if (all(rho > 0) & nrho == 1) {
      # estimate rho from the data; beta then needs one column per estimate
      rho <- .rhoEst(data, alpha=1, tau=rho)$rho
      nrho <- length(rho)
      beta <- matrix(0, n-1, nrho)
      for (j in 1:nrho) {
        beta[,j] <- -rho[j]/HillZ
      }
} else if (all(rho < 0)) {
for (j in 1:nrho) {
beta[,j] <- -rho[j]/HillZ
}
} else {
stop("rho should be a single positive number or a vector (of length >=1) of negative numbers.")
}
} else {
nrho <- length(beta)
if (length(beta) == 1) {
beta <- matrix(beta, n-1, nrho)
} else {
beta <- matrix(rep(beta, n-1), ncol=length(beta), byrow=TRUE)
}
}
gamma1 <- matrix(0, n-1, nrho)
kappa1 <- matrix(0, n-1, nrho)
Delta <- matrix(0, n-1, nrho)
for (j in 1:nrho) {
K <- 1:k
D <- - (beta[K,j]^4 * HillZ[K]^3) / ( (1+HillZ[K]*beta[K,j])^2 * (1+2*HillZ[K]*beta[K,j]) )
Es <- .ES(-beta[,j], Z.n)
cEs <- .cEs(-beta[,j], Z.n, delta.n)
kappa1[,j] <- (1 - Es[K] - beta[K,j] * (HillZ[K] / phat[K]) * cEs[K]) / D[K]
kappa1[,j] <- pmax(kappa1[,j], pmax(-1, -1/beta[,j])+0.001)
Delta[,j] <- (kappa1[,j]*(1-Es))/phat
Delta[,j] <- pmin(0, Delta[,j])
gamma1[,j] <- (HillZ/phat) + Delta[,j]
gamma1[gamma1[,j] <= 0, j] <- 0.001
}
if (logk) {
.plotfun(log(K), gamma1[,1], type="l", xlab="log(k)", ylab="gamma", main=main, plot=plot, add=add, ...)
} else {
.plotfun(K, gamma1[,1], type="l", xlab="k", ylab="gamma", main=main, plot=plot, add=add, ...)
}
if (nrho == 1) {
gamma1 <- as.vector(gamma1)
kappa1 <- as.vector(kappa1)
beta <- as.vector(beta)
Delta <- as.vector(Delta)
} else if (plot | add) {
for (j in 2:nrho) {
lines(K, gamma1[,j], lty=j)
}
}
.output(list(k=K, gamma1=gamma1, kappa1=kappa1, beta=beta, Delta=Delta), plot=plot, add=add)
}
cProbEPD <- function(data, censored, gamma1, kappa1, beta, q, plot = FALSE, add=FALSE,
main = "Estimates of small exceedance probability", ...) {
.checkInput(data)
censored <- .checkCensored(censored, length(data))
if (length(q) > 1) {
stop("q should be a numeric of length 1.")
}
s <- sort(data, index.return = TRUE)
X <- as.numeric(s$x)
sortix <- s$ix
n <- length(X)
prob <- rep(NA, n)
K <- 1:(n-1)
K2 <- K[!is.na(gamma1[K])]
km <- KaplanMeier(X[n-K2], data=X, censored = censored[sortix])$surv
prob[K2] <- km * (1-pepd(q/X[n-K2], gamma=gamma1[K2], kappa=kappa1[K2], tau=-beta[K2]))
prob[prob < 0 | prob > 1] <- NA
.plotfun(K, prob[K], type="l", xlab="k", ylab="1-F(x)", main=main, plot=plot, add=add, ...)
.output(list(k=K, P=prob[K], q=q), plot=plot, add=add)
}
cReturnEPD <- function(data, censored, gamma1, kappa1, beta, q, plot = FALSE, add = FALSE,
main = "Estimates of large return period", ...) {
.checkInput(data)
censored <- .checkCensored(censored, length(data))
if (length(q) > 1) {
stop("q should be a numeric of length 1.")
}
s <- sort(data, index.return = TRUE)
X <- as.numeric(s$x)
sortix <- s$ix
n <- length(X)
R <- rep(NA, n)
K <- 1:(n-1)
K2 <- K[!is.na(gamma1[K])]
km <- KaplanMeier(X[n-K2], data=X, censored = censored[sortix])$surv
R[K2] <- 1 / (km * (1-pepd(q/X[n-K2], gamma=gamma1[K2], kappa=kappa1[K2], tau=-beta[K2])))
R[R < 1] <- NA
.plotfun(K, R[K], type="l", xlab="k", ylab="1/(1-F(x))", main=main, plot=plot, add=add, ...)
.output(list(k=K, R=R[K], q=q), plot=plot, add=add)
} |
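# Item fit for the Continuous Response Model: examinees are grouped by
# estimated theta (Hmisc::cut2), observed group means are compared with
# model-expected item scores, and standardized residuals plus empirical
# response surfaces (lattice::wireframe) are returned.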
fitCRM <-
function(data,ipar,est.thetas,max.item,group=20) {
n=ncol(data)
N=nrow(data)
	if(!is.data.frame(data)) stop("The input response data is not a data frame.
	                              Please use as.data.frame() and convert your response data to a data frame object before the analysis")
if(dim(data)[2]!=length(max.item)) stop("The number of columns in the data is not equal to the length of max.item vector")
if(dim(data)[2]!=nrow(ipar)) stop("The number of columns in the data is not equal to the number of rows in the item parameter matrix")
for(i in 1:n) {
if(max(na.omit(data[,i]))> max.item[i]) stop("The column ",i," has values higher than the maximum available score in the
user specified max.item vector. Please check and clean your data.")
}
	if(!inherits(est.thetas, "CRMtheta")) stop("The estimated thetas are not an object created by EstCRMperson()")
data$theta <- est.thetas$thetas[,2]
data <- data[order(data$theta),]
data$groups <- cut2(data$theta,g=group)
labels <- unique(data$groups)
expected <- function(t,item) {
prob <- function(t,x) {
v1=ipar[item,1]*(t-ipar[item,2]-((1/ipar[item,3])*log((x-.5)/(max.item[item]-x+.5))))
v2=ipar[item,1]*(t-ipar[item,2]-((1/ipar[item,3])*log((x+.5)/(max.item[item]-x-.5))))
integrate(dnorm,v2,v1)$value
}
all.scores <- 1:(max.item[item]-1)
all.probs <- c()
for(i in 1:length(all.scores)) { all.probs[i]=prob(t,all.scores[i]) }
sum(all.scores*all.probs)
}
fitindex <- vector("list",n)
median.thetas.list <- vector("list",n)
mean.obs.score.list <- vector("list",n)
sd.obs.score.list <- vector("list",n)
exp.score.list <- vector("list",n)
for(item in 1:n) {
median.thetas <- c()
exp.score <- c()
mean.obs.score <- c()
sd.obs.score <- c()
for(i in 1:length(labels)) {
median.thetas[i]= median(data[which(data$groups==labels[i]),]$theta,na.rm=TRUE)
exp.score[i]= expected(median.thetas[i],item)
mean.obs.score[i]= mean(data[which(data$groups==labels[i]),item],na.rm=TRUE)
sd.obs.score[i]= sqrt(var(data[which(data$groups==labels[i]),item],na.rm=TRUE)/50)
}
median.thetas.list[[item]] <- median.thetas
mean.obs.score.list[[item]] <- mean.obs.score
sd.obs.score.list[[item]] <- sd.obs.score
exp.score.list[[item]] <- exp.score
fitindex[[item]]<-(mean.obs.score - exp.score)/sd.obs.score
}
fits <- as.data.frame(matrix(nrow=length(labels),ncol=n+1))
colnames(fits) <- c("Interval",colnames(data)[1:n])
fits[,1]=labels
for(i in 2:(n+1)) { fits[,i]=fitindex[[i-1]] }
emp.plots <- vector("list",nrow(ipar))
for(u in 1:nrow(ipar)) {
emp.prop <- cbind(expand.grid(median.thetas.list[[u]],1:max.item[u]),labels)
emp.prop$prob <- NA
for(i in 1:dim(emp.prop)[1]) {
emp.prop[i,4]=length(which(round(data[which(data$groups==emp.prop[i,3]),u],0)==emp.prop[i,2]))/length(which(data$groups==emp.prop[i,3]))
}
emp.plots [[u]] <- wireframe(emp.prop[,4]~emp.prop[,1]*emp.prop[,2],xlab="Ability Scale",ylab="Response Scale",
zlab="Prob",zlim=c(0,max(emp.prop[,4])+.05),screen=list(z =-50, x = -70),
scales =list(arrows = FALSE,tck=.5),main=paste("Category Response Curves - Item ",u,sep=""))
}
CRM.fit <- list(fit.stat=fits,emp.irf=emp.plots)
return(CRM.fit)
} |
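# Inserts all-NA rows for missing time points so the date column of 'dat'
# is continuous at the requested granularity; relies on lubridate helpers
# (ymd, year, seconds, minutes, hours, days, weeks, years).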
fix_date_continuity = function(dat, index, form=ymd, granularity='days'){
granularity = match.arg(granularity, c('secs', 'mins', 'hours', 'days', 'weeks', 'years'))
ordered_dates = sort(form(dat[,index]))
l = length(ordered_dates)
if(granularity != 'years'){
diffs = difftime(ordered_dates[-1], ordered_dates[-l], units=granularity)
}else{
diffs = year(ordered_dates[-1]) - year(ordered_dates[-l])
}
  addfunc = switch(granularity,
    secs=seconds, mins=minutes, hours=hours, days=days, weeks=weeks, years=years)
  add_after = which(diffs > 1)
  how_many = as.numeric(diffs[add_after])-1
  # SIMPLIFY = FALSE keeps a list even for a single gap, so do.call(c, ...)
  # below always concatenates the generated dates correctly
  new_dates = mapply(function(x,y){
    seq(ordered_dates[x] + addfunc(1), by=granularity, length.out=y)
  }, add_after, how_many, SIMPLIFY=FALSE)
  return_dat = dat[order(form(dat[,index])),]
  return_dat[,index] = ordered_dates
insert = matrix(NA, nrow=sum(how_many), ncol=ncol(dat))
insert = as.data.frame(insert)
insert[,index] = do.call(c, new_dates)
names(insert) = names(dat)
return_dat = rbind(return_dat, insert)
return_dat = return_dat[order(return_dat[,index]),]
rownames(return_dat) = NULL
return_dat
} |
expected <- eval(parse(text="structure(c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE), .Dim = c(12L, 2L), .Dimnames = list(c(\"1\", \"3\", \"5\", \"7\", \"9\", \"11\", \"13\", \"15\", \"17\", \"19\", \"21\", \"23\"), c(\"X\", \"M\")))"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(12L, 2L), .Dimnames = list(c(\"1\", \"3\", \"5\", \"7\", \"9\", \"11\", \"13\", \"15\", \"17\", \"19\", \"21\", \"23\"), c(\"X\", \"M\"))), 0.001)"));
do.call(`>`, argv);
}, o=expected); |
library("arules")
library("testthat")
set.seed(20070611)
context("itemMatrix")
m <- matrix(runif(50)>0.8, ncol=5)
dn <- list(paste("trans", seq(nrow(m)), sep=""),
paste("item", seq(ncol(m)), sep=""))
dimnames(m) <- dn
i <- as(m, "itemMatrix")
expect_identical(length(i), nrow(m))
expect_identical(dim(i), dim(m))
expect_identical(as(i[1:5], "matrix"), m[1:5,])
expect_identical(as(i[1:5,1:2], "matrix"), m[1:5, 1:2])
expect_identical(as(i[-1,-1], "matrix"), m[-1,-1])
expect_identical(as(i[rep(c(T,F), nrow(m)/2), c(T,F,F,F,T)], "matrix"),
m[rep(c(T,F), nrow(m)/2), c(T,F,F,F,T)])
expect_identical(dimnames(i), dn)
expect_identical(dimnames(i[c("trans1", "trans10"), c("item5", "item4")]),
dimnames(m[c("trans1", "trans10"), c("item5", "item4")]))
j <- i
dn2 <- list(paste("trans", LETTERS[1:nrow(j)], sep = ""),
paste("items", LETTERS[1:ncol(j)], sep = ""))
dimnames(j) <- dn2
expect_identical(dimnames(j), dn2)
expect_error(i[,c(1,1)])
i2 <- i3 <- i
itemsetInfo(i2) <- data.frame(matrix(nrow = length(i2), ncol = 0))
expect_equal(itemsetInfo(c(i, i2)),
data.frame(itemsetID = c(
itemsetInfo(i)$itemsetID,
rep(NA, times = length(i2))
), stringsAsFactors = FALSE))
expect_equal(itemsetInfo(c(i2, i)),
data.frame(itemsetID = c(
rep(NA, times = length(i2)),
itemsetInfo(i)$itemsetID
), stringsAsFactors = FALSE))
itemsetInfo(i3)$moreInfo <- sample(1:2, length(i3), replace = TRUE)
expect_equal(itemsetInfo(c(i, i3)), data.frame(
itemsetID = c(itemsetInfo(i)$itemsetID, itemsetInfo(i3)$itemsetID),
moreInfo = c(rep(NA, times = length(i)), itemsetInfo(i3)$moreInfo),
stringsAsFactors = FALSE))
expect_equal(itemsetInfo(c(i3, i)), data.frame(
itemsetID = c(itemsetInfo(i3)$itemsetID, itemsetInfo(i)$itemsetID),
moreInfo = c(itemsetInfo(i3)$moreInfo, rep(NA, times = length(i))),
stringsAsFactors = FALSE))
expect_equal(itemsetInfo(c(i, i[0])), itemsetInfo(i))
expect_equal(itemsetInfo(c(i[0], i3)), itemsetInfo(i3))
ngc <- as(i, "ngCMatrix")
expect_true(all(t(m)==ngc))
expect_identical(dimnames(ngc),dimnames(t(m)))
expect_identical(i, as(ngc, "itemMatrix"))
expect_equivalent(crossTable(i), crossprod(as(i, "matrix"))) |
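# Path layer for mapdeck: mapdeckPathDependency() registers the JavaScript
# dependency and add_path() renders LINESTRING data (sf, encoded polylines
# or interleaved coordinates) as a deck.gl path layer.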
mapdeckPathDependency <- function() {
list(
createHtmlDependency(
name = "path",
version = "1.0.0",
src = system.file("htmlwidgets/lib/path", package = "mapdeck"),
script = c("path.js"),
all_files = FALSE
)
)
}
add_path <- function(
map,
data = get_map_data(map),
polyline = NULL,
stroke_colour = NULL,
stroke_width = NULL,
stroke_opacity = NULL,
dash_size = NULL,
dash_gap = NULL,
offset = NULL,
width_units = c("meters", "common","pixels"),
width_min_pixels = NULL,
width_max_pixels = NULL,
width_scale = 1,
tooltip = NULL,
billboard = FALSE,
layer_id = NULL,
id = NULL,
auto_highlight = FALSE,
highlight_colour = "
palette = "viridis",
na_colour = "
legend = FALSE,
legend_options = NULL,
legend_format = NULL,
update_view = TRUE,
focus_layer = FALSE,
digits = 6,
transitions = NULL,
brush_radius = NULL
) {
l <- list()
width_units <- match.arg(width_units)
use_dashes <- !is.null( dash_size ) | !is.null( dash_gap )
l[["polyline"]] <- force( polyline )
l[["stroke_colour"]] <- force( stroke_colour)
l[["stroke_width"]] <- force( stroke_width )
l[["stroke_opacity"]] <- resolve_opacity( stroke_opacity )
l[["dash_size"]] <- force(dash_size)
l[["dash_gap"]] <- force(dash_gap)
l[["offset"]] <- force(offset)
l[["tooltip"]] <- force(tooltip)
l[["id"]] <- force(id)
l[["na_colour"]] <- force(na_colour)
l <- resolve_palette( l, palette )
l <- resolve_legend( l, legend )
l <- resolve_legend_options( l, legend_options )
bbox <- init_bbox()
layer_id <- layerId(layer_id, "path")
checkHexAlpha( highlight_colour )
update_view <- force( update_view )
focus_layer <- force( focus_layer )
use_offset <- !is.null( offset )
use_dash <- !is.null( dash_size ) && !is.null( dash_gap )
map <- addDependency(map, mapdeckPathDependency())
l <- resolve_binary_data( data, l )
if( !is.null(l[["bbox"]] ) ) {
bbox <- l[["bbox"]]
l[["bbox"]] <- NULL
}
if ( !is.null(l[["data"]]) ) {
data <- l[["data"]]
l[["data"]] <- NULL
}
tp <- l[["data_type"]]
l[["data_type"]] <- NULL
jsfunc <- "add_path_geo"
if ( tp == "sf" ) {
geometry_column <- c( "geometry" )
list_cols <- list_columns( data, geometry_column )
shape <- rcpp_path_interleaved( data, l, list_cols, digits, "path" )
} else if ( tp == "sfencoded" ) {
jsfunc <- "add_path_polyline"
geometry_column <- "polyline"
shape <- rcpp_path_polyline( data, l, geometry_column, "path" )
} else if ( tp == "interleaved" ) {
shape <- list(
data = jsonify::to_json(
data
, unbox = FALSE
, digits = digits
, factors_as_string = TRUE
, numeric_dates = FALSE
, by = "column"
)
)
}
legend_type <- "rgb"
js_transitions <- resolve_transitions( transitions, "path" )
if( inherits( legend, "json" ) ) {
shape[["legend"]] <- legend
legend_type <- "hex"
} else {
shape[["legend"]] <- resolve_legend_format( shape[["legend"]], legend_format )
}
invoke_method(
map, jsfunc, map_type( map ), shape, layer_id, auto_highlight,
highlight_colour, bbox, update_view, focus_layer,
js_transitions, billboard, brush_radius, width_units, width_scale, width_min_pixels,
width_max_pixels, use_offset, use_dash, legend_type
)
}
resolve_binary_data <- function( data, l ) UseMethod("resolve_binary_data")
resolve_binary_data.interleaved <- function( data, l ) {
l[["bbox"]] <- get_box( data, l )
l[["data_type"]] <- "interleaved"
return( l )
}
resolve_binary_data.sf <- function( data, l ) {
sfc_col <- attr( data, "sf_column" )
l[["geometry"]] <- sfc_col
cls <- attr( data[[ sfc_col ]], "class" )
if( is.null( cls ) ) {
stop("mapdeck - invalid sf object; have you loaded library(sf)?")
}
l[["bbox"]] <- get_box( data, l )
l[["data_type"]] <- "sf"
return(l)
}
resolve_binary_data.default <- function( data, l ) {
return( resolve_data( data, l, "LINESTRING" ) )
}
clear_path <- function( map, layer_id = NULL, update_view = TRUE ) {
layer_id <- layerId(layer_id, "path")
invoke_method(map, "md_layer_clear", map_type( map ), layer_id, "path", update_view )
} |
geojson_write <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
UseMethod("geojson_write")
}
geojson_write.SpatialPolygons <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPolygonsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialPolygons"))
}
geojson_write.SpatialPolygonsDataFrame <- function(input, lat = NULL, lon = NULL,
geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(input, file, precision = precision, convert_wgs84 = convert_wgs84,
crs = crs, ...)
return(geo_file(file, "SpatialPolygonsDataFrame"))
}
geojson_write.SpatialPoints <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPointsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialPoints"))
}
geojson_write.SpatialPointsDataFrame <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(input, file, precision = precision, convert_wgs84 = convert_wgs84,
crs = crs, ...)
return(geo_file(file, "SpatialPointsDataFrame"))
}
geojson_write.SpatialLines <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialLinesDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialLines"))
}
geojson_write.SpatialLinesDataFrame <- function(input, lat = NULL, lon = NULL,
geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(input, file, precision = precision, convert_wgs84 = convert_wgs84,
crs = crs, ...)
return(geo_file(file, "SpatialLinesDataFrame"))
}
geojson_write.SpatialGrid <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
  size <- prod(input@grid@cells.dim)
input <- SpatialGridDataFrame(input, data.frame(val = rep(1, size)))
write_geojson(input, file, precision = precision, convert_wgs84 = convert_wgs84,
crs = crs, ...)
return(geo_file(file, "SpatialGrid"))
}
geojson_write.SpatialGridDataFrame <- function(input, lat = NULL, lon = NULL,
geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPointsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialGridDataFrame"))
}
geojson_write.SpatialPixels <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPointsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialPixels"))
}
geojson_write.SpatialPixelsDataFrame <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPointsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialPixelsDataFrame"))
}
geojson_write.SpatialRings <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPolygonsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialRings"))
}
geojson_write.SpatialRingsDataFrame <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
write_geojson(as(input, "SpatialPolygonsDataFrame"), file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(geo_file(file, "SpatialRingsDataFrame"))
}
geojson_write.SpatialCollections <- function(input, lat = NULL, lon = NULL,
geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
ptfile <- iter_spatialcoll(input@pointobj, file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
lfile <- iter_spatialcoll(input@lineobj, file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
rfile <- iter_spatialcoll(input@ringobj, file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
pyfile <- iter_spatialcoll(input@polyobj, file, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
return(structure(list(ptfile, lfile, rfile, pyfile), class = "spatialcoll"))
}
iter_spatialcoll <- function(z, file, precision = NULL, convert_wgs84 = FALSE,
crs = NULL, ...) {
wfile <- sprintf("%s/%s_%s", dirname(file), class(z)[1], basename(file))
if (!is.null(z)) {
geojson_write(z, file = wfile, precision = precision,
convert_wgs84 = convert_wgs84, crs = crs, ...)
}
}
geojson_write.sf <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
geojson_write(geojson_list(input, convert_wgs84 = convert_wgs84, crs = crs), file = file, overwrite = overwrite, ...)
}
geojson_write.sfc <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
geojson_write(geojson_list(input, convert_wgs84 = convert_wgs84, crs = crs),
file = file, overwrite = overwrite, ...)
}
geojson_write.sfg <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL,
convert_wgs84 = FALSE, crs = NULL, ...) {
geojson_write(geojson_list(input, convert_wgs84 = convert_wgs84, crs = crs),
file = file, overwrite = overwrite, ...)
}
geojson_write.numeric <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL, ...) {
if (geometry == "point") {
res <- df_to_SpatialPointsDataFrame(num2df(input, lat, lon), lon = lon, lat = lat)
} else {
res <- df_to_SpatialPolygonsDataFrame(input)
}
write_geojson(res, file, precision = precision, ...)
return(geo_file(file, "numeric"))
}
num2df <- function(x, lat, lon) {
if (is.null(lat)) lat <- "lat"
if (is.null(lon)) lon <- "lon"
stats::setNames(data.frame(rbind(x), stringsAsFactors = FALSE, row.names = NULL), c(lat, lon))
}
geojson_write.data.frame <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson", overwrite = TRUE,
precision = NULL, ...) {
tmp <- guess_latlon(names(input), lat, lon)
if (geometry == "point") {
res <- df_to_SpatialPointsDataFrame(input, tmp$lon, tmp$lat)
} else {
res <- df_to_SpatialPolygonsDataFrame2(input, tmp$lat, tmp$lon, group)
}
write_geojson(res, file, precision = precision, overwrite = overwrite, ...)
return(geo_file(file, "data.frame"))
}
geojson_write.list <- function(input, lat = NULL, lon = NULL, geometry="point",
group = NULL, file = "myfile.geojson",
overwrite = TRUE, precision = NULL, ...) {
if (geometry == "polygon") lint_polygon_list(input)
if (is.named(input)) {
tmp <- guess_latlon(names(input[[1]]), lat, lon)
res <- list_to_geo_list(input, tmp$lat, tmp$lon, geometry)
list_to_geojson(res, lat = tmp$lat, lon = tmp$lon, geometry = geometry, ...)
} else {
if (geometry == "point") {
res <- list_to_SpatialPointsDataFrame(input, lon = lon, lat = lat)
} else {
res <- list_to_SpatialPolygonsDataFrame(input, lat, lon)
}
write_geojson(res, file, precision = precision, ...)
}
return(geo_file(file, "list"))
}
geojson_write.geo_list <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson", overwrite = TRUE, ...) {
if (!overwrite && file.exists(file)) {
stop(file, " already exists and overwrite = FALSE", call. = FALSE)
}
cat(as.json(input, ...), file = file)
message("Success! File is at ", file)
return(geo_file(file, "geo_list"))
}
geojson_write.json <- function(input, lat = NULL, lon = NULL, geometry = "point",
group = NULL, file = "myfile.geojson", overwrite = TRUE,
precision = NULL, ...) {
if (!overwrite && file.exists(file)) {
stop(file, " already exists and overwrite = FALSE", call. = FALSE)
}
if (is.null(precision)) precision <- 4
cat(toJSON(jsonlite::fromJSON(input), auto_unbox = TRUE, digits = precision, ...),
file = file)
message("Success! File is at ", file)
return(geo_file(file, "json"))
}
print.spatialcoll <- function(x, ...) {
cat("<spatial collection>", "\n", sep = "")
x <- tg_compact(x)
for (i in seq_along(x)) {
cat(" <geojson>", "\n", sep = "")
cat(" Path: ", x[[i]]$path, "\n", sep = "")
cat(" From class: ", x[[i]]$type, "\n", sep = "")
}
}
geo_file <- function(path, type) {
structure(list(path = path, type = type), class = "geojson_file")
}
print.geojson_file <- function(x, ...) {
cat("<geojson-file>", "\n", sep = "")
cat(" Path: ", x$path, "\n", sep = "")
cat(" From class: ", x$type, "\n", sep = "")
} |
library(knotR)
filename <- "8_12.svg"
a <- reader(filename)
sym812 <- symmetry_object(a,Mver=NULL,xver=5)
a <- symmetrize(a,sym812)
ou812 <- matrix(c(
18,2,
3,16,
13,5,
6,10,
22,8,
9,21,
20,14,
15,19
),ncol=2,byrow=TRUE)
jj <-
knotoptim(filename,
symobj = sym812,
ou = ou812,
prob = 0,
iterlim=1000,print.level=2,hessian=FALSE
)
write_svg(jj, filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename)) |
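# Evaluates the log-likelihood of a fitted secr model over a grid of two
# beta parameters (on the real scale when realscale = TRUE) and draws a
# contour plot, marking the fitted values with a cross; given a list of
# fits, it recurses over each.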
LLsurface.secr <- function (object, betapar = c('g0', 'sigma'), xval = NULL, yval = NULL,
centre = NULL, realscale = TRUE, plot = TRUE, plotfitted = TRUE, ncores = NULL, ...) {
if (inherits(object, 'list')) {
temp <- list()
nsecr <- length(object)
for (i in 1:nsecr) {
temp[[i]] <- LLsurface.secr (object[[i]], betapar = betapar, xval=xval, yval=yval,
centre = centre, realscale = realscale, plot = plot, plotfitted = plotfitted,
ncores = ncores, ...)
}
invisible(temp)
}
else {
if (is.null(centre))
centre <- t(coef(object))['beta',]
else {
if (is.null(names(centre)))
names(centre) <- object$betanames
else
if (any(names(centre) != object$betanames))
stop ("names of 'centre' do not match 'object$betanames'")
}
if (object$detectfn %in% 14:19 & "g0" %in% betapar) {
betapar[betapar=="g0"] <- "lambda0"
warning ("substituting lambda0 for g0 to match detectfn")
}
betaindices <- match(betapar, names(centre))
if ((length(betapar) != 2) | (any(is.na(betaindices))))
stop ("requires two named beta parameters")
if (realscale & any(is.na(match(betapar, names(object$link)))))
stop ("link function not found - see Notes in help")
linkx <- ifelse(realscale, object$link[[betapar[1]]], 'identity')
linky <- ifelse(realscale, object$link[[betapar[2]]], 'identity')
if (is.null(xval)) {
betax0 <- centre[betaindices[1]]
realx0 <- untransform(betax0, linkx)
xval <- transform (seq(0.8,1.2,0.04) * realx0, linkx)
xval <- sort(xval)
}
else if (realscale) xval <- transform(xval, linkx)
if (is.null(yval)) {
betay0 <- centre[betaindices[2]]
realy0 <- untransform(betay0, linky)
yval <- transform (seq(0.8,1.2,0.04) * realy0, linky)
yval <- sort(yval)
}
else if (realscale) yval <- transform(yval, linky)
varying <- list(xval,yval)
names(varying) <- betapar
grid <- expand.grid(c(as.list(centre[-betaindices]), varying))
grid <- grid[, object$betanames]
details <- replace(object$details, 'hessian', FALSE)
details$trace <- FALSE
details$LLonly <- TRUE
LL <- function (start) {
suppressWarnings(
secr.fit(capthist = object$capthist, model = object$model,
mask = object$mask, CL = object$CL, detectfn =
object$detectfn, start = start, link = object$link, fixed
= object$fixed, timecov = object$timecov, sessioncov =
object$sessioncov, groups = object$groups, dframe =
object$dframe, details = details, method =
object$fit$method, verify = FALSE, ncores = ncores)
)
}
cat ('Evaluating log likelihood across grid of', nrow(grid), 'points...\n')
flush.console()
temp <- apply (grid, 1, LL)
temp <- matrix(temp, nrow=length(xval))
if (realscale) {
xval <- round(untransform(xval, linkx),4)
yval <- round(untransform(yval, linky),4)
centre[betapar[1]] <- untransform(centre[betapar[1]], linkx)
centre[betapar[2]] <- untransform(centre[betapar[2]], linky)
}
dimnames(temp) <- list(xval, yval)
temp[temp < -1e9] <- NA
if (plot) {
contour(x=xval, y=yval, z=temp, xlab=betapar[1], ylab=betapar[2], ...)
if (plotfitted) {
points(centre[betapar[1]], centre[betapar[2]], pch = 3)
}
invisible(temp)
}
else {
temp
}
}
} |
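# Validates a tracelog 'sort' value against beautier::get_log_sorts(),
# stopping with an informative error otherwise.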
check_log_sort <- function(sort) {
if (!sort %in% beautier::get_log_sorts()) {
stop(
"'sort' must be a valid log sort. \n",
"Supported values: '",
paste0(beautier::get_log_sorts(), collapse = ", "), "' \n",
"Actual value: '", sort
)
}
} |
context("AddressProvider works")
test_that("AddressProvider works", {
aa <- AddressProvider$new()
expect_is(aa, "AddressProvider")
expect_is(aa, "R6")
expect_is(aa$locale, "character")
expect_equal(aa$locale, "en_US")
expect_is(aa$city_suffix, "function")
expect_is(aa$city_suffix(), "character")
expect_equal(length(aa$city_suffix()), 1)
expect_is(aa$street_suffix, "function")
expect_is(aa$street_suffix(), "character")
expect_equal(length(aa$street_suffix()), 1)
expect_is(aa$building_number, "function")
expect_is(aa$building_number(), "character")
expect_equal(length(aa$building_number()), 1)
expect_is(aa$city, "function")
expect_is(aa$city(), "character")
expect_equal(length(aa$city()), 1)
expect_is(aa$street_name, "function")
expect_is(aa$street_name(), "character")
expect_equal(length(aa$street_name()), 1)
expect_is(aa$street_address, "function")
expect_is(aa$street_address(), "character")
expect_equal(length(aa$street_address()), 1)
expect_is(aa$address, "function")
expect_is(aa$address(), "character")
expect_equal(length(aa$address()), 1)
expect_is(aa$country, "function")
expect_is(aa$country(), "character")
expect_equal(length(aa$country()), 1)
expect_is(aa$country_code, "function")
expect_is(aa$country_code(), "character")
expect_equal(length(aa$country_code()), 1)
expect_is(aa$postcode, "function")
expect_is(aa$postcode(), "character")
expect_equal(length(aa$postcode()), 1)
})
test_that("AddressProvider locale support works", {
test_locale <- function(loc) {
bb <- AddressProvider$new(locale = loc)
expect_is(bb$locale, "character")
expect_equal(bb$locale, loc)
expect_is(bb$city_suffix(), "character")
expect_is(bb$street_suffix(), "character")
}
locales <- c("en_US", "en_GB")
for (loc in locales) {
test_locale(loc)
}
}) |
context("nonLifeRisk S3 class")
test_that("nonLifeRisk: checks at construction are ok", {
expect_error(nonLifeRisk(type = "cdf"))
expect_error(nonLifeRisk(param = list("cdf")))
expect_error(nonLifeRisk(currency = "CHF"))
expect_error(nonLifeRisk(type = 1, param = list(simulations = c(1,2)),
currency = c("CHF")),
"type")
expect_error(nonLifeRisk(type = c("simulations", "cdf"), param = list(simulations = c(1,2)),
currency = c("CHF")),
"type")
expect_error(nonLifeRisk(type = "density", param = list(simulations = c(1,2)),
currency = c("CHF")),
"type")
expect_error(nonLifeRisk(type = "simulations", param = c(1,2),
currency = c("CHF")),
"list")
expect_error(nonLifeRisk(type = "simulations", param = data.frame(simulations = c(1,2)),
currency = c("CHF")),
"list")
expect_error(nonLifeRisk(type = "simulations", param = list(c(1,2)),
currency = c("CHF")),
"named")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,2)),
currency = c(1)),
"type")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,2)),
currency = c("CHF", "EUR")),
"type")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,2), other = c(1,2)),
currency = c("CHF")),
"dimensions")
expect_error(nonLifeRisk(type = "simulations", param = list(other = c(1,2)),
currency = c("CHF")),
"param")
expect_error(nonLifeRisk(type = "simulations", param = list(other = c(1,2)),
currency = c("CHF")),
"param")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c()),
currency = c("CHF")),
"simulations")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c("a")),
currency = c("CHF")),
"types")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(NA,1)),
currency = c("CHF")),
"Missing")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,NaN)),
currency = c("CHF")),
"Missing")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,2)),
currency = as.character(NA)),
"Missing")
expect_error(nonLifeRisk(type = "simulations", param = list(simulations = c(1,-Inf)),
currency = c("CHF")),
"finite")
expect_error(nonLifeRisk(type = "log-normal", param = list(mu=3),
currency = c("CHF")),
"dimensions")
expect_error(nonLifeRisk(type = "log-normal", param = list(mu = 3, var = 2),
currency = c("CHF")),
"param")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = c(1,2),
mu = 3),
currency = c("CHF")),
"dimensions")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = 1,
mu = "a"),
currency = c("CHF")),
"Invalid mu")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = 1,
mu = NA),
currency = c("CHF")),
"Invalid mu")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = 1,
mu = Inf),
currency = c("CHF")),
"Invalid mu")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = "a",
mu = 1),
currency = c("CHF")),
"Invalid sigma")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = NA,
mu = 1),
currency = c("CHF")),
"Invalid sigma")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = Inf,
mu = 1),
currency = c("CHF")),
"Invalid sigma")
expect_error(nonLifeRisk(type = "log-normal", param = list(sigma = 0,
mu = 1),
currency = c("CHF")),
"Invalid sigma")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = c(1,2), x = c(1,2)),
currency = c("CHF")),
"dimensions")
expect_error(nonLifeRisk(type = "cdf", param = list(x = c(1,2)),
currency = c("CHF")),
"param")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = c(1,2)),
currency = c("CHF")),
"data.frame")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1), y = c(2))),
currency = c("CHF")),
"data.frame")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c("a","b"), cdf = c(2,1))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,NA), cdf = c(2,1))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,Inf), cdf = c(2,1))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c("a", "b"))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c(0.1, NA))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c(0.1, Inf))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(2,1), cdf = c(0,0.3))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c(0,1.5))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c(0.1,0.5))),
currency = c("CHF")),
"Invalid column")
expect_error(nonLifeRisk(type = "cdf", param = list(cdf = data.frame(x = c(1,2), cdf = c(0.5,0))),
currency = c("CHF")),
"Invalid column")
})
test_that("nonLifeRisk: internal structure is ok", {
d <- nonLifeRisk(type = "simulations", param = list(simulations = rnorm(100)),
currency = "CHF")
expect_equal(is.nonLifeRisk(d), TRUE)
expect_equal(is.insuranceRisk(d), TRUE)
expect_equal(is.risk(d), TRUE)
expect_equal(is.list(d), TRUE)
})
test_that("nonLifeRisk: check method is OK", {
d <- nonLifeRisk(type = "simulations",
param = list(simulations = c(1, 1, 2, 3, 4)),
currency = "CHF")
cov.mat <- diag(2, 2, 2) %*% diag(rep(1, 2)) %*% diag(2, 2, 2)
name <- c("2YCHF", "EURCHF")
colnames(cov.mat) <- name
rownames(cov.mat) <- name
attr(cov.mat, "base.currency") <- "CHF"
mapping.table <- mappingTable(rate(name = "2YCHF",
currency = "CHF",
horizon = "k"),
rate(name = "2YCHF",
currency = "EUR",
horizon = "k",
scale = 0.75),
currency(name = "EURCHF",
from = "EUR",
to = "CHF"))
initial.values <- list()
initial.values$initial.fx <- data.frame(from = "EUR",
to = "CHF",
fx = 1.05,
stringsAsFactors = F)
initial.values$initial.rate <- data.frame(time = c(1L, 1L),
currency = c("CHF", "EUR"),
rate = c(0.01, 0.01),
stringsAsFactors = F)
mapping.time <- data.frame(time = 1L, mapping = "k", stringsAsFactors = F)
mr <- marketRisk(cov.mat = cov.mat,
mapping.table = mapping.table,
initial.values = initial.values,
base.currency = "CHF",
mapping.time = mapping.time)
expect_true(check(d, mr))
d <- nonLifeRisk(type = "simulations",
param = list(simulations=c(1, 1, 2, 3, 4)),
currency = "USD")
expect_false(check(d, mr))
})
test_that("nonLifeRisk: standard methods are ok", {
expect_equal(class(format(nonLifeRisk(type = "simulations",
param = list(simulations=c(1, 1, 2, 3, 4)),
currency = "USD"))),
"character")
expect_equal(class(format(nonLifeRisk(type = "cdf",
param = list(cdf = data.frame(x = c(1), cdf = c(1))),
currency = c("CHF")))),
"character")
expect_equal(class(format(nonLifeRisk(type = "log-normal",
param = list(sigma = 2,
mu = 1),
currency = c("CHF")))),
"character")
expect_equal(class(summary(nonLifeRisk(type = "simulations",
param = list(simulations=c(1, 1, 2, 3, 4)),
currency = "USD"))),
c("summaryDefault", "table"))
expect_equal(class(summary(nonLifeRisk(type = "cdf",
param = list(cdf = data.frame(x = c(1), cdf = c(1))),
currency = c("CHF")))),
c("summaryDefault", "table"))
expect_equal(class(summary(nonLifeRisk(type = "log-normal",
param = list(sigma = 2,
mu = 1),
currency = c("CHF")))),
c("summaryDefault", "table"))
expect_equal(class(print(nonLifeRisk(type = "simulations",
param = list(simulations=c(1, 1, 2, 3, 4)),
currency = "USD"))),
"NULL")
expect_equal(class(print(nonLifeRisk(type = "cdf",
param = list(cdf = data.frame(x = c(1), cdf = c(1))),
currency = c("CHF")))),
"NULL")
expect_equal(class(print(nonLifeRisk(type = "log-normal",
param = list(sigma = 2,
mu = 1),
currency = c("CHF")))),
"NULL")
}) |
SSexp2P<-selfStart(
function(predictor,a,b){a*exp(b*predictor)},
function(mCall,LHS, data, ...)
{
xy <- sortedXyData(mCall[["predictor"]],LHS, data)
    if (min(xy[, "y"]) > 0){
lmFit <- lm(log(xy[,"y"]) ~ xy[,"x"])
coefs <- coef(lmFit)
a <- exp(coefs[1])
b <- coefs[2]
value <- c(a, b)
names(value) <- mCall[c("a","b")]
value
    }else{stop("
>>Try to use other selfStart functions,
because the 'SSexp2P' function needs ALL y values to be greater than 0.")
}
},c("a","b")) |
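## Usage sketch (illustrative, not from the original source): because SSexp2P
## is a selfStart model, nls() obtains starting values from the log-linear fit
## in the initializer above. The data here are simulated.
set.seed(1)
x <- 1:20
y <- 2 * exp(0.15 * x) + rnorm(20, sd = 0.1)
exp_fit <- nls(y ~ SSexp2P(x, a, b))
coef(exp_fit)  # a ~ 2, b ~ 0.15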
context("poll stress test")
test_that("many processes", {
skip_on_cran()
num <- 100
px <- get_tool("px")
on.exit(try(lapply(pp, function(x) x$kill()), silent = TRUE), add = TRUE)
pp <- lapply(1:num, function(i) {
cmd <- c("sleep", "1", "outln", paste("out", i),
"errln", paste("err", i))
process$new(px, cmd, stdout = "|", stderr = "|")
})
results <- replicate(num, list(character(), character()), simplify = FALSE)
while (TRUE) {
pr <- poll(pp, -1)
lapply(seq_along(pp), function(i) {
if (pr[[i]]["output"] == "ready") {
results[[i]][[1]] <<- c(results[[i]][[1]], pp[[i]]$read_output_lines())
}
if (pr[[i]]["error"] == "ready") {
results[[i]][[2]] <<- c(results[[i]][[2]], pp[[i]]$read_error_lines())
}
})
inc <- sapply(pp, function(x) x$is_incomplete_output() || x$is_incomplete_error())
if (!any(inc)) break
}
exp <- lapply(1:num, function(i) list(paste("out", i), paste("err", i)))
expect_identical(exp, results)
}) |
denseFLMM <- function(Y, gridpoints = 1:ncol(Y), Zlist = NA, G = NA,
Lvec = NA, groups = matrix(1, nrow(Y), 1), Zvars, L = NA, NPC = NA,
smooth = FALSE, bf = 10, smoothalg = "gamm"){
if(all(is.na(Zlist)) & all(is.na(groups))){
stop("either Zlist or groups and Zvars must be specified")
}
if(all(is.na(Zlist)) & all(is.na(Zvars))){
stop("either Zlist or groups and Zvars must be specified")
}
if(all(is.na(Zlist))){
if(nrow(Y) != nrow(groups)){
stop("The number of rows in Y needs to agree with the number
of rows in the grouping matrix")
}
if(ncol(groups) != length(Zvars)){
stop("the number of grouping factors has to correspond to
the number of groups of random variables")
}
if(!prod(sapply(seq(len = length(Zvars)),
function(r) nrow(Zvars[[r]])) == nrow(Y))){
stop("the number of rows in Y needs to agree with the number
of rows in the matrices of random variables")
}
if(prod(!is.na(NPC))){
if(!prod(sapply(seq(len = length(NPC)),
function(N){!(NPC[N] > floor(NPC[N]))}))){
warning("NPC contains not only integers, will use rounded values")
NPC <- round(NPC)
}
if(length(NPC) != ncol(groups) + 1){
warning("the length of NPC has to correspond to the
number of groups + 1, will repeatedly use last value")
temp <- length(NPC)
NPC <- c(NPC, rep(NPC[temp], ncol(groups) + 1 - temp))
}
}
}else{
check_dims <- function(Zlist, Y, Lvec){
zdim <- list()
for(z in seq_along(Zlist)){
zdim[[z]] <- do.call(rbind, (lapply(Zlist[[z]], dim)))
}
zdim_un <- do.call(rbind, zdim)
if(isTRUE(all.equal(zdim_un[, 1], rep(zdim_un[1, 1], nrow(zdim_un))))){
x <- isTRUE(all.equal(zdim_un[1, 1], nrow(Y)))
if(!x)
stop("the number of rows of each matrix in Zlist need to
correspond to the number of rows of Y")
}else{
stop("the number of rows of each matrix in Zlist need to
correspond to the number of rows of Y")
}
y <- (all(Lvec == zdim_un[, 2]))
if(!y)
stop("the number of columns of each matrix in Zlist need to
correspond to the respective number in Lvec")
}
check_dims(Zlist = Zlist, Y = Y, Lvec = Lvec)
}
if(ncol(Y) != length(gridpoints)){
stop("the number of columns in Y needs to agree with the length of
the gridpoints vector")
}
if(is.na(L) & (!prod(!is.na(NPC)))){
warning("as both L and part of NPC are missing, will default
to L = 0.9, NPC = NA")
L <- 0.9
NPC <- NA
}
if(!is.na(L) & (prod(!is.na(NPC)))){
warning("NPC will override choice of L")
}
if(!is.na(L)){
if(L > 1 | L < 0){
stop("the level of explained variance needs to be between 0 and 1")
}
}
if(smooth)
stopifnot(smoothalg %in% c("gamm", "gamGCV", "gamREML", "bamGCV",
"bamREML", "bamfREML"))
message("set up")
D <- ncol(Y)
n <- nrow(Y)
if(all(is.na(Zlist))){
G <- length(Zvars)
rhovec <- 1
Lvec <- n
if(G > 0){
rhovec <- c(sapply(1:G, function(g){ncol(Zvars[[g]])}), 1)
Lvec <- c(sapply(1:G, function(g){nlevels(as.factor(groups[, g]))}), n)
}
}else{
H <- length(Zlist)
rhovec <- unlist(lapply(Zlist, length))
}
sq2 <- sum(rhovec^2)
if(all(is.na(Zlist))){
H <- G + 1
}
if(all(is.na(Zlist))){
message("construct Zlist")
foroneq <- function(g, q){
gp1 <- as.factor(groups[, g])
sparse.model.matrix(~ gp1 * Zvars[[g]][, q] - gp1 - Zvars[[g]][, q] - 1)
}
Zlist <- lapply(seq(len = G), function(g){lapply(1:rhovec[g],
function(q) foroneq(g, q))})
Zlist[[H]] <- list(Diagonal(n))
}
Y.tilde <- Y
message("estimate covariance(s)")
gcyc <- rep(1:(H), rhovec^2)
qcyc <- unlist(sapply(rhovec, FUN = function(q){rep(1:q, each = q)}))
pcyc <- unlist(sapply(rhovec, FUN = function(p){rep(1:p, p)}))
XtXentry <- function(ro, co){
A1 <-
if(H == G + 1){
if(gcyc[co] == H){
Zlist[[gcyc[ro]]][[pcyc[ro]]]
}else{
if(gcyc[ro] == H){
t(Zlist[[gcyc[co]]][[pcyc[co]]])
}else{
crossprod(Zlist[[gcyc[co]]][[pcyc[co]]],
Zlist[[gcyc[ro]]][[pcyc[ro]]])
}
}
}else{
crossprod(Zlist[[gcyc[co]]][[pcyc[co]]],
Zlist[[gcyc[ro]]][[pcyc[ro]]])
}
A2 <-
if(H == G + 1){
if(gcyc[co] == H){
Zlist[[gcyc[ro]]][[qcyc[ro]]]
}else{
if(gcyc[ro] == H){
t(Zlist[[gcyc[co]]][[qcyc[co]]])
}else{
crossprod(Zlist[[gcyc[co]]][[qcyc[co]]],
Zlist[[gcyc[ro]]][[qcyc[ro]]])
}
}
}else{
crossprod(Zlist[[gcyc[co]]][[qcyc[co]]],
Zlist[[gcyc[ro]]][[qcyc[ro]]])
}
traceA1tA2 <- function(A1, A2){
ret <- if(all(dim(A1) == dim(A2))){
sum(rowSums(A1 * A2))
}else{
if(ncol(A1) < nrow(A1)){
sum(sapply(1:ncol(A1),
function(i) as.numeric(crossprod(A1[, i, drop = F],
A2[, i, drop = F])), simplify = T))
}else{
sum(sapply(1:nrow(A1),
function(i) as.numeric(tcrossprod(A1[i, , drop = F],
A2[i,, drop = F])), simplify = T))
}
}
return(ret)
}
return(traceA1tA2(A1, A2))
}
matrixouter <- function(rows, cols, FUN){
FUN <- match.fun(FUN)
do.call(rbind, lapply(rows, function(g)
do.call(cbind, lapply(cols, function(h) FUN(g, h)))))
}
XtX <- matrixouter(seq(len = sq2), seq(len = sq2), FUN = "XtXentry")
Xtcentry <- function(ro){
if(gcyc[ro] == H & H == (G + 1)){
return(as.vector(crossprod(Y.tilde)))
}else{
A1 <- crossprod(Zlist[[gcyc[ro]]][[pcyc[ro]]], Y.tilde)
A2 <- crossprod(Zlist[[gcyc[ro]]][[qcyc[ro]]], Y.tilde)
return(as.vector(crossprod(A1, A2)))
}
}
Xtc <- do.call(rbind, lapply(seq(len = sq2), function(h){Xtcentry(h)}))
Ktilde <- solve(XtX, Xtc)
rowvec <- rep(gridpoints, each = D)
colvec <- rep(gridpoints, D)
cum_rhovec2 <- cumsum(rhovec^2)
if(H == G + 1){
diago <- diag(matrix(Ktilde[sq2, ], D, D))
}else{
diagos <- list()
use <- cum_rhovec2[(G + 1):H]
for(k in seq(along = use)){
diagos[[k]] <- diag(matrix(Ktilde[use[k], ], D, D))
}
}
if(smooth){
message("smooth covariance(s)")
if(H == G + 1){
Ktilde[sq2, as.logical(diag(D))] <- rep(NA, D)
}else{
use <- cum_rhovec2[(G + 1):H]
for(k in seq(along = use)){
Ktilde[use[k], as.logical(diag(D))] <- rep(NA, D)
}
}
Km <- t(sapply(1:sq2, function(r){
m <- switch(smoothalg,
"gamm" = gamm(Ktilde[r, ] ~ te(rowvec, colvec, k = bf))$gam,,
"gamREML" = gam(Ktilde[r, ] ~ te(rowvec, colvec, k = bf),
method = "REML"),
"gamGCV" = gam(Ktilde[r, ] ~ te(rowvec, colvec, k = bf),
method = "GCV.Cp"),
"bamGCV" = bam(Ktilde[r, ] ~ te(rowvec, colvec, k = bf),
method = "GCV.Cp"),
"bamREML" = bam(Ktilde[r, ] ~ te(rowvec, colvec, k = bf),
method = "REML"),
"bamfREML" = bam(Ktilde[r, ] ~ te(rowvec, colvec, k = bf),
method = "fREML"))
return(predict(m, newdata = data.frame(rowvec = rowvec,
colvec = colvec)))
}))
}else{
Km <- Ktilde
}
onecov <- function(g){
covcomp <- function(s, r){
matrix(Km[sum(rhovec[seq(len = g - 1)]^2) + (s - 1) * rhovec[g] + r, ],
D, byrow = TRUE)
}
matrixouter(1:rhovec[g], 1:rhovec[g], FUN = "covcomp")
}
K <- lapply(1:(H), "onecov")
K <- lapply(1:(H), function(g){(K[[g]] + t(K[[g]])) / 2})
message("estimate error variance")
if(H == (G + 1)){
sigma2.hat <- max(mean((diago -
diag(K[[H]]))[floor(D * 0.2):ceiling(D * 0.8)],
na.rm = TRUE), 0)
}else{
sigma2.hat_parts <- numeric()
use <- (G + 1):H
for(k in seq(along = use)){
sigma2.hat_parts[k] <- max(mean((diagos[[k]] -
diag(K[[use[k]]]))[floor(D * 0.2):ceiling(D * 0.8)],
na.rm = TRUE), 0)
}
sigma2.hat <- max(mean(sigma2.hat_parts, na.rm = TRUE), 0)
}
sigma2.hat_int <- (max(gridpoints) - min(gridpoints)) * sigma2.hat
interv <- gridpoints[2] - gridpoints[1]
message("make eigen decomposition(s)")
eigen.list <- lapply(1:(H), function(g) eigen(K[[g]], symmetric = TRUE))
nu.hat <- lapply(1:(H), function(g) eigen.list[[g]]$values * interv)
total.variance <- sigma2.hat_int + sum(unlist(nu.hat) * (unlist(nu.hat) > 0))
if(any(is.na(NPC))){
message("get truncation level(s)")
explained.variance <- sigma2.hat_int / total.variance
NPC <- rep(0, H)
while(explained.variance < L){
maxl <- sapply(1:(H), function(g){nu.hat[[g]][NPC[[g]] + 1]})
maxg <- match(max(maxl), maxl)
NPC[[maxg]] <- NPC[[maxg]] + 1
explained.variance <- explained.variance +
nu.hat[[maxg]][NPC[[maxg]]] / total.variance
}
}
phi <- lapply(1:(H), function(g) (eigen.list[[g]]$vectors *
(1 / sqrt(interv)))[, seq(len = NPC[g]),
drop = FALSE])
nu.hat <- lapply(1:(H), function(g){nu.hat[[g]][seq(len = NPC[g]),
drop = FALSE]})
explained.variance <- (sigma2.hat_int + sum(unlist(nu.hat))) / total.variance
message("update error variance")
if(H == (G + 1)){
newdiag <- diag(phi[[H]] %*% tcrossprod(Diagonal(length(nu.hat[[H]]),
nu.hat[[H]]), phi[[H]]))
if(length(nu.hat[[H]]) == 0){
newdiag <- rep(0,D)}
sigma2.hat <- max(mean((diago - newdiag)[floor(D * 0.2):ceiling(D * 0.8)],
na.rm = TRUE), 0)
}else{
newdiag <- list()
sigma2.hat_parts <- numeric()
use <- (G + 1):H
for(k in seq(along = use)){
newdiag[[k]] <-
diag(phi[[use[k]]] %*% tcrossprod(Diagonal(length(nu.hat[[use[k]]]),
nu.hat[[use[k]]]), phi[[use[k]]]))
if(length(nu.hat[[use[k]]]) == 0){
newdiag[[k]] <- rep(0,D)
}
sigma2.hat_parts[k] <- max(mean((diagos[[k]] -
newdiag[[k]])[floor(D * 0.2):ceiling(D * 0.8)], na.rm = TRUE), 0)
}
sigma2.hat <- max(mean(sigma2.hat_parts, na.rm = TRUE), 0)
}
message("predict basis weights")
ZtYfunc <- function(g){
foroneq <- function(q){
as.vector(crossprod(Zlist[[g]][[q]], Y.tilde) %*% phi[[g]][(q -
1) * D + (1:D), ])
}
rowSums(sapply(1:rhovec[g], 'foroneq'))
}
ZtY <- do.call("c", lapply((1:(H))[NPC != 0], 'ZtYfunc'))
ZtZcomp <- function(g, h){
foroneqr <- function(q, r){
phitphi <- crossprod(phi[[g]][(q - 1) * D + (1:D), ],
phi[[h]][(r - 1) * D + (1:D), ])
ZtZ <-
if(H == G + 1){
if(g == H){
Zlist[[h]][[r]]
}else{
if(h == H){
t(Zlist[[g]][[q]])
}else{
crossprod(Zlist[[g]][[q]], Zlist[[h]][[r]])
}
}
}else{
crossprod(Zlist[[g]][[q]], Zlist[[h]][[r]])
}
return(kronecker(phitphi, ZtZ))
}
Reduce('+',
mapply(foroneqr,
c(rep(1:rhovec[g], rhovec[h])),
c(rep(1:rhovec[h], each = rhovec[g])),
SIMPLIFY = FALSE))
}
ZtZ <- matrixouter((1:(H))[NPC != 0], (1:(H))[NPC != 0], "ZtZcomp")
cIN <- c(0, cumsum(Lvec * NPC))
IN <- Lvec * NPC
Dinv <- Diagonal(sum(IN), rep(1 / unlist(nu.hat), rep(Lvec, NPC)))
b.hat <- solve(ZtZ + sigma2.hat * Dinv, ZtY)
xi.hat <- vector(H, mode = "list")
xi.hat[NPC != 0] <- lapply((1:(H))[NPC != 0],
function(g) matrix(b.hat[cIN[g] + 1:IN[g]], Lvec[g]))
results <- list(Y = Y, gridpoints = gridpoints, groups = groups,
Zvars = Zvars, rhovec = rhovec, Lvec = Lvec, NPC = NPC, phi = phi,
sigma2 = sigma2.hat, nu = nu.hat, xi = xi.hat, L = L,
totvar = total.variance, exvar = explained.variance, bf = bf,
smooth = smooth, smoothalg = smoothalg, Zlist = Zlist)
return(results)
} |
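## Minimal call sketch (simulated data, illustrative only): one grouping
## factor with a random intercept per group. Assumes the Matrix and mgcv
## namespaces that denseFLMM() draws on (Diagonal, sparse.model.matrix,
## gamm/gam/bam) are available.
# n <- 60; D <- 30
# Y <- matrix(rnorm(n * D), n, D)
# groups <- matrix(rep(1:10, each = 6), ncol = 1)
# Zvars <- list(matrix(1, n, 1))   # random-intercept design
# fit <- denseFLMM(Y, gridpoints = 1:D, groups = groups, Zvars = Zvars, L = 0.9)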
ascii.summary.table <- function(x, caption = NULL, caption.level = NULL, list.type = "bullet", ...) {
x <- as.list(capture.output(x))
obj <- asciiList$new(x = x, caption = caption, caption.level = caption.level, list.type = list.type)
return(obj)
} |
checkConstColumns <- function(y,prednames) {
ny <- length(y)
eps <- sqrt(.Machine$double.eps)
noCC <- TRUE
oneOK <- TRUE
  for(i in 1:ny) {
xxx <- apply(y[[i]],2,function(x){all(diff(x)==0)})
if(sum(xxx)>1) {
j <- which(xxx)
whinge <- paste0("Predictor matrix ",i," has at least two\n",
" constant columns.\n")
stop(whinge)
}
xxx <- apply(y[[i]],2,function(x){all(abs(x-1) <= eps)})
if(sum(xxx)) noCC <- FALSE
if(!xxx[1]) oneOK <- FALSE
if(!(noCC | oneOK)) {
whinge <- paste0("Predictor matrix ",i," has a column of 1's\n",
" which is not the first column.\n")
stop(whinge)
}
}
if(!(noCC | oneOK)) {
whinge("Some predictor matrices have a first column of 1's\n",
" and some don't.\n")
stop(whinge)
}
if(oneOK) {
if(!is.null(prednames) && prednames[1] != "Intercept")
stop("The initial column of 1's must be named \"Intercept\".\n")
}
oneOK
} |
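## Illustrative check (assumption: the caller supplies a list of design
## matrices whose first column, if constant, is an intercept of 1's):
X <- list(cbind(Intercept = 1, x = rnorm(5)))
checkConstColumns(X, prednames = c("Intercept", "x"))  # TRUE: intercept column is OK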
dfp_createTeams <- function(request_data, as_df=TRUE, verbose=FALSE){
request_body <- form_request_body(service='TeamService', root_name='createTeams', data=request_data)
httr_response <- execute_soap_request(request_body=request_body, verbose=verbose)
result <- parse_soap_response(httr_response=httr_response, resp_element='createTeamsResponse', as_df=as_df)
return(result)
}
dfp_getTeamsByStatement <- function(request_data, as_df=TRUE, verbose=FALSE){
request_body <- form_request_body(service='TeamService', root_name='getTeamsByStatement', data=request_data)
httr_response <- execute_soap_request(request_body=request_body, verbose=verbose)
result <- parse_soap_response(httr_response=httr_response, resp_element='getTeamsByStatementResponse', as_df=as_df)
return(result)
}
dfp_performTeamAction <- function(request_data, as_df=TRUE, verbose=FALSE){
request_body <- form_request_body(service='TeamService', root_name='performTeamAction', data=request_data)
httr_response <- execute_soap_request(request_body=request_body, verbose=verbose)
result <- parse_soap_response(httr_response=httr_response, resp_element='performTeamActionResponse', as_df=as_df)
return(result)
}
dfp_updateTeams <- function(request_data, as_df=TRUE, verbose=FALSE){
request_body <- form_request_body(service='TeamService', root_name='updateTeams', data=request_data)
httr_response <- execute_soap_request(request_body=request_body, verbose=verbose)
result <- parse_soap_response(httr_response=httr_response, resp_element='updateTeamsResponse', as_df=as_df)
return(result)
} |
FHtestrcc <-
function(L, ...) {
UseMethod("FHtestrcc")
} |
encode_bounds = function(x) {
encoded = paste0(x[1], ",", x[2], "|", x[3], ",", x[4])
return(encoded)
} |
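## Usage sketch (assumed coordinate order: lat1, lon1, lat2, lon2), producing
## the "lat,lon|lat,lon" bounds string some web-map APIs expect:
encode_bounds(c(40.7, -74.0, 40.8, -73.9))  # "40.7,-74|40.8,-73.9"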
with_tempfile <- function(new, code, envir = parent.frame(), .local_envir = parent.frame(),
pattern = "file", tmpdir = tempdir(), fileext = "") {
if (!missing(envir)) {
.Deprecated(msg = "`envir` argument of with_tempfile() is deprecated.\n Use `with_tempfile(.local_envir=)` instead.")
.local_envir <- envir
}
env <- new.env(parent = .local_envir)
for (f in new) {
assign(f,
tempfile(pattern = pattern, tmpdir = tmpdir, fileext = fileext),
envir = env)
}
on.exit(unlink(mget(new, envir = env), recursive = TRUE))
eval(substitute(code), envir = env)
}
local_tempfile <- function(new = NULL, lines = NULL, envir = parent.frame(), .local_envir = parent.frame(),
pattern = "file", tmpdir = tempdir(), fileext = "") {
if (!missing(envir)) {
.Deprecated(msg = "`envir` argument of local_tempfile() is deprecated.\n Use `local_tempfile(.local_envir=)` instead.")
.local_envir <- envir
}
if (is.null(new)) {
path <- tempfile(pattern = pattern, tmpdir = tmpdir, fileext = fileext)
if (!is.null(lines)) {
writeLines(lines, path)
}
defer(unlink(path, recursive = TRUE), envir = .local_envir)
return(path)
}
.Deprecated(msg = "`new` argument of local_tempfile() is deprecated.\n Use `path <- local_tempfile()` instead.")
for (f in new) {
assign(f,
tempfile(pattern = pattern, tmpdir = tmpdir, fileext = fileext),
envir = .local_envir)
}
defer(unlink(mget(new, envir = .local_envir), recursive = TRUE), envir = .local_envir)
}
with_tempdir <- function(code, clean = TRUE,
pattern = "file", tmpdir = tempdir(), fileext = "") {
if (length(clean) > 1 || !is.logical(clean)) {
stop("`clean` must be a single TRUE or FALSE", call. = FALSE)
}
tmp <- tempfile(pattern = pattern, tmpdir = tmpdir, fileext = fileext)
dir.create(tmp)
if (clean) {
on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
}
withr::with_dir(tmp, code)
}
local_tempdir <- function(pattern = "file", tmpdir = tempdir(),
fileext = "", .local_envir = parent.frame(), clean = TRUE) {
if (length(clean) > 1 || !is.logical(clean)) {
stop("`clean` must be a single TRUE or FALSE", call. = FALSE)
}
path <- tempfile(pattern = pattern, tmpdir = tmpdir, fileext = fileext)
dir.create(path, recursive = TRUE)
if (isTRUE(clean)) {
defer(unlink(path, recursive = TRUE), envir = .local_envir)
}
path
} |
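## Usage sketch (assumes the CRAN withr package, whose exported helpers these
## definitions mirror, is installed): run code inside a throw-away directory
## that is unlinked on exit.
if (requireNamespace("withr", quietly = TRUE)) {
  withr::with_tempdir({
    writeLines("hello", "greeting.txt")
    print(readLines("greeting.txt"))  # "hello"; the directory is removed afterwards
  })
}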
vw_rename_datasets <- function(spec) {
spec <- as_vegaspec(spec)
if (!("datasets" %in% names(spec))) {
return(spec)
}
dataset_names <- names(spec$datasets)
  dataset_names_new <- as.list(sprintf("data_%03d", seq_along(dataset_names)))
names(dataset_names_new) <- dataset_names
fn_replace <- function(x) {
if (rlang::has_name(dataset_names_new, x)) {
x <- dataset_names_new[[x]]
}
x
}
names(spec$datasets) <- dataset_names_new
fn_crawl <- function(x, fn_rep) {
if (is.data.frame(x) || !is.list(x)) {
return(x)
}
if (rlang::has_name(x, "data") && rlang::has_name(x$data, "name")) {
x$data$name <- fn_rep(x$data$name)
}
x <- lapply(x, fn_crawl, fn_rep)
x
}
spec <- fn_crawl(spec, fn_replace)
spec <- as_vegaspec(spec)
spec
} |
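## Behaviour sketch (assumes a vegaspec with named top-level datasets): a
## dataset called "mydata" becomes "data_001" both in spec$datasets and in
## every data/name reference that fn_crawl() visits.
# spec_renamed <- vw_rename_datasets(spec)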
first_row_to_header <- function(a_table){
colnames(a_table) <- a_table[1, , drop = FALSE]
colnames(a_table) <- gsub(" ", ".", colnames(a_table))
a_table <- a_table[-1, , drop = FALSE]
rownames(a_table) <- seq(nrow(a_table))
a_table
}
is_rstudio_selection <- function(){
context <- rstudioapi::getActiveDocumentContext()
!all(context$selection[[1]]$range$start ==
context$selection[[1]]$range$end)
}
is_tibble <- function(x) inherits(x, "tbl_df")
is_data.table <- function(x) inherits(x, "data.table") |
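## Usage sketch for first_row_to_header() (illustrative data): the first row
## becomes the header, with spaces in the new names replaced by dots.
tbl <- data.frame(V1 = c("first name", "alice"), V2 = c("age", "30"),
                  stringsAsFactors = FALSE)
first_row_to_header(tbl)  # columns "first.name" and "age", one data row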
Election.getElection <-
function (electionId) {
Election.getElection.basic <- function (.electionId) {
request <- "Election.getElection?"
inputs <- paste("&electionId=",.electionId,sep="")
output <- pvsRequest10(request,inputs)
output
}
  output.list <- lapply(electionId, FUN = function(s) {
    Election.getElection.basic(.electionId = s)
  })
output.list <- redlist(output.list)
output <- dfList(output.list)
output
} |
setMethod(
'prune',
signature(
x = 'lineage_tree'
),
function(
x,
...
){
x@graph <- x@graph %>% prune(...)
x@x <- x@x[V(x@graph)$name %>% unlist()]
x
}
)
setMethod(
'prune',
signature(
x = 'igraph'
),
function(
x,
weighted = TRUE,
...
){
is_leaf <- degree(x, mode = 'out') == 0
is_branch <- degree(x, mode = 'out') == 2
is_link <- degree(x, mode = 'out') == 1
d <- distances(x, v = V(x)[is_link], to = V(x)[is_leaf | is_branch], mode = 'out')
d_lb <- distances(x, v = V(x)[is_leaf | is_branch], to = V(x)[is_leaf | is_branch])
n_nodes <- sum(is_leaf | is_branch)
new2old <- which(is_leaf | is_branch)
name2id <- 1:n_nodes
names(name2id) <- names(new2old)
old2new <- rep(NA, vcount(x))
names(old2new) <- V(x)$name
old2new[is_leaf] <- name2id[names(which(is_leaf))]
old2new[is_branch] <- name2id[names(which(is_branch))]
old2new[is_link] <- name2id[names(new2old[apply(d, 1, which.min)])]
x <- contract(x, old2new)
x <- simplify(x)
x <- x %>% set_vertex_attr('name', index = 1:vcount(x), value = names(name2id))
if (weighted){
edges <- (x[] %>% summary())[, 1:2] %>% as.matrix()
E(x)$weight <- d_lb[edges]
}
x
}
) |
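## Illustrative use (assumes a 'prune' generic and the igraph package, as in
## the enclosing package): collapse chains of out-degree-1 "link" vertices so
## only leaves and bifurcations remain; with weighted = TRUE, each new edge
## carries the path length between the retained endpoints.
# g_pruned <- prune(g, weighted = TRUE)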
max_min_assoc.gee = function(target, reps, group, dataset, test, wei, threshold, max_k, selectedVars, pvalues, stats, remainingVars,
univariateModels, selectedVarsOrder, hash, stat_hash, pvalue_hash, correl, se) {
selected_var = -1;
selected_pvalue = 2;
selected_stat = 0;
varsToIterate = which(remainingVars==1);
for (cvar in varsToIterate) {
mma_res = min_assoc.gee(target, reps, group, dataset, test, max_k, cvar, wei, selectedVars, pvalues, stats, univariateModels, selectedVarsOrder, hash, stat_hash, pvalue_hash, correl = correl, se = se);
pvalues = mma_res$pvalues;
stats = mma_res$stats;
stat_hash = mma_res$stat_hash;
pvalue_hash = mma_res$pvalue_hash;
if(mma_res$pvalue > threshold) remainingVars[[cvar]] = 0;
if ( compare_p_values(mma_res$pvalue, selected_pvalue, mma_res$stat, selected_stat) ) {
selected_var = cvar;
selected_pvalue = mma_res$pvalue;
selected_stat = mma_res$stat;
}
}
results <- list(selected_var = selected_var, selected_pvalue = selected_pvalue, remainingVars = remainingVars, pvalues = pvalues, stats = stats, stat_hash=stat_hash, pvalue_hash = pvalue_hash, correl = correl, se = se);
return(results);
} |
context("GEER method")
data('schools')
rlme.fit = rlme(y ~ 1 + sex + age + (1 | region) + (1 | region:school), schools, method="geer", rprpair='hl-disp')
test_that("GEER calculates correct fixed effects for schools data", {
expect_equal(rlme.fit$fixed.effects$Estimate, c(0.09027762, -0.36728706, 0.07401874), tolerance=0.01)
})
test_that("GEER calculates correct covariance matrix for school data", {
correct.var.b = matrix(c( 7.475441e-02, -0.0009965355, -2.297116e-05,
-9.965355e-04, 0.0082464809, 4.872454e-04,
-2.297116e-05, 0.0004872454, 2.069968e-02), nrow=3, ncol=3)
expect_equal(unname(rlme.fit$var.b), correct.var.b, tolerance = 0.01)
})
test_that("GEER calculates correct random effect variances for school data (hl-disp)", {
expect_equal(rlme.fit$random.effects$Variance, c(0.12837229, 0.01994128, 0.81967053), tolerance=0.01)
})
rlme.fit = rlme(y ~ 1 + sex + age + (1 | region) + (1 | region:school), schools, method="jr", rprpair='med-mad')
test_that("GEER calculates correct random effect variances for school data (med-mad)", {
expect_equal(rlme.fit$random.effects$Variance, c(0.02413042, 0.09661321, 0.38645392), tolerance=0.01)
})
rlme.fit = rlme(y ~ 1 + sex + age + (1 | region), schools, method="geer", rprpair='hl-disp')
test_that("2 Level GEER calculates correct fixed effects for schools data", {
expect_equal(rlme.fit$fixed.effects$Estimate, c(2.351191e-01, -4.185243e-01, 2.942611e-10), tolerance=0.01)
})
test_that("2 Level GEER calculates correct covariance matrix for school data", {
correct.var.b = matrix(c( 7.354120e-02, -0.0004833065, 8.462203e-06,
-4.833065e-04, 0.0061781128, 1.521761e-04,
8.462203e-06, 0.0001521761, 1.526932e-02), nrow=3, ncol=3)
expect_equal(unname(rlme.fit$var.b), correct.var.b, tolerance = 0.01)
})
test_that("2 Level GEER calculates correct random effect variances for school data (hl-disp)", {
expect_equal(rlme.fit$random.effects$Variance, c(0.1668618, 0.8385061), tolerance=0.01)
})
rlme.fit = rlme(y ~ 1 + sex + age + (1 | region), schools, method="geer", rprpair='med-mad')
test_that("2 Level GEER calculates correct random effect variances for school data (med-mad)", {
expect_equal(rlme.fit$random.effects$Variance, c(4.329403e-09, 3.864539e-01), tolerance=0.01)
}) |
dkumar <- function(x, a = 1, b = 1, log = FALSE) {
cpp_dkumar(x, a, b, log[1L])
}
pkumar <- function(q, a = 1, b = 1, lower.tail = TRUE, log.p = FALSE) {
cpp_pkumar(q, a, b, lower.tail[1L], log.p[1L])
}
qkumar <- function(p, a = 1, b = 1, lower.tail = TRUE, log.p = FALSE) {
cpp_qkumar(p, a, b, lower.tail[1L], log.p[1L])
}
rkumar <- function(n, a = 1, b = 1) {
if (length(n) > 1) n <- length(n)
cpp_rkumar(n, a, b)
} |
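## Cross-check sketch in plain R (assumption: the compiled backend implements
## the Kumaraswamy density f(x) = a*b*x^(a-1)*(1-x^a)^(b-1) on (0, 1)):
dkumar_r <- function(x, a = 1, b = 1) a * b * x^(a - 1) * (1 - x^a)^(b - 1)
dkumar_r(0.5, a = 2, b = 3)  # 1.6875; dkumar(0.5, 2, 3) should agree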
sim.taxa <-
function (numbsim, n, m=n, waitsp, waitext="rexp(0)", symmetric=TRUE, complete=TRUE, tiplabel=c("sp.", "ext.","Ss", "Se"),
shiftsp=list(prob=0, strength="runif(0.5,0.9)"), shiftext=list(prob=0, strength="runif(0.1,0.2)"),
sampling=list(frac=1, branchprop=FALSE), sampling.gsa=1, gsa=FALSE) {
if (complete==TRUE & sampling$frac!=1) {
warning("Sampling on taxa based can only be used with complete=FALSE, thus complete was changed to FALSE")
complete=FALSE
}
if (sampling$frac>1 | sampling$frac<0){
warning("Sampling Sampling fraction needs to range between 0 and 1, thus sampling$frac was changed to 1")
sampling$frac=1
}
if (sampling$frac!=1){
n=round(n/sampling$frac)
if (m<n) {
warning("You are using sampling, thus tips=n/sampling$frac. m is smaller than n, thus we changed m=n/frac")
m=n
}
}
if (m<n){
warning("m can not be samller than n, thus we changed m=n")
m=n
}
check<-gsa
if (gsa==F && complete==T){check<-T}
mytreegsazed <- list()
while (length(mytreegsazed) < numbsim)
{
mytree <- list()
step <- 1
{
if (symmetric == TRUE)
{
for (step in 1: (numbsim) ){
mytreenext <- mytree.symmetric.taxa(m=m, waitsp=waitsp, waitext=waitext, complete=check, tiplabel=tiplabel,
shiftsp=shiftsp, shiftext=shiftext, sampling=sampling, gsa=gsa)
mytree<- c(mytree, list(mytreenext))
}
}
else
{
for (step in 1: (numbsim) ){
mytreenext <- mytree.asymmetric.taxa(m=m, waitsp=waitsp, waitext=waitext, complete=check, tiplabel=tiplabel,
shiftsp=shiftsp, shiftext=shiftext, sampling=sampling, gsa=gsa)
mytree<- c(mytree, list(mytreenext))
}
}
}
{
if (gsa==T)
{
mytreegsa <- sim.gsa.taxa(mytree, n=n, sampling=sampling.gsa, frac=sampling$frac, complete=complete)
}
else
{
mytreegsa <- mytree
}
}
mytreegsazed <- c(mytreegsazed, mytreegsa)
}
mytreegsazeds <- sample(mytreegsazed, numbsim)
mytreegsazed <- mytreegsazeds
return(mytreegsazed)
} |
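## Illustrative call (TreeSimGM-style interface, placeholder settings):
## simulate 5 symmetric trees with 10 extant tips, exponential waiting times
## to speciation and no extinction.
# trees <- sim.taxa(numbsim = 5, n = 10, waitsp = "rexp(1.5)")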
NULL
new_ml_sample_transformer <- function(jobj) {
new_ml_transformer(jobj, class = "ml_sample_transformer")
}
ft_dplyr_transformer <- function(x, tbl,
uid = random_string("dplyr_transformer_"), ...) {
UseMethod("ft_dplyr_transformer")
}
ml_dplyr_transformer <- ft_dplyr_transformer
ft_dplyr_transformer.spark_connection <- function(x, tbl,
uid = random_string("dplyr_transformer_"), ...) {
if (!identical(class(tbl)[1], "tbl_spark")) stop("'tbl' must be a Spark table")
if (is.null(attributes(tbl)$sampling_params)) {
ft_sql_transformer(x, ft_extract_sql(tbl), uid = uid)
} else {
sc <- spark_connection(tbl)
sampling_params <- attributes(tbl)$sampling_params
if (sampling_params$frac) {
jobj <- invoke_new(sc, "sparklyr.SampleFrac", uid) %>%
invoke("setFrac", sampling_params$args$size)
} else {
jobj <- invoke_new(sc, "sparklyr.SampleN", uid) %>%
invoke("setN", as.integer(sampling_params$args$size))
}
jobj <- jobj %>%
invoke(
"%>%",
list(
"setWeight",
if (rlang::quo_is_null(sampling_params$args$weight)) {
""
} else {
rlang::as_name(sampling_params$args$weight)
}
),
list("setReplace", sampling_params$args$replace),
list("setGroupBy", as.list(sampling_params$group_by)),
list("setSeed", as.integer(sampling_params$args$seed %||% Sys.time()))
)
new_ml_sample_transformer(jobj)
}
}
ft_dplyr_transformer.ml_pipeline <- function(x, tbl,
uid = random_string("dplyr_transformer_"), ...) {
stage <- ft_dplyr_transformer.spark_connection(
x = spark_connection(x),
tbl = tbl,
uid = uid,
...
)
ml_add_stage(x, stage)
}
ft_dplyr_transformer.tbl_spark <- function(x, tbl,
uid = random_string("dplyr_transformer_"), ...) {
stage <- ft_dplyr_transformer.spark_connection(
x = spark_connection(x),
tbl = tbl,
uid = uid,
...
)
ml_transform(stage, x)
} |
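## Usage sketch (assumes an active sparklyr connection `sc` and a dplyr
## sampling pipeline captured in the Spark table `sampled_tbl`): the
## transformer is appended to an ML pipeline so the same sampling replays
## at scoring time.
# pipeline <- ml_pipeline(sc) %>%
#   ft_dplyr_transformer(sampled_tbl)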
sirt_progress_cat <- function(progress)
{
if (progress){
cat("\n")
}
} |
context("checking that the length of DP_PSSM feature vector is equal to 240")
test_that("whether the DP_PSSM function gives us the expected output",{
ss<-DP_PSSM(system.file("extdata","C7GQS7.txt.pssm",package="PSSMCOOL"))
expect_equal(length(ss),240)
}) |
restrict_coef <- function(model, restrictions){
if ( inherits(model, "glm") ){
fm <- formula(model)
offset_term <- get_offset(model)
fm_no_offset <- remove_offset_formula(fm)
df_new <- model$data
model_call <- model$call
model_out <- model
rfdf <- rating_factors1(model)
rst_lst <- list(restrictions)
names(rst_lst) <- names(restrictions[1])
restricted_df <- restrict_df(restrictions)
new_col_nm <- NULL
old_col_nm <- NULL
mgd_rst <- NULL
mgd_smt <- NULL
}
if ( inherits(model, c("smooth", "restricted")) ){
fm <- model$formula_restricted
offset_term <- model$offset
fm_no_offset <- model$formula_removed
df_new <- model$data_restricted
model_call <- model$model_call
model_out <- model$model_out
rfdf <- model$rating_factors
rst_lst <- model$restrictions_lst
rst_lst[[names(restrictions)[1]]] <- restrictions
restricted_df <- restrict_df(restrictions)
new_col_nm <- model$new_col_nm
old_col_nm <- model$old_col_nm
mgd_rst <- model$mgd_rst
mgd_smt <- model$mgd_smt
}
if ( inherits(model, "restricted") ){
restricted_df <- rbind(model$rf_restricted_df, restricted_df)
}
if ( inherits(model, "smooth") ){
restricted_df <- rbind(model$new_rf, restricted_df)
}
fm_remove <- update_formula_remove(fm_no_offset, names(restrictions)[1])
fm_add <- update_formula_add(offset_term, fm_remove, names(restrictions)[2])
df_restricted <- add_restrictions_df(df_new, restrictions)
nrst <- unique(setdiff(names(restrictions), unique(rfdf$risk_factor)))
orst <- unique(setdiff(names(restrictions), new_col_nm))
mgd_rst <- append(mgd_rst, list(unique(c(orst, nrst))))
new_col_nm <- unique(append(new_col_nm,
setdiff(names(restrictions),
unique(rfdf$risk_factor))))
old_col_nm <- unique(append(old_col_nm, setdiff(names(restrictions),
new_col_nm)))
rt <- list(formula_restricted = fm_add[[1]],
formula_removed = fm_remove,
data_restricted = df_restricted,
fm_no_offset = fm_no_offset,
offset = fm_add[[2]],
rating_factors = rfdf,
restrictions_lst = rst_lst,
rf_restricted_df = restricted_df,
model_call = model_call,
model_out = model_out,
new_col_nm = new_col_nm,
old_col_nm = old_col_nm,
mgd_rst = mgd_rst,
mgd_smt = mgd_smt)
attr(rt, "class") <- "restricted"
invisible(rt)
}
smooth_coef <- function(model, x_cut, x_org, degree = NULL, breaks = NULL){
if ( is.null(breaks) | !is.numeric(breaks) ){
stop("'breaks' must be a numerical vector", call. = FALSE)
}
if ( inherits(model, "glm") ){
fm <- formula(model)
offset_term <- get_offset(model)
fm_no_offset <- remove_offset_formula(fm)
df_new <- model$data
model_call <- model$call
model_out <- model
rfdf <- rating_factors1(model)
rst_lst <- NULL
new_col_nm <- NULL
old_col_nm <- NULL
mgd_smt <- NULL
mgd_rst <- NULL
}
if ( inherits(model, c("smooth", "restricted")) ){
fm <- model$formula_restricted
offset_term <- model$offset
fm_no_offset <- model$formula_removed
df_new <- model$data_restricted
model_call <- model$model_call
model_out <- model$model_out
rfdf <- model$rating_factors
rst_lst <- model$restrictions_lst
new_col_nm <- model$new_col_nm
old_col_nm <- model$old_col_nm
mgd_smt <- model$mgd_smt
mgd_rst <- model$mgd_rst
}
mgd_smt <- append(mgd_smt, list(c(paste0(x_org, "_smooth"),
paste0(x_cut, "_smooth"))))
old_col_nm <- append(old_col_nm, paste0(x_org, "_smooth"))
new_col_nm <- append(new_col_nm, paste0(x_cut, "_smooth"))
fm_remove <- update_formula_remove(fm_no_offset, x_cut)
fm_add <- update_formula_add(offset_term, fm_remove, paste0(x_cut, "_smooth"))
borders_x_cut <- cut_borders_model(model, x_cut)
if ( is.null(degree) ){
degree <- nrow(borders_x_cut) - 1
}
fit_poly <- fit_polynomial(borders_x_cut, x_org, degree, breaks)
df_poly <- fit_poly[["new_poly_df"]]
df_poly_line <- fit_poly[["poly_line"]]
df_new_rf <- fit_poly[["new_rf"]]
if ( inherits(model, "smooth")){
df_new_rf <- rbind(model$new_rf, df_new_rf)
}
if ( inherits(model, "restricted")){
df_new_rf <- rbind(model$rf_restricted_df, df_new_rf)
}
df_smooth <- join_to_nearest(df_new, df_poly, x_org)
names(df_smooth)[names(df_smooth) == 'yhat'] <- paste0(x_cut, "_smooth")
st <- list(formula_restricted = fm_add[[1]],
formula_removed = fm_remove,
data_restricted = df_smooth,
fm_no_offset = fm_no_offset,
offset = fm_add[[2]],
borders = borders_x_cut,
new = df_poly,
new_line = df_poly_line,
model_call = model_call,
rating_factors = rfdf,
restrictions_lst = rst_lst,
new_rf = df_new_rf,
degree = degree,
model_out = model_out,
new_col_nm = new_col_nm,
old_col_nm = old_col_nm,
mgd_rst = mgd_rst,
mgd_smt = mgd_smt)
attr(st, "class") <- "smooth"
invisible(st)
}
print.restricted <- function(x, ...){
cat("Formula: ")
print(x$formula_restricted)
}
print.smooth <- function(x, ...){
cat("Formula: ")
print(x$formula_restricted)
}
autoplot.restricted <- function(object, ...){
names_rf <- names(object$restrictions_lst)
name <- names_rf[length(names_rf)]
naam_rst <- object$restrictions_lst[[name]]
rf <- object$rating_factors
naam_rf <- rf[rf$risk_factor == name,]
naam_rf <- naam_rf[,2:3]
names(naam_rst)[names(naam_rst) == name] <- "level"
naam_rf <- matchColClasses(naam_rst, naam_rf)
koppel <- dplyr::left_join(naam_rst, naam_rf, by = "level")
meas_vars <- c(names(naam_rst)[2], names(rf)[3])
koppel_dt <- data.table::setDT(koppel)
koppel_ldt <- data.table::melt(koppel_dt,
id.vars = names(koppel_dt)[!names(
koppel_dt) %in% meas_vars],
measure.vars = meas_vars,
variable.name = "type",
value.name = "Coef")
koppel <- data.table::setDF(koppel_ldt)
koppel$level <- as.factor(koppel$level)
koppel$type <- as.character(koppel$type)
koppel$type[koppel$type == names(naam_rst)[2]] <- "restricted"
koppel$type[koppel$type == names(rf)[3]] <- "unrestricted"
ggplot2::ggplot(data = koppel, aes(x = level,
y = Coef,
color = type, group = type)) +
ggplot2::geom_point() +
ggplot2::geom_line() +
ggplot2::theme_minimal() +
ggplot2::labs(x = name, color = NULL)
}
autoplot.smooth <- function(object, ...){
rf2 <- object$borders
new <- object$new
new_line <- object$new_line
degree <- scales::ordinal(object$degree)
degree_name <- paste0(degree, " order polynomial")
rf2_start_open <- rf2[rf2$start_oc == "open",]
rf2_start_closed <- rf2[rf2$start_oc == "closed",]
rf2_end_open <- rf2[rf2$end_oc == "open",]
rf2_end_closed <- rf2[rf2$end_oc == "closed",]
new_start_open <- new[new$start_oc == "open",]
new_start_closed <- new[new$start_oc == "closed",]
new_end_open <- new[new$end_oc == "open",]
new_end_closed <- new[new$end_oc == "closed",]
x_name <- names(new_line)[1]
names(new_line)[names(new_line) == x_name] <- "col1"
ggplot2::ggplot(data = rf2) +
ggplot2::geom_segment(ggplot2::aes(x = start_, y = estimate, xend = end_,
yend = estimate, color = "Model fit"),
group = 1) +
ggplot2::geom_segment(data = new, ggplot2::aes(x = breaks_min, y = yhat,
xend = breaks_max,
yend = yhat,
color = "New cluster"),
group = 2) +
ggplot2::geom_point(data = rf2_start_closed, ggplot2::aes(x = start_,
y = estimate),
color = "dodgerblue") +
ggplot2::geom_point(data = rf2_end_closed, ggplot2::aes(x = end_,
y = estimate),
color = "dodgerblue") +
ggplot2::geom_point(data = rf2_start_open, ggplot2::aes(x = start_,
y = estimate),
color = "dodgerblue", shape = 21, fill = "white") +
ggplot2::geom_point(data = rf2_end_open, ggplot2::aes(x = end_,
y = estimate),
color = "dodgerblue", shape = 21, fill = "white") +
ggplot2::geom_point(data = new_start_closed, ggplot2::aes(x = start_,
y = yhat),
color = "red") +
ggplot2::geom_point(data = new_end_closed, ggplot2::aes(x = end_,
y = yhat),
color = "red") +
ggplot2::geom_point(data = new_start_open, ggplot2::aes(x = start_,
y = yhat),
color = "red", shape = 21, fill = "white") +
ggplot2::geom_point(data = new_end_open, ggplot2::aes(x = end_,
y = yhat),
color = "red", shape = 21, fill = "white") +
ggplot2::labs(x = x_name, y = "Estimated coefficient") +
ggplot2::geom_line(data = new_line, ggplot2::aes(x = col1, y = yhat,
color = "Smooth"),
group = 3) +
ggplot2::scale_colour_manual(name = "Risk factor",
values = c("Model fit" = "dodgerblue",
"New cluster" = "red",
"Smooth" = "black"),
labels = c("Model fit", "New cluster",
degree_name)) +
ggplot2::theme_minimal()
}
update_glm <- function(x){
if( !inherits(x, c("restricted", "smooth")) ) {
stop("Input must be of class restricted or of class smooth", call. = FALSE)
}
lst_call <- as.list(x$model_call)
lst <- list(formula = x$formula_restricted, data = x$data_restricted,
offset = NULL)
y <- eval(as.call(modifyList(lst_call, lst)))
y$call$formula <- lst$formula
y$call$data <- quote(df_new)
offweights <- NULL
if ( !is.null(lst_call$weights) ) {
offweights <- append(offweights, as.character(lst_call$weights))
}
if ( !is.null(lst_call$offset) ) {
offweights <- append(offweights, as.character(lst_call$offset)[2])
}
if ( inherits(x, "smooth")) {
attr(y, "new_rf") <- x[["new_rf"]]
attr(y, "class") <- append(class(y), "refitsmooth")
}
if ( inherits(x, "restricted")) {
attr(y, "new_rf_rst") <- x[["rf_restricted_df"]]
attr(y, "class") <- append(class(y), "refitrestricted")
}
rf <- x$rating_factors
rf2 <- unique(rf$risk_factor[rf$risk_factor != "(Intercept)"])
attr(y, "new_col_nm") <- x$new_col_nm
attr(y, "old_col_nm") <- x$old_col_nm
attr(y, "rf") <- rf2
attr(y, "mgd_smt") <- x$mgd_smt
attr(y, "mgd_rst") <- x$mgd_rst
attr(y, "offweights") <- offweights
y
} |
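## Workflow sketch (hypothetical model, data, and column names): chain the
## helpers above to restrict one risk factor, smooth another, then refit.
# mod <- glm(nclaims ~ age_cat + zone, family = poisson(),
#            data = df_new, offset = log(exposure))
# mod_r <- restrict_coef(mod, restrictions = zone_restrictions)
# mod_s <- smooth_coef(mod_r, x_cut = "age_cat", x_org = "age",
#                      breaks = seq(18, 95, by = 5))
# refit <- update_glm(mod_s)   # refits the GLM on the restricted data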
makeOMLSeedParList = function(seed, prefix = "openml") {
assertIntegerish(seed)
assert(checkString(prefix), checkNull(prefix))
rng.kind = RNGkind()
if (length(rng.kind) == 2)
seed.pars = setNames(c(seed, rng.kind), c("seed", "kind", "normal.kind"))
if (length(rng.kind) == 3)
seed.pars = setNames(c(seed, rng.kind), c("seed", "kind", "normal.kind", "sample.kind"))
if (!is.null(prefix))
names(seed.pars) = paste0(prefix, ".", names(seed.pars))
seed.setting = lapply(seq_along(seed.pars), function(x) {
makeOMLRunParameter(
name = names(seed.pars[x]),
value = as.character(seed.pars[x]),
component = NA_character_
)
})
seed.setting = setNames(seed.setting, names(seed.pars))
setClasses(seed.setting, "OMLSeedParList")
}
print.OMLSeedParList = function(x, ...) {
catf("This is a '%s' with the following parameters:", class(x)[1])
if (length(x) > 0)
x = rbindlist(lapply(x, function(x) x[c("name", "value", "component")])) else
x = data.frame()
x$component = NULL
print(x)
}
getOMLSeedParList = function(run) {
assertClass(run, "OMLRun")
par = run$parameter.setting
return(setClasses(par[isSeedPar(par)], "OMLSeedParList"))
}
isSeedPar = function(par) {
rpl.names = vcapply(par, function(x) x$name)
stri_detect_regex(rpl.names, "(seed$|kind$|normal.kind$)")
}
setOMLSeedParList = function(x, flow = NULL) {
assertClass(x, "OMLSeedParList")
seed.pars = vcapply(x, function(x) x$value)
prefix = unique(gsub("seed|kind|normal.kind|sample.kind", "", names(seed.pars)))
names(seed.pars) = gsub(prefix, "", names(seed.pars))
xRNG = seed.pars[names(seed.pars) %nin% "seed"]
currentRNG = RNGkind()
if (!identical(currentRNG, unname(xRNG)))
messagef("Kind of RNG has been changed to '%s'",
convertToShortString(as.list(xRNG)))
if (!is.null(flow)) RNGversion(extractRVersionFromFlow(flow))
do.call("set.seed", as.list(seed.pars))
}
extractSeed = function(x) {
assertClass(x, "OMLSeedParList")
seed.names = vcapply(x, function(x) x$name)
seed = vcapply(x, function(x) x$value)[stri_detect_fixed(seed.names, "seed")]
as.integer(seed)
} |
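## Round-trip sketch (hypothetical seed value): encode the current RNG state
## as OpenML run parameters, restore it later, and read the seed back.
# seed.pars <- makeOMLSeedParList(1, prefix = "openml")
# setOMLSeedParList(seed.pars)   # calls set.seed() with the stored RNG kind
# extractSeed(seed.pars)         # 1L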
coxseiest2 <-
function(dat,par.init,m=2,
mit=1000,tr=TRUE,method="L-BFGS-B",
lower=c(rep(-Inf,ncol(dat)-3),-Inf,0),
upper=rep(Inf,ncol(dat)-3 + 2),
gfun=function(x,pa){
ifelse(x <= 0, rep(0,length(x)),
pa[1]*exp(-pa[2]*x))
}){
ids <- unique(dat$id)
ng <- length(ids)
gs <- as.numeric(table(dat$id))
gofst <- cumsum(gs)-gs
C <- dat$Y[!dat$delta]
Z <- as.matrix(dat[,setdiff(colnames(dat),c("Y","delta","id"))],
nrow=nrow(dat))
Zs <- array(0,dim=c(dim(Z),ng))
for(l in 1:ng){
for(i in 1:ng){
for(j in 1:NCOL(Z)){
Zs[dat$id==ids[i],j,l] <- if(diff(range(Z[dat$id==ids[l],j]))==0){
Z[dat$id==ids[l],j][1]
}else{
approxfun(dat$Y[dat$id==ids[l]],
Z[dat$id==ids[l],j],method="constant",
f=1,rule=2)(dat$Y[dat$id==ids[i]])
}
}
}
}
loglik <- function(param){
.Call("ll",dat$Y, as.double(as.vector(Z)), as.double(as.vector(Zs)),
as.double(C), as.integer(gs), as.integer(gofst), as.double(param),
gfun, as.integer(2), as.integer(m), new.env(),
PACKAGE="coxsei")
}
ret <- optim(par.init,loglik,control=list(trace=tr,maxit=mit),
hessian=TRUE,method=method,
lower=lower, upper=upper)
ret
} |
metareg_plot <- function(full,
reg,
compar,
cov_value,
drug_names,
save_xls) {
if (length(unique(reg$covariate)) < 3 &
!is.element(cov_value[[1]], reg$covariate)) {
aa <- "The first element of the argument 'cov_value' is out of the value"
stop(paste(aa, "range of the analysed covariate"), call. = FALSE)
} else if (length(unique(reg$covariate)) > 2 &
(cov_value[[1]] < min(reg$covariate) |
cov_value[[1]] > max(reg$covariate))) {
aa <- "The first element of the argument 'cov_value' is out of the value"
stop(paste(aa, "range of the analysed covariate"), call. = FALSE)
}
if (length(drug_names) < 3) {
stop("This function is *not* relevant for a pairwise meta-analysis",
call. = FALSE)
}
cov_value <- if (missing(cov_value)) {
stop("The argument 'cov_value' has not been defined", call. = FALSE)
} else if (length(cov_value) < 2) {
aa <- "The argument 'cov_value' must be a list with elements a number and"
stop(paste(aa, "a character"), call. = FALSE)
} else if (length(cov_value) == 2) {
cov_value
}
save_xls <- if (missing(save_xls)) {
FALSE
} else {
save_xls
}
compar <- if (missing(compar)) {
stop("The argument 'compar' has not been defined", call. = FALSE)
} else if (!is.element(compar, drug_names)) {
stop("The value of the argument 'compar' is not found in the 'drug_names'",
call. = FALSE)
} else if (is.element(compar, drug_names)) {
compar
}
covariate <- if (length(unique(reg$covariate)) < 3) {
unique(reg$covariate)
} else {
reg$covariate
}
cov_val <- ifelse(length(unique(covariate)) < 3, cov_value[[1]],
cov_value[[1]] - mean(covariate))
drug_names <- if (missing(drug_names)) {
aa <- "The argument 'drug_names' has not been defined."
bb <- "The intervention ID, as specified in 'data' is used as"
cc <- "intervention names"
message(cat(paste0("\033[0;", col = 32, "m", aa, " ", bb, " ", cc,
"\033[0m", "\n")))
as.character(seq_len(length(full$SUCRA[, 1])))
} else {
drug_names
}
model <- full$model
measure <- effect_measure_name(full$measure)
sucra_full <- round(full$SUCRA, 2)
sucra_full_order <- round(full$SUCRA, 2)[order(sucra_full[, 1],
decreasing = TRUE), ]
drug_names_sorted <- drug_names[order(sucra_full[, 1], decreasing = TRUE)]
poss_pair_comp1 <- data.frame(exp = t(combn(drug_names, 2))[, 2],
comp = t(combn(drug_names, 2))[, 1])
poss_pair_comp2 <- data.frame(exp = t(combn(drug_names, 2))[, 1],
comp = t(combn(drug_names, 2))[, 2])
poss_pair_comp <- rbind(poss_pair_comp1, poss_pair_comp2)
em_ref00_nma <- cbind(rbind(data.frame(mean = full$EM[, 1],
lower = full$EM[, 3],
upper = full$EM[, 7]),
data.frame(mean = full$EM[, 1] * (-1),
lower = full$EM[, 7] * (-1),
upper = full$EM[, 3] * (-1))),
poss_pair_comp)
em_subset_nma <- subset(em_ref00_nma, em_ref00_nma[5] == compar)
em_ref0_nma <- rbind(em_subset_nma[, 1:3], c(rep(NA, 3)))
sucra_full_new <- data.frame(sucra_full[, 1], drug_names)[
order(match(data.frame(sucra_full[, 1], drug_names)[, 2],
em_subset_nma[, 4])), 1]
em_ref_nma <- em_ref0_nma[order(sucra_full_new, decreasing = TRUE), ]
if (is.element(reg$covar_assumption, c("exchangeable", "independent"))) {
beta00 <- cbind(rbind(data.frame(mean = reg$beta_all[, 1],
lower = reg$beta_all[, 3],
upper = reg$beta_all[, 7]),
data.frame(mean = reg$beta_all[, 1] * (-1),
lower = reg$beta_all[, 7] * (-1),
upper = reg$beta_all[, 3] * (-1))),
poss_pair_comp)
beta_all_subset <- subset(beta00, beta00[5] == compar)
beta0 <- rbind(beta_all_subset[, 1:3], c(rep(NA, 3)))
beta <- beta0[order(sucra_full_new, decreasing = TRUE), ]
rownames(beta) <- NULL
} else {
beta <- reg$beta[1, c(1, 3, 7)]
}
par_mean <- as.vector(c(reg$EM[, 1] + reg$beta_all[, 1] * cov_val,
(reg$EM[, 1] * (-1)) +
(reg$beta_all[, 1] * (-1) * cov_val)))
par_sd <- as.vector(c(sqrt(((reg$EM[, 2])^2) +
((reg$beta_all[, 2] * cov_val)^2)),
sqrt(((reg$EM[, 2])^2) +
((reg$beta_all[, 2] * cov_val)^2))))
em_ref00_nmr <- cbind(mean = par_mean,
lower = par_mean - 1.96 * par_sd,
upper = par_mean + 1.96 * par_sd,
poss_pair_comp)
em_subset_nmr <- subset(em_ref00_nmr, em_ref00_nmr[5] == compar)
em_ref0_nmr <- rbind(em_subset_nmr[, 1:3], c(rep(NA, 3)))
em_ref_nmr <- em_ref0_nmr[order(sucra_full_new, decreasing = TRUE), ]
rownames(em_ref_nma) <- rownames(em_ref_nmr) <- NULL
if (model == "RE") {
pred_ref00_nma <- cbind(rbind(data.frame(mean = full$EM_pred[, 1],
lower = full$EM_pred[, 3],
upper = full$EM_pred[, 7]),
data.frame(mean = full$EM_pred[, 1] * (-1),
lower = full$EM_pred[, 7] * (-1),
upper = full$EM_pred[, 3] * (-1))),
poss_pair_comp)
pred_subset_nma <- subset(pred_ref00_nma, pred_ref00_nma[5] == compar)
pred_ref0_nma <- rbind(pred_subset_nma[, 1:3], c(rep(NA, 3)))
par_mean <- as.vector(c(reg$EM_pred[, 1] + reg$beta_all[, 1] * cov_val,
(reg$EM_pred[, 1] * (-1)) +
(reg$beta_all[, 1] * (-1) * cov_val)))
par_sd <- as.vector(c(sqrt(((reg$EM_pred[, 2])^2) +
((reg$beta_all[, 2] * cov_val)^2)),
sqrt(((reg$EM_pred[, 2])^2) +
((reg$beta_all[, 2] * cov_val)^2))))
pred_ref00_nmr <- cbind(data.frame(mean = par_mean,
lower = par_mean - 1.96 * par_sd,
upper = par_mean + 1.96 * par_sd),
poss_pair_comp)
pred_subset_nmr <- subset(pred_ref00_nmr, pred_ref00_nmr[5] == compar)
pred_ref0_nmr <- rbind(pred_subset_nmr[, 1:3], c(rep(NA, 3)))
pred_ref_nma <- pred_ref0_nma[order(sucra_full_new, decreasing = TRUE), ]
pred_ref_nmr <- pred_ref0_nmr[order(sucra_full_new, decreasing = TRUE), ]
rownames(pred_ref_nma) <- rownames(pred_ref_nmr) <- NULL
}
if (!is.element(measure, c("Odds ratio", "Ratio of means")) & model == "RE") {
em_ref_nma <- em_ref_nma
em_ref_nmr <- em_ref_nmr
pred_ref_nma <- pred_ref_nma
pred_ref_nmr <- pred_ref_nmr
beta <- beta
cri_est_nma <- paste0("(", round(em_ref_nma[, 2], 2), ",", " ",
round(em_ref_nma[, 3], 2), ")",
ifelse(em_ref_nma[, 2] > 0 |
em_ref_nma[, 3] < 0, "*", " "))
cri_est_nmr <- paste0("(", round(em_ref_nmr[, 2], 2), ",", " ",
round(em_ref_nmr[, 3], 2), ")",
ifelse(em_ref_nmr[, 2] > 0 |
em_ref_nmr[, 3] < 0, "*", " "))
cri_pred_nma <- paste0("(", round(pred_ref_nma[, 2], 2), ",", " ",
round(pred_ref_nma[, 3], 2), ")",
ifelse(pred_ref_nma[, 2] > 0 |
pred_ref_nma[, 3] < 0, "*", " "))
cri_pred_nmr <- paste0("(", round(pred_ref_nmr[, 2], 2), ",", " ",
round(pred_ref_nmr[, 3], 2), ")",
ifelse(pred_ref_nmr[, 2] > 0 |
pred_ref_nmr[, 3] < 0, "*", " "))
cri_beta <- if (is.element(reg$covar_assumption, c("exchangeable",
"independent"))) {
paste0("(", round(beta[, 2], 2), ",", " ", round(beta[, 3], 2), ")",
ifelse(beta[, 2] > 0 | beta[, 3] < 0, "*", " "))
} else {
paste0("(", round(reg$beta[2], 2), ",", " ", round(reg$beta[3], 2), ")",
ifelse(reg$beta[2] > 0 | reg$beta[3] < 0, "*", " "))
}
} else if (is.element(measure, c("Odds ratio", "Ratio of means")) &
model == "RE") {
em_ref_nma <- exp(em_ref_nma)
em_ref_nmr <- exp(em_ref_nmr)
pred_ref_nma <- exp(pred_ref_nma)
pred_ref_nmr <- exp(pred_ref_nmr)
beta <- exp(beta)
cri_est_nma <- paste0("(", round(em_ref_nma[, 2], 2), ",", " ",
round(em_ref_nma[, 3], 2), ")",
ifelse(em_ref_nma[, 2] > 1 |
em_ref_nma[, 3] < 1, "*", " "))
cri_est_nmr <- paste0("(", round(em_ref_nmr[, 2], 2), ",", " ",
round(em_ref_nmr[, 3], 2), ")",
ifelse(em_ref_nmr[, 2] > 1 |
em_ref_nmr[, 3] < 1, "*", " "))
cri_pred_nma <- paste0("(", round(pred_ref_nma[, 2], 2), ",", " ",
round(pred_ref_nma[, 3], 2), ")",
ifelse(pred_ref_nma[, 2] > 1 |
pred_ref_nma[, 3] < 1, "*", " "))
cri_pred_nmr <- paste0("(", round(pred_ref_nmr[, 2], 2), ",", " ",
round(pred_ref_nmr[, 3], 2), ")",
ifelse(pred_ref_nmr[, 2] > 1 |
pred_ref_nmr[, 3] < 1, "*", " "))
cri_beta <- if (is.element(reg$covar_assumption, c("exchangeable",
"independent"))) {
paste0("(", round(beta[, 2], 2), ",", " ", round(beta[, 3], 2), ")",
ifelse(beta[, 2] > 1 | beta[, 3] < 1, "*", " "))
} else {
paste0("(", round(beta[2], 2), ",", " ", round(beta[3], 2), ")",
ifelse(beta[2] > 1 | beta[3] < 1, "*", " "))
}
} else if (!is.element(measure, c("Odds ratio", "Ratio of means")) &
model == "FE") {
em_ref_nma <- em_ref_nma
em_ref_nmr <- em_ref_nmr
beta <- beta
cri_est_nma <- paste0("(", round(em_ref_nma[, 2], 2), ",", " ",
round(em_ref_nma[, 3], 2), ")",
ifelse(em_ref_nma[, 2] > 0 |
em_ref_nma[, 3] < 0, "*", " "))
cri_est_nmr <- paste0("(", round(em_ref_nmr[, 2], 2), ",", " ",
round(em_ref_nmr[, 3], 2), ")",
ifelse(em_ref_nmr[, 2] > 0 |
em_ref_nmr[, 3] < 0, "*", " "))
cri_beta <- if (is.element(reg$covar_assumption, c("exchangeable",
"independent"))) {
paste0("(", round(beta[, 2], 2), ",", " ", round(beta[, 3], 2), ")",
ifelse(beta[, 2] > 0 | beta[, 3] < 0, "*", " "))
} else {
paste0("(", round(reg$beta[2], 2), ",", " ", round(reg$beta[3], 2), ")",
ifelse(reg$beta[2] > 0 | reg$beta[3] < 0, "*", " "))
}
} else if (is.element(measure, c("Odds ratio", "Ratio of means")) &
model == "FE") {
em_ref_nma <- exp(em_ref_nma)
em_ref_nmr <- exp(em_ref_nmr)
beta <- exp(beta)
cri_est_nma <- paste0("(", round(em_ref_nma[, 2], 2), ",", " ",
round(em_ref_nma[, 3], 2), ")",
ifelse(em_ref_nma[, 2] > 1 |
em_ref_nma[, 3] < 1, "*", " "))
cri_est_nmr <- paste0("(", round(em_ref_nmr[, 2], 2), ",", " ",
round(em_ref_nmr[, 3], 2), ")",
ifelse(em_ref_nmr[, 2] > 1 |
em_ref_nmr[, 3] < 1, "*", " "))
cri_beta <- if (is.element(reg$covar_assumption, c("exchangeable",
"independent"))) {
paste0("(", round(beta[, 2], 2), ",", " ", round(beta[, 3], 2), ")",
ifelse(beta[, 2] > 1 | beta[, 3] < 1, "*", " "))
} else {
paste0("(", round(beta[2], 2), ",", " ", round(beta[3], 2), ")",
ifelse(beta[2] > 1 | beta[3] < 1, "*", " "))
}
}
tau_nma <- if (model == "RE") {
round(full$tau, 2)
}
tau_nmr <- if (model == "RE") {
round(reg$tau, 2)
}
model_assess_nma <- round(full$model_assessment, 2)
model_assess_nmr <- round(reg$model_assessment, 2)
if (model == "RE") {
cri_tau_nma <- paste0("(", tau_nma[, 3], ",", " ", tau_nma[, 7], ")")
cri_tau_nmr <- paste0("(", tau_nmr[, 3], ",", " ", tau_nmr[, 7], ")")
}
if (model == "RE") {
table_model_assess <- data.frame(c("Network meta-analysis",
"Meta-regression"),
rbind(model_assess_nma[c(1, 3, 2)],
model_assess_nmr[c(1, 3, 2)]),
rbind(cbind(tau_nma[, 5],
tau_nma[, 2],
cri_tau_nma),
cbind(tau_nmr[, 5],
tau_nmr[, 2],
cri_tau_nmr)))
colnames(table_model_assess) <- c("Analysis",
"DIC", "Mean deviance", "pD",
"Median tau", "SD tau", "95% CrI tau")
} else {
table_model_assess <- data.frame(c("Network meta-analysis",
"Meta-regression"),
rbind(model_assess_nma[c(1, 3, 2)],
model_assess_nmr[c(1, 3, 2)]))
colnames(table_model_assess) <- c("Analysis",
"DIC", "Mean deviance", "pD")
}
message(ifelse(model_assess_nma[1] - model_assess_nmr[1] > 5,
"NMR preferred when accounting for model fit and complexity",
ifelse(
model_assess_nma[1] - model_assess_nmr[1] < -5,
"NMA preferred when accounting for model fit and complexity",
"There is little to choose between the two models")))
if (model == "RE") {
est_both_models <- na.omit(data.frame(drug_names_sorted,
round(em_ref_nma[, 1], 2),
cri_est_nma,
round(em_ref_nmr[, 1], 2),
cri_est_nmr))
pred_both_models <- na.omit(data.frame(drug_names_sorted,
round(pred_ref_nma[, 1], 2),
cri_pred_nma,
round(pred_ref_nmr[, 1], 2),
cri_pred_nmr))
colnames(est_both_models) <- colnames(pred_both_models) <-
c(paste("versus", compar),
"Mean NMA", "95% CrI NMA", "Mean NMR", "95% CrI NMR")
rownames(est_both_models) <- rownames(pred_both_models) <- NULL
} else {
est_both_models <- na.omit(data.frame(drug_names_sorted,
round(em_ref_nma[, 1], 2),
cri_est_nma,
round(em_ref_nmr[, 1], 2),
cri_est_nmr))
colnames(est_both_models) <- c(paste("versus", compar),
"Mean NMA", "95% CrI NMA", "Mean NMR", "95% CrI NMR")
rownames(est_both_models) <- NULL
}
if (is.element(reg$covar_assumption, c("exchangeable", "independent"))) {
reg_coeff <- na.omit(data.frame(drug_names_sorted, round(beta[, 1], 2),
cri_beta))
colnames(reg_coeff) <- c(paste("versus", compar), "Mean beta",
"95% CrI beta")
} else {
reg_coeff <- data.frame(round(beta[1], 2), cri_beta)
colnames(reg_coeff) <- c("Mean beta", "95% CrI beta")
}
rownames(reg_coeff) <- NULL
forest_plots <- forestplot_metareg(full, reg, compar, cov_value, drug_names)
sucra_scatterplot <- scatterplot_sucra(full, reg, cov_value, drug_names)
if (save_xls == TRUE & model == "RE") {
write_xlsx(est_both_models, paste0("Table NMA vs NMR_Estimation", ".xlsx"))
write_xlsx(pred_both_models, paste0("Table NMA vs NMR_Prediction", ".xlsx"))
write_xlsx(table_model_assess, paste0("Table Model Assessment_NMA vs NMR",
".xlsx"))
if (is.element(reg$covar_assumption, c("exchangeable", "independent"))) {
write_xlsx(reg_coeff, paste0("Table NMA vs NMR_Coefficient", ".xlsx"))
}
} else if (save_xls == TRUE & model == "FE") {
write_xlsx(est_both_models, paste0("Table NMA vs NMR_Estimation", ".xlsx"))
write_xlsx(table_model_assess, paste0("Table Model Assessment_NMA vs NMR",
".xlsx"))
if (is.element(reg$covar_assumption, c("exchangeable", "independent"))) {
write_xlsx(reg_coeff, paste0("Table NMA vs NMR_Coefficient", ".xlsx"))
}
}
results <- if (model == "RE") {
list(table_estimates = knitr::kable(est_both_models),
table_predictions = knitr::kable(pred_both_models),
table_model_assessment = knitr::kable(table_model_assess),
         table_regression_coefficients = knitr::kable(reg_coeff),
interval_plots = suppressWarnings(ggarrange(forest_plots)),
sucra_scatterplot = sucra_scatterplot)
} else {
list(table_estimates = knitr::kable(est_both_models),
table_model_assessment = knitr::kable(table_model_assess),
         table_regression_coefficients = knitr::kable(reg_coeff),
interval_plots = suppressWarnings(ggarrange(forest_plots)),
sucra_scatterplot = sucra_scatterplot)
}
return(results)
} |
test_that(
desc = "hybrid methods works",
code = {
options(tibble.width = Inf)
library(lme4)
set.seed(123)
lmm_mod <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
df_lmm <- tidy_parameters(lmm_mod, effects = "fixed")
expect_equal(
df_lmm$estimate,
c(251.40510, 10.46729),
tolerance = 0.001
)
expect_equal(
dim(glance_performance(lmm_mod, effects = "fixed")), c(1L, 10L)
)
set.seed(123)
lm_mod <- lm(Reaction ~ Days, sleepstudy)
df_lm <- tidy_parameters(lm_mod, robust = TRUE)
expect_equal(df_lm$estimate[1], 251.4051, tolerance = 0.001)
expect_equal(
dim(glance_performance(lm_mod, effects = "fixed"))[[1]], 1L
)
set.seed(123)
library(lavaan)
HS.model <- " visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 "
mod_lavaan <-
lavaan(
HS.model,
data = HolzingerSwineford1939,
auto.var = TRUE,
auto.fix.first = TRUE,
auto.cov.lv.x = TRUE
)
expect_identical(
tidy_parameters(mod_lavaan, effects = "fixed"),
tidy_parameters(mod_lavaan, effects = "fixed", exponentiate = TRUE)
)
library(MASS)
set.seed(123)
mod <- rlm(stack.loss ~ ., stackloss)
df_rlm <- tidy_parameters(mod)
df <- suppressMessages(suppressWarnings(tidy_parameters(tidy(mod))))
expect_snapshot(df_rlm)
expect_equal(dim(df_rlm)[[1]], 4L)
expect_s3_class(df, "tbl_df")
expect_equal(
dim(suppressWarnings(broomExtra::tidy_parameters(acf(lh, plot = FALSE)))),
c(17L, 2L)
)
expect_null(suppressWarnings(broomExtra::glance_performance(acf(lh, plot = FALSE))))
expect_null(broomExtra::tidy_parameters(list(1, c("x", "y")), verbose = FALSE))
}
) |
read_initial_state <- function(model.path) {
IRdata <- get.model.file(model.path, PARAMETERS_DIR, file.pattern=INITIAL_STATE, header=FALSE)
initial.state <- list(
detritus_so=IRdata[1,2],
detritus_d=IRdata[2,2],
x_detritus_s1=IRdata[3,2],
x_detritus_s2=IRdata[4,2],
x_detritus_s3=IRdata[5,2],
x_detritus_d1=IRdata[6,2],
x_detritus_d2=IRdata[7,2],
x_detritus_d3=IRdata[8,2],
xR_detritus_s1=IRdata[9,2],
xR_detritus_s2=IRdata[10,2],
xR_detritus_s3=IRdata[11,2],
xR_detritus_d1=IRdata[12,2],
xR_detritus_d2=IRdata[13,2],
xR_detritus_d3=IRdata[14,2],
discard_o=IRdata[15,2],
corpse_s1=IRdata[16,2],
corpse_s2=IRdata[17,2],
corpse_s3=IRdata[18,2],
corpse_d1=IRdata[19,2],
corpse_d2=IRdata[20,2],
corpse_d3=IRdata[21,2],
ammonia_so=IRdata[22,2],
ammonia_d=IRdata[23,2],
x_ammonia_s1=IRdata[24,2],
x_ammonia_s2=IRdata[25,2],
x_ammonia_s3=IRdata[26,2],
x_ammonia_d1=IRdata[27,2],
x_ammonia_d2=IRdata[28,2],
x_ammonia_d3=IRdata[29,2],
nitrate_so=IRdata[30,2],
nitrate_d=IRdata[31,2],
x_nitrate_s1=IRdata[32,2],
x_nitrate_s2=IRdata[33,2],
x_nitrate_s3=IRdata[34,2],
x_nitrate_d1=IRdata[35,2],
x_nitrate_d2=IRdata[36,2],
x_nitrate_d3=IRdata[37,2],
phyt_so=IRdata[38,2],
phyt_d=IRdata[39,2],
omni_o=IRdata[40,2],
carn_o=IRdata[41,2],
benthslar_o=IRdata[42,2],
benths_o=IRdata[43,2],
benthclar_o=IRdata[44,2],
benthc_o=IRdata[45,2],
fishp_o=IRdata[46,2],
fishplar_o=IRdata[47,2],
fishd_o=IRdata[48,2],
fishdlar_o=IRdata[49,2],
fishm_o=IRdata[50,2],
bird_o=IRdata[51,2],
detritus_si=IRdata[52,2],
ammonia_si=IRdata[53,2],
nitrate_si=IRdata[54,2],
phyt_si=IRdata[55,2],
benthslar_i=IRdata[56,2],
benthclar_i=IRdata[57,2],
benths_i=IRdata[58,2],
benthc_i=IRdata[59,2],
discard_i=IRdata[60,2],
omni_i=IRdata[61,2],
carn_i=IRdata[62,2],
fishplar_i=IRdata[63,2],
fishdlar_i=IRdata[64,2],
fishp_i=IRdata[65,2],
fishm_i=IRdata[66,2],
fishd_i=IRdata[67,2],
bird_i=IRdata[68,2],
seal_o=IRdata[69,2],
seal_i=IRdata[70,2],
ceta_o=IRdata[71,2],
ceta_i=IRdata[72,2],
corpse_s0=IRdata[73,2],
corpse_d0=IRdata[74,2],
kelpC= IRdata[75,2],
kelpN= IRdata[76,2],
kelpdebris= IRdata[77,2],
netpprod_o=0,
netpprod_i=0,
NNCP_o=0,
NNCP_i=0,
phytgrossprod_o=0,
phytgrossprod_i=0,
kelpCprod_i=0,
kelpCexud_i=0,
kelpNprod_i=0,
omnigrossprod_o=0,
omnigrossprod_i=0,
carngrossprod_o=0,
carngrossprod_i=0,
pfishlargrossprod_o=0,
pfishlargrossprod_i=0,
dfishlargrossprod_o=0,
dfishlargrossprod_i=0,
pfishgrossprod_o=0,
pfishgrossprod_i=0,
mfishgrossprod_o=0,
mfishgrossprod_i=0,
dfishgrossprod_o=0,
dfishgrossprod_i=0,
benthslargrossprod_o=0,
benthslargrossprod_i=0,
benthclargrossprod_o=0,
benthclargrossprod_i=0,
benthsgrossprod_o=0,
benthsgrossprod_i=0,
benthcgrossprod_o=0,
benthcgrossprod_i=0,
birdgrossprod_o=0,
birdgrossprod_i=0,
sealgrossprod_o=0,
sealgrossprod_i=0,
cetagrossprod_o=0,
cetagrossprod_i=0,
wcdenitrif_o=0,
wcdenitrif_i=0,
seddenitrif_o=0,
seddenitrif_i=0,
fluxsedamm_wcamm=0,
fluxwcdet_wcamm=0,
fluxomni_wcamm=0,
fluxcarn_wcamm=0,
fluxpfishlar_wcamm=0,
fluxdfishlar_wcamm=0,
fluxpfish_wcamm=0,
fluxmfish_wcamm=0,
fluxdfish_wcamm=0,
fluxbenthslar_wcamm=0,
fluxbenthclar_wcamm=0,
fluxbenths_wcamm=0,
fluxbenthc_wcamm=0,
fluxbird_wcamm=0,
fluxseal_wcamm=0,
fluxceta_wcamm=0,
fluxxdet_sedamm=0,
fluxxRdet_sedamm=0,
fluxwcamm_wcnit=0,
fluxsednit_wcnit=0,
fluxsedamm_sednit=0,
fluxxdet_wcdet=0,
fluxkelpdebris_wcdet=0,
fluxcorp_wcdet=0,
fluxphyt_wcdet=0,
fluxomni_wcdet=0,
fluxcarn_wcdet=0,
fluxpfishlar_wcdet=0,
fluxdfishlar_wcdet=0,
fluxpfish_wcdet=0,
fluxmfish_wcdet=0,
fluxdfish_wcdet=0,
fluxbenthslar_wcdet=0,
fluxbenthclar_wcdet=0,
fluxbenths_wcdet=0,
fluxbenthc_wcdet=0,
fluxbird_wcdet=0,
fluxseal_wcdet=0,
fluxceta_wcdet=0,
fluxwcdet_xdet=0,
fluxcorp_xdet=0,
fluxbenths_xdet=0,
fluxbenthc_xdet=0,
fluxxdet_xRdet=0,
fluxkelpdebris_xRdet=0,
fluxcorp_xRdet=0,
fluxkelp_kelpdebris=0,
fluxdisc_corp=0,
fluxpfish_corp=0,
fluxmfish_corp=0,
fluxdfish_corp=0,
fluxbenths_corp=0,
fluxbenthc_corp=0,
fluxbird_corp=0,
fluxseal_corp=0,
fluxceta_corp=0,
fluxwcamm_kelp=0,
fluxwcnit_kelp=0,
fluxwcamm_phyt_o=0,
fluxwcamm_phyt_i=0,
fluxwcnit_phyt_o=0,
fluxwcnit_phyt_i=0,
fluxwcdet_omni=0,
fluxphyt_omni=0,
fluxbenthslar_omni=0,
fluxbenthclar_omni=0,
fluxomni_carn=0,
fluxpfishlar_carn=0,
fluxdfishlar_carn=0,
fluxbenthslar_carn=0,
fluxbenthclar_carn=0,
fluxomni_pfishlar=0,
fluxbenthslar_pfishlar=0,
fluxbenthclar_pfishlar=0,
fluxomni_dfishlar=0,
fluxbenthslar_dfishlar=0,
fluxbenthclar_dfishlar=0,
fluxomni_pfish=0,
fluxcarn_pfish=0,
fluxpfishlar_pfish=0,
fluxdfishlar_pfish=0,
fluxbenthslar_pfish=0,
fluxbenthclar_pfish=0,
fluxomni_mfish=0,
fluxcarn_mfish=0,
fluxpfishlar_mfish=0,
fluxdfishlar_mfish=0,
fluxbenthslar_mfish=0,
fluxbenthclar_mfish=0,
fluxcorp_dfish=0,
fluxdisc_dfish=0,
fluxcarn_dfish=0,
fluxpfishlar_dfish=0,
fluxdfishlar_dfish=0,
fluxpfish_dfish=0,
fluxmfish_dfish=0,
fluxdfish_dfish=0,
fluxbenths_dfish=0,
fluxbenthc_dfish=0,
fluxwcdet_benthslar=0,
fluxphyt_benthslar=0,
fluxwcdet_benthclar=0,
fluxphyt_benthclar=0,
fluxwcdet_benths=0,
fluxxdet_benths=0,
fluxxRdet_benths=0,
fluxphyt_benths=0,
fluxkelp_benthc=0,
fluxkelpdebris_benthc=0,
fluxcorp_benthc=0,
fluxbenths_benthc=0,
fluxcorp_bird=0,
fluxdisc_bird=0,
fluxcarn_bird=0,
fluxpfish_bird=0,
fluxmfish_bird=0,
fluxdfish_bird=0,
fluxbenths_bird=0,
fluxbenthc_bird=0,
fluxcorp_seal=0,
fluxdisc_seal=0,
fluxcarn_seal=0,
fluxpfish_seal=0,
fluxmfish_seal=0,
fluxdfish_seal=0,
fluxbenths_seal=0,
fluxbenthc_seal=0,
fluxbird_seal=0,
fluxdisc_ceta=0,
fluxomni_ceta=0,
fluxcarn_ceta=0,
fluxpfish_ceta=0,
fluxmfish_ceta=0,
fluxdfish_ceta=0,
fluxbenths_ceta=0,
fluxbenthc_ceta=0,
fluxbird_ceta=0,
fluxseal_ceta=0,
Bs_spawn=0,
Bs_recruit=0,
Bc_spawn=0,
Bc_recruit=0,
Pfish_spawn=0,
Pfish_recruit=0,
Dfish_spawn=0,
Dfish_recruit=0,
fluxwcnit_Ngas=0,
fluxsednit_Ngas=0,
fluxkelpdebris_beachexport=0,
fluxAMMoutflow_o=0,
fluxNIToutflow_o=0,
fluxAMMoutflow_i=0,
fluxNIToutflow_i=0,
fluxPHYToutflow_o=0,
fluxDEToutflow_o=0,
fluxPHYToutflow_i=0,
fluxDEToutflow_i=0,
mfish_emigration=0,
fluxsedboundary_o=0,
fluxsedboundary_i=0,
fluxAMMinflow_o=0,
fluxNITinflow_o=0,
fluxAMMinflow_i=0,
fluxNITinflow_i=0,
fluxPHYTinflow_o=0,
fluxDETinflow_o=0,
fluxPHYTinflow_i=0,
fluxDETinflow_i=0,
mfish_imigration=0,
atmosAMMinput_o=0,
atmosNITinput_o=0,
atmosAMMinput_i=0,
atmosNITinput_i=0,
rivAMMinflow=0,
rivNITinflow=0,
rivPARTinflow=0,
DINflux_i_o=0,
DINflux_o_i=0,
PARTflux_i_o=0,
PARTflux_o_i=0,
activemigpelfish_i_o=0,
activemigmigfish_i_o=0,
activemigdemfish_i_o=0,
activemigbird_i_o=0,
activemigseal_i_o=0,
activemigceta_i_o=0,
activemigpelfish_o_i=0,
activemigmigfish_o_i=0,
activemigdemfish_o_i=0,
activemigbird_o_i=0,
activemigseal_o_i=0,
activemigceta_o_i=0,
vertnitflux=0,
horiznitflux=0,
landp_o=0,
landd_quota_o=0,
landd_nonquota_o=0,
landm_o=0,
landsb_o=0,
landcb_o=0,
landcz_o=0,
landbd_o=0,
landsl_o=0,
landct_o=0,
discpel_o=0,
discdem_quota_o=0,
discdem_nonquota_o=0,
discmig_o=0,
discsb_o=0,
disccb_o=0,
disccz_o=0,
discbd_o=0,
discsl_o=0,
discct_o=0,
landp_i=0,
landd_quota_i=0,
landd_nonquota_i=0,
landm_i=0,
landsb_i=0,
landcb_i=0,
landcz_i=0,
landbd_i=0,
landsl_i=0,
landct_i=0,
landkp_i=0,
discpel_i=0,
discdem_quota_i=0,
discdem_nonquota_i=0,
discmig_i=0,
discsb_i=0,
disccb_i=0,
disccz_i=0,
discbd_i=0,
discsl_i=0,
discct_i=0,
disckp_i=0,
offalpel_o=0,
offaldem_quota_o=0,
offaldem_nonquota_o=0,
offalmig_o=0,
offalsb_o=0,
offalcb_o=0,
offalcz_o=0,
offalbd_o=0,
offalsl_o=0,
offalct_o=0,
offalpel_i=0,
offaldem_quota_i=0,
offaldem_nonquota_i=0,
offalmig_i=0,
offalsb_i=0,
offalcb_i=0,
offalcz_i=0,
offalbd_i=0,
offalsl_i=0,
offalct_i=0,
offalkp_i=0,
omninetprod_o=0,
omninetprod_i=0,
carnnetprod_o=0,
carnnetprod_i=0,
pfishlarnetprod_o=0,
pfishlarnetprod_i=0,
dfishlarnetprod_o=0,
dfishlarnetprod_i=0,
pfishnetprod_o=0,
pfishnetprod_i=0,
mfishnetprod_o=0,
mfishnetprod_i=0,
dfishnetprod_o=0,
dfishnetprod_i=0,
benthslarnetprod_o=0,
benthslarnetprod_i=0,
benthclarnetprod_o=0,
benthclarnetprod_i=0,
benthsnetprod_o=0,
benthsnetprod_i=0,
benthcnetprod_o=0,
benthcnetprod_i=0,
birdnetprod_o=0,
birdnetprod_i=0,
sealnetprod_o=0,
sealnetprod_i=0,
cetanetprod_o=0,
cetanetprod_i=0
)
} |
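# Hedged usage sketch (added for illustration): read_initial_state() pulls the
# 77 measured state variables from the initial-state parameter file of a model
# definition and appends the cumulative flux accumulators, all initialised to
# zero. get.model.file(), PARAMETERS_DIR and INITIAL_STATE are assumed to be
# defined elsewhere in the package; the path below is hypothetical.
if (FALSE) {
  model.path <- "Models/North_Sea/1970-1999"   # hypothetical model directory
  initial.state <- read_initial_state(model.path)
  str(initial.state[1:5])                      # inspect a few state variables
}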
[
{
"title": "Spline wiggles (I)",
"href": "http://dankelley.github.io//r/2014/02/03/splines.html"
},
{
"title": "P-values: the Continuing Saga",
"href": "https://matloff.wordpress.com/2016/03/10/p-values-the-continuing-saga/"
},
{
"title": "Packages for By-Group Processing in R",
"href": "http://blog.revolutionanalytics.com/2011/02/packages-for-by-group-processing-in-r.html"
},
{
"title": "ISMB coverage on Twitter? It’s possible there was…",
"href": "https://nsaunders.wordpress.com/2011/08/01/ismb-coverage-on-twitter-its-possible-there-was/"
},
{
"title": "Nairobi Data Science Meetup: Paradigm Shift in Research with Samuel Kamande",
"href": "http://datascience-africa.org/2016/02/nairobi-data-science-meetup-paradigm-shift-in-research-with-samuel-kamande/"
},
{
"title": "Climate datasets in R",
"href": "https://web.archive.org/web/http://inundata.org/2011/08/10/climate-datasets-in-r/"
},
{
"title": "Pipeline to Plot Annual % Change",
"href": "http://timelyportfolio.blogspot.com/2014/11/pipeline-to-plot-annual-change.html"
},
{
"title": "Interactive color picker, using locator()",
"href": "http://is-r.tumblr.com/post/35332733463/interactive-color-picker-using-locator"
},
{
"title": "New R User Groups in Sydney, South Asia",
"href": "http://blog.revolutionanalytics.com/2010/06/new-r-user-groups-in-sydney-south-asia.html"
},
{
"title": "Digitizing jpeg graphs in R",
"href": "http://rscriptsandtips.blogspot.com/2014/02/digitizing-jpeg-graphs-in-r-i-have-been.html"
},
{
"title": "text2vec 0.3",
"href": "http://dsnotes.com//articles/text2vec-0-3"
},
{
"title": "Free Workshop: Mapping Open Data in R",
"href": "http://www.arilamstein.com/blog/2016/04/25/free-workshop-may-17-mapping-open-data-r/"
},
{
"title": "cricketr digs the Ashes!",
"href": "https://gigadom.wordpress.com/2015/07/20/cricketr-digs-the-ashes/"
},
{
"title": "Beaten by sheer pace – Cricket analytics with yorkr",
"href": "https://gigadom.wordpress.com/2016/05/08/beaten-by-sheer-pace-cricket-analytics-with-yorkr/"
},
{
"title": "Cool articles in the New York Time’s: Statistics + R",
"href": "http://blog.nguyenvq.com/blog/2009/08/14/cool-articles-in-the-new-york-times-statistics-r/"
},
{
"title": "Updates to the ‘aqp’ Package for R (version 0.98-3)",
"href": "https://casoilresource.lawr.ucdavis.edu/"
},
{
"title": "Hypothesis testing on normally distributed data in R",
"href": "http://firsttimeprogrammer.blogspot.com/2015/07/hypothesis-testing-on-normally.html"
},
{
"title": "Interactive plotting with rbokeh",
"href": "http://datascienceplus.com/interactive-plotting-with-rbokeh/"
},
{
"title": "Hadley Wickham: ggplot2 Webinar (Today!)",
"href": "http://www.gettinggeneticsdone.com/2012/02/hadley-wickham-ggplot2-webinar-today.html"
},
{
"title": "New ‘R Talk’ podcast with news from R Consortium",
"href": "http://blog.revolutionanalytics.com/2015/08/r-talk.html"
},
{
"title": "Building JSON in R: Three Methods",
"href": "http://randyzwitch.com/r-json-jsonlite-sprintf-paste/"
},
{
"title": "Random and fixed effects in sensory profiling",
"href": "http://wiekvoet.blogspot.com/2012/08/random-and-fixed-effects-in-sensory.html"
},
{
"title": "Bio7 1.7 for Windows Released!",
"href": "http://bio7.org/?p=2049"
},
{
"title": "More BLAS, BLASter, BLAStest: Updates on gcbd",
"href": "http://dirk.eddelbuettel.com/blog/2010/10/03/"
},
{
"title": "Adding Sweave.sty and Rd.sty to your LaTeX path in Mac OS X",
"href": "https://joey711.wordpress.com/2011/07/28/adding-sweave-sty-and-rd-sty-to-your-latex-path-in-mac-os-x/"
},
{
"title": "London UseR Group Talk – Slides",
"href": "https://web.archive.org/web/http://www.theresearchkitchen.com/blog/archives/434"
},
{
"title": "Mapped: Twitter Languages in New York",
"href": "http://spatial.ly/2013/02/mapped-twitter-languages-york/"
},
{
"title": "Codes for common Data Frame operations in R",
"href": "https://ujjwalkarn.me/2016/05/29/codes-for-common-dataframe-operations-in-r/"
},
{
"title": "Sensitivity analysis for neural networks",
"href": "https://beckmw.wordpress.com/2013/10/07/sensitivity-analysis-for-neural-networks/"
},
{
"title": "Happy Birthday rasterVis!",
"href": "https://procomun.wordpress.com/2013/06/07/happy-birthday-rastervis/"
},
{
"title": "In case you missed it: December 2013 Roundup",
"href": "http://blog.revolutionanalytics.com/2014/01/in-case-you-missed-it-december-2013-roundup.html"
},
{
"title": "Creating a text grob that automatically adjusts to viewport size",
"href": "https://ryouready.wordpress.com/2012/08/01/creating-a-text-grob-that-automatically-adjusts-to-viewport-size/"
},
{
"title": "Lattice when modeling, ggplot when publishing",
"href": "http://www.quantumforest.com/2011/10/lattice-when-modeling-ggplot-when-publishing/"
},
{
"title": "STL random_sample",
"href": "http://gallery.rcpp.org/articles/stl-random-sample/"
},
{
"title": "slumping model",
"href": "http://dankelley.github.io//r/2014/06/08/slumping-model.html"
},
{
"title": "Yep. He made it; country voted No",
"href": "https://feedproxy.google.com/~r/danielmarcelino/~3/jzTO5ME8BZE/"
},
{
"title": "Short tales of two NCAA basketball conferences (Big 12 and West Coast) using graphs",
"href": "http://www.analyticsandvisualization.com/2013/08/short-tales-of-two-ncaa-basketball.html"
},
{
"title": "Five great charts in 5 lines of R code each",
"href": "http://blog.revolutionanalytics.com/2016/08/five-great-charts-in-5-lines-of-r-code-each.html"
},
{
"title": "ggplot2: A little twist on back-to-back bar charts",
"href": "http://anythingbutrbitrary.blogspot.com/2012/07/ggplot2-little-twist-on-back-to-back.html"
},
{
"title": "A quetion",
"href": "http://sgsong.blogspot.com/2010/06/quetion.html"
},
{
"title": "Object Orientation in R – Notes from a novice",
"href": "https://web.archive.org/web/http://pirategrunt.com/2013/01/25/object-orientation-in-r-notes-from-a-novice/"
},
{
"title": "Google Statistician uses R and other programming tools",
"href": "http://industrialengineertools.blogspot.com/2013/02/google-statistician-uses-r-and-other.html"
},
{
"title": "Betterment uses R for modeling and reporting",
"href": "http://blog.revolutionanalytics.com/2015/11/betterment-uses-r.html"
},
{
"title": "R’s Testing Predicament",
"href": "http://www.mango-solutions.com/wp/2016/10/rs-testing-predicament/"
},
{
"title": "What can other languages learn from R?",
"href": "http://blog.revolutionanalytics.com/2010/09/what-can-other-languages-learn-from-r.html"
},
{
"title": "Wikipedia Attention and the US elections",
"href": "http://beautifuldata.net/2012/11/wikipedia-attention-and-the-us-elections/"
},
{
"title": "Email Netiquette",
"href": "http://applyr.blogspot.com/2011/10/email-netiquette.html"
},
{
"title": "Sinterklaas and Santa Claus gave presents",
"href": "http://wiekvoet.blogspot.com/2013/12/sinterklaas-and-santa-claus-gave.html"
},
{
"title": "Peer-reviewed R packages?",
"href": "https://rmazing.wordpress.com/2012/11/23/peer-reviews/"
},
{
"title": "Using rvest and dplyr to look at aviation incidents",
"href": "https://datashenanigan.wordpress.com/2015/04/30/using-rvest-and-dplyr-to-look-at-aviation-incidents/"
}
] |
h.mcv <- function(x, ...) UseMethod("h.mcv")
h.mcv.default <- function(x,deriv.order=0,lower=0.1*hos,upper=2*hos,tol=0.1 * lower,
kernel=c("gaussian","epanechnikov","triweight","tricube",
"biweight","cosine"),...)
{
if (!is.numeric(x) || length(dim(x)) >=1 || length(x) < 3L)
stop("argument 'x' must be numeric and need at least 3 data points")
    if (any(deriv.order < 0) || any(deriv.order != round(deriv.order)))
        stop("argument 'deriv.order' must be a non-negative integer")
if (missing(kernel)) kernel <- "gaussian"
r <- deriv.order
name <- deparse(substitute(x))
x <- x[!is.na(x)]
x <- sort(x)
n <- length(x)
if (kernel=="epanechnikov" && (2*r) + 2 >= 3) return(structure(list(x=x, data.name=name,n=n, kernel=kernel, deriv.order=r, h = NA ,min.mcv=NA),class="h.mcv"))
else if (kernel=="triweight" && (2*r) + 2 >= 7) return(structure(list(x=x, data.name=name,n=n, kernel=kernel, deriv.order=r, h = NA ,min.mcv=NA),class="h.mcv"))
else if (kernel=="biweight" && (2*r) + 2 >= 5) return(structure(list(x=x, data.name=name,n=n, kernel=kernel, deriv.order=r, h = NA ,min.mcv=NA),class="h.mcv"))
else if (kernel=="tricube" && (2*r) + 2 >= 10) return(structure(list(x=x, data.name=name,n=n, kernel=kernel, deriv.order=r, h = NA ,min.mcv=NA),class="h.mcv"))
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
    if (!is.numeric(upper)){
      warning("argument 'upper' must be numeric. Default 2*hos (oversmoothing) boundary was used")
      upper <- 2*hos
    }
    if (!is.numeric(lower)){
      warning("argument 'lower' must be numeric. Default 0.1*hos boundary was used")
      lower <- 0.1*hos
    }
    if (lower < 0 | lower >= upper){
      warning("the boundaries must be positive and 'lower' must be smaller than 'upper'. Default boundaries were used")
      upper <- 2*hos
      lower <- 0.1*hos
    }
R_Kr1 <- A3_kMr(kernel,r)
fmcv <- function(h)
{
L1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r)
diag(L1) <- 0
L2 <- ((-1)^(r)/((n-1)*h^(2*r+1)))* colSums(L1)
Q1 <- mean(L2)
D1 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r)
diag(D1) <- 0
D2 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D1)
Q2 <- mean(D2)
D3 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r+2)
diag(D3) <- 0
D4 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D3)
Q3 <- mean(D4)
(1/(n*h^(2*r+1)))* R_Kr1 + Q1 - Q2 - 0.5 * A2_kM(kernel) * Q3
}
obj <- optimize(fmcv , c(lower, upper),tol=tol)
structure(list(x=x, data.name=name,n=n, kernel=kernel, deriv.order=r,h = obj$minimum ,
min.mcv=obj$objective),class="h.mcv")
}
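# Minimal usage sketch (assumes the package helpers A2_kM, A3_kMr and
# kernel_fun_conv/kernel_fun_der are available): select a modified
# cross-validation bandwidth for the density of simulated data.
if (FALSE) {
  set.seed(1)
  x <- rnorm(200)
  fit <- h.mcv(x, deriv.order = 0, kernel = "gaussian")
  print(fit)   # dispatches to print.h.mcv defined below
}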
print.h.mcv <- function(x, digits=NULL, ...)
{
class(x) <- "h.mcv"
cat("\nCall:\t","\tModified Cross-Validation","\n",
"\nDerivative order = ",x$deriv.order,
"\nData: ",x$data.name," (",x$n," obs.);","\tKernel: ",x$kernel,
"\nMin MCV = ",format(x$min.mcv,digits=digits),";","\tBandwidth 'h' = ",format(x$h,digits=digits), "\n\n",sep="")
invisible(x)
}
plot.mcv <- function(f,seq.bws=NULL,main=NULL,sub = NULL, xlab=NULL, ylab=NULL,
type="l",las=1,lwd=1,...)
{
class(f) <- "h.mcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
if (kernel=="epanechnikov" && (2*r) + 2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for '(2 * order) + 2 >= 3' ")
else if (kernel=="triweight" && (2*r) + 2 >= 7) stop(" 'triweight kernel derivative = 0' for '(2 * order) + 2 >= 7' ")
else if (kernel=="biweight" && (2*r) + 2 >= 5) stop(" 'biweight kernel derivative = 0' for '(2 * order) + 2 >= 5' ")
else if (kernel=="tricube" && (2*r) + 2 >= 10) stop(" 'tricube kernel derivative = 0' for '(2 * order) + 2 >= 10' ")
if(is.null(xlab)) xlab <- "Bandwidths"
if(is.null(ylab)) ylab <- bquote(MCV~(h[(.(r))]))
if(is.null(main)){
if(r !=0) {main <- "Modified Cross-Validation function for \nBandwidth Choice for Density Derivative"}else{
main <- "Modified Cross-Validation function for \nBandwidth Choice for Density Function"}
}
if(is.null(sub)) sub <- paste("Kernel",kernel,";","Derivative order = ",r)
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
fmcv <- function(h)
{
L1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r)
diag(L1) <- 0
L2 <- ((-1)^(r)/((n-1)*h^(2*r+1)))* colSums(L1)
Q1 <- mean(L2)
D1 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r)
diag(D1) <- 0
D2 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D1)
Q2 <- mean(D2)
D3 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r+2)
diag(D3) <- 0
D4 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D3)
Q3 <- mean(D4)
(1/(n*h^(2*r+1)))* R_Kr1 + Q1 - Q2 - 0.5 * A2_kM(kernel) * Q3
}
D <- lapply(1:length(seq.bws), function(i) fmcv(seq.bws[i]))
Minf <- c(do.call("rbind",D))
plot.default(seq.bws,Minf,type=type,las=las,lwd=lwd,xlab=xlab,ylab=ylab,
main=main,sub=sub,font.main=2,cex.main=0.9,font.sub=2,cex.sub=0.7,...)
return(list(kernel=kernel,deriv.order=r,seq.bws=seq.bws, mcv=Minf))
}
plot.h.mcv <- function(x,seq.bws=NULL,...) plot.mcv(x,seq.bws,...)
lines.mcv <- function(f,seq.bws=NULL,...)
{
class(f) <- "h.mcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
if (kernel=="epanechnikov" && (2*r) + 2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for '(2 * order) + 2 >= 3' ")
else if (kernel=="triweight" && (2*r) + 2 >= 7) stop(" 'triweight kernel derivative = 0' for '(2 * order) + 2 >= 7' ")
else if (kernel=="biweight" && (2*r) + 2 >= 5) stop(" 'biweight kernel derivative = 0' for '(2 * order) + 2 >= 5' ")
else if (kernel=="tricube" && (2*r) + 2 >= 10) stop(" 'tricube kernel derivative = 0' for '(2 * order) + 2 >= 10' ")
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
fmcv <- function(h)
{
L1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r)
diag(L1) <- 0
L2 <- ((-1)^(r)/((n-1)*h^(2*r+1)))* colSums(L1)
Q1 <- mean(L2)
D1 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r)
diag(D1) <- 0
D2 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D1)
Q2 <- mean(D2)
D3 <- kernel_fun_der(kernel, outer(x,x,"-")/h,deriv.order=2*r+2)
diag(D3) <- 0
D4 <- ((-1)^r / ((n-1)*h^(2*r+1)))* colSums(D3)
Q3 <- mean(D4)
(1/(n*h^(2*r+1)))* R_Kr1 + Q1 - Q2 - 0.5 * A2_kM(kernel) * Q3
}
D <- lapply(1:length(seq.bws), function(i) fmcv(seq.bws[i]))
Minf <- c(do.call("rbind",D))
lines.default(seq.bws,Minf,...)
invisible(NULL)
}
lines.h.mcv <- function(x,seq.bws=NULL,...) lines.mcv(x,seq.bws,...) |
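# Illustrative sketch (same package assumptions as above): overlay the MCV
# criterion curves of two kernels using the plot and lines methods.
if (FALSE) {
  set.seed(1)
  x <- rnorm(200)
  f1 <- h.mcv(x, kernel = "gaussian")
  f2 <- h.mcv(x, kernel = "biweight")
  plot(f1)
  lines(f2, col = "red")
}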
NULL
.codegurureviewer$associate_repository_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Repository = structure(list(CodeCommit = structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Bitbucket = structure(list(Name = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), GitHubEnterpriseServer = structure(list(Name = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ClientRequestToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$associate_repository_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RepositoryAssociation = structure(list(AssociationId = structure(logical(0), tags = list(type = "string")), AssociationArn = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), StateReason = structure(logical(0), tags = list(type = "string")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$create_code_review_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), RepositoryAssociationArn = structure(logical(0), tags = list(type = "string")), Type = structure(list(RepositoryAnalysis = structure(list(RepositoryHead = structure(list(BranchName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ClientRequestToken = structure(logical(0), tags = list(idempotencyToken = TRUE, type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$create_code_review_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReview = structure(list(Name = structure(logical(0), tags = list(type = "string")), CodeReviewArn = structure(logical(0), tags = list(type = "string")), RepositoryName = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), StateReason = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), Type = structure(logical(0), tags = list(type = "string")), PullRequestId = structure(logical(0), tags = list(type = "string")), SourceCodeType = structure(list(CommitDiff = structure(list(SourceCommit = structure(logical(0), tags = list(type = "string")), DestinationCommit = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RepositoryHead = structure(list(BranchName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), AssociationArn = structure(logical(0), tags = list(type = "string")), Metrics = structure(list(MeteredLinesOfCodeCount = structure(logical(0), tags = list(type = "long")), FindingsCount = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_code_review_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReviewArn = structure(logical(0), tags = list(location = "uri", locationName = "CodeReviewArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_code_review_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReview = structure(list(Name = structure(logical(0), tags = list(type = "string")), CodeReviewArn = structure(logical(0), tags = list(type = "string")), RepositoryName = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), StateReason = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), Type = structure(logical(0), tags = list(type = "string")), PullRequestId = structure(logical(0), tags = list(type = "string")), SourceCodeType = structure(list(CommitDiff = structure(list(SourceCommit = structure(logical(0), tags = list(type = "string")), DestinationCommit = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RepositoryHead = structure(list(BranchName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), AssociationArn = structure(logical(0), tags = list(type = "string")), Metrics = structure(list(MeteredLinesOfCodeCount = structure(logical(0), tags = list(type = "long")), FindingsCount = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_recommendation_feedback_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReviewArn = structure(logical(0), tags = list(location = "uri", locationName = "CodeReviewArn", type = "string")), RecommendationId = structure(logical(0), tags = list(location = "querystring", locationName = "RecommendationId", type = "string")), UserId = structure(logical(0), tags = list(location = "querystring", locationName = "UserId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_recommendation_feedback_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RecommendationFeedback = structure(list(CodeReviewArn = structure(logical(0), tags = list(type = "string")), RecommendationId = structure(logical(0), tags = list(type = "string")), Reactions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserId = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_repository_association_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AssociationArn = structure(logical(0), tags = list(location = "uri", locationName = "AssociationArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$describe_repository_association_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RepositoryAssociation = structure(list(AssociationId = structure(logical(0), tags = list(type = "string")), AssociationArn = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), StateReason = structure(logical(0), tags = list(type = "string")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$disassociate_repository_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AssociationArn = structure(logical(0), tags = list(location = "uri", locationName = "AssociationArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$disassociate_repository_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RepositoryAssociation = structure(list(AssociationId = structure(logical(0), tags = list(type = "string")), AssociationArn = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), StateReason = structure(logical(0), tags = list(type = "string")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_code_reviews_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProviderTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "ProviderTypes", type = "list")), States = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "States", type = "list")), RepositoryNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "RepositoryNames", type = "list")), Type = structure(logical(0), tags = list(location = "querystring", locationName = "Type", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "MaxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "NextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_code_reviews_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReviewSummaries = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), CodeReviewArn = structure(logical(0), tags = list(type = "string")), RepositoryName = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), Type = structure(logical(0), tags = list(type = "string")), PullRequestId = structure(logical(0), tags = list(type = "string")), MetricsSummary = structure(list(MeteredLinesOfCodeCount = structure(logical(0), tags = list(type = "long")), FindingsCount = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_recommendation_feedback_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "NextToken", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "MaxResults", type = "integer")), CodeReviewArn = structure(logical(0), tags = list(location = "uri", locationName = "CodeReviewArn", type = "string")), UserIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "UserIds", type = "list")), RecommendationIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "RecommendationIds", type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_recommendation_feedback_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RecommendationFeedbackSummaries = structure(list(structure(list(RecommendationId = structure(logical(0), tags = list(type = "string")), Reactions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_recommendations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "NextToken", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "MaxResults", type = "integer")), CodeReviewArn = structure(logical(0), tags = list(location = "uri", locationName = "CodeReviewArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_recommendations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RecommendationSummaries = structure(list(structure(list(FilePath = structure(logical(0), tags = list(type = "string")), RecommendationId = structure(logical(0), tags = list(type = "string")), StartLine = structure(logical(0), tags = list(type = "integer")), EndLine = structure(logical(0), tags = list(type = "integer")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_repository_associations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProviderTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "ProviderType", type = "list")), States = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "State", type = "list")), Names = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "Name", type = "list")), Owners = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "Owner", type = "list")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "MaxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "NextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_repository_associations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(RepositoryAssociationSummaries = structure(list(structure(list(AssociationArn = structure(logical(0), tags = list(type = "string")), ConnectionArn = structure(logical(0), tags = list(type = "string")), LastUpdatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), AssociationId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), ProviderType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$put_recommendation_feedback_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CodeReviewArn = structure(logical(0), tags = list(type = "string")), RecommendationId = structure(logical(0), tags = list(type = "string")), Reactions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$put_recommendation_feedback_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$tag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$tag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$untag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "tagKeys", type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.codegurureviewer$untag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
} |
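# Hedged sketch: these internal constructors only build AWS shape templates;
# populate() (assumed to come from paws.common) merges the user's arguments
# into the template before the request is serialised. The ARN below is
# hypothetical and truncated.
if (FALSE) {
  req <- .codegurureviewer$describe_code_review_input(
    CodeReviewArn = "arn:aws:codeguru-reviewer:..."
  )
  str(req)
}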
bandwidth_optimal<-function( ptrue, r, m, x, H, link = c( "logit" ), guessing = 0,
lapsing = 0, K = 2, p = 1, ker = c( "dnorm" ),
maxiter = 50, tol = 1e-6, method = c( "all" ) ) {
ISE <- function( f1, f2 ) {
return( sum( ( f1 - f2 )^2 ) );
}
get_ise_p <- function( h ) {
fest <- locglmfit( x, r, m, x, h, FALSE, link, guessing, lapsing,
K, p, ker, maxiter, tol );
return( ISE( ptrue, fest$pfit ) );
}
get_ise <- function( h ) {
ftmp <- locglmfit( x, r, m, x, h, FALSE, link, guessing, lapsing,
K, p, ker, maxiter, tol );
fit_eta <- linkfun( ptrue );
return( ISE( fit_eta, ftmp$eta ) );
}
get_dev <- function( h ) {
ftmp <- locglmfit( x, r, m, x, h, FALSE, link, guessing, lapsing,
K, p, ker, maxiter, tol );
        return( deviance2( ptrue * m, m, ftmp$pfit ) );
}
if( missing("ptrue") || missing("r") || missing("m") || missing("x") ||
missing("H") ) {
stop("Check input. First 5 arguments are mandatory");
}
if ( length(ptrue) != length(x)){
stop("ptrue must have the same length as the stimulus levels x")
}
checkdata<-list();
checkdata[[1]] <- x;
checkdata[[2]] <- r;
checkdata[[3]] <- m;
checkinput( "psychometricdata", checkdata );
rm( checkdata )
checkinput( 'minmaxbandwidth', H );
checkinput( 'linkfunction', link );
if( length( guessing ) > 1 ) {
stop( 'guessing rate must be scalar' );
}
if( length( lapsing ) > 1 ) {
stop( 'lapsing rate must be scalar' );
}
checkinput( 'guessingandlapsing', c( guessing, lapsing ) );
if (link == "weibull" || link == "revweibull"){
checkinput( "exponentk", K );
}
pn <- list()
pn[[1]] <- p
pn[[2]] <- x
checkinput( "degreepolynomial", pn );
checkinput( 'kernel', ker );
checkinput( 'maxiter', maxiter );
checkinput( 'tolerance', tol );
checkinput( 'method', method );
if( link == "logit" ||
link == "probit" ||
link == "loglog" ||
link == "comploglog" ||
link == "weibull" ||
link == "revweibull" ) {
LINK <- paste( link, "_link_private", sep = "" );
}else{
LINK <- link
}
if( LINK != "weibull_link_private" && LINK != "revweibull_link_private" ) {
linkuser <- eval( call( LINK, guessing, lapsing ) );
}
else{
linkuser <- eval( call( LINK, K, guessing, lapsing ) );
}
linkfun <- linkuser$linkfun;
    op <- options(warn = -1); on.exit(options(op), add = TRUE)  # suppress optimize() warnings, restore on exit
h <- NULL;
if( method == "ISE" ) {
h <- optimize( get_ise_p, lower = H[1], upper = H[2] )$minimum;
}
else {
if ( method == "ISEeta") {
h <- optimize( get_ise, lower = H[1], upper = H[2] )$minimum;
}
else {
if( method == "likelihood" ) {
h <- optimize( get_dev, lower = H[1], upper = H[2] )$minimum;
}
else {
h$pscale <- optimize( get_ise_p, lower = H[1], upper = H[2]
)$minimum;
h$etascale <- optimize( get_ise, lower = H[1], upper = H[2]
)$minimum;
h$deviance <- optimize( get_dev, lower = H[1], upper = H[2]
)$minimum;
}
}
}
return( h );
} |
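# Usage sketch (illustrative only): bandwidth_optimal() depends on locglmfit(),
# checkinput(), deviance2() and the *_link_private constructors from the same
# package. The data below are simulated; ptrue holds the true psychometric
# function evaluated at the stimulus levels x.
if (FALSE) {
  x <- seq(-2, 2, length.out = 20)
  ptrue <- pnorm(x)                      # generating probabilities
  m <- rep(50, length(x))                # trials per stimulus level
  r <- rbinom(length(x), m, ptrue)       # simulated successes
  h <- bandwidth_optimal(ptrue, r, m, x, H = c(0.1, 2), method = "ISE")
}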
redcap_project <- setRefClass(
Class = "redcap_project",
fields = list(
redcap_uri = "character",
token = "character"
),
methods = list(
read = function(
batch_size = 100L,
interbatch_delay = 0,
records = NULL,
records_collapsed = "",
fields = NULL,
fields_collapsed = "",
forms = NULL,
forms_collapsed = "",
events = NULL,
events_collapsed = "",
raw_or_label = "raw",
raw_or_label_headers = "raw",
export_checkbox_label = FALSE,
export_survey_fields = FALSE,
export_data_access_groups = FALSE,
filter_logic = "",
guess_type = TRUE,
guess_max = 1000L,
verbose = TRUE,
config_options = NULL
) {
"Exports records from a REDCap project."
return(REDCapR::redcap_read(
batch_size = batch_size,
interbatch_delay = interbatch_delay,
redcap_uri = redcap_uri,
token = token,
records = records,
records_collapsed = records_collapsed,
fields = fields,
fields_collapsed = fields_collapsed,
forms = forms,
forms_collapsed = forms_collapsed,
events = events,
events_collapsed = events_collapsed,
raw_or_label = raw_or_label,
raw_or_label_headers = raw_or_label_headers,
export_checkbox_label = export_checkbox_label,
export_survey_fields = export_survey_fields,
export_data_access_groups = export_data_access_groups,
filter_logic = filter_logic,
        guess_type = guess_type,
        guess_max = guess_max,
verbose = verbose,
config_options = config_options
))
},
write = function(
ds_to_write,
batch_size = 100L,
interbatch_delay = 0,
continue_on_error = FALSE,
verbose = TRUE,
config_options = NULL
) {
"Imports records to a REDCap project."
return(REDCapR::redcap_write(
ds_to_write = ds_to_write,
batch_size = batch_size,
interbatch_delay = interbatch_delay,
continue_on_error = continue_on_error,
redcap_uri = redcap_uri,
token = token,
config_options = config_options
))
}
)
) |
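# Hedged example (hypothetical URL and token): instantiate the reference class
# once, then call its methods without repeating the connection details.
if (FALSE) {
  project <- redcap_project$new(
    redcap_uri = "https://redcap.example.org/api/",   # hypothetical server
    token      = "0123456789ABCDEF0123456789ABCDEF"   # hypothetical token
  )
  ds <- project$read()$data   # redcap_read() returns a list with a data element
}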
context("CUSUMfixed")
set.seed(410)
T <- 100
d <- 0
x <- fracdiff::fracdiff.sim(n=T, d=d)$series
expect_error(CUSUMfixed(x, d=d, procedure="something_else", bandw=0.1))
expect_error(CUSUMfixed(x, d=d, procedure="CUSUMfixedb_typeA", bandw=10))
expect_error(CUSUMfixed(x, d=d, procedure="CUSUMfixedm_typeA", bandw=0.1))
expect_error(CUSUMfixed(c(x,NA), d=d, procedure="CUSUMfixedb_typeA", bandw=0.1))
expect_error(CUSUMfixed(x, d=d, procedure="CUSUMfixedb_typeA", bandw=0.1,tau=2))
expect_warning(CUSUMfixed(x, d=d, procedure="CUSUMfixedb_typeA", bandw=0.1,tau=0.1))
x=stats::ts(x)
expect_error(CUSUMfixed(x, d=d, procedure="CUSUMfixedb_typeA", bandw=0.1))
T = 100
procedure = c("CUSUMfixedb_typeA", "CUSUMfixedb_typeB", "CUSUMfixedm_typeA", "CUSUMfixedm_typeB")
d_grid = c(0.1,0.2)
for(a in 1:length(procedure)){
proc = procedure[a]
if((proc == "CUSUMfixedb_typeA" | proc == "CUSUMfixedb_typeB")) bandw=0.1
if((proc == "CUSUMfixedm_typeA" | proc == "CUSUMfixedm_typeB")) bandw=10
for(b in 1:length(d_grid)){
d = d_grid[b]
q = 0
for(i in 1:15){
x = fracdiff::fracdiff.sim(n=T, d=d)$series
mod = CUSUMfixed(x, d=d, procedure=proc, bandw=bandw)
q = q+sum(mod[4]>mod[3])
}
expect_lt(q,11)
}
}
T = 100
procedure = c("CUSUMfixedb_typeA", "CUSUMfixedb_typeB", "CUSUMfixedm_typeA", "CUSUMfixedm_typeB")
d_grid = c(0.1,0.2)
for(a in 1:length(procedure)){
proc = procedure[a]
if((proc == "CUSUMfixedb_typeA" | proc == "CUSUMfixedb_typeB")) bandw=0.1
if((proc == "CUSUMfixedm_typeA" | proc == "CUSUMfixedm_typeB")) bandw=10
for(b in 1:length(d_grid)){
d = d_grid[b]
q = 0
for(i in 1:15)
{
x = fracdiff::fracdiff.sim(n=T, d=d)$series
changep = c(rep(0,T/2), rep(1,T/2))
x = x+changep
mod = CUSUMfixed(x, d=d, procedure=proc, bandw=bandw)
q = q+sum(mod[4]>mod[1])
}
expect_gt(q,2)
}
} |
shortesttimepath <-
function(g, startvertexname, startvertextime, stopvertexname)
{
if (length(startvertexname) != 1 | length(stopvertexname) != 1)
{
stop("must provide single startvertex and stop vertex")
}
startvertex <- V(g)[V(g)$Name==startvertexname & V(g)$Time==startvertextime]
stopvertices <- V(g)[V(g)$Name==stopvertexname & V(g)$Time>=startvertextime]
paths <- get.shortest.paths(g, startvertex, stopvertices, mode="out", weights=E(g)$TimeCost)
if (is.list(paths) && "vpath" %in% names(paths)) { paths <- paths$vpath }
shortestpath <- paths[[which.min(sapply(paths, function(x) { tail(V(g)[x]$Time,1) })) ]]
return(V(g)[shortestpath])
} |
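# Illustrative sketch: shortesttimepath() expects a time-ordered igraph object
# whose vertices carry Name and Time attributes and whose edges carry a
# TimeCost weight; the graph g and the vertex names below are hypothetical.
if (FALSE) {
  library(igraph)
  path <- shortesttimepath(g, "individual_A", 0, "individual_B")
  path$Time   # arrival times along the chosen path
}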
MultiDimen_test = function(data , stat = "HT",pair=FALSE, method_p = "sampling" ,rank = FALSE, diff = FALSE , samplenum = 1000){
if(!is.matrix(data) & !is.data.frame(data)) stop("The input data must be a matrix or dataframe")
if(!is.numeric(samplenum)) stop("samplenum input is not true")
if(pair==TRUE){
if(stat %in% c("wsum")) stop("The stat input is not suitable for paired calculation")
    if( length(unique(data[,ncol(data)-1])) != 2) stop("The second-to-last column of the data must contain exactly two unique group labels when pair = TRUE")
if(method_p == "asymptotic" & stat != "HT") stop("Asymptotic method can only be used when statistic is HT when pair = 'TRUE'")
}
if(pair==FALSE) {
if(stat %in% c("zmax","zmaxabs")) stop("The stat input is not suitable for not paired calculation")
if(sum(unique(data[,ncol(data)]) == c(0,1)) != 2 ) stop("The last column of the data must only contain 0 and 1 if pair = 'FALSE'")
if(method_p == "asymptotic" & stat %in% c("tmax","tmaxabs")) stop("Asymptotic method can not be used when statistic is tmax or tmaxabs and pair = 'FALSE'")
if(method_p == "asymptotic" & rank == TRUE & stat == "HT") stop("Asymptotic method can not be used when statistic is ranked HT and pair = 'FALSE'")
}
parameter_value = c(stat , method_p , rank , diff , pair)
parameter_name = c("stat" , "method_p" , "rank" , "diff","pair")
parameter_content = c("HT tmax tmaxabs wsum zmax zmaxabs" , "sampling exact asymptotic" , "TRUE FALSE 1 0" , "TRUE FALSE 1 0", "TRUE FALSE 1 0")
parameter = data.frame(parameter_value , parameter_name , parameter_content)
for(i in 1:nrow(parameter)){
if(!parameter[i,1] %in% strsplit(parameter[i,3]," ")[[1]]) stop(paste(parameter[i,2] , "input is not true"))
}
if(stat == "wsum" & diff == TRUE & pair == FALSE) warning("The diff is not calculating when stat = 'wsum'")
if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
if(stat == "wsum" & pair == FALSE) {
rank = TRUE
if(rank == FALSE) warning("Changing the rank to TRUE, because stat = 'wsum' is chosen")
}
if(pair == FALSE){
HT <- function(dat,index,rank){
x = dat[index==0,]
y = dat[index==1,]
m = dim(x)[1]
k = dim(x)[2]
n = dim(y)[1]
mx = apply(x,2,mean)
vx = var(x)
my = apply(y,2,mean)
vy = var(y)
v = (m-1)*vx+(n-1)*vy
v = v/(m+n-2)
diff = mx-my
t2 = t(diff)%*%solve(v)%*%diff
t2 = t2*m*n/(m+n)
fs = (m+n-k-1)*t2/((m+n-2)*k)
as.numeric(fs)
}
wilcox.sd <- function(x, index){
rd = rank(x)
m = sum(index==0)
nt = length(index)
n = nt-m
mw = m*(nt+1)/2
vw1 = mw*n/6
ux = unique(x)
freq = ux
for(i in 1:length(ux)){
freq[i] = sum(x==ux[i])
}
af = sum(freq^3-freq)*m*n
tmp = 12*nt*(nt-1)
af = af/tmp
vw = vw1-af
w = sum(rd[index==0])
(w-mw)/sqrt(vw)
}
tall <- function(dat,index=NULL , rank=NULL){
if(rank == FALSE){
x = dat[index==0,]
y = dat[index==1,]
m = dim(x)[1]
k = dim(x)[2]
n = dim(y)[1]
stata = rep(0,k)
for(i in 1:k){
stata[i] = t.test(x[,i],y[,i],var.equal=TRUE)$statistic
}
stata
}
else if(rank == TRUE){
k = dim(dat)[2]
stat = rep(0,k)
for(i in 1:k){
stat[i] = wilcox.sd(dat[,i],index)
}
stat
}
}
tmax <- function(dat,index=NULL,rank=NULL){
max(tall(dat,index,rank))
}
tmaxabs <- function(dat,index=NULL,rank=NULL){
max(abs(tall(dat,index,rank)))
}
wsum <- function(rd,index , rank){
sum(rd[index==0,])
}
nc = ncol(data)
index = data[,nc]
data = data[,-nc]
STAT = stat
if(stat == "HT") stat = HT
else if(stat == "tmax") stat = tmax
else if(stat == "tmaxabs") stat = tmaxabs
else if(stat == "wsum") stat = wsum
if(rank == TRUE){
for(i in 1:ncol(data)){
data[,i] = rank(data[,i])
}
}
else if(rank == FALSE) {
}
x = data[index==0,]
y = data[index==1,]
m = dim(x)[1]
k = dim(x)[2]
n = dim(y)[1]
nt=n+m
data = as.matrix(data)
Fobs = stat(data, index , rank)
if( method_p == "exact"){
all.comb = combn(nt, m)
N = dim(all.comb)[2]
replace = rep(1, N)
for(i in 1:N){
ind = all.comb[,i]
index = rep(0,nt)
index[ind] = 1
replace[i] = stat(data,index,rank)
}
p.value <- length(replace[replace >= Fobs]) / N
}
else if(method_p == "sampling"){
replace <- replicate(samplenum, stat(data, sample(index) , rank))
p.value <- length(replace[replace >= Fobs]) / samplenum
}
else if( method_p == "asymptotic"){
if(STAT == "HT" & rank == FALSE){
p.value = 1-pf(Fobs, k, nt-k-1)
}
else if(STAT == "wsum"){
S = apply(data,1,sum)
sigmaz = var(S)*(nt-1)/nt
varw = m*n*sigmaz/(nt-1)
mw = k*m*(nt+1)/2
z = (Fobs - mw) / sqrt(varw)
p.value = 1 - pnorm(z)
}
}
if(diff == TRUE & STAT != "wsum"){
# flag dimensions whose |t| exceeds the 95% permutation quantile of tmaxabs
# (note: this step always uses 'samplenum' random permutations)
perm.stat <- replicate(samplenum, tmaxabs(data, sample(index) , rank))
quan = quantile(perm.stat , 0.95)
large = which(abs(tall(data , index , rank)) >= quan)
larger = paste(large, collapse = ", ")
}
}
else {
HT1<-function(x){
# one-sample Hotelling's T^2 of the paired differences
mx = apply(x,2,mean)
vx = var(x)
n = dim(x)[1]
k = dim(x)[2]
t2 = n*t(mx)%*%solve(vx)%*%mx
t2[1,1]
}
HT2<-function(x){
# one-sample Hotelling's T^2 rescaled to an F statistic with (k, n-k) df
mx=apply(x,2,mean)
vx=var(x)
n=dim(x)[1]
k=dim(x)[2]
t2=n*t(mx)%*%solve(vx)%*%mx
fs = (n-k)*t2/((n-1)*k)
fs[1,1]
}
statistic <- function(x, fun){
# randomly flip the sign of each paired difference, then recompute the statistic
n = dim(x)[1]
bin = rbinom(n,1,0.5)
bin = 2*bin-1
y = diag(bin)%*%x
fun(y)
}
binary <- function(x, k){
# k-digit binary expansion of x, mapped to a vector of +/-1 sign flips
# (the original loop index 'k-1:k' parses as (k-1):0; written explicitly here)
tmp = NULL
y = x
if(x < 2^k) {
for(i in (k-1):0){
a = floor(y/2^i)
tmp = c(tmp, a)
y = y-a*2^i
}
}
2*(tmp-0.5)
}
tall <- function(dat,index=NULL , rank=NULL){
# one-sample t statistic for each dimension of the paired differences
x=dat
mx=apply(x,2,mean)
vx=apply(x,2,var)
n=dim(x)[1]
t=mx*sqrt(n)/sqrt(vx)
t
}
tmax <- function(dat,index=NULL , rank=NULL){ max(tall(dat))}
tmaxabs <- function(dat,index=NULL , rank=NULL){ max(abs(tall(dat)))}
zall<-function(x){
# standardized Wilcoxon signed-rank statistic for each dimension
mx=apply(abs(x),2,sum)/2
vx=apply(x^2,2,sum)/4
sr=apply(x*(x>0),2,sum)
z=(sr-mx)/sqrt(vx)
z
}
zmax<-function(x){ max(zall(x)) }
zmaxabs<-function(x){ max(abs(zall(x))) }
STAT = stat
if(stat == "HT") stat = HT1
else if(stat == "tmax") stat = tmax
else if(stat == "tmaxabs") stat = tmaxabs
else if(stat == "zmax") stat = zmax
else if(stat == "zmaxabs") stat = zmaxabs
m = nrow(data) / 2
n = nrow(data) / 2
k = ncol(data) - 2
nc = ncol(data)
uni_index = unique(data[,nc-1])
uni_pair = unique(data[,nc])
x = data[data[,nc-1] == uni_index[1],]
y = data[data[,nc-1] == uni_index[2],]
# align the two samples by pair id (assumes one row per pair, in matching order)
x = x[x[,nc] == uni_pair,]
y = y[y[,nc] == uni_pair,]
x = x[,1:k]
y = y[,1:k]
D = x - y
rank=FALSE
if(STAT == "zmax" | STAT == "zmaxabs"){
rank=TRUE
# signed-rank transform: rank |D| within each dimension, then restore the signs
sd = sign(D)
SRD = abs(D)
for(i in 1:k){
SRD[,i] = rank(SRD[,i])
}
D = SRD*sd
}
Fobs = stat(D)
if(method_p == "exact"){
l = dim(D)[1]
ppm = NULL
N = 2^l
# enumerate all 2^l sign assignments of the paired differences
# (the original divided the count by 2^l - 1, an apparent off-by-one)
for(i in 0:(N-1)) {
condition = diag(binary(i, l)) %*% D
ppm = c(ppm, stat(condition))
}
p.value = mean(ppm >= Fobs)
}
else if(method_p == "sampling"){
results <- replicate(samplenum, statistic(D,stat))
p.value = length(results[results >= Fobs]) / samplenum
}
else if(method_p == "asymptotic" & STAT == "HT"){
f = HT2(D)
p.value = 1 - pf(f,k,n-k)
}
}
names(p.value) = method_p
names(Fobs) = STAT
if(method_p == "asymptotic") {
attr(p.value , "type") = switch(STAT,"HT"="F distribution","wsum"="normal")
}
alternative=if(STAT=="wsum"){"Each component in sample0 is greater than in sample1."}else{"The means are different."}
output <- list(method = if(pair){"Multi-dimensional paired test"}else{"Multi-dimensional test"} ,
score = if(rank){"Wilcoxon"}else{"original"} ,
stat = Fobs , pval = p.value , alternative = alternative)
if(diff == TRUE & STAT != "wsum"){
output<-c(output,addition=paste(larger , "dimensions are considered to have significant differences\n"))
}
class(output) = "nonp"
output
} |
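# --- Added sketch (not part of the original source): a self-contained
# illustration of the two-sample Hotelling's T^2 -> F computation used by
# the HT statistic above, on simulated data.
local({
set.seed(1)
m <- 10; n <- 12; k <- 3
x <- matrix(rnorm(m * k), m, k)
y <- matrix(rnorm(n * k), n, k)
v <- ((m - 1) * var(x) + (n - 1) * var(y)) / (m + n - 2)  # pooled covariance
d <- colMeans(x) - colMeans(y)
t2 <- (m * n / (m + n)) * drop(t(d) %*% solve(v) %*% d)
fs <- (m + n - k - 1) * t2 / ((m + n - 2) * k)            # ~ F(k, m+n-k-1) under H0
pf(fs, k, m + n - k - 1, lower.tail = FALSE)              # asymptotic p-value
})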
checkInteger <- function( x ) {
all( x == as.integer(x) )
}
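# Added illustration (not part of the original source): checkInteger() tests
# whether every value is whole-numbered, regardless of storage type.
stopifnot( checkInteger( c(1, 2, 3) ), !checkInteger( c(1.5, 2) ) )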
if ( 0 == 1 ) {
object <- getExampleSetOfTimePoints(0:21)
str( object )
checkSetOfTimePoints( object )
wrongObject <- object
wrongObject@.Data <- object@.Data[ 1:2, 1:2 ]
colnames( wrongObject@.Data ) <- c("bla", "bla")
wrongObject@.Data[ 1, 1 ] <- 1.5
wrongObject@nFullTimePoints <- 300
checkSetOfTimePoints( wrongObject )
objectWrongTime <- object
# the original corruption here was lost to e-mail masking; a plausible stand-in
# is to force the first sampling time to the zero point, which must be rejected:
objectWrongTime@.Data[ , 1 ] <- min( objectWrongTime@fullTimePoints )
checkSetOfTimePoints( objectWrongTime )
object <- getExampleSetOfTimePoints( 0:21 )
}
checkSetOfTimePoints <- function( object ) {
errors <- character( )
dimData <- dim( object@.Data )
nDimensions <- length( dimData )
if( !length(object@fullTimePoints) == object@nFullTimePoints ) {
msg <- paste0( "Number of fullTimePoints does not match nFullTimePoints", "\n" )
errors <- c ( errors , msg )
}
if( !nDimensions == 2 ) {
msg <- paste0( "Data array should have 2 dimensions, not: ", nDimensions, "\n" )
errors <- c ( errors , msg )
}
if( !object@nTimePointsSelect == dimData[ 2 ] ) {
msg <- paste0( "Number of timePoints: ", object@nTimePointsSelect,
" does not match number of timePoints in time point options: ", dimData[ 2 ], "\n" )
errors <- c ( errors , msg )
}
timeChoiceNames <- rownames( object@.Data )
nNames <- length( timeChoiceNames )
uniqueNames <- unique( timeChoiceNames )
nUniqueNames <- length( uniqueNames )
checkUnique <- ( nNames == nUniqueNames ) && ( ! is.null( timeChoiceNames ) )
if ( ! checkUnique ) {
msg <- paste0( "non-unique names of time point options ( .Data slot )" , "\n" )
errors <- c( errors , msg )
}
firstTimePoint <- object@.Data[ , 1 ]
lastTimePoint <- object@.Data[ , object@nTimePointsSelect ]
fullTimePoints <- object@fullTimePoints
indZeroPoint <- any( firstTimePoint == min( fullTimePoints ) )
indAllLast <- all( lastTimePoint == max( fullTimePoints ) )
if ( indZeroPoint ) {
msg <- paste0( "Zero point cannot be included as a sampling option" , "\n" )
errors <- c( errors , msg )
}
if ( ! indAllLast ) {
msg <- paste0( "Not all time point options end with the last time point" , "\n" )
errors <- c( errors , msg )
}
if ( length(errors) == 0 ) {
TRUE
} else {
cat( errors )
FALSE
}
}
setClass( "SetOfTimePoints",
representation = representation(
.Data = "array" ,
fullTimePoints = "numeric" ,
nFullTimePoints = "numeric" ,
nTimePointsSelect = "numeric" ,
nTimePointOptions = "numeric" ,
ranking = "data.frame"
) ,
validity = checkSetOfTimePoints
)
if( 0 == 1 ) {
fullTimePoints = 0:10
nTimePointsSelect = 5
nChoicesSubset = 7
}
getExampleSetOfTimePoints <- function( fullTimePoints, nTimePointsSelect = 5, nChoicesSubset = 7 ) {
nFulltimePoints <- length( fullTimePoints )
nTimeOptions <- nFulltimePoints - 2
nTimeOptionsToSelect <- nTimePointsSelect - 1
# 'combinations' is assumed to come from gtools (gtools::combinations( n, r ))
timePointChoices <- combinations( nTimeOptions, nTimeOptionsToSelect )
ncomb <- dim( timePointChoices )[ 1 ]
set.seed(1234)
indSample <- sample( ncomb, nChoicesSubset, replace = FALSE )
samplePointsInternal <- timePointChoices[ indSample , ]
samplePoints <- data.frame( samplePointsInternal + 1, nFulltimePoints)
samplePoints <- as.matrix(samplePoints)
timePointMatrix <- matrix( fullTimePoints[ samplePoints ] , nrow = nrow(samplePoints ) )
colnames( timePointMatrix ) <- paste0( "time" , 1:nTimePointsSelect)
rownames( timePointMatrix ) <- paste0( "timeOption" , 1:nChoicesSubset )
exampleTimes <- new("SetOfTimePoints",
.Data = timePointMatrix ,
fullTimePoints = fullTimePoints ,
nFullTimePoints = nFulltimePoints ,
nTimePointsSelect = nTimePointsSelect ,
nTimePointOptions = nChoicesSubset
)
exampleTimes
}
setMethod( "getTimePoints", "SetOfTimePoints",
function( object ) {
return( object@fullTimePoints )
}
)
if( 0 == 1 ) {
timeZonesEx <- getExampleTimeZones()
fullTimePointsEx <- seq( 0 , 21 , 1 )
object <- getAllTimeOptions( timeZones = timeZonesEx ,
fullTimePoints = fullTimePointsEx )
str( object )
}
setMethod( "getData", "SetOfTimePoints",
function( object ) {
return( object@.Data )
}
)
setMethod( "getNames" , "SetOfTimePoints" ,
function( object ){
dimNames <- dimnames( object@.Data )
names <- dimNames[[ 1 ]]
return( names )
}
)
setMethod( "getRanking", "SetOfTimePoints",
function( object ) {
return( object@ranking)
}
)
setReplaceMethod( f = "setRanking",
signature = "SetOfTimePoints",
definition = function( object, value) {
object@ranking <- value
validObject( object )
return( object )
}
)
if( 0 == 1 ){
object <- getExampleSetOfTimePoints( 0 :10 )
pkData <- getPkData(getExamplePkModel() , getTimePoints( object ) , 1 , 5 )
objectRanked <- rankObject( object , pkData )
object <- objectRanked
rank <- 2
extractRank.timePoints( object, 5 )
extractByRank( object, 1)
}
extractRank.timePoints <- function( object , rank ) {
ranking <- getRanking( object )
checkNoRank <- all( dim( ranking ) == c(0,0) )
if( checkNoRank ) {
stop( "SetOfTimePoints is not ranked" )
}
checkRankOk <- length( rank ) == 1
if( ! checkRankOk) {
stop( "Only one rank should be selected" )
}
indRanks <- getTopNRanking( ranking , rank )
rankToSelect <- indRanks[ rank ]
object[rankToSelect , , drop = FALSE ]
}
setMethod( "extractByRank" , signature = c( "SetOfTimePoints" , "numeric" ) ,
definition = extractRank.timePoints )
if( 0 == 1 ) {
extractByRank( object, 1 )
} |
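# Added sketch (not part of the original source): exercising the validity
# check via validObject(), in the same dead-code convention used above.
if( 0 == 1 ) {
object <- getExampleSetOfTimePoints( 0:21 )
stopifnot( isTRUE( checkSetOfTimePoints( object ) ) )
broken <- object
broken@nFullTimePoints <- 300
tryCatch( validObject( broken ), error = function( e ) message( conditionMessage( e ) ) )
}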
spec_compliance <- c(
spec_compliance_methods,
NULL
) |
clicklpp <- local({
# interactive point capture on a linear network (linnet), cf. spatstat's clickppp
clicklpp <- function(L, n=NULL, types=NULL, ...,
add=FALSE, main=NULL, hook=NULL) {
if(!inherits(L, "linnet"))
stop("L should be a linear network", call.=FALSE)
instructions <-
if(!is.null(n)) paste("click", n, "times in window") else
paste("add points: click left mouse button in window\n",
"exit: press ESC or another mouse button")
if(is.null(main))
main <- instructions
W <- Window(L)
if(is.null(types)) {
plot(L, add=add, main=main)
if(!is.null(hook))
plot(hook, add=TRUE)
xy <- if(!is.null(n)) spatstatLocator(n=n, ...) else spatstatLocator(...)
ok <- inside.owin(xy, w=W)
if((nbad <- sum(!ok)) > 0)
warning(paste("Ignored",
nbad,
ngettext(nbad, "point", "points"),
"outside window"),
call.=FALSE)
X <- as.lpp(xy$x[ok], xy$y[ok], L=L)
return(X)
}
ftypes <- factor(types, levels=types)
X <- getem(ftypes[1L], instructions, n=n, L=L, add=add, ..., pch=1)
X <- X %mark% ftypes[1L]
# skip gracefully when only one type was given (2:length would loop backwards)
for(i in seq_along(ftypes)[-1]) {
Xi <- getem(ftypes[i], instructions, n=n, L=L, add=add,
..., hook=X, pch=i)
Xi <- Xi %mark% ftypes[i]
X <- superimpose(X, Xi, L=L)
}
if(!add)
plot(X, main="Final pattern")
return(X)
}
getem <- function(i, instr, ...) {
main <- paste("Points of type", sQuote(i), "\n", instr)
do.call(clicklpp, resolve.defaults(list(...), list(main=main)))
}
clicklpp
}) |
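# Added usage sketch (not part of the original source; interactive, so left
# commented). 'simplenet' is a linear network dataset shipped with spatstat.
# library(spatstat)
# X <- clicklpp(simplenet, n = 5)                 # click 5 points on the network
# Y <- clicklpp(simplenet, types = c("a", "b"))   # one marked pattern per type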
"AIBS" |
Proportional_D <- function(train, test, seed=-1){
alg <- RKEEL::R6_Proportional_D$new()
alg$setParameters(train, test, seed)
return (alg)
}
R6_Proportional_D <- R6::R6Class("R6_Proportional_D",
inherit = PreprocessAlgorithm,
public = list(
seed = -1,
setParameters = function(train, test, seed=-1){
super$setParameters(train, test)
if(seed == -1) {
self$seed <- sample(1:1000000, 1)
}
else {
self$seed <- seed
}
}
),
private = list(
jarName = "Disc-Proportional.jar",
algorithmName = "Proportional-D",
algorithmString = "Proportional Discretizer",
getParametersText = function(){
text <- ""
text <- paste0(text, "seed = ", self$seed, "\n")
return(text)
}
)
) |
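# Added usage sketch (not part of the original source). Assumes RKEEL (with its
# Java/KEEL backend) and RKEELdata are installed; 'run()' and the
# 'preprocessed_train' field follow RKEEL's preprocess-algorithm conventions.
# train <- RKEEL::read.keel(paste0(RKEELdata::getDataPath(), "iris.dat"))
# alg <- Proportional_D(train, train, seed = 42)
# alg$run()
# head(alg$preprocessed_train)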
remove.duplicated.variances.lavsyn <- function( res0, items ) {
# remove duplicated residual-variance lines ("x~~x") for the given items
# from a lavaan model syntax string
res0 <- gsub( " ", "", res0 )
res1 <- strsplit( res0, split="\n")[[1]]
res1 <- data.frame( "syn"=res1, "sel"=0 )
res1$variance <- 0
ind <- grep( "~~", res1$syn )
res1$variance.index <- 0
l0 <- strsplit( paste(res1[ ind, "syn" ]), split="~~")
l0 <- unlist( lapply( l0, FUN=function(ll){ ll[1] } ) )
ind <- ind[ l0 %in% items ]
if ( length(ind) > 0 ){
res1$variance.index[ind] <- 1
res1[ ind, "variance" ] <- 1
res1$variance.obs <- ""
l1 <- res1[ ind, "syn" ]
l1 <- strsplit( paste(l1), split="~~")
l2 <- lapply( l1, FUN=function(ll){ ll[1] } )
res1$variance.obs[ind] <- unlist(l2)
l3 <- duplicated( res1[ind, "variance.obs"] )
if ( sum(l3) > 0 ){
res1 <- res1[ - ind[ l3 ], ]
}
}
lav2 <- paste( res1$syn, collapse="\n")
return(lav2)
} |
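# Added round-trip illustration (not part of the original source): the helper
# only uses base R, so it can be exercised directly on a small syntax string.
syn <- "F =~ x1 + x2\nx1 ~~ x1\nx1 ~~ x1\nx2 ~~ x2"
cat( remove.duplicated.variances.lavsyn( syn, items=c("x1","x2") ) )
# -> keeps a single "x1~~x1" line; the duplicate is dropped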