tauvar<-function(x,cval=3){
x<-elimna(x)
s<-qnorm(.75)*mad(x)
y<-(x-tauloc(x))/s
cvec<-rep(cval,length(x))
W<-apply(cbind(y^2,cvec^2),1,FUN="min")
val<-s^2*sum(W)/length(x)
val
}
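# Minimal usage sketch, assuming elimna() and tauloc() from the same
# (Wilcox-style robust statistics) collection are already sourced:
set.seed(1)
x <- c(rnorm(50), 10)   # normal data plus one outlier
tauvar(x)               # tau measure of scale, resistant to the outlier
tauvar(x, cval = 4.5)   # a larger cval caps fewer observations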
|
"sm.ancova" <- function(x, y, group, h, model = "none",
h.alpha = NA, weights = NA, covar = diag(1/weights), ...) {
x.name <- deparse(substitute(x))
y.name <- deparse(substitute(y))
data <- sm.check.data(x, y, weights, group, ...)
x <- data$x
y <- data$y
weights<- data$weights
group <- data$group
nobs <- data$nobs
ndim <- data$ndim
opt <- data$options
if(missing(h))
h <- h.select(x, y, weights = weights, group = group, ...)
else
{if(length(h)!=ndim) stop("length(h) does not match size of x")}
covar.set <- FALSE
if (!missing(covar)) {
if (!is.na(opt$nbins) & opt$nbins!=0)
stop("if covar is set, nbins must be 0 or NA")
if (!all(weights == as.integer(rep(1,length(y)))))
stop("if covar is set, weights must not be set.")
covar.set <- TRUE
}
if (missing(weights) & missing(covar))
replace.na(opt, nbins, round((nobs > 500) * 8 * log(nobs) / ndim))
replace.na(opt, band, TRUE)
if (model == "none") opt$band <- FALSE
if (ndim == 1) {
if (is.na(h.alpha)) h.alpha <- 2 * diff(range(x)) / nobs
replace.na(opt, display, "line")
replace.na(opt, ngrid, 50)
replace.na(opt, xlab, x.name)
replace.na(opt, ylab, y.name)
}
else {
opt$display <- "none"
}
fact <- factor(group)
if (ndim==1) {
ord <- order(fact, x)
xx <- x[ord]
}
else {
ord <- order(fact)
xx <- x[ord,]
}
yy <- y[ord]
weights <- weights[ord]
fact <- fact[ord]
fac.levels <- levels(fact)
nlev <- length(fac.levels)
rawdata <- list(x = xx, y = yy, fac = fact, nbins = opt$nbins, nobs = nobs,
ndim = ndim, devs = 0)
if ((!is.na(opt$nbins)) & (opt$nbins>0)) {
for (i in 1:nlev) {
ind <- (fact==fac.levels[i])
if (ndim==1) {xx.ind <- xx[ind]} else {xx.ind <- xx[ind,]}
bins <- binning(xx.ind, yy[ind], nbins=opt$nbins)
if (i ==1 ) {
x <- matrix(as.vector(bins$x), ncol=ndim)
y <- bins$means
fac <- rep(fac.levels[1], length(bins$means))
weights <- bins$x.freq
rawdata$devs <- bins$devs
}
else {
x <- rbind(x, matrix(as.vector(bins$x), ncol=ndim))
y <- c(y, bins$means)
fac <- c(fac, rep(fac.levels[i], length(bins$means)))
weights <- c(weights, bins$x.freq)
rawdata$devs <- c(rawdata$devs, bins$devs)
}
}
if (ndim == 1) x <- as.vector(x)
weights <- as.integer(weights)
fac <- factor(fac)
covar <- diag(1/weights)
}
else {
x <- xx
y <- yy
fac <- fact
}
n <- table(fac)
B <- diag(0, sum(n))
Sd <- diag(0, sum(n))
istart <- 1
for (i in 1:nlev) {
irange <- istart:(istart + n[i] - 1)
wi <- weights[irange]
if (ndim==1) {
xi <- x[irange]
Sd[irange, irange] <- sm.weight(xi, xi, h, weights=wi, options=opt)
}
else {
xi <- x[irange,]
Sd[irange, irange] <- sm.weight2(xi, xi, h, weights=wi, options=opt)
}
B[irange, irange] <- sm.sigweight(xi, weights=wi)
istart <- istart + n[i]
}
if (ndim==1) {
Ss <- sm.weight(x, x, h, weights=weights, options=opt)
}
else {
Ss <- sm.weight2(x, x, h, weights=weights, options=opt)
}
sigma <- sqrt((y %*% B %*% y)[1, 1] + sum(rawdata$devs))
if (model == "equal") {
Q <- Sd - Ss
Q <- t(Q) %*% diag(weights) %*% Q
obs <- ((y %*% Q %*% y) / sigma^2)[1,1]
}
if (model == "parallel") {
D <- matrix(0, ncol = nlev - 1, nrow = sum(n))
istart <- n[1] + 1
for (i in 2:nlev) {
D[istart:(istart + n[i] - 1),i - 1] <- 1
}
if (ndim==1) {
Q <- diag(sum(n)) - sm.weight(x, x, h.alpha, weights=weights, options=opt)
}
else {
Q <- diag(sum(n)) - sm.weight2(x, x, h, weights=weights, options=opt)
}
Q <- solve(t(D) %*% t(Q) %*% diag(weights) %*% Q %*% D) %*%
t(D) %*% t(Q) %*% diag(weights) %*% Q
alpha <- as.vector(Q %*% y)
ghat <- as.vector(Ss %*% (diag(sum(n)) - D %*% Q) %*% y)
ghati <- as.vector(Sd %*% y)
obs <- sum(weights*(as.vector(D %*% alpha) + ghat - ghati)^2) / sigma^2
Q <- D %*% Q + Ss %*% (diag(sum(n)) - D %*% Q) - Sd
Q <- t(Q) %*% diag(weights) %*% Q
}
p <- NULL
if (model %in% c("equal", "parallel")) {
if (!covar.set) {
p <- p.quad.moment(Q - B * obs, covar, obs,
sum(weights)-length(weights))
}
else {
p <- p.quad.moment.old(Q, covar, obs * sigma^2)
}
if (model == "equal") model.name <- "equality"
if (model == "parallel") model.name <- "parallelism"
if (opt$verbose > 0) cat("Test of", model.name, ": h = ",
signif(h), " p-value = ", round(p, 4), "\n")
}
if (ndim == 1)
sigma <- sigma / sqrt(nobs - 2 * nlev)
else
sigma <- sigma / sqrt(nobs)
if (!(opt$display %in% "none")) {
replace.na(opt, xlim, range(rawdata$x))
replace.na(opt, ylim, range(rawdata$y))
if (length(opt$lty) < nlev) opt$lty <- 1:nlev
if (length(opt$col) < nlev) opt$col <- 2:(nlev + 1)
plot(rawdata$x, rawdata$y, type = "n",
xlab = opt$xlab, ylab = opt$ylab, xlim = opt$xlim, ylim = opt$ylim)
for (i in 1:nlev)
text(rawdata$x[fac == fac.levels[i]], rawdata$y[fac == fac.levels[i]],
as.character(fac.levels[i]), col = opt$col[i])
if (opt$band & nlev > 2) {
if (opt$verbose > 0) cat("Band available only to compare two groups.\n")
opt$band <- FALSE
}
if (opt$band & covar.set) {
if (opt$verbose > 0) cat("Band not available when covariance is set.\n")
opt$band <- FALSE
}
if (!opt$band) {
for (i in 1:nlev) {
ind <- (fac == fac.levels[i])
sm.regression(x[ind], y[ind], h = h, weights = weights[ind],
ngrid = opt$ngrid, add = TRUE, lty = opt$lty[i], col = opt$col[i])
}
}
else {
eval.points <- opt$eval.points
if (any(is.na(eval.points))) {
start.eval <- max(tapply(x, fac, min))
stop.eval <- min(tapply(x, fac, max))
eval.points <- seq(start.eval, stop.eval, length = opt$ngrid)
}
ind <- (fac == fac.levels[1])
model1 <- sm.regression(x[ind], y[ind], h = h,
eval.points = eval.points, weights = weights[ind],
options = opt, display = "none", ngrid = opt$ngrid,
add = TRUE, lty = 1)
ind <- fac == fac.levels[2]
model2 <- sm.regression(x[ind], y[ind], h = h,
eval.points = eval.points, weights = weights[ind],
options = opt, display = "none", ngrid = opt$ngrid,
add = TRUE, lty = 2)
model.y <- (model1$estimate + model2$estimate) / 2
if (model == "parallel")
model.y <- cbind(model.y - alpha/2, model.y + alpha/2)
se <- sqrt((model1$se/model1$sigma)^2 + (model2$se/model2$sigma)^2)
se <- se * sigma
upper <- model.y + se
lower <- model.y - se
if (model == "equal") {
upper <- pmin(pmax(upper, par()$usr[3]), par()$usr[4])
lower <- pmin(pmax(lower, par()$usr[3]), par()$usr[4])
polygon(c(eval.points, rev(eval.points)), c(lower, rev(upper)),
border = FALSE, col = 5)
}
else if (model == "parallel") {
upper[,1] <- pmin(pmax(upper[,1], par()$usr[3]), par()$usr[4])
lower[,1] <- pmin(pmax(lower[,1], par()$usr[3]), par()$usr[4])
upper[,2] <- pmin(pmax(upper[,2], par()$usr[3]), par()$usr[4])
lower[,2] <- pmin(pmax(lower[,2], par()$usr[3]), par()$usr[4])
polygon(c(eval.points, rev(eval.points)),
c(lower[,1], rev(upper[,1])),
density = 20, angle = 90, border = FALSE, col = 5)
polygon(c(eval.points, rev(eval.points)),
c(lower[,2], rev(upper[,2])),
density = 20, angle = 0, border = FALSE, col = 6)
}
for (i in 1:nlev)
text(rawdata$x[fac == fac.levels[i]], rawdata$y[fac == fac.levels[i]],
as.character(fac.levels[i]), col = opt$col[i])
lines(eval.points, model1$estimate, lty = opt$lty[1], col = opt$col[1])
lines(eval.points, model2$estimate, lty = opt$lty[2], col = opt$col[2])
}
}
r <- list(p = p, model = model, sigma = sigma)
if (model == "parallel")
r <- list(p = p, model = model, sigma = sigma, alphahat = alpha)
if (!(opt$display == "none") & opt$band) {
r$upper <- upper
r$lower <- lower
r$eval.points <- eval.points
}
r$data <- list(x=x, y=y, group=fac, nbins=rawdata$nbins, devs=rawdata$devs,
weights=weights)
r$call <- match.call()
invisible(r)
}
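# Hedged usage sketch: the definition above mirrors sm::sm.ancova and relies
# on the sm package's internal helpers (sm.check.data, sm.weight, ...), so
# sm must be installed. Compare two smooth regression curves:
library(sm)
set.seed(2)
x <- runif(100)
g <- rep(1:2, each = 50)
y <- sin(2 * pi * x) + 0.5 * (g - 1) + rnorm(100, sd = 0.2)
sm.ancova(x, y, g, h = 0.05, model = "equal")  # p-value for H0: equal curves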
|
keops_kernel <- function(formula, args) {
if(!is.character(formula))
stop("`formula` input parameter should be a text string")
if(!(length(args)==0 | (is.vector(args) & is.character(args))))
stop("`args` input parameter should be a vector of text strings")
var_aliases <- format_var_aliases(args)
dllname <- create_dllname(formula, args)
dllfilename <- file.path(get_build_dir(),
paste0("librkeops", dllname, .Platform$dynlib.ext))
if(!file.exists(dllfilename) | get_rkeops_option("verbosity")) {
compile_formula(formula, var_aliases$var_aliases, dllname)
}
r_genred <- load_dll(path = get_build_dir(),
dllname = paste0("librkeops", dllname),
object = "r_genred",
genred=TRUE)
var_aliases <- lapply(
var_aliases,
function(elem) {
if(length(elem)>1)
return(elem[order(var_aliases$var_pos)])
else
return(elem)
}
)
function(input=NULL, inner_dim=1) {
env <- list(formula=formula,
args=args,
var_aliases=var_aliases,
inner_dim=inner_dim)
if(missing(input) | is.null(input))
return(env)
if(sum(str_length(names(input)) > 0) == length(input)) {
expected_order <- env$var_aliases$var_name
if(all(names(input) %in% expected_order))
if(any(names(input) != expected_order))
input <- input[expected_order]
}
check_scalar <- sapply(1:length(input),
function(ind) return(is.null(dim(input[[ind]]))))
if(any(check_scalar)) {
tmp_names <- names(input)
input[check_scalar] <- lapply(which(check_scalar),
function(ind)
return(as.matrix(input[[ind]])))
names(input) <- tmp_names
}
nx <- 0
if("Vi" %in% env$var_aliases$var_type) {
ind_Vi <- head(which(env$var_aliases$var_type == "Vi"), 1)
dim_Vi <- dim(input[[ind_Vi]])
if(env$inner_dim == 1) {
nx <- dim_Vi[1]
} else {
nx <- dim_Vi[2]
}
}
ny <- 0
if("Vj" %in% env$var_aliases$var_type) {
ind_Vj <- head(which(env$var_aliases$var_type == "Vj"), 1)
dim_Vj <- dim(input[[ind_Vj]])
if(env$inner_dim == 1) {
ny <- dim_Vj[1]
} else {
ny <- dim_Vj[2]
}
}
param <- c(get_rkeops_options("runtime"),
list(inner_dim=inner_dim, nx=nx, ny=ny))
out <- r_genred(input, param)
if(inner_dim) {
return(t(out))
} else {
return(out)
}
}
}
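# Hedged sketch of the closure returned by keops_kernel() (requires a working
# rkeops install with a compiler); formula and args follow the KeOps syntax.
op <- keops_kernel(
    formula = "Sum_Reduction(SqNorm2(x - y), 0)",
    args    = c("x = Vi(3)", "y = Vj(3)"))       # Vi/Vj: i- and j-indexed variables
X <- matrix(runif(150), nrow = 50, ncol = 3)     # 50 "i" points in R^3
Y <- matrix(runif(30),  nrow = 10, ncol = 3)     # 10 "j" points in R^3
res <- op(list(x = X, y = Y))                    # reduced values, one per i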
|
smoothColors<-function(...,alpha=NA){
args <- list(...)
r <- g <- b <- NULL
while(length(args) > 0) {
if(!is.character(args[[1]]))
stop("Usage: smoothColors(\"color name\",[n|\"color name\"],...,\"color name\")")
arglen<-length(args)
if(arglen > 1){
if(is.numeric(args[[2]])){
lastarg<-2
while(is.numeric(args[[lastarg]])) {
lastarg<-lastarg+1
if(lastarg > arglen) stop("bad argument list")
}
from <- col2rgb(args[[1]])
too <- col2rgb(args[[lastarg]])
n <- args[[2]]+2
r <- c(r,seq(from[1,],too[1,],length=n))
i <- length(r)
r <- r[-i]
g <- c(g,seq(from[2,],too[2,],length=n))
g <- g[-i]
b <- c(b,seq(from[3,],too[3,],length=n))
b <- b[-i]
args <- args[-(1:(lastarg-1))]
}
else {
cc <- col2rgb(args[[1]])
r <- c(r,cc[1,])
g <- c(g,cc[2,])
b <- c(b,cc[3,])
args <- args[-1]
}
}
else {
cc <- col2rgb(args[[1]])
r <- c(r,cc[1,])
g <- c(g,cc[2,])
b <- c(b,cc[3,])
args <- args[-1]
}
}
if(is.na(alpha)) rgb(r,g,b,maxColorValue=255)
else rgb(r,g,b,alpha=alpha,maxColorValue=255)
}
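# Usage: alternate color names with integer counts of intermediate shades.
smoothColors("red", 5, "blue")                   # red, 5 steps, blue: 7 colors
plot(1:7, col = smoothColors("red", 5, "blue"), pch = 19, cex = 3)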
|
library(RNetLogo)
path.to.NetLogo <- "C:/Program Files/NetLogo 6.0/app"
if (!exists("nl.test1", -1))
{
nl.test1 <- "nl.test1"
NLStart(path.to.NetLogo, gui=TRUE, nl.obj=nl.test1)
}
path.to.local.file <- 'C:/Users/jthiele/Documents/R/win-library/3.4/RNetLogo/examples/code_samples/9-NLDfToList/dftest.nlogo'
NLLoadModel(path.to.local.file, nl.obj=nl.test1)
NLCommand("setup", nl.obj=nl.test1)
list1 <- c(1,2,3,4,5)
list2 <- c(6,7,8,9,10)
list3 <- c('test1','test2','test3','test4','test5')
list4 <- c(TRUE,FALSE,TRUE,FALSE,TRUE)
sample.df <- data.frame(list1,list2,list3,list4)
NLDfToList(sample.df, nl.obj=nl.test1)
NLCommand("show-lists", nl.obj=nl.test1)
|
summary.hcov <- function(object, ...){
x <- object
cat("Summary for the object \"hcov\"\n")
cat("Information of the optimization problem: \n")
print(data.frame("n" = x$n, "p" = x$p, "lambda1" = signif(x$lambda1,2),"lambda2" = signif(x$lambda2,2),"lambda3" = signif(x$lambda3,2)),...)
cat("Sigma\n:")
print(data.frame("Number of Edges" = (sum(abs(x$Sigma)!=0)-x$p)/2, "Indices for hub nodes" = toString(x$hubind)),...)
cat("V\n:")
tempV<-(x$V!=0)
diag(tempV)<-0
print(data.frame("Indices for hub nodes" = x$hubind, "Number of Edges within each hub" = apply(tempV,2,sum)[x$hubind]),...)
cat("Z\n:")
print(data.frame("Number of Edges" = (sum(abs(x$Z)!=0)-x$p)/2),...)
invisible(x)
}
|
callName <- function(n=1){
sc <- sys.calls()
cl <- deparse(sc[[length(sc)-n]])
str_replace(cl, '([^()]+)\\(.*', '\\1')
}
Hypothesis <- CoefficientHypothesis <- function(hypothesis, terms){
whoami <- callName()
h <- new(whoami, .Data=hypothesis)
if(!missing(terms)){
h <- generateHypothesis(h, terms)
}
h
}
.makeContrastMatrixFromCoefficientHypothesis <- function(testIdx, coefnames){
cm <- matrix(0, nrow=length(testIdx), ncol=length(coefnames), dimnames=list(contrast=coefnames[testIdx], coefnames))
cm[cbind(seq_along(testIdx), testIdx)] <- 1
t(cm)
}
generateHypothesis <- function(h, terms){
stopifnot(inherits(h, 'Hypothesis') | inherits(h, 'CoefficientHypothesis'))
if(class(h) =='Hypothesis'){
cm <- makeContrasts2([email protected], levels=terms)
rownames(cm) <- terms
sd <- setdiff(rownames(cm), terms)
} else {
index <- match([email protected], terms)
sd <- setdiff([email protected], terms)
cm <- .makeContrastMatrixFromCoefficientHypothesis(index, terms)
h@index <- index
}
if(length(sd)>0) stop("Term(s) '", paste(sd, collapse=", "), "' not found.\nTerms available: ", paste(terms, collapse=", "))
h@contrastMatrix <- cm
h
}
listType <- function(alist){
types <- lapply(alist, function(x) class(x)[1])
if(length(unique(types))>1) stop("Not 'atomic' list")
types[1]
}
escapeSymbols <- function(text, warn=TRUE){
hasBT <- str_detect(text, fixed('`'))
if(any(hasBT)){
if(warn) warning("Some symbols already contain backticks ('`'). Deleting backticks and hoping for the best.")
text <- str_replace_all(text, fixed('`'), '')
}
hasSymbols <- str_detect(text, '[():+*/^]|-')
text[hasSymbols] <- str_c('`', text[hasSymbols], '`')
text
}
makeContrasts2 <- function (contrasts = NULL, levels, warn=TRUE)
{
if (is.factor(levels))
levels <- levels(levels)
if (!is.character(levels))
levels <- colnames(levels)
symbols <- str_detect(levels, '[():+*/^=]|-')
if (any(symbols) && warn)
warning("Some levels contain symbols. Be careful to escape these names with backticks ('`') when specifying contrasts.")
n <- length(levels)
if (n < 1)
stop("No levels to construct contrasts from")
indicator <- function(i, n) {
out <- rep(0, n)
out[i] <- 1
out
}
levelsenv <- new.env()
for (i in 1:n) assign(levels[i], indicator(i, n), pos = levelsenv)
if (!is.null(contrasts)) {
e <- as.character(contrasts)
ne <- length(e)
cm <- matrix(0, n, ne, dimnames = list(Levels = levels,
Contrasts = e))
if (ne == 0)
return(cm)
for (j in 1:ne) {
tryCatch( ej <- parse(text = e[j]), error=function(E) stop('Could not parse contrast ', e[j]))
cm[, j] <- eval(ej, envir = levelsenv)
}
return(cm)
}
}
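# Usage sketch: each level becomes an indicator vector, and every contrast
# expression is parsed and evaluated against them (requires stringr).
library(stringr)
makeContrasts2(c("B - A", "C - (A + B)/2"), levels = c("A", "B", "C"))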
|
transform_shape <- function(dframe){
dframe[,2:3] <- apply(dframe[,2:3], 2, as.character)
unfilled <- which(dframe$shape<=14)
solid <- which(dframe$shape>14 & dframe$shape<=20)
outlined <- which(dframe$shape>20)
xold <- dframe$shape
shapeidx <- data.frame(x = 0:25, shape="", stringsAsFactors=FALSE)
shapeidx[c(0, 7, 12, 13, 14, 15, 22)+1,2] <- "square"
shapeidx[c(3, 4, 8)+1, 2] <- "cross"
shapeidx[c(5, 9, 18, 23)+1, 2] <- "diamond"
shapeidx[c(1, 10, 16, 19, 20, 21)+1, 2] <- "circle"
shapeidx[c(6, 11, 25)+1, 2] <- "triangle-down"
shapeidx[c(2, 17, 24)+1, 2] <- "triangle-up"
shapeidx$fill <- c(rep(FALSE, 15), rep(TRUE, 6), rep(TRUE, 5))
shapeidx$line <- c(rep(TRUE, 15), rep(FALSE, 6), rep(TRUE, 5))
vals <- unique(xold)
dframe$fill[unfilled] <- "null"
dframe$colour[solid] <- "null"
dframe$Rshape <- dframe$shape
dframe$shape <- shapeidx$shape[xold+1]
return(dframe)
}
|
CFKNN_C <- function(train, test, k=3, alpha=0.6, seed=-1){
alg <- RKEEL::R6_CFKNN_C$new()
alg$setParameters(train, test, k, alpha, seed)
return (alg)
}
R6_CFKNN_C <- R6::R6Class("R6_CFKNN_C",
inherit = ClassificationAlgorithm,
public = list(
k = 3,
alpha = 0.6,
seed = -1,
setParameters = function(train, test, k=3, alpha=0.6, seed=-1){
super$setParameters(train, test)
self$k <- k
self$alpha <- alpha
if(seed == -1) {
self$seed <- sample(1:1000000, 1)
}
else {
self$seed <- seed
}
}
),
private = list(
jarName = "CFKNN.jar",
algorithmName = "CFKNN-C",
algorithmString = "Condensed Fuzzy K Nearest Neighbors Classifier",
getParametersText = function(){
text <- ""
text <- paste0(text, "seed = ", self$seed, "\n")
text <- paste0(text, "K Value = ", self$k, "\n")
text <- paste0(text, "Alpha = ", self$alpha, "\n")
return(text)
}
)
)
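# Hedged usage sketch (RKEEL requires Java and downloads the KEEL jars;
# loadKeelDataset() and the "iris_train"/"iris_test" names follow RKEEL's
# examples and may vary by version):
library(RKEEL)
train <- loadKeelDataset("iris_train")
test  <- loadKeelDataset("iris_test")
alg <- CFKNN_C(train, test, k = 3, alpha = 0.6)
alg$run()                 # runs the KEEL jar; predictions in alg$testPredictions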
|
buildSemisupTab <- function(mainWindow, console, graphicFrame, RclusTool.env) {
semi.env <- RclusTool.env$gui$tabs.env$semisup
fontFrame <- tkfont.create(family = "Arial", weight = "bold", size = RclusTool.env$param$visu$size)
semi.env$tcl.method.select <- tclVar("Constrained_KM")
semi.env$tcl.K <- tclVar("0")
semi.env$tcl.export.clustering <- tclVar("1")
semi.env$tcl.export.calcul <- tclVar("0")
semi.env$tcl.classif.imgsig <- tclVar("0")
semi.env$tcl.extract.protos <- tclVar("0")
semi.env$tcl.rename.clusters <- tclVar("0")
semi.env$tcl.sampling.check <- tclVar("0")
win1.nb <- RclusTool.env$gui$win1$env$nb
win2.nb <- RclusTool.env$gui$win2$env$nb
cluster.summary <- NULL
K <- 0
method.select <- NULL
method.title <- c("Constrained_KM"="Constrained K-Means (CKM)",
"Constrained_SC"="Constrained Spectral Clustering (CSC)")
method.description <- c("Constrained_KM"="Method: vector quantization\nTechnique: item belongs to cluster with the nearest\n\tmean (randomly initialized)\nResults: partition of N items into K clusters\nDisadvantage: computationally difficult (NP-hard)\n",
"Constrained_SC"="Method: spectrum of data similarity matrix\nTechnique: evaluate the relative similarity of each pair\nResults: partition of N items into K clusters\nDisadvantage: slow for large datasets\nAdvantage: processing of non convex data\n")
MethodFrametext <- StringToTitle("CLUSTERING", RclusTool.env$param$visu$sizecm, fontsize=RclusTool.env$param$visu$size)
MethodFrame <- tkwidget(win1.nb$env$semisup, "labelframe", text = MethodFrametext, font = fontFrame, padx = RclusTool.env$param$visu$sizecm, pady = 8, relief = "flat")
AdviceFrame <- tkwidget(MethodFrame, "labelframe", text = "method name", padx = RclusTool.env$param$visu$sizecm, pady = 8, relief = "groove")
tkgrid(AdviceFrame, columnspan = 3, rowspan = 5, column = 2, row = 2, padx = RclusTool.env$param$visu$sizecm)
AdviceFrameText <- tk2label(AdviceFrame, text = "method description", width=50)
tkgrid(AdviceFrameText, sticky = "w")
MethodFrameExpert <- tkwidget(MethodFrame, "labelframe", font = fontFrame, padx = RclusTool.env$param$visu$sizecm, relief = "flat")
MethodFrameStandard <- tkwidget(MethodFrame, "labelframe", font = fontFrame, padx = RclusTool.env$param$visu$sizecm, relief = "flat")
semi.env$onMethodDescription <- function()
{
tkconfigure(AdviceFrame, text=method.title[tclvalue(semi.env$tcl.method.select)], font=fontFrame)
tkconfigure(AdviceFrameText, text=method.description[tclvalue(semi.env$tcl.method.select)], font=fontFrame)
}
rb_methods <- sapply(names(method.title), function(name) {
tkr <- tkradiobutton(MethodFrameExpert, variable=semi.env$tcl.method.select, value=name, text=method.title[name])
tkbind(tkr, "<ButtonRelease-1>", semi.env$onMethodDescription)
tkr
}, simplify=F)
SpaceFrametext <- StringToTitle("FEATURE SPACE SELECTION", RclusTool.env$param$visu$sizecm, fontsize=RclusTool.env$param$visu$size)
SpaceFrame <- tkwidget(win1.nb$env$semisup, "labelframe", text = SpaceFrametext, font = fontFrame, padx = RclusTool.env$param$visu$sizecm, pady = 8, relief = "flat")
semi.env$spaceList <- tk2listbox(SpaceFrame, selectmode = "single", activestyle = "dotbox",
height = 5, width = 45, autoscroll = "none", background = "white")
tkgrid(tk2label(SpaceFrame, text="Apply sampling"), row=11, column=0, sticky="w")
tk.sampling.check <- tkcheckbutton(SpaceFrame, text="", variable=semi.env$tcl.sampling.check, state="disabled")
tkgrid(semi.env$spaceList, row = 2, column = 1)
tkgrid(tk.sampling.check, row=11, column=1, sticky="e")
semi.env$initSamplingCheck <- function()
{
state.sampling.button <- "disabled"
if (!is.null(RclusTool.env$data.sample$config$sampling.size)){
state.sampling.button <- "normal"
semi.env$tcl.sampling.check <- tclVar("1")
}
tkconfigure(tk.sampling.check, variable=semi.env$tcl.sampling.check, state=state.sampling.button)
}
initAvailableSpaces <- function()
{
spaces <- names(RclusTool.env$data.sample$features)
spaces <- spaces[!grepl("pca_full", spaces, fixed=T)]
spaces <- spaces[spaces!="initial"]
semi.env$available.spaces <- sapply(spaces, featSpaceNameConvert, short2long=T)
}
eraseSpaceList <- function()
{
sapply(1:size(semi.env$spaceList), function(x) tkdelete(semi.env$spaceList, "end"))
}
semi.env$updateSpaceList <- function(reset=F) {
eraseSpaceList()
initAvailableSpaces()
sapply(semi.env$available.spaces, function(s) tkinsert(semi.env$spaceList, "end", s))
ind <- which(semi.env$featSpace==featSpaceNameConvert(semi.env$available.spaces, short2long=F))
if (!length(ind))
ind <- 1
tkselection.set(semi.env$spaceList, ind-1)
}
getSelectedSpace <- function()
{
space <- NULL
selection <- tclvalue((tkcurselection(semi.env$spaceList)))
if (selection!="")
space <- featSpaceNameConvert(semi.env$available.spaces[as.numeric(selection)+1], short2long=F)
space
}
OnSpaceSelection <- function()
{
semi.env$featSpace <- getSelectedSpace()
}
tkbind(semi.env$spaceList, "<ButtonRelease-1>", OnSpaceSelection)
summaryConfig <- function() {
summarytt <- tktoplevel()
tktitle(summarytt) <- "Summaries"
summaryFrame <- tkwidget(summarytt, "labelframe", text = "SUMMARIES", padx = RclusTool.env$param$visu$sizecm, pady = 8, relief = "flat")
summaries <- c("Min", "Max", "Sum", "Average", "SD")
config.env <- new.env()
config.env$summariesList <- summaries %in% names(RclusTool.env$param$analysis$summary.functions)
names(config.env$summariesList) <- summaries
functionsList <- c("min", "max", "sum", "mean", "sd")
names(functionsList) <- summaries
OnMinCheck <- function() {
config.env$summariesList["Min"] <- tclvalue(tcl.min.check)=="1"
}
OnMaxCheck <- function() {
config.env$summariesList["Max"] <- tclvalue(tcl.max.check)=="1"
}
OnSumCheck <- function() {
config.env$summariesList["Sum"] <- tclvalue(tcl.sum.check)=="1"
}
OnMeanCheck <- function() {
config.env$summariesList["Average"] <- tclvalue(tcl.mean.check)=="1"
}
OnStdCheck <- function() {
config.env$summariesList["SD"] <- tclvalue(tcl.std.check)=="1"
}
tcl.min.check <- tclVar(as.character(as.integer(config.env$summariesList["Min"])))
tk.min.check <- tkcheckbutton(summaryFrame, text="", variable=tcl.min.check,
command=OnMinCheck)
tcl.max.check <- tclVar(as.character(as.integer(config.env$summariesList["Max"])))
tk.max.check <- tkcheckbutton(summaryFrame, text="", variable=tcl.max.check,
command=OnMaxCheck)
tcl.sum.check <- tclVar(as.character(as.integer(config.env$summariesList["Sum"])))
tk.sum.check <- tkcheckbutton(summaryFrame, text="", variable=tcl.sum.check,
command=OnSumCheck)
tcl.mean.check <- tclVar(as.character(as.integer(config.env$summariesList["Average"])))
tk.mean.check <- tkcheckbutton(summaryFrame, text="", variable=tcl.mean.check,
command=OnMeanCheck)
tcl.std.check <- tclVar(as.character(as.integer(config.env$summariesList["SD"])))
tk.std.check <- tkcheckbutton(summaryFrame, text="", variable=tcl.std.check,
command=OnStdCheck)
tkgrid(tk2label(summarytt, text = " "), row=1, sticky = "w")
tkgrid(summaryFrame, row = 2, columnspan = 2, sticky = "w")
tkgrid(tk2label(summaryFrame, text="Minimum"), row=2, column=0, sticky="w")
tkgrid(tk.min.check, row=2, column=1, sticky="e")
tkgrid(tk2label(summaryFrame, text="Maximum"), row=3, column=0, sticky="w")
tkgrid(tk.max.check, row=3, column=1, sticky="e")
tkgrid(tk2label(summaryFrame, text="Sum"), row=4, column=0, sticky="w")
tkgrid(tk.sum.check, row=4, column=1, sticky="e")
tkgrid(tk2label(summaryFrame, text="Average"), row=5, column=0, sticky="w")
tkgrid(tk.mean.check, row=5, column=1, sticky="e")
tkgrid(tk2label(summaryFrame, text="Standard deviation"), row=6, column=0, sticky="w")
tkgrid(tk.std.check, row=6, column=1, sticky="e")
onClose <- function() {
RclusTool.env$param$analysis$summary.functions <- functionsList[config.env$summariesList]
tkdestroy(summarytt)
}
butClose <- tk2button(summarytt, text = "Close", width = -6, command = onClose)
tkgrid(butClose, row = 7, columnspan = 2)
tkwait.window(summarytt)
}
OnCompute <- function() {
if (is.null(RclusTool.env$data.sample)){
msg <- "Please load a file."
tkmessageBox(message=msg)
return()
}
if (is.null(semi.env$pairs.abs))
semi.env$pairs.abs <- list(ML=list(), CNL=list())
is.empty <- function(pairs) {
all(sapply(pairs, length)==0)
}
is.equal <- function(pairs.1, pairs.2) {
if (length(pairs.1) != length(pairs.2))
return(FALSE)
if (length(pairs.1)==0)
return(TRUE)
all(unlist(pairs.1)==unlist(pairs.2))
}
if (sum(sapply(semi.env$pairs.abs, length))==0) {
profile.mode <- "whole sample"
} else profile.mode <- "constrained pairs"
K <- as.integer(tclvalue(semi.env$tcl.K))
if (is.null(K) || (K<0) || (K>length(RclusTool.env$data.sample$id.clean)) || (K>RclusTool.env$param$classif$unsup$K.max)){
tkmessageBox(message=paste("Please enter a valid number of clusters <=", RclusTool.env$param$classif$unsup$K.max))
return()
}
classif.space <- semi.env$featSpace
decomposition.space <- unlist(strsplit(classif.space, split=".", fixed=T))
pca <- ("pca" %in% decomposition.space) || ("pca_full" %in% decomposition.space)
spec <- "spectral" %in% decomposition.space
use.sampling <- tclvalue(semi.env$tcl.sampling.check)=="1"
use.scaling <- "scaled" %in% decomposition.space
pca.nb.dims <- RclusTool.env$data.sample$config$pca.nb.dims
method.select <- tclvalue(semi.env$tcl.method.select)
method.space.name <- paste(method.select,classif.space,sep="_")
sampling.size.max <- RclusTool.env$data.sample$config$sampling.size.max
if (!is.null(RclusTool.env$data.sample$sampling))
sampling.size.max <- RclusTool.env$data.sample$config$sampling.size.max
features.mode <- classif.space
semi.env$label <- NULL
semi.env$pairs.abs <- visualizeSampleClustering(RclusTool.env$data.sample, label=semi.env$label,
clustering.name= method.space.name, profile.mode=profile.mode,
selection.mode = "pairs", pairs=semi.env$pairs.abs, wait.close=TRUE,
RclusTool.env=RclusTool.env, features.mode=features.mode, fontsize=RclusTool.env$param$visu$size)
RclusTool.env$data.sample$clustering[[method.space.name]] <- computeSemiSupervised(data.sample=RclusTool.env$data.sample, ML=semi.env$pairs.abs$ML, CNL=semi.env$pairs.abs$CNL,
K=K, method.name=method.select, use.sampling=use.sampling, sampling.size.max=sampling.size.max,
pca=pca, scaling=use.scaling, pca.nb.dims=pca.nb.dims, spec=spec, RclusTool.env=RclusTool.env)
tkinsert(console, "0.0", paste("----- Constrained clustering -----\n",
"
"
method.select, " computing\n",
"Obtained K: ", length(unique(RclusTool.env$data.sample$clustering[[method.space.name]]$label)), "\n\n", sep = ""))
semi.env$label <- RclusTool.env$data.sample$clustering[[method.space.name]]$label
cluster.summary <- RclusTool.env$data.sample$clustering[[method.space.name]]$summary
if (tclvalue(semi.env$tcl.rename.clusters)=="1") {
new.data.sample <- nameClusters(data.sample = RclusTool.env$data.sample,
method = method.space.name, RclusTool.env=RclusTool.env)
if (!is.null(new.data.sample)){
RclusTool.env$data.sample <- new.data.sample
semi.env$label <- RclusTool.env$data.sample$clustering[[method.space.name]]$label
semi.env$cluster.summary <- RclusTool.env$data.sample$clustering[[method.space.name]]$summary
}
}
if (tclvalue(semi.env$tcl.export.calcul)=="1") {
if (method.select == "Constrained_SC") {
fileSpec <- paste(RclusTool.env$data.sample$name, " constrained spectral ", RclusTool.env$operator.name, " ",
method.space.name, ".rdata", sep="")
saveCalcul(fileSpec, RclusTool.env$data.sample$features$`spectral embedding`, RclusTool.env$data.sample$files$results$rdata)
}
}
tk2delete.notetab(win2.nb)
abdPlotTabs(clusterings=RclusTool.env$data.sample$clustering, win2.nb, RclusTool.env = RclusTool.env, hscale = RclusTool.env$param$visu$hscale)
if (K==0 && !grepl("Constrained_SC_",method.space.name)){
ElbowPlot(win2.nb, method.space.name, RclusTool.env, hscale = RclusTool.env$param$visu$hscale, charsize=RclusTool.env$param$visu$size)
}
new.protos <- visualizeSampleClustering(RclusTool.env$data.sample, label=semi.env$label, clustering.name=method.space.name,
selection.mode = "prototypes", cluster.summary=cluster.summary,
profile.mode="whole sample", wait.close=TRUE, features.mode=features.mode,
RclusTool.env=RclusTool.env, fontsize=RclusTool.env$param$visu$size)
new.protos$label <- new.protos$label[[method.space.name]]$label
RclusTool.env$data.sample$clustering[[method.space.name]]$label <- new.protos$label
semi.env$cluster.summary <- clusterSummary(RclusTool.env$data.sample, new.protos$label,
summary.functions=RclusTool.env$param$analysis$summary.functions)
if (tclvalue(semi.env$tcl.extract.protos)=="1") {
extractProtos(data.sample = RclusTool.env$data.sample, method = method.space.name, K.max=RclusTool.env$param$classif$unsup$K.max, kmeans.variance.min=RclusTool.env$param$classif$unsup$kmeans.variance.min, user.name=RclusTool.env$gui$user.name)
}
RclusTool.env$data.sample <- updateClustersNames(RclusTool.env$data.sample, new.protos$prototypes)
tk2delete.notetab(win2.nb)
abdPlotTabs(clusterings=RclusTool.env$data.sample$clustering, win2.nb, RclusTool.env = RclusTool.env, hscale = RclusTool.env$param$visu$hscale)
if (K==0 && !grepl("Constrained_SC_",method.space.name)){
ElbowPlot(win2.nb, method.space.name, RclusTool.env, hscale = RclusTool.env$param$visu$hscale, charsize=RclusTool.env$param$visu$size)
}
if (tclvalue(semi.env$tcl.export.clustering)=="1") {
fileClust.csv <- paste("clustering ", RclusTool.env$gui$user.name, " ",
method.space.name, ".csv", sep="")
saveClustering(fileClust.csv, new.protos$label, RclusTool.env$data.sample$files$results$clustering)
fileSum.csv <- paste("results ", RclusTool.env$gui$user.name, " ",
method.space.name, ".csv", sep="")
saveSummary(fileSum.csv, semi.env$cluster.summary, RclusTool.env$data.sample$files$results$clustering)
}
if (tclvalue(semi.env$tcl.classif.imgsig)=="1") {
imgClassif(data.sample = RclusTool.env$data.sample,
imgdir = RclusTool.env$data.sample$files$images,
method = method.space.name, user.name=RclusTool.env$gui$user.name)
sigClassif(data.sample = RclusTool.env$data.sample,
method = method.space.name, user.name=RclusTool.env$gui$user.name)
}
if (length(new.protos$prototypes[[method.space.name]]) > 0){
saveManualProtos(RclusTool.env$data.sample, new.protos$prototypes)
}
RclusTool.env$data.sample$clustering[[method.space.name]]$label <- semi.env$label
}
tk.K <- tkentry(MethodFrameExpert, textvariable=semi.env$tcl.K, width=2, background = "white")
tkconfigure(tk.K,font = fontFrame)
OutputsFrametext <- StringToTitle("OUTPUTS SELECTION", RclusTool.env$param$visu$sizecm,fontsize=RclusTool.env$param$visu$size)
OutputsFrame <- tkwidget(win1.nb$env$semisup, "labelframe", text = OutputsFrametext, font = fontFrame, padx = RclusTool.env$param$visu$sizecm, pady = 8, relief = "flat")
tk.export.clustering <- tkcheckbutton(OutputsFrame, text="", variable=semi.env$tcl.export.clustering)
tk.classif.imgsig <- tkcheckbutton(OutputsFrame, text="", variable=semi.env$tcl.classif.imgsig)
tk.extract.protos <- tkcheckbutton(OutputsFrame, text="", variable=semi.env$tcl.extract.protos)
tk.rename.clusters <- tkcheckbutton(OutputsFrame, text="", variable=semi.env$tcl.rename.clusters)
butSummary <- tk2button(OutputsFrame, text = "Summary settings", width = 20, command = summaryConfig)
tk.compute.but <- tkbutton(win1.nb$env$semisup, text="COMPUTE", width = 10, command=OnCompute)
sapply(1:length(rb_methods), function(i) tkgrid(rb_methods[[i]], row=i-1, column=0, columnspan=2, padx=RclusTool.env$param$visu$sizecm, sticky="w"))
tkgrid(tk2label(MethodFrameExpert, text=" "))
tkgrid(tk2label(MethodFrameExpert, text="Number of clusters (0=auto)"), row=6, column=0, sticky="w")
tkgrid(tk2label(MethodFrameExpert, text=" "))
tkgrid(tk.K, row=6, column=1, sticky="e")
tkgrid(tk2label(OutputsFrame, text=" "))
tkgrid(tk2label(OutputsFrame, text="Export clustering results"), row=11, column=0, sticky="w")
tkgrid(tk.export.clustering, row=11, column=1, sticky="e")
tkgrid(tk2label(OutputsFrame, text="Classify images/signals (if available)"), row=13, column=0, sticky="w")
tkgrid(tk.classif.imgsig, row=13, column=1, sticky="e")
tkgrid(tk2label(OutputsFrame, text="Extract prototypes automatically"), row=14, column=0, sticky="w")
tkgrid(tk.extract.protos, row=14, column=1, sticky="e")
tkgrid(tk2label(OutputsFrame, text="Rename clusters automatically"), row=15, column=0, sticky="w")
tkgrid(tk.rename.clusters, row=15, column=1, sticky="e")
tkgrid(tk2label(OutputsFrame, text=" "))
tkgrid(butSummary, column = 0)
tkgrid(tk2label(MethodFrameStandard, text="Standard parameters:\n - K estimation: method Elbow\n - Constrained K-means"), row = 0, column = 0)
tkgrid(tk2label(MethodFrameStandard, text=" "), row = 6, column = 0)
tkgrid(tk2label(win1.nb$env$semisup, text=" "), row = 1, column = 1)
tkgrid(MethodFrame, columnspan = 3, row = 2, sticky = "w")
tkgrid(tk2label(MethodFrame, text=" "), row = 1,column = 0)
if (RclusTool.env$gui$user.type=="expert")
{
tkgrid(MethodFrameExpert, row=2, column=0)
tkgrid(SpaceFrame, row = 17, columnspan = 3, sticky = "w")
tkgrid(OutputsFrame, row = 22, columnspan = 3, sticky = "w")
} else {
tkgrid(MethodFrameStandard, row=2, column=0)
}
tkgrid(tk2label(win1.nb$env$semisup, text=" "))
tkgrid(tk.compute.but, column = 0)
tkgrid(tk2label(win1.nb$env$semisup, text=" "))
}
initSemisupTab <- function(mainWindow, console, graphicFrame, RclusTool.env, reset=F)
{
if (is.null(RclusTool.env$gui$tabs.env$semisup) || !length(RclusTool.env$gui$tabs.env$semisup))
{
RclusTool.env$gui$tabs.env$semisup <- new.env()
buildSemisupTab(mainWindow, console, graphicFrame, RclusTool.env)
reset <- T
}
semi.env <- RclusTool.env$gui$tabs.env$semisup
if (reset)
{
tclvalue(semi.env$tcl.method.select) <- "Constrained_KM"
tclvalue(semi.env$tcl.K) <- "0"
tclvalue(semi.env$tcl.export.clustering) <- "1"
tclvalue(semi.env$tcl.export.calcul) <- "0"
tclvalue(semi.env$tcl.classif.imgsig) <- "0"
tclvalue(semi.env$tcl.extract.protos) <- "0"
tclvalue(semi.env$tcl.rename.clusters) <- "0"
tclvalue(semi.env$tcl.sampling.check) <- "0"
semi.env$available.spaces <- NULL
semi.env$featSpace <- RclusTool.env$data.sample$config$default.classif.feature.space
semi.env$scaling <- RclusTool.env$data.sample$config$scaling
semi.env$label <- NULL
semi.env$pairs.abs <- NULL
semi.env$onMethodDescription()
}
if (RclusTool.env$gui$user.type=="expert")
{
semi.env$updateSpaceList()
semi.env$initSamplingCheck()
} else {
semi.env$featSpace <- 'preprocessed'
}
}
|
mcmc_aperm <- function(x, perm, ...) {
UseMethod("mcmc_aperm")
}
mcmc_aperm.mcmcarray <- function(x, perm = NULL, ...) {
if (!is.null(perm)) {
chk_whole_numeric(perm)
chk_range(perm, c(1, npdims(x)))
chk_unique(perm)
}
chk_unused(...)
perm_all <- 1:npdims(x)
perm <- c(perm, if (!is.null(perm)) setdiff(perm_all, perm) else rev(perm_all))
perm <- c(1L, 2L, perm + 2L)
x <- aperm(x, perm = perm)
set_class(x, "mcmcarray")
}
mcmc_aperm.mcmc <- function(x, perm = NULL, ...) {
chk_unused(...)
x <- as.mcmcr(x)
x <- mcmc_aperm(x, perm = perm)
as.mcmc(x)
}
mcmc_aperm.mcmc.list <- function(x, perm = NULL, ...) {
chk_unused(...)
x <- lapply(x, mcmc_aperm, perm = perm)
set_class(x, "mcmc.list")
}
mcmc_aperm.mcmcr <- function(x, perm = NULL, ...) {
chk_unused(...)
x <- lapply(x, mcmc_aperm, perm = perm)
set_class(x, "mcmcr")
}
mcmc_aperm.mcmcrs <- function(x, perm = NULL, ...) {
chk_unused(...)
x <- lapply(x, mcmc_aperm, perm = perm)
set_class(x, "mcmcrs")
}
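# Hedged sketch (assumes the mcmcr package, where these methods live, plus its
# chk/universals dependencies supplying npdims(), set_class() and the chk_*
# checks): permute the parameter dimensions of an mcmcarray whose dims are
# (chains, iterations, parameter dims...).
library(mcmcr)
x <- structure(array(rnorm(240), dim = c(2, 10, 3, 4)), class = "mcmcarray")
y <- mcmc_aperm(x, perm = 2:1)  # swap the two parameter dimensions
dim(y)                          # 2 10 4 3: chains and iterations untouched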
|
context("Test helical wheel drawing")
test_that("helical wheel produces valid ggplot object", {
temp.seq <- "AAAAAAAAAAAAAAAAAA"
temp.wheel <- draw_wheel(temp.seq)
expect_true("ggplot" %in% class(temp.wheel))
temp.seq <- "ADKS"
temp.wheel <- draw_wheel(temp.seq)
expect_true("ggplot" %in% class(temp.wheel))
temp.wheel <- draw_wheel(temp.seq, labels = TRUE)
expect_true("ggplot" %in% class(temp.wheel))
temp.wheel <- draw_wheel(temp.seq, legend = TRUE)
expect_true("ggplot" %in% class(temp.wheel))
})
|
library(tidyverse)
library(grid)
library(gridExtra)
library(gridtext)
source("include.R")
theme_set(theme_minimal() + theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(color = GRAY9),
axis.title.y = element_text(color = GRAY5, hjust = 1, size = 14),
axis.title.x = element_text(color = GRAY5, hjust = 0),
axis.ticks = element_line(color = GRAY9),
axis.text = element_text(color = GRAY6, size = 12),
plot.margin = unit(c(1,1,1,1), "cm"),
plot.title = element_text(size = 18, color = GRAY2, margin = margin(0,0,.5,0,"cm")),
plot.caption = element_text(color = GRAY8, hjust = 0, size = 10, margin = margin(.5,0,0,0,"cm"))
))
df <- read_csv(file.path("data","FIG0602.csv")) %>% mutate(sales = as.numeric(str_remove(sales,"\\$")))
grob_actual <- grobTree(richtext_grob("ACTUAL", x=.3, y=-.11, hjust=0, gp=gpar(col = GRAY6, fontsize=13)))
grob_forecast <- grobTree(richtext_grob("FORECAST", x=.78, y=-.11, hjust=0, gp=gpar(col = GRAY6, fontsize=13)))
grob_2006 <- grobTree(richtext_grob(
sprintf("<b style='color:%s'>2006-09</b> :<br>annual sale<br>growth of<br>7-8%%",BLUE2),
x=.02,y=.99, hjust=0, gp=gpar(col = GRAY6, fontsize=13), vjust = 1))
grob_2010 <- grobTree(richtext_grob(
sprintf("<b style='color:%s'>2010</b> :<br>more marked<br>increase<br>of 22%%<br>sales year<br>over year,<br>driven by",BLUE2),
x=.18,y=.99, hjust=0, gp=gpar(col = GRAY6, fontsize=13), vjust = 1))
grob_2011 <- grobTree(richtext_grob(
sprintf("<b style='color:%s'>2011-14</b> :<br>another period<br>of steady<br>frowth of<br>8-9%% annually",BLUE2),
x=.36,y=.99, hjust=0, gp=gpar(col = GRAY6, fontsize=13), vjust = 1))
grob_2015 <- grobTree(richtext_grob(
sprintf("<b style='color:%s'>2015 & beyond</b> : assumed<br>10%% year over year<br>increase in sales*",BLUE2),
x=.56,y=.99, hjust=0, gp=gpar(col = GRAY6, fontsize=13), vjust = 1))
pt <- ggplot(df, aes(x = year, y = sales)) +
geom_line(data = df %>% filter(category == "FORECAST" | (category == "ACTUAL" & year == 2014)), size = 1, color = BLUE2, linetype = 8 ) +
geom_point(data = df %>% filter(category == "FORECAST"), size = 3, color = BLUE2, shape = 16) +
geom_line(data = df %>% filter(category == "ACTUAL"), color = BLUE2, size = 2) +
geom_point(data = df %>% filter(category == "ACTUAL"), size = 3, fill = "white", color = BLUE2, stroke = 2, shape = 21) +
geom_text(data = df %>% filter(category == "FORECAST" | (category == "ACTUAL" & year == 2014)),
aes(label = scales::number_format(prefix = "$")(sales)), color = BLUE2, nudge_y = 9) +
scale_x_continuous(breaks = seq(2006,2018,1)) +
scale_y_continuous(breaks = seq(0,180,20), labels = scales::number_format(prefix = "$"), limits = c(0,180), expand = c(0,0)) +
labs(y = "Sales ($Billion)", x = "",
title = "Sales over time",
caption = "Data source: Sales Dashboard; annual figures are as of 12/31 of hte given year.\n*Use this footnote to explain what is driving the 10% annual growth forecast assumption") +
coord_cartesian(clip = "off") +
geom_rect(xmin = 2014.5, xmax = 2018.5, ymin = -28, ymax = 0, alpha = 0.01) +
annotation_custom(grob_actual) +
annotation_custom(grob_forecast) +
annotation_custom(grob_2006) +
annotation_custom(grob_2010) +
annotation_custom(grob_2011) +
annotation_custom(grob_2015)
height <- 5.5
width <- 8
dev.new(width = width, height = height, units = "in", noRStudioGD = T)
pt
ggsave(file.path("plot output", "FIG0602.png"), pt, width = width, height = height)
|
read_slf_header <- function(fname) {
stopifnot(is.character(fname) && length(fname) == 1)
check_file(fname, "slf")
stopifnot(file.exists(fname))
con <- file(fname, "rb")
on.exit(close(con), add = TRUE)
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
title <- stringr::str_trim(readChar(con, 72))
precision <- stringr::str_trim(readChar(con, 8))
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
fsize <- dplyr::case_when(
stringr::str_to_upper(precision) %in% c("SERAFIN", "SELAFIN") ~ 4L,
stringr::str_to_upper(precision) %in% c("SERAFIND", "SELAFIND") ~ 8L,
stringr::str_to_upper(precision) == "" ~ 4L,
TRUE ~ NA_integer_
)
if (is.na(fsize)) stop(paste0("Could not infer precision, unknown specification: '", precision, "'."))
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
nbv1 <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
varnames <- character(nbv1)
varunits <- character(nbv1)
for (i in seq_len(nbv1)) {
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
varnames[i] <- stringr::str_trim(readChar(con, 16))
varunits[i] <- stringr::str_trim(readChar(con, 16))
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
}
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
iparam <- readBin(con, integer(), 10, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
date <- NA  # stays NA when the file carries no date record (iparam[10] != 1)
if (iparam[10] == 1) {
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
date <- lubridate::ymd_hms(paste(readBin(con, integer(), 6, size = 4, endian = "big"), collapse = "-"), tz = "UTC")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
}
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
nelem <- readBin(con, integer(), 1, size = 4, endian = "big")
npoin <- readBin(con, integer(), 1, size = 4, endian = "big")
ndp <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
if (waste / 4 != nelem*ndp)
stop("Dimensions of connectivity table IKLE do not match!")
ikle <- readBin(con, integer(), nelem*ndp, size = 4, endian = "big")
ikle <- matrix(ikle, nrow = nelem, ncol = ndp, byrow = TRUE)
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
if (waste / 4 != npoin)
stop("Dimensions of boundary table IPOBO do not match!")
ipobo <- readBin(con, integer(), npoin, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
xvals <- readBin(con, double(), npoin, size = fsize, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
yvals <- readBin(con, double(), npoin, size = fsize, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
nbts <- 4 + fsize + 4 + (npoin * fsize + 8) * nbv1 # bytes per time step: time record plus one record per variable
seek_head <- seek(con)
seek(con, 0, origin = "end")
seek_end <- seek(con)
ntimes <- (seek_end - seek_head) / nbts
return(list(title = title,
precision = fsize,
nbv1 = nbv1,
varnames = varnames,
varunits = varunits,
date = date,
ntimes = ntimes,
nelem = nelem,
npoin = npoin,
ndp = ndp,
ikle = ikle,
ipobo = ipobo,
x = xvals,
y = yvals,
seek_head = seek_head))
}
read_slf_variable <- function(fname, seek_start, vars, nv, fsize, npoin, times = NULL) {
stopifnot(is.character(fname) && length(fname) == 1)
check_file(fname, "slf")
stopifnot(file.exists(fname))
stopifnot(all(sapply(c(seek_start, vars, nv, fsize, npoin), is.numeric)))
nvars <- length(vars)
con <- file(fname, "rb")
on.exit(close(con), add = TRUE)
seek(con, seek_start)
timestep <- NULL
values_t <- matrix(NA_real_, nrow = npoin, ncol = nvars)
values <- array(values_t, dim = c(dim(values_t), 1))
nbts <- 4 + (npoin * fsize + 8) * nv
while (TRUE) {
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
if (length(waste) == 0) break
time_t <- readBin(con, double(), 1, size = fsize, endian = "big")
if (!is.null(times) && !(time_t %in% times)) {
seek(con, nbts, origin = "current")
next
}
timestep <- c(timestep, time_t)
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
seek_pos <- seek(con)
for (i in seq_len(nvars)) {
seek(con, seek_pos + (npoin * fsize + 8) * (vars[i] - 1))
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
values_t[,i] <- readBin(con, double(), npoin, size = fsize, endian = "big")
waste <- readBin(con, integer(), 1, size = 4, endian = "big")
}
seek(con, seek_pos + (npoin * fsize + 8) * nv)
values <- array(c(values, values_t), dim = c(dim(values)[1:2], dim(values)[3] + 1))
}
if (!is.null(times) && !all(times %in% timestep))
stop("There are specified timesteps that cannot be found in the dataset!", call. = F)
return(list(time = timestep,
values = values[,,-1, drop = F]))
}
write_slf_header <- function(fname, header) {
stopifnot(is.character(fname) && length(fname) == 1)
stopifnot(inherits(header, "list"))
stopifnot(all(c("title", "precision", "nbv1", "varnames", "varunits",
"nelem", "npoin", "ndp", "ikle", "ipobo", "x", "y",
"date") %in% names(header)))
check_file(fname, "slf")
fs::dir_create(dirname(fname))
con <- file(fname, "wb")
on.exit(close(con), add = TRUE)
prec_char <- dplyr::case_when(
header$precision == 4L ~ "SELAFIN",
header$precision == 8L ~ "SELAFIND",
TRUE ~ NA_character_
)
if (is.na(prec_char)) stop("Precision value not supported (must be 4 or 8)!", call. = F)
writeBin(80L, con, size = 4, endian = "big")
writeChar(stringr::str_pad(header$title, 72, "right"), con, eos = NULL)
writeChar(stringr::str_pad(prec_char, 8, "right"), con, eos = NULL)
writeBin(80L, con, size = 4, endian = "big")
writeBin(8L, con, size = 4, endian = "big")
writeBin(header$nbv1, con, 1, size = 4, endian = "big")
writeBin(0L, con, 1, size = 4, endian = "big")
writeBin(8L, con, size = 4, endian = "big")
for (i in seq_len(header$nbv1)) {
writeBin(32L, con, size = 4, endian = "big")
writeChar(stringr::str_pad(header$varnames[i], 16, "right"), con, eos = NULL)
writeChar(stringr::str_pad(header$varunits[i], 16, "right"), con, eos = NULL)
writeBin(32L, con, size = 4, endian = "big")
}
writeBin(40L, con, size = 4, endian = "big")
writeBin(as.integer(c(1, rep(0, 8), 1)), con, size = 4, endian = "big")
writeBin(40L, con, size = 4, endian = "big")
writeBin(24L, con, size = 4, endian = "big")
writeBin(as.integer(unlist(stringr::str_split(format(header$date, "%Y %m %d %H %M %S"), " "))),
con, size = 4, endian = "big")
writeBin(24L, con, size = 4, endian = "big")
writeBin(16L, con, size = 4, endian = "big")
writeBin(header$nelem, con, size = 4, endian = "big")
writeBin(header$npoin, con, size = 4, endian = "big")
writeBin(header$ndp, con, size = 4, endian = "big")
writeBin(1L, con, size = 4, endian = "big")
writeBin(16L, con, size = 4, endian = "big")
writeBin(4L*header$nelem*header$ndp, con, size = 4, endian = "big")
writeBin(as.integer(t(header$ikle)), con, size = 4, endian = "big")
writeBin(4L*header$nelem*header$ndp, con, size = 4, endian = "big")
writeBin(4L*header$npoin, con, size = 4, endian = "big")
writeBin(as.integer(header$ipobo), con, size = 4, endian = "big")
writeBin(4L*header$npoin, con, size = 4, endian = "big")
writeBin(header$precision*header$npoin, con, size = 4, endian = "big")
writeBin(header$x, con, size = header$precision, endian = "big")
writeBin(header$precision*header$npoin, con, size = 4, endian = "big")
writeBin(header$precision*header$npoin, con, size = 4, endian = "big")
writeBin(header$y, con, size = header$precision, endian = "big")
writeBin(header$precision*header$npoin, con, size = 4, endian = "big")
}
write_slf_variable <- function(fname, data) {
stopifnot(is.character(fname) && length(fname) == 1)
check_file(fname, "slf")
stopifnot(file.exists(fname))
stopifnot(inherits(data, "list"))
stopifnot(all(c("time", "values", "precision") %in% names(data)))
if (length(dim(data$values)) != 3 || dim(data$values)[3] != length(data$time))
stop("Element 'value' in input 'data' must be of dimensions (npoin x length(nv) x length(time))!", call. = F)
con <- file(fname, "ab")
on.exit(close(con), add = TRUE)
for (j in seq_along(data$time)) {
writeBin(data$precision, con, size = 4, endian = "big")
writeBin(data$time[j], con, size = data$precision, endian = "big")
writeBin(data$precision, con, size = 4, endian = "big")
for (i in seq_len(dim(data$values)[2])) {
writeBin(data$precision*dim(data$values)[1], con, size = 4, endian = "big")
writeBin(data$values[,i,j], con, size = data$precision, endian = "big")
writeBin(data$precision*dim(data$values)[1], con, size = 4, endian = "big")
}
}
}
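# Hedged round-trip sketch with the readers above ("mesh.slf" is a
# placeholder path to an existing SELAFIN file):
hdr <- read_slf_header("mesh.slf")
dat <- read_slf_variable("mesh.slf", seek_start = hdr$seek_head,
                         vars = 1, nv = hdr$nbv1,
                         fsize = hdr$precision, npoin = hdr$npoin)
str(dat$values)  # npoin x 1 x ntimes array holding the first variable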
|
`PLL` <-
function(object, newdata, newtime, newstatus, ...){
UseMethod("PLL", object)
}
|
polyline.svg <- function(points = NULL,
fill,
stroke,
stroke.width,
stroke.opacity,
style.sheet = NULL) {
if (is.null(points)) {
stop("[ERROR] Basic line elements are required (points)!")
} else {
if (!is.matrix(points)) {
points <- as.matrix(points)
}
}
if (!is.null(style.sheet)) {
style.sheet.ele <- paste(style.sheet, collapse = ";")
} else {
style.sheet.ele <- ""
}
if (!missing(fill)) {
style.sheet.ele <- paste0(style.sheet.ele, "fill:", fill, ";")
}
if (!missing(stroke)) {
style.sheet.ele <- paste0(style.sheet.ele, "stroke:", stroke, ";")
}
if (!missing(stroke.width)) {
style.sheet.ele <- paste0(style.sheet.ele, "stroke-width:", stroke.width, ";")
}
if (!missing(stroke.opacity)) {
style.sheet.ele <- paste0(style.sheet.ele, "stroke-opacity:", stroke.opacity, ";")
}
if (style.sheet.ele != "") {
style.sheet.ele <- paste0('style="', style.sheet.ele, '"')
}
points.ele <- lapply(1:nrow(points), function(x) { paste(points[x, 1], points[x, 2], sep = " ") })
points.ele <- paste(points.ele, collapse = " , ")
polyline.svg.ele <- sprintf('<polyline points="%s" %s />', points.ele, style.sheet.ele)
return(polyline.svg.ele)
}
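# Usage: build an SVG <polyline> element from a two-column coordinate matrix.
pts <- matrix(c(0, 0, 50, 25, 100, 0), ncol = 2, byrow = TRUE)
cat(polyline.svg(pts, fill = "none", stroke = "black", stroke.width = 2))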
|
source("ESEUR_config.r")
library("plyr")
library("reshape2")
bench_path=paste0(ESEUR_dir, "benchmark/hyojun/")
merge_benchmark=function(file_str)
{
t=read.csv(paste0(bench_path, file_str), as.is=TRUE)
t=melt(t, c("Vendor", "Benchmark"), value.name="Perf")
t$variable=NULL
return(t)
}
equalise_scale=function(df)
{
df$eq_Perf=df$Perf/max(df$Perf)
return(df)
}
bfiles=list.files(path=bench_path, pattern="*.csv.xz")
all_bench=adply(bfiles, 1, merge_benchmark)
all_bench=ddply(all_bench, .(Benchmark), equalise_scale)
fv_mod=glm(eq_Perf ~ Vendor, data=all_bench)
summary(fv_mod)
|
library(testthat)
library(mockery)
set.seed(123)
data <- data.frame(
x = 1:20,
f = rep(c("a", "b"), each = 10)
)
test_that("it adds a column of NA's if no target is provided", {
o <- ptd_add_target_column(data, NULL)
expect_equal(o$target, rep(as.double(NA), 20))
})
test_that("it correctly sets target column when a numeric vector is provided", {
o <- ptd_add_target_column(data, 0.9)
expect_equal(o$target, rep(0.9, 20))
})
test_that("it correctly sets target column when a list is provided", {
o <- ptd_add_target_column(data, list("a" = 0.9, "b" = 0.8))
expect_equal(o$target, rep(c(0.9, 0.8), each = 10))
})
|
Z_unpooled_test_2x2 <- function(n, printresults=TRUE) {
n1p <- n[1, 1] + n[1, 2]
n2p <- n[2, 1] + n[2, 2]
Z <- (n[1, 1] / n1p - n[2, 1] / n2p) / sqrt(n[1, 1] * n[1, 2] / n1p ^ 3 + n[2, 1] * n[2, 2] / n2p ^ 3)
P <- 2 * (1 - pnorm(abs(Z), 0, 1))
if (is.na(P)) {
P <- 1.0
}
if (printresults) {
print(sprintf('The Z-unpooled test: P = %7.5f, Z = %6.3f', P, Z), quote=FALSE)
}
res <- data.frame(p.value=P, statistic=Z)
invisible(res)
}
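# Example: counts of successes/failures in two groups as a 2x2 table.
n <- matrix(c(15, 35, 25, 25), nrow = 2, byrow = TRUE)
Z_unpooled_test_2x2(n)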
|
CNOT5_10 <- function(a){
cnot5_10=TensorProd(CNOT4_10(diag(16)),diag(2))
result = cnot5_10 %*% a
result
}
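# Hedged sketch, assuming TensorProd() and CNOT4_10() from the same
# quantum-gate collection: apply the 5-qubit gate to a basis state.
psi <- diag(32)[, 1, drop = FALSE]  # |00000> as a 32x1 column vector
CNOT5_10(psi)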
|
rbind_list <- function(x) do.call(rbind,x)
getSummary.mblogit <- function(obj,
alpha=.05,
...){
smry <- summary(obj)
N <- obj$N
coef <- smry$coefficients
lower.cf <- qnorm(p=alpha/2,mean=coef[,1],sd=coef[,2])
upper.cf <- qnorm(p=1-alpha/2,mean=coef[,1],sd=coef[,2])
coef <- cbind(coef,lower.cf,upper.cf)
ttl <- c("est","se","stat","p","lwr","upr")
colnames(coef) <- ttl
modcat <- colnames(obj$D)
basecat <- rownames(obj$D)[rownames(obj$D)%nin%modcat]
eqs <- paste0(modcat,"~")
rn.coef <- rownames(coef)
coef.grps <- lapply(eqs,function(eq){
ii <- grep(eq,rn.coef,fixed=TRUE)
coef.grp <- coef[ii,,drop=FALSE]
rownames(coef.grp) <- gsub(eq,"",rownames(coef.grp),fixed=TRUE)
coef.grp
})
if(getOption("mblogit.show.basecat",TRUE))
grp.titles <- paste(modcat,basecat,sep=getOption("mblogit.basecat.sep","/"))
else
grp.titles <- modcat
names(coef.grps) <- grp.titles
coef <- do.call(memisc::collect,coef.grps)
VarPar <- NULL
VarCov <- smry$VarCov
se_VarCov <- smry$se_VarCov
n.eq <- length(eqs)
for(i in seq_along(VarCov)){
lv.i <- names(VarCov)[i]
vc.i <- VarCov[[i]]
se_vc.i <- se_VarCov[[i]]
vp.i <- array(NA,c(
nrow(vc.i),
ncol(vc.i),
6
))
vp.i[,,1] <- vc.i
vp.i[,,2] <- se_vc.i
m.i <- ncol(vc.i) %/% n.eq
d <- c(n.eq,m.i)
dim(vp.i) <- c(d,d,6)
vn.i <- colnames(vc.i)
vn.i <- strsplit(vn.i,"~")
vn.i <- unique(sapply(vn.i,"[",2))
dn <- list(eqs,vn.i)
dimnames(vp.i) <- c(dn,dn,list(ttl))
vp.i.arr <- aperm(vp.i,c(4,2,3,1,5))
vp.i_ <- matrix(list(NULL),n.eq,n.eq)
for(j in 1:n.eq){
for(k in 1:n.eq){
vp.ijk <- vp.i.arr[,,j,k,]
dim(vp.ijk) <- c(m.i^2,6)
rn.i.1 <- rep(vn.i,m.i)
rn.i.2 <- rep(vn.i,each=m.i)
jk.1 <- rep(1:m.i,m.i)
jk.2 <- rep(1:m.i,each=m.i)
rownames(vp.ijk) <- paste0("VCov(~",rn.i.1,",","~",rn.i.2,")")
rownames(vp.ijk)[1] <- paste0(grp.titles[j],": ",rownames(vp.ijk)[1])
rownames(vp.ijk) <- format(rownames(vp.ijk),justify="right")
colnames(vp.ijk) <- ttl
ii <- which(jk.1<=jk.2) # keep diagonal and upper-triangle entries
vp.ijk <- vp.ijk[ii,,drop=FALSE]
vp.i_[[j,k]] <- vp.ijk
}
}
vp.i_ <- lapply(1:n.eq,function(j)do.call(rbind,vp.i_[,j]))
vp.i <- array(NA,c(dim(vp.i_[[1]]),n.eq),dimnames=c(dimnames(vp.i_[[1]]),list(NULL)))
for(j in 1:n.eq)
vp.i[,,j] <- vp.i_[[j]]
VarPar <- c(VarPar,structure(list(vp.i),names=lv.i))
}
phi <- smry$phi
LR <- smry$null.deviance - smry$deviance
df <- obj$model.df
deviance <- deviance(obj)
if(df > 0){
p <- pchisq(LR,df,lower.tail=FALSE)
L0.pwr <- exp(-smry$null.deviance/N)
LM.pwr <- exp(-smry$deviance/N)
McFadden <- 1- smry$deviance/smry$null.deviance
Cox.Snell <- 1 - exp(-LR/N)
Nagelkerke <- Cox.Snell/(1-L0.pwr)
}
else {
LR <- NA
df <- NA
p <- NA
McFadden <- NA
Cox.Snell <- NA
Nagelkerke <- NA
}
ll <- obj$ll
AIC <- AIC(obj)
BIC <- AIC(obj,k=log(N))
sumstat <- c(
phi = phi,
LR = LR,
df = df,
logLik = ll,
deviance = deviance,
McFadden = McFadden,
Cox.Snell = Cox.Snell,
Nagelkerke = Nagelkerke,
AIC = AIC,
BIC = BIC,
N = N
)
ans <- list(coef= coef)
ans <- c(ans,VarPar)
if(length(smry$ngrps)){
G <-as.integer(smry$ngrps)
names(G) <- names(smry$ngrps)
names(G) <- paste("Groups by",names(G))
ans <- c(ans,list(Groups=G))
}
c(ans,
list(sumstat=sumstat,
call=obj$call,
contrasts = obj$contrasts,
xlevels = obj$xlevels))
}
getSummary.mmblogit <- getSummary.mblogit
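# Hedged sketch: this getSummary() method lets memisc::mtable() format
# mblogit fits (assumes the mclogit and memisc packages are installed).
library(mclogit)
library(memisc)
m <- mblogit(factor(cyl) ~ mpg + hp, data = mtcars)
mtable(m)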
|
library("deSolve")
options(prompt = "R> ")
options(width=70)
model <- function(t, Y, parameters) {
with (as.list(parameters),{
dy1 = -k1*Y[1] + k2*Y[2]*Y[3]
dy3 = k3*Y[2]*Y[2]
dy2 = -dy1 - dy3
list(c(dy1, dy2, dy3))
})
}
jac <- function (t, Y, parameters) {
with (as.list(parameters),{
PD[1,1] <- -k1
PD[1,2] <- k2*Y[3]
PD[1,3] <- k2*Y[2]
PD[2,1] <- k1
PD[2,3] <- -PD[1,3]
PD[3,2] <- k3*Y[2]
PD[2,2] <- -PD[1,2] - PD[3,2]
return(PD)
})
}
parms <- c(k1 = 0.04, k2 = 1e4, k3=3e7)
Y <- c(1.0, 0.0, 0.0)
times <- c(0, 0.4*10^(0:11))
PD <- matrix(nrow = 3, ncol = 3, data = 0)
out <- ode(Y, times, model, parms = parms, jacfunc = jac)
caraxisfun <- function(t, y, parms) {
with(as.list(c(y, parms)), {
yb <- r * sin(w * t)
xb <- sqrt(L * L - yb * yb)
Ll <- sqrt(xl^2 + yl^2)
Lr <- sqrt((xr - xb)^2 + (yr - yb)^2)
dxl <- ul; dyl <- vl; dxr <- ur; dyr <- vr
dul <- (L0-Ll) * xl/Ll + 2 * lam2 * (xl-xr) + lam1*xb
dvl <- (L0-Ll) * yl/Ll + 2 * lam2 * (yl-yr) + lam1*yb - k * g
dur <- (L0-Lr) * (xr-xb)/Lr - 2 * lam2 * (xl-xr)
dvr <- (L0-Lr) * (yr-yb)/Lr - 2 * lam2 * (yl-yr) - k * g
c1 <- xb * xl + yb * yl
c2 <- (xl - xr)^2 + (yl - yr)^2 - L * L
list(c(dxl, dyl, dxr, dyr, dul, dvl, dur, dvr, c1, c2))
})
}
eps <- 0.01; M <- 10; k <- M * eps^2/2;
L <- 1; L0 <- 0.5; r <- 0.1; w <- 10; g <- 1
parameter <- c(eps = eps, M = M, k = k, L = L, L0 = L0,
r = r, w = w, g = g)
yini <- c(xl = 0, yl = L0, xr = L, yr = L0, ul = -L0/L, vl = 0,
ur = -L0/L, vr = 0, lam1 = 0, lam2 = 0)
Mass <- diag(nrow = 10, 1)
Mass[5,5] <- Mass[6,6] <- Mass[7,7] <- Mass[8,8] <- M * eps * eps/2
Mass[9,9] <- Mass[10,10] <- 0
Mass
index <- c(4, 4, 2)
times <- seq(0, 3, by = 0.01)
out <- radau(y = yini, mass = Mass, times = times, func = caraxisfun,
parms = parameter, nind = index)
plot(out, which = 1:4, type = "l", lwd = 2)
f1 <- function (t, y, parms) {
ydot <- vector(len = 5)
ydot[1] <- 0.1*y[1] -0.2*y[2]
ydot[2] <- -0.3*y[1] +0.1*y[2] -0.2*y[3]
ydot[3] <- -0.3*y[2] +0.1*y[3] -0.2*y[4]
ydot[4] <- -0.3*y[3] +0.1*y[4] -0.2*y[5]
ydot[5] <- -0.3*y[4] +0.1*y[5]
return(list(ydot))
}
bandjac <- function (t, y, parms) {
jac <- matrix(nrow = 3, ncol = 5, byrow = TRUE,
data = c( 0 , -0.2, -0.2, -0.2, -0.2,
0.1, 0.1, 0.1, 0.1, 0.1,
-0.3, -0.3, -0.3, -0.3, 0))
return(jac)
}
yini <- 1:5
times <- 1:20
out <- lsode(yini, times, f1, parms = 0, jactype = "bandusr",
jacfunc = bandjac, bandup = 1, banddown = 1)
Parms <- c(0.182, 4.0, 4.0, 0.08, 0.04, 0.74, 0.05, 0.15, 0.32,
16.17, 281.48, 13.3, 16.17, 5.487, 153.8, 0.04321671,
0.4027255, 1000, 0.02, 1.0, 3.8)
yini <- c( AI=21, AAM=0, AT=0, AF=0, AL=0, CLT=0, AM=0 )
DLLfunc(y = yini, dllname = "deSolve", func = "derivsccl4",
initfunc = "initccl4", parms = Parms, times = 1,
nout = 3, outnames = c("DOSE", "MASS", "CP") )
pars <- c(K = 1, ka = 1e6, r = 1)
y <- c(A = 2, B = 3, D = 2*3/pars["K"])
dy <- c(dA = 0, dB = 0, dD = 0)
prod <- matrix(nc=2,data=c(seq(0,100,by=10),seq(0.1,0.5,len=11)))
DLLres(y=y,dy=dy,times=5,res="chemres",
dllname="deSolve", initfunc="initparms",
initforc="initforcs", parms=pars, forcings=prod,
nout=2, outnames=c("CONC","Prod"))
Flux <- matrix(ncol=2,byrow=TRUE,data=c(
1, 0.654, 11, 0.167, 21, 0.060, 41, 0.070, 73,0.277, 83,0.186,
93,0.140,103, 0.255, 113, 0.231,123, 0.309,133,1.127,143,1.923,
153,1.091,163,1.001, 173, 1.691,183, 1.404,194,1.226,204,0.767,
214, 0.893,224,0.737, 234,0.772,244, 0.726,254,0.624,264,0.439,
274,0.168,284 ,0.280, 294,0.202,304, 0.193,315,0.286,325,0.599,
335, 1.889,345, 0.996,355,0.681,365,1.135))
head(Flux)
parms <- 0.01
meanDepo <- mean(approx(Flux[,1],Flux[,2], xout=seq(1,365,by=1))$y)
Yini <- c(y=meanDepo/parms)
times <- 1:365
out <- ode(y=Yini, times, func = "scocder",
parms = parms, dllname = "deSolve",
initforc="scocforc", forcings=Flux,
initfunc = "scocpar", nout = 2,
outnames = c("Mineralisation","Depo"))
head(out)
fcontrol <- list(method="constant")
out2 <- ode(y=Yini, times, func = "scocder",
parms = parms, dllname = "deSolve",
initforc="scocforc", forcings=Flux, fcontrol=fcontrol,
initfunc = "scocpar", nout = 2,
outnames = c("Mineralisation","Depo"))
par (mfrow=c(1,2))
plot(out, which = "Depo", col="red",
xlab="days", ylab="mmol C/m2/ d", main="method='linear'")
lines(out[,"time"], out[,"Mineralisation"], lwd=2, col="blue")
legend("topleft",lwd=1:2,col=c("red","blue"), c("Flux","Mineralisation"))
plot(out, which = "Depo", col="red",
xlab="days", ylab="mmol C/m2/ d", main="method='constant'")
lines(out2[,"time"], out2[,"Mineralisation"], lwd=2, col="blue")
SPCmod <- function(t, x, parms, input) {
with(as.list(c(parms, x)), {
import <- input(t)
dS <- import - b*S*P + g*C
dP <- c*S*P - d*C*P
dC <- e*P*C - f*C
res <- c(dS, dP, dC)
list(res, signal = import)
})
}
parms <- c(b = 0.1, c = 0.1, d = 0.1, e = 0.1, f = 0.1, g = 0.0)
times <- seq(0, 100, by=0.1)
signal <- as.data.frame(list(times = times,
import = rep(0, length(times))))
signal$import <- ifelse((trunc(signal$times) %% 2 == 0), 0, 1)
sigimp <- approxfun(signal$times, signal$import, rule = 2)
xstart <- c(S = 1, P = 1, C = 1)
print (system.time(
out <- ode(y = xstart,times = times,
func = SPCmod, parms, input = sigimp)
))
plot(out)
eventdata <- data.frame(var=rep("C",10),time=seq(10,100,10),value=rep(0.5,10),
method=rep("multiply",10))
eventdata
derivs <- function(t, y, parms) {
with(as.list(c(y, parms)), {
if (t < tau)
ytau <- c(1, 1)
else
ytau <- lagvalue(t - tau, c(1, 2))
dN <- f * N - g * N * P
dP <- e * g * ytau[1] * ytau[2] - m * P
    list(c(dN, dP), Ntau = ytau[1], Ptau = ytau[2])  # distinct names avoid duplicate output columns
})
}
yinit <- c(N = 1, P = 1)
times <- seq(0, 500)
parms <- c(f = 0.1, g = 0.2, e = 0.1, m = 0.1, tau = .2)
yout <- dede(y = yinit, times = times, func = derivs, parms = parms)
head(yout)
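## Quick plot sketch (added) of the delayed predator-prey solution above:
matplot(yout[, 1], yout[, 2:3], type = "l", lty = 1,
        xlab = "time", ylab = "N, P")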
|
weightedMedian <- function(x, w = NULL, idxs = NULL, na.rm = FALSE,
interpolate = is.null(ties), ties = NULL, ...) {
if (is.null(w)) {
w <- rep(1, times = length(x))
} else {
w <- as.double(w)
}
na.rm <- as.logical(na.rm)
if (is.na(na.rm)) na.rm <- FALSE
interpolate <- as.logical(interpolate)
  if (is.null(ties)) {
    ties_id <- 1L
  } else {
    ties_id <- switch(ties,
      weighted = 1L,
      min      = 2L,
      max      = 4L,
      mean     = 8L,
      stop(sprintf("Unknown value of argument '%s': %s", "ties", ties))
    )
  }
.Call(C_weightedMedian, x, w, idxs, na.rm, interpolate, ties_id)
}
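## Usage sketch (assumes the compiled C_weightedMedian routine from the
## matrixStats namespace is available, as in the package's own environment):
# x <- c(1, 2, 3, 4)
# w <- c(1, 1, 1, 10)
# weightedMedian(x, w)   # pulled towards 4 by the heavy weight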
|
colpick <- function(design, q, all=FALSE,
select.catlg=catlg, estimable=NULL,
method="VF2", sort="natural", res3=FALSE, all0=FALSE,
quiet=FALSE, firsthit=is.numeric(design)){
catlg <- select.catlg
if (!("catlg" %in% class(design) || is.character(design) || is.numeric(design)))
stop("design must be the number of factors of a full factorial",
" or a character string that refers to an element of select.catlg",
" or of class catlg")
if (is.numeric(design)){
element <- list(list(nfac = design,
nruns = 2^design,
res = Inf,
nclear.2fis = choose(design, 2),
clear.2fis = combn(design, 2),
gen = numeric(0),
WLP = rep(0, 4)))
class(element) <- c("catlg", "list")
}
if (is.character(design)){
element <- catlg[design]
}
if ("catlg" %in% class(design)) element <- design[1]
N <- element[[1]]$nruns
n <- element[[1]]$nfac
gen <- element[[1]]$gen
p <- length(gen)
k <- n - p
clear2fis <- clear.2fis(element)
nclear2fis <- nclear.2fis(element)
if (length(clear2fis[[1]])==0)
clear2fis <- character(0)
else{
if (n <= 50)
clear2fis <- sapply(1:ncol(clear2fis[[1]]), function(obj)
paste(Letters[clear2fis[[1]][,obj]], collapse = ""))
else
clear2fis <- sapply(1:ncol(clear2fis[[1]]), function(obj)
paste("F",clear2fis[[1]][,obj], sep="", collapse = ":"))
}
if (!is.null(estimable)){
if (!is.numeric(estimable) && !is.character(estimable))
stop("estimable must be an integer-valued two row matrix",
" or a character vector")
if (is.numeric(estimable) && !is.matrix(estimable))
stop("if numeric, estimable must be a matrix with two rows")
if (is.numeric(estimable) && !nrow(estimable)==2)
stop("if numeric, estimable must be a matrix with two rows")
if (is.character(estimable)){
colons <- grep(":", estimable)
if (!(length(colons)==0 || length(colons)==length(estimable)))
stop("All elements of estimable must have the same format")
if (length(colons)==0)
estimable <- sapply(estimable,
function(obj) which(Letters %in% unlist(strsplit(obj, "", fixed=TRUE))))
else estimable <- sapply(estimable,
function(obj) as.numeric(gsub( "F", "", unlist(strsplit(obj, ":", fixed=TRUE)) )))
if (!is.matrix(estimable)) stop("invalid estimable")
}
}
Z <- t(sapply(gen, function(obj) rev(digitsBase(obj, 2, k))))
div <- 2^q-1
divbase <- 2^(0:(q-1))
Xcands <- lapply(1:div, function(obj) digitsBase(obj, 2, ndigits=q))
success <- FALSE
Xlist <- vector(mode="list")
tablist <- vector(mode="list")
clearlist <- vector(mode="list")
if (!is.null(estimable)) maplist <- vector(mode="list")
poscands <- lapply(1:k, function(obj)
if (obj <= div)
1:obj
else 1:div
)
nxt <- do.call(lazyExpandGrid, poscands)
nr <- prod(lengths(poscands))
if (!quiet) message("checking up to ", nr, " matrices")
jetzt <- rep(1:div, (k%/%div+1))[1:k]
i <- 0
while (i <= nr){
XI <- do.call(cbind, Xcands[jetzt])
if (length(Z)>0){
XII <- (XI%*%t(Z))%%2
X <- cbind(XI, XII)
}
else X <- XI
rankdefect <- "try-error" %in% class(try(solve(tcrossprod(X)), silent=TRUE))
if ((all(colSums(X) > 0) || all0) && !rankdefect){
if (length(clear2fis) > 0){
      ingroup <- character(0)
      ## use dedicated indices: the enclosing while loop already uses 'i'
      ## as its candidate counter, which must not be clobbered here
      for (i1 in 1:(n-1))
      for (j1 in (i1+1):n)
      if (all(X[,i1]==X[,j1]))
      ingroup <- c(ingroup,
      ifelse(n<=50,
      paste0(Letters[i1],Letters[j1]),
      paste0("F",i1,":F",j1)))
      clearcur <- setdiff(clear2fis, ingroup)
}
else clearcur <- clear2fis
if (is.null(estimable)){
if (length(Z)==0 && !all)
return(list(X=X, clear2fis=clearcur))
success <- TRUE
}
else{
map <- mapcalc.block(estimable, n,
sapply(clearcur,
function(obj) which(Letters %in%
unlist(strsplit(obj, "",
fixed=TRUE)))),
method=method, sort=sort)[[1]]
if (!is.null(map)) {
success <- TRUE
if (success && !all && firsthit)
return(list(X=X, clear2fis=clearcur, map=map))
}
else{
jetzt <- unlist(nxt())
i <- i+1
if (is.logical(jetzt)){
if (!jetzt) break
}
next
}
}
}
else {
jetzt <- unlist(nxt())
i <- i+1
if (is.logical(jetzt)){
if (!jetzt) break
}
next
}
tab <- sort(table(apply(X, 2, paste0, collapse="")))
tab <- unname(tab)
if (length(clearcur)== min(phimax(n,q), nclear2fis) && !all){
if (is.null(estimable))
return(list(X=X, clear2fis=clearcur))
else {
return(list(X=X, clear2fis=clearcur, map=map))
}
} else{
if (is.null(estimable)){
Xlist <- c(Xlist, list(X))
tablist <- c(tablist, list(tab))
clearlist <- c(clearlist, list(clearcur))
}
else{
if (!is.null(map)){
Xlist <- c(Xlist, list(X))
tablist <- c(tablist, list(tab))
clearlist <- c(clearlist, list(clearcur))
maplist <- c(maplist, list(map))
}
}
}
jetzt <- unlist(nxt())
i <- i+1
if (is.logical(jetzt)){
if (!jetzt) break
}
}
if (!success) {
if (!quiet) message("no suitable block arrangement was found")
return(NULL)
}
if (all) {
if (is.null(estimable))
return(list(X_matrices=Xlist, clearlist=clearlist, profiles=tablist))
else
return(list(X_matrices=Xlist, clearlist=clearlist, profiles=tablist, maplist=maplist))
}
lens <- lengths(clearlist)
tablist <- tablist[lens==max(lens)]
Xlist <- Xlist[lens==max(lens)]
clearlist <- clearlist[lens==max(lens)]
diffs <- sapply(tablist, function(obj) diff(range(obj)))
pos <- which.min(diffs)
if (!is.null(estimable))
return(list(X=Xlist[[pos]], clear2fis=clearlist[[pos]], map=maplist[[pos]]))
list(X=Xlist[[pos]], clear2fis=clearlist[[pos]])
}
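## Usage sketch (FrF2-style; 'catlg', Letters, digitsBase(), lazyExpandGrid()
## and phimax() are assumed to come from the parent package and its imports):
# colpick(5, q = 3)   # block-generator columns for a full factorial in 5 factors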
|
plot.fluss <-
function(x, subs, dims, folder = getwd(), xlims = NULL, ...){
xr <- x$flux.res
xt <- x$flux.table
d <- dims*200*5/360
if(is.null(xlims)){
xlims <- range(as.vector(sapply(xr, function(x) range(x$fl.dat$orig.dat$time))))
}
if(is.null(subs)){
dev.new(width=d[2], height=d[1])
par(mfrow = dims)
for(i in c(1:length(xr))){
zero.line <- switch(xr[[i]]$fluss$ghg, CH4 = 1870, N2O = 323, CO2 = 388.5)
plot.flux(xr[[i]], zero.line = zero.line, main=rownames(xt)[i], xlims = xlims, cex.axis=1.2, cex.lab=1.4, ...)
}
}
else{
if(length(subs)==1){
spots <- xt[,subs]
}
else{
spots <- as.factor(as.character(apply(xt[,subs], 1, function(x) paste(x, collapse="."))))
}
for(j in levels(spots)){
tmp.flux <- xr[spots==j]
tmp.nms <- rownames(xt)[spots==j]
				pdf(file=file.path(folder, paste(j, ".pdf", sep="")), width=d[2], height=d[1])
par(mfrow = dims)
for(i in c(1:length(tmp.nms))){
					zero.line <- switch(tmp.flux[[i]]$fluss$ghg, CH4 = 1870, N2O = 323, CO2 = 388.5)
plot.flux(tmp.flux[[i]], zero.line = zero.line, main=tmp.nms[i], xlims = xlims, cex.axis=1.2, cex.lab=1.4, ...)
}
dev.off()
}
}
}
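## Usage sketch (expects the result structure produced upstream in the same
## package: 'fl' carrying $flux.res and $flux.table components):
# plot.fluss(fl, subs = NULL, dims = c(3, 4), folder = tempdir())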
|
dislnorm =
setRefClass("dislnorm",
contains = "discrete_distribution",
fields = list(
dat = function(x) {
if (!missing(x) && !is.null(x)) {
check_discrete_data(x)
x = sort(x)
tab = table(x)
values = as.numeric(names(tab))
freq = as.vector(tab)
internal[["cum_n"]] <<- rev(cumsum(rev(freq)))
internal[["freq"]] <<- freq
internal[["values"]] <<- values
internal[["dat"]] <<- x
xmin <<- min(values)
} else internal[["dat"]]
},
xmin = function(x) {
if (!missing(x) && !is.null(x)) {
if ("estimate_xmin" %in% class(x)) {
pars <<- x$pars
x = x$xmin
}
internal[["xmin"]] <<- x
if (length(internal[["values"]])) {
selection = min(which(internal[["values"]] >= x))
internal[["n"]] <<- internal[["cum_n"]][selection]
}
} else internal[["xmin"]]
},
pars = function(x) {
if (!missing(x) && !is.null(x)) {
if ("estimate_pars" %in% class(x)) x = x$pars
internal[["pars"]] <<- x
} else internal[["pars"]]
}
))
dislnorm$methods(
list(
initialize = function(dat) {
no_pars <<- 2
if (!missing(dat)) {
check_discrete_data(dat)
x = sort(dat)
tab = table(x)
values = as.numeric(names(tab))
freq = as.vector(tab)
internal[["cum_n"]] <<- rev(cumsum(rev(freq)))
internal[["freq"]] <<- freq
internal[["values"]] <<- values
internal[["dat"]] <<- x
xmin <<- min(values)
}
}
)
)
setMethod("dist_pdf",
signature = signature(m = "dislnorm"),
definition = function(m, q = NULL, log = FALSE) {
xmin = m$getXmin(); pars = m$getPars()
if (is.null(q)) q = m$dat
l1 = plnorm(q - 0.5, pars[1], pars[2], lower.tail = FALSE, log.p = TRUE)
l2 = plnorm(q + 0.5, pars[1], pars[2], lower.tail = FALSE, log.p = TRUE)
pdf = l1 + log(1 - exp(l2 - l1)) -
plnorm(xmin - 0.5, pars[1], pars[2], lower.tail = FALSE, log.p = TRUE)
if (!log) {
pdf = exp(pdf)
pdf[q < xmin] = 0
} else {
pdf[q < xmin] = -Inf
}
pdf
}
)
setMethod("dist_cdf",
signature = signature(m = "dislnorm"),
definition = function(m, q = NULL, lower_tail = TRUE) {
xmin = m$getXmin(); pars = m$getPars()
if (is.null(pars)) stop("Model parameters not set.")
if (is.null(q)) q = m$dat
if (lower_tail) {
p = plnorm(q + 0.5, pars[1], pars[2], lower.tail = lower_tail)
C = plnorm(xmin - 0.5, pars[1], pars[2], lower.tail = FALSE)
cdf = (p / C - 1 / C + 1)
} else {
log_p = plnorm(q + 0.5, pars[1], pars[2], lower.tail = FALSE, log.p = TRUE)
      # normalising constant uses xmin - 0.5, matching the lower-tail branch
      log_C = plnorm(xmin - 0.5, pars[1], pars[2], lower.tail = FALSE, log.p = TRUE)
cdf = exp(log_p - log_C)
}
cdf[q < xmin] = 0
cdf
}
)
setMethod("dist_all_cdf",
signature = signature(m = "dislnorm"),
definition = function(m, lower_tail = TRUE, xmax = 1e5) {
xmin = m$getXmin()
xmax = max(m$dat[m$dat <= xmax])
dist_cdf(m, q = xmin:xmax, lower_tail = lower_tail)
}
)
setMethod("dist_ll",
signature = signature(m = "dislnorm"),
definition = function(m) {
xmin = m$getXmin()
dv = m$internal[["values"]]
cut_off = (dv >= xmin)
dv = dv[cut_off]
df = m$internal[["freq"]][cut_off]
dis_lnorm_tail_ll(dv, df, m$getPars(), xmin)
}
)
dis_lnorm_tail_ll = function(xv, xf, pars, xmin) {
if (is.vector(pars)) pars = t(as.matrix(pars))
n = sum(xf)
p = function(par) {
m_log = par[1]; sd_log = par[2]
plnorm(xv - 0.5, m_log, sd_log, lower.tail = FALSE) -
plnorm(xv + 0.5, m_log, sd_log, lower.tail = FALSE)
}
if (length(xv) == 1L) {
joint_prob = sum(xf * log(apply(pars, 1, p)))
} else {
joint_prob = colSums(xf * log(apply(pars, 1, p)))
}
prob_over = apply(pars, 1, function(i)
plnorm(xmin - 0.5, i[1], i[2],
lower.tail = FALSE, log.p = TRUE))
return(joint_prob - n * prob_over)
}
setMethod("dist_rand",
signature = signature(m = "dislnorm"),
definition = function(m, n = "numeric") {
xmin = m$getXmin(); pars = m$getPars()
lower = xmin - 0.5
rns = numeric(n)
i = 0; N = 0
while (i < (n - 0.5)) {
N = ceiling((n - i) / plnorm(lower, pars[1L], pars[2L], lower.tail = FALSE))
x = rlnorm(N, pars[1L], pars[2L])
x = x[x >= lower]
if (length(x)) {
x = x[1:min(length(x), n - i)]
rns[(i + 1L):(i + length(x))] = x
i = i + length(x)
}
}
round(rns)
}
)
dislnorm$methods(
mle = function(set = TRUE, initialise = NULL) {
xv = internal[["values"]]
cut_off = (xv > xmin - 0.5)
xv = xv[cut_off]
xf = internal[["freq"]][cut_off]
if (is.null(initialise)) {
n = sum(xf)
x_log = log(xv)
expect2 = sum(x_log^2 * xf) / n
x_log_mean = sum(x_log * xf) / n
x_log_sd = sqrt((expect2 - x_log_mean^2))
theta_0 = c(x_log_mean, x_log_sd)
} else {
theta_0 = initialise
}
negloglike = function(par) {
r = -dis_lnorm_tail_ll(xv, xf, par, xmin)
if (!is.finite(r)) r = 1e12
r
}
mle = suppressWarnings(optim(par = theta_0,
fn = negloglike,
method = "L-BFGS-B",
lower = c(-Inf, .Machine$double.eps)))
if (set)
pars <<- mle$par
class(mle) = "estimate_pars"
names(mle)[1L] = "pars"
mle
}
)
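## Usage sketch in the style of the poweRlaw package (hypothetical data and
## helper generics assumed; not part of the class definition above):
# m <- dislnorm$new(word_counts)   # 'word_counts': a vector of positive integers
# m$xmin <- 7                      # lower cut-off of the fitted tail
# est <- m$mle()                   # maximum-likelihood (meanlog, sdlog)
# est$pars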
|
Clest.base <-
function(data,B,alpha,nstart,n,L1=12,maxK,sparse,silent){
	ClassError<-matrix(NA,B,(maxK-1));colnames(ClassError)<-paste0("k=",2:maxK)
for (b in 1:B){
if (!silent)cat(" Assessing a random partition", b, "out of",B,"\n")
Ln<-ceiling(2*n/3);Tn<-n-Ln
ind<-sample(1:n,Ln,replace=FALSE)
L<-data[ind,];T<-data[-ind,];p<-ncol(L);
cer<-vector(length=(maxK-1))
for (ncl in 2:maxK){
if (!silent)cat(" K=",ncl,"\n")
rL<-RSKC(L,ncl,alpha,L1=L1,nstart=nstart,silent=TRUE)
W<-rL$weights
sumW<-sum(W)
trans.L<-reduce.dimention.data(L,W)
trans.mu<-reduce.dimention.mu(trans.L,W,rL$labels,out=rL$oW,K=ncl,N=Ln)
trans.T<-reduce.dimention.data(T,W)
WdisC<-WDISC(trans.T,trans.mu,ncl,Tn,W[W!=0],sumW)
T1<-max.col(-WdisC)
rT<-RSKC(T,ncl,alpha,L1=L1,nstart=nstart,silent=TRUE)
T2<-rT$labels
ClassError[b,(ncl-1)]<-CER(T1,T2,Tn)
}
}
cer<-apply(ClassError,2,median,na.rm=TRUE)
return(list(cer=cer,bootstrap=ClassError))
}
reduce.dimention.data<-function(data,W){
return(t(t(data[,W!=0,drop=FALSE])*sqrt(W[W!=0])))
}
reduce.dimention.mu<-function(reduced.data,W,C,out,K=length(unique(C)),N=nrow(reduced.data)){
if (is.character(out)) out<-N+1
w.mu<-matrix(NA,K,sum(W!=0))
C2<-C;C2[out]<--1;C2<-C2[1:N]
for (k in 1 : K)
{
w.mu[k,]<-colMeans(reduced.data[C2==k,,drop=FALSE],na.rm=TRUE)
}
return(w.mu)
}
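## Small illustration of the helper above (added sketch): zero-weight columns
## are dropped and the remaining columns rescaled by sqrt(weight).
W.demo <- c(0.5, 0, 2)
reduce.dimention.data(matrix(1:6, nrow = 2), W.demo)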
|
context("test-summary-stats")
test_that("output from ds_summary is as expected", {
actual <- round(ds_summary(mtcarz, mpg)$variance, 2)
expected <- 36.32
expect_equal(actual, expected)
})
test_that("ds_summary_stats throws appropriate errors", {
fdata <- dplyr::select(mtcarz, cyl, gear, am, vs)
expect_error(ds_summary_stats(fdata), 'Data has no continuous variables.')
expect_error(ds_summary_stats(mtcarz, cyl, gear), 'Data has no continuous variables.')
})
|
clusterPIC_Z <-
function(
L,
R,
y,
xcov,
IC,
scale.designX,
scaled,
zcov,
area,
binary,
I,
order,
knots,
grids,
a_eta,
b_eta,
a_ga,
b_ga,
a_tau,
b_tau,
beta_iter,
phi_iter,
beta_cand,
phi_cand,
beta_sig0,
x_user,
total,
burnin,
thin,
conf.int,
seed){
Ispline<-function(x,order,knots){
k=order+1
m=length(knots)
n=m-2+k
t=c(rep(1,k)*knots[1], knots[2:(m-1)], rep(1,k)*knots[m])
yy1=array(rep(0,(n+k-1)*length(x)),dim=c(n+k-1, length(x)))
for (l in k:n){
yy1[l,]=(x>=t[l] & x<t[l+1])/(t[l+1]-t[l])
}
yytem1=yy1
for (ii in 1:order){
yytem2=array(rep(0,(n+k-1-ii)*length(x)),dim=c(n+k-1-ii, length(x)))
for (i in (k-ii):n){
yytem2[i,]=(ii+1)*((x-t[i])*yytem1[i,]+(t[i+ii+1]-x)*yytem1[i+1,])/(t[i+ii+1]-t[i])/ii
}
yytem1=yytem2
}
index=rep(0,length(x))
for (i in 1:length(x)){
index[i]=sum(t<=x[i])
}
yy=array(rep(0,(n-1)*length(x)),dim=c(n-1,length(x)))
if (order==1){
for (i in 2:n){
yy[i-1,]=(i<index-order+1)+(i==index)*(t[i+order+1]-t[i])*yytem2[i,]/(order+1)
}
}else{
for (j in 1:length(x)){
for (i in 2:n){
if (i<(index[j]-order+1)){
yy[i-1,j]=1
}else if ((i<=index[j]) && (i>=(index[j]-order+1))){
yy[i-1,j]=(t[(i+order+1):(index[j]+order+1)]-t[i:index[j]])%*%yytem2[i:index[j],j]/(order+1)
}else{
yy[i-1,j]=0
}
}
}
}
return(yy)
}
Mspline<-function(x,order,knots){
k1=order
m=length(knots)
n1=m-2+k1
t1=c(rep(1,k1)*knots[1], knots[2:(m-1)], rep(1,k1)*knots[m])
tem1=array(rep(0,(n1+k1-1)*length(x)),dim=c(n1+k1-1, length(x)))
for (l in k1:n1){
tem1[l,]=(x>=t1[l] & x<t1[l+1])/(t1[l+1]-t1[l])
}
if (order==1){
mbases=tem1
}else{
mbases=tem1
for (ii in 1:(order-1)){
tem=array(rep(0,(n1+k1-1-ii)*length(x)),dim=c(n1+k1-1-ii, length(x)))
for (i in (k1-ii):n1){
tem[i,]=(ii+1)*((x-t1[i])*mbases[i,]+(t1[i+ii+1]-x)*mbases[i+1,])/(t1[i+ii+1]-t1[i])/ii
}
mbases=tem
}
}
return(mbases)
}
## sample from a zero-truncated Poisson(lambda): invert the conditional
## CDF P(X <= t | X > 0), extending the cumulative table as needed
poissrndpositive<-function(lambda){
  q=200
  t=seq(0,q,1)
  p=dpois(t,lambda)
  pp=cumsum(p[2:(q+1)])/(1-p[1])
  u=runif(1)
  while(u>pp[q]){
    q=q+1
    pp[q]=pp[q-1]+dpois(q,lambda)/(1-p[1])
  }
  ll=sum(u>pp)+1
  return(ll)
}
set.seed(seed)
L=matrix(L,ncol=1)
R=matrix(R,ncol=1)
y=matrix(y,ncol=1)
xcov=as.matrix(xcov)
zcov=as.matrix(zcov)
area=matrix(area,ncol=1)
IC=matrix(IC,ncol=1)
p=ncol(xcov)
q=ncol(zcov)
if (scale.designX==TRUE){
mean_X<-apply(xcov,2,mean)
sd_X<-apply(xcov,2,sd)
for (r in 1:p){
if (scaled[r]==1) xcov[,r]<-(xcov[,r]-mean_X[r])/sd_X[r]
}
}
n1=sum(IC==0)
n2=sum(IC==1)
N=n1+n2
t<-rep(0,N)
for (i in 1:N) {t[i]=ifelse(IC[i]==0,L[i],0)}
K=length(knots)-2+order
kgrids=length(grids)
G<-length(x_user)/p
bmsT=Mspline(t[1:n1],order,knots)
bisT=Ispline(t[1:n1],order,knots)
bisL=Ispline(L[(n1+1):N],order,knots)
bisR=Ispline(R[(n1+1):N],order,knots)
bisg=Ispline(grids,order,knots)
eta=rgamma(1,a_eta,rate=b_eta)
tau=rgamma(q,a_tau,rate=b_tau)
gamcoef=matrix(rgamma(K, 1, rate=1),ncol=K)
phicoef<-matrix(rep(0,I*q),ncol=q)
phicoef2<-matrix(rep(0,N*q),ncol=q)
for (j in 1:N){
phicoef2[j,]<-phicoef[area[j],]
}
beta=matrix(rep(0,p),p,1)
beta_original=matrix(rep(0,p),p,1)
u=array(rep(0,n1*K),dim=c(n1,K))
for (i in 1:n1){
u[i,]=rmultinom(1,1,gamcoef*t(bmsT[,i]))
}
lambdatT=t(gamcoef%*%bmsT)
LambdatT=t(gamcoef%*%bisT)
LambdatL=t(gamcoef%*%bisL)
LambdatR=t(gamcoef%*%bisR)
Lambdatg=t(gamcoef%*%bisg)
parbeta=array(rep(0,total*p),dim=c(total,p))
parbeta_original=array(rep(0,total*p),dim=c(total,p))
pareta=array(rep(0,total),dim=c(total,1))
partau=array(rep(0,total*q),dim=c(total,q))
parphi=array(rep(0,I*q*total),dim=c(I,q,total))
pargam=array(rep(0,total*K),dim=c(total,K))
parsurv0=array(rep(0,total*kgrids),dim=c(total,kgrids))
parlambdatT=array(rep(0,total*n1),dim=c(total,n1))
parLambdatT=array(rep(0,total*n1),dim=c(total,n1))
parLambdatL=array(rep(0,total*n2),dim=c(total,n2))
parLambdatR=array(rep(0,total*n2),dim=c(total,n2))
pardev=array(rep(0,total),dim=c(total,1))
parfinv_exact=array(rep(0,total*n1),dim=c(total,n1))
parfinv_IC=array(rep(0,total*n2),dim=c(total,n2))
if (is.null(x_user)){parsurv=parsurv0} else {
G<-length(x_user)/p
parsurv=array(rep(0,total*kgrids*G),dim=c(total,kgrids*G))}
iter=1
while (iter<total+1)
{
z=array(rep(0,n2),dim=c(n2,1)); w=z
zz=array(rep(0,n2*K),dim=c(n2,K)); ww=zz
for (j in 1:n2){
if (y[n1+j]==0){
templam1=LambdatR[j]*exp(xcov[(n1+j),]%*%beta+zcov[(n1+j),]%*%phicoef[area[n1+j],])
z[j]=poissrndpositive(templam1)
zz[j,]=rmultinom(1,z[j],gamcoef*t(bisR[,j]))
} else if (y[n1+j]==1){
templam1=(LambdatR[j]-LambdatL[j])*exp(xcov[(n1+j),]%*%beta+zcov[(n1+j),]%*%phicoef[area[n1+j],])
w[j]=poissrndpositive(templam1)
ww[j,]=rmultinom(1,w[j],gamcoef*t(bisR[,j]-bisL[,j]))
}
}
te1=z*(y[(n1+1):N]==0)+w*(y[(n1+1):N]==1)
te2=(LambdatR*(y[(n1+1):N]==0)+LambdatR*(y[(n1+1):N]==1)+LambdatL*(y[(n1+1):N]==2))
te3=LambdatT
for (r in 1:p){
if (binary[r]==0){
beta1<-beta2<-beta
if (iter<beta_iter) sd_cand<-beta_cand[r] else sd_cand<-sd(parbeta[1:(iter-1),r])
xt<-beta[r]
yt<-rnorm(1,xt,sd_cand)
beta1[r]<-yt
beta2[r]<-xt
log_f1<-sum(yt*xcov[1:n1,r]-te3*exp(xcov[1:n1,]%*%beta1+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(yt*xcov[(n1+1):N,r]*te1)-sum(exp(xcov[(n1+1):N,]%*%beta1+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*te2)-0.5*(yt^2)/(beta_sig0^2)
log_f2<-sum(xt*xcov[1:n1,r]-te3*exp(xcov[1:n1,]%*%beta2+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(xt*xcov[(n1+1):N,r]*te1)-sum(exp(xcov[(n1+1):N,]%*%beta2+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*te2)-0.5*(xt^2)/(beta_sig0^2)
num<-log_f1
den<-log_f2
if (log(runif(1))<(num-den)) beta[r]<-yt else beta[r]<-xt
}
if (binary[r]==1 & p>1){
te4=sum(xcov[1:n1,r])+sum(xcov[(n1+1):N,r]*te1)
te5=sum(te3*exp(as.matrix(xcov[1:n1,-r])%*%as.matrix(beta[-r])+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*xcov[1:n1,r])+sum(te2*exp(as.matrix(xcov[(n1+1):N,-r])%*%as.matrix(beta[-r])+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*xcov[(n1+1):N,r])
beta[r]<-log(rgamma(1,a_ga+te4,rate=b_ga+te5))
}
if (binary[r]==1 & p==1){
te4=sum(xcov[1:n1,r])+sum(xcov[(n1+1):N,r]*te1)
te5=sum(te3*exp(apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*xcov[1:n1,r])+sum(te2*exp(apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum))*xcov[(n1+1):N,r])
beta[r]<-log(rgamma(1,a_ga+te4,rate=b_ga+te5))
}
}
if (scale.designX==TRUE){
for (r in 1:p) beta_original[r]<-ifelse(scaled[r]==1,beta[r]/sd_X[r],beta[r])
}
if (scale.designX==FALSE) beta_original<-beta
for (l in 1:K){
tempa=1+sum(u[,l])+sum(zz[,l]*(y[(n1+1):N]==0)+ww[,l]*(y[(n1+1):N]==1))
tempb=eta+sum(bisT[l,]*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))+sum(((bisR[l,])*(y[(n1+1):N]==0)+(bisR[l,])*(y[(n1+1):N]==1)
+(bisL[l,])*(y[(n1+1):N]==2))*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
gamcoef[l]=rgamma(1,tempa,rate=tempb)
}
lambdatT=t(gamcoef%*%bmsT)
LambdatT=t(gamcoef%*%bisT)
LambdatL=t(gamcoef%*%bisL)
LambdatR=t(gamcoef%*%bisR)
u=array(rep(0,n1*K),dim=c(n1,K))
for (i in 1:n1){
u[i,]=rmultinom(1,1,gamcoef*t(bmsT[,i]))
}
eta=rgamma(1,a_eta+K, rate=b_eta+sum(gamcoef))
for (r in 1:q){
phi1<-array(rep(0,N*q),dim=c(N,q))
phi2<-array(rep(0,N*q),dim=c(N,q))
for (i in 1:I){
phi1<-phi2<-phicoef2
if (iter<phi_iter) sd_cand<-phi_cand else sd_cand<-sd(parphi[i,r,1:(iter-1)])
xt<-phicoef[i,r]
yt<-rnorm(1,xt,sd_cand)
phi1[area==i,r]<-yt
phi2[area==i,r]<-xt
log_f1<-sum(zcov[1:n1,r]*phi1[1:n1,r])-sum(te3*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phi1[1:n1,],1,sum)))+sum(zcov[(n1+1):N,r]*phi1[(n1+1):N,r]*te1)-sum((exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phi1[(n1+1):N,],1,sum)))*te2)-0.5*tau[r]*(yt^2)
log_f2<-sum(zcov[1:n1,r]*phi2[1:n1,r])-sum(te3*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phi2[1:n1,],1,sum)))+sum(zcov[(n1+1):N,r]*phi2[(n1+1):N,r]*te1)-sum((exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phi2[(n1+1):N,],1,sum)))*te2)-0.5*tau[r]*(xt^2)
num<-log_f1
den<-log_f2
if (log(runif(1))<(num-den)) phicoef[i,r]<-yt else phicoef[i,r]<-xt
phicoef2[area==i,r]<-phicoef[i,r]
}
}
for (r in 1:q){
tau[r]<-rgamma(1,0.5*I+a_tau,rate=0.5*t(phicoef[,r])%*%phicoef[,r]+b_tau)
}
f_iter_exact<-lambdatT*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum))*exp(-LambdatT*exp(xcov[1:n1,]%*%beta+apply(zcov[1:n1,]*phicoef2[1:n1,],1,sum)))
FL<-1-exp(-LambdatL*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
FR<-1-exp(-LambdatR*exp(xcov[(n1+1):N,]%*%beta+apply(zcov[(n1+1):N,]*phicoef2[(n1+1):N,],1,sum)))
f_iter_IC<-(FR^(y[(n1+1):N]==0))*((FR-FL)^(y[(n1+1):N]==1))*((1-FL)^(y[(n1+1):N]==2))
finv_iter_exact<-1/f_iter_exact
finv_iter_IC<-1/f_iter_IC
loglike<-sum(log(f_iter_exact))+sum(log(FR^(y[(n1+1):N]==0))+log((FR-FL)^(y[(n1+1):N]==1))+log((1-FL)^(y[(n1+1):N]==2)))
dev<--2*loglike
parbeta[iter,]=beta
parbeta_original[iter,]=beta_original
pareta[iter]=eta
partau[iter,]=tau
parphi[,,iter]=phicoef
pargam[iter,]=gamcoef
ttt=gamcoef%*%bisg
if (scale.designX==FALSE) {parsurv0[iter,]<-exp(-ttt)}
if (scale.designX==TRUE) {parsurv0[iter,]<-exp(-ttt*exp(-sum((beta*mean_X/sd_X)[scaled==1])))}
parlambdatT[iter,]=lambdatT
parLambdatT[iter,]=LambdatT
parLambdatL[iter,]=LambdatL
parLambdatR[iter,]=LambdatR
pardev[iter]=dev
parfinv_exact[iter,]=finv_iter_exact
parfinv_IC[iter,]=finv_iter_IC
if (is.null(x_user)){parsurv[iter,]=parsurv0[iter,]} else {
A<-matrix(x_user,byrow=TRUE,ncol=p)
if (scale.designX==TRUE){
for (r in 1:p){
if (scaled[r]==1) A[,r]<-(A[,r]-mean_X[r])/sd_X[r]}
}
B<-exp(A%*%beta)
for (g in 1:G){
parsurv[iter,((g-1)*kgrids+1):(g*kgrids)]=exp(-ttt*B[g,1])}
}
iter=iter+1
if (iter%%100==0) print(iter)
}
wbeta=as.matrix(parbeta_original[seq((burnin+thin),total,by=thin),],ncol=p)
wparsurv0=as.matrix(parsurv0[seq((burnin+thin),total,by=thin),],ncol=kgrids)
wparsurv=as.matrix(parsurv[seq((burnin+thin),total,by=thin),],ncol=kgrids*G)
coef<-apply(wbeta,2,mean)
coef_ssd<-apply(wbeta,2,sd)
coef_ci<-array(rep(0,p*2),dim=c(p,2))
S0_m<-apply(wparsurv0,2,mean)
S_m<- apply(wparsurv,2,mean)
colnames(coef_ci)<-c(paste(100*(1-conf.int)/2,"%CI"),paste(100*(0.5+conf.int/2),"%CI"))
for (r in 1:p) coef_ci[r,]<-quantile(wbeta[,r],c((1-conf.int)/2,0.5+conf.int/2))
CPO_exact=1/apply(parfinv_exact[seq((burnin+thin),total,by=thin),],2,mean)
CPO_IC=1/apply(parfinv_IC[seq((burnin+thin),total,by=thin),],2,mean)
NLLK_exact=-sum(log(CPO_exact))
NLLK_IC=-sum(log(CPO_IC))
NLLK=NLLK_exact+NLLK_IC
LambdatL_m<-apply(parLambdatL[seq((burnin+thin),total,by=thin),],2,mean)
LambdatR_m<-apply(parLambdatR[seq((burnin+thin),total,by=thin),],2,mean)
beta_m<-apply(parbeta[seq((burnin+thin),total,by=thin),],2,mean)
phicoef_m<-apply(parphi[,,seq((burnin+thin),total,by=thin)],c(1,2),mean)
phicoef2_m<-array(rep(0,N*q),dim=c(N,q))
for (j in 1:N){
phicoef2_m[j,]<-phicoef_m[area[j],]
}
FL_m<-1-exp(-LambdatL_m*exp(xcov[(n1+1):N,]%*%beta_m+apply(zcov[(n1+1):N,]*phicoef2_m[(n1+1):N,],1,sum)))
FR_m<-1-exp(-LambdatR_m*exp(xcov[(n1+1):N,]%*%beta_m+apply(zcov[(n1+1):N,]*phicoef2_m[(n1+1):N,],1,sum)))
loglike_m_IC<-sum(log(FR_m^(y[(n1+1):N]==0))+log((FR_m-FL_m)^(y[(n1+1):N]==1))+log((1-FL_m)^(y[(n1+1):N]==2)))
D_thetabar_IC<--2*loglike_m_IC
lambdatT_m<-apply(parlambdatT[seq((burnin+thin),total,by=thin),],2,mean)
LambdatT_m<-apply(parLambdatT[seq((burnin+thin),total,by=thin),],2,mean)
  loglike_m_exact<-sum(log(lambdatT_m)+xcov[1:n1,]%*%beta_m+apply(zcov[1:n1,]*phicoef2_m[1:n1,],1,sum)-LambdatT_m*exp(xcov[1:n1,]%*%beta_m+apply(zcov[1:n1,]*phicoef2_m[1:n1,],1,sum)))
D_thetabar_exact<--2*loglike_m_exact
D_bar=mean(pardev[seq((burnin+thin),total,by=thin)])
D_thetabar=D_thetabar_IC+D_thetabar_exact
DIC=2*D_bar-D_thetabar
est<-list(
N=nrow(xcov),
nameX=colnames(xcov),
parbeta=parbeta_original,
parsurv0=parsurv0,
parsurv=parsurv,
coef = coef,
coef_ssd = coef_ssd,
coef_ci = coef_ci,
S0_m = S0_m,
S_m = S_m,
grids=grids,
DIC = DIC,
NLLK = NLLK
)
est
}
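## Usage sketch (heavily abridged; argument values are hypothetical and the
## data objects L, R, y, IC, xcov, zcov, area must be prepared as described
## in the parent package's documentation for partly interval-censored data):
# fit <- clusterPIC_Z(L, R, y, xcov, IC, scale.designX = TRUE,
#                     scaled = rep(1, ncol(xcov)), zcov = zcov, area = area,
#                     binary = rep(0, ncol(xcov)), I = max(area), order = 3,
#                     knots = quantile(c(L, R), seq(0, 1, 0.25)),
#                     grids = seq(0.1, 10, 0.1),
#                     a_eta = 1, b_eta = 1, a_ga = 1, b_ga = 1,
#                     a_tau = 1, b_tau = 1, beta_iter = 1000, phi_iter = 1000,
#                     beta_cand = rep(1, ncol(xcov)), phi_cand = 1,
#                     beta_sig0 = 10, x_user = NULL, total = 5000,
#                     burnin = 1000, thin = 10, conf.int = 0.95, seed = 1)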
|
standardize <-function(xx)
{
n <- nrow(xx)
p <- ncol(xx)
aa <- matrix(rep(apply(xx,2,mean), n), ncol=p, byrow=TRUE)
bb <- sqrt(matrix(rep(apply(xx,2,var), n), ncol=p, byrow=TRUE))
return((xx-aa)/bb)
}
unitize <-function(xx)
{
n <- nrow(xx)
p <- ncol(xx)
aa <- matrix(rep(apply(xx,2,mean), n), ncol=p, byrow=TRUE)
bb <- sqrt((n-1)*matrix(rep(apply(xx,2,var), n), ncol=p, byrow=TRUE))
return((xx-aa)/bb)
}
cubitize <-function(xx)
{
n <- nrow(xx)
p <- ncol(xx)
aa <- matrix(rep(apply(xx,2,min), n), ncol=p, byrow=TRUE)
bb <- matrix(rep(apply(xx,2,max), n), ncol=p, byrow=TRUE)
return((xx-aa)/(bb-aa))
}
intervalize <-function(xx, a=-1, b=1)
{
n <- nrow(xx)
p <- ncol(xx)
aa <- matrix(rep(apply(xx,2,min), n), ncol=p, byrow=TRUE)
bb <- matrix(rep(apply(xx,2,max), n), ncol=p, byrow=TRUE)
alf <- (b-a)/(bb-aa)
return(alf*xx + ((b+a-alf*(aa+bb))/2))
}
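## Quick demonstration of the four scalings (added sketch): standardize()
## yields mean-0 / sd-1 columns, unitize() unit-norm centred columns,
## cubitize() maps each column to [0, 1], intervalize() to [a, b].
xx.demo <- matrix(rnorm(20), ncol = 2)
round(colMeans(standardize(xx.demo)), 10)   # ~0
range(cubitize(xx.demo))                    # 0 and 1
range(intervalize(xx.demo, a = -1, b = 1))  # -1 and 1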
|
context("Testing tk_get_timeseries functions")
n <- 29
test_datetime <- c("2016-01-01 00:00:00",
"2016-01-01 00:00:03",
"2016-01-01 00:00:06") %>%
ymd_hms()
test_that("tk_get_timeseries_signature(datetime) test returns correct format.", {
test <- tk_get_timeseries_signature(test_datetime)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 3)
expect_equal(ncol(test), n)
})
test_date <- c("2016-01-01",
"2016-01-02",
"2016-01-03") %>%
as_date()
test_that("tk_get_timeseries_signature(date) test returns correct format.", {
test <- tk_get_timeseries_signature(test_date)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 3)
expect_equal(ncol(test), n)
})
test_yearmon <- c("2016-01-01",
"2016-02-01",
"2016-03-01") %>%
ymd() %>%
as.yearmon()
test_that("tk_get_timeseries_signature(yearmon) test returns correct format.", {
test <- tk_get_timeseries_signature(test_yearmon)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 3)
expect_equal(ncol(test), n)
})
test_yearqtr <- c("2016-01-01",
"2016-04-01",
"2016-07-01",
"2016-10-01") %>%
ymd() %>%
as.yearqtr()
test_that("tk_get_timeseries_signature(yearqtr) test returns correct format.", {
test <- tk_get_timeseries_signature(test_yearqtr)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 4)
expect_equal(ncol(test), n)
})
test_numeric <- c(2016.00, 2016.25, 2016.50, 2016.75)
test_that("tk_get_timeseries_signature(numeric) test returns correct format.", {
expect_error(tk_get_timeseries_signature(test_numeric))
})
test_default <- letters
test_that("tk_get_timeseries_signature(default) test returns correct format.", {
expect_error(tk_get_timeseries_signature(test_default))
})
test_datetime <- c("2016-01-01 00:00:00",
"2016-01-01 00:00:03",
"2016-01-01 00:00:06") %>%
ymd_hms()
test_that("tk_get_timeseries_summary(datetime) test returns correct format.", {
test <- tk_get_timeseries_summary(test_datetime)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 1)
expect_equal(ncol(test), 12)
})
test_date <- c("2016-01-01",
"2016-01-02",
"2016-01-03") %>%
as_date()
test_that("tk_get_timeseries_summary(date) test returns correct format.", {
test <- tk_get_timeseries_summary(test_date)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 1)
expect_equal(ncol(test), 12)
})
test_yearmon <- c("2016-01",
"2016-02",
"2016-03") %>%
as.yearmon()
test_that("tk_get_timeseries_summary(yearmon) test returns correct format.", {
test <- tk_get_timeseries_summary(test_yearmon)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 1)
expect_equal(ncol(test), 12)
})
test_yearqtr <- c("2016 Q1",
"2016 Q2",
"2016 Q3",
"2016 Q4") %>%
as.yearqtr()
test_that("tk_get_timeseries_summary(yearqtr) test returns correct format.", {
test <- tk_get_timeseries_summary(test_yearqtr)
expect_true(inherits(test, "tbl"))
expect_equal(nrow(test), 1)
expect_equal(ncol(test), 12)
})
test_numeric <- c(2016.00, 2016.25, 2016.50, 2016.75)
test_that("tk_get_timeseries_summary(numeric) test returns correct format.", {
expect_error(tk_get_timeseries_summary(test_numeric))
})
test_default <- letters
test_that("tk_get_timeseries_summary(default) test returns correct format.", {
expect_error(tk_get_timeseries_summary(test_default))
})
test_date_vars <- tibble::tibble(
my.date = ymd(c("2016-01-01", "2016-01-02")),
my.chr = c("a", "b"),
my.datetime = ymd_hms(c("2016-01-01 00:00:00", "2016-01-02 00:00:00")),
my.yearmon = as.yearmon(c("2016-01", "2016-01")),
more.chr = c("x", "y"),
my.yearqtr = as.yearqtr(c("2016 Q1", "2016 Q1"))
)
test_that("tk_get_timeseries_variables() test returns correct format.", {
expect_equal(tk_get_timeseries_variables(test_date_vars),
c("my.date", "my.datetime", "my.yearmon", "my.yearqtr"))
})
test <- tk_get_timeseries_unit_frequency()
test_that("tk_get_timeseries_unit_frequency() test returns correct format.", {
expect_equal(nrow(test), 1)
})
|
library(fda)
fdaarray = handwrit
fdatime <- seq(0, 2300, len=1401)
fdarange <- c(0, 2300)
nbasis <- 1406
norder <- 7
fdabasis <- create.bspline.basis(fdarange, nbasis, norder)
fdafd <- fd(array(0, c(nbasis,20,2)), fdabasis)
lambda <- 1e8
fdaPar <- fdPar(fdafd, 5, lambda)
smoothList <- smooth.basis(fdatime, fdaarray, fdaPar)
fdafd <- smoothList$fd
df <- smoothList$df
gcv <- smoothList$gcv
fdafd$fdnames[[1]] <- "Milliseconds"
fdafd$fdnames[[2]] <- "Replications"
fdafd$fdnames[[3]] <- "Metres"
df
totalgcv <- sum(gcv)
totalgcv
RMSgcv <- sqrt(totalgcv)*1000
RMSgcv
par(mfrow=c(2,1),pty="m")
plotfit.fd(fdaarray, fdatime, fdafd)
par(mfrow=c(2,1),pty="m",ask=FALSE)
plot(fdafd)
fdameanfd <- mean(fdafd)
fdamat <- eval.fd(fdatime, fdafd)
fdameanmat <- apply(fdamat, c(1,3), mean)
cycle <- seq(0,2300,119)
ncycle <- length(cycle)
fdamatcycle <- eval.fd(cycle, fdafd)
fdameanmatcycle <- apply(fdamatcycle,c(1,3),mean)
featureindex <- c(3, 5, 7, 10, 13, 16, 19)
fdafeature <- fdamatcycle[featureindex,,]
fdameanfeature <- fdameanmatcycle[featureindex,]
par(mfrow=c(1,1), pty="s")
plot(fdameanmat[,1], fdameanmat[,2], type="l", lwd=2,
xlab="Metres", ylab="Metres",
xlim=c(-.040, .040), ylim=c(-.040, .040),
main="Mean script")
points(fdameanmatcycle[-featureindex,1],
fdameanmatcycle[-featureindex,2], cex=1.2,col=2,lwd=4)
points(fdameanfeature[,1], fdameanfeature[,2], cex=1.2,col=3,lwd=4)
par(mfrow=c(1,1), pty="s",ask=TRUE)
for (i in 1:20) {
plot(fdamat[,i,1], fdamat[,i,2], type="l", lwd=2,
xlab="Metres", ylab="Metres",
xlim=c(-.040, .040), ylim=c(-.040, .040),
main=paste("Script",i))
points(fdamatcycle[-featureindex,i,1],
fdamatcycle[-featureindex,i,2], cex=1.2,col=2,lwd=4)
points(fdafeature[,i,1], fdafeature[,i,2], cex=1.2,col=3,lwd=4)
lines( fdameanmat[,1], fdameanmat[,2], lty=4)
points(fdameanmatcycle[-featureindex,1],
fdameanmatcycle[-featureindex,2], cex=1.2,col=2,lwd=4)
points(fdameanfeature[,1], fdameanfeature[,2], cex=1.2,col=3,lwd=4)
}
D1fdamat <- eval.fd(fdatime, fdafd, 1)
D2fdamat <- eval.fd(fdatime, fdafd, 2)
D3fdamat <- eval.fd(fdatime, fdafd, 3)
D1fdameanmat <- apply(D1fdamat, c(1,3), mean)
D2fdameanmat <- apply(D2fdamat, c(1,3), mean)
D3fdameanmat <- apply(D3fdamat, c(1,3), mean)
par(mfrow=c(1,1), mar=c(5,5,4,2), pty="m",ask=TRUE)
for (i in 1:20) {
matplot(fdatime, cbind(1e6*D2fdamat[,i,1],1e6*D2fdamat[,i,2]),
type="l", lty=1, cex=1.2, col=c(2,4),
xlim=c(0, 2300), ylim=c(-12, 12),
xlab="Milliseconds", ylab="Meters/msec/msec",
main=paste("Curve ",i))
abline(h=0, lty=2)
plotrange <- c(-12,12)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
legend(1800,11.5, c("X", "Y"), lty=1, col=c(2,4))
}
D2mag <- sqrt(D2fdamat[,,1]^2 + D2fdamat[,,2]^2)
D2magmean <- apply(D2mag,1,mean)
cexval <- 1.2
par(mfrow=c(1,1), mar=c(5,5,4,2)+cexval+2, pty="m",ask=FALSE)
matplot(fdatime, 1e6*D2mag, type="l", cex=1.2,
xlab="Milliseconds", ylab="Metres/sec/sec",
xlim=c(0,2300), ylim=c(0,12),
main="Acceleration Magnitude")
plotrange <- c(0,12)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
par(mfrow=c(1,1), mar=c(5,5,4,2)+cexval+2, pty="m")
plot(fdatime, 1e6*D2magmean, type="l", cex=1.2,
xlab="Milliseconds", ylab="Metres/sec/sec",
xlim=c(0,2300), ylim=c(0,8),
main="Mean acceleration Magnitude")
plotrange <- c(0,8)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
par(mfrow=c(1,1), mar=c(5,5,4,2), pty="m", ask=TRUE)
plotrange <- c(0,12)
for (i in 1:20) {
plot(fdatime, 1e6*D2mag[,i], type="l", cex=1.2,
xlim=c(0,2300), ylim=c(0,12),
xlab="Milliseconds", ylab="Metres/sec/sec",
main=paste("Script ",i))
lines(fdatime, 1e6*D2magmean, lty=3)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
}
difeorder <- 3
ufdlist <- vector("list", 2)
constbasis <- create.constant.basis(fdarange)
constfd <- fd(matrix(1,1,20), constbasis)
ufdlist[[1]] <- constfd
linbasis <- create.monomial.basis(fdarange, 2)
lincoef <- matrix(0,2,20)
lincoef[2,] <- 1
ufdlist[[2]] <- fd(lincoef, linbasis)
awtlist <- vector("list", 2)
constfd <- fd(1, constbasis)
constfdPar <- fdPar(constfd)
awtlist[[1]] <- constfdPar
awtlist[[2]] <- constfdPar
wbasis125 <- create.bspline.basis(fdarange, 125)
fdafdX <- smooth.basis(fdatime, fdaarray[,,1], fdaPar)$fd
xfdlist <- vector("list", 1)
xfdlist[[1]] <- fdafdX
bwtlist <- vector("list", 3)
bfd <- fd(matrix(0,1,1), constbasis)
bfdPar <- fdPar(bfd, 1, 0)
bwtlist[[1]] <- bfdPar
bwtlist[[2]] <- bfdPar
bwtlist[[3]] <- bfdPar
pdaList <- pda.fd(xfdlist, bwtlist, awtlist, ufdlist)
bestwtlist <- pdaList$bwtlist
aestwtlist <- pdaList$awtlist
resfdlist <- pdaList$resfdlist
resfd <- resfdlist[[1]]
resmat <- eval.fd(fdatime, resfd)
MSY <- mean(resmat^2)
MSY
bfd <- fd(matrix(0,125,1), wbasis125)
bfdPar <- fdPar(bfd, 1, 0)
bwtlist <- vector("list", 3)
bwtlist[[1]] <- bfdPar
bwtlist[[2]] <- bfdPar
bwtlist[[3]] <- bfdPar
pdaList <- pda.fd(xfdlist, bwtlist, awtlist, ufdlist, difeorder)
bestwtlist <- pdaList$bwtlist
aestwtlist <- pdaList$awtlist
resfdlist <- pdaList$resfdlist
resfd <- resfdlist[[1]]
resmat <- eval.fd(fdatime, resfd)
MSE <- mean(resmat^2)
RSQ <- (MSY-MSE)/MSY
RSQ
par(mfrow=c(3,1), ask=FALSE)
for (j in 1:3) {
betafdPar <- bestwtlist[[j]]
plot(betafdPar$fd, cex=1, ylab=paste("Weight function ",j-1))
}
b2fdParX <- bestwtlist[[2]]
b2fdX <- b2fdParX$fd
b2vecX <- eval.fd(fdatime, b2fdX)
b2meanX <- mean(b2vecX)
par(mfrow=c(1,1), pty="m")
plot(fdatime, b2vecX, type="l", cex=1.2,
xlim=c(0, 2300), ylim=c(0, 6e-3))
abline(h=b2meanX, lty=3)
plotrange <- c(0,6e-3)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
aestwtlist[[1]]$fd$coefs
aestwtlist[[2]]$fd$coefs
par(mfrow=c(1,1), pty="m")
matplot(fdatime, 1e9*resmat, type="l", cex=1.2,
xlim=c(0,2300), ylim=c(-200,200),
xlab="Milliseconds", ylab="Meters/sec/sec/sec")
lines(fdatime, 1e9*D3fdameanmat[,1], lty=2)
resmeanfd <- mean(resfd)
resmeanvec <- eval.fd(fdatime, resmeanfd)
par(mfrow=c(1,1), pty="m")
plot(fdatime, 1e9*resmeanvec, type="l", cex=1.2, col=2,
xlim=c(0,2300), ylim=c(-200,200),
xlab="Milliseconds", ylab="Meters/sec/sec/sec")
lines(fdatime, 1e9*D3fdameanmat[,1], lty=2, col=3)
wcoef1 <- bestwtlist[[1]]$fd$coefs
wcoef2 <- bestwtlist[[2]]$fd$coefs
wcoef3 <- bestwtlist[[3]]$fd$coefs
wcoef <- cbind(wcoef1, wcoef2, wcoef3)
wfd <- fd(wcoef,wbasis125)
fdaLfd <- Lfd(difeorder, fd2list(wfd))
ystart <- matrix(0,3,3)
ystart[1,1] <- fdameanmat[1,1]
ystart[2,2] <- D1fdameanmat[1,1]
ystart[3,3] <- D2fdameanmat[1,1]
EPSval = 1e-4
odeList <- odesolv(bestwtlist, ystart, EPS=EPSval, MAXSTP=1e6)
tX <- odeList[[1]]
yX <- odeList[[2]]
par(mfrow=c(3,1), pty="m")
pltrng <- c(min(yX[1,,]), max(yX[1,,]))
matplot(tX, t(yX[1,,]), type="l", lty=1, ylim=pltrng, main="Function")
abline(h=0, lty=2)
pltrng <- c(min(yX[2,,]), max(yX[2,,]))
matplot(tX, t(yX[2,,]), type="l", lty=1, ylim=pltrng, main="Derivative")
abline(h=0, lty=2)
pltrng <- c(min(yX[3,,]), max(yX[3,,]))
matplot(tX, t(yX[3,,]), type="l", lty=1, ylim=pltrng, main="Second derivative")
abline(h=0, lty=2)
umatx <- matrix(0,length(fdatime),3)
umatx[,1] <- approx(tX, t(yX[1,1,]), fdatime)$y
umatx[,2] <- approx(tX, t(yX[1,2,]), fdatime)$y
umatx[,3] <- approx(tX, t(yX[1,3,]), fdatime)$y
Dumatx <- matrix(0,length(fdatime),3)
Dumatx[,1] <- approx(tX, t(yX[2,1,]), fdatime)$y
Dumatx[,2] <- approx(tX, t(yX[2,2,]), fdatime)$y
Dumatx[,3] <- approx(tX, t(yX[2,3,]), fdatime)$y
D2umatx <- matrix(0,length(fdatime),3)
D2umatx[,1] <- approx(tX, t(yX[3,1,]), fdatime)$y
D2umatx[,2] <- approx(tX, t(yX[3,2,]), fdatime)$y
D2umatx[,3] <- approx(tX, t(yX[3,3,]), fdatime)$y
par(mfrow=c(1,1), ask=TRUE, pty="m")
index <- 1:20
fdamat <- eval.fd(fdatime, fdafd)
zmat <- cbind(fdatime-1150,umatx)
for (i in index) {
xhat <- fdamat[,i,1] - lsfit(zmat, fdamat[,i,1])$residual
matplot(fdatime, cbind(xhat, fdamat[,i,1]),
type="l", lty=c(1,3), cex=1.2,
xlim=c(0, 2300), ylim=c(-0.04, 0.04),
main=paste("X curve ",i))
}
fdafdY <- smooth.basis(fdatime, fdaarray[,,2], fdaPar)$fd
yfdlist <- vector("list", 1)
yfdlist[[1]] <- fdafdY
bwtlist <- vector("list", 3)
bfd <- fd(matrix(0,1,1), constbasis)
bfdPar <- fdPar(bfd, 1, 0)
bwtlist[[1]] <- bfdPar
bwtlist[[2]] <- bfdPar
bwtlist[[3]] <- bfdPar
pdaList <- pda.fd(yfdlist, bwtlist, awtlist, ufdlist, difeorder)
bestwtlist <- pdaList$bwtlist
aestwtlist <- pdaList$awtlist
resfdlist <- pdaList$resfdlist
resfd <- resfdlist[[1]]
resmat <- eval.fd(fdatime, resfd)
MSY <- mean(resmat^2)
MSY
bfd <- fd(matrix(0,125,1), wbasis125)
bfdPar <- fdPar(bfd, 1, 0)
bwtlist <- vector("list", 3)
bwtlist[[1]] <- bfdPar
bwtlist[[2]] <- bfdPar
bwtlist[[3]] <- bfdPar
pdaList <- pda.fd(yfdlist, bwtlist, awtlist, ufdlist, difeorder)
bestwtlist <- pdaList$bwtlist
aestwtlist <- pdaList$awtlist
resfdlist <- pdaList$resfdlist
resfd <- resfdlist[[1]]
resmat <- eval.fd(fdatime, resfd)
MSE <- mean(resmat^2)
RSQ <- (MSY-MSE)/MSY
RSQ
par(mfrow=c(3,1),ask=FALSE)
for (j in 1:3) {
betafdPar <- bestwtlist[[j]]
plot(betafdPar$fd, cex=1, ylab=paste("Weight function ",j-1))
}
b2fdParY <- bestwtlist[[2]]
b2fdY <- b2fdParY$fd
b2vecY <- eval.fd(fdatime, b2fdY)
b2meanY <- mean(b2vecY)
par(mfrow=c(1,1), pty="m", ask=FALSE)
plot(fdatime, b2vecY, type="l", cex=1.2,
xlim=c(0, 2300), ylim=c(0, 6e-3))
abline(h=b2meanY, lty=3)
plotrange <- c(0,6e-3)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
aestwtlist[[1]]$fd$coefs
aestwtlist[[2]]$fd$coefs
par(mfrow=c(1,1), ask=FALSE, pty="m")
matplot(fdatime, 1e9*resmat, type="l", cex=1.2,
xlim=c(0,2300), ylim=c(-200,200),
xlab="Milliseconds", ylab="Meters/sec/sec/sec")
lines(fdatime, 1e9*D3fdameanmat[,1], lty=2)
resmeanfd <- mean(resfd)
resmeanvec <- eval.fd(fdatime, resmeanfd)
par(mfrow=c(1,1), ask=FALSE, pty="m")
plot(fdatime, 1e9*resmeanvec, type="l", cex=1.2, col=2,
xlim=c(0,2300), ylim=c(-200,200),
xlab="Milliseconds", ylab="Meters/sec/sec/sec")
lines(fdatime, 1e9*D3fdameanmat[,1], lty=2, col=3)
wcoef1 <- bestwtlist[[1]]$fd$coefs
wcoef2 <- bestwtlist[[2]]$fd$coefs
wcoef3 <- bestwtlist[[3]]$fd$coefs
wcoef <- cbind(wcoef1, wcoef2, wcoef3)
wfd <- fd(wcoef,wbasis125)
fdaLfd <- Lfd(difeorder, fd2list(wfd))
ystart <- matrix(0,3,3)
ystart[1,1] <- fdameanmat[1,2]
ystart[2,2] <- D1fdameanmat[1,2]
ystart[3,3] <- D2fdameanmat[1,2]
EPSval = 1e-4
odeList <- odesolv(bestwtlist, ystart, EPS=EPSval, MAXSTP=1e6)
tY <- odeList[[1]]
yY <- odeList[[2]]
par(mfrow=c(3,1), ask=FALSE, pty="m")
pltrng <- c(min(yY[1,,]), max(yY[1,,]))
matplot(tY, t(yY[1,,]), type="l", lty=1, ylim=pltrng, main="Function")
abline(h=0, lty=2)
pltrng <- c(min(yY[2,,]), max(yY[2,,]))
matplot(tY, t(yY[2,,]), type="l", lty=1, ylim=pltrng, main="Derivative")
abline(h=0, lty=2)
pltrng <- c(min(yY[3,,]), max(yY[3,,]))
matplot(tY, t(yY[3,,]), type="l", lty=1, ylim=pltrng, main="Second derivative")
abline(h=0, lty=2)
umaty <- matrix(0,length(fdatime),3)
umaty[,1] <- approx(tY, t(yY[1,1,]), fdatime)$y
umaty[,2] <- approx(tY, t(yY[1,2,]), fdatime)$y
umaty[,3] <- approx(tY, t(yY[1,3,]), fdatime)$y
Dumaty <- matrix(0,length(fdatime),3)
Dumaty[,1] <- approx(tY, t(yY[2,1,]), fdatime)$y
Dumaty[,2] <- approx(tY, t(yY[2,2,]), fdatime)$y
Dumaty[,3] <- approx(tY, t(yY[2,3,]), fdatime)$y
D2umaty <- matrix(0,length(fdatime),3)
D2umaty[,1] <- approx(tY, t(yY[3,1,]), fdatime)$y
D2umaty[,2] <- approx(tY, t(yY[3,2,]), fdatime)$y
D2umaty[,3] <- approx(tY, t(yY[3,3,]), fdatime)$y
par(mfrow=c(1,1), ask=TRUE, pty="m")
index <- 1:20
fdamat <- array(0,c(1401,20,2))
fdamat[,,1] <- eval.fd(fdatime, fdafdX)
fdamat[,,2] <- eval.fd(fdatime, fdafdY)
zmat <- cbind(fdatime-1150,umaty)
for (i in index) {
yhat <- fdamat[,i,2] - lsfit(zmat, fdamat[,i,2])$residual
matplot(fdatime, cbind(yhat, fdamat[,i,2]),
type="l", lty=c(1,3), cex=1.2,
xlim=c(0, 2300), ylim=c(-0.04, 0.04),
main=paste("Y curve ",i))
}
par(mfrow=c(1,1), mar=c(5,5,4,2)+cexval+2, pty="m")
matplot(fdatime, cbind(b2vecX, b2vecY), type="l", lty=1, col=c(2,4),
xlim=c(0, 2300), ylim=c(0, 6e-3))
abline(h=b2meanX, lty=3, col=2)
abline(h=b2meanY, lty=3, col=4)
plotrange <- c(0,6e-3)
for (k in 1:length(cycle)) abline(v=cycle[k], lty=2)
for (j in 1:length(featureindex))
abline(v=cycle[featureindex[j]], lty=1)
|
"job_retention"
|
isGBK <- function(string, combine = FALSE)
{
string <- .verifyChar(string)
if (length(string) == 1) {
OUT <- .C("CWrapper_encoding_isgbk",
characters = as.character(string),
numres = 2L)
OUT <- as.logical(OUT$numres)
} else {
if (combine) {
OUT <- isGBK(paste(string, collapse = ""))
} else {
OUT <- as.vector(sapply(string, isGBK))
}
}
return(OUT)
}
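## Usage sketch (assumes the compiled routine CWrapper_encoding_isgbk from
## the parent package is loaded):
# isGBK(c("hello", "\u4f60\u597d"))                   # element-wise check
# isGBK(c("hello", "\u4f60\u597d"), combine = TRUE)   # single combined check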
|
ds.summ <- function(x,n){
des <- as.data.frame(matrix(nrow = dim(x)[2], ncol = 11))
names(des) <- c("name","obs","max","min","mean","median","mode","var","std","skew","kurt")
des[,1] <- names(x)
  des[,2] <- apply(x, 2, function(col) sum(!is.na(col)))  # non-missing count, consistent with na.rm elsewhere
des[,3] <- round(apply(x,2, max, na.rm = T), n)
des[,4] <- round(apply(x,2, min, na.rm = T), n)
des[,5] <- round(apply(x,2,mean, na.rm = T), n)
des[,6] <- round(apply(x,2,median, na.rm = T), n)
des[,7] <- round(apply(x,2,ds.mode), n)
des[,8] <- round(apply(x,2,var, na.rm = T), n)
des[,9] <- round(apply(x,2,sd, na.rm = T), n)
des[,10] <- round(apply(x,2,ds.skew), n)
des[,11] <- round(apply(x,2,ds.kurt), n)
return(des)
}
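## Usage sketch (assumes the companion helpers ds.mode(), ds.skew() and
## ds.kurt() are defined elsewhere in the same package):
# ds.summ(mtcars[, c("mpg", "hp")], n = 2)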
|
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "sbf_nb"
set.seed(2)
training_class <- twoClassSim(50)
testing_class <- twoClassSim(500)
trainX_class <- training_class[, -ncol(training_class)]
trainY_class <- training_class$Class
testX_class <- testing_class[, -ncol(testing_class)]
testY_class <- testing_class$Class
training_class$fact <- factor(sample(letters[1:3], size = nrow(training_class), replace = TRUE))
testing_class$fact <- factor(sample(letters[1:3], size = nrow(testing_class), replace = TRUE))
cctrl1 <- sbfControl(method = "cv", number = 3, returnResamp = "all", functions = nbSBF)
cctrl2 <- sbfControl(method = "LOOCV", functions = nbSBF)
set.seed(849)
test_cv_model_class <- sbf(x = trainX_class, y = trainY_class,
sbfControl = cctrl1)
set.seed(849)
test_loo_model_class <- sbf(x = trainX_class, y = trainY_class,
sbfControl = cctrl2)
set.seed(849)
test_cv_model_form_class <- sbf(Class ~ ., data = training_class,
sbfControl = cctrl1)
set.seed(849)
test_loo_model_form_class <- sbf(Class ~ ., data = training_class,
sbfControl = cctrl2)
test_cv_pred_class <- predict(test_cv_model_class, testX_class)
test_loo_pred_class <- predict(test_loo_model_class, testX_class)
test_cv_pred_form_class <- predict(test_cv_model_form_class, testing_class[, colnames(testing_class) != "Class"])
test_loo_pred_form_class <- predict(test_loo_model_form_class, testing_class[, colnames(testing_class) != "Class"])
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
if(!interactive())
q("no")
|
Votes.getBillsByStateRecent <-
function (amount=100, state="NA") {
Votes.getBillsByStateRecent.basic <- function (.amount, .state) {
request <- "Votes.getBillsByStateRecent?"
inputs <- paste("&amount=",.amount,"&state=",.state,sep="")
output <- pvsRequest(request,inputs)
output$state <- .state
output
}
output.list <- lapply(amount, FUN= function (y) {
lapply(state, FUN= function (s) {
Votes.getBillsByStateRecent.basic(.amount=y, .state=s)
}
)
}
)
output.list <- do.call("c",output.list)
coln <- which.is.max(sapply(output.list, ncol));
max.cols <- max(sapply(output.list, ncol));
output.list2 <- lapply(output.list, function(x){
    if (ncol(x) < max.cols) x <- data.frame(cbind(matrix(NA, ncol = max.cols - ncol(x), nrow = 1), x), row.names = NULL)
    names(x) <- names(output.list[[coln]])
x
})
output <- do.call("rbind",output.list2)
output
}
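## Usage sketch (requires a Project Vote Smart API key configured for
## pvsRequest(); which.is.max() comes from the nnet package):
# Votes.getBillsByStateRecent(amount = 50, state = c("NY", "CA"))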
|
DAISIE_sim_trait_dependent <- function(
time,
M,
pars,
replicates,
divdepmodel = "CS",
nonoceanic_pars = c(0, 0),
num_guilds = NULL,
sample_freq = 25,
plot_sims = TRUE,
island_ontogeny = "const",
sea_level = "const",
hyper_pars = create_hyper_pars(d = 0, x = 0),
area_pars = DAISIE::create_area_pars(
max_area = 1,
current_area = 1,
proportional_peak_t = 0,
total_island_age = 0,
sea_level_amplitude = 0,
sea_level_frequency = 0,
island_gradient_angle = 0),
extcutoff = 1000,
verbose = TRUE,
trait_pars = NULL,
...
) {
  testit::assert(
    "island_ontogeny is not valid input. Specify 'const' or 'beta'",
    is_island_ontogeny_input(island_ontogeny)
  )
  testit::assert(
    "sea_level is not valid input. Specify 'const' or 'sine'",
    is_sea_level_input(sea_level)
  )
testit::assert(
"length(pars) is not five",
length(pars) == 5
)
totaltime <- time
island_replicates <- list()
island_ontogeny <- translate_island_ontogeny(island_ontogeny)
sea_level <- translate_sea_level(sea_level)
if (divdepmodel == "IW") {
for (rep in 1:replicates) {
island_replicates[[rep]] <- DAISIE_sim_core_trait_dependent(
time = totaltime,
mainland_n = M,
pars = pars,
nonoceanic_pars = nonoceanic_pars,
island_ontogeny = island_ontogeny,
sea_level = sea_level,
hyper_pars = hyper_pars,
area_pars = area_pars,
extcutoff = extcutoff,
trait_pars = trait_pars
)
if (verbose == TRUE) {
print(paste("Island replicate ", rep, sep = ""))
}
}
island_replicates <- DAISIE_format_IW(
island_replicates = island_replicates,
time = totaltime,
M = M,
sample_freq = sample_freq,
verbose = verbose,
trait_pars = trait_pars)
}
if (divdepmodel == "CS") {
for (rep in 1:replicates) {
island_replicates[[rep]] <- list()
full_list <- list()
if(M == 0){
if(is.null(trait_pars)){
          stop("There are no species on the mainland.")
}else{
trait_pars_onecolonize <- create_trait_pars(
trans_rate = trait_pars$trans_rate,
immig_rate2 = trait_pars$immig_rate2,
ext_rate2 = trait_pars$ext_rate2,
ana_rate2 = trait_pars$ana_rate2,
clado_rate2 = trait_pars$clado_rate2,
trans_rate2 = trait_pars$trans_rate2,
M2 = 1)
for (m_spec in 1:trait_pars$M2) {
full_list[[m_spec]] <- DAISIE_sim_core_trait_dependent(
time = totaltime,
mainland_n = 0,
pars = pars,
nonoceanic_pars = nonoceanic_pars,
island_ontogeny = island_ontogeny,
sea_level = sea_level,
hyper_pars = hyper_pars,
area_pars = area_pars,
extcutoff = extcutoff,
trait_pars = trait_pars_onecolonize
)
}
}
}else{
trait_pars_addcol <- create_trait_pars(
trans_rate = trait_pars$trans_rate,
immig_rate2 = trait_pars$immig_rate2,
ext_rate2 = trait_pars$ext_rate2,
ana_rate2 = trait_pars$ana_rate2,
clado_rate2 = trait_pars$clado_rate2,
trans_rate2 = trait_pars$trans_rate2,
M2 = 0)
for (m_spec in 1:M) {
full_list[[m_spec]] <- DAISIE_sim_core_trait_dependent(
time = totaltime,
mainland_n = 1,
pars = pars,
nonoceanic_pars = nonoceanic_pars,
island_ontogeny = island_ontogeny,
sea_level = sea_level,
hyper_pars = hyper_pars,
area_pars = area_pars,
extcutoff = extcutoff,
trait_pars = trait_pars_addcol
)
}
for(m_spec in (M + 1):(M + trait_pars$M2))
{
trait_pars_onecolonize <- create_trait_pars(
trans_rate = trait_pars$trans_rate,
immig_rate2 = trait_pars$immig_rate2,
ext_rate2 = trait_pars$ext_rate2,
ana_rate2 = trait_pars$ana_rate2,
clado_rate2 = trait_pars$clado_rate2,
trans_rate2 = trait_pars$trans_rate2,
M2 = 1)
full_list[[m_spec]] <- DAISIE_sim_core_trait_dependent(
time = totaltime,
mainland_n = 0,
pars = pars,
nonoceanic_pars = nonoceanic_pars,
island_ontogeny = island_ontogeny,
sea_level = sea_level,
hyper_pars = hyper_pars,
area_pars = area_pars,
extcutoff = extcutoff,
trait_pars = trait_pars_onecolonize
)
}
}
island_replicates[[rep]] <- full_list
if (verbose == TRUE) {
print(paste("Island replicate ", rep, sep = ""))
}
}
island_replicates <- DAISIE_format_CS(
island_replicates = island_replicates,
time = totaltime,
M = M,
sample_freq = sample_freq,
verbose = verbose,
trait_pars = trait_pars
)
}
if (divdepmodel == "GW") {
if (!is.numeric(num_guilds)) {
stop("num_guilds must be numeric")
}
guild_size <- M / num_guilds
testit::assert(num_guilds < M)
testit::assert(M %% num_guilds == 0)
for (rep in 1:replicates) {
island_replicates[[rep]] <- list()
full_list <- list()
for (m_spec in 1:num_guilds) {
full_list[[m_spec]] <- DAISIE_sim_core_trait_dependent(
time = totaltime,
mainland_n = guild_size,
pars = pars,
nonoceanic_pars = nonoceanic_pars,
island_ontogeny = island_ontogeny,
sea_level = sea_level,
hyper_pars = hyper_pars,
area_pars = area_pars,
extcutoff = extcutoff
)
}
island_replicates[[rep]] <- full_list
if (verbose == TRUE) {
print(paste("Island replicate ", rep, sep = ""))
}
}
island_replicates <- DAISIE_format_GW(island_replicates = island_replicates,
time = totaltime,
M = M,
sample_freq = sample_freq,
num_guilds = num_guilds,
verbose = verbose)
}
if (plot_sims == TRUE) {
DAISIE_plot_sims(
island_replicates = island_replicates,
sample_freq = sample_freq,
trait_pars = trait_pars
)
}
return(island_replicates)
}
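## Usage sketch (illustrative parameter values only; assumes the DAISIE
## internals called above, e.g. DAISIE_sim_core_trait_dependent(), are loaded):
# sims <- DAISIE_sim_trait_dependent(
#   time = 4, M = 100, pars = c(2.5, 2.2, 20, 0.01, 1),
#   replicates = 2, plot_sims = FALSE,
#   trait_pars = create_trait_pars(
#     trans_rate = 0, immig_rate2 = 0.01, ext_rate2 = 0.2,
#     ana_rate2 = 1, clado_rate2 = 0.4, trans_rate2 = 0, M2 = 50))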
|
loessreg=function(trat,
resp,
degree=2,
ylab="Dependent",
xlab="Independent",
theme=theme_classic(),
legend.position="top",
error="SE",
point="all",
width.bar=NA,
scale="none",
textsize = 12,
pointsize = 4.5,
linesize = 0.8,
pointshape = 21,
fontfamily="sans"){
requireNamespace("ggplot2")
ymean=tapply(resp,trat,mean)
if(is.na(width.bar)==TRUE){width.bar=0.01*mean(trat)}
if(error=="SE"){ysd=tapply(resp,trat,sd)/sqrt(tapply(resp,trat,length))}
if(error=="SD"){ysd=tapply(resp,trat,sd)}
if(error=="FALSE"){ysd=0}
desvio=ysd
xmean=tapply(trat,trat,mean)
mod=loess(resp~trat,degree = degree)
xp=seq(min(trat),max(trat),length.out = 1000)
preditos=data.frame(x=xp,
y=predict(mod,newdata = data.frame(trat=xp)))
x=preditos$x
y=preditos$y
data=data.frame(xmean,ymean)
data1=data.frame(trat=xmean,resp=ymean)
s="~~~ Loess~regression"
if(point=="mean"){
graph=ggplot(data,aes(x=xmean,y=ymean))
if(error!="FALSE"){graph=graph+geom_errorbar(aes(ymin=ymean-ysd,ymax=ymean+ysd),
width=width.bar,
size=linesize)}
graph=graph+
geom_point(aes(color="black"),size=pointsize,shape=pointshape,fill="gray")}
if(point=="all"){
graph=ggplot(data.frame(trat,resp),aes(x=trat,y=resp))
graph=graph+
geom_point(aes(color="black"),size=pointsize,shape=pointshape,fill="gray")}
graph=graph+theme+
    geom_line(data=preditos,aes(x=x,y=y,color="black"),size=linesize)+
scale_color_manual(name="",values=1,label="Loess regression")+
theme(axis.text = element_text(size=textsize,color="black",family = fontfamily),
axis.title = element_text(size=textsize,color="black",family = fontfamily),
legend.position = legend.position,
legend.text = element_text(size=textsize,family = fontfamily),
legend.direction = "vertical",
legend.text.align = 0,
legend.justification = 0)+
ylab(ylab)+xlab(xlab)
if(scale=="log"){graph=graph+scale_x_log10()}
temp1=seq(min(trat),max(trat),length.out=10000)
result=predict(mod,newdata = data.frame(trat=temp1),type="response")
maximo=temp1[which.max(result)]
respmax=result[which.max(result)]
minimo=temp1[which.min(result)]
respmin=result[which.min(result)]
graphs=data.frame("Parameter"=c("X Maximum",
"Y Maximum",
"X Minimum",
"Y Minimum"),
"values"=c(maximo,
respmax,
minimo,
respmin))
graficos=list("test"=graphs,graph)
print(graficos)
}
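## Usage sketch (requires ggplot2; toy data with hypothetical values):
# trat <- rep(1:5, each = 3)                             # doses, 3 replicates each
# resp <- rnorm(15, mean = 10 + 2 * rep(1:5, each = 3))
# loessreg(trat, resp, degree = 2)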
|
convertFx <- function(x,
data,
from = c("mx", "qx", "dx", "lx"),
to = c("mx", "qx", "dx", "lx", "Lx", "Tx", "ex"),
...) {
from <- match.arg(from)
to <- match.arg(to)
L <- switch(from,
mx = function(x, w, ...) LifeTable(x, mx = w, ...),
qx = function(x, w, ...) LifeTable(x, qx = w, ...),
dx = function(x, w, ...) LifeTable(x, dx = w, ...),
lx = function(x, w, ...) LifeTable(x, lx = w, ...))
if (is.vector(data)) {
if (length(x) != length(data))
stop("The 'x' and 'data' do not have the same length", call. = FALSE)
out <- L(x = x, data, ...)$lt[, to]
names(out) <- names(data)
} else {
if (length(x) != nrow(data))
      stop("The length of 'x' must be equal to the number of rows in 'data'",
call. = FALSE)
LT <- function(D) L(x = x, as.numeric(D), ...)$lt[, to]
out <- apply(X = data, 2, FUN = LT)
dimnames(out) <- dimnames(data)
}
return(out)
}
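# Usage sketch for convertFx(), assuming LifeTable() is the life-table
# constructor from the MortalityLaws package and using toy Gompertz-like
# death probabilities. Guarded by if (FALSE) so it is not run on source.
if (FALSE) {
  library(MortalityLaws)
  x  <- c(0, 1, seq(5, 100, by = 5))
  qx <- 1 - exp(-0.0001 * exp(0.09 * x))
  ex <- convertFx(x, data = qx, from = "qx", to = "ex")
  head(ex)
}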
|
require(coda)
tess.pathSampling <- function(likelihoodFunction,priorFunction,parameters,logTransforms,iterations,burnin=round(iterations/3),K=50) {
x <- K:0 / K
beta <- qbeta(x,0.3,1)
pp <- likelihoodFunction(parameters)
prior <- 0
for ( j in 1:length(parameters) ) {
prior <- prior + priorFunction[[j]](parameters[j])
}
pathValues <- c()
for (k in 1:length(beta)) {
b <- beta[k]
samples <- c()
for (i in 1:(iterations+burnin)) {
for ( j in 1:length(parameters) ) {
new_prior <- prior - priorFunction[[j]](parameters[j])
if ( logTransforms[j] == TRUE ) {
if (parameters[j] == 0) {
stop("Cannot propose new value for a parameter with value 0.0.")
}
eta <- log(parameters[j])
new_eta <- eta + rnorm(1,0,1)
new_val <- exp(new_eta)
hr <- log(new_val / parameters[j])
parameters[j] <- new_val
new_pp <- likelihoodFunction(parameters)
new_prior <- new_prior + priorFunction[[j]](parameters[j])
if ( b == 0.0 ) {
acceptance_ratio <- new_prior-prior+hr
} else {
acceptance_ratio <- new_prior-prior+b*new_pp-b*pp+hr
}
if (is.finite(new_pp) && is.finite(new_prior) && acceptance_ratio > log(runif(1,0,1)) ) {
pp <- new_pp
prior <- new_prior
} else {
parameters[j] <- exp(eta)
}
} else {
eta <- parameters[j]
new_val <- eta + rnorm(1,0,1)
hr <- 0.0
parameters[j] <- new_val
new_pp <- likelihoodFunction(parameters)
new_prior <- new_prior + priorFunction[[j]](parameters[j])
if ( b == 0.0 ) {
acceptance_ratio <- new_prior-prior+hr
} else {
acceptance_ratio <- new_prior-prior+b*new_pp-b*pp+hr
}
if (is.finite(new_pp) && is.finite(new_prior) && acceptance_ratio > log(runif(1,0,1)) ) {
pp <- new_pp
prior <- new_prior
} else {
parameters[j] <- eta
}
}
}
if (i > burnin) {
samples[i-burnin] <- pp
}
}
tmp <- max(samples)
pathValues[k] <- mean(samples)
}
BF <- 0
for (i in 1:K) {
BF <- BF + (pathValues[i] + pathValues[i+1])*(beta[i]-beta[i+1])/2
}
return (BF)
}
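# Toy usage sketch for tess.pathSampling(): a normal likelihood for simulated
# data with exponential priors on mean and sd (both sampled on the log scale).
# All values are illustrative; guarded by if (FALSE) because the run is slow.
if (FALSE) {
  set.seed(42)
  obs <- rnorm(50, mean = 2, sd = 1)
  lik <- function(p) sum(dnorm(obs, mean = p[1], sd = p[2], log = TRUE))
  priors <- list(function(x) dexp(x, rate = 1, log = TRUE),
                 function(x) dexp(x, rate = 1, log = TRUE))
  tess.pathSampling(lik, priors, parameters = c(1, 1),
                    logTransforms = c(TRUE, TRUE),
                    iterations = 500, K = 20)
}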
|
gng.plot.fit <-
function (data, obj, resolution=100,breaks=100,legpos=NULL,xlim=NULL,
main=NULL,...)
{
obs <- unlist(data);
if(is.null(xlim)){ xlim=range(obs);}
if(is.null(main)){main="Goodness of Fit";}
hist(obs,freq=FALSE,breaks=breaks,xlim=xlim,main=main,...);
gng.plot.mix(obj,resolution=resolution,col='black',lwd=3,new.plot=FALSE,...);
gng.plot.comp(data,obj,new.plot=FALSE,xlim=xlim,legpos=legpos,lwd=2,...);
}
|
treecheck<-function(trees){
if(inherits(trees,"multiPhylo")){
b<-c()
u<-c()
for (i in 1:length(trees)){
if(!is.binary.phylo(trees[[i]])) warning(paste("Tree number",i,"is not binary"))
b[i]<-is.binary.phylo(trees[[i]])
if(!is.ultrametric(trees[[i]])) warning(paste("Tree number",i,"is not ultrametric"))
u[i]<-is.ultrametric(trees[[i]])
}
if(all(b)) print("All trees are binary")
if(all(u)) print("All trees are ultrametric")
}
else if(inherits(trees,"phylo")) {
if(is.binary.phylo(trees)) print("Tree is binary") else print("Tree is not binary")
if(is.ultrametric(trees)) print("Tree is ultrametric") else print("Tree is not ultrametric")
}
else stop('Trees must be of class multiPhylo or phylo')
}
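# Usage sketch for treecheck(); assumes the ape package, which provides
# is.binary.phylo(), is.ultrametric(), rcoal() and rmtree().
if (FALSE) {
  library(ape)
  treecheck(rcoal(10))      # single tree: binary and ultrametric
  treecheck(rmtree(3, 10))  # multiPhylo: random, non-ultrametric trees
}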
|
get_distname_family <- function(distname) {
check_distname(distname)
out <- list(location = FALSE,
scale = FALSE,
is.non.negative = FALSE)
if (distname %in% c("cauchy", "normal", "t", "unif")) {
out$location <- TRUE
out$scale <- TRUE
} else if (distname %in% c("exp", "chisq", "gamma", "F", "f")) {
out$scale <- TRUE
out$is.non.negative <- TRUE
}
return(out)
}
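# Usage sketch; check_distname() is assumed to validate the distribution name,
# as in the LambertW package from which this helper appears to originate.
if (FALSE) {
  get_distname_family("normal")  # location-scale family
  get_distname_family("gamma")   # scale family with non-negative support
}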
|
context("partition matrix to vector")
test_that(
"part_matrix_to_vector returns the index of the nonzero column for each row", {
test_matrix <- matrix(
c(
1, 0, 0,
1, 0, 0,
0, 0, 1,
0, 1, 0,
0, 0, 1,
0, 1, 0
),
ncol = 3, byrow = TRUE
)
expect_equal(part_matrix_to_vector(test_matrix), c(1, 1, 3, 2, 3, 2))
})
|
library(knitr)
options(width = 90)
opts_chunk$set(comment = "", warning = FALSE, message = FALSE,
echo = TRUE, tidy = TRUE)
library(lsasim)
packageVersion("lsasim")
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, theta = TRUE, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_vars = 4, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_vars = 4, theta = TRUE, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 3, n_W = 0, theta = TRUE, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 3, theta = TRUE, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, cat_prop = list(1, 1, 1, 1), theta = TRUE, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 0, n_W = 2, family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 0, n_W = list(2, 4, 4, 4), family = "gaussian")
str(bg)
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 2, n_W = list(2, 2), theta = TRUE,
c_mean = c(500, 0, 0), c_sd = c(100, 1, 1), family = "gaussian")
str(bg)
set.seed(4388)
props <- list(1, c(.25, 1), c(.2, .8, 1))
yw_cov <- matrix(c(1, .5, .5, .5, 1, .8, .5, .8, 1), nrow = 3)
bg <- questionnaire_gen(n_obs = 100, cat_prop = props, cov_matrix = yw_cov,
c_mean = 2,
family = "gaussian")
str(bg)
|
quote2box <- function(quote, boxsize=1, log=FALSE) {
warning("Function quote2box() is deprecated and should be replaced by quote2boxnumber()!")
if (!is.numeric(quote)) {
stop("Argument quote has to be numeric!")
}
if (!is.numeric(boxsize)) {
stop("Argument boxsize has to be numeric!")
}
if (!is.logical(log)) {
stop("Argument log has to be logical")
}
if (log & min(quote)<=0) {
stop("Argument quotes must be greater than zero, if log=TRUE!")
}
if (length(boxsize)>1){
stop("Argument boxsize is vector of length greater than 1. This is not supported yet!")
}
if (log==TRUE) {
mylog <- function(x) {
log(x)
}
} else {
mylog <- function(x) {
x
}
}
result <- as.integer(floor(mylog(quote)/boxsize))
result
}
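# Usage sketch for quote2box() with made-up closing quotes; the deprecation
# warning is emitted by design.
if (FALSE) {
  quote2box(c(99.2, 100.7, 103.4), boxsize = 1)             # 99 100 103
  quote2box(c(99.2, 100.7, 103.4), boxsize = 0.01, log = TRUE)
}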
|
new_linear_variable <- function(column_idx) {
structure(list(column_idx = column_idx), class = "OmprLinearVariable")
}
new_variable_collection <- function(map) {
structure(
list(map = map),
class = "OmprLinearVariableCollection"
)
}
new_linear_term <- function(variable, coefficient) {
structure(
list(variable = variable, coefficient = coefficient),
class = c("LinearTerm", "AbstractLinearFunction")
)
}
new_linear_function <- function(terms, constant) {
  map <- fastmap::fastmap()
if (length(terms) > 0) {
terms <- setNames(
terms,
vapply(terms, function(x) {
as.character(x$variable$column_idx)
}, character(1))
)
map$mset(.list = terms)
}
owner <- new_ownership_id()
map$set("owner", owner)
structure(
list(terms = map, constant = constant, owner = owner),
class = c("LinearFunction", "AbstractLinearFunction")
)
}
.OwnerShipManager <- new.env(hash = FALSE, size = 1L)
.OwnerShipManager$counter <- 1
new_ownership_id <- function() {
.OwnerShipManager$counter <- .OwnerShipManager$counter + 1
}
`+.AbstractLinearFunction` <- function(x, y) {
if (inherits(x, "LinearTerm")) {
add.LinearTerm(x, y)
} else if (inherits(x, "LinearFunction")) {
add.LinearFunction(x, y)
} else if (is.numeric(x)) {
add.numeric(x, y)
} else {
not_supported()
}
}
add.LinearFunction <- function(x, y) {
if (missing(y)) {
x
} else if (inherits(y, "LinearTerm")) {
x + (y + 0)
} else if (inherits(y, "LinearFunction")) {
x$constant <- x$constant + y$constant
new_terms <- merge_terms(x, y)
x$terms <- new_terms
update_owner(x)
} else if (is.numeric(y)) {
x$constant <- x$constant + y
x
} else {
not_supported()
}
}
add.LinearTerm <- function(x, y) {
if (missing(y)) {
x
} else if (inherits(y, "LinearTerm")) {
(x + 0) + (y + 0)
} else if (inherits(y, "LinearFunction")) {
y + x
} else if (is.numeric(y)) {
new_linear_function(list(x), y)
} else {
not_supported()
}
}
add.numeric <- function(x, y) {
if (missing(y)) {
x
} else if (inherits(y, "LinearTerm")) {
new_linear_function(list(y), x)
} else if (inherits(y, "LinearFunction")) {
y$constant <- y$constant + x
y
} else if (is.numeric(y)) {
unreachable()
} else {
not_supported()
}
}
`-.AbstractLinearFunction` <- function(x, y) {
if (missing(y)) {
-1 * x
} else {
x + (-1 * y)
}
}
`*.AbstractLinearFunction` <- function(x, y) {
if (inherits(x, "LinearTerm")) {
multiply.LinearTerm(x, y)
} else if (inherits(x, "LinearFunction")) {
multiply.LinearFunction(x, y)
} else if (is.numeric(x)) {
multiply.numeric(x, y)
} else {
not_supported()
}
}
multiply.LinearTerm <- function(x, y) {
if (inherits(y, "LinearTerm")) {
abort("Quadratic expression are not supported")
} else if (inherits(y, "LinearFunction")) {
abort("Quadratic expression are not supported")
} else if (is.numeric(y)) {
x$coefficient <- x$coefficient * y
x
} else {
not_supported()
}
}
multiply.LinearFunction <- function(x, y) {
if (inherits(y, "LinearTerm")) {
abort("Quadratic expression are not supported")
} else if (inherits(y, "LinearFunction")) {
abort("Quadratic expression are not supported")
} else if (is.numeric(y)) {
x$constant <- x$constant * y
update_terms(x, function(value) value * y)
update_owner(x)
} else {
not_supported()
}
}
multiply.numeric <- function(x, y) {
if (inherits(y, "LinearTerm")) {
y$coefficient <- y$coefficient * x
y
} else if (inherits(y, "LinearFunction")) {
y$constant <- y$constant * x
update_terms(y, function(value) value * x)
update_owner(y)
} else {
not_supported()
}
}
`/.AbstractLinearFunction` <- function(x, y) {
if (inherits(x, "LinearTerm")) {
divide.LinearTerm(x, y)
} else if (inherits(x, "LinearFunction")) {
divide.LinearFunction(x, y)
} else {
not_supported()
}
}
divide.LinearFunction <- function(x, y) {
if (is.numeric(y)) {
x$constant <- x$constant / y
update_terms(x, function(value) value / y)
update_owner(x)
} else {
abort("Operation not supported")
}
}
divide.LinearTerm <- function(x, y) {
if (is.numeric(y)) {
x$coefficient <- x$coefficient / y
x
} else {
abort("Operation not supported")
}
}
update_terms <- function(linear_fun, update_fun) {
terms <- linear_fun$terms
check_ownership(linear_fun)
for (key in terms$keys()) {
if (key == "owner") {
next
}
terms$set(key, update_fun(terms$get(key)))
}
invisible(linear_fun)
}
merge_terms <- function(linear_fun1, linear_fun2) {
terms1 <- linear_fun1$terms
terms2 <- linear_fun2$terms
check_ownership(linear_fun1)
check_ownership(linear_fun2)
for (key in terms2$keys()) {
if (key == "owner") {
next
}
if (terms1$has(key)) {
term1 <- terms1$get(key)
term2 <- terms2$get(key)
term1$coefficient <- term1$coefficient + term2$coefficient
terms1$set(key, term1)
} else {
terms1$set(key, terms2$get(key))
}
}
terms1
}
terms_list <- function(linear_function) {
check_ownership(linear_function)
x <- linear_function$terms$as_list()
x[names(x) != "owner"]
}
update_owner <- function(fun) {
fun$owner <- new_ownership_id()
fun$terms$set("owner", fun$owner)
fun
}
check_ownership <- function(linear_function) {
if (linear_function$owner != linear_function$terms$get("owner")) {
abort(
paste(
"A linear functions is used without being the owner of the",
"underlying data structure. This is a bug. Please report that",
"as an issue."
)
)
}
}
not_supported <- function() {
abort("Operation not supported")
}
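# Usage sketch for the linear-expression helpers above. abort() is assumed to
# come from rlang (a base-R stand-in is defined here). Note that LinearFunction
# objects share a mutable fastmap, so an intermediate like f must not be reused
# after it has been consumed; check_ownership() guards against exactly that.
if (FALSE) {
  abort <- function(msg) stop(msg)  # stand-in if rlang is not attached
  x1 <- new_linear_term(new_linear_variable(1), coefficient = 1)
  x2 <- new_linear_term(new_linear_variable(2), coefficient = 1)
  f  <- 2 * x1 + 3 * x2 + 5   # LinearFunction: 2*x1 + 3*x2 + 5
  f2 <- (f - x1) / 2          # 0.5*x1 + 1.5*x2 + 2.5
  terms_list(f2)              # LinearTerm objects keyed by column index
  f2$constant                 # 2.5
}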
|
require('foreach')
require('doParallel')
pc <- function (x.vars=NULL, x=NULL, x.svd=NULL, df = NULL, center=TRUE,
scale=TRUE, tol = NULL, max.number=min(p,n), k=log(n),
method=c("t-values", "GAIC","k-fold"))
{
scall <- deparse(sys.call(), width.cutoff = 500L)
method <- match.arg(method)
rexpr <- grepl("gamlss",sys.calls())
newData <- beta <- scaleold <- centerold <- NULL
for (i in length(rexpr):1) {
position <- i
if (rexpr[i]==TRUE) break
}
gamlss.env <- sys.frame(position)
if (sys.call(position)[1]=="predict.gamlss()")
{
object <- get("object", envir=gamlss.env)
beta <- getSmo(object)$beta
centerold <- getSmo(object)$pc["center"]
scaleold <- getSmo(object)$pc["scale"]
newData <- get("newdata", envir=gamlss.env)
Data <- get("data", envir=gamlss.env)
} else if (sys.call(position)[1]=="gamlss()") {
if (is.null(get("gamlsscall", envir=gamlss.env)$data)) {
Data <- data.frame(cbind(x))
} else {
Data <- get("gamlsscall", envir=gamlss.env)$data
}
} else {
Data <- get("data", envir=gamlss.env)
}
Data <- data.frame(eval(substitute(Data)))
if (is.null(x) && is.null(x.vars) && is.null(x.svd))
stop("x or x.vars or x.svd has to be set in pc()")
if (sum(c(!is.null(x), !is.null(x.vars), !is.null(x.svd)))>1L)
stop("use only one of the arguments x, x.vars or x.svd")
if (!is.null(x.vars))
{
Xmtrx <- as.matrix(Data[, x.vars])
Xmtrx <- scale(Xmtrx, center = center, scale = scale)
center <- attr(Xmtrx,"scaled:center")
scale <- attr(Xmtrx,"scaled:scale")
n <- dim(Xmtrx)[1]
p <- dim(Xmtrx)[2]
S <- La.svd(Xmtrx, nu = 0, nv= max.number)
cNames <- colnames(Xmtrx)
}
if (!is.null(x))
{
if ("scaled:center"%in%names(attributes(x)))
warning("the x matrix will be scaled again \n", "This will creates problems if prediction is used later")
Xmtrx <- scale(x, center = center, scale = scale)
center <- attr(Xmtrx,"scaled:center")
scale <- attr(Xmtrx,"scaled:scale")
n <- dim(Xmtrx)[1]
p <- dim(Xmtrx)[2]
S <- La.svd(Xmtrx, nu = 0, nv= max.number)
cNames <- colnames(Xmtrx)
}
if (!is.null(x.svd))
{
if (length(x.svd)!=6) stop("x.svd should be a list of 6, created by GetSVD()")
n <- dim(x.svd$u)[1]
p <- dim(x.svd$vt)[1]
center <- x.svd$center
scale <- x.svd$scale
Xmtrx <- x.svd[["X"]]
cNames <- colnames(x.svd[["X"]])
S <- x.svd[c("d","u","vt")]
}
if (!is.null(df)&&df>min(n,p)) stop("the df have to be less than the number of variables")
if (!is.null(df)) max.number <- df
if (!is.null(tol))
{
rank <- sum(S$d > (S$d[1] * tol))
if (rank < ncol(x))
S$vt <- S$vt[, 1:rank, drop = FALSE]
}
S$d <- S$d/sqrt(max(1, n - 1))
dimnames(S$vt) <- list(paste("PC", seq(len = nrow(S$vt)),
sep = ""), cNames)
r <- list(sdev = S$d, rotation = t(S$vt), center=center, scale=scale)
r$x <- Xmtrx %*% t(S$vt)
class(r) <- "prcomp"
x <- rep(0, n)
attr(x, "PC") <- r
attr(x, "name") <- cNames
attr(x, "maxNo") <- max.number
attr(x, "call") <- substitute(gamlss.pc(data[[scall]], z, w))
attr(x, "df") <- df
attr(x, "k") <- k
attr(x, "method") <- method
attr(x, "newData") <- newData
attr(x, "beta") <- beta
class(x) <- c("smooth", class(x))
x
}
gamlss.pc <- function(x, y, w, xeval = NULL, ...)
{
if (is.null(xeval))
{
PC <- attr(x, "PC")
method <- attr(x, "method")
names <- as.character(attr(x, "name"))
edf <- as.vector(attr(x, "df"))
k <- as.vector(attr(x, "k"))
n <- nrow(PC$x)
p <- ncol(PC$x)
maxno <- as.numeric(attr(x, "maxNo"))
fun <- function(ind, k=k)
{
m <- lm(y~PC$x[,1:ind, drop = FALSE]-1, weights=w)
AIC(m, k=k)
}
if (is.null(edf))
{
MaxNo <- min(n-5, p, maxno)
if (method=="GAIC")
{
AiC <- foreach(i = 1 : MaxNo, .packages="gamlss",
.export=c('fun', "MaxNo"),
.combine = rbind)%dopar%{
m <- lm(y~I(PC$x[,1:i, drop = FALSE])-1, weights=w)
AA <- AIC(m, k=k)}
edf <- which.min( AiC[is.finite(AiC)])
T <- as.matrix(PC$x[,1:edf, drop = FALSE])
fit <- lm(y~T-1, weights=w)
beta <- as.vector(PC$rotation[,1:edf]%*%matrix(coef(fit)))
}
if (method=="t-values")
{
T <- as.matrix(PC$x[,1:MaxNo, drop = FALSE])
m <- lm(y~T-1, weights=w)
sigterms <- abs(summary(m)$coefficients[, 3])>2
T1 <- T[, sigterms]
edf <- sum(sigterms)
fit <- if (edf==0) lm(y~1, weights=w) else lm(y~T1-1, weights=w)
ii <- (1:MaxNo)*sigterms
expCoef <- rep(0,MaxNo)
expCoef[ii] <- coef(fit)
beta <- as.vector(PC$rotation[,1:MaxNo, drop = FALSE]%*%matrix(expCoef))
AiC <- AIC(fit, k=k)
}
} else
{
T <- as.matrix(PC$x[,1:edf, drop = FALSE])
fit <- lm(y~T-1, weights=w)
AiC <- AIC(fit, k=k)
beta <- as.vector(PC$rotation[,1:edf]%*%matrix(coef(fit)))
}
coefSmo <- list( coef = coef(fit),
beta = beta,
pc = PC,
edf = edf,
AIC = AiC
)
class(coefSmo) <- "pc"
list(fitted.values = fitted(fit), residuals = resid(fit), var=(predict(fit, se.fit=TRUE)$se)^2,
nl.df = edf, lambda=0, coefSmo = coefSmo)
} else
{
newdata <- data.frame(attr(x, "newData"))
names <- as.character(attr(x, "name"))
nX <- as.matrix(newdata[,names])
scale <- attr(x, "scale")[[1]]
center <- attr(x, "center")[[1]]
beta <- attr(x, "beta")
pred <- scale(nX, center=center, scale=scale )%*%beta
pred
}
}
plot.pc <- function(x,...)
{
plot(x[["beta"]], type="h", xlab="knots", ylab="coefficients")
abline(h=0)
}
coef.pc <- function(object, ...)
{
object[["coef"]]
}
print.pc <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("Principal componet fit using the gamlss function pc() \n")
cat("Degrees of Freedom for the fit :", x$edf, "\n")
}
which.Data.Corr <- function(data, r=.90)
{
if (abs(r)>=1||abs(r)<=0) stop("r should be greater than 0 and lass than 1")
Dim <- dim(data)
CC <- cor(data)
CCC <- CC-diag(rep(1,Dim[2]))
if (is.null(colnames(data))) colnames(data) <- paste0("X", seq(1:dim(data)[2]))
  if (!any(abs(CCC)>r)) stop(paste("no correlation above", r))
mm <- which(abs(CCC)>r, arr.ind=T)
nn <- mm[mm[,1]< mm[,2],]
if (is.vector(nn))
{
name1 <- colnames(data)[nn[1]]
name2 <- colnames(data)[nn[2]]
corrs <- CCC[nn[1],nn[2]]
} else
{ name1 <- colnames(data)[nn[,1]]
name2 <- colnames(data)[nn[,2]]
corrs <- CCC[nn]
}
cbind(name1, name2, corrs)
}
which.yX.Corr <- function(y, x,
r =.50 ,
plot = TRUE,
hierarchical = TRUE,
print = TRUE)
{
CC <- cor(y, x)
if (plot)
{
plot(as.vector(CC), pch=20, col="gray")
abline(h=c(r, -r), col='red')
}
if(hierarchical)
{
DF <- as.data.frame(x)
nnames <- colnames(CC)[abs(CC)>r]
    ff <- as.formula(paste("~ ",paste(nnames, collapse='+')))
if (any(grep(":", nnames))) ff <- eval(call("~",ff))
MM <- model.matrix(ff, DF)[,-1]
} else
{
nnames <- colnames(CC)[abs(CC)>r]
MM <- x[,nnames]
}
if (print)
{
cat("cup point for correlation", r, "\n")
cat("dimesions of new matrix", dim(MM), "\n")
}
invisible(MM)
}
getSVD <- function(x=NULL, nu=min(n, p), nv=min(n, p))
{
n <- dim(x)[1]
p <- dim(x)[2]
x <- scale(x)
center <- attr(x,"scaled:center")
scale <- attr(x,"scaled:scale")
cNames <- colnames(x)
S <- La.svd(x, nu=nu, nv=nv)
S$X <- x
S$center <- attr(x,"scaled:center")
S$scale <- attr(x,"scaled:scale")
S
}
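# Usage sketch for pc() inside a gamlss() formula; assumes the gamlss package
# (with the usair data from gamlss.data). method = "GAIC" additionally needs a
# registered doParallel backend. Guarded by if (FALSE) so it is not run.
if (FALSE) {
  library(gamlss)
  data(usair)
  m1 <- gamlss(y ~ pc(x.vars = c("x1","x2","x3","x4","x5","x6")), data = usair)
  getSmo(m1)        # print.pc(): effective degrees of freedom of the fit
  plot(getSmo(m1))  # plot.pc(): coefficients mapped back to the x-space
}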
|
GMLAbstractTimeObject <- R6Class("GMLAbstractTimeObject",
inherit = GMLAbstractGML,
private = list(
xmlElement = "AbstractTimeObject",
xmlNamespacePrefix = "GML"
),
public = list(
initialize = function(xml = NULL, defaults = list()){
super$initialize(xml, element = private$xmlElement, defaults)
}
)
)
|
gl.plot.heatmap <- function(D,verbose=NULL){
pkg <- "RColorBrewer"
if (!(requireNamespace(pkg, quietly = TRUE))) {
stop("Package ",pkg," needed for this function to work. Please install it.") }
pkg <- "pheatmap"
if (!(requireNamespace(pkg, quietly = TRUE))) {
stop("Package ",pkg," needed for this function to work. Please install it.") }
funname <- match.call()[[1]]
build <- "Jacob"
if (is.null(verbose)){
verbose <- 2
}
if (verbose < 0 | verbose > 5){
cat(paste(" Warning: Parameter 'verbose' must be an integer between 0 [silent] and 5 [full report], set to 2\n"))
verbose <- 2
}
if (verbose >= 1){
if(verbose==5){
cat("Starting",funname,"[ Build =",build,"]\n")
} else {
cat("Starting",funname,"\n")
}
}
if(class(D)!="dist" & class(D)!="matrix" & class(D)!="fd") {
cat(" Fatal Error: distance matrix of class 'dist' or class 'fd' required!\n"); stop("Execution terminated\n")
}
if (class(D)=="dist") {
if (max(D)==0){cat(" Warning: matrix contains no nonzero distances\n")}
}
if (class(D)=="fd") {
if (max(D$fd)==0){cat(" Warning: matrix contains no nonzero distances\n")}
}
if (class(D)=="dist"){
pheatmap::pheatmap(as.matrix(D),color=RColorBrewer::brewer.pal(n = 8, name = 'Blues'))
}
if (class(D)=="fd"){
pheatmap::pheatmap(as.matrix(D$fd),color=RColorBrewer::brewer.pal(n = 8, name = 'Blues'))
}
if (verbose > 0) {
cat("Completed:",funname,"\n")
}
return(NULL)
}
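# Usage sketch with a made-up distance matrix; assumes the pheatmap and
# RColorBrewer packages are installed.
if (FALSE) {
  set.seed(1)
  D <- dist(matrix(rnorm(50), nrow = 10))
  gl.plot.heatmap(D, verbose = 2)
}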
|
analyse_baSAR <- function(
object,
XLS_file = NULL,
aliquot_range = NULL,
source_doserate = NULL,
signal.integral,
signal.integral.Tx = NULL,
background.integral,
background.integral.Tx = NULL,
irradiation_times = NULL,
sigmab = 0,
sig0 = 0.025,
distribution = "cauchy",
baSAR_model = NULL,
n.MCMC = 100000,
fit.method = "EXP",
fit.force_through_origin = TRUE,
fit.includingRepeatedRegPoints = TRUE,
method_control = list(),
digits = 3L,
distribution_plot = "kde",
plot = TRUE,
plot_reduced = TRUE,
plot.single = FALSE,
verbose = TRUE,
...
){
.baSAR_function <-
function(Nb_aliquots,
distribution,
data.Dose,
data.Lum,
data.sLum,
fit.method,
n.MCMC,
fit.force_through_origin,
fit.includingRepeatedRegPoints,
method_control,
baSAR_model,
verbose)
{
lower_centralD <- method_control[["lower_centralD"]]
upper_centralD <- method_control[["upper_centralD"]]
n.chains <- if (is.null(method_control[["n.chains"]])) {
3
} else{
method_control[["n.chains"]]
}
inits <- if (is.null(method_control[["inits"]])) {
NULL
} else{
method_control[["inits"]]
}
thin <- if (is.null(method_control[["thin"]])) {
if(n.MCMC >= 1e+05){
thin <- n.MCMC/1e+05 * 250
}else{
thin <- 10
}
} else{
method_control[["thin"]]
}
variable.names <- if (is.null(method_control[["variable.names"]])) {
c('central_D', 'sigma_D', 'D', 'Q', 'a', 'b', 'c', 'g')
} else{
method_control[["variable.names"]]
}
stopifnot(lower_centralD >= 0)
Limited_cycles <- vector()
if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <- 0 }
if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <- 1 }
if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <- 1 }
if (fit.force_through_origin == TRUE) {GC_Origin <- 1} else {GC_Origin <- 0}
if (fit.includingRepeatedRegPoints) {
for (i in 1:Nb_aliquots) {
Limited_cycles[i] <- length(stats::na.exclude(data.Dose[,i]))
}
}else{
for (i in 1:Nb_aliquots) {
temp.logic <- !duplicated(data.Dose[,i], incomparables=c(0))
m <- length(which(!temp.logic))
data.Dose[,i] <- c(data.Dose[,i][temp.logic], rep(NA, m))
data.Lum[,i] <- c(data.Lum[,i][temp.logic], rep(NA, m))
data.sLum[,i] <- c(data.sLum[,i][temp.logic], rep(NA, m))
rm(m)
rm(temp.logic)
}
for (i in 1:Nb_aliquots) {
Limited_cycles[i] <- length(data.Dose[, i]) - length(which(is.na(data.Dose[, i])))
}
}
if(!is.null(baSAR_model)){
if(distribution != "user_defined"){
distribution <- "user_defined"
warning("[analyse_baSAR()] 'distribution' set to 'user_defined'.", call. = FALSE)
}
}
baSAR_model <- list(
cauchy = "model {
central_D ~ dunif(lower_centralD,upper_centralD)
precision_D ~ dt(0, pow(0.16*central_D, -2), 1)T(0, )
sigma_D <- 1/sqrt(precision_D)
for (i in 1:Nb_aliquots) {
a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, )
b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, )
c[i] ~ dnorm(1.002 , 1/(0.9^2) ) T(0, )
g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
sigma_f[i] ~ dexp (20)
D[i] ~ dt ( central_D , precision_D, 1)
S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2)
Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
for (m in 2:Limited_cycles[i]) {
S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2)
Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
}
}
}",
normal = "model {
central_D ~ dunif(lower_centralD,upper_centralD)
sigma_D ~ dunif(0.01, 1 * central_D)
for (i in 1:Nb_aliquots) {
a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, )
b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, )
c[i] ~ dnorm(1.002 , 1/(0.9^2) ) T(0, )
g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
sigma_f[i] ~ dexp (20)
D[i] ~ dnorm ( central_D , 1/(sigma_D^2) )
S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2)
Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
for (m in 2:Limited_cycles[i]) {
S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2)
Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
}
}
}",
log_normal = "model {
central_D ~ dunif(lower_centralD,upper_centralD)
log_central_D <- log(central_D) - 0.5 * l_sigma_D^2
l_sigma_D ~ dunif(0.01, 1 * log(central_D))
sigma_D <- sqrt((exp(l_sigma_D^2) -1) * exp( 2*log_central_D + l_sigma_D^2) )
for (i in 1:Nb_aliquots) {
a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, )
b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, )
c[i] ~ dnorm(1.002 , 1/(0.9^2) ) T(0, )
g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
sigma_f[i] ~ dexp (20)
log_D[i] ~ dnorm ( log_central_D , 1/(l_sigma_D^2) )
D[i] <- exp(log_D[i])
S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2)
Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
for (m in 2:Limited_cycles[i]) {
S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2)
Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
}
}
}",
user_defined = baSAR_model
)
if(!any(distribution%in%names(baSAR_model))){
stop(paste0("[analyse_baSAR()] No model is pre-defined for the requested distribution. Please select ", paste(rev(names(baSAR_model))[-1], collapse = ", ")), " or define an own model using the argument 'baSAR_model'!", call. = FALSE)
}else{
    if(is.null(baSAR_model[[distribution]])){
stop("[analyse_baSAR()] You have specified a 'user_defined' distribution, but you have not provided a model via 'baSAR_model'!", call. = FALSE)
}
}
data_Liste <- list(
'Dose' = data.Dose,
'Lum' = data.Lum,
'sLum' = data.sLum,
'LinGC' = LinGC,
'ExpoGC' = ExpoGC,
'GC_Origin' = GC_Origin,
'Limited_cycles' = Limited_cycles,
'lower_centralD' = lower_centralD,
'upper_centralD' = upper_centralD,
'Nb_aliquots' = Nb_aliquots
)
if(verbose){
cat("\n[analyse_baSAR()] ---- baSAR-model ---- \n")
cat("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
cat("[analyse_baSAR()] Bayesian analysis in progress ...\n")
message(paste(".. >> bounds set to: lower_centralD =", lower_centralD, "| upper_centralD =", upper_centralD))
}
Nb_Iterations <- n.MCMC
if (verbose) {
message(paste0(
".. >> calculation will be done assuming a '",
distribution,
"' distribution\n"
))
}
jagsfit <- rjags::jags.model(
file = textConnection(baSAR_model[[distribution]]),
data = data_Liste,
inits = inits,
n.chains = n.chains,
n.adapt = Nb_Iterations,
quiet = if(verbose){FALSE}else{TRUE}
)
update(
object = jagsfit,
n.iter = Nb_Iterations,
progress.bar = if(verbose){"text"}else{NULL}
)
sampling <- rjags::coda.samples(
model = jagsfit,
variable.names = variable.names,
n.iter = Nb_Iterations,
thin = thin
)
sampling_reduced <- rjags::coda.samples(
model = jagsfit,
variable.names = c('central_D', 'sigma_D'),
n.iter = Nb_Iterations,
thin = thin
)
pt_zero <- 0
nb_decal <- 2
pt_zero <- Nb_aliquots
output.mean <-
round(summary(sampling_reduced)[[1]][c("central_D", "sigma_D"), 1:2], digits)
if(distribution == "log_normal"){
temp.vector <- unlist(lapply(sampling_reduced, function(x){as.vector(x[,1])}))
gm <- round(exp(sum(log(temp.vector))/length(temp.vector)),digits)
rm(temp.vector)
}else{
gm <- NULL
}
output.quantiles <-
round(summary(sampling_reduced, quantiles = c(0.025, 0.16, 0.84, 0.975))[[2]][c("central_D", "sigma_D"), 1:4], digits)
baSAR.output <- data.frame(
DISTRIBUTION = distribution,
NB_ALIQUOTS = Nb_aliquots,
N.CHAINS = n.chains,
N.MCMC = n.MCMC,
FIT_METHOD = fit.method,
CENTRAL = if(is.null(gm)){output.mean[1,1]}else{gm},
CENTRAL.SD = output.mean[1,2],
SIGMA = output.mean[2,1],
SIGMA.SD = output.mean[2,2],
CENTRAL_Q_.16 = output.quantiles[1,2],
CENTRAL_Q_.84 = output.quantiles[1,3],
SIGMA_Q_.16 = output.quantiles[2,2],
SIGMA_Q_.84 = output.quantiles[2,3],
CENTRAL_Q_.025 = output.quantiles[1,1],
CENTRAL_Q_.975 = output.quantiles[1,4],
SIGMA_Q_.025 = output.quantiles[2,1],
SIGMA_Q_.975 = output.quantiles[2,4]
)
return(
baSAR.output = list(
baSAR.output_summary = baSAR.output,
baSAR.output_mcmc = sampling,
models = list(
cauchy = baSAR_model[["cauchy"]],
normal = baSAR_model[["normal"]],
log_normal = baSAR_model[["log_normal"]],
user_defined = baSAR_model[["user_defined"]]
)
)
)
}
if (!requireNamespace("rjags", quietly = TRUE)) {
stop("[analyse_baSAR()] To use this function you have to first install the package 'rjags'.",
call. = FALSE)
}
if (!requireNamespace("coda", quietly = TRUE)) {
stop("[analyse_baSAR()] To use this function you have to first install the package 'coda'.",
call. = FALSE)
}
additional_arguments <- list(
threshold = 30,
background.count.distribution = "non-poisson",
sheet = 1,
col_names = TRUE,
col_types = NULL,
skip = 0,
n.records = NULL,
duplicated.rm = TRUE,
position = NULL,
pattern = NULL,
fit.weights = TRUE,
fit.bounds = TRUE,
NumberIterations.MC = 100,
output.plot = if(plot){TRUE}else{FALSE},
output.plotExtended = if(plot){TRUE}else{FALSE}
)
additional_arguments <- modifyList(x = additional_arguments,
val = list(...))
function_arguments <- NULL
if (fit.method != "EXP" &
fit.method != "EXP+LIN" &
fit.method != "LIN"){
stop("[analyse_baSAR()] Unsupported fitting method. Supported: 'EXP', 'EXP+LIN' and 'LIN'", call. = FALSE)
}
if(is(object, "RLum.Results")){
if(object@originator == "analyse_baSAR"){
function_arguments <- as.list(object@info$call)
function_arguments.new <- modifyList(x = function_arguments, val = as.list(match.call()))
max_cycles <- max(object$input_object[["CYCLES_NB"]])
Nb_aliquots <- nrow(object$input_object)
if(Nb_aliquots < 2){
try(stop("[analyse_baSAR()] number of aliquots < 3, this makes no sense, NULL returned!", call. = FALSE))
return(NULL)
}
if(!is.null(function_arguments.new$distribution)){
distribution <- function_arguments.new$distribution
}
if(!is.null(function_arguments.new$n.MCMC)){
n.MCMC <- function_arguments.new$n.MCMC
}
if(!is.null(function_arguments.new$fit.method)){
fit.method <- function_arguments.new$fit.method
}
if(!is.null(function_arguments.new$fit.force_through_origin)){
fit.force_through_origin <- function_arguments.new$fit.force_through_origin
}
if(!is.null(function_arguments.new$fit.includingRepeatedRegPoints)){
fit.includingRepeatedRegPoints <- function_arguments.new$fit.includingRepeatedRegPoints
}
if(length(as.list(match.call())$source_doserate) > 0){
warning("[analyse_baSAR()] Argument 'source_doserate' is ignored in this modus, as it was alreay set.", call. = FALSE)
}
if(!is.null(function_arguments.new$aliquot_range)){
aliquot_range <- eval(function_arguments.new$aliquot_range)
}
if(!is.null(function_arguments.new$method_control)){
method_control <- eval(function_arguments.new$method_control)
}
if(!is.null(function_arguments.new$baSAR_model)){
baSAR_model <- eval(function_arguments.new$baSAR_model)
}
if(!is.null(function_arguments.new$plot)){
plot <- function_arguments.new$plot
}
if(!is.null(function_arguments.new$verbose)){
verbose <- function_arguments.new$verbose
}
if (!is.null(aliquot_range)) {
if (max(aliquot_range) <= nrow(object$input_object)) {
input_object <- object$input_object[aliquot_range, ]
removed_aliquots <-rbind(object$removed_aliquots, object$input_object[-aliquot_range,])
Nb_aliquots <- nrow(input_object)
} else{
try(stop("[analyse_basAR()] aliquot_range out of bounds! Input ignored!",
call. = FALSE))
aliquot_range <- NULL
input_object <- object$input_object
removed_aliquots <- object$removed_aliquots
}
} else{
input_object <- object$input_object
removed_aliquots <- object$removed_aliquots
}
Doses <- t(input_object[,9:(8 + max_cycles)])
LxTx <- t(input_object[,(9 + max_cycles):(8 + 2 * max_cycles)])
LxTx.error <- t(input_object[,(9 + 2 * max_cycles):(8 + 3 * max_cycles)])
rm(max_cycles)
}else{
stop("[analyse_baSAR()] 'object' is of type 'RLum.Results', but has not been produced by analyse_baSAR()!", call. = FALSE)
}
}else{
if(verbose){
cat("\n[analyse_baSAR()] ---- PRE-PROCESSING ----\n")
}
if(class(object) == "list" && all(vapply(object, function(x){class(x) == "RLum.Analysis"}, logical(1)))){
if(verbose)
cat("[analyse_baSAR()] List of RLum.Analysis-objects detected .. ")
if(length(object) < 2)
stop("[analyse_baSAR()] At least two aliquots are needed for the calculation!", call. = FALSE)
if(class(object) == "list"){
n_objects <- length(object)
}else{
n_objects <- 1
}
if(verbose)
cat("\n\t\t .. extract 'OSL (UVVIS)' and 'irradiation (NA)'")
object <- get_RLum(object, recordType = c("OSL (UVVIS)", "irradiation (NA)"), drop = FALSE)
if(is.null(irradiation_times)){
if(verbose)
cat("\n\t\t .. extract irradiation times")
irradiation_times <- extract_IrradiationTimes(object[[1]])$irr.times$IRR_TIME
}
if(verbose)
cat("\n\t\t .. run conversion")
object <- try(convert_RLum2Risoe.BINfileData(object), silent = TRUE)
if(class(object) == "try-error"){
stop("[analyse_baSAR()] Object conversion failed. Return NULL!", call. = FALSE)
return(NULL)
}
      if(!is.null(irradiation_times)){
if(verbose)
cat("\n\t\t .. set irradiation times")
object@METADATA[["IRR_TIME"]] <- rep(irradiation_times,n_objects)
}
if(verbose && !all("OSL" %in% object@METADATA[["LTYPE"]])){
cat("\n\t\t .. remove non-OSL curves")
rm_id <- which(object@METADATA[["LTYPE"]] != "OSL")
object@METADATA <- object@METADATA[-rm_id,]
object@DATA[rm_id] <- NULL
object@METADATA[["ID"]] <- 1:length(object@METADATA[["ID"]])
rm(rm_id)
}
}
if (is(object, "Risoe.BINfileData")) {
fileBIN.list <- list(object)
} else if (is(object, "list")) {
object_type <-
unique(unlist(lapply(
1:length(object),
FUN = function(x) {
is(object[[x]])[1]
}
)))
if (length(object_type) == 1) {
if (object_type == "Risoe.BINfileData") {
fileBIN.list <- object
} else if (object_type == "character") {
fileBIN.list <- read_BIN2R(
file = object,
position = additional_arguments$position,
duplicated.rm = additional_arguments$duplicated.rm,
n.records = additional_arguments$n.records,
pattern = additional_arguments$pattern,
verbose = verbose
)
} else{
stop(
"[analyse_baSAR()] data type in the input list provided for 'object' is not supported!",
call. = FALSE
)
}
} else{
stop("[analyse_baSAR()] 'object' only accepts a list with objects of similar type!", call. = FALSE)
}
} else if (is(object, "character")) {
fileBIN.list <- list(
read_BIN2R(
file = object,
position = additional_arguments$position,
duplicated.rm = additional_arguments$duplicated.rm,
n.records = additional_arguments$n.records,
verbose = verbose
)
)
} else{
stop(
paste0(
"[analyse_baSAR()] '",
is(object)[1],
"' as input is not supported. Check manual for allowed input objects."
), call. = FALSE
)
}
if(!all(unlist(lapply(fileBIN.list, FUN = function(x){(x@METADATA[["SEL"]])})))){
fileBIN.list <- lapply(fileBIN.list, function(x){
x@DATA <- x@DATA[x@METADATA[["SEL"]]]
x@METADATA <- x@METADATA[x@METADATA[["SEL"]], ]
x@METADATA[["ID"]] <- 1:nrow(x@METADATA)
return(x)
})
if(verbose){
cat("\n[analyse_baSAR()] Record pre-selection in BIN-file detected >> record reduced to selection")
}
}
Dose <- list()
LxTx <- list()
sLxTx <- list()
Disc <- list()
Grain <- list()
Disc_Grain.list <- list()
Nb_aliquots <- 0
previous.Nb_aliquots <- 0
object.file_name <- list()
Mono_grain <- TRUE
Limited_cycles <- vector()
for (i in 1 : length(fileBIN.list)) {
Disc[[i]] <- list()
Grain[[i]] <- list()
object.file_name[[i]] <- unique(fileBIN.list[[i]]@METADATA[["FNAME"]])
}
  if(any(duplicated(unlist(object.file_name)))){
    dup_text <- paste0(
      "[analyse_baSAR()] '",
      paste(object.file_name[which(duplicated(unlist(object.file_name)))],
            collapse = ", "),
      "' is a duplicate and therefore removed from the input!")
    if(verbose){
      message(dup_text)
    }
    warning(dup_text, call. = FALSE)
Disc[which(duplicated(unlist(object.file_name)))] <- NULL
Grain[which(duplicated(unlist(object.file_name)))] <- NULL
fileBIN.list[which(duplicated(unlist(object.file_name)))] <- NULL
object.file_name[which(duplicated(unlist(object.file_name)))] <- NULL
}
if(!is.null(source_doserate)){
if(is(source_doserate, "list")){
source_doserate <- rep(source_doserate, length = length(fileBIN.list))
}else{
source_doserate <- rep(list(source_doserate), length = length(fileBIN.list))
}
}else{
stop("[analyse_baSAR()] 'source_doserate' is missing, but required as the current implementation expects dose values in Gy!",
call. = FALSE)
}
if(is(sigmab, "list")){
sigmab <- rep(sigmab, length = length(fileBIN.list))
}else{
sigmab <- rep(list(sigmab), length = length(fileBIN.list))
}
if(is(sig0, "list")){
sig0 <- rep(sig0, length = length(fileBIN.list))
}else{
sig0 <- rep(list(sig0), length = length(fileBIN.list))
}
if(is(signal.integral, "list")){
signal.integral <- rep(signal.integral, length = length(fileBIN.list))
}else{
signal.integral <- rep(list(signal.integral), length = length(fileBIN.list))
}
if (!is.null(signal.integral.Tx)) {
if (is(signal.integral.Tx, "list")) {
signal.integral.Tx <- rep(signal.integral.Tx, length = length(fileBIN.list))
} else{
signal.integral.Tx <- rep(list(signal.integral.Tx), length = length(fileBIN.list))
}
}
if(is(background.integral, "list")){
background.integral <- rep(background.integral, length = length(fileBIN.list))
}else{
background.integral <- rep(list(background.integral), length = length(fileBIN.list))
}
if(is(background.integral, "list")){
background.integral <- rep(background.integral, length = length(fileBIN.list))
}else{
background.integral <- rep(list(background.integral), length = length(fileBIN.list))
}
if (!is.null(background.integral.Tx)) {
if (is(background.integral.Tx, "list")) {
background.integral.Tx <-
rep(background.integral.Tx, length = length(fileBIN.list))
} else{
background.integral.Tx <-
rep(list(background.integral.Tx), length = length(fileBIN.list))
}
}
if(is.null(XLS_file)){
if(verbose){
cat("\n[analyse_baSAR()] No XLS-file provided, running automatic grain selection ...")
}
for (k in 1:length(fileBIN.list)) {
if(length(unique(fileBIN.list[[k]]@METADATA[["GRAIN"]])) > 1){
aliquot_selection <-
verify_SingleGrainData(
object = fileBIN.list[[k]],
cleanup_level = "aliquot",
threshold = additional_arguments$threshold,
cleanup = FALSE
)
if (sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE) > 0) {
warning(
paste(
"[analyse_baSAR()] Automatic grain selection:",
sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE),
"curve(s) with grain index 0 had been removed from the dataset."
),
call. = FALSE
)
}
datalu <-
aliquot_selection$unique_pairs[!aliquot_selection$unique_pairs[["GRAIN"]] == 0,]
if(nrow(datalu) == 0){
try(stop("[analyse_baSAR()] Sorry, nothing was left after the automatic grain selection! NULL returned!", call. = FALSE))
return(NULL)
}
}else{
warning("[analyse_baSAR()] Only multiple grain data provided, automatic selection skipped!", call. = FALSE)
datalu <- unique(fileBIN.list[[k]]@METADATA[, c("POSITION", "GRAIN")])
Mono_grain <- FALSE
aliquot_selection <- NA
}
Nb_aliquots <- nrow(datalu)
Disc[[k]] <- datalu[["POSITION"]]
Grain[[k]] <- datalu[["GRAIN"]]
rm(datalu, aliquot_selection)
}
rm(k)
} else if (is(XLS_file, "data.frame") || is(XLS_file, "character")) {
if (is(XLS_file, "character")) {
if(!file.exists(XLS_file)){
stop("[analyse_baSAR()] XLS_file does not exist!", call. = FALSE)
}
datalu <- as.data.frame(readxl::read_excel(
path = XLS_file,
sheet = additional_arguments$sheet,
col_names = additional_arguments$col_names,
col_types = additional_arguments$col_types,
skip = additional_arguments$skip
), stringsAsFactors = FALSE)
if(!all(grepl(colnames(datalu), pattern = " ")[1:3])){
stop("[analyse_baSAR()] One of the first three columns in your XLS_file has no column header. Your XLS_file requires
at least three columns for 'BIN_file', 'DISC' and 'GRAIN'",
call. = FALSE)
}
datalu <- datalu[!is.na(datalu[[1]]), ]
} else{
datalu <- XLS_file
if(ncol(datalu) < 3){
stop("[analyse_baSAR()] The data.frame provided via XLS_file should consist of at least three columns (see manual)!", call. = FALSE)
}
datalu[[1]] <- as.character(datalu[[1]])
datalu[[2]] <- as.numeric(datalu[[2]])
datalu[[3]] <- as.numeric(datalu[[3]])
}
if (!is.null(aliquot_range)) {
datalu <- datalu[aliquot_range,]
}
Nb_ali <- 0
k <- NULL
for (nn in 1:length((datalu[, 1]))) {
if (!is.na(datalu[nn, 1])) {
if (any(grepl(
pattern = strsplit(
x = basename(datalu[nn, 1]),
split = ".",
fixed = TRUE
)[[1]][1],
x = unlist(object.file_name)
))) {
k <- grep(pattern = strsplit(
x = basename(datalu[nn, 1]),
split = ".",
fixed = TRUE
)[[1]][1],
x = unlist(object.file_name))
nj <- length(Disc[[k]]) + 1
Disc[[k]][nj] <- as.numeric(datalu[nn, 2])
Grain[[k]][nj] <- as.numeric(datalu[nn, 3])
Nb_ali <- Nb_ali + 1
if (is.na(Grain[[k]][nj]) || Grain[[k]][nj] == 0) {
Mono_grain <- FALSE
}
}else{
warning(
paste0("[analyse_baSAR] '", (datalu[nn, 1]), "' not recognized or not loaded; skipped!"),
call. = FALSE
)
}
} else{
if (Nb_ali == 0) {
stop("[analyse_baSAR()] Nb. discs/grains = 0 !", call. = FALSE)
}
break()
}
}
if(is.null(k)){
stop("[analyse_baSAR()] BIN-file names in XLS-file do not fit to the loaded BIN-files!", call. = FALSE)
}
} else{
stop("[analyse_baSAR()] input type for 'XLS_file' not supported!", call. = FALSE)
}
for (k in 1:length(fileBIN.list)) {
Disc_Grain.list[[k]] <- list()
n_aliquots_k <- length((Disc[[k]]))
if(n_aliquots_k == 0){
fileBIN.list[[k]] <- NULL
if(verbose){
message(paste("[analyse_baSAR()] No data has been seletecd from BIN-file", k, ">> BIN-file removed from input!"))
}
warning(paste("[analyse_baSAR()] No data has been seletecd from BIN-file", k, ">> BIN-file removed from input!"), call. = FALSE)
next()
}
for (d in 1:n_aliquots_k) {
dd <- as.integer(unlist(Disc[[k]][d]))
Disc_Grain.list[[k]][[dd]] <- list()
}
for (d in 1:n_aliquots_k) {
dd <- as.integer(unlist(Disc[[k]][d]))
if (Mono_grain == FALSE) {
gg <- 1
}
if (Mono_grain == TRUE) {
gg <- as.integer(unlist(Grain[[k]][d]))}
Disc_Grain.list[[k]][[dd]][[gg]] <- list()
for (z in 1:6) {
Disc_Grain.list[[k]][[dd]][[gg]][[z]] <- list()
}
}
}
if(verbose){
cat("\n[analyse_baSAR()] Preliminary analysis in progress ... ")
cat("\n[analyse_baSAR()] Hang on, this may take a while ... \n")
}
for (k in 1:length(fileBIN.list)) {
n_index.vector <- vector("numeric")
measured_discs.vector <- vector("numeric")
measured_grains.vector <- vector("numeric")
measured_grains.vector_list <- vector("numeric")
irrad_time.vector <- vector("numeric")
disc_pos <- vector("numeric")
grain_pos <- vector("numeric")
length_BIN <- length(fileBIN.list[[k]])
n_index.vector <- fileBIN.list[[k]]@METADATA[["ID"]][1:length_BIN]
measured_discs.vector <- fileBIN.list[[k]]@METADATA[["POSITION"]][1:length_BIN]
measured_grains.vector <- fileBIN.list[[k]]@METADATA[["GRAIN"]][1:length_BIN]
if(is.null(irradiation_times)){
irrad_time.vector <- fileBIN.list[[k]]@METADATA[["IRR_TIME"]][1:length_BIN]
}else{
irrad_time.vector <- rep(irradiation_times,n_objects)
}
if (length(unique(irrad_time.vector)) == 1) {
try(stop(
"[analyse_baSAR()] It appears the the irradiation times are all the same. Analysis stopped and NULL returned!",
call. = FALSE
))
return(NULL)
}
disc_pos <- as.integer(unlist(Disc[[k]]))
grain_pos <- as.integer(unlist(Grain[[k]]))
for (i in 1: length(Disc[[k]])) {
disc_selected <- as.integer(Disc[[k]][i])
if (Mono_grain == TRUE) {grain_selected <- as.integer(Grain[[k]][i])} else { grain_selected <-0}
disc_logic <- (disc_selected == measured_discs.vector)
if (!any(disc_logic)) {
try(stop(
paste0(
"[analyse_baSAR()] In BIN-file '",
unique(fileBIN.list[[k]]@METADATA[["FNAME"]]),
"' position number ",
disc_selected,
" does not exist! NULL returned!"
),
call. = FALSE
))
return(NULL)
}
grain_logic <- (grain_selected == measured_grains.vector)
if (!any(grain_logic)) {
try(stop(
paste0(
"[analyse_baSAR()] In BIN-file '",
unique(fileBIN.list[[k]]@METADATA[["FNAME"]]),
"' grain number ",
grain_selected,
" does not exist! NULL returned!"
),
call. = FALSE
))
return(NULL)
}
index_liste <- n_index.vector[disc_logic & grain_logic]
if (Mono_grain == FALSE) {grain_selected <-1}
for (kn in 1: length(index_liste)) {
t <- index_liste[kn]
if(!is.null(unlist(source_doserate))){
dose.value <- irrad_time.vector[t] * unlist(source_doserate[[k]][1])
}else{
dose.value <- irrad_time.vector[t]
}
s <- 1 + length( Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]] )
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][s] <- n_index.vector[t]
if ( s%%2 == 1) { Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]][as.integer(1+s/2)] <- dose.value }
}
}
}
max_cycles <- 0
count <- 1
calc_OSLLxTxRatio_warning <- list()
for (k in 1:length(fileBIN.list)) {
if (Mono_grain == TRUE) (max.grains <- 100) else (max.grains <- 1)
if (plot) {
curve_index <- vapply((1:length(Disc[[k]])), function(i) {
disc_selected <- as.integer(Disc[[k]][i])
if (Mono_grain == TRUE) {
grain_selected <- as.integer(Grain[[k]][i])
} else {
grain_selected <- 1
}
Ln_index <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][1])
Tn_index <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2])
return(c(Ln_index, Tn_index))
}, FUN.VALUE = vector(mode = "numeric", length = 2))
Ln_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[1, 1]]]),
matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[1, ]]), ncol = ncol(curve_index)))
Tn_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[2, 1]]]),
matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[2, ]]), ncol = ncol(curve_index)))
if(!plot.single){
par.default <- par()$mfrow
par(mfrow = c(1, 2))
}
graphics::matplot(
x = Ln_matrix[, 1],
y = Ln_matrix[, -1],
col = rgb(0, 0, 0, 0.3),
ylab = "Luminescence [a.u.]",
xlab = "Channel",
main = expression(paste(L[n], " - curves")),
type = "l"
)
abline(v = range(signal.integral[[k]]), lty = 2, col = "green")
abline(v = range(background.integral[[k]]), lty = 2, col = "red")
mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index)))
graphics::matplot(
x = Tn_matrix[, 1],
y = Tn_matrix[, -1],
col = rgb(0, 0, 0, 0.3),
ylab = "Luminescence [a.u.]",
xlab = "Channel",
main = expression(paste(T[n], " - curves")),
type = "l"
)
if(is.null(signal.integral.Tx[[k]])){
abline(v = range(signal.integral[[k]]), lty = 2, col = "green")
}else{
abline(v = range(signal.integral.Tx[[k]]), lty = 2, col = "green")
}
if(is.null(background.integral.Tx[[k]])){
abline(v = range(background.integral[[k]]), lty = 2, col = "red")
}else{
abline(v = range(background.integral.Tx[[k]]), lty = 2, col = "red")
}
mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index)))
if(!plot.single){
par(mfrow = par.default)
}
rm(curve_index, Ln_matrix, Tn_matrix)
}
for (i in 1:length(Disc[[k]])) {
disc_selected <- as.integer(Disc[[k]][i])
if (Mono_grain == TRUE) {
grain_selected <- as.integer(Grain[[k]][i])
} else {
grain_selected <- 1
}
for (nb_index in 1:((length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]]))/2 )) {
index1 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index-1])
index2 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index])
Lx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index1]])), fileBIN.list[[k]]@DATA[[index1]])
Tx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index2]])), fileBIN.list[[k]]@DATA[[index2]])
temp_LxTx <- withCallingHandlers(
calc_OSLLxTxRatio(
Lx.data = Lx.data,
Tx.data = Tx.data,
signal.integral = signal.integral[[k]],
signal.integral.Tx = signal.integral.Tx[[k]],
background.integral = background.integral[[k]],
background.integral.Tx = background.integral.Tx[[k]],
background.count.distribution = additional_arguments$background.count.distribution,
sigmab = sigmab[[k]],
sig0 = sig0[[k]]
),
warning = function(c) {
calc_OSLLxTxRatio_warning[[i]] <<- c
invokeRestart("muffleWarning")
}
)
LxTx.table <- temp_LxTx$LxTx.table
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]][nb_index] <- LxTx.table[[9]]
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]][nb_index] <- LxTx.table[[10]]
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]][nb_index] <- LxTx.table[[7]]
rm(LxTx.table)
rm(temp_LxTx)
}
sample_dose <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
sample_LxTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]])
sample_sLxTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]])
TnTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]])
selected_sample <- as.data.frame(cbind(sample_dose, sample_LxTx, sample_sLxTx, TnTx))
fitcurve <-
suppressWarnings(plot_GrowthCurve(
sample = selected_sample,
na.rm = TRUE,
fit.method = fit.method,
fit.force_through_origin = fit.force_through_origin,
fit.weights = additional_arguments$fit.weights,
fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints,
fit.bounds = additional_arguments$fit.bounds,
NumberIterations.MC = additional_arguments$NumberIterations.MC,
output.plot = additional_arguments$output.plot,
output.plotExtended = additional_arguments$output.plotExtended,
txtProgressBar = FALSE,
verbose = verbose,
main = paste0("ALQ: ", count," | POS: ", Disc[[k]][i], " | GRAIN: ", Grain[[k]][i])
))
if(!is.null(fitcurve)){
fitcurve_De <- get_RLum(fitcurve, data.object = "De")
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1] <-
fitcurve_De[["De"]]
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2] <-
fitcurve_De[["De.Error"]]
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3] <-
fitcurve_De[["D01"]]
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4] <-
fitcurve_De[["D01.ERROR"]]
}else{
Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1:4] <- NA
}
Limited_cycles[previous.Nb_aliquots + i] <-
length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
if (length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) > max_cycles) {
max_cycles <-
length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
}
previous.Nb_aliquots <-
length(stats::na.exclude(Limited_cycles))
count <- count + 1
}
}
rm(count)
if(length(calc_OSLLxTxRatio_warning)>0){
w_table <- table(unlist(calc_OSLLxTxRatio_warning))
w_table_names <- names(w_table)
for(w in 1:length(w_table)){
warning(paste(w_table_names[w], "This warning occurred", w_table[w], "times!"), call. = FALSE)
}
rm(w_table)
rm(w_table_names)
}
rm(calc_OSLLxTxRatio_warning)
Nb_aliquots <- previous.Nb_aliquots
OUTPUT_results <-
matrix(nrow = Nb_aliquots,
ncol = (8 + 3 * max_cycles),
byrow = TRUE)
colnames(OUTPUT_results) <- c(
"INDEX_BINfile",
"DISC",
"GRAIN",
"DE",
"DE.SD",
"D0",
"D0.SD",
"CYCLES_NB",
paste0("DOSE_", 1:max_cycles),
paste0("LxTx_", 1:max_cycles),
paste0("LxTx_", 1:max_cycles, ".SD")
)
comptage <- 0
for (k in 1:length(fileBIN.list)) {
for (i in 1:length(Disc[[k]])) {
disc_selected <- as.numeric(Disc[[k]][i])
if (Mono_grain == TRUE) {
grain_selected <-
as.numeric(Grain[[k]][i])
} else {
grain_selected <- 1
}
comptage <- comptage + 1
OUTPUT_results[comptage, 1] <- k
OUTPUT_results[comptage, 2] <- as.numeric(disc_selected)
if (Mono_grain == TRUE) {
OUTPUT_results[comptage, 3] <- grain_selected
}
else {
OUTPUT_results[comptage, 3] <- 0
}
if (length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]]) != 0) {
OUTPUT_results[comptage, 4] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1])
OUTPUT_results[comptage, 5] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2])
OUTPUT_results[comptage, 6] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3])
OUTPUT_results[comptage, 7] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4])
OUTPUT_results[comptage, 8] <-
length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
llong <-
length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
OUTPUT_results[comptage, 9:(8 + llong)] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
OUTPUT_results[comptage, (9 + max_cycles):(8 + max_cycles + llong)] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]])
OUTPUT_results[comptage, (9 + 2*max_cycles):(8 + 2*max_cycles + llong)] <-
as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]])
}
}
}
OUTPUT_results <- OUTPUT_results[!is.na(OUTPUT_results[,2]),]
OUTPUT_results_reduced <- t(OUTPUT_results)
selection <- vapply(X = 1:ncol(OUTPUT_results_reduced), FUN = function(x){
!any(is.nan(OUTPUT_results_reduced[9:(8+3*max_cycles), x]) | is.infinite(OUTPUT_results_reduced[9:(8+3*max_cycles), x]))
}, FUN.VALUE = vector(mode = "logical", length = 1))
removed_aliquots <- t(OUTPUT_results_reduced[,!selection])
OUTPUT_results_reduced <- t(OUTPUT_results_reduced[,selection])
if(length(unique(OUTPUT_results_reduced[,"CYCLES_NB"])) > 1){
warning("[analyse_baSAR()] The number of dose points differs across your data set. Check your data!", call. = FALSE)
}
if(Nb_aliquots > nrow(OUTPUT_results_reduced)) {
Nb_aliquots <- nrow(OUTPUT_results_reduced)
warning(
paste0(
"[analyse_baSAR()] 'Nb_aliquots' corrected due to NaN or Inf values in Lx and/or Tx to ", Nb_aliquots, ". You might want to check 'removed_aliquots' in the function output."), call. = FALSE)
}
Doses <- t(OUTPUT_results_reduced[,9:(8 + max_cycles)])
LxTx <- t(OUTPUT_results_reduced[, (9 + max_cycles):(8 + 2 * max_cycles)])
LxTx.error <- t(OUTPUT_results_reduced[, (9 + 2 * max_cycles):(8 + 3 * max_cycles)])
input_object <- data.frame(
BIN_FILE = unlist(object.file_name)[OUTPUT_results_reduced[[1]]],
OUTPUT_results_reduced[, -1],
stringsAsFactors = FALSE
)
if (length(removed_aliquots) > 0) {
removed_aliquots <-
as.data.frame(removed_aliquots, stringsAsFactors = FALSE)
removed_aliquots <- cbind(BIN_FILE = unlist(object.file_name)[removed_aliquots[[1]]],
removed_aliquots[, -1])
}else{
removed_aliquots <- NULL
}
}
if (is.null(method_control[["upper_centralD"]])) {
method_control <- c(method_control, upper_centralD = 1000)
}else{
if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){
warning("[analyse_baSAR()] You have modified the upper central_D boundary, while applying a predefined model. This is possible but not recommended!", call. = FALSE)
}
}
if (is.null(method_control[["lower_centralD"]])) {
method_control <- c(method_control, lower_centralD = 0)
}else{
if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){
warning("[analyse_baSAR()] You have modified the lower central_D boundary while applying a predefined model. This is possible but not recommended!", call. = FALSE)
}
}
if(min(input_object[["DE"]][input_object[["DE"]] > 0], na.rm = TRUE) < method_control$lower_centralD |
max(input_object[["DE"]], na.rm = TRUE) > method_control$upper_centralD){
warning("[analyse_baSAR()] Your set lower_centralD and/or upper_centralD value seem to do not fit to your input data. This may indicate a wronlgy set 'source_doserate'.", call. = FALSE)
}
results <-
try(.baSAR_function(
Nb_aliquots = Nb_aliquots,
distribution = distribution,
data.Dose = Doses,
data.Lum = LxTx,
data.sLum = LxTx.error,
fit.method = fit.method,
n.MCMC = n.MCMC,
fit.force_through_origin = fit.force_through_origin,
fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints,
method_control = method_control,
baSAR_model = baSAR_model,
verbose = verbose
))
if(!is(results, "try-error")){
if(!is.null(unlist(source_doserate)) || !is.null(function_arguments$source_doserate)){
if(!is.null(function_arguments$source_doserate)){
source_doserate <- eval(function_arguments$source_doserate)
if(!is(source_doserate, "list")){
source_doserate <- list(source_doserate)
}
}
systematic_error <- unlist(lapply(source_doserate, function(x){
if(length(x) == 2) {
x[2]
} else{
NULL
}
}))
}else{
systematic_error <- 0
}
if(mean(systematic_error) != systematic_error[1]){
warning("[analyse_baSAR()] Provided source dose rate errors differ. The mean was taken, but the calculated
systematic error might be not valid!", .call = FALSE)
}
DE_FINAL.ERROR <- sqrt(results[[1]][["CENTRAL.SD"]]^2 + mean(systematic_error)^2)
if(is.na(DE_FINAL.ERROR)){
DE_FINAL.ERROR <- results[[1]][["CENTRAL.SD"]]
}
results[[1]] <- cbind(results[[1]], DE_FINAL = results[[1]][["CENTRAL"]], DE_FINAL.ERROR = DE_FINAL.ERROR)
}else{
results <- NULL
verbose <- FALSE
plot <- FALSE
}
if(verbose){
cat("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n")
cat("\n[analyse_baSAR()] ---- RESULTS ---- \n")
cat("------------------------------------------------------------------\n")
cat(paste0("Used distribution:\t\t", results[[1]][["DISTRIBUTION"]],"\n"))
if(!is.null(removed_aliquots)){
if(!is.null(aliquot_range)){
cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/",
results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots),
" (manually removed: " ,length(aliquot_range),")\n"))
}else{
cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/",
results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots),"\n"))
}
}else{
cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/", results[[1]][["NB_ALIQUOTS"]],"\n"))
}
if(!is.null(baSAR_model)){
cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]]," (user defined)\n"))
}else{
cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]],"\n"))
}
cat(paste0("Number of independent chains:\t", results[[1]][["N.CHAINS"]],"\n"))
cat(paste0("Number MCMC iterations/chain:\t", results[[1]][["N.MCMC"]],"\n"))
cat("------------------------------------------------------------------\n")
if(distribution == "log_normal"){
cat("\t\t\t\tmean*\tsd\tHPD\n")
}else{
cat("\t\t\t\tmean\tsd\tHPD\n")
}
cat(paste0(">> Central dose:\t\t", results[[1]][["CENTRAL"]],"\t",
results[[1]][["CENTRAL.SD"]],"\t",
"[", results[[1]][["CENTRAL_Q_.16"]]," ; ", results[[1]][["CENTRAL_Q_.84"]], "]**\t"))
cat(paste0("\n\t\t\t\t\t\t[", results[[1]][["CENTRAL_Q_.025"]]," ; ", results[[1]][["CENTRAL_Q_.975"]],"]***"))
cat(paste0("\n>> sigma_D:\t\t\t", results[[1]][["SIGMA"]],"\t", results[[1]][["SIGMA.SD"]], "\t",
"[",results[[1]][["SIGMA_Q_.16"]]," ; ", results[[1]][["SIGMA_Q_.84"]], "]**\t"))
cat(paste0("\n\t\t\t\t\t\t[",results[[1]][["SIGMA_Q_.025"]]," ; ", results[[1]][["SIGMA_Q_.975"]], "]***"))
cat(paste0("\n>> Final central De:\t\t", results[[1]][["DE_FINAL"]],"\t", round(results[[1]][["DE_FINAL.ERROR"]], digits = digits), "\t",
" - \t -"))
cat("\n------------------------------------------------------------------\n")
cat(
paste("(systematic error contribution to final De:",
format((1-results[[1]][["CENTRAL.SD"]]/results[[1]][["DE_FINAL.ERROR"]])*100, scientific = TRUE), "%)\n")
)
if(distribution == "log_normal"){
cat("* mean of the central dose is the geometric mean\n")
}
cat("** 68 % level | *** 95 % level\n")
}
if(plot){
col <- get("col", pos = .LuminescenceEnv)
varnames <- coda::varnames(results[[2]])
if(plot_reduced){
plot_check <- try(plot(results[[2]][,c("central_D","sigma_D"),drop = FALSE]), silent = TRUE)
if(is(plot_check, "try-error")){
stop("[analyse_baSAR()] Plots for 'central_D' and 'sigma_D' could not be produced. You are probably monitoring the wrong variables!", .call = FALSE)
}
}else{
try(plot(results[[2]]))
}
if (!plot.single) {
par(mfrow = c(2, 2))
}
plot_matrix <- as.matrix(results[[2]][,grep(x = varnames, pattern = "D[", fixed = TRUE)])
aliquot_quantiles <- t(matrixStats::colQuantiles(x = plot_matrix, probs = c(0.25,0.75)))
box.col <- vapply(1:ncol(aliquot_quantiles), function(x){
if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.025")] |
aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.975")]
){
col[2]
}else if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.16")] |
aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.84")]){
"orange"
}else{
"white"
}
}, FUN.VALUE = vector(mode = "character", length = 1))
i <- 1
while(i < ncol(plot_matrix)){
step <- if((i + 14) > ncol(plot_matrix)){ncol(plot_matrix)}else{i + 14}
plot_check <- try(boxplot(
x = plot_matrix[,i:step],
use.cols = TRUE,
horizontal = TRUE,
outline = TRUE,
col = box.col[i:step],
xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"},
ylab = "Aliquot index",
yaxt = "n",
xlim = c(1,19),
main = paste0("Individual Doses | ALQ: ", i,":",step)
))
if(!is(plot_check, "try-error")){
if(step == ncol(plot_matrix)){
axis(side = 2, at = 1:15, labels = as.character(c(i:step, rep(" ", length = 15 - length(i:step)))),
cex.axis = 0.8
)
}else{
axis(side = 2, at = 1:15, labels = as.character(i:step), cex.axis = 0.8)
}
lines(
x = c(
results[[1]][, c("CENTRAL_Q_.16")], results[[1]][, c("CENTRAL_Q_.16")],
results[[1]][, c("CENTRAL_Q_.84")], results[[1]][, c("CENTRAL_Q_.84")]),
y = c(par()$usr[3], 16, 16, par()$usr[3]),
lty = 3,
col = col[3],
lwd = 1.5
)
text(
x = results[[1]][, c("CENTRAL")],
y = 16,
labels = "68 %",
pos = 3,
col = col[3],
cex = 0.9 * par()$cex
)
lines(
x = c(
results[[1]][, c("CENTRAL_Q_.025")], results[[1]][, c("CENTRAL_Q_.025")],
results[[1]][, c("CENTRAL_Q_.975")], results[[1]][, c("CENTRAL_Q_.975")]),
y = c(par()$usr[3], 17.5, 17.5, par()$usr[3]),
lty = 3,
col = col[2],
lwd = 1.5
)
text(
x = results[[1]][, c("CENTRAL")],
y = 17.5,
labels = "95 %",
pos = 3,
col = col[2],
cex = 0.9 * par()$cex)
}
i <- i + 15
}
rm(plot_matrix)
if(!plot.single){
par(mfrow = c(1,2))
on.exit(par(mfrow = c(1,1), bg = "white", xpd = FALSE))
}
selection <- c("a[", "b[", "c[", "g[", "Q[1,")
list_selection <- lapply(X = selection, FUN = function(x){
unlist(results[[2]][,grep(x = varnames, pattern = x, fixed = TRUE)])
})
plot_matrix <- t(do.call(what = "cbind", args = list_selection))
rm(list_selection)
if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <- 0 }
if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <- 1 }
if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <- 1 }
if (fit.force_through_origin) {GC_Origin <- 0} else {GC_Origin <- 1}
if(!is.null(baSAR_model)){
fit.method_plot <- paste(fit.method, "(user defined)")
}else{
fit.method_plot <- fit.method
}
xlim <- c(0, max(input_object[,grep(x = colnames(input_object), pattern = "DOSE")], na.rm = TRUE)*1.1)
ylim <- c(
min(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE),
max(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE)*1.1)
if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){
legend_pos <- "topright"
}else{
legend_pos <- "topleft"
}
plot_check <- try(plot(
NA,
NA,
ylim = ylim,
xlim = xlim,
ylab = expression(paste(L[x] / T[x])),
xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"},
main = "baSAR Dose Response Curves"
))
if (!is(plot_check, "try-error")) {
mtext(side = 3, text = paste("Fit:", fit.method_plot))
if (ncol(plot_matrix) != 0) {
x <- NA
for (i in seq(1, ncol(plot_matrix), length.out = 1000)) {
curve(
GC_Origin * plot_matrix[4, i] + LinGC * (plot_matrix[3, i] * x) +
ExpoGC * (plot_matrix[1, i] * (1 - exp (
-x / plot_matrix[2, i]
))),
add = TRUE,
col = rgb(0, 0, 0, .1)
)
}
}else{
try(stop("[analyse_baSAR()] Wrong 'variable.names' monitored, dose responses curves could not be plotted!", call. = FALSE))
}
n.col <-
length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")])
rug(side = 2, x = input_object[[9 + n.col]])
for (i in 2:length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")])) {
segments(
x0 = input_object[[8 + i]],
x1 = input_object[[8 + i]],
y0 = input_object[[8 + n.col + i]] - input_object[[8 + 2 * n.col + i]],
y1 = input_object[[8 + n.col + i]] + input_object[[8 + 2 * n.col + i]],
col = "grey"
)
points(
x = input_object[[8 + i]],
y = input_object[[8 + n.col + i]],
pch = 21,
col = col[11],
bg = "grey"
)
}
abline(
v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")],
lty = 3,
col = col[3],
lwd = 1.2
)
abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2])
legend(
legend_pos,
bty = "n",
horiz = FALSE,
lty = c(3, 2),
col = c(col[3], col[2]),
legend = c("HPD - 68 %", "HPD - 95 %")
)
legend(
"bottomright",
bty = "n",
horiz = FALSE,
pch = 21,
col = col[11],
bg = "grey",
legend = "measured dose points"
)
}
rm(plot_matrix)
if(distribution_plot == "abanico"){
plot_check <- plot_AbanicoPlot(
data = input_object[, c("DE", "DE.SD")],
zlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))},
log.z = if (distribution != "log_normal") {
FALSE
} else{
TRUE
},
z.0 = results[[1]]$CENTRAL,
y.axis = FALSE,
polygon.col = FALSE,
line = results[[1]][,c(
"CENTRAL_Q_.16", "CENTRAL_Q_.84", "CENTRAL_Q_.025", "CENTRAL_Q_.975")],
line.col = c(col[3], col[3], col[2], col[2]),
line.lty = c(3,3,2,2),
output = TRUE,
mtext = paste0(
nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))),
"/",
nrow(input_object),
" plotted (removed are NA values)"
)
)
if (!is.null(plot_check)) {
legend(
"topleft",
legend = c("Central dose", "HPD - 68%", "HPD - 95 %"),
lty = c(2, 3, 2),
col = c("black", col[3], col[2]),
bty = "n",
cex = par()$cex * 0.8
)
}
}else{
plot_check <- NULL
}
if(is.null(plot_check) && distribution_plot == "kde"){
plot_check <- try(suppressWarnings(plot_KDE(
data = input_object[, c("DE", "DE.SD")],
xlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))},
mtext = paste0(
nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))),
"/",
nrow(input_object),
" (removed are NA values)"
)
)))
if(!is(plot_check, "try-error")) {
abline(v = results[[1]]$CENTRAL, lty = 2)
abline(
v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")],
lty = 3,
col = col[3],
lwd = 1.2
)
abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2])
if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){
legend_pos <- "right"
}else{
legend_pos <- "topleft"
}
legend(
legend_pos,
legend = c("Central dose", "HPD - 68%", "HPD - 95 %"),
lty = c(2, 3, 2),
col = c("black", col[3], col[2]),
bty = "n",
cex = par()$cex * 0.8
)
}
}
}
return(set_RLum(
class = "RLum.Results",
data = list(
summary = results[[1]],
mcmc = results[[2]],
models = results[[3]],
input_object = input_object,
removed_aliquots = removed_aliquots
),
info = list(call = sys.call())
))
}
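## Usage sketch (assumptions: 'rjags' is installed and the example BIN-file
## data shipped with the Luminescence package are used; the arguments shown
## are illustrative, not a tested call).
# data(ExampleData.BINfileData, envir = environment())
# results <- analyse_baSAR(
#   object = CWOSL.SAR.Data,
#   source_doserate = c(0.04, 0.001),
#   signal.integral = c(1:2),
#   background.integral = c(80:100),
#   fit.method = "EXP",
#   plot = FALSE
# )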
|
simHMM=function(data,ddl=NULL,begin.time=1,model="hmmCJS",title="",model.parameters=list(),
design.parameters=list(),initial=NULL,groups=NULL,time.intervals=NULL,accumulate=TRUE,strata.labels=NULL)
{
setup=crm(data=data,ddl=ddl,begin.time=begin.time,model=model,title=title,model.parameters=model.parameters,
design.parameters=design.parameters,initial=initial,groups=groups,time.intervals=time.intervals,
accumulate=accumulate,run=FALSE,strata.labels=strata.labels)
if(nrow(setup$data$data)==1)stop("Use at least 2 capture histories (with accumulate=TRUE, data are collapsed to unique ch)")
parlist=setup$results$par
T=setup$data$nocc
m=setup$data$m
ch=NULL
df2=NULL
pars=list()
for(parname in names(setup$model.parameters))
pars[[parname]]=do.call("rbind",split(reals(ddl=setup$ddl[[parname]],dml=setup$dml[[parname]],parameters=setup$model.parameters[[parname]],
parlist=parlist[[parname]]),setup$ddl[[parname]]$id))
dmat=setup$data$fct_dmat(pars,m,setup$data$start[,2],T)
gamma=setup$data$fct_gamma(pars,m,setup$data$start[,2],T)
delta=setup$data$fct_delta(pars,m,setup$data$start[,2],T,setup$data$start)
for (id in as.numeric(setup$data$data$id))
{
history=matrix(0,nrow=setup$data$data$freq[id],ncol=T)
state=matrix(0,nrow=setup$data$data$freq[id],ncol=T)
state[,setup$data$start[id,2]]=apply(rmultinom(setup$data$data$freq[id],1,delta[id,]),
2,function(x)which(x==1))
for(k in 1:m)
{
instate=sum(state[,setup$data$start[id,2]]==k)
if(instate>0)
{
rmult=rmultinom(instate,1,dmat[id,setup$data$start[id,2],,k])
history[state[,setup$data$start[id,2]]==k,setup$data$start[id,2]]=
setup$data$ObsLevels[apply(rmult,2,function(x) which(x==1))]
}
}
for(j in (setup$data$start[id,2]+1):T)
{
for(k in 1:m)
{
instate=sum(state[,j-1]==k)
if(instate>0)
{
rmult=rmultinom(instate,1,gamma[id,j-1,k,])
state[state[,j-1]==k,j]= apply(rmult,2,function(x) which(x==1))
}
}
for(k in 1:m)
{
instate=sum(state[,j]==k)
if(instate>0)
{
rmult=rmultinom(instate,1,dmat[id,j,,k])
history[state[,j]==k,j]= setup$data$ObsLevels[apply(rmult,2,function(x) which(x==1))]
}
}
}
ch=c(ch,apply(history,1,paste,collapse=","))
cols2xclude=-which(names(setup$data$data)%in%c("ch","freq","id"))
if(is.null(df2))
df2=setup$data$data[rep(id,setup$data$data$freq[id]),cols2xclude,drop=FALSE]
else
df2=rbind(df2,setup$data$data[rep(id,setup$data$data$freq[id]),cols2xclude,drop=FALSE])
}
df=data.frame(ch=ch,stringsAsFactors=FALSE)
if(nrow(df2)==0)
return(df)
else
return(cbind(df,df2))
}
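## Usage sketch (assumptions: the 'marked' package provides crm() and the
## 'dipper' example data; real calls usually pass fitted parameter values via
## 'initial' so the simulation uses non-default parameters).
# library(marked)
# data(dipper)
# fake <- simHMM(dipper, model = "hmmCJS")
# head(fake$ch)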
|
dag_edge <- function(graph,
from,
to,
type = as.character(NA)) {
class_g <- class(graph)
if(length(class_g) > 1){
if(class_g[1] == chr("grViz") && class_g[2]=="htmlwidget"){
errorMessage <- paste0("Given rendered Causact Graph. Check the declaration for a dag_render() call.")
}
else {
errorMessage <- paste0("Cannot add dag_edge() to given object as it is not a Causact Graph.")
}
stop(errorMessage)
}
if(class_g != "causact_graph"){
errorMessage <- paste0("Cannot add dag_edge() to given object as it is not a Causact Graph.")
stop(errorMessage)
}
if(anyDuplicated(c(from,to))){
errorMessage <- paste("You have attempted to connect", c(from,to)[anyDuplicated(c(from,to))],"to itself, but this would create a cycle and is not a Directed Acyclic Graph. You cannot connect a node to itself in a DAG.")
stop(errorMessage)
}
fromIDs = findNodeID(graph,from)
if(anyNA(fromIDs)){
errorMessage <- paste("Node",from,"does not exist. Check for spelling errors. Check the order the nodes were created!")
stop(errorMessage)
}
toIDs = findNodeID(graph,to)
if(anyNA(toIDs)){
errorMessage <- paste("Node",to,"does not exist. Check for spelling errors. Check the order the nodes were created!")
stop(errorMessage)
}
numberOfEdges = max(length(from),length(to))
if(numberOfEdges == 0) {return(graph)}
edgeIDstart = max(graph$edges_df$id,0) + 1
edf = data.frame(
id = edgeIDstart:(edgeIDstart+numberOfEdges-1),
from = fromIDs,
to = toIDs,
type = type,
stringsAsFactors = FALSE
)
graph$edges_df = dplyr::bind_rows(graph$edges_df,edf)
return(graph)
}
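## Minimal sketch (assumption: dag_create() and dag_node() from this package
## build the causact_graph that dag_edge() extends).
# graph <- dag_create()
# graph <- dag_node(graph, "X")
# graph <- dag_node(graph, "Y")
# graph <- dag_edge(graph, from = "X", to = "Y")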
|
view.seis<-function(aday, ihour, inkhour, SAVEFILE, days, DB, usta, acomp, STDLAB =c("QUIT", "NEXT", "PREV", "HALF") , TZ=NULL )
{
if(missing(STDLAB ))
{
STDLAB = c("QUIT", "NEXT", "PREV", "HALF", "WPIX", "zoom out",
"refresh", "restore", "SPEC", "SGRAM" ,"WLET", "FILT", "UNFILT",
"Pinfo", "WINFO","Postscript")
}
if(missing(TZ)) { TZ=NULL }
if(is.null(attr(DB, "origyr"))) attr(DB, "origyr") = min(DB$yr, na.rm=TRUE)
if(!is.list(days) )
{
print("NO days list")
print("using:")
udays = unique(paste(DB$yr, DB$jd))
sdays = as.numeric( unlist( strsplit(udays, split=" ") ) )
ye = sdays[seq(from=1, to=length(sdays), by=2)]
d = sdays[seq(from=2, to=length(sdays), by=2)]
o = order(ye+d/366)
days = list(yr = ye[o], jd=d[o])
print("using:")
print(days)
}
else{
if(length(days$jd)<1 | length(days$yr)<1 )
{
udays = unique(paste(DB$yr, DB$jd))
sdays = as.numeric( unlist( strsplit(udays, split=" ") ) )
ye = sdays[seq(from=1, to=length(sdays), by=2)]
d = sdays[seq(from=2, to=length(sdays), by=2)]
o = order(ye+d/366)
days = list(yr = ye[o], jd=d[o])
}
}
eday = EPOCHday(days$yr[aday], jd=days$jd[aday], origyr = attr(DB, "origyr") )
while( ihour >= 0 & ihour < 24)
{
at1 = eday$jday+(ihour)/24
cat(paste(days$yr[aday], days$jd[aday],ihour, eday$jday, at1), sep="\n")
at2 = at1+inkhour/24
GH = Mine.seis(at1, at2, DB, usta, acomp)
GH$TZ=TZ
if(length(GH$STNS)<1)
{
ihour = ihour+inkhour
next
}
KOUT = swig(GH, STDLAB=STDLAB)
if(length(KOUT$WPX$yr)>1) { save.wpix(KOUT, fn= SAVEFILE) }
if(KOUT$but == "QUIT") {
break }
if(KOUT$but == "NEXT") {
ihour = ihour+inkhour;
next }
if(KOUT$but == "PREV") {
ihour = ihour-inkhour;
next }
if(KOUT$but == "HALF") {
ihour = ihour+inkhour/2;
next }
ihour = ihour+inkhour
}
}
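## Usage sketch (assumptions: 'DB' was created with RSEIS database helpers
## such as makeDB, and 'usta'/'acomp' match stations/components stored in DB;
## the names and dates below are illustrative).
# view.seis(aday = 1, ihour = 0, inkhour = 1,
#           SAVEFILE = "wpix_picks.RDS",
#           days = list(yr = 2019, jd = 32),
#           DB = DB, usta = "STA1", acomp = "V")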
|
amod <- structure(function
(
x,
mp = c(1,1),
fun = y ~ a*(x ^ b)
) {
xn. <- FALSE
if(is.data.frame(x)){
xnu <- cClass(x,'numeric')
xn <- c(cClass(x,'integer'),
cClass(x,'factor'))
xn. <- length(xn)!=0
xn.. <- xn[!xn%in%c('x','csx')]
cd <- x
x <- x[,'csx']
names(x) <- cd[,'year']}
feval <- function(fun,...){
e <- list(...)
y <- eval(parse(text=fun), e)
return(y)}
allv <- all.vars(fun)
prm <- allv[!allv%in%letters[20:26]]
spt <- ceiling(seq_along(mp)/length(prm))
if(!is.list(mp)){
mp <- split(mp,spt)
}
dpr <- data.frame(do.call(rbind,mp))
names(dpr) <- prm
for(i in 1:length(mp)){
x <- do.call(feval,
c(fun,as.list(dpr[i,])))
}
x1 <- c(NA,diff(x))
names(x1) <- names(x)
xd <- data.frame(x = x1, csx = x)
if(xn.&& length(xnu) > 1){
xd <- cd[,xnu]
xd[,'x'] <- x1
xd[,'csx'] <- x }
if(xn.)
xd <- cbind(xd,cd[,xn..])
return(xd)
} , ex=function() {
set.seed(1)
trw <- ts(abs(rnorm(12,1,1)),start = 1950)
cri <- cumsum(trw)
td <- amod(cri,mp = c(2,1))
plot(ts(td))
})
|
netrank <- function(x, small.values = x$small.values, method, nsim,
fixed = x$fixed, random = x$random,
warn.deprecated = gs("warn.deprecated"),
...) {
chkclass(x, c("netmeta", "netcomb", "rankogram"))
x <- updateversion(x)
if (missing(method))
if (inherits(x, c("netmeta", "netcomb")))
method <- "P-score"
else
method <- "SUCRA"
else
method <- setchar(method, c("P-score", "SUCRA"))
if (is.null(small.values))
small.values <- "good"
else
small.values <- setchar(small.values, c("good", "bad"))
args <- list(...)
chklogical(warn.deprecated)
fixed <- deprecated(fixed, missing(fixed), args, "comb.fixed",
warn.deprecated)
chklogical(fixed)
random <- deprecated(random, missing(random), args, "comb.random",
warn.deprecated)
chklogical(random)
if (method == "SUCRA") {
if (inherits(x, "netcomb"))
stop("netcomb object is not compatible with SUCRAs.",
call. = FALSE)
else if (inherits(x, "netmeta")) {
if (missing(nsim))
nsim <- 1000
rnk <- rankogram(x,
nsim = nsim,
small.values = small.values,
fixed = fixed, random = random)
}
else {
if (!missing(nsim))
warning("Argument 'nsim' ignored for rankogram object.",
call. = FALSE)
rnk <- x
x <- rnk$x
nsim <- rnk$nsim
}
P.fixed <- NULL
P.random <- NULL
ranking.fixed <- rnk$ranking.fixed
ranking.random <- rnk$ranking.random
}
else {
nsim <- NULL
TE.fixed <- x$TE.fixed
pval.fixed <- x$pval.fixed
TE.random <- x$TE.random
pval.random <- x$pval.random
w.fixed <- (1 + sign(TE.fixed)) / 2
p.fixed <- pval.fixed
if (small.values == "good")
P.fixed <-
w.fixed * p.fixed / 2 + (1 - w.fixed) * (1 - p.fixed / 2)
else
P.fixed <-
w.fixed * (1 - p.fixed / 2) + (1 - w.fixed) * p.fixed / 2
w.random <- (1 + sign(TE.random)) / 2
p.random <- pval.random
if (small.values == "good")
P.random <-
w.random * p.random / 2 + (1 - w.random) * (1 - p.random / 2)
else
P.random <-
w.random * (1 - p.random / 2) + (1 - w.random) * p.random / 2
ranking.fixed <- rowMeans(P.fixed, na.rm = TRUE)
if (!all(is.na(TE.random)))
ranking.random <- rowMeans(P.random, na.rm = TRUE)
else
ranking.random <- NA
}
res <- list(ranking.fixed = ranking.fixed,
Pmatrix.fixed = P.fixed,
ranking.random = ranking.random,
Pmatrix.random = P.random,
method = method,
small.values = small.values,
nsim = nsim,
fixed = fixed,
random = random,
x = x,
title = x$title,
version = packageDescription("netmeta")$Version,
Pscore = paste("'Pscore' replaced by 'ranking.fixed'",
"and 'ranking.random'."),
Pmatrix = paste("'Pmatrix' replaced by 'Pmatrix.fixed'",
"and 'Pmatrix.random'."),
Pscore.fixed = if (method == "P-score") ranking.fixed else NULL,
Pscore.random = if (method == "P-score") ranking.random else NULL
)
class(res) <- "netrank"
res
}
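## Usage sketch (assumption: the 'netmeta' package and its 'Senn2013'
## example data are available).
# library(netmeta)
# data(Senn2013)
# net1 <- netmeta(TE, seTE, treat1, treat2, studlab,
#                 data = Senn2013, sm = "MD")
# netrank(net1, small.values = "good")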
print.netrank <- function(x,
fixed = x$fixed,
random = x$random,
sort = TRUE,
digits = max(4, .Options$digits - 3),
warn.deprecated = gs("warn.deprecated"),
...) {
chkclass(x, "netrank")
x <- updateversion(x)
if (is.character(sort))
sort <- setchar(sort, c("fixed", "random"))
else
chklogical(sort)
chknumeric(digits, length = 1)
args <- list(...)
chklogical(warn.deprecated)
fixed <- deprecated(fixed, missing(fixed), args, "comb.fixed",
warn.deprecated)
chklogical(fixed)
random <- deprecated(random, missing(random), args, "comb.random",
warn.deprecated)
chklogical(random)
both <- (fixed + random) == 2
if (!both & is.character(sort)) {
if (fixed & sort == "random") {
warning("Argument 'sort=\"random\"' ignored for fixed effects model.",
call. = FALSE)
sort <- TRUE
}
if (random & sort == "fixed") {
warning("Argument 'sort=\"fixed\"' ignored for random effects model.",
call. = FALSE)
sort <- TRUE
}
}
else if (both & !is.character(sort) && sort)
sort <- "random"
if (is.null(x$method)) {
x$method <- "P-score"
x$ranking.fixed <- x$Pscore.fixed
x$ranking.random <- x$Pscore.random
}
if (both) {
if (is.character(sort)) {
res.both <- data.frame(fixed = round(x$ranking.fixed, digits),
random = round(x$ranking.random, digits))
res.both <- res.both[order(-res.both[, sort]), ]
}
else if (!sort) {
res.both <- data.frame(fixed = round(x$ranking.fixed[x$x$seq], digits),
random = round(x$ranking.random[x$x$seq], digits))
}
colnames(res.both) <- paste(x$method, c("(fixed)", "(random)"))
}
else {
if (sort) {
if (fixed)
res.fixed <-
as.data.frame(round(x$ranking.fixed[order(-x$ranking.fixed)],
digits))
if (random)
res.random <-
as.data.frame(round(x$ranking.random[order(-x$ranking.random)],
digits))
}
else {
if (fixed)
res.fixed <- as.data.frame(round(x$ranking.fixed[x$x$seq], digits))
if (random)
res.random <- as.data.frame(round(x$ranking.random[x$x$seq], digits))
}
if (fixed)
colnames(res.fixed) <- x$method
if (random)
colnames(res.random) <- x$method
}
matitle(x)
if (both)
prmatrix(res.both, quote = FALSE, ...)
else if (fixed) {
prmatrix(res.fixed, quote = FALSE, ...)
if (random)
cat("\n")
}
else if (random) {
prmatrix(res.random, quote = FALSE, ...)
}
if (x$method == "SUCRA" & !is.null(x$nsim))
cat(paste0("\n- based on ", x$nsim,
" simulation", if (x$nsim > 1) "s", "\n"))
invisible(NULL)
}
|
context("at")
df1 <- data.frame(x = 1:2, y = 4:5, col = 'black', type = letters[1:2], stringsAsFactors = FALSE)
df2 <- data.frame(x = 11:12, y = 14:15, col = 'white', type = letters[1], stringsAsFactors = FALSE)
test_that("tween_at works", {
tween <- tween_at(df1, df2, 0.5, 'linear')
expect_equal(nrow(tween), 2)
expect_named(tween, names(df1))
expect_equal(tween$x, c(6, 7))
expect_equal(tween$col[1], '#777777') # value truncated in the source; '#777777' is the Lab-space midpoint of black and white (reconstructed, assumption)
})
test_that("tween_at handles weird input", {
tween <- tween_at(df1, df2[1,], 0.5, 'linear')
expect_equal(nrow(tween), 2)
tween <- tween_at(df1[1,], df2, 0.5, 'linear')
expect_equal(nrow(tween), 2)
tween <- tween_at(df1, df2[integer(),], 0.5, 'linear')
expect_equal(nrow(tween), 0)
tween <- tween_at(df1[integer(),], df2, 0.5, 'linear')
expect_equal(nrow(tween), 0)
expect_error(tween_at(df1[c(1,2,1), ], df2, 0.5, 'linear'))
expect_error(tween_at(df1, df2, numeric(), 'linear'))
expect_error(tween_at(df1, df2, 0.5, character()))
})
test_that('tween_at works with vectors', {
tween <- tween_at(df1$x, df2$x, 0.5, 'linear')
expect_is(tween, 'numeric')
expect_equal(tween, c(6,7))
expect_error(tween_at(df1$x, df2$col))
})
|
QQplot <-
function(nsample=100, yobs,nameC, taudf){
dev.new()
x=1:nsample
x=as.matrix(t(x)/(nsample+1))
yy=as.matrix(qchisq(x, df=taudf))
zm=max(yobs)
zmm=max(yy)
zmax=max(zm,zmm)
xlimite=c(0,1.1*zmax)
ylimite=c(0,1.1*zmax)
chi_scores<-qchisq(ppoints(nsample), df = taudf)
qqplot(chi_scores, yobs,
xlab=c(nameC,taudf), ylab="Sample Quantiles ",pch=".",cex=2,col="blue",mar = c(5, 4, 4, 2) + 0.1,xlim=xlimite,ylim=ylimite)
abline(a=0,b=1,col="red")
points(quantile(chi_scores,c(.01,.99)),
quantile(chi_scores,c(.01,.99)),cex=1,bg="green",pch=21)
abline(v=quantile(chi_scores,c(.01,.99)),
col="green",lty=2)
points(quantile(chi_scores,c(.05,.95)),
quantile(chi_scores,c(.05,.95)),cex=1,bg="red",pch=21)
abline(v=quantile(chi_scores,c(.05,.95)),
col="red",lty=2)
}
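## Quick self-check (sketch): chi-square distributed observations should fall
## close to the identity line drawn by QQplot().
# set.seed(1)
# QQplot(nsample = 200, yobs = rchisq(200, df = 3),
#        nameC = "Theoretical quantiles, df =", taudf = 3)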
|
XML2R <- function(urls, xpath, df=FALSE) {
obs <- XML2Obs(urls, xpath)
collapse_obs(obs)
}
XML2Obs <- function(urls, xpath, append.value=TRUE, as.equiv=TRUE, url.map=FALSE, async=FALSE, quiet=FALSE) {
if (missing(xpath)) xpath <- "/"
docs <- urlsToDocs(urls, async=async, quiet=quiet)
valid.urls <- sapply(docs, function(x) attr(x, "XMLsource"))
nodes <- docsToNodes(docs, xpath)
rm(docs)
gc()
l <- nodesToList(nodes)
rm(nodes)
gc()
obs <- listsToObs(l, urls=valid.urls, append.value=append.value, as.equiv=as.equiv, url.map=url.map)
return(obs)
}
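## Usage sketch (assumption: the URL points at a reachable XML document; the
## address and xpath below are illustrative only).
# urls <- "http://www.w3schools.com/xml/plant_catalog.xml"
# obs <- XML2Obs(urls, xpath = "//PLANT")
# tables <- collapse_obs(obs)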
|
setGeneric("startMareyMapGUI", function(dummy) standardGeneric("startMareyMapGUI"))
setMethod("startMareyMapGUI", "missing", function(dummy) {
regVar("curSet", "")
regVar("curMap", "")
regVar("curMkr", 0)
mareymaps <- MapCollection()
mainWin <- tktoplevel(height = 700, width = 1000)
tkwm.title(mainWin, "Marey Map")
regVar("coll", mareymaps)
tkconfigure(mainWin, menu = MenuBar(mainWin))
tkgrid(tklabel(mainWin, text = ""))
tkgrid(namlbl <- tklabel(mainWin, text = "Load a data set (File - Open)"), columnspan = 7)
leftFrm <- tkframe(mainWin, relief = "flat")
tkpack(MapList(leftFrm), side = "top", fill = "x")
tkpack(tklabel(leftFrm, text = ""), side = "top")
tkpack(MarkerInfo(leftFrm), side = "top")
rightFrm <- tkframe(mainWin, relief = "flat")
tkpack(InterpolationList(rightFrm), side = "top")
tkpack(tklabel(rightFrm, text = ""), side = "top")
tkpack(LocalRateQuery(rightFrm), side = "top", fill = "x")
tkgrid(tklabel(mainWin, text = ""))
tkgrid(tklabel(mainWin, text = ""), leftFrm, tklabel(mainWin, text = ""), PlotArea(mainWin), tklabel(mainWin, text = ""), rightFrm, tklabel(mainWin, text = ""))
tkgrid(tklabel(mainWin, text = ""))
tkgrid.configure(leftFrm, sticky = "ns")
tkgrid.configure(rightFrm, sticky = "ns")
attachCallback("updateSpcLbl", function() {tkconfigure(namlbl, text = getVar("curSet"))}, "curSet")
attachCallback("updateMapLbl", function() {
map <- regEval("coll[[curSet, curMap]]")
if(length(map) == 0) {
if(getVar("curSet") != "")
tkconfigure(namlbl, text = getVar("curSet"))
else
tkconfigure(namlbl, text = "Please select a set from the menu ('Data')")
} else {
tkconfigure(namlbl, text = paste(setName(map), " - ", mapName(map), ", ", length(physicalPositions(map)), " markers", sep = ""))
}
}, "curMkr")
})
|
ReadModflowBinary <- function(path, data.type=c("array", "flow"),
endian=c("little", "big"), rm.totim.0=FALSE) {
checkmate::assertFileExists(path)
data.type <- match.arg(data.type)
endian <- match.arg(endian)
checkmate::assertFlag(rm.totim.0)
ans <- try(.ReadBinary(path, data.type, endian, nbytes=4L), silent=TRUE)
if (inherits(ans, "try-error"))
ans <- .ReadBinary(path, data.type, endian, nbytes=8L)
if (rm.totim.0)
ans <- ans[vapply(ans, function(i) i$totim, 0) != 0]
ans
}
.ReadBinary <- function(path, data.type, endian, nbytes) {
checkmate::assertFileExists(path)
checkmate::assertString(data.type)
checkmate::assertString(endian)
stopifnot(nbytes %in% c(4L, 8L))
con <- file(path, open="rb", encoding="bytes")
on.exit(close(con, type="rb"))
if (data.type == "array")
valid.desc <- c("center elevation",
"change in eff-st",
"change in g-strs",
"change in pcstrs",
"compaction",
"critical head",
"d critical head",
"drawdown",
"dsys compaction",
"effective stress",
"geostatic stress",
"head",
"head in hgu",
"layer compaction",
"nd critical head",
"ndsys compaction",
"preconsol stress",
"subsidence",
"system compaction",
"thickness",
"void ratio",
"z displacement")
else
valid.desc <- c("constant head",
"drains",
"flow front face",
"flow lower face",
"flow right face",
"lake seepage",
"river leakage",
"storage",
"wells")
lst <- list()
repeat {
kstp <- readBin(con, "integer", n=1L, size=4L, endian=endian)
if (length(kstp) == 0) break
kper <- readBin(con, "integer", n=1L, size=4L, endian=endian)
if (data.type == "array") {
pertim <- readBin(con, "numeric", n=1L, size=nbytes, endian=endian)
totim <- readBin(con, "numeric", n=1L, size=nbytes, endian=endian)
desc <- readBin(readBin(con, "raw", n=16L, size=1L, endian=endian),
"character", n=1L, endian=endian)
desc <- .TidyDescription(desc)
if (!desc %in% valid.desc) break
ncol <- readBin(con, "integer", n=1L, size=4L, endian=endian)
nrow <- readBin(con, "integer", n=1L, size=4L, endian=endian)
layer <- readBin(con, "integer", n=1L, size=4L, endian=endian)
v <- readBin(con, "numeric", n=nrow * ncol, size=nbytes, endian=endian)
d <- matrix(v, nrow=nrow, ncol=ncol, byrow=TRUE)
lst[[length(lst) + 1]] <- list("d" = d,
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = layer,
"pertim" = pertim,
"totim" = totim)
} else if (data.type == "flow") {
desc <- readBin(readBin(con, "raw", n=16L, size=1L, endian=endian),
"character", n=1L, endian=endian)
desc <- .TidyDescription(desc)
if (!desc %in% valid.desc) break
ncol <- readBin(con, "integer", n=1L, size=4L, endian=endian)
nrow <- readBin(con, "integer", n=1L, size=4L, endian=endian)
nlay <- readBin(con, "integer", n=1L, size=4L, endian=endian)
if (nlay > 0) {
x <- .Read3dArray(con, nrow, ncol, nlay, nbytes, endian)
for (i in seq_len(nlay)) {
lst[[length(lst) + 1]] <- list("d" = x[[i]],
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = i)
}
} else {
nlay <- abs(nlay)
itype <- readBin(con, "integer", n=1L, size=4L, endian=endian)
delt <- readBin(con, "numeric", n=1L, size=nbytes, endian=endian)
pertim <- readBin(con, "numeric", n=1L, size=nbytes, endian=endian)
totim <- readBin(con, "numeric", n=1L, size=nbytes, endian=endian)
if (itype == 5L)
nval <- readBin(con, "integer", n=1L, size=4L, endian=endian)
else
nval <- 1L
if (nval > 100) stop("more than one hundred variables for each cell")
if (nval > 1) {
ctmp <- readBin(readBin(con, "raw", n=16L, size=1L, endian=endian),
"character", n=nval - 1L, endian=endian)
ctmp <- .TidyDescription(ctmp)
} else {
ctmp <- NULL
}
if (itype %in% c(0L, 1L)) {
d <- .Read3dArray(con, nrow, ncol, nlay, nbytes, endian)
for (i in seq_along(d)) {
lst[[length(lst) + 1]] <- list("d" = d[[i]],
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = i,
"delt" = delt,
"pertim" = pertim,
"totim" = totim)
}
} else if (itype %in% c(2L, 5L)) {
nlist <- readBin(con, "integer", n=1L, size=4L, endian=endian)
if (nlist > (nrow * ncol * nlay))
stop("large number of cells for which values will be stored")
if (nlist > 0) {
d <- matrix(0, nrow=nlist, ncol=nval + 4L)
colnames(d) <- make.names(c("icell", "layer", "row", "column", "flow", ctmp),
unique=TRUE)
for (i in seq_len(nlist)) {
d[i, 1] <- readBin(con, "integer", n=1L, size=4L, endian=endian)
d[i, seq_len(nval) + 4] <- readBin(con, "numeric", n=nval,
size=nbytes, endian=endian)
}
nrc <- nrow * ncol
d[, "layer"] <- as.integer((d[, "icell"] - 1L) / nrc + 1L)
d[, "row"] <- as.integer(((d[, "icell"] - (d[, "layer"] - 1L) * nrc)
- 1L) / ncol + 1L)
d[, "column"] <- as.integer(d[, "icell"] - (d[, "layer"] - 1L)
* nrc - (d[, "row"] - 1L) * ncol)
lst[[length(lst) + 1]] <- list("d" = d,
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"delt" = delt,
"pertim" = pertim,
"totim" = totim)
}
} else if (itype == 3L) {
layers <- readBin(con, "integer", n=nrow * ncol, size=4L, endian=endian)
values <- readBin(con, "numeric", n=nrow * ncol, size=nbytes, endian=endian)
for (i in sort(unique(layers))) {
v <- values[layers == i]
d <- matrix(v, nrow=nrow, ncol=ncol, byrow=TRUE)
lst[[length(lst) + 1]] <- list("d" = d,
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = i,
"delt" = delt,
"pertim" = pertim,
"totim" = totim)
}
} else if (itype == 4L) {
v <- readBin(con, "numeric", n=nrow * ncol, size=nbytes, endian=endian)
d <- matrix(v, nrow=nrow, ncol=ncol, byrow=TRUE)
lst[[length(lst) + 1]] <- list("d" = d,
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = 1L,
"delt" = delt,
"pertim" = pertim,
"totim" = totim)
d[, ] <- 0
for (i in seq_len(nlay)[-1]) {
lst[[length(lst) + 1]] <- list("d" = d,
"kstp" = kstp,
"kper" = kper,
"desc" = desc,
"layer" = i,
"delt" = delt,
"pertim" = pertim,
"totim" = totim)
}
} else {
stop("data storage type is not recognized")
}
}
}
}
lst
}
.Read3dArray <- function(con, nrow, ncol, nlay, nbytes, endian) {
checkmate::assertClass(con, c("file", "connection"))
checkmate::assertCount(nrow, positive=TRUE)
checkmate::assertCount(ncol, positive=TRUE)
checkmate::assertCount(nlay, positive=TRUE)
checkmate::assertInt(nbytes)
checkmate::assertString(endian)
lapply(seq_len(nlay), function(i) {
v <- readBin(con, "numeric", n=nrow * ncol, size=nbytes, endian=endian)
matrix(v, nrow=nrow, ncol=ncol, byrow=TRUE)
})
}
.TidyDescription <- function(desc) {
checkmate::assertCharacter(desc)
tolower(gsub("(^ +)|( +$)", "", desc))
}
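## Usage sketch (assumptions: 'heads.bin' is a MODFLOW binary head file and
## the byte order of the file matches 'endian'; the file name is illustrative).
# heads <- ReadModflowBinary("heads.bin", data.type = "array", endian = "little")
# str(heads[[1]])  # list with matrix 'd' plus kstp, kper, desc, layer, pertim, totim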
|
data("cdnow")
data("apparelTrans")
data("apparelStaticCov")
fct.testthat.correctness.clvfittedtransactions(name.model = "BG/NBD", method=bgnbd, data.cdnow=cdnow,
data.apparelTrans=apparelTrans, data.apparelStaticCov=apparelStaticCov,
correct.start.params.model = c(r=1, alpha = 3, a = 1, b = 3),
correct.params.nocov.coef = c(r = 0.2425945, alpha = 4.4136019, a = 0.7929199, b = 2.4258881),
correct.LL.nocov = -9582.429,
kkt2.true = TRUE)
context("Correctness - BG/NBD nocov - Recover parameters")
fct.testthat.correctness.clvfitted.correct.coefs(method = bgnbd,
cdnow = cdnow,
start.params.model = c(r=1, alpha = 3, a = 1, b = 3),
params.nocov.coef = c(r = 0.243, alpha = 4.414, a = 0.793, b = 2.426),
LL.nocov = -9582.4)
context("Correctness - BG/NBD nocov - Expectation")
test_that("Expectation in Rcpp matches expectation in R (nocov)", {
skip_on_cran()
expect_silent(clv.cdnow <- clvdata(data.transactions = cdnow,
date.format = "ymd", time.unit = "W", estimation.split = 38,
name.id = "Id", name.date = "Date", name.price = "Price"))
expect_silent(obj.fitted <- bgnbd(clv.data = clv.cdnow, verbose = FALSE))
params_i <- obj.fitted@cbs[, c("Id", "T.cal", "date.first.actual.trans")]
params_i[, r := [email protected][["r"]]]
params_i[, alpha := [email protected][["alpha"]]]
params_i[, a := [email protected][["a"]]]
params_i[, b := [email protected][["b"]]]
fct.expectation.R <- function(params_i.t){
term1 <- params_i.t[,(a + b - 1)/(a - 1)]
term2 <- params_i.t[,(alpha/(alpha + t_i))^r]
term3 <- params_i.t[, vec_gsl_hyp2f1_e(r, b, a+b-1, t_i/(alpha+t_i) )$value]
return(term1 * (1 - term2 * term3))
}
fct.testthat.correctness.clvfittedtransactions.same.expectation.in.R.and.Cpp(fct.expectation.R = fct.expectation.R,
params_i = params_i,
obj.fitted = obj.fitted)
})
context("Correctness - BG/NBD staticcov - Expectation")
test_that("Expectation in Rcpp matches expectation in R (staticcov)", {
skip_on_cran()
apparelTrans.later <- copy(apparelTrans)
apparelTrans.later[Id %in% c("1", "10", "100"), Date := Date + lubridate::weeks(10)]
clv.apparel.static <- fct.helper.create.clvdata.apparel.staticcov(data.apparelTrans = apparelTrans.later,
data.apparelStaticCov = apparelStaticCov,
estimation.split = 38)
expect_silent(obj.fitted <- bgnbd(clv.data = clv.apparel.static, verbose = FALSE))
params_i <- obj.fitted@cbs[, c("Id", "T.cal", "date.first.actual.trans")]
m.cov.data.life <- clv.data.get.matrix.data.cov.life([email protected], correct.row.names=params_i$Id,
correct.col.names=names([email protected]))
m.cov.data.trans <- clv.data.get.matrix.data.cov.trans([email protected], correct.row.names=params_i$Id,
correct.col.names=names([email protected]))
params_i[, r := [email protected][["r"]]]
params_i[, alpha_i := [email protected][["alpha"]] * exp( -m.cov.data.trans %*% [email protected])]
params_i[, a_i := [email protected][["a"]] * exp( m.cov.data.life %*% [email protected])]
params_i[, b_i := [email protected][["b"]] * exp( m.cov.data.life %*% [email protected])]
fct.expectation.R <- function(params_i.t){
term1 <- params_i.t[,(a_i + b_i - 1)/(a_i - 1)]
term2 <- params_i.t[,(alpha_i/(alpha_i + t_i))^r]
term3 <- params_i.t[, vec_gsl_hyp2f1_e(r, b_i, a_i+b_i-1, t_i/(alpha_i+t_i))$value]
return(term1 * (1 - term2 * term3))
}
fct.testthat.correctness.clvfittedtransactions.same.expectation.in.R.and.Cpp(fct.expectation.R = fct.expectation.R,
params_i = params_i,
obj.fitted = obj.fitted)
})
|
library(ggplot2)
library(dplyr)
library(scales)
library(photobiology)
library(photobiologyWavebands)
library(ggspectra)
library(ggrepel)
good_label_repel <- packageVersion('ggrepel') != "0.8.0"
library(knitr)
opts_chunk$set(fig.align = 'center',
fig.show = 'hold', fig.width = 7, fig.height = 4,
cache = FALSE)
options(warnPartialMatchArgs = FALSE)
two_suns.mspct <- source_mspct(list(sun1 = sun.spct, sun2 = sun.spct / 2))
two_suns.spct <- rbindspct(two_suns.mspct)
theme_set(theme_bw())
ggplot(sun.spct) + geom_line()
ggplot(two_suns.spct) + aes(color = spct.idx) + geom_line()
ggplot(two_suns.spct, aes(w.length, s.e.irrad, color = spct.idx)) + geom_line()
ggplot(sun.spct, unit.out = "photon") + geom_line()
photon_as_default()
ggplot(sun.spct) + geom_line()
ggplot(sun.spct, unit.out = "energy") + geom_line()
unset_user_defaults()
ggplot(yellow_gel.spct) + geom_line()
ggplot(yellow_gel.spct, plot.qty = "absorbance") + geom_line()
Afr_as_default()
ggplot(yellow_gel.spct) + geom_line()
ggplot(polyester.spct) + geom_line()
unset_user_defaults()
ggplot(two_suns.mspct) +
aes(linetype = spct.idx) +
wl_guide(ymax = -0.05) +
geom_line()
ggplot(two_suns.mspct) +
wl_guide(ymax = -0.05) +
geom_spct() +
geom_line() +
facet_wrap(~spct.idx, ncol = 1L)
ggplot(two_suns.mspct) +
wl_guide(ymax = -0.05) +
geom_spct() +
geom_line() +
facet_wrap(~spct.idx, ncol = 1L, scales = "free_y")
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous()
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(unit.exponent = -6) +
scale_y_s.e.irrad_continuous(unit.exponent = -3)
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(unit.exponent = -7) +
scale_y_s.e.irrad_continuous()
nearest_SI_exponent(-4)
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(unit.exponent = nearest_SI_exponent(-4)) +
scale_y_s.e.irrad_continuous()
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous(unit.exponent = 0,
labels = SI_tg_format(exponent = -3))
temp.spct <- clean(sun.spct, range.s.data = c(1e-20, Inf), fill = 1e-20)
ggplot(temp.spct) +
geom_line(na.rm = TRUE) +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous(unit.exponent = 0,
trans = "log10",
labels = trans_format("log10", math_format()),
limits = c(1e-6, NA))
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(label.text = "Longitud de onda,") +
scale_y_s.e.irrad_continuous(label.text = "Irradiancia,")
norm_sun.spct <- normalize(sun.spct)
ggplot(norm_sun.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous(normalized = getNormalized(norm_sun.spct))
scaled_sun.spct <- fscale(sun.spct)
ggplot(scaled_sun.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous(scaled = is_scaled(scaled_sun.spct))
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous()
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(sec.axis = sec_axis_w_number())
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(sec.axis = sec_axis_w_frequency())
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(sec.axis = sec_axis_w_frequency(15))
ggplot(white_led.raw_spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_counts_continuous()
ggplot(white_led.raw_spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_counts_tg_continuous()
ggplot(white_led.cps_spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_cps_continuous(unit.exponent = 3)
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_continuous()
ggplot(sun.spct, unit.out = "photon") +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.irrad_log10(unit.exponent = -6)
ggplot(ccd.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.response_continuous(unit.exponent = 6)
ggplot(ccd.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_s.e.action_continuous(unit.exponent = 6)
ggplot(yellow_gel.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_Tfr_continuous(Tfr.type = getTfrType(yellow_gel.spct))
ggplot(yellow_gel.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_Tfr_continuous(Tfr.type = getTfrType(yellow_gel.spct),
labels = percent)
gel_internal.spct <- convertTfrType(yellow_gel.spct, Tfr.type = "internal")
ggplot(gel_internal.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_Tfr_continuous(Tfr.type = getTfrType(gel_internal.spct))
ggplot(gel_internal.spct, plot.qty = "absorbance") +
geom_line() +
scale_x_wl_continuous() +
scale_y_A_continuous(Tfr.type = getTfrType(gel_internal.spct))
ggplot(yellow_gel.spct, plot.qty = "absorptance") +
geom_line() +
scale_x_wl_continuous() +
scale_y_Afr_continuous()
ggplot(green_leaf.spct) +
geom_line() +
scale_x_wl_continuous() +
scale_y_Rfr_continuous(Rfr.type = getRfrType(green_leaf.spct))
ggplot(sun.spct) + geom_line() + stat_peaks(color = "red")
ggplot(sun.spct, unit.out = "photon") + geom_line() + stat_peaks(color = "red")
ggplot(sun.spct) + geom_line() + stat_valleys(color = "blue")
ggplot(yellow_gel.spct) + geom_line() + stat_find_wls(color = "darkgreen")
ggplot(sun.spct) + geom_line() +
stat_peaks(shape = 21, color = "black") + scale_fill_identity()
ggplot(sun.spct) + geom_line() +
stat_peaks(span = 21, shape = 4, color = "red", size = 2) +
stat_peaks(span = 21, color = "red", geom = "rug", sides = "b")
ggplot(sun.spct) + geom_line() +
stat_peaks(span = 21, geom = "text", color = "red", vjust = "bottom")
ggplot(sun.spct) + geom_line() +
stat_peaks(shape = 21, span = 25, size = 2) +
scale_fill_identity() +
stat_peaks(aes(color = ..BW.color..),
geom = "label", span = 25, vjust = "bottom", size = 3) +
scale_color_identity()
ggplot(sun.spct) + geom_line() +
stat_peaks(shape = 21, span = 25, size = 2) +
stat_label_peaks(geom = "label_repel", span = 41,
segment.color = "black",
size = 3, vjust = 1, nudge_y = 0.1,
direction = "y") +
stat_valleys(shape = 21, span = 25, size = 2) +
stat_label_valleys(geom = "label_repel", span = 51,
segment.color = "black",
size = 3, vjust = 0, nudge_y = -0.1,
direction = "y") +
scale_fill_identity() + scale_color_identity() +
expand_limits(y = c(-0.08, 0.9))
ggplot(sun.spct) + geom_line() +
stat_peaks(shape = 21, span = 25, size = 2) + scale_fill_identity() +
stat_label_peaks(geom = "label", span = 25, size = 3, na.rm = TRUE) +
scale_color_identity() +
expand_limits(y = c(NA, 0.9))
ggplot(sun.spct) + geom_line() +
stat_peaks(span = NULL, geom = "vline", linetype = "dotted", color = "red") +
stat_peaks(span = NULL, geom = "hline", linetype = "dotted", color = "red")
ggplot(sun.spct) + geom_line() +
stat_label_peaks(aes(label = stat(y.label)),
span = 31, geom = "label_repel", size = 3,
label.fmt = "y = %1.2f", segment.colour = "red",
min.segment.length = unit(0.05, "lines"),
nudge_y = 0.1) +
expand_limits(y = 1) +
scale_fill_identity() + scale_color_identity()
ggplot(sun.spct) + geom_line() +
stat_peaks(span = NULL, color = "red") +
stat_peaks(span = NULL, geom = "text", vjust = -0.5, color = "red",
aes(label = paste(stat(y.label), "at", stat(x.label), "nm"))) +
expand_limits(y = c(NA, 0.9))
ggplot(sun.spct) + geom_line() +
stat_peaks(span = 21, geom = "point", colour = "red") +
stat_valleys(span = 21, geom = "point", colour = "blue") +
stat_peaks(span = 51, geom = "text", colour = "red",
vjust = -0.3, label.fmt = "%3.0f nm") +
stat_valleys(span = 51, geom = "text", colour = "blue",
vjust = 1.2, label.fmt = "%3.0f nm")
ggplot(two_suns.spct) + aes(color = spct.idx) +
geom_line() + ylim(NA, 0.9) +
stat_peaks(span = NULL, color = "black") +
stat_peaks(span = NULL, geom = "text", vjust = -0.5, size = 3,
color = "black",
aes(label = paste(stat(y.label), "at", stat(x.label), "nm"))) +
facet_grid(spct.idx~.)
ggplot(white_led.raw_spct, aes(w.length, counts_3)) +
geom_line() +
stat_spikes(color = "red", z.threshold = 8, max.spike.width = 7)
ggplot(despike(white_led.raw_spct, z.threshold = 8, max.spike.width = 7),
aes(w.length, counts_3)) +
geom_line()
ggplot(sun.spct) +
stat_color() + scale_color_identity()
ggplot(sun.spct) +
stat_color(chroma.type = "CC") + scale_color_identity()
ggplot(clip_wl(sun.spct)) +
stat_color(chroma.type = beesxyzCMF.spct) + scale_color_identity()
ggplot(sun.spct) +
geom_line() +
stat_color(shape = 21, color = "black") +
scale_fill_identity()
ggplot(sun.spct) +
stat_color(geom = "bar") +
geom_line(color = "black") +
geom_point(shape = 21, color = "black", stroke = 1.2, fill = "white") +
scale_fill_identity() +
scale_color_identity() +
theme_bw()
ggplot(sun.spct) +
stat_color(geom = "bar", chroma.type = beesxyzCMF.spct) +
geom_line(color = "black") +
geom_point(shape = 21, color = "black", stroke = 1.2, fill = "white") +
scale_fill_identity() +
scale_color_identity() +
theme_bw()
ggplot(two_suns.spct) + aes(shape = spct.idx) +
stat_color() + scale_color_identity() +
geom_line() +
facet_grid(spct.idx~., scales = "free_y")
ggplot(sun.spct) + geom_line() +
stat_wb_box(w.band = VIS_bands(), color = "white") +
scale_fill_identity()
ggplot(sun.spct) + stat_wb_column(w.band = VIS_bands()) + geom_line() +
scale_fill_identity()
ggplot(sun.spct) + geom_line() +
stat_wb_hbar(w.band = VIS_bands(), size = 1.2) +
scale_color_identity()
ggplot(sun.spct) + geom_line() +
stat_wb_box(w.band = PAR(), color = "white", ypos.fixed = 0.85) +
stat_wb_label(w.band = PAR(), ypos.fixed = 0.85) +
scale_fill_identity() + scale_color_identity()
ggplot(sun.spct) + geom_line() + stat_wl_summary()
ggplot(sun.spct) +
stat_wl_summary(range = c(300,350), geom = "rect") +
geom_line()
ggplot(sun.spct) +
geom_line() +
stat_wl_summary(geom = "hline", color = "red") +
stat_wl_summary(label.fmt = "Mean = %.3g", color = "red", vjust = -0.3)
ggplot(sun.spct) +
stat_wl_summary(range = c(400,500), geom = "rect", alpha = 0.2, fill = color_of(450)) +
stat_wl_summary(range = c(400,500), label.fmt = "Mean = %.3g", vjust = -0.3, geom = "text") +
geom_line()
ggplot(two_suns.spct) + aes(color = spct.idx) +
geom_line() +
stat_wl_summary(geom = "hline") +
stat_wl_summary(label.fmt = "Mean = %.3g", vjust = 1.2, show.legend = FALSE) +
facet_grid(spct.idx~.)
ggplot(two_suns.spct) + aes(color = spct.idx) +
geom_line() +
stat_wl_summary(geom = "hline") +
stat_wl_summary(label.fmt = "Mean = %.3g", vjust = 1.2, show.legend = FALSE) +
facet_grid(spct.idx~., scales = "free_y")
ggplot(sun.spct) +
geom_line() +
stat_wb_hbar(w.band = PAR(), size = 1.3) +
stat_wb_mean(aes(color = ..wb.color..), w.band = PAR(), ypos.mult = 0.95) +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_hbar(w.band = c(400,500), size = 1.2) +
stat_wb_mean(aes(color = ..wb.color..),
w.band = c(400,500), ypos.mult = 0.95) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
geom_line() +
stat_wb_hbar(w.band = list(Blue(), Red()), size = 1.2) +
stat_wb_mean(aes(color = ..wb.color..),
w.band = list(Blue(), Red()), ypos.mult = 0.95,
hjust = 1, angle = 90) +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = PAR()) +
stat_wb_total(w.band = PAR()) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = c(400,500)) +
stat_wb_total(w.band = c(400,500)) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct * yellow_gel.spct) +
stat_wb_box(w.band = Plant_bands(), color = "white", ypos.fixed = 0.7) +
stat_wb_column(w.band = Plant_bands(), color = "white", alpha = 0.5) +
stat_wb_mean(w.band = Plant_bands(), label.fmt = "%1.2f",
ypos.fixed = 0.7, size = 2) +
geom_line() +
scale_fill_identity() + scale_color_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = PAR()) +
stat_wb_irrad(w.band = PAR(), unit.in = "energy", time.unit = "second") +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct, unit.out = "photon") +
stat_wb_box(w.band = PAR()) +
stat_wb_irrad(w.band = PAR(),
unit.in = "photon", time.unit = "second",
aes(label = sprintf("%s = %.3g", ..wb.name.., ..wb.yint.. * 1e6))) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = PAR()) +
stat_wb_e_irrad(w.band = PAR()) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = CIE()) +
stat_wb_e_irrad(w.band = CIE()) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.daily.spct) +
stat_wb_box(w.band = CIE()) +
stat_wb_e_irrad(w.band = CIE(), time.unit = "day", label.mult = 1e-3) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct, unit.out = "photon") +
stat_wb_box(w.band = VIS_bands(), color = "black") +
stat_wb_column(w.band = VIS_bands(), color = NA, alpha = 0.5) +
stat_wb_q_irrad(w.band = VIS_bands(), label.mult = 1e6, size = 2) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
geom_line() +
stat_wb_hbar(w.band = PAR(), size = 1.4) +
stat_wb_e_sirrad(aes(color = ..wb.color..),
w.band = PAR(), ypos.mult = 0.95) +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct, unit.out = "photon") +
stat_wb_column(w.band = PAR(), alpha = 0.8) +
stat_wb_q_sirrad(w.band = PAR(),
mapping =
aes(label = sprintf("Total %s = %.3g",
..wb.name.., ..wb.yint.. * 1e6)),
ypos.mult = 0.55) +
stat_wb_q_sirrad(w.band = PAR(),
mapping =
aes(label = sprintf("Mean %s = %.3g",
..wb.name.., ..wb.ymean.. * 1e6)),
ypos.mult = 0.45) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_box(w.band = waveband(CIE()), ypos.fixed = 0.85) +
stat_wb_e_sirrad(w.band = CIE(), ypos.fixed = 0.85) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.daily.spct) +
stat_wb_box(w.band = waveband(CIE()), ypos.fixed = 34e3) +
stat_wb_e_sirrad(w.band = CIE(),
label.fmt = "%.2g kj / day",
time.unit = "day",
ypos.fixed = 34e3) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
my.bands <- split_bands(c(300,800), length.out = 10)
ggplot(sun.spct, unit.out = "photon") +
stat_wb_hbar(w.band = my.bands, size = 1.4) +
stat_wb_q_sirrad(geom = "label", w.band = my.bands,
size = 2.5, ypos.fixed = 3.5e-6) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wb_column(w.band = VIS_bands(), alpha = 0.5) +
stat_wb_e_irrad(w.band = VIS_bands(), angle = 90,
ypos.fixed = 0.05, hjust = 0,
aes(label = paste(..wb.name.., ..y.label.., sep = " = "))) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct, unit.out = "photon") +
stat_wb_column(w.band = VIS_bands(), alpha = 0.5) +
stat_wb_q_irrad(w.band = VIS_bands(), angle = 90,
label.mult = 1e6, ypos.fixed = 1e-7, hjust = 0,
aes(label = paste(..wb.name.., ..y.label.., sep = " = "))) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
my.bands <- split_bands(c(300,800), length.out = 10)
ggplot(sun.spct) +
stat_wb_column(w.band = my.bands, alpha = 0.5) +
stat_wb_e_irrad(w.band = my.bands, angle = 90,
ypos.fixed = 0.05, hjust = 0) +
geom_line() +
scale_color_identity() +
scale_fill_identity() +
theme_bw()
ggplot(data.frame(w.length = 300:800), aes(w.length)) +
stat_wl_strip(w.band = VIS_bands(), ymax = Inf, ymin = -Inf) +
stat_wb_label(w.band = VIS_bands(), angle = 90) +
scale_fill_identity() +
scale_color_identity() +
scale_y_continuous(labels = NULL) +
scale_x_continuous(breaks = seq(from = 300, to = 800, by = 25)) +
labs(x = "Wavelength (nm)", title = "Colours according to ISO standard") +
theme_minimal()
ggplot(data.frame(w.length = 300:1100), aes(w.length)) +
stat_wl_strip(w.band = RBV_bands(), ymax = 1, ymin = 3) +
stat_wb_label(w.band = RBV_bands(), ypos.fixed = 2, angle = 90, vjust = 0.3, size = 3) +
stat_wl_strip(w.band = MSS_bands(), ymax = 4, ymin = 6, na.rm = TRUE) +
stat_wb_label(w.band = MSS_bands(), ypos.fixed = 5, angle = 90, vjust = 0.3, size = 3) +
stat_wl_strip(w.band = ETM_bands(), ymax = 7, ymin = 9, na.rm = TRUE) +
stat_wb_label(w.band = ETM_bands(), ypos.fixed = 8, angle = 90, vjust = 0.3, size = 3) +
stat_wl_strip(w.band = OLI_bands(), ymax = 10, ymin = 12, na.rm = TRUE) +
stat_wb_label(w.band = OLI_bands(), ypos.fixed = 11, angle = 90, vjust = 0.3, size = 3) +
scale_fill_identity() +
scale_color_identity() +
scale_y_continuous(labels = c("RBV", "MSS", "TM/ETM", "OLI"),
breaks = c(2,5,8,11),
limits = c(0, 13),
name = "Imager",
sec.axis = dup_axis(labels = c("L1-L2", "L1-L5", "L4-L7", "L8"), name = "Landsat mission")) +
scale_x_continuous(breaks = seq(from = 300, to = 1200, by = 100),
limits = c(400, 1100),
sec.axis = dup_axis()) +
labs(x = "Wavelength (nm)", title = "Landsat imagers: VIS and NIR bands") +
theme_classic()
ggplot(data.frame(w.length = 100:400), aes(w.length)) +
stat_wl_strip(w.band = UV_bands("ISO"), ymax = 1, ymin = 3, color = "white") +
stat_wb_label(w.band = UV_bands("ISO"), ypos.fixed = 2, size = 3) +
stat_wl_strip(w.band = UV_bands("CIE"), ymax = 4, ymin = 6, color = "white") +
stat_wb_label(w.band = UV_bands("CIE"), ypos.fixed = 5, size = 3) +
stat_wl_strip(w.band = UV_bands("plants"), ymax = 7, ymin = 9, color = "white") +
stat_wb_label(w.band = UV_bands("plants"), ypos.fixed = 8, size = 3) +
stat_wl_strip(w.band = UV_bands("none"), ymax = 10, ymin = 12, color = "white") +
stat_wb_label(w.band = UV_bands("none"), ypos.fixed = 11, size = 3) +
stat_wl_strip(w.band = UV_bands("medical"), ymax = 13, ymin = 15, color = "white") +
stat_wb_label(w.band = UV_bands("medical"), ypos.fixed = 14, size = 3) +
scale_fill_identity() +
scale_color_identity() +
scale_y_continuous(labels = c("ISO", "CIE", "plants", "none", "medical"),
breaks = c(2,5,8,11,14),
limits = c(0, 16),
name = "Definition",
sec.axis = dup_axis(labels =
c("use", "use", "use?", "avoid!", "avoid!"), name = "Recommendation")) +
scale_x_continuous(breaks = c(seq(from = 100, to = 400, by = 50), 280, 315),
limits = c(100, 400),
sec.axis =
dup_axis(breaks =
c(100, 150, 200, 220, 250, 290, 320, 340, 400))) +
labs(x = "Wavelength (nm)", title = "UV bands",
subtitle = "According to ISO standard, CIE recommendations, and non-standard use") +
theme_classic()
my.data <- data.frame(x = 300:800)
ggplot(my.data, aes(x)) + stat_wl_strip(ymin = -1, ymax = 1, color = NA) +
scale_fill_identity()
ggplot(sun.spct) +
geom_line() +
stat_wl_strip(ymin = -Inf, ymax = -0.025) +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
geom_line() +
stat_wl_strip(w.band = VIS_bands(), ymin = -Inf, ymax = -0.025) +
scale_fill_identity() +
theme_bw()
ggplot(sun.spct) +
stat_wl_strip(w.band = VIS_bands(), ymin = -Inf, ymax = Inf, alpha = 0.4) +
scale_fill_identity() +
geom_line() +
theme_bw()
ggplot(sun.spct) +
stat_wl_strip(alpha = 0.4, ymin = -Inf, ymax = Inf) +
scale_fill_identity() +
geom_line() +
theme_bw()
ggplot(sun.spct, unit.out = "photon") +
stat_wl_strip(alpha = 0.4, ymin = -Inf, ymax = Inf) +
stat_wb_box(w.band = PAR()) +
stat_wb_total(w.band = PAR(), label.mult = 1e6,
aes(label = paste(..wb.name.., " = ", ..y.label.., sep = ""))) +
geom_line() +
scale_fill_identity() + scale_color_identity() +
theme_bw()
ggplot(sun.spct) + geom_spct()
ggplot(sun.spct) +
geom_spct(fill = color_of(sun.spct))
ggplot(sun.spct * yellow_gel.spct) +
geom_spct(fill = color_of(sun.spct * yellow_gel.spct))
ggplot(sun.spct) +
wl_guide(alpha = 0.4) +
geom_line()
ggplot(sun.spct) +
wl_guide(ymax = -0.025) +
geom_line()
ggplot(sun.spct) +
wl_guide() +
geom_spct(alpha = 0.75, colour = "white", size = 1)
ggplot(sun.spct) +
wl_guide() +
geom_line(size = 2, colour = "white") +
geom_line(size = 1, colour = "black") +
geom_hline(yintercept = 0, colour = "grey92") +
theme_bw()
color_chart(colors())
color_chart(grep("blue", colors(), value = TRUE), ncol = 5, text.size = 4)
color_chart(w_length2rgb(570:689, color.name = as.character(570:689)),
use.names = TRUE, text.size = 4) +
ggtitle("Reddish colors", subtitle = "Labels: wavelength (nm)")
|
testthat::context(desc="Test xo.processor() function")
testthat::test_that(desc="Test, if xo.processor() throws errors/warnings on wrong arguments",
{
testthat::expect_error(object=xo.processor())
})
readXoProcessorResultFile <- function(filename) {
result <- read.csv(file = paste0("./testdata/",filename),
colClasses = c("Date","numeric","numeric","numeric","numeric","character","numeric","numeric","character","numeric","numeric"))
result
}
testthat::test_that(desc="Test xoProcessor with only one quote",
{
result <- readXoProcessorResultFile("testdata-xoProcessor-singleQuote.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
})
testthat::test_that(desc="Test xoProcessor with two quotes",
{
result <- readXoProcessorResultFile("testdata-xoProcessor-doubleQuote-OX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-doubleQuote-OO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-doubleQuote-O.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
})
testthat::test_that(desc="Test xoProcessor with three quotes",
{
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OXX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OXO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OXO_2.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OOO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-OOX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-O__.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-O_O.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-tripleQuote-O_X.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
})
testthat::test_that(desc="Test xoProcessor with four quotes",
{
result <- readXoProcessorResultFile("testdata-xoProcessor-quadrupleQuote-OXO_.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-quadrupleQuote-OXOX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-quadrupleQuote-OXOX_2.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-quadrupleQuote-OXOO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
})
testthat::test_that(desc="Test xoProcessor with five quotes",
{
result <- readXoProcessorResultFile("testdata-xoProcessor-quintupleQuote-OXOX_.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-quintupleQuote-OXOXX.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
result <- readXoProcessorResultFile("testdata-xoProcessor-quintupleQuote-OXOXO.csv")
testthat::expect_equal(object = xo.processor(result$high,result$low,result$date),
expected = result)
})
|
isIntegerOrNaOrInfVectorOrNull <- function(argument, default = NULL, stopIfNot = FALSE, n = NA, message = NULL, argumentName = NULL) {
  # pass the caller-supplied n through (it was previously hardcoded to NA and ignored)
  checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE,
           n = n, zeroAllowed = TRUE, negativeAllowed = TRUE, positiveAllowed = TRUE,
           nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = FALSE, infAllowed = TRUE,
           message = message, argumentName = argumentName)
}
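# Hypothetical usage sketch (checkarg() itself is defined elsewhere in the
# package; the calls below are illustrative only):
# isIntegerOrNaOrInfVectorOrNull(c(1, NA, Inf))          # valid: NA and Inf are allowed
# isIntegerOrNaOrInfVectorOrNull(1.5, stopIfNot = TRUE)  # rejected: non-integer values are not allowed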
|
slegendre.weight <- function( x )
{
n <- length( x )
y <- rep( 0, n )
for ( i in 1:n ) {
if ( ( x[i] > 0 ) && ( x[i] < 1 ) )
y[i] <- 1
}
return( y )
}
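# Example: the shifted Legendre weight function is the indicator of the open
# interval (0, 1).
slegendre.weight( c( -0.5, 0.25, 0.5, 1 ) )  # returns 0 1 1 0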
|
write.settings <- function(
settings,
outputfile,
outputdir = settings$outdir) {
pecanfile <- file.path(outputdir, outputfile)
if (file.exists(pecanfile)) {
PEcAn.logger::logger.warn(
paste(
"File already exists [", pecanfile,
"] file will be overwritten"))
}
XML::saveXML(listToXml(settings, "pecan"), file = pecanfile)
}
|
fixLMtps <- function(data,comp=3,weight=TRUE,weightfun=NULL)
{
n <- dim(data)[3]
k <- dim(data)[1]
m <- dim(data)[2]
checklist <- list()
checkvec <- rep(0,n)
out <- data
for (i in 1:n) {
count <- 0
checklist[[i]] <- NA
for (j in 1:k) {
if (anyNA(data[j,,i])) {
count <- count+1
checklist[[i]][count] <- j
checkvec[i] <- 1
}
}
}
check <- which(checkvec==1)
if (!length(check)) {
message("nothing to fix")
return(data)
}
data.c <- data[,,-check]
if (length(dim(data.c)) < 3) {
if (is.matrix(data.c)) {
data.c <- array(data.c,dim=c(dim(data.c),1))
ngood <- 1
} else
stop("there is no complete configuration to use")
} else {
ngood <- dim(data.c)[3]
}
if (ngood < comp) {
if (ngood == 0) {
stop("no complete configuration found")
} else {
comp <- ngood
if (weight)
warning(paste("only",ngood,"configuration(s) found. comp is set to",ngood,"\n"))
}
}
if (ngood > 1) {
proc.c <- ProcGPA(data.c,silent = TRUE)
mean0 <- proc.c$mshape
} else
mean0 <- data.c[,,1]
for (i in 1:length(check)) {
miss <- checklist[[check[i]]]
if (weight && ngood > 1) {
rotmiss <- rotonto(mean0[-miss,],data[-miss,,check[i]],scale=TRUE,reflection = FALSE)$yrot
allrot <- bindArr(rotmiss,proc.c$rotated[-miss,,], along=3)
wcalc <- proc.weight(allrot,comp,1,report=FALSE,weightfun=weightfun)
lms <- proc.c$rotated[,,wcalc$data$nr-1]
if (is.matrix(lms))
lms <- array(lms,dim=c(dim(lms),1))
lm.est <- matrix(0,dim(data)[1],m)
for (j in 1:comp) {
lm.est <- lm.est+lms[,,j]*wcalc$data$weight[j]
}
tpsout <- tps3d(lm.est,lm.est[-miss,],data[-miss,,check[i]],threads=1)
} else {
tpsout <- tps3d(mean0,mean0[-miss,],data[-miss,,check[i]],threads=1)
}
out[,,check[i]] <- tpsout
}
return(list(out=out,mshape=mean0,checklist=checklist,check=check))
}
|
simulate_HD_data = function(size_vector = c(20, 20, 20, 20), p = 220,
mu = matrix(c(1.5, 2.5, 0, 1.5, 0, -1.5, -2.5, -1.5), ncol = 2, byrow = TRUE),
signal_variance = 1, noise_variance = 1, sparsity = 0.09, seed = 1234)
{
K = length(size_vector)
if (K != nrow(mu))
{
stop("The number of rows of mu must equal clusters (the length of size_vector)")
}
n = size_vector
set.seed(seed)
X = matrix(rnorm(sum(n)*p, 0, 1), nrow = sum(n), ncol = p, byrow = TRUE)
start_value = floor(floor(sparsity*p)/2)
for (i in 1:K)
{
row_start = ifelse(i == 1, 0, sum(n[1:(i - 1)]))
X[(row_start + 1):(row_start + n[i]), 1:start_value] = rnorm(n[i]*start_value, mu[i, 1], signal_variance)
X[(row_start + 1):(row_start + n[i]), (1 + start_value):(2*start_value)] = rnorm(n[i]*start_value, mu[i, 2], signal_variance)
}
  num_new_noise = floor((p - (2*start_value))/2)
  noise_cols = (2*start_value + 1):num_new_noise
  n_temp = length(noise_cols)
  X[, noise_cols] = matrix(rnorm(n_temp*sum(n), 0, noise_variance), nrow = nrow(X))
Y = c()
for (i in 1:K)
{
Y = c(Y, rep(i, n[i]))
}
return(list(X = X, Y = Y))
}
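# Usage example with the defaults: four clusters of 20 observations in
# p = 220 dimensions, where only roughly the first sparsity*p features
# carry cluster signal.
sim = simulate_HD_data()
dim(sim$X)    # 80 x 220
table(sim$Y)  # 20 observations per cluster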
|
context("download and load")
test_that("download from geojob works", {
testthat::skip_on_cran()
cancel()
fabric <- readRDS("data/test_webdata_fabric.rds")
job <<- geoknife(stencil = c(-89,42), fabric = fabric, wait=TRUE)
file = download(job)
expect_true(file.exists(file))
})
test_that("load result works from geojob object.", {
testthat::skip_on_cran()
expect_is(result(job),'data.frame')
})
test_that("load result works from job id only", {
testthat::skip_on_cran()
expect_is(result(id(job)),'data.frame')
})
test_that("download result overwrite works", {
testthat::skip_on_cran()
expect_true(file.exists(download(job, overwrite = TRUE)))
})
|
set_curve <- function(semPaths_plot, curve_list = NULL) {
if (is.null(curve_list)) {
stop("curve_list not specified.")
}
if (is.null(semPaths_plot)) {
stop("semPaths_plot not specified.")
} else {
if (!inherits(semPaths_plot, "qgraph")) {
stop("semPaths_plot is not a qgraph object.")
}
}
if (!is.list(curve_list) && is.numeric(curve_list)) {
curve_list_org <- curve_list
curve_list <- to_list_of_lists(curve_list,
name1 = "from",
name2 = "to",
name3 = "new_curve")
}
Nodes_names <- semPaths_plot$graphAttributes$Nodes$names
Nodes_id <- seq_len(length(Nodes_names))
names(Nodes_id) <- Nodes_names
curve_old <- semPaths_plot$graphAttributes$Edges$curve
curve_new <- curve_old
curve_index <- sapply(curve_list, function(x) {
edge_index(semPaths_plot, from = x$from, to = x$to)
})
curve_new[curve_index] <- sapply(curve_list, function(x) x$new_curve)
semPaths_plot$graphAttributes$Edges$curve <- curve_new
semPaths_plot
}
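# Hypothetical usage sketch: bend the edge from node "x" to node "y" in an
# existing semPaths() plot (a qgraph object). Node names and the curvature
# value are illustrative, not taken from a real model.
# my_curves <- list(list(from = "x", to = "y", new_curve = -1))
# p_curved <- set_curve(semPaths_plot = p, curve_list = my_curves)
# plot(p_curved)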
|
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(optimall)
library(DiagrammeR)
library(dplyr)
data(MatWgt_Sim, package = "optimall")
MatWgt_Sim <- MatWgt_Sim %>%
dplyr::mutate(race =
dplyr::case_when(race == "Asian" ~ "Other",
race == "Other" ~ "Other",
race == "White" ~ "White",
race == "Black" ~ "Black"))
phase1 <- dplyr::select(MatWgt_Sim, -mat_weight_true)
phase1$strata <- phase1$race
set.seed(452)
phase1 <- split_strata(data = phase1, strata = "strata", split = NULL,
split_var = "mat_weight_est",
type = "global quantile",
split_at = c(0.25,0.75),
trunc = "MWC_est")
phase1_data_dictionary <- data.frame("Variable" = c("id",
"race",
"mat_weight_est",
"diabetes"),
"Description" = c("unique identifier",
"race of mother",
"error-prone estimate of maternal weight change during pregnancy",
"1/0 indicator for diabetes in the mother during pregnancy"))
MySurvey <- new_multiwave(phases = 2, waves = c(1,3))
MySurvey@metadata
MySurvey@metadata <- list(title = "Maternal Weight Survey")
MySurvey@phases$phase2@metadata
MySurvey@phases$phase2@waves$wave2@design
get_data(MySurvey, phase = NA, slot = "metadata")
get_data(MySurvey, phase = NA, slot = "metadata") <- list(title = "Maternal Weight Survey")
get_data(MySurvey, phase = 2, slot = "metadata")
get_data(MySurvey, phase = 2, wave = 2, slot = "design")
head(phase1)
get_data(MySurvey, phase = 1, slot = "data") <- phase1
phase1_data_dictionary <- data.frame(
"Variable" = c( "id", "race", "mat_weight_est", "diabetes", "obesity"),
"Description" = c("unique identifier",
"race of mother",
"error-prone estimate of maternal weight change
during pregnancy",
"1/0 indicator for diabetes in the mother during
pregnancy",
"1/0 indicator for childhood obesity in child"))
head(phase1_data_dictionary)
get_data(MySurvey, phase = 1, slot = "metadata") <- list(data_dict = phase1_data_dictionary)
IrisSurvey <- new_multiwave(phases = 2, waves = c(1,3))
iris <- cbind(datasets::iris, id = 1:150)
get_data(IrisSurvey, phase = 1, slot = "data") <-
subset(iris, select = -Sepal.Width)
IrisSurvey <- apply_multiwave(IrisSurvey, phase = 2, wave = 1,
fun = "optimum_allocation",
strata = "Species", y = "Sepal.Length",
nsample = 30, method = "WrightII")
get_data(IrisSurvey, phase = 2, slot = "metadata") <-
list(strata = "Species")
IrisSurvey <- apply_multiwave(IrisSurvey, phase = 2, wave = 1,
fun = "optimum_allocation",
y = "Sepal.Length",
nsample = 30, method = "WrightII")
get_data(IrisSurvey, phase = 2, wave = 1, slot = "design")
IrisSurvey <- apply_multiwave(IrisSurvey, phase = 2, wave = 1,
fun = "sample_strata", id = "id",
design_strata = "strata",
n_allocated = "stratum_size")
get_data(IrisSurvey, phase = 2, wave = 1, slot = "samples")
get_data(IrisSurvey, phase = 2, wave = 1, slot = "sampled_data") <-
iris[iris$id %in% get_data(IrisSurvey,
phase = 2,
wave = 1,
slot = "samples"),
c("id", "Sepal.Width")]
IrisSurvey <- merge_samples(IrisSurvey, phase = 2, wave = 1,
id = "id", sampled_ind = "sampled_phase2")
head(get_data(IrisSurvey, phase = 2, wave = 1, slot = "data"))
get_data(MySurvey, phase = 2, slot = "metadata") <-
list(description = "Phase 2 of Maternal Weight Survey in which we
seek to validate 750 samples across three waves.",
strata = "new_strata",
id = "id",
y = "mat_weight_true",
design_strata = "strata",
n_allocated = "n_to_sample"
)
get_data(MySurvey, phase = 2, wave = 1, slot = "metadata") <-
list(description = "First wave of 250
sampled using proportional sampling")
get_data(MySurvey, phase = 2, wave = 1, slot = "design") <- data.frame(
strata = names(table(phase1$new_strata)),
n_to_sample = round(250.3*as.vector(table(phase1$new_strata))/10335)
)
get_data(MySurvey, phase = 2, wave = 1, slot = "design")
set.seed(456)
MySurvey <- apply_multiwave(MySurvey, phase = 2, wave = 1,
fun = "sample_strata",
strata = "new_strata",
id = "id",
wave2a = NULL,
design_strata = "strata",
n_allocated = "n_to_sample"
)
head(get_data(MySurvey, phase = 2, wave = 1, slot = "samples"))
length(get_data(MySurvey, phase = 2, wave = 1, slot = "samples"))
set.seed(456)
MySurvey <- apply_multiwave(MySurvey, phase = 2, wave = 1, fun = "sample_strata")
ids_wave1 <- get_data(MySurvey, phase = 2, wave = 1, slot = "samples")
head(ids_wave1)
length(ids_wave1)
get_data(MySurvey, phase = 2, wave = 1, slot = "sampled_data") <-
MatWgt_Sim[MatWgt_Sim$id %in% ids_wave1, c("id", "mat_weight_true")]
MySurvey <- apply_multiwave(MySurvey, phase = 2, wave = 1, fun = "merge_samples",
sampled_ind = "already_sampled_ind")
get_data(MySurvey, phase = 2, wave = 1) <-
get_data(MySurvey, phase = 2, wave = 1) %>%
dplyr::mutate(already_sampled_ind =
ifelse(id %in%
get_data(MySurvey,
phase = 2,
wave = 1,
slot = "samples"), 1, 0))
MySurvey <- apply_multiwave(MySurvey,
phase = 2,
wave = 2,
fun = "allocate_wave",
nsample = 250,
already_sampled = "already_sampled_ind")
get_data(MySurvey, phase = 2, wave = 2, slot = "design")
|
comb.samples <-
function (svy.A, svy.B, svy.C = NULL, y.lab, z.lab, form.x, estimation = NULL,
micro = FALSE, ...)
{
data.A <- svy.A$variables
if(is.factor(data.A[, y.lab])){
y.lev <- levels(data.A[, y.lab])
levels(data.A[, y.lab]) <- 1:nlevels(data.A[, y.lab])
svy.A$variables <- data.A
}
n.A <- nrow(data.A)
w.A <- weights(svy.A)
data.B <- svy.B$variables
if(is.factor(data.B[, z.lab])){
z.lev <- levels(data.B[, z.lab])
levels(data.B[, z.lab]) <- 1:nlevels(data.B[, z.lab])
svy.B$variables <- data.B
}
n.B <- nrow(data.B)
w.B <- weights(svy.B)
X.A <- model.matrix(form.x, data = data.A)
X.B <- model.matrix(form.x, data = data.B)
form.y <- as.formula(paste("~", y.lab, "-1", collapse = ""))
form.z <- as.formula(paste("~", z.lab, "-1", collapse = ""))
Y.A <- model.matrix(form.y, data = data.A)
Z.B <- model.matrix(form.z, data = data.B)
QR.A <- qr(X.A * sqrt(w.A))
beta.yx.A <- qr.coef(QR.A, Y.A * sqrt(w.A))
beta.yx.A[is.na(beta.yx.A)] <- 0
QR.B <- qr(X.B * sqrt(w.B))
beta.zx.B <- qr.coef(QR.B, Z.B * sqrt(w.B))
beta.zx.B[is.na(beta.zx.B)] <- 0
XX.wA <- t(X.A) %*% (X.A * w.A)
XX.wB <- t(X.B) %*% (X.B * w.B)
gamma.p <- n.A/(n.A + n.B)
XX.pool <- gamma.p * XX.wA + (1 - gamma.p) * XX.wB
YZ.CIA <- t(beta.yx.A) %*% XX.pool %*% beta.zx.B
if(is.factor(data.A[, y.lab])) rownames(YZ.CIA) <- y.lev
if(is.factor(data.B[, z.lab])) colnames(YZ.CIA) <- z.lev
out <- list(yz.CIA = YZ.CIA, call = match.call())
if (!is.null(svy.C)) {
data.C <- svy.C$variables
if(is.factor(data.C[, y.lab])){
y.lev.C <- levels(data.C[, y.lab])
if (all.equal(y.lev, y.lev.C))
levels(data.C[, y.lab]) <- 1:nlevels(data.C[, y.lab])
else stop("The levels of y.lab in svy.A and in svy.C do not match")
}
if(is.factor(data.C[, z.lab])) {
z.lev.C <- levels(data.C[, z.lab])
if (all.equal(z.lev, z.lev.C))
levels(data.C[, z.lab]) <- 1:nlevels(data.C[, z.lab])
else stop("The levels of z.lab in svy.B and in svy.C do not match")
}
svy.C$variables <- data.C
n.C <- nrow(data.C)
w.C <- weights(svy.C)
if (estimation == "ITWS" || estimation == "i2ws" || estimation ==
"incomplete") {
tot.y.A <- colSums(Y.A * w.A)
tot.z.B <- colSums(Z.B * w.B)
tot.yz <- c(tot.y.A, tot.z.B[-1])
form.yz <- as.formula(paste("~", paste(y.lab, z.lab,
sep = "+"), "- 1", sep = ""))
cal.C <- calibrate(design = svy.C, formula = form.yz,
population = tot.yz, ...)
}
if (estimation == "STWS" || estimation == "s2ws" || estimation ==
"synthetic") {
X.C <- model.matrix(form.x, data = data.C)
Y.C <- model.matrix(form.y, data = data.C)
Z.C <- model.matrix(form.z, data = data.C)
resY.C <- Y.C - (X.C %*% beta.yx.A)
resZ.C <- Z.C - (X.C %*% beta.zx.B)
c.y <- ncol(Y.C)
c.z <- ncol(Z.C)
new.YZ <- matrix(NA, nrow = n.C, ncol = (c.y * c.z))
for (i in 1:n.C) {
m1 <- cbind(Y.C[i, ]) %*% rbind(Z.C[i, ])
m2 <- cbind(resY.C[i, ]) %*% rbind(resZ.C[i, ])
new.YZ[i, ] <- c(m1) - c(m2)
}
lab1 <- rep(colnames(Y.C), c.z)
lab2 <- rep(colnames(Z.C), each = c.y)
lab <- paste(lab1, lab2, sep = "_")
colnames(new.YZ) <- lab
orig.vars <- colnames(svy.C$variables)
svy.C$variables <- data.frame(svy.C$variables, new.YZ)
vec.tot <- c(YZ.CIA)
names(vec.tot) <- lab
form.yz <- as.formula(paste("~", paste(lab, collapse = "+"),
"- 1", sep = ""))
cal.C <- calibrate(design = svy.C, formula = form.yz,
population = vec.tot, ...)
cal.C$variables <- cal.C$variables[, orig.vars]
}
ww.C <- weights(cal.C)
f.yz <- paste("ww.C", paste(y.lab, z.lab, sep = "+"),
sep = "~")
YZ.noCIA <- xtabs(as.formula(f.yz), data = data.C)
if(is.factor(data.A[, y.lab])) rownames(YZ.noCIA) <- y.lev
if(is.factor(data.B[, z.lab])) colnames(YZ.noCIA) <- z.lev
out <- list(yz.CIA = YZ.CIA, cal.C = cal.C, yz.est = YZ.noCIA,
call = match.call())
}
if (micro) {
pred.Y.A <- X.A %*% beta.yx.A
res.Y.A <- Y.A - pred.Y.A
pred.Z.A <- X.A %*% beta.zx.B
pred.Z.B <- X.B %*% beta.zx.B
res.Z.B <- Z.B - pred.Z.B
pred.Y.B <- X.B %*% beta.yx.A
if (!is.null(svy.C) & (estimation == "STWS" || estimation ==
"s2ws" || estimation == "synthetic")) {
pred.Y.C <- X.C %*% beta.yx.A
res.Y.C <- Y.C - pred.Y.C
pred.Z.C <- X.C %*% beta.zx.B
res.Z.C <- Z.C - pred.Z.C
alfa2.1 <- t(res.Y.A) %*% (res.Y.A * w.A)
alfa2.2 <- t(res.Y.C) %*% (res.Z.C * ww.C)
qr.alfa <- qr(alfa2.1)
alfa2 <- qr.coef(qr.alfa, alfa2.2)
alfa2[is.na(alfa2)] <- 0
cat(alfa2, fill = T)
pred.Z.A <- pred.Z.A + res.Y.A %*% alfa2
beta2.1 <- t(res.Z.B) %*% (res.Z.B * w.B)
beta2.2 <- t(res.Z.C) %*% (res.Y.C * ww.C)
qr.beta <- qr(beta2.1)
beta2 <- qr.coef(qr.beta, beta2.2)
beta2[is.na(beta2)] <- 0
cat(beta2, fill = T)
pred.Y.B <- pred.Y.B + res.Z.B %*% beta2
}
pred <- list(Y.A=pred.Y.A, Z.A = pred.Z.A,
Z.B=pred.Z.B, Y.B = pred.Y.B)
out <- c(out, pred)
}
out
}
|
session <- RevoIOQ:::saveRUnitSession(packages=c("nlme"), datasets=c("Orthodont"))
"augPredmissing.stress" <- function()
{
library(nlme)
data(Orthodont)
Orthodont$Others = runif(nrow(Orthodont))
is.na(Orthodont$Others[3]) = TRUE
fm1 = lme(Orthodont, random = ~1)
augPred(fm1, length.out = 2, level = c(0,1))
}
"test.augPredmissing.stress" <- function()
{
res <- try(augPredmissing.stress())
checkTrue(!is(res, "try-error"), msg="augPredmissing stress test failed")
}
"testzzz.restore.session" <- function()
{
checkTrue(RevoIOQ:::restoreRUnitSession(session), msg="Session restoration failed")
}
|
calculate_jmax <- function(PPFD,
alpha,
J,
theta_J) {
J * (J * theta_J - alpha * PPFD) / (J - alpha * PPFD)
}
calculate_j <- function(PPFD,
alpha,
J_max,
theta_J) {
(alpha * PPFD + J_max -
sqrt((alpha * PPFD + J_max)^2 - 4 * alpha * theta_J * PPFD * J_max)) /
(2 * theta_J)
}
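# Consistency check with illustrative parameter values: calculate_jmax()
# algebraically inverts calculate_j(), so feeding the computed J back in
# should recover J_max.
PPFD <- 1500; alpha <- 0.3; J_max <- 200; theta_J <- 0.85
J <- calculate_j(PPFD, alpha, J_max, theta_J)
calculate_jmax(PPFD, alpha, J, theta_J)  # ~200, recovering J_max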
|
"G519C18"
|
library(sim1000G)
vcf_file = "CEU-TSI-GBR-region-chr4-357-ANK2.vcf.gz"
vcf = readVCF( vcf_file, maxNumberOfVariants = 442 ,min_maf = 0.0005,max_maf = 0.01)
dim( vcf$gt1 )
downloadGeneticMap(4)
readGeneticMap(4)
sample.size=3000
data_sim = function(seed.num){
startSimulation(vcf, totalNumberOfIndividuals = sample.size)
SIM$reset()
id = generateUnrelatedIndividuals(sample.size)
gt = retrieveGenotypes(id)
freq = apply(gt,2,sum)/(2*nrow(gt))
causal = sample(setdiff(1:ncol(gt),which(freq==0)),45)
beta.sign = rep(1,45)
c.value = 0.402
beta.abs = c.value*abs(log10(freq[causal]))
beta.val = beta.sign*beta.abs
x.bar = apply(gt[,causal],2,mean)
x.bar = as.matrix(x.bar)
beta.val = t(as.matrix(beta.val))
beta0 = 0-beta.val %*% x.bar
eta = beta.val %*% t(gt[,causal])
eta = as.vector(eta) + rep(beta0,nrow(gt))
prob = exp(eta)/(1+exp(eta))
genocase = rep(NA, sample.size)
set.seed(seed.num)
for(i in 1:sample.size){
genocase[i] = rbinom(1, 1, prob[i])
}
case.idx = sample(which(genocase==1),1000)
control.idx = sample(which(genocase==0),1000)
return(rbind(gt[case.idx,],gt[control.idx,]))
}
library(SKAT)
res = NULL
for(seed_number in 1:100){
set.seed(seed_number)
print(seed_number)
Z1.skat = data_sim(seed_number)
write.csv(Z1.skat,paste("/Volumes/Briollais_lab/jingxiong/Project/sequencing_data/sim1000G/result/simulated_dataset/data_SKAT_2000_442_",seed_number,".csv",sep=""),row.names = F,quote=F)
obj = SKAT_Null_Model(c(rep(1,1000),rep(0,1000)) ~ 1,out_type="D")
p1_skat = SKAT(as.matrix(Z1.skat),obj)$p.value
p1_burden = SKAT(as.matrix(Z1.skat),obj,r.corr=1)$p.value
p1_skat_O = SKAT(as.matrix(Z1.skat),obj,method="optimal.adj")$p.value
res = rbind(res,c(p1_skat,p1_burden,p1_skat_O))
}
write.csv(res,"/Volumes/Briollais_lab/jingxiong/Project/sequencing_data/sim1000G/result/SKAT/skat_442_2000.csv",row.names = F,quote=F)
|
ev_nmi <- function(pred.lab, ref.lab, method = "emp") {
U <- data.frame(ref.lab, pred.lab)
Hyx <- infotheo::entropy(U, method)
Hx <- infotheo::entropy(pred.lab, method)
Hy <- infotheo::entropy(ref.lab, method)
I <- ifelse(Hx + Hy - Hyx < 0, 0, Hx + Hy - Hyx)
NMI <- I / sqrt(Hx * Hy)
NMI
}
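# Example (requires the infotheo package): NMI is 1 for identical partitions
# and drops below 1 when labels disagree.
# ref <- rep(1:2, each = 5)
# ev_nmi(ref, ref)             # 1
# ev_nmi(c(ref[-1], 1), ref)   # < 1: shifted labels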
ev_confmat <- function(pred.lab, ref.lab) {
if (!all(unique(pred.lab) %in% unique(ref.lab))) {
stop("Cluster labels should be the same in the predicted and reference
classes.")
}
pred.relab <- relabel_class(pred.lab, ref.lab) %>%
factor(levels = sort(unique(ref.lab)))
CM <- table(pred.relab, ref.lab)
summary(yardstick::conf_mat(CM))
}
|
predict.inbagg <- function(object, newdata, ...) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
if(any(names(object$W) %in% names(newdata))) newdata <- newdata[!(names(newdata) %in% names(object$W))]
NBAGG <- length(object$mtrees)
N <- nrow(newdata)
classes <- levels(object$y)
vote <- matrix(0, nrow=N, ncol=length(classes))
for(i in 1:NBAGG) {
intermed <- object$mtrees[[i]]$bfct(newdata)
if(!is.null(object$mtrees[[i]]$btree$fixed.function)) {
names(intermed) <- sub(".[0-9]$", "", names(intermed))
XX <- data.frame(newdata, intermed)
res <- object$mtrees[[i]]$btree$fixed.function(XX)
} else {
XX <- data.frame(newdata, intermed)
if(is.null(object$mtrees[[i]]$btree$predict)) {
res <- try(predict(object$mtrees[[i]]$btree$model, newdata = XX, ...))
} else {
res <- try(object$mtrees[[i]]$btree$predict(object$mtrees[[i]]$btree$model, newdata = XX, ...))
}
}
res <- cbind(1:N, res)
vote[res] <- vote[res] +1
}
RET <- factor(classes[apply(vote, 1, uwhich.max)])
RET
}
|
genLP <- function(n=100, nl=1, np=1, iso.var=0.1){
m=3
K=round(nl)+round(np)
n=round(n)
isotropic.var=as.double(iso.var)
k.true= c(rep(1, round(nl)), rep(2, round(np)))
Utrue.list = list()
for(k in 1:K){
Utrue.list[[k]] = rstiefel::rustiefel(m=m,R=k.true[k])
}
NU.true = list()
for(k in 1:K){
NU.true[[k]]=MASS::Null(Utrue.list[[k]])
}
PNU.list = list()
for(k in 1:K){
PNU.list[[k]] = NU.true[[k]]%*%t(NU.true[[k]])
}
mutrue.list = list()
for(k in 1:K){
mutrue.list[[k]] = rnorm(k.true[k])
}
phitrue.list = rep(10,K)
sigmatrue.list = rep(isotropic.var,K)
sigma0.true.list = list()
for(k in 1:K){
sigma0.true.list[[k]]=runif(k.true[k],isotropic.var,5.1)
}
Sigma0.true.list = list()
for(k in 1:K){
Sigma0.true.list[[k]]=diag(sigma0.true.list[[k]],k.true[k])
}
theta.true.list = list()
for(k in 1:K){
theta.true.list[[k]] = PNU.list[[k]]%*%rnorm(m)
}
X = c()
label = c()
dimension = c()
for(k in 1:K){
X = rbind(X,MASS::mvrnorm(n,mu=Utrue.list[[k]]%*%mutrue.list[[k]]+theta.true.list[[k]],
Sigma = sigmatrue.list[k]^2*diag(m)/10+Utrue.list[[k]]%*%(Sigma0.true.list[[k]]-sigmatrue.list[k]*diag(k.true[k]))%*%t(Utrue.list[[k]])))
label = c(label, rep(k,n))
dimension = c(dimension, rep(k.true[k], n))
}
output = list()
output$data = X
output$class = label
output$dimension = dimension
return(output)
}
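# Usage example (requires rstiefel and MASS): one linear (1-d) and one planar
# (2-d) Gaussian component in R^3, 100 points each.
# lp = genLP(n = 100, nl = 1, np = 1)
# dim(lp$data)        # 200 x 3
# table(lp$class)     # 100 points per component
# table(lp$dimension) # intrinsic dimensions 1 and 2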
|
if (require(testthat)) {
context("Tests for BoxCox")
test_that("tests for biasadj automatically set based on model fit", {
fit <- tslm(USAccDeaths ~ trend, lambda = 0.5, biasadj = TRUE)
expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE)))
fit <- ses(USAccDeaths, initial = "simple", lambda = 0.5, biasadj = TRUE)
expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE)))
x <- fracdiff::fracdiff.sim(100, ma = -.4, d = .3)$series
fit <- arfima(x)
expect_true(all.equal(forecast(fit), forecast(fit, biasadj=TRUE)))
fit1 <- Arima(USAccDeaths, order = c(0,1,1), seasonal = c(0,1,1), lambda = 0.5, biasadj = TRUE)
fit2 <- auto.arima(USAccDeaths, max.p=0, max.d=1, max.q=1, max.P=0, max.D=1, max.Q=1, lambda = 0.5, biasadj = TRUE)
expect_true(all.equal(forecast(fit1), forecast(fit1, biasadj=TRUE)))
expect_true(all.equal(forecast(fit2), forecast(fit2, biasadj=TRUE)))
expect_true(all.equal(forecast(fit1)$mean, forecast(fit2)$mean))
fit <- ets(USAccDeaths, model = "ANA", lambda = 0.5, biasadj = TRUE)
expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE)))
})
test_that("tests for automatic lambda selection in BoxCox transformation", {
lambda_auto <- BoxCox.lambda(USAccDeaths)
fit <- tslm(USAccDeaths ~ trend, lambda = "auto", biasadj = TRUE)
expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-5)
fit <- ets(USAccDeaths, model = "ANA", lambda = "auto", biasadj = TRUE)
expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-5)
fit <- Arima(USAccDeaths, order = c(0,1,1), seasonal = c(0,1,1), lambda = "auto", biasadj = TRUE)
expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-5)
})
}
|
expected_dist <- function(alpha,n_items,metric){
  if(n_items < 1 || floor(n_items) != n_items){
stop("Number of items must be a positive integer")
}
alpha <- alpha / n_items
if(alpha < 0){
stop("alpha must be a non-negative value")
}else{
if(metric=="kendall"){
out <- exp_d_tau(alpha,n_items)
}
if(metric=="cayley"){
out <- exp_d_cay(alpha,n_items)
}
if(metric=="hamming"){
out <- exp_d_ham(alpha,n_items)
}
if(metric%in%c("ulam","footrule","spearman")){
pfd <- dplyr::filter(partition_function_data,
.data$metric == !!metric, .data$n_items == !!n_items,
.data$type == "cardinalities")
if(nrow(pfd) == 0){
stop("Given number of items currently not available for the specified metric")
} else{
card <- pfd$values[[1]]
}
out <- exp(
log_expected_dist(
alpha = alpha * n_items,
n_items = n_items,
cardinalities = card,
metric = metric))
}
}
return(out)
}
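# Usage sketch: expected Kendall distance under a Mallows model with
# dispersion alpha = 1 on n_items = 5 (exp_d_tau() and the other exp_d_*
# functions are internal helpers of the package, assumed available here).
# expected_dist(1, 5, metric = "kendall")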
|
context("soil depth estimation")
d <-
rbind(
data.frame(
id = c(1, 1, 1),
top = c(0, 20, 35),
bottom = c(20, 35, 110),
name = c('A', 'Bt', 'C')
),
data.frame(
id = c(2, 2, 2),
top = c(0, 20, 55),
bottom = c(20, 55, 80),
name = c('A', 'Bt', 'Cr')
),
data.frame(
id = c(3, 3, 3),
top = c(0, 20, 48),
bottom = c(20, 48, 130),
name = c('A', 'Bt', 'Cd')
),
data.frame(
id = c(4, 4),
top = c(0, 20),
bottom = c(20, 180),
name = c('A', 'R')
))
depths(d) <- id ~ top + bottom
test_that("error conditions", {
expect_error(estimateSoilDepth(d, name='name', top='top', bottom='bottom'))
expect_error(profileApply(d, estimateSoilDepth))
expect_error(profileApply(d, estimateSoilDepth, name='goo'))
})
test_that("basic soil depth evaluation, based on pattern matching of hz designation", {
res <- profileApply(d, estimateSoilDepth, name = 'name')
expect_equivalent(res, c(110, 55, 48, 20))
hzdesgnname(d) <- "name"
res <- estimateSoilDepth(d[1,])
expect_equivalent(res, 110)
})
test_that("application of reasonable depth assumption of 150, given threshold of 100", {
res <- profileApply(d, estimateSoilDepth, name = 'name', no.contact.depth=100, no.contact.assigned=150)
expect_equivalent(res, c(150, 55, 48, 20))
})
test_that("depth to feature using REGEX on hzname: [Bt]", {
res <- profileApply(d, estimateSoilDepth, name = 'name', p = 'Bt', no.contact.depth=0, no.contact.assigned=NA)
expect_equivalent(res, c(20, 20, 20, NA))
})
test_that("depthOf - simple match", {
expect_equal(depthOf(d[1,], "Cr|R|Cd"), NA_real_)
expect_equal(depthOf(d[2,], "Cr|R|Cd"), 55)
expect_equal(minDepthOf(d[2,], "Cr|R|Cd"), 55)
expect_equal(maxDepthOf(d[2,], "Cr|R|Cd"), 55)
expect_equal(maxDepthOf(d[2,], "Cr|R|Cd", top = FALSE), 80)
})
test_that("depthOf - multiple match", {
expect_equal(depthOf(d[1,], "A|B|C"), c(0,20,35))
expect_equal(depthOf(d[1,], "A|B|C", top = FALSE), c(20,35,110))
expect_equal(minDepthOf(d[1,],"A|B|C"), 0)
expect_equal(maxDepthOf(d[1,],"A|B|C"), 35)
expect_equal(minDepthOf(d[1,], "A|B|C", top = FALSE), 20)
expect_equal(maxDepthOf(d[1,], "A|B|C", top = FALSE), 110)
})
test_that("depthOf - no match", {
expect_equal(depthOf(d[1,], "X"), NA_real_)
expect_equal(depthOf(d[2,], "Cr|R|Cd", no.contact.depth = 50), NA_real_)
d2 <- d
d2$name <- NULL
expect_error(depthOf(d2[1,], "A|B|C"))
})
test_that("soil depth class assignment, using USDA-NRCS class breaks", {
res <- getSoilDepthClass(d, name = 'name')
expect_true(inherits(res, 'data.frame'))
expect_equal(nrow(res), length(d))
expect_equivalent(res$depth, c(110, 55, 48, 20))
dc <- factor(c('deep', 'mod.deep', 'shallow', 'very.shallow'), levels=c('very.shallow', 'shallow', 'mod.deep', 'deep', 'very.deep'))
expect_equivalent(res$depth.class, dc)
})
test_that("data.table safety", {
d <-
rbind(
data.frame(
id = c(1, 1, 1),
top = c(0, 20, 35),
bottom = c(20, 35, 110),
name = c('A', 'Bt', 'C')
),
data.frame(
id = c(2, 2, 2),
top = c(0, 20, 55),
bottom = c(20, 55, 80),
name = c('A', 'Bt', 'Cr')
),
data.frame(
id = c(3, 3, 3),
top = c(0, 20, 48),
bottom = c(20, 48, 130),
name = c('A', 'Bt', 'Cd')
),
data.frame(
id = c(4, 4),
top = c(0, 20),
bottom = c(20, 180),
name = c('A', 'R')
))
d <- data.table(d)
depths(d) <- id ~ top + bottom
res <- profileApply(d, estimateSoilDepth, name = 'name', no.contact.depth=100, no.contact.assigned=150)
expect_equivalent(res, c(150, 55, 48, 20))
sdc <- getSoilDepthClass(d, name = 'name', no.contact.depth=100, no.contact.assigned=150)
expect_equivalent(sdc$depth, c(150, 55, 48, 20))
})
test_that("really deep", {
d <-
rbind(
data.frame(
id = c(1, 1, 1),
top = c(0, 20, 35),
bottom = c(20, 35, 2000),
name = c('A', 'Bt', 'C')
))
depths(d) <- id ~ top + bottom
res <- profileApply(d, estimateSoilDepth, name = 'name')
expect_equivalent(res, 2000)
sdc <- getSoilDepthClass(d, name = 'name')
expect_equivalent(sdc$depth, 2000)
expect_equivalent(as.character(sdc$depth.class), 'very.deep')
})
|
NULL
theme_zoom_T = function(x = 1.0,...){
stopifnot(x > 0)
args = list(...); args$L = args$R = x; args$T = 1
do.call(limit_tern,args=args)
}
theme_zoom_L = function(x = 1.0,...){
stopifnot(x > 0)
args = list(...); args$T = args$R = x; args$L = 1
do.call(limit_tern,args=args)
}
theme_zoom_R = function(x = 1.0,...){
stopifnot(x > 0)
args = list(...); args$T = args$L = x; args$R = 1
do.call(limit_tern,args=args)
}
theme_zoom_center = function(x=1.0,...){
stopifnot(x > 1/3)
args = list(...); args$T = args$L = args$R = x
do.call(limit_tern,args=args)
}
theme_zoom_M = theme_zoom_center
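# Usage sketch with ggtern (df with columns x, y, z is hypothetical): zoom in
# on the top apex so only the region with T >= 0.5 remains visible.
# ggtern(data = df, aes(x, y, z)) + geom_point() + theme_zoom_T(0.5)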
|
head(FloridaLakes)
histogram( ~ Alkalinity, width = 10, type = "count", data = FloridaLakes)
|
initSurvival <- function (Time, event, id, W2, W2s, P, wk, id.GK, times, b = NULL, betas = NULL,
indBetas = NULL, W = NULL, baseHaz = NULL, diff = NULL,
Data = NULL, param = NULL, long = NULL, long.extra = NULL,
transFun.value = NULL, transFun.extra = NULL,
vl = NULL, vls = NULL, ex = NULL, exs = NULL) {
nTime <- length(Time)
nLong <- length(id)
if (param == "shared-betasRE" || param == "shared-RE") {
DF <- data.frame(Time = Time, event = event)
Wdat <- as.data.frame(W)
if (!is.null(W))
DF <- cbind(DF, Wdat)
DF <- cbind(DF, b)
tdCox <- coxph(Surv(Time, event) ~ ., data = DF)
} else {
DF <- data.frame(id = id, Time = Time[id], event = event[id])
if (!is.null(W)) {
Wdat <- as.data.frame(W)
DF <- cbind(DF, Wdat[id, ])
}
if (!is.null(long)) {
long <- as.data.frame(transFun.value(long, Data$data))
DF <- cbind(DF, long)
}
if (!is.null(long.extra)) {
long.extra <- as.data.frame(transFun.extra(long.extra, Data$data))
DF <- cbind(DF, long.extra)
}
row.names(DF) <- 1:nLong
DF$start <- times
splitID <- split(DF[c("start", "Time")], DF$id)
DF$stop <- unlist(lapply(splitID, function (d) c(d$start[-1], d$Time[1])))
DF$event <- with(DF, ave(event, id, FUN = function (x) c(rep(0, length(x)-1), x[1])))
DF <- DF[!names(DF) %in% c("Time", "id")]
tdCox <- coxph(Surv(start, stop, event) ~ ., data = DF[DF$stop > DF$start, ])
}
coefs <- coef(tdCox)
V <- vcov(tdCox)
out <- NULL
if (!is.null(W)) {
iW <- 1:ncol(W)
out$gammas <- coefs[iW]
out$cov.gammas <- V[iW, iW]
coefs <- coefs[-iW]
V <- V[-iW, -iW, drop = FALSE]
}
if (!is.null(long) || param %in% c("shared-betasRE", "shared-RE")) {
iL <- if (!is.null(long)) 1:ncol(long) else 1:ncol(b)
out$alphas <- coefs[iL]
out$cov.alphas <- V[iL, iL]
coefs <- coefs[-iL]
V <- V[-iL, -iL, drop = FALSE]
}
if (!is.null(long.extra)) {
iLe <- 1:ncol(long.extra)
out$Dalphas <- coefs[iLe]
out$cov.Dalphas <- V[iLe, iLe]
}
if (baseHaz == "P-splines") {
K <- crossprod(diff(diag(ncol(W2)), diff = diff))
fn1 <- function (Bs.gammas, tauBs) {
pen <- 0.5 * tauBs * drop(crossprod(Bs.gammas, K %*% Bs.gammas))
- sum(event * drop(W2 %*% Bs.gammas) - P * fastSumID(rep(wk, nTime) * exp(drop(W2s %*% Bs.gammas)), id.GK)) + pen
}
out$tauBs <- tauBs <- 200
opt <- optim(rep(0, ncol(W2)), fn1, tauBs = tauBs, method = "BFGS", hessian = TRUE)
} else {
fn2 <- function (Bs.gammas) {
- sum(event * drop(W2 %*% Bs.gammas) - P * fastSumID(rep(wk, nTime) * exp(drop(W2s %*% Bs.gammas)), id.GK))
}
opt <- optim(rep(0, ncol(W2)), fn2, method = "BFGS", hessian = TRUE)
}
out$Bs.gammas <- opt$par
out$cov.Bs.gammas <- solve(opt$hessian)
ind <- !names(out) %in% c("cov.gammas", "cov.alphas", "cov.Dalphas", "cov.Bs.gammas", "tauBs")
out.vec <- unlist(as.relistable(out[ind]))
fn3 <- function (thetas) {
thetas <- relist(thetas, out[ind])
gammas <- thetas$gammas
Bs.gammas <- thetas$Bs.gammas
alphas <- thetas$alphas
Dalphas <- thetas$Dalphas
ns <- length(id.GK)
Mtime <- rep(0, nTime)
Ms <- rep(0, ns)
if (param %in% c("td-value", "td-both")) {
Mtime <- Mtime + if (is.matrix(vl)) c(vl %*% alphas) else vl * alphas
Ms <- Ms + if (is.matrix(vls)) c(vls %*% alphas) else vls * alphas
}
if (param %in% c("td-extra", "td-both")) {
Mtime <- Mtime + if (is.matrix(ex)) c(ex %*% Dalphas) else ex * Dalphas
Ms <- Ms + if (is.matrix(exs)) c(exs %*% Dalphas) else exs * Dalphas
}
if (param == "shared-RE")
Mtime <- c(b %*% alphas)
if (param == "shared-betasRE")
Mtime <- c((rep(betas[indBetas], each = nrow(b)) + b) %*% alphas)
log.h0 <- c(W2 %*% Bs.gammas)
log.h0s <- c(W2s %*% Bs.gammas)
etaW <- if (is.null(W)) rep(0, nTime) else c(W %*% gammas)
log.h <- log.h0 + etaW + Mtime
if (param == "shared-betasRE" || param == "shared-RE")
etaW <- etaW + Mtime
log.S <- exp(etaW) * P * fastSumID(rep(wk, nTime) * exp(log.h0s + Ms), id.GK)
pen <- if (baseHaz == "P-splines") {
0.5 * tauBs * c(crossprod(Bs.gammas, K %*% Bs.gammas))
} else 0
- sum(event * log.h - log.S, na.rm = TRUE) + pen
}
test <- try(opt2 <- suppressWarnings(optim(out.vec, fn3, method = "BFGS", hessian = TRUE,
control = list(parscale = rep(0.1, length(out.vec))))), silent = TRUE)
if (!inherits(test, "try-error") && !opt2$convergence && eigen(opt2$hessian, TRUE)$values > 0) {
res <- relist(opt2$par, out[ind])
out[names(res)] <- res
V <- solve(nearPD(opt2$hessian))
if (!is.null(W)) {
iW <- 1:ncol(W)
out$cov.gammas <- V[iW, iW]
V <- V[-iW, -iW, drop = FALSE]
}
if (param %in% c("td-value", "td-both")) {
iL <- 1:ncol(long)
out$cov.alphas <- V[iL, iL]
V <- V[-iL, -iL, drop = FALSE]
}
if (param %in% c("td-extra", "td-both")) {
iLe <- 1:ncol(long.extra)
out$cov.Dalphas <- V[iLe, iLe]
V <- V[-iLe, -iLe, drop = FALSE]
}
if (param %in% c("shared-betasRE", "shared-RE")) {
iSRE <- 1:ncol(b)
out$cov.alphas <- V[iSRE, iSRE]
V <- V[-iSRE, -iSRE, drop = FALSE]
}
out$cov.Bs.gammas <- V
}
    if (!is.null(out$alphas) && anyNA(out$alphas)) {
out$alphas <- rep(0, length(out$alphas))
out$cov.alphas <- diag(0.1, length(out$alphas))
}
    if (!is.null(out$Dalphas) && anyNA(out$Dalphas)) {
out$Dalphas <- rep(0, length(out$Dalphas))
out$cov.Dalphas <- diag(0.1, length(out$Dalphas))
}
out
}
|
context("DAISIE_sumstats_rates")
test_that("use simple ontogeny code", {
out <- DAISIE_calc_sumstats_pcrates(
pars = c(0.2, 0.2, 40, 0.1, 1),
area_pars = create_area_pars(max_area = 1000,
current_area = 500,
proportional_peak_t = 0.1,
total_island_age = 12,
sea_level_amplitude = 0,
sea_level_frequency = 0,
island_gradient_angle = 0),
totaltime = 10,
island_ontogeny = 1,
extcutoff = 100,
mainland_n = 1000,
resol = 100,
hyper_pars = create_hyper_pars(
d = 0.2,
x = 0.1)
)
expect_true(is.list(out))
})
|
AutoXGBoostClassifier <- function(OutputSelection = c('Importances', 'EvalPlots', 'EvalMetrics', 'Score_TrainData'),
data = NULL,
TrainOnFull = FALSE,
ValidationData = NULL,
TestData = NULL,
TargetColumnName = NULL,
FeatureColNames = NULL,
WeightsColumnName = NULL,
IDcols = NULL,
model_path = NULL,
metadata_path = NULL,
SaveInfoToPDF = FALSE,
ModelID = "FirstModel",
EncodingMethod = "credibility",
ReturnFactorLevels = TRUE,
ReturnModelObjects = TRUE,
SaveModelObjects = FALSE,
Verbose = 0L,
NumOfParDepPlots = 3L,
NThreads = max(1L, parallel::detectCores()-2L),
LossFunction = 'reg:logistic',
CostMatrixWeights = c(1,0,0,1),
grid_eval_metric = "MCC",
eval_metric = "auc",
TreeMethod = "hist",
GridTune = FALSE,
BaselineComparison = "default",
MaxModelsInGrid = 10L,
MaxRunsWithoutNewWinner = 20L,
MaxRunMinutes = 24L*60L,
PassInGrid = NULL,
Trees = 1000L,
eta = 0.30,
max_depth = 9,
min_child_weight = 1,
subsample = 1,
colsample_bytree = 1,
DebugMode = FALSE) {
if(DebugMode) print("Check args ----")
XGBoostArgsCheck(GridTune.=GridTune, model_path.=model_path, metadata_path.=metadata_path, Trees.=Trees, max_depth.=max_depth, eta.=eta, min_child_weight.=min_child_weight, subsample.=subsample, colsample_bytree.=colsample_bytree)
ArgsList <- c(as.list(environment()))
ArgsList[['data']] <- NULL
ArgsList[['ValidationData']] <- NULL
ArgsList[['TestData']] <- NULL
if(SaveModelObjects) {
if(!is.null(metadata_path)) {
save(ArgsList, file = file.path(metadata_path, paste0(ModelID, "_ArgsList.Rdata")))
} else if(!is.null(model_path)) {
save(ArgsList, file = file.path(model_path, paste0(ModelID, "_ArgsList.Rdata")))
}
}
if(DebugMode) print("Data prep ----")
Output <- XGBoostDataPrep(Algo="xgboost", ModelType="classification", data.=data, ValidationData.=ValidationData, TestData.=TestData, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, WeightsColumnName.=WeightsColumnName, IDcols.=IDcols, TransformNumericColumns.=NULL, Methods.=NULL, ModelID.=ModelID, model_path.=model_path, TrainOnFull.=TrainOnFull, SaveModelObjects.=SaveModelObjects, ReturnFactorLevels.=ReturnFactorLevels, EncodingMethod.=EncodingMethod, DebugMode.=DebugMode)
FactorLevelsList <- Output$FactorLevelsList; Output$FactorLevelsList <- NULL
FinalTestTarget <- Output$FinalTestTarget; Output$FinalTestTarget <- NULL
WeightsVector <- Output$WeightsVector; Output$WeightsVector <- NULL
datavalidate <- Output$datavalidate; Output$datavalidate <- NULL
TrainTarget <- Output$TrainTarget; Output$TrainTarget <- NULL
TestTarget <- Output$TestTarget; Output$TestTarget <- NULL
TrainMerge <- Output$TrainMerge; Output$TrainMerge <- NULL
datatrain <- Output$datatrain; Output$datatrain <- NULL
TestMerge <- Output$TestMerge; Output$TestMerge <- NULL
dataTrain <- Output$dataTrain; Output$dataTrain <- NULL
TestData <- Output$TestData; Output$TestData <- NULL
datatest <- Output$datatest; Output$datatest <- NULL
EvalSets <- Output$EvalSets; Output$EvalSets <- NULL
dataTest <- Output$dataTest; Output$dataTest <- NULL
IDcols <- Output$IDcols; Output$IDcols <- NULL
Names <- Output$Names; rm(Output)
ExperimentalGrid <- NULL; BestGrid <- NULL
if(DebugMode) print("Grid tuning ----")
if(GridTune) {
Output <- XGBoostGridTuner(ModelType="classification", TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, DebugMode.=DebugMode, TreeMethod.=TreeMethod, Trees.=Trees, Depth.=max_depth, LearningRate.=eta, min_child_weight.=min_child_weight, subsample.=subsample, colsample_bytree.=colsample_bytree, LossFunction=LossFunction, EvalMetric=eval_metric, grid_eval_metric.=grid_eval_metric, CostMatrixWeights=CostMatrixWeights, datatrain.=datatrain, datavalidate.=datavalidate, datatest.=datatest, EvalSets.=EvalSets, TestTarget.=TestTarget, FinalTestTarget.=FinalTestTarget, TargetLevels.=TargetLevels, MaxRunsWithoutNewWinner=MaxRunsWithoutNewWinner, MaxModelsInGrid=MaxModelsInGrid, MaxRunMinutes=MaxRunMinutes, BaselineComparison.=BaselineComparison, SaveModelObjects=SaveModelObjects, metadata_path=metadata_path, model_path=model_path, ModelID=ModelID, Verbose.=Verbose, NumLevels.=NULL)
ExperimentalGrid <- Output$ExperimentalGrid
BestGrid <- Output$BestGrid
}
if(DebugMode) print("Final Params ----")
Output <- XGBoostFinalParams(PassInGrid.=PassInGrid, TrainOnFull.=TrainOnFull, BestGrid.=BestGrid, GridTune.=GridTune, LossFunction.=LossFunction, eval_metric.=eval_metric, NThreads.=NThreads, TreeMethod.=TreeMethod, Trees.=Trees)
base_params <- Output$base_params
NTrees <- if(length(Output$NTrees) > 1L) max(Output$NTrees) else Output$NTrees; rm(Output)
if(DebugMode) print("Build model ----")
  if(!is.null(WeightsVector)) {
    model <- xgboost::xgb.train(params = base_params, data = datatrain, watchlist = EvalSets, nrounds = NTrees, verbose = Verbose, weight = WeightsVector)
  } else {
    model <- xgboost::xgb.train(params = base_params, data = datatrain, watchlist = EvalSets, nrounds = NTrees, verbose = Verbose)
  }
if(DebugMode) print("Save Model ----")
if(SaveModelObjects) save(model, file = file.path(model_path, ModelID))
ShapValues <- list()
if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
predict <- data.table::as.data.table(stats::predict(model, datatrain))
if(!is.null(datatest)) {
predict_validate <- data.table::as.data.table(stats::predict(model, datavalidate))
predict <- data.table::rbindlist(list(predict, predict_validate))
data.table::setnames(predict, names(predict), "p1")
rm(predict_validate)
}
Output <- XGBoostValidationData(model.=model, TestData.=NULL, ModelType="classification", TrainOnFull.=TRUE, TestDataCheck=FALSE, FinalTestTarget.=FinalTestTarget, TestTarget.=TestTarget, TrainTarget.=TrainTarget, TrainMerge.=TrainMerge, TestMerge.=TestMerge, dataTest.=dataTest, data.=dataTrain, predict.=predict, TargetColumnName.=TargetColumnName, SaveModelObjects. = SaveModelObjects, metadata_path.=metadata_path, model_path.=model_path, ModelID.=ModelID, LossFunction.=NULL, TransformNumericColumns.=NULL, GridTune.=GridTune, TransformationResults.=NULL, TargetLevels.=NULL)
TrainData <- Output$ValidationData; rm(Output)
if(!"p1" %chin% names(TrainData)) data.table::setnames(TrainData, "V1", "p1")
} else {
TrainData <- NULL
}
if(DebugMode) print("Score Model ----")
predict <- stats::predict(model, if(!is.null(TestData)) datatest else if(!TrainOnFull) datavalidate else datatrain)
if(DebugMode) print("Running ValidationData()")
Output <- XGBoostValidationData(model.=model, TestData.=TestData, ModelType="classification", TrainOnFull.=TrainOnFull, TestDataCheck=!is.null(TestData), FinalTestTarget.=FinalTestTarget, TestTarget.=TestTarget, TrainTarget.=TrainTarget, TestMerge.=TestMerge, dataTest.=dataTest, data.=dataTrain, predict.=predict, TargetColumnName.=TargetColumnName, SaveModelObjects. = SaveModelObjects, metadata_path.=metadata_path, model_path.=model_path, ModelID.=ModelID, LossFunction.=NULL, TransformNumericColumns.=NULL, GridTune.=GridTune, TransformationResults.=NULL, TargetLevels.=NULL)
ValidationData <- Output[['ValidationData']]; Output$ValidationData <- NULL
VariableImportance <- Output[['VariableImportance']]; rm(Output)
if(DebugMode) print("Running BinaryMetrics()")
EvalMetricsList <- list()
EvalMetrics2List <- list()
if("evalmetrics" %chin% tolower(OutputSelection)) {
if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
EvalMetricsList[["TrainData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=TrainData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "threshold")
EvalMetrics2List[["TrainData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=TrainData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "bins")
if(SaveModelObjects) {
if(!is.null(metadata_path)) {
data.table::fwrite(EvalMetricsList[['TrainData']], file = file.path(metadata_path, paste0(ModelID, "_Train_EvaluationMetrics.csv")))
} else if(!is.null(model_path)) {
data.table::fwrite(EvalMetricsList[['TrainData']], file = file.path(model_path, paste0(ModelID, "_Train_EvaluationMetrics.csv")))
}
}
}
EvalMetricsList[["TestData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=ValidationData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "threshold")
EvalMetrics2List[["TestData"]] <- BinaryMetrics(ClassWeights.=NULL, CostMatrixWeights.=CostMatrixWeights, SaveModelObjects.=FALSE, ValidationData.=ValidationData, TrainOnFull.=TrainOnFull, TargetColumnName.=TargetColumnName, ModelID.=ModelID, model_path.=model_path, metadata_path.=metadata_path, Method = "bins")
if(SaveModelObjects) {
if(!is.null(metadata_path)) {
data.table::fwrite(EvalMetricsList[['TestData']], file = file.path(metadata_path, paste0(ModelID, "_Test_EvaluationMetrics.csv")))
} else if(!is.null(model_path)) {
data.table::fwrite(EvalMetricsList[['TestData']], file = file.path(model_path, paste0(ModelID, "_Test_EvaluationMetrics.csv")))
}
}
}
if(DebugMode) print("Running ML_EvalPlots()")
PlotList <- list()
if("evalplots" %chin% tolower(OutputSelection)) {
if("score_traindata" %chin% tolower(OutputSelection) && !TrainOnFull) {
Output <- ML_EvalPlots(ModelType="classification", TrainOnFull.=TrainOnFull, DataType = 'Train', ValidationData.=TrainData, NumOfParDepPlots.=NumOfParDepPlots, VariableImportance.=VariableImportance, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, SaveModelObjects.=SaveModelObjects, ModelID.=ModelID, metadata_path.=metadata_path, model_path.=model_path, LossFunction.=NULL, EvalMetric.=NULL, EvaluationMetrics.=NULL, predict.=NULL)
PlotList[["Train_EvaluationPlot"]] <- Output$EvaluationPlot; Output$EvaluationPlot <- NULL
PlotList[["Train_ParDepPlots"]] <- Output$ParDepPlots; Output$ParDepPlots <- NULL
PlotList[["Train_GainsPlot"]] <- Output$GainsPlot; Output$GainsPlot <- NULL
PlotList[["Train_LiftPlot"]] <- Output$LiftPlot; Output$LiftPlot <- NULL
PlotList[["Train_ROC_Plot"]] <- Output$ROC_Plot; rm(Output)
}
Output <- ML_EvalPlots(ModelType="classification", TrainOnFull.=TrainOnFull, DataType = 'Test', ValidationData.=ValidationData, NumOfParDepPlots.=NumOfParDepPlots, VariableImportance.=VariableImportance, TargetColumnName.=TargetColumnName, FeatureColNames.=FeatureColNames, SaveModelObjects.=SaveModelObjects, ModelID.=ModelID, metadata_path.=metadata_path, model_path.=model_path, LossFunction.=NULL, EvalMetric.=NULL, EvaluationMetrics.=NULL, predict.=NULL)
PlotList[["Test_EvaluationPlot"]] <- Output$EvaluationPlot; Output$EvaluationPlot <- NULL
PlotList[["Test_ParDepPlots"]] <- Output$ParDepPlots; Output$ParDepPlots <- NULL
PlotList[["Test_GainsPlot"]] <- Output$GainsPlot; Output$GainsPlot <- NULL
PlotList[["Test_LiftPlot"]] <- Output$LiftPlot; Output$LiftPlot <- NULL
PlotList[["Test_ROC_Plot"]] <- Output$ROC_Plot; rm(Output)
if(!is.null(VariableImportance) && "plotly" %chin% installed.packages()) PlotList[['Train_VariableImportance']] <- plotly::ggplotly(VI_Plot(Type = "xgboost", VariableImportance)) else if(!is.null(VariableImportance)) PlotList[['Train_VariableImportance']] <- VI_Plot(Type = "xgboost", VariableImportance)
}
if(DebugMode) print("Running CatBoostPDF()")
if("pdfs" %chin% tolower(OutputSelection) && SaveModelObjects) {
CatBoostPDF(ModelClass = "xgboost", ModelType="classification", TrainOnFull.=TrainOnFull, SaveInfoToPDF.=SaveInfoToPDF, PlotList.=PlotList, VariableImportance.=VariableImportance, EvalMetricsList.=EvalMetricsList, Interaction.=NULL, model_path.=model_path, metadata_path.=metadata_path)
}
if(!exists("FactorLevelsList")) FactorLevelsList <- NULL
if(DebugMode) print("Return objects ----")
if(ReturnModelObjects) {
return(list(
Model = model,
TrainData = if(exists("TrainData")) TrainData else NULL,
TestData = if(exists("ValidationData")) ValidationData else NULL,
PlotList = if(exists("PlotList")) PlotList else NULL,
EvaluationMetrics = if(exists("EvalMetricsList")) EvalMetricsList else NULL,
EvaluationMetrics2 = if(exists("EvalMetrics2List")) EvalMetrics2List else NULL,
VariableImportance = if(exists("VariableImportance")) VariableImportance else NULL,
GridMetrics = if(exists("ExperimentalGrid") && !is.null(ExperimentalGrid)) data.table::setorderv(ExperimentalGrid, cols = "EvalMetric", order = -1L, na.last = TRUE) else NULL,
ColNames = if(exists("Names")) Names else NULL,
FactorLevelsList = if(exists("FactorLevelsList")) FactorLevelsList else NULL,
ArgsList = ArgsList))
}
}
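# Hypothetical usage sketch on simulated data; all other arguments keep the
# defaults shown in the signature above.
# library(data.table)
# dt <- data.table(Target = rbinom(1000, 1, 0.5), x1 = rnorm(1000), x2 = rnorm(1000))
# mod <- AutoXGBoostClassifier(data = dt, TargetColumnName = "Target",
#                              FeatureColNames = c("x1", "x2"), Trees = 100L)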
|
summary.learnIQ2 <-
function (object, ...){
res <- list (s2Reg=object$s2Fit);
class (res) <- "summary.learnIQ2"
res
}
|
plot.PrepGR <- function(x, type = "l", col.Precip = "royalblue", col.Q = "black", col.na = "grey",
xlab = NULL, ylab = NULL, main = NULL, plot.na = TRUE, ...) {
if (!inherits(x, "PrepGR")) {
stop("Non convenient data for x argument. Must be of class \"PrepGR\"")
}
if (is.null(xlab)) {
xlab <- "Time"
}
if (is.null(ylab)) {
yunit <- .TypeModelGR(x)$TimeUnit
ylab <- paste0(c("precip. [mm/", "flow [mm/"), yunit, "]")
} else {
if (length(ylab) < 2) {
ylab <- c(ylab, "")
}
}
data <- data.frame(DatesR = x$InputsModel$DatesR,
Precip = x$InputsModel$Precip,
Qobs = x$Qobs)
opar <- par(no.readonly = TRUE)
on.exit(par(opar))
layout(mat = matrix(1:2), widths = c(1, 2), heights = c(1, 2))
par(mar = c(0.1, 4, 4, 2), xaxt = "n")
plot(Precip ~ DatesR, data = data, type = "h", col = col.Precip,
xlab = "", ylab = ylab[1L], main = main, ylim = rev(range(data$Precip)))
par(mar = c(5, 4, 0.1, 2), xaxt = "s")
if (all(is.na(data$Qobs))) {
par(yaxt = "n")
plot(x = range(data$DatesR), y = c(0, 0), type = "n",
xlab = xlab, ylab = ylab[2L], main = "")
text(x = median(data$DatesR), y = 0, labels = "No observed\ndischarges")
} else {
plot(Qobs ~ DatesR, data = data, type = type, col = col.Q,
xlab = xlab, ylab = ylab[2L], main = "")
}
if (plot.na) {
axis(side = 1, at = as.POSIXct(data$DatesR[is.na(data$Qobs)]),
labels = FALSE, lwd.ticks = 3, col.ticks = col.na, tck = 0.025, lend = "butt")
legend("topright", legend = "NA", pch = 15, col = col.na, bty = "n", cex = 0.8)
}
box()
}
|
LLRA <- function(X, W, mpoints, groups, baseline=NULL, itmgrps=NULL,...)
{
if(missing(mpoints)) stop("Please specify the number of time points. If there are none, you might want to try PCM() or LPCM().")
Xprep <- llra.datprep(X,mpoints,groups,baseline)
  itmgrps <- 1:Xprep$nitems
groupvec <- Xprep$assign.vec
pplgrps <- length(Xprep$grp_n)
if(missing(W)) W <- build_W(Xprep$X,length(unique(itmgrps)),mpoints,Xprep$grp_n,groupvec,itmgrps)
fit <- LPCM(Xprep$X,W,mpoints=mpoints,groupvec=groupvec,sum0=FALSE)
refg <- unique(names(which(groupvec==max(groupvec))))
out <- c(fit,"itms"=Xprep$nitems,"refGroup"=refg)
out$call <- match.call()
class(out) <- c("llra","Rm","eRm")
cat("Reference group: ",refg,"\n\n")
return(out)
}
|
sfarima.sim <- function(n_x, n_t, model)
{
ar_mat <- as.matrix(model$ar); ma_mat = as.matrix(model$ma)
ar_x <- -ar_mat[-1, 1]; ar_t = -ar_mat[1, -1]
ma_x <- ma_mat[-1, 1]; ma_t = ma_mat[1, -1]
if (isFALSE(all.equal(as.matrix(ar_mat[-1, -1]), ar_x %*% t(ar_t))) ||
isFALSE(all.equal(as.matrix(ma_mat[-1, -1]), ma_x %*% t(ma_t))))
{
warning("Provided coefficient matrices do not specify a separable process.")
}
  if (!is.numeric(model$d) || any(abs(model$d) > 0.5))
{
stop("Long memory parameter \"d\" incorrectly specified.")
}
nstart <- max(floor(1.5 * c(n_x, n_t)), 150)
k_x <- min(50, n_x); k_t = min(50, n_t)
n_x <- n_x + nstart
n_t <- n_t + nstart
eps_mat <- matrix(stats::rnorm(n_x * n_t), n_x, n_t) * model$sigma
ma_inf_x <- c(1, stats::ARMAtoMA(ar = ar_x, ma = ma_x, lag.max = k_x))
d_x <- choose(-model$d[1], 0:k_x) * ((-1)^(0:k_x))
coef_x <- cumsum_part_reverse(d_x, ma_inf_x)
ma_inf_t <- t(c(1, stats::ARMAtoMA(ar = ar_t, ma = ma_t, lag.max = k_t)))
d_t <- choose(-model$d[2], 0:k_t) * ((-1)^(0:k_t))
coef_t <- cumsum_part_reverse(d_t, ma_inf_t)
X1.sim <- X2.sim <- matrix(0, n_x, n_t)
for(j in 1:n_t) {
if (j <= k_t) {
X2.sim[, j] <- eps_mat[, j:1, drop = FALSE] %*% coef_t[1:j, drop = FALSE]
}
else {
X2.sim[, j] <- eps_mat[, j:(j - k_t)] %*% coef_t
}
}
for(i in 1:n_x) {
if (i <= k_x) {
X1.sim[i, ] <- coef_x[1:i, drop = FALSE] %*% X2.sim[i:1, , drop = FALSE]
}
else {
X1.sim[i, ] <- t(coef_x) %*% X2.sim[i:(i - k_x), ]
}
}
sfarima_out <- X1.sim[(nstart + 1):n_x, (nstart + 1):n_t]
error_out <- eps_mat[(nstart + 1):n_x, (nstart + 1):n_t]
coef_out <- list(Y = sfarima_out, innov = error_out, model = model,
stnry = TRUE)
class(coef_out) <- "sfarima"
attr(coef_out, "subclass") <- "sim"
return(coef_out)
}
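# Usage example: simulate a 100 x 100 separable SFARIMA field. The ar/ma
# coefficient matrices are built as outer products so they pass the
# separability check above; cumsum_part_reverse() is an internal helper of
# the package, assumed available. Parameter values are illustrative.
model <- list(ar = outer(c(1, -0.5), c(1, -0.3)),
              ma = outer(c(1, 0.2), c(1, 0.4)),
              d = c(0.2, 0.1), sigma = 1)
sim <- sfarima.sim(100, 100, model)
dim(sim$Y)  # 100 x 100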
sfarima.ord <- function(Rmat, pmax = c(0, 0), qmax = c(0, 0), crit = "bic",
restr = NULL, sFUN = min, parallel = TRUE)
{
if(crit == "bic") {
crit.fun = stats::BIC
}
else if (crit == "aic") {
crit.fun = stats::AIC
}
bic_x = matrix(0, pmax[1] + 1, qmax[1] + 1)
bic_t = matrix(0, pmax[2] + 1, qmax[2] + 1)
R_x = as.vector(Rmat)
R_t = as.vector(t(Rmat))
if (parallel == TRUE)
{
n.cores = parallel::detectCores(logical = TRUE) - 1
doParallel::registerDoParallel(n.cores)
`%dopar%` = foreach::`%dopar%`
`%:%` = foreach::`%:%`
bic_x = foreach::foreach(i = 1:(pmax[1] + 1), .combine = "rbind") %:%
foreach::foreach(j = 1:(qmax[1] + 1), .combine = "c") %dopar%
{
bic = crit.fun(suppressWarnings(fracdiff::fracdiff(R_x, nar = i - 1,
nma = j - 1, drange = c(0, 0.5))))
}
bic_t = foreach::foreach(i = 1:(pmax[2] + 1), .combine = "rbind") %:%
foreach::foreach(j = 1:(qmax[2] + 1), .combine = "c") %dopar%
{
bic = crit.fun(suppressWarnings(fracdiff::fracdiff(R_t, nar = i - 1,
nma = j - 1, drange = c(0, 0.5))))
}
doParallel::stopImplicitCluster()
} else {
for (i in 1:(pmax[1] + 1))
{
for (j in 1:(qmax[1] + 1))
{
bic_x[i, j] = crit.fun(suppressWarnings(fracdiff::fracdiff(R_x,
nar = i - 1, nma = j - 1, drange = c(0, 0.5))))
}
}
for (i in 1:(pmax[2] + 1))
{
for (j in 1:(qmax[2] + 1))
{
bic_t[i, j] = crit.fun(suppressWarnings(fracdiff::fracdiff(R_t,
nar = i - 1, nma = j - 1, drange = c(0, 0.5))))
}
}
}
restr = substitute(restr)
if(!is.null(restr)){
ord.opt_x <- c(which(bic_x == sFUN(bic_x[eval(restr)]), arr.ind = TRUE) - 1)
ord.opt_t <- c(which(bic_t == sFUN(bic_t[eval(restr)]), arr.ind = TRUE) - 1)
} else {
ord.opt_x <- c(which(bic_x == sFUN(bic_x), arr.ind = TRUE) - 1)
ord.opt_t <- c(which(bic_t == sFUN(bic_t), arr.ind = TRUE) - 1)
}
ar = c(ord.opt_x[1], ord.opt_t[1])
ma = c(ord.opt_x[2], ord.opt_t[2])
model_order = list(ar = ar, ma = ma)
return(model_order)
}
|
tX <- timeSequence("2014-03-07 00:00:00", "2014-03-07 23:59:59", by="sec")
s <- sample(1:length(tX))[1:length(tX)/10]
tX <- tX[-s]
require(timeSeries)
set.seed(1953)
tX <- timeSequence("2014-03-07 09:03:17", "2014-03-07 15:53:16", by="sec")
s <- sample(seq_along(tX), length(tX) %/% 10)
tX <- sort(tX[-s])
tS <- 201.7*cumulated(timeSeries(data=rnorm(length(tX))/(24*3600), charvec=tX))
plot(tS)
head(tS)
tZ <- align(tS, by="1min", method="fillNA", offset="42s")
head(tZ)
tZ <- align(tS, by="3min", method="fillNA", offset="162s")
head(tZ)
tZ <- align(tS, by="5min", method="fillNA", offset="102")
head(tZ)
tZ <- align(tS, by="15min", method="fillNA", offset="702s")
head(tZ)
tZ <- align(tS, by="30min", method="fillNA", offset="1602s")
head(tZ)
tZ <- align(tS, by="60min", method="fillNA", offset="3402")
head(tZ)
toPeriod <- function(x, by, method, offset="0s"")
{
open <- function(x) as.vector(x)[1]
high <- function(x) max(x)
low <- function(x) min(x)
close <- function(x) rev(as.vector(x))[1]
cbind(
aggregate(SPI, by, open),
aggregate(SPI, by, high),
aggregate(SPI, by, low),
aggregate(SPI, by, close))
}
A1 <- timeSeries::align(tS, by="60min")
A2 <- xts::to.period(as.xts(tS), period = "minutes", k = 2)
open <- function(x) as.vector(x)[1]
close <- function(x) rev(as.vector(x))[1]
high <- function(x) max(x)
low <- function(x) min(x)
SPI <- tS[, "SPI"]
by <- timeLastDayInMonth(time(tS))
OHLC <- cbind(
aggregate(SPI, by, open),
aggregate(SPI, by, high),
aggregate(SPI, by, low),
aggregate(SPI, by, close))
OHLC
## Usage signatures (from the xts documentation):
## xts::to.minutes(x, k, name, ...)
## xts::to.minutes3(x, name, ...)
## xts::to.minutes5(x, name, ...)
## xts::to.minutes10(x, name, ...)
## xts::to.minutes15(x, name, ...)
## xts::to.minutes30(x, name, ...)
## xts::to.hourly(x, name, ...)
alignDaily(x=time(tS), include.weekends=FALSE)
alignMonthly(x=time(tS), include.weekends=FALSE)
alignQuarterly(x=time(tS), include.weekends=FALSE)
tD <- Sys.timeDate() + 1:1000
timeDate::align(tD, by="10s")
timeDate::align(tD, by="60s")
timeDate::align(tD, by="10m")
td <- as.xts(Sys.time()) + 1:1000
xts::align.time(td, n=10)
xts::align.time(td, n=60)
xts::align.time(td, n=10*60)
xts::shift.time(td, n=10)
xts::shift.time(td, n=60)
xts::shift.time(td)
## Usage signatures (from the xts documentation):
## xts::to.daily(x, drop.time=TRUE, name, ...)
## xts::to.weekly(x, drop.time=TRUE, name, ...)
## xts::to.monthly(x, indexAt='yearmon', drop.time=TRUE, name, ...)
## xts::to.quarterly(x, indexAt='yearqtr', drop.time=TRUE, name, ...)
## xts::to.yearly(x, drop.time=TRUE, name, ...)
## xts::to.period(x, period='months', k=1, indexAt, name=NULL, OHLC=TRUE, ...)
## to.period() converts an object to a periodicity lower than that of the
## given data object, e.g. a daily series to a monthly series, a monthly
## series to a yearly one, or a one-minute series to an hourly series.
data(sample_matrix)
xts <- as.xts(sample_matrix)
to.weekly(xts)
to.monthly(xts)
to.quarterly(xts)
to.yearly(xts)
tS <- as.timeSeries(sample_matrix)
as.numeric(as.POSIXct(time(tS)))
getFinCenter(tS)
indexTZ(xts)
tzone(xts)
tzone(xts) <- "GMT"
.index(xts)
indexClass(xts)
class(time(tS))
.index <- function(x) as.numeric(as.POSIXct(time(x)))
.indexDate <- function(x) .index(x)%/%86400L
.indexday <- function(x) .index(x)%/%86400L
.indexmday <- function(x) as.POSIXlt(.POSIXct(.index(x)))$mday
.indexwday <- function(x) as.POSIXlt(.POSIXct(.index(x)))$wday
## bodies completed following the mday/wday pattern above (mirrors the xts internals)
.indexweek <- function(x) ((.index(x) + 3L*86400L) %/% 86400L) %/% 7L  # Monday-aligned weeks since epoch
.indexmon <- function(x) as.POSIXlt(.POSIXct(.index(x)))$mon
.indexyday <- function(x) as.POSIXlt(.POSIXct(.index(x)))$yday
.indexyear <- function(x) as.POSIXlt(.POSIXct(.index(x)))$year
.indexhour <- function(x) as.POSIXlt(.POSIXct(.index(x)))$hour
.indexmin <- function(x) as.POSIXlt(.POSIXct(.index(x)))$min
.indexsec <- function(x) as.POSIXlt(.POSIXct(.index(x)))$sec
atoms(tS)  # timeSeries counterpart: calendar atoms (Y, m, d, H, M, S) of the stamps
timeSeries::rollMin(
x, k, na.pad = FALSE, align = c("center", "left", "right"), ...)
timeSeries::rollMax(
x, k, na.pad = FALSE, align = c("center", "left", "right"), ...)
timeSeries::rollMean(
x, k, na.pad = FALSE, align = c("center", "left", "right"), ...)
timeSeries::rollMedian(
x, k, na.pad = FALSE, align = c("center", "left", "right"), ...)
timeSeries::rollStats(
x, k, FUN = mean, na.pad = FALSE, align = c("center", "left", "right"), ...)
rollDailySeries(x, period="7d", FUN, ...)
rollMonthlySeries(x, period="12m", by="1m", FUN, ...)
rollMonthlyWindows(x, period="12m", by="1m")
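For instance, a right-aligned 5-observation rolling mean of tS, with na.pad keeping the output the same length as the input:
rm5 <- timeSeries::rollMean(tS, k = 5, na.pad = TRUE, align = "right")
head(rm5, 10)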
apply        # base R: row/column-wise aggregation
applySeries  # timeSeries: apply a function over calendar blocks of a series
x1 <- xts(matrix(1:(9*6),nc=6),
order.by=as.Date(13000,origin="1970-01-01")+1:9)
x2 <- x1
xtsAttributes(x1) <- list(series1="1")
xtsAttributes(x2) <- list(series2="2")
xtsAttributes(x1)
xtsAttributes(x2)
x3 <- x1+x2
xtsAttributes(x3)
x33 <- cbind(x1, x2)
xtsAttributes(x33)
x33 <- rbind(x2, x1)
xtsAttributes(x33)
appendList <- function(x, value) {
  stopifnot(is.list(x), is.list(value))
  xnames <- names(x)
  for (v in names(value)) {
    x[[v]] <-
      if (v %in% xnames && is.list(x[[v]]) && is.list(value[[v]]))
        appendList(x[[v]], value[[v]])
      else
        c(x[[v]], value[[v]])
  }
  x
}
"setAttributes<-" <- function(obj, value) {
stopifnot(is.list(value))
ATTRIBUTES <- getAttributes(obj)
VALUE <- appendList(ATTRIBUTES, value)
attr(obj@documentation, "Attributes") <- VALUE
obj }
getAttributes <- function(obj) {
attr(obj@documentation, "Attributes") }
obj1 <- dummySeries()
getAttributes(obj1)
setAttributes(obj1) <- list(series="obj1")
getAttributes(obj1)
obj2 <- dummySeries()
getAttributes(obj2)
setAttributes(obj2) <- list(series="obj2")
getAttributes(obj2)
getAttributes(obj1+obj2)
getAttributes(obj1-obj2)
getAttributes(cbind(obj1, obj2))
getAttributes(cbind(obj1, as.matrix(obj2)))
getAttributes(rbind(obj1, obj2))
getAttributes(rbind(obj1, as.matrix(obj2)))
obj <- obj1  # any documented series will do for the following checks
getAttributes( rev(obj) )
getAttributes( obj[, 1] )
getAttributes( sample(obj) )
getAttributes( sort(sample(obj)) )
getAttributes( scale(obj) )
getAttributes( returns(obj) )
getAttributes( cumulated(returns(obj)) )
## BIND: editing the "Attributes" on the documentation slot, then carrying it through cbind()
ATTRIBUTES <- attr(obj@documentation, "Attributes")
ATTRIBUTES
ATTRIBUTES <- appendList(ATTRIBUTES, list(say="hello"))
ATTRIBUTES
attr(obj@documentation, "Attributes") <- ATTRIBUTES
cbind(obj, obj, documentation = obj@documentation)
|
"as.list.relimplm" <- function(from,to)
{
to <- slot(from,"var.y")
to <- append(to,list(R2=slot(from,"R2")))
to<-append(to,list(R2.decomp=slot(from,"R2.decomp")))
if (length(from@lmg)>0) to <- append(to,list(lmg=as.vector(from@lmg)))
if (length([email protected])>0) to <- append(to,list(lmg.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(lmg.diff=as.vector([email protected])))
if (length(from@pmvd)>0) to <- append(to,list(pmvd=as.vector(from@pmvd)))
if (length([email protected])>0) to <- append(to,list(pmvd.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(pmvd.diff=as.vector([email protected])))
if (length(from@last)>0) to <- append(to,list(last=as.vector(from@last)))
if (length([email protected])>0) to <- append(to,list(last.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(last.diff=as.vector([email protected])))
if (length(from@first)>0) to <- append(to,list(first=as.vector(from@first)))
if (length([email protected])>0) to <- append(to,list(first.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(first.diff=as.vector([email protected])))
if (length(from@betasq)>0) to <- append(to,list(betasq=as.vector(from@betasq)))
if (length([email protected])>0) to <- append(to,list(betasq.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(betasq.diff=as.vector([email protected])))
if (length(from@pratt)>0) to <- append(to,list(pratt=as.vector(from@pratt)))
if (length([email protected])>0) to <- append(to,list(pratt.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(pratt.diff=as.vector([email protected])))
if (length(from@genizi)>0) to <- append(to,list(genizi=as.vector(from@genizi)))
if (length([email protected])>0) to <- append(to,list(genizi.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(genizi.diff=as.vector([email protected])))
if (length(from@car)>0) to <- append(to,list(car=as.vector(from@car)))
if (length([email protected])>0) to <- append(to,list(car.rank=as.vector([email protected])))
if (length([email protected])>0) to <- append(to,list(car.diff=as.vector([email protected])))
if (length(from@namen)>0) to <- append(to,list(namen=as.vector(from@namen)))
if (length(from@type)>0) to <- append(to,list(type=as.vector(from@type)))
if (length(from@rela)>0) to<-append(to,list(rela=as.vector(from@rela)))
if (length(from@always)>0) to<-append(to,list(always=as.vector(from@always)))
if (length(from@alwaysnam)>0) to<-append(to,list(alwaysnam=as.vector(from@alwaysnam)))
return(to)
}
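A usage sketch, assuming the relaimpo package this coercion belongs to: calc.relimp() on the swiss data returns a relimplm object, which as.list() flattens into the plain list built above.
library(relaimpo)
fit <- calc.relimp(swiss, type = c("lmg", "first", "last"), rela = TRUE)
str(as.list(fit))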
|
library(testthat)
library(stars)
context("gdal utils")
test_that('gdal_utils work', {
skip_on_appveyor()
fname = system.file("nc/tos_O1_2001-2002.nc", package = "stars")
info = gdal_utils("info", fname, quiet = TRUE)
sd2 = gdal_subdatasets(fname)[[4]]
info = gdal_utils("info", sd2, quiet = TRUE)
tf = tempfile()
tf2 = tempfile()
tf3 = tempfile()
expect_true(gdal_utils("warp", sd2, tf, c("-t_srs", "+proj=utm +zone=11 +datum=WGS84")))
expect_true(gdal_utils("translate", sd2, tf))
expect_true(gdal_utils("vectortranslate", sd2, tf2))
expect_warning(gdal_utils("nearblack", sd2, tf))
points = system.file("gpkg/nc.gpkg", package="sf")
expect_true(gdal_utils("grid", points, tf))
expect_true(gdal_utils("buildvrt", sd2, tf3))
})
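The same utility works interactively outside testthat; a sketch using sf::gdal_utils (the function the test exercises) on the bundled NetCDF file:
fname <- system.file("nc/tos_O1_2001-2002.nc", package = "stars")
info <- sf::gdal_utils("info", fname, quiet = TRUE)  # returns the metadata string
cat(info)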
|
create.FEM.basis = function(mesh)
{
if(!inherits(mesh, "mesh.2D") && !inherits(mesh, "mesh.2.5D") && !inherits(mesh, "mesh.3D"))
stop("Unknown mesh class")
if (inherits(mesh, "mesh.2D")){
nbasis = dim(mesh$nodes)[[1]]
eleProp = R_elementProperties(mesh)
FEMbasis = list(mesh = mesh, order = as.integer(mesh$order), nbasis = nbasis, detJ=eleProp$detJ, transf_coord = eleProp$transf_coord)
class(FEMbasis) = "FEMbasis"
FEMbasis
} else if (inherits(mesh, "mesh.2.5D") || inherits(mesh, "mesh.3D")){
FEMbasis = list(mesh = mesh, order = as.integer(mesh$order), nbasis = mesh$nnodes)
class(FEMbasis) = "FEMbasis"
FEMbasis
}
}
FEM<-function(coeff,FEMbasis)
{
if (is.null(coeff))
stop("coeff required; is NULL.")
if (is.null(FEMbasis))
stop("FEMbasis required; is NULL.")
if(!inherits(FEMbasis, "FEMbasis"))
stop("FEMbasis not of class 'FEMbasis'")
coeff = as.matrix(coeff)
if(nrow(coeff) != FEMbasis$nbasis)
stop("Number of rows of 'coeff' differs from the number of basis functions")
fclass = list(coeff=coeff, FEMbasis=FEMbasis)
class(fclass) <- "FEM"
return(fclass)
}
R_elementProperties=function(mesh)
{
nele = dim(mesh$triangles)[[1]]
nodes = mesh$nodes
triangles = mesh$triangles
transf_coord = NULL
transf_coord$diff1x = nodes[triangles[,2],1] - nodes[triangles[,1],1]
transf_coord$diff1y = nodes[triangles[,2],2] - nodes[triangles[,1],2]
transf_coord$diff2x = nodes[triangles[,3],1] - nodes[triangles[,1],1]
transf_coord$diff2y = nodes[triangles[,3],2] - nodes[triangles[,1],2]
detJ = transf_coord$diff1x*transf_coord$diff2y - transf_coord$diff2x*transf_coord$diff1y
FEStruct <- list(detJ=detJ, transf_coord=transf_coord)
return(FEStruct)
}
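A usage sketch, assuming the fdaPDE mesh constructor these helpers are designed for: triangulate the unit square, build the basis, and wrap a constant coefficient vector as an FEM object.
library(fdaPDE)
nodes <- rbind(c(0, 0), c(1, 0), c(1, 1), c(0, 1))
mesh <- create.mesh.2D(nodes = nodes)                     # Delaunay triangulation of the square
basis <- create.FEM.basis(mesh)
f <- FEM(coeff = rep(1, basis$nbasis), FEMbasis = basis)  # constant function on the mesh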
|
mrf_forecast <- function(Model, Horizon=1){
Data = Model$Data
Coefficients = Model$CoefficientCombination
Aggregation = Model$Aggregation
Method = Model$Method
Threshold = Model$Threshold
Lambda = Model$Lambda
if(!is.vector(Data)){
message("Data must be of type vector")
return()
}
if(!is.double(Horizon)){
message("Horizon must be of type double")
return()
}
if(!is.vector(Aggregation)){
message("Aggregation must be of type vector")
return()
}
if(!is.character(Method)){
message("Method must be of type character")
return()
}
if(Method %in% c("r", "nn")){
if(!is.vector(Coefficients)){
message("ccps must be of type vector")
return()
}
}
if(is.null(Coefficients) && (Method %in% c("r", "nn"))){
message("CoefficientCombination must be given for all methods except 'elm' or 'nnetar'.")
return()
}
if(Horizon != Model$Horizon){
message("Forecasting horizon must be the same as the horizon for which the model was trained for.")
return()
}
Forecast = mrf_multi_step_forecast(UnivariateData=Data, Horizon=Horizon,
Aggregation=Aggregation,
CoefficientCombination=Coefficients,
Method=Method,
Threshold=Threshold, Lambda=Lambda)
return(list("Forecast"=Forecast,
"Model"=Model))
}
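A hedged usage sketch: mrf_train() below is an assumed companion trainer (not shown here) returning the Model list this function consumes; only mrf_forecast() itself is defined above.
x <- as.vector(AirPassengers)        # any univariate series
model <- mrf_train(x, Horizon = 2)   # hypothetical trainer returning Data, Aggregation, Method, ...
res <- mrf_forecast(model, Horizon = 2)
res$Forecast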
|
"book_banning"
|
H2OIsolationForest <- function(data,
Features = NULL,
IDcols = NULL,
ModelID = "TestModel",
SavePath = NULL,
Threshold = 0.975,
MaxMem = "28G",
NThreads = -1,
NTrees = 100,
MaxDepth = 8,
MinRows = 1,
RowSampleRate = (sqrt(5)-1)/2,
ColSampleRate = 1,
ColSampleRatePerLevel = 1,
ColSampleRatePerTree = 1,
CategoricalEncoding = c("AUTO"),
Debug = FALSE) {
if(!data.table::is.data.table(data)) data.table::setDT(data)
if(!is.null(IDcols) && !is.character(IDcols)) stop("IDcols needs to be a character scalar or vector")
if(!is.null(ModelID) && !is.character(ModelID)) stop("ModelID needs to be a character scalar or vector")
if(!is.null(Features) && !is.character(Features)) stop("Features needs to be a character scalar or vector")
if(!is.null(SavePath) && !is.character(SavePath)) stop("SavePath needs to be a character scalar or vector")
if(!is.null(SavePath) && !dir.exists(SavePath)) {
# create the save directory on the fly rather than failing
dir.create(SavePath, recursive = TRUE)
warning("SavePath directory did not exist; one was created")
}
ID <- IDcols
for(i in seq_len(length(names(data)))) {
if(any(class(data[[i]]) %in% c("Date","POSIXct","IDate","IDateTime"))) {
Features <- Features[!Features %in% names(data)[i]]
ID <- c(ID, names(data)[i])
}
if(names(data)[i] %chin% ID) {
Features <- Features[!Features %in% names(data)[i]]
}
}
Features <- unique(Features)
ID <- unique(ID)
if(!is.null(ID) && (length(ID) + length(Features) == length(names(data)))) {
IDcolData <- data[, .SD, .SDcols = c(ID)]
data[, (ID) := NULL]
} else if(!is.null(ID) && (length(ID) + length(Features) != length(names(data)))) {
ID <- c(ID, setdiff(names(data), c(Features, ID)))
IDcolData <- data[, .SD, .SDcols = c(ID)]
data[, (ID) := NULL]
}
data <- ModelDataPrep(
data = data,
Impute = TRUE,
CharToFactor = TRUE,
FactorToChar = FALSE,
IntToNumeric = TRUE,
LogicalToBinary = TRUE,
DateToChar = FALSE,
IDateConversion = FALSE,
RemoveDates = FALSE,
MissFactor = "0",
MissNum = -1,
IgnoreCols = NULL)
if(Debug) print(str(data))
if(Debug) print(str(IDcolData))
if(Debug) print(Features)
localH2O <- h2o::h2o.init(max_mem_size = MaxMem, nthreads = NThreads, enable_assertions = FALSE)
Data <- h2o::as.h2o(data)
IsolationForest <- h2o::h2o.isolationForest(
training_frame = Data,
x = Features,
model_id = ModelID,
ntrees = NTrees,
sample_rate = RowSampleRate,
max_depth = MaxDepth,
min_rows = MinRows,
stopping_rounds = 0,
stopping_metric = "AUTO",
col_sample_rate_change_per_level = ColSampleRatePerLevel,
col_sample_rate_per_tree = ColSampleRatePerTree,
categorical_encoding = CategoricalEncoding)
OutliersRaw <- data.table::as.data.table(h2o::h2o.predict(object = IsolationForest, newdata = Data))
if(!is.null(SavePath)) SaveModel <- h2o::h2o.saveModel(object = IsolationForest, path = SavePath, force = TRUE)
h2o::h2o.shutdown(prompt = FALSE)
data.table::setnames(OutliersRaw, c("predict", "mean_length"), c("PredictIsoForest", "MeanLength"))
Cutoff <- quantile(OutliersRaw[["PredictIsoForest"]], probs = Threshold)[[1L]]
OutliersRaw[, PredictedOutlier := data.table::fifelse(PredictIsoForest > Cutoff, 1, 0)]
OutliersRaw[, Rank := data.table::frank(PredictIsoForest) / .N]
data.table::setcolorder(OutliersRaw, c(4L, 3L, 1L, 2L))
data <- cbind(data, OutliersRaw)
if(exists("IDcolData")) data <- cbind(IDcolData, data)
return(data)
}
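A usage sketch for the trainer: two numeric features plus an ID column, scored against a 0.95 quantile cutoff and saved to a temporary directory. Assumes a local H2O runtime and the package's ModelDataPrep helper.
library(data.table)
dt <- data.table(id = 1:1000, a = rnorm(1000), b = rnorm(1000))
out <- H2OIsolationForest(dt, Features = c("a", "b"), IDcols = "id",
                          ModelID = "TestModel", SavePath = tempdir(),
                          Threshold = 0.95, NTrees = 50, MaxMem = "4G")
head(out)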
H2OIsolationForestScoring <- function(data,
Features = NULL,
IDcols = NULL,
H2OStart = TRUE,
H2OShutdown = TRUE,
ModelID = "TestModel",
SavePath = NULL,
Threshold = 0.975,
MaxMem = "28G",
NThreads = -1,
Debug = FALSE) {
if(!is.null(SavePath) && !dir.exists(SavePath)) stop("SavePath is not a valid directory")
if(!data.table::is.data.table(data)) data.table::setDT(data)
if(!is.null(IDcols) && !is.character(IDcols)) stop("IDcols needs to be a character scalar or vector")
if(!is.null(ModelID) && !is.character(ModelID)) stop("ModelID needs to be a character scalar or vector")
if(!is.null(Features) && !is.character(Features)) stop("Features needs to be a character scalar or vector")
if(!is.null(SavePath) && !is.character(SavePath)) stop("SavePath needs to be a character scalar or vector")
ID <- IDcols
for(i in seq_len(length(names(data)))) {
if(any(class(data[[i]]) %in% c("Date","POSIXct","IDate","IDateTime"))) {
Features <- Features[!Features %in% names(data)[i]]
ID <- c(ID, names(data)[i])
}
if(names(data)[i] %chin% ID) {
Features <- Features[!Features %in% names(data)[i]]
}
}
Features <- unique(Features)
ID <- unique(ID)
if(!is.null(ID) && (length(ID) + length(Features) == length(names(data)))) {
IDcolData <- data[, .SD, .SDcols = c(ID)]
data[, (ID) := NULL]
} else if(!is.null(ID) && (length(ID) + length(Features) != length(names(data)))) {
ID <- c(ID, setdiff(names(data), c(Features, ID)))
IDcolData <- data[, .SD, .SDcols = c(ID)]
data[, (ID) := NULL]
}
data <- ModelDataPrep(
data = data,
Impute = TRUE,
CharToFactor = TRUE,
FactorToChar = FALSE,
IntToNumeric = TRUE,
LogicalToBinary = TRUE,
DateToChar = FALSE,
IDateConversion = FALSE,
RemoveDates = FALSE,
MissFactor = "0",
MissNum = -1,
IgnoreCols = NULL)
if(Debug) print(str(data))
if(Debug) print(str(IDcolData))
if(Debug) print(Features)
if(H2OStart) localH2O <- h2o::h2o.init(nthreads = NThreads, max_mem_size = MaxMem, enable_assertions = FALSE)
H2O_Data <- h2o::as.h2o(data)
ModelObject <- h2o::h2o.loadModel(path = file.path(SavePath, ModelID))
OutliersRaw <- data.table::as.data.table(h2o::h2o.predict(object = ModelObject, newdata = H2O_Data))
rm(H2O_Data, ModelObject)
if(H2OShutdown) h2o::h2o.shutdown(prompt = FALSE)
data.table::setnames(OutliersRaw, c("predict", "mean_length"), c("PredictIsoForest", "MeanLength"))
Cutoff <- quantile(OutliersRaw[["PredictIsoForest"]], probs = Threshold)[[1L]]
OutliersRaw[, PredictedOutlier := data.table::fifelse(PredictIsoForest > Cutoff, 1, 0)]
OutliersRaw[, Rank := data.table::frank(PredictIsoForest) / .N]
data.table::setcolorder(OutliersRaw, c(4L, 3L, 1L, 2L))
data <- cbind(data, OutliersRaw)
if(exists("IDcolData")) data <- cbind(IDcolData, data)
return(data)
}
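And re-scoring fresh rows with the model saved above (same assumptions; H2O is restarted because the trainer shuts it down):
new_dt <- data.table(id = 1:100, a = rnorm(100), b = rnorm(100))
scored <- H2OIsolationForestScoring(new_dt, Features = c("a", "b"), IDcols = "id",
                                    ModelID = "TestModel", SavePath = tempdir(),
                                    MaxMem = "4G")
head(scored)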
|