setGeneric("setRowHeight",
function(object, sheet, row, height = -1) standardGeneric("setRowHeight"))
setMethod("setRowHeight",
signature(object = "workbook", sheet = "numeric"),
function(object, sheet, row, height = -1) {
xlcCall(object, "setRowHeight", as.integer(sheet - 1), as.integer(row - 1), height)
invisible()
}
)
setMethod("setRowHeight",
signature(object = "workbook", sheet = "character"),
function(object, sheet, row, height = -1) {
xlcCall(object, "setRowHeight", sheet, as.integer(row - 1), height)
invisible()
}
)
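# Usage sketch (assumes an XLConnect-style workbook object; not run):
# wb <- loadWorkbook("report.xlsx")
# setRowHeight(wb, sheet = 1, row = 2, height = 30)          # sheet by index
# setRowHeight(wb, sheet = "Summary", row = 2, height = 30)  # sheet by name
# The default height = -1 restores the sheet's standard row height.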
|
is.binary <- function(phy) UseMethod("is.binary")
is.binary.phylo <- function(phy)
length(phy$tip.label) - phy$Nnode + is.rooted.phylo(phy) == 2
is.binary.tree <- function(phy)
{
message("is.binary.tree() is deprecated; using is.binary() instead.\n\nis.binary.tree() will be removed soon: see ?is.binary and update your code.")
is.binary(phy)
}
is.binary.multiPhylo <- function(phy)
{
phy <- unclass(phy)
n <- length(attr(phy, "TipLabel"))
if (n)
n - sapply(phy, "[[", "Nnode") + is.rooted.multiPhylo(phy) == 2
else
sapply(phy, is.binary.phylo)
}
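# Quick check (assumes the ape package, whose rtree() generates binary trees):
# library(ape)
# is.binary(rtree(5))               # TRUE for a random binary tree
# is.binary(c(rtree(5), rtree(8)))  # vectorised over a multiPhylo object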
|
autoplot_MAPraster <- function(object, ...){
object <- as.MAPraster(object)
plot <- autoplot.MAPraster(object, ...)
return(invisible(plot))
}
|
aggregateSimulationReplications <- function(results) {
.SD <- NULL
if (!inherits(results, "data.table")) {
results <- extractSimulationResults(results)
}
assertDataTable(results)
agg <- data.table::dcast(results, date + resource ~ .,
value.var="count", fill=0,
fun.aggregate=list(min=min,
lq=function(x) quantile(x, 0.25),
median=median,
uq=function(x) quantile(x, 0.75),
max=max))
oldNames <- grep("^count_", colnames(agg), value=TRUE)
newNames <- gsub("count_", "", oldNames)
setnames(agg, oldNames, newNames)
agg[, (newNames) := lapply(.SD, round), .SDcols=newNames]
agg
}
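# A minimal, self-contained illustration of the dcast() aggregation above;
# with a named fun.aggregate list, dcast produces one "count_<stat>" column
# per statistic, which the function then strips back to the bare stat names:
library(data.table)
toy <- data.table(date = as.IDate("2020-01-01") + rep(0:1, each = 3),
                  resource = "bed", count = c(1, 4, 2, 5, 3, 6))
dcast(toy, date + resource ~ ., value.var = "count",
      fun.aggregate = list(min = min, median = median, max = max))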
|
library(otvPlots)
context("Summary stats for numerical variables")
load("../testthat/testData.rda")
setDT(testData)
suppressMessages(PrepData(testData, dateNm = "date", dateGp = "weeks", dateGpBp = "weeks", weightNm = "weight"))
test_that("Numerical statistics are calculated correctly without weight", {
mdx = SummaryStats(myVar = "age", dataFl = testData, dateGp = "weeks")$meltdx
Mean = mdx[variable=='Mean']
p1 = mdx[variable=='p1']
p99 = mdx[variable=='p99']
zerorate = mdx[variable=='zerorate']
missingrate = mdx[variable=='missingrate']
p99_g = unique(mdx[variable=='p99_g', value])
p1_g = unique(mdx[variable=='p1_g', value])
cl1 = unique(mdx[variable=='cl1', value])
cl2 = unique(mdx[variable=='cl2', value])
expect_equivalent(p99_g, quantile(testData[, age], p=.99))
expect_equivalent(p1_g, quantile(testData[, age], p=.01))
expect_equivalent(cl1, mean(testData[, age]) + sd(testData[,age]))
expect_equivalent(cl2, mean(testData[, age]) - sd(testData[,age]))
mdx2 = mdx[weeks == "2008-05-06" & variable%in%c("p99", "p50", "p1", "mean", "zerorate", "missingrate")]
expect_equivalent(mdx2[variable=="p99", value], quantile(testData[weeks==as.IDate("2008-05-06"),age], .99))
expect_equivalent(mdx2[variable=="p50", value], quantile(testData[weeks==as.IDate("2008-05-06"),age], .5))
expect_equivalent(mdx2[variable=="p1", value], quantile(testData[weeks==as.IDate("2008-05-06"),age], .01))
expect_equivalent(mdx2[variable=="mean", value], mean(testData[weeks==as.IDate("2008-05-06"),age]))
expect_equivalent(mdx2[variable=="zerorate", value], mean(testData[weeks==as.IDate("2008-05-06"),age]==0))
expect_equivalent(mdx2[variable=="missingrate", value], mean(is.na(testData[weeks==as.IDate("2008-05-06"),age])))
})
test_that("Numerical statistics are calculated correctly with weight", {
mdx = SummaryStats(myVar = "age", dataFl = testData, dateGp = "weeks", weightNm = "weight")$meltdx
Mean = mdx[variable=='Mean']
p1 = mdx[variable=='p1']
p99 = mdx[variable=='p99']
zerorate = mdx[variable=='zerorate']
missingrate = mdx[variable=='missingrate']
p99_g = unique(mdx[variable=='p99_g', value])
p1_g = unique(mdx[variable=='p1_g', value])
cl1 = unique(mdx[variable=='cl1', value])
cl2 = unique(mdx[variable=='cl2', value])
expect_equivalent(p99_g, Hmisc::wtd.quantile(testData[, age], testData[, weight], probs=.99, normwt=TRUE))
expect_equivalent(p1_g, Hmisc::wtd.quantile(testData[, age], testData[, weight], probs=.01, normwt=TRUE))
expect_equivalent(cl2, Hmisc::wtd.mean(testData[, age], testData[,weight], na.rm=TRUE, normwt=TRUE) -
sqrt(Hmisc::wtd.var(testData[,age], testData[,weight], na.rm=TRUE,normwt=TRUE)))
expect_equivalent(cl1, Hmisc::wtd.mean(testData[, age], testData[,weight], na.rm=TRUE, normwt=TRUE) +
sqrt(Hmisc::wtd.var(testData[,age], testData[,weight], na.rm=TRUE,normwt=TRUE)))
mdx2 = mdx[weeks == "2008-05-06" & variable%in%c("p99", "p50", "p1", "mean", "zerorate", "missingrate")]
testData2 = testData[weeks==as.IDate("2008-05-06")]
expect_equivalent(mdx2[variable=="p99", value], Hmisc::wtd.quantile(testData2[, age],testData2[, weight], .99, normwt=TRUE))
expect_equivalent(mdx2[variable=="p50", value], Hmisc::wtd.quantile(testData2[, age],testData2[, weight], .5, normwt=TRUE))
expect_equivalent(mdx2[variable=="p1", value], Hmisc::wtd.quantile(testData2[, age],testData2[, weight], .01, normwt=TRUE))
expect_equivalent(mdx2[variable=="mean", value], Hmisc::wtd.mean(testData2[,age], testData2[,weight]))
expect_equivalent(mdx2[variable=="zerorate", value], Hmisc::wtd.mean((testData2[,age]==0), testData2[,weight]))
expect_equivalent(mdx2[variable=="missingrate", value], Hmisc::wtd.mean(is.na(testData2[,age]), testData2[,weight]))
})
|
.internal.mle.1wre <- function(xi, si2, ni, labi = c(1:length(xi)),
max.iter = 200, tol = .Machine$double.eps ^ 0.5, trace = FALSE,
init.mu = mean(xi), init.sigma2 = var(xi), lambda = 1) {
xi <- xi[order(labi)]
si2 <- si2[order(labi)]
ni <- ni[order(labi)]
labi <- labi[order(labi)]
xi <- xi[!is.na(si2)]
ni <- ni[!is.na(si2)]
labi <- labi[!is.na(si2)]
si2 <- si2[!is.na(si2)]
p <- length(xi)
N <- sum(ni)
Theta <- matrix(-Inf, max.iter + 1, p + 3)
mu <- init.mu
sigma2 <- init.sigma2
sigmai2 <- si2
t <- 1
llh <- 0
llh <- -N/2 * log(2 * pi) - sum((ni - 1)*log(sigmai2))/2 -
sum(log(sigmai2 + ni * sigma2))/2 - sum((ni - 1)*si2/sigmai2)/2 -
sum(ni*(xi - mu)^2/(sigmai2 + ni*sigma2))/2
Theta[t, ] <- c(mu, sigmai2, sigma2, llh)
cur.rel.abs.error <- Inf
while ((cur.rel.abs.error > tol) && (t < max.iter) && all(sigmai2 > 0) &&
(sigma2 > 0)) {
mu <- Theta[t,1]
sigmai2 <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
wi <- ni/(sigmai2 + ni*sigma2)
var.mu <- 1/sum(wi)
llh<- -N/2*log(2*pi) - sum((ni - 1)*log(sigmai2))/2 -
sum(log(sigmai2 + ni*sigma2))/2 - sum((ni-1)*si2/sigmai2)/2 -
sum(ni*(xi - mu)^2/(sigmai2 + ni*sigma2))/2
S<- c(sum((xi - mu)/(sigma2 + sigmai2/ni)),
-(ni - 1)/sigmai2/2 - 1/(ni*sigma2 + sigmai2)/2 +
(ni - 1)*si2/sigmai2^2/2 +
ni*(xi - mu)^2/(ni*sigma2 + sigmai2)^2/2,
-sum(1/(sigma2 + sigmai2/ni))/2 +
sum((xi - mu)^2/(sigma2 + sigmai2/ni)^2)/2)
I <- matrix(0, p + 2, p + 2)
I[1, 1] <- sum(1/(sigma2 + sigmai2/ni))
for (j in 1:p) {
I[j + 1, j + 1] <- (1/2)*(ni[j] - 1)/sigmai2[j]^2 +
1/(sigmai2[j] + ni[j]*sigma2)^2/2
I[j + 1, p + 2] <- ni[j]/2/(sigmai2[j]+ni[j]*sigma2)^2
I[p + 2, j + 1] <- I[j+1,p+2]
}
I[p + 2, p + 2] <- sum(1/(sigma2 + sigmai2/ni)^2)/2
Iinv <- ginv(I)
Theta[t + 1, 1:(p + 2)] <- Theta[t, 1:(p + 2)] + lambda*(Iinv %*% S)
Theta[t + 1, p + 3] <- llh
old.theta <- Theta[t, ]
new.theta <- Theta[t+1, ]
map.old.theta <- old.theta
map.old.theta[2:(p + 1)] <- 1/(old.theta[2:(p + 1)]/ni +
old.theta[p + 2])
map.old.theta[p + 2] <- 1/sum(map.old.theta[2:(p + 1)])
map.new.theta <- new.theta
map.new.theta[2:(p + 1)] <- 1/(new.theta[2:(p + 1)]/ni +
new.theta[p + 2])
map.new.theta[p + 2] <- 1/sum(map.new.theta[2:(p + 1)])
cur.rel.abs.error <- max(abs((map.old.theta -
map.new.theta)/map.new.theta))
t <- t + 1
mu <- Theta[t, 1]
sigmai2 <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
if (is.na(sigma2)) stop("sigma2 became undefined.")
if (any(is.na(sigmai2))) stop("some sigmai2 became undefined")
if (is.na(mu)) stop("mu became undefined")
if (is.na(cur.rel.abs.error))
stop("current relative absolute error became undefined")
}
if ((t == max.iter) || (cur.rel.abs.error > tol) || any(sigmai2 <= 0) ||
(sigma2 <= 0)) {
warning("Non convergence or slow convergence condition was found.")
}
Theta <- Theta[1:t,]
mu <- Theta[t, 1]
sigmai2 <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
wi <- ni/(sigmai2 + ni*sigma2)
var.mu <- 1/sum(wi)
llh <- -N/2*log(2*pi) - sum((ni - 1)*log(sigmai2))/2 -
sum(log(sigmai2 + ni*sigma2))/2 - sum((ni - 1)*si2/sigmai2)/2 -
sum(ni*(xi - mu)^2/(sigmai2 + ni*sigma2))/2
if (!trace) Theta <-NULL
result<- list( mu = as.vector(mu),
var.mu = as.vector(var.mu),
sigma2 = as.vector(sigma2),
llh = as.vector(llh),
tot.iter = as.vector(t),
max.rel.abs.error = as.vector(cur.rel.abs.error),
sigmai2 = as.vector(sigmai2),
trace = Theta
)
class(result)<- "summary.mle.1wre"
return(result)
}
.internal.newton.raphson <- function(f, fp, init.value,
max.tol = .Machine$double.eps^0.5, trace = FALSE, range.tol = 0) {
max.iter <- 40
tol <- 1
iter <- 0
x <- init.value
if (fp(x) != 0) {
while ((iter < max.iter) & (tol > max.tol)) {
xn <- x - f(x)/fp(x)
if (trace) print( c(iter, xn, x, f(x), fp(x), tol) )
iter <- iter + 1
if (range.tol == 0) {
tol <- abs(xn - x)
} else
if (range.tol == 1) {
tol <- abs(xn - x)/abs(max(x,xn))
} else {
tol <- abs(f(xn))
}
x <- xn
if (trace) print( c(iter, xn, x, f(x), fp(x), tol) )
}
if (x > 1) x <- 1
if (x < 0) x <- 0
if (trace) print( c(iter, xn, x, f(x), fp(x), tol) )
} else {
x <- init.value
}
return(x)
}
.internal.find.roots <- function(mu, sigma2, gammai, xi, si2, ni,
tol = .Machine$double.eps^0.25, trace = FALSE)
{
p <- length(xi)
N <- sum(ni)
wi <- gammai/sigma2
sigmai2 <- ni*sigma2*(1 - gammai)/gammai
var.mu <- 1/sum(wi)
nroots <- rep(NA, p)
llh <- -N/2*log(2*pi) - sum((ni - 1)*log(sigmai2))/2 -
sum(log(sigmai2 + ni*sigma2))/2 - sum((ni - 1)*si2/sigmai2)/2 -
sum(ni*(xi - mu)^2/(sigmai2 + ni*sigma2))/2
if (trace) print( c(mu, var.mu, sigma2, sigmai2, llh) )
s2 <- sigma2
nj <-1
xj <-1
sj2 <-1
ai <- sigma2/(xi - mu)^2
bi <- si2/(ni*(xi - mu)^2)
bb <- -(ai + 2)
cc <- ((ni + 1)*ai + (ni - 1)*bi + 1)
dd <- -ni*ai
pol <- function(x) {
return( x^3 + bb*x^2 + cc*x + dd )
}
roots <- c(1:p)
vroots <- matrix(NA, p, 3)
if (s2 == 0) {
roots <- (xi - mu)^2 + (ni - 1)*si2/ni
} else {
delta <- roots
delta.w <- roots
if (trace) print( "list of positive roots" )
for (i in 1:p) {
nj <- ni[i]
xj <- xi[i]
sj2 <- si2[i]
aj <- sigma2/(xj - mu)^2
bj <- sj2/(nj*(xj - mu)^2)
a <- (2*nj - 1)*s2 - (xj - mu)^2 - (nj - 1)*sj2/nj
b <- (nj - 1)*s2*(nj*s2 - 2*sj2)
c <- -(nj - 1)*nj*sj2*(s2^2)
delta[i] <- -4*a^3*c + a^2*b^2 - 4*b^3 + 18*a*b*c - 27*c^2
bb <- -(aj + 2)
cc <- ((nj + 1)*aj+(nj - 1)*bj + 1)
dd <- -nj*aj
delta.w[i] <- -18*bb*cc*dd + 4*bb^3*dd + bb^2*cc^2 - 4*cc^3 +
27*dd^2
if (delta.w[i] < 0) {
j <- 1
root <- rep(0, j)
root[1] <- uniroot( pol, interval=c(0, 1), tol = tol )$root
if (trace) print( c(i, root) )
vroots[i, 1] <- root[1]
nroots[i] <- 1
if (root[1] <= 0) {
stop( c("single negative root was found for dataset ", i) )
}
pllh <- rep(0, nroots[i])
for (j in 1:nroots[i])
pllh[j] <- -nj/2*log(2*pi) - (nj - 1)*log(root[j])/2 -
log(root[j] + nj*s2)/2 - (nj - 1)*sj2/root[j]/2 -
nj*(xj - mu)^2/(root[j] + nj*s2)/2
if (trace) print( c(i, pllh) )
if (trace) print( c(i, roots[i]) )
} else if (delta[i] == 0) {
lim.root1 <- (-2*a + sqrt(4*a^2 - 12*b))/6
lim.root2 <- (-2*a - sqrt(4*a^2 - 12*b))/6
lim <- c(1:4)
lim[1] <- min(lim.root1, lim.root2)
lim[2] <- max(lim.root1, lim.root2)
if (lim[1] > 0) {
j <- 0
lim[3] <- lim[1] - 1
while (pol(lim[3] - 10^j) > 0) j <- j + 1
lim[3] <- lim[3] - 10^j
} else {
lim[3] <- 0
}
j <-0
lim[4] <- lim[2] + 1
while (pol(lim[4] + 10^j) < 0) j <- j + 1
lim[4] <- lim[4] + 10^j
lim<- sort(lim)
if (lim[1] == lim[2]) lim <- lim[-1]
root <- c(1:2)
root[1] <- uniroot( pol, interval = c(lim[1], lim[2]),
tol=tol)$root
root[2] <- uniroot( pol, interval = c(lim[2], lim[3]),
tol=tol)$root
if (trace) print( c(i, root) )
if (root[1] <= 0) {
root <- root[2]
}
if (trace) print( c(i, root) )
nroots[i] <- length(root)
pllh <- rep(0, nroots[i])
for (j in 1:nroots[i])
pllh[j] <- -nj/2*log(2*pi) - (nj - 1)*log(root[j])/2 -
log(root[j] + nj*s2)/2 - (nj - 1)*sj2/root[j]/2 -
nj*(xj - mu)^2/(root[j] + nj*s2)/2
if (any(is.na(pllh))) {
print(pllh)
print(c(N, mu, s2, root, xj, sj2, nj))
stop("Abnormal condition found")
}
for (j in 1:nroots[i])
if (pllh[j] == max(pllh)) roots[i] <- root[j]
if (trace) print( c(i, pllh) )
if (trace) print( c(i, roots[i]) )
} else {
lim.root1 <- (-2*a + sqrt(4*a^2 - 12*b))/6
lim.root2 <- (-2*a - sqrt(4*a^2 - 12*b))/6
lim <- c(1:4)
lim[1] <- min(lim.root1, lim.root2)
lim[2] <- max(lim.root1, lim.root2)
j <-0
lim[3] <- lim[1] - 1
while (pol(lim[3] - 10^j)>0) j <- j + 1
lim[3] <- lim[3] - 10^j
j <-0
lim[4] <- lim[2] + 1
while (pol(lim[4] + 10^j)<0) j <- j + 1
lim[4] <- lim[4] + 10^j
lim <- sort(lim)
root <- c(1:3)
root[1] <- uniroot( pol, interval = c(lim[1],lim[2]),
tol = tol)$root
root[2] <- uniroot( pol, interval = c(lim[2],lim[3]),
tol = tol)$root
root[3] <- uniroot( pol, interval = c(lim[3],lim[4]),
tol = tol)$root
if (trace) print( c(i, root) )
if (root[1] <= 0) {
root <- root[2:3]
}
if (root[1] <= 0) {
root <- root[2]
}
if (trace) print( c(i, root) )
nroots[i] <- length(root)
pllh <- rep(0, nroots[i])
for (j in 1:nroots[i])
pllh[j] <- -nj/2*log(2*pi) - (nj - 1)*log(root[j])/2 -
log(root[j] + nj*s2)/2 - (nj - 1)*sj2/root[j]/2 -
nj*(xj - mu)^2/(root[j] + nj*s2)/2
if (any(is.na(pllh))) {
print(pllh)
print(c(N, mu, s2, root, xj, sj2, nj))
stop("Abnormal condition found")
}
for (j in 1:nroots[i])
if (pllh[j] == max(pllh, na.rm = TRUE)) roots[i] <- root[j]
if (trace) print( c(i, pllh) )
if (trace) print( c(i, roots[i]) )
}
}
}
return( c(roots, nroots) )
}
vr.mle <- function(xi, si2, ni, labi = c(1:length(xi)), max.iter = 1000,
tol = .Machine$double.eps^0.5, init.mu = mean(xi), init.sigma2 = var(xi),
trace = FALSE, alpha = 0.05)
{
xi <- xi[order(labi)]
si2 <- si2[order(labi)]
ni <- ni[order(labi)]
labi <- labi[order(labi)]
xi <- xi[!is.na(si2)]
ni <- ni[!is.na(si2)]
labi <- labi[!is.na(si2)]
si2 <- si2[!is.na(si2)]
p <- length(xi)
if (p <= 1) { stop("vr.mle requires 2 or more sources of information.") }
N <- sum(ni)
Theta <- matrix(-Inf, max.iter + 1, p + 3)
Delta.theta <- matrix(0, max.iter + 1, p)
mu <- init.mu
sigma2 <- init.sigma2
sigmai2 <- si2
gammai <- sigma2/(sigma2 + sigmai2/ni)
t <- 1
var.mu <- sigma2/sum(gammai)
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2+sum(ni*log(gammai/sigma2))/2 -
sum((ni - 1)*log(1-gammai))/2 - sum(gammai*((xi - mu)^2 +
(ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
Theta[t, ] <- c(mu, gammai, sigma2, llh)
cur.rel.abs.error <- Inf
while ((cur.rel.abs.error > tol) && (t < max.iter) && all(gammai > 0) &&
(sigma2 > 0)) {
mu <- Theta[t, 1]
gammai <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
var.mu <- sigma2/sum(gammai)
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2 +
sum(ni*log(gammai/sigma2))/2 - sum((ni - 1)*log(1 - gammai))/2 -
sum(gammai*((xi - mu)^2 + (ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
ai <- sigma2/(xi - mu)^2
bi <- si2/ni/(xi - mu)^2
aa <- rep(1, p)
bb <- -(ai + 2)
cc <- ((ni + 1)*ai + (ni - 1)*bi + 1)
dd <- -ni*ai
Delta <- -(18*aa*bb*cc*dd - 4*bb^3*dd + bb^2*cc^2 - 4*aa*cc^3 -
27*aa^2*dd^2)
Delta.theta[t, ] <- Delta
ss <- Delta.theta[t,] <= 0
if (any(ss)) {
sss <- (bb^2 - 3*cc)[ss] >= 0
if (any(sss)) {
gamma.lim <- matrix(NA, sum(sss), 2)
for(iii in c(1:length(sss))[sss]) {
gamma.lim[iii, ] <- sort((-bb[ss][sss][iii] +
c(-1,1)*sqrt( (bb^2 - 3*cc)[ss][sss][iii] ))/3)
if (min(gamma.lim[iii, ]) > 1) {
} else {
warning(paste("There are additional points ",
"where the MLE was not evaluated."))
}
}
}
}
for (i in 1:p) {
nlm.res <- .internal.newton.raphson(function(x) {
x^3 - (ai[i] + 2)*x^2 +
((ni[i] + 1)*ai[i] + (ni[i] - 1)*bi[i] + 1)*x -
ni[i]*ai[i]},
function(x) {3*x^2 - 2*(ai[i] + 2)*x + (ni[i] + 1)*ai[i] +
(ni[i] - 1)*bi[i] + 1},
init.value = Theta[t, 1 + i], max.tol = tol)
gammai[i] <- nlm.res
if (gammai[i] > 1) stop("gamma[", i, "]>1")
if (gammai[i] < 0) stop("gamma[", i, "]<0")
}
mu <- sum(gammai/sum(gammai)*xi)
sigma2 <- sum(gammai/sum(gammai)*gammai*(xi - mu)^2)
Theta[t+1, ] <- c(mu, gammai, sigma2, llh)
old.theta <- Theta[t, ]
new.theta <- Theta[t + 1, ]
map.old.theta <- old.theta
map.old.theta[2:(p + 1)] <- old.theta[2:(p + 1)]/
sum(old.theta[2:(p + 1)])
map.old.theta[p + 2] <- old.theta[p + 2]/sum(old.theta[2:(p + 1)])
map.new.theta <- new.theta
map.new.theta[2:(p + 1)] <- new.theta[2:(p + 1)]/
sum(new.theta[2:(p + 1)])
map.new.theta[p + 2] <- new.theta[p + 2]/sum(new.theta[2:(p + 1)])
cur.rel.abs.error<- max(abs((map.old.theta - map.new.theta)/
map.new.theta))
t <- t + 1
mu <- Theta[t, 1]
gammai <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
if (is.na(sigma2)) stop("sigma2 became undefined.")
if (any(is.na(gammai))) stop("some gammai became undefined")
if (is.na(mu)) stop("mu became undefined")
if (is.na(cur.rel.abs.error))
stop("current relative absolute error became undefined")
}
ccm<- TRUE
if ((t == max.iter)||(cur.rel.abs.error > tol)|| any(gammai <= 0) ||
(sigma2 <= 0)) {
ccm <- FALSE
}
Theta <- Theta[1:t,]
t <- max(c(1:t)[Theta[, p + 3] == max(Theta[, p + 3])])
Theta <- Theta[1:t, ]
Delta.theta <- Delta.theta[1:t, ]
mu <- Theta[t, 1]
gammai <- Theta[t, c(2:(p + 1))]
sigma2 <- Theta[t, p + 2]
var.mu <- sigma2/sum(gammai)
old.theta <- Theta[t - 1, ]
new.theta <- Theta[t, ]
map.old.theta <- old.theta
map.old.theta[2:(p + 1)] <- old.theta[2:(p + 1)]/sum(old.theta[2:(p + 1)])
map.old.theta[p + 2] <- old.theta[p + 2]/sum(old.theta[2:(p + 1)])
map.new.theta <- new.theta
map.new.theta[2:(p + 1)] <- new.theta[2:(p + 1)]/sum(new.theta[2:(p + 1)])
map.new.theta[p + 2] <- new.theta[p + 2]/sum(new.theta[2:(p + 1)])
cur.rel.abs.error <- max(abs((map.old.theta - map.new.theta)/
map.new.theta))
if (sigma2 <= 0) {
sigma2 <- 0
si <- sqrt(si2)
sigmai <- si
iter <- 0
tol <- 1
mu <- mean(xi)
max.tol <- .Machine$double.eps^0.5
while ((iter < 100) & (tol > max.tol)) {
mu.n <- sum(xi/sigmai^2)/sum(1/sigmai^2)
sigmai.n <- sqrt(ni*(xi - mu)^2 + (ni - 1)*si^2)/ni
tol <- abs(mu.n - mu)
mu <- mu.n
sigmai <- sigmai.n
iter <- iter + 1
}
wi <- 1/sigmai^2
u.mu <- 1/sqrt(sum(wi))
N <- sum(ni)
llh <- -N*log(2*pi)/2 - sum(log(sigmai^2))/2 -
sum((ni*(xi - mu)^2 + (ni - 1)*si^2)/(2*sigmai^2))
var.mu <- u.mu^2
gammai <- wi
} else {
wi <- gammai/sigma2
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2 +
sum(ni*log(gammai/sigma2))/2 - sum((ni - 1)*log(1-gammai))/2 -
sum(gammai*((xi - mu)^2 + (ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
}
if (sigma2 > 0) {
v <- sum(wi)^2/sum(wi^2/(ni - 1))
sigma2.wi <- (1/gammai - 1)*sigma2*ni
v.w <- sum((ni - 1)*sigma2.wi)^2/sum((ni - 1)*sigma2.wi^2)
var.w <- (sum(gammai)^2*var.mu - sum(gammai^2)*sigma2)/sum(gammai^2/ni)
v.w <- (sum(ni)*var.mu - var.w)/sigma2 - 1
} else {
v <- N - 1
v.w <- v
var.w <- (sum(gammai)^2*var.mu)/sum(gammai^2/ni)
v.w <- var.w/var.mu*sum(gammai^2)/sum(gammai)^2
}
if (!trace) {
Theta<- NULL
}
result <- list( mu = as.vector(mu),
u.mu = as.vector(sqrt(var.mu)),
ci.mu = as.vector(mu + qnorm(c(alpha/2, 1 - alpha/2))*sqrt(var.mu)),
var.mu = as.vector(var.mu),
var.b = as.vector(sigma2),
var.w = as.vector(var.w),
dof.w = v.w,
llh = as.vector(llh),
tot.iter = as.vector(t),
max.rel.abs.error = as.vector(cur.rel.abs.error),
gammai = as.vector(gammai),
ccm = ccm,
reduced.model = (sigma2 == 0),
dof = v,
trace = Theta,
discriminant = Delta.theta
)
class(result) <- "summary.vr.mle"
return(result)
}
.internal.vr.mle.fixed <- function(xi, si2, ni, labi = c(1:length(xi)),
max.iter = 1000, tol = .Machine$double.eps^0.5, fixed.mu = mean(xi),
init.sigma2 = var(xi), trace = FALSE, alpha = 0.05)
{
xi <- xi[order(labi)]
si2 <- si2[order(labi)]
ni <- ni[order(labi)]
labi <- labi[order(labi)]
xi <- xi[!is.na(si2)]
ni <- ni[!is.na(si2)]
labi <- labi[!is.na(si2)]
si2 <- si2[!is.na(si2)]
p <- length(xi)
if (p <= 1) stop("vr.mle requires 2 or more sources of information.")
N <- sum(ni)
Theta <- matrix(-Inf, max.iter + 1, p + 2)
Delta.theta <- matrix(0, max.iter + 1, p)
mu <- fixed.mu
sigma2 <- init.sigma2
sigmai2 <- si2
gammai <- sigma2/(sigma2 + sigmai2/ni)
t <- 1
var.mu <- sigma2/sum(gammai)
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2 + sum(ni*log(gammai/sigma2))/2 -
sum((ni - 1)*log(1 - gammai))/2 -
sum(gammai*((xi - mu)^2 + (ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
Theta[t, ] <- c(gammai, sigma2, llh)
cur.rel.abs.error <- Inf
while ((cur.rel.abs.error > tol) && (t < max.iter) && all(gammai > 0) &&
(sigma2 > 0)) {
gammai <- Theta[t, c(1:p)]
sigma2 <- Theta[t, p + 1]
var.mu <- sigma2/sum(gammai)
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2 + sum(ni*log(gammai/sigma2))/2 -
sum((ni - 1)*log(1 - gammai))/2 -
sum(gammai*((xi - mu)^2 + (ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
ai <- sigma2/(xi - mu)^2
bi <- si2/ni/(xi - mu)^2
aa <- rep(1, p)
bb <- -(ai + 2)
cc <- ((ni + 1)*ai + (ni - 1)*bi + 1)
dd <- -ni*ai
Delta <- -(18*aa*bb*cc*dd - 4*bb^3*dd + bb^2*cc^2 - 4*aa*cc^3 -
27*aa^2*dd^2)
Delta.theta[t, ] <- Delta
ss <- Delta.theta[t, ] <= 0
if (any(ss)) {
sss <- (bb^2 - 3*cc)[ss] >= 0
if (any(sss)) {
gamma.lim <- matrix(NA, sum(sss), 2)
for(iii in c(1:length(sss))[sss]) {
gamma.lim[iii, ] <- sort((-bb[ss][sss][iii] +
c(-1, 1)*sqrt( (bb^2 - 3*cc)[ss][sss][iii] ))/3)
if (min(gamma.lim[iii, ]) > 1) {
} else {
warning(paste("There are additional points where ",
"the MLE was not evaluated."))
}
}
}
}
for (i in 1:p) {
nlm.res <- .internal.newton.raphson(function(x) {x^3 -
(ai[i] + 2)*x^2 +
((ni[i] + 1)*ai[i] + (ni[i] - 1)*bi[i] + 1)*x -
ni[i]*ai[i]},
function(x) {3*x^2 - 2*(ai[i] + 2)*x +
(ni[i] + 1)*ai[i] + (ni[i] - 1)*bi[i] + 1},
init.value = Theta[t, 1 + i], max.tol = tol)
gammai[i] <- nlm.res
if (gammai[i] > 1) stop("gamma[",i,"]>1")
if (gammai[i] < 0) stop("gamma[",i,"]<0")
}
sigma2 <- sum(gammai/sum(gammai)*gammai*(xi - mu)^2)
Theta[t+1, ] <- c(gammai, sigma2, llh)
old.theta <- Theta[t, ]
new.theta <- Theta[t + 1, ]
map.old.theta <- old.theta
map.old.theta[1:p] <- old.theta[1:p]/sum(old.theta[1:p])
map.old.theta[p + 1] <- old.theta[p + 1]/sum(old.theta[1:p])
map.new.theta <- new.theta
map.new.theta[1:p] <- new.theta[1:p]/sum(new.theta[1:p])
map.new.theta[p + 1] <- new.theta[p + 1]/sum(new.theta[1:p])
cur.rel.abs.error <- max(abs((map.old.theta - map.new.theta)/
map.new.theta))
t <- t + 1
gammai <- Theta[t, c(1:p)]
sigma2 <- Theta[t, p + 1]
if (is.na(sigma2)) stop("sigma2 became undefined.")
if (any(is.na(gammai))) stop("some gammai became undefined")
if (is.na(mu)) stop("mu became undefined")
if (is.na(cur.rel.abs.error))
stop("current relative absolute error became undefined")
}
ccm <- TRUE
if ((t == max.iter) && (cur.rel.abs.error > tol)) {
ccm <- FALSE
}
Theta <- Theta[1:t, ]
t <- max(c(1:t)[Theta[, p + 2] == max(Theta[, p + 2])])
Theta <- as.matrix(Theta[1:t, ], t, p + 2)
Delta.theta <- Delta.theta[1:t, ]
gammai <- Theta[t, c(1:p)]
sigma2 <- Theta[t, p + 1]
var.mu <- sigma2/sum(gammai)
old.theta <- Theta[t - 1, ]
new.theta <- Theta[t, ]
map.old.theta <- old.theta
map.old.theta[1:p] <- old.theta[1:p]/sum(old.theta[1:p])
map.old.theta[p + 1] <- old.theta[p + 1]/sum(old.theta[1:p])
map.new.theta <- new.theta
map.new.theta[1:p] <- new.theta[1:p]/sum(new.theta[1:p])
map.new.theta[p + 1] <- new.theta[p + 1]/sum(new.theta[1:p])
cur.rel.abs.error <- max(abs((map.old.theta - map.new.theta)/
map.new.theta))
if (sigma2 == 0) {
sigma2 <- 0
si <- sqrt(si2)
sigmai <- si
iter <- 0
tol <- 1
mu <- mean(xi)
max.tol <- .Machine$double.eps^0.5
while ((iter < 100) & (tol > max.tol)) {
mu.n <- sum(xi/sigmai^2)/sum(1/sigmai^2)
sigmai.n <- sqrt(ni*(xi - mu)^2 + (ni - 1)*si^2)/ni
tol <- abs(mu.n - mu)
mu <- mu.n
sigmai <- sigmai.n
iter <- iter + 1
}
wi <- 1/sigmai^2
u.mu <- 1/sqrt(sum(wi))
N <- sum(ni)
llh <- -N*log(2*pi)/2 - sum(log(sigmai^2))/2 -
sum((ni*(xi - mu)^2 + (ni - 1)*si^2)/(2*sigmai^2))
var.mu <- u.mu^2
gammai <- wi
} else {
wi <- gammai/sigma2
llh <- -N/2*log(2*pi) - sum(ni*log(ni))/2 +
sum(ni*log(gammai/sigma2))/2 - sum((ni - 1)*log(1 - gammai))/2 -
sum(gammai*((xi - mu)^2 + (ni - 1)*si2/ni/(1 - gammai)))/sigma2/2
}
if (sigma2 > 0) {
v <- sum(wi)^2/sum(wi^2/(ni - 1))
sigma2.wi <- (1/gammai - 1)*sigma2*ni
v.w <- sum((ni - 1)*sigma2.wi)^2/sum((ni - 1)*sigma2.wi^2)
var.w <- (sum(gammai)^2*var.mu - sum(gammai^2)*sigma2)/sum(gammai^2/ni)
v.w <- (sum(ni)*var.mu - var.w)/sigma2 - 1
} else {
v <- N - 1
v.w <- v
var.w <- (sum(gammai)^2*var.mu)/sum(gammai^2/ni)
v.w <- var.w/var.mu*sum(gammai^2)/sum(gammai)^2
}
if (!trace) {
Theta <- NULL
}
result <- list( mu = as.vector(mu),
u.mu = as.vector(sqrt(var.mu)),
ci.mu = as.vector(mu + qnorm(c(alpha/2, 1 - alpha/2))*sqrt(var.mu)),
var.mu = as.vector(var.mu),
var.b = as.vector(sigma2),
var.w = as.vector(var.w),
dof.w = v.w,
llh = as.vector(llh),
tot.iter = as.vector(t),
max.rel.abs.error = as.vector(cur.rel.abs.error),
gammai = as.vector(gammai),
ccm = ccm,
reduced.model = (sigma2 == 0),
dof = v,
trace = Theta,
discriminant = Delta.theta
)
class(result) <- "summary.vr.mle.fixed"
return(result)
}
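# Usage sketch for vr.mle() (illustrative interlaboratory data; not run):
# xi  <- c(10.1, 10.4, 9.9, 10.2)   # group (lab) means
# si2 <- c(0.04, 0.09, 0.05, 0.06)  # within-group variances
# ni  <- c(5, 5, 4, 6)              # observations per group
# fit <- vr.mle(xi, si2, ni)
# fit$mu; fit$u.mu; fit$ci.mu       # consensus mean, uncertainty, (1-alpha) CI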
|
if ( at_home() ){
expect_equal(1 + 1, 2)
}
|
qdiffIRT = function(p, ai, vi, ter, sd2A, sd2V, maxRT, A, W, model, eps) {
  ou = c()
  for (ii in 1:length(p)) {
    # invert the RT distribution function numerically: find x such that
    # pdiffIRT(x, ...) equals p[ii]
    tmp = function(x) pdiffIRT(x, ai, vi, ter, sd2A, sd2V, A, W, model, eps) - p[ii]
    # the previous root is reused as the lower bracket, so p must be
    # supplied in increasing order
    if (ii == 1) ou[ii] = uniroot(tmp, c(exp(ter) + 0.001, maxRT))$root
    else ou[ii] = uniroot(tmp, c(ou[ii - 1], maxRT))$root
  }
  return(ou)
}
|
setClass (Class = 'SubtimeDataFrame',
representation = representation (when='POSIXst',
data='data.frame'),
prototype = prototype (when=new('POSIXst'),
data=data.frame()),
validity=function(object) {
if (length (when (object)) != nrow (object))
	stop ("In a 'SubtimeDataFrame', 'data' must have as many rows as the length of 'when'.")
return (TRUE)
} )
SubtimeDataFrame <- function (when, data=NULL, ...) {
if (is.null (data))
data <- data.frame (matrix (NA, ncol=0, nrow=length(when) ) )
new ('SubtimeDataFrame', when=when, data=data)
}
setMethod (f='when', signature='SubtimeDataFrame',
definition=function(x, ...) return(x@when) )
setMethod (f='timezone', signature='SubtimeDataFrame',
definition=function(object) return(timezone(when(object))) )
unit.SubtimeDataFrame <- function(x, ...) unit(when(x))
of.SubtimeDataFrame <- function(x, ...) of(when(x))
print.SubtimeDataFrame <- function (x, ...)
print(data.frame (when=format(when(x), ...), x@data) )
setMethod ('show', 'SubtimeDataFrame',
function (object) {
print(data.frame (when=format(when(object)), object@data) )
} )
tail.SubtimeDataFrame <- function (x, ...)
print (tail (data.frame (when=format(when(x), ...), x@data) ) )
head.SubtimeDataFrame <- function (x, ...)
print(head (data.frame (when=format(when(x), ...), x@data) ) )
summary.SubtimeDataFrame <- function (object, ...)
print (summary (data.frame (when=format(when(object), ...),
object@data) ) )
'[.SubtimeDataFrame' <- function(x, i, j, drop=FALSE) {
n.args <- nargs() - hasArg(drop)
if (missing (j) & n.args==2) {
j <- i
i <- seq_len(nrow(x))
}
if(missing(i)) i <- seq_len(nrow(x))
y <- new ('SubtimeDataFrame',
when =when (x)[i],
data = x@data[i, j, drop=drop])
validObject(y)
return(y)
}
setMethod (f='[[', signature='SubtimeDataFrame',
definition=function(x, i, ...) {
'[[.data.frame'(x@data, i, ...)
})
setMethod (f='$', signature='SubtimeDataFrame',
definition=function(x, name) {
do.call ('$', list(x=x@data, name=name))
})
'[<-.SubtimeDataFrame' <- function(x, i, j, value) {
n.args <- nargs()
if (missing (j) & n.args==3) {
j <- i
i <- seq_len(nrow(x))
}
if(missing(i)) i <- seq_len(nrow(x))
x@data[i,j] <- value
validObject(x)
return(x)
}
'[[<-.SubtimeDataFrame' <- function(x, i, j, value) {
if (missing (j) )
x@data[[i]] <- value else
x@data[[i,j]] <- value
validObject(x)
return(x)
}
setMethod (f='$<-', signature='SubtimeDataFrame',
definition=function(x, name, value) {
x@data <- "$<-.data.frame"(x@data, name, value)
validObject(x)
return(x)
})
setMethod (f='dim', signature='SubtimeDataFrame',
definition=function(x) dim (x@data))
setMethod (f='nrow', signature='SubtimeDataFrame',
definition=function(x) nrow (x@data))
setMethod (f='ncol', signature='SubtimeDataFrame',
definition=function(x) ncol (x@data))
row.names.SubtimeDataFrame <- function(x) row.names (x@data)
'row.names<-.SubtimeDataFrame' <- function(x, value)
{
row.names (x@data) <- value
x
}
setMethod (f='names', signature='SubtimeDataFrame',
definition=function(x) names (x@data))
setMethod (f='names<-', signature='SubtimeDataFrame',
definition=function(x, value) {
names (x@data) <- value
x
} )
merge.SubtimeDataFrame <- function(x, y, by, all=TRUE, sort=FALSE, ...) {
if (!inherits(y, 'SubtimeDataFrame'))
stop ("'y' must be a 'SubtimeDataFrame'.")
if( any(duplicated(when(x))) | any(duplicated(when(y))) )
stop("'when' slots must be unique in each SubtimeDataFrame")
if (missing (by) ) by <- NULL
when.vec <- list (when(x), when(y))
if( unit(x) != unit(y) )
stop("x and y must have same unit") else
u <- unit(x)
if( of(x) != of(y) )
stop("x and y must have same of") else
o <- of(x)
if( timezone(x) != timezone(y) )
stop("x and y must have same timezone") else
tz <- timezone(x)
x.data <- data.frame(when=as.numeric(format(when(x), '%v')),
x@data)
y.data <- data.frame(when=as.numeric(format(when(y), '%v')),
y@data)
z <- merge (x.data, y.data,
by=unique (c('when', by) ), all=all, ...)
when <- POSIXst( z$when, u, o, tz )
z <- new ('SubtimeDataFrame',
when=when,
data=z[setdiff(names(z), c('when'))])
if (sort) z <- z[order(as.numeric(when(z))),]
return (z)
}
setMethod ('lapply', signature('SubtimeDataFrame', 'ANY'),
function (X, FUN, ...)
{
res <- lapply (data.frame(X), FUN, ...)
if (all (sapply (res, length) == nrow(X))) {
X@data <- data.frame (res[names(X)])
} else {
warning ("Result has a number of rows differents from object. A data.frame is returned.")
X <- data.frame (res)
}
return (X)
} )
as.data.frame.SubtimeDataFrame <- function (x, row.names=NULL, optional=FALSE,
include.dates=FALSE, ...) {
if (include.dates)
return (data.frame (when=when(x), x@data) ) else
return (x@data)
}
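# Construction sketch (assumes a POSIXst vector as produced elsewhere in the
# package, e.g. hours of a day; the unit/of names are illustrative):
# w <- POSIXst(0:23, unit = "hour", of = "day")
# sdf <- SubtimeDataFrame(w, data.frame(load = rnorm(24)))
# dim(sdf); names(sdf); head(sdf)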
|
binomialquantile<-function(q,n,p) {
small_gamma=matrix(data =0, nrow = 1, ncol = (n+1))
Cheb_Polyn<-rep(0,n+1)
Cheb_Polyn[1]<-1
Cheb_Polyn[2]<-1-2*p
for (l in 3:(n+1)) {
i<-l-2
Cheb_Polyn[l]<-((2*i+1)*(1-2*p)*Cheb_Polyn[l-1]-i*Cheb_Polyn[l-2])/(i+1)
}
small_gamma[1,]<-Cheb_Polyn
Calculate_row_big_gamma<-function(r){
Cheb_Polyn_star<-rep(0,n+1)
x=r-1
Cheb_Polyn_star[1]<-exp(0.5)
Cheb_Polyn_star[2]<-1-2*x/n
for (l in 3:(n+1)) {
i<-l-2
Cheb_Polyn_star_current<-((2*i+1)*(n-2*x)*(exp(-0.5))*Cheb_Polyn_star[l-1]-i*(n+i+1)*(exp(-1))*Cheb_Polyn_star[l-2])/((i+1)*(n-i))
Cheb_Polyn_star_current
if (Cheb_Polyn_star_current>=+1e+250) {
Cheb_Polyn_star[l]<-+1e+250
} else if (Cheb_Polyn_star_current<=-1e+250) {
Cheb_Polyn_star[l]<--1e+250
} else {
Cheb_Polyn_star[l]<-Cheb_Polyn_star_current
}
}
return(Cheb_Polyn_star)
}
big_gamma_without_normalized<-t(apply(t(t(1:(n+1))),1,Calculate_row_big_gamma))
big_gamma=matrix(data =0, nrow = (n+1), ncol = (n+1))
for (c in 1:(n+1)) {
i=c-1
eps<-big_gamma_without_normalized[,c]
norm_term<-t(eps)%*%eps
big_gamma[,c]<-eps/(norm_term*exp((i-1)/2))
}
BIG_pi_large_N=small_gamma%*%t(big_gamma)
Table_distribution=matrix(data =0, nrow = 1, ncol = (n+1))
density<-BIG_pi_large_N
distribution<-rep(0,n+1)
distribution[1]=density[1]
for (i in 2:(n+1)) {
distribution[i]=distribution[i-1]+density[i]
}
Table_distribution[1,]<-distribution
Table_result<-round(Table_distribution,8)
Table_Quantile=matrix(data =0, nrow =1, ncol = 1)
current_quantile=0
for (c in 1:(n+1)){
if (Table_result[1,c]>q) {
break
} else if (Table_result[1,c]==q) {
current_quantile=c-1
break
} else {
current_quantile=c-1
}
}
Table_Quantile[1,1]=current_quantile
rownames(Table_Quantile)<-c("x")
colnames(Table_Quantile)<-paste("Quantile q=",q)
return(Table_Quantile)
}
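# Sanity check against base R (illustrative; small differences can reflect
# the 1e-8 rounding and the boundary convention used above):
# binomialquantile(q = 0.95, n = 10, p = 0.3)
# qbinom(0.95, size = 10, prob = 0.3)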
|
Horvitz=function(z,p,alpha,pi,type=c("total","mean"),cl,N=NULL,pij=NULL){
call=match.call()
out1=Qualitative(z,2,p,1,alpha=alpha)
out2=Estimator(out1,pi,type,cl,N,pij)
out=c(Call=call,out1,out2,Name="Horvitz",Model="Qualitative",Type=type,Param=list(c("p"=p,"alpha"=alpha)),ConfidenceLevel=cl)
class(out)="RRT"
return(out)
}
|
nest_dt = function(.data,...,mcols = NULL,.name = "ndt"){
dt = as_dt(.data)
if(substitute(mcols) %>% deparse() == "NULL")
setnames(nest_by(dt,...),old = "ndt",new = .name)[]
else{
name_list = substitute(mcols)%>%
lapply(deparse) %>%
.[-1] %>%
lapply(function(x)
eval(parse(text = str_glue("names(select_dt(dt[0],{x}))"))))
group_names = setdiff(names(dt),unique(unlist(name_list)))
lapply(name_list,function(x) c(group_names,x)) %>%
lapply(function(x) nest_by(dt,cols = group_names)) -> list_table
for(i in seq_along(list_table)){
list_table[[i]] = setnames(list_table[[i]],
old = "ndt",new = names(list_table[i]))
}
Reduce(f = merge, x = list_table)
}
}
nest_by = function(.data,...){
dt = as_dt(.data)
dt[0] %>% select_dt(...) %>% names() %>%
str_c(collapse = ",")-> group
eval(parse(text = str_glue("dt[,.(ndt = list(.SD)),by = .({group})]")))
}
unnest_dt = function(.data,...){
dt = as_dt(.data)
col_names = dt[0] %>% select_dt(...) %>% names()
if(length(col_names) == 1) unnest_col(dt,...)
else
lapply(col_names,function(x) unnest_col(dt,cols = x)) %>%
Reduce(x = ., f = function(x,y) merge(x,y,all = TRUE))
}
unnest_col = function(.data,...){
dt = as_dt(.data)
col_name = dt[0] %>% select_dt(...) %>% names()
lapply(dt,class) -> dt_class
names(subset(dt_class,dt_class != "list")) -> valid_col_names
if(!col_name %chin% names(dt)) stop("The column does not exist.")
valid_col_names %>%
str_c(collapse = ",") %>%
str_c("list(",.,")") -> group_name
dt[[col_name]][[1]] -> first_element
if(is.vector(first_element))
eval(parse(text = str_glue("dt[,.({col_name} = unlist({col_name},recursive = FALSE)),by = {group_name}]")))
else
eval(parse(text = str_glue("dt[,unlist({col_name},recursive = FALSE),by = {group_name}]")))
}
squeeze_dt = function(.data,...,.name = "ndt"){
dt = as.data.table(.data)
dt %>% select_dt(...) %>%
setNames(NULL) %>%
apply(1,list) %>%
lapply(unlist)-> ndt
dt[,(.name) := ndt][]
}
chop_dt = function(.data,...){
dt = as_dt(.data)
dt[0] %>% select_dt(...) %>% names() -> data_cols
setdiff(names(dt),data_cols) -> group_cols
group_cols %>%
str_c(collapse = ",") %>%
str_c("list(",.,")") -> group_names
eval(parse(text = str_glue("dt[,lapply(.SD,list),
by = {group_names}]")))
}
unchop_dt = function(.data,...){
dt = as_dt(.data)
col_names = dt[0] %>% select_dt(...) %>% names()
group_names = setdiff(names(dt),col_names)
if(length(col_names) == 1) unnest_col(dt,...)
else
lapply(col_names,function(x) unnest_col(dt,cols = x)) %>%
Reduce(x = ., f = function(x,y) cbind(x,y)) %>%
.[,unique(names(.)),with=FALSE]
}
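# Round-trip sketch for the helpers above (assumes data.table plus the
# tidyfst-style dependencies they rely on; not run):
# library(data.table)
# dt <- data.table(g = c("a", "a", "b"), x = 1:3)
# nested <- nest_dt(dt, g)   # one row per group g, list-column "ndt"
# unnest_dt(nested, ndt)     # recovers the original rows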
|
assign_observed_data <-
function(obj, eggs = NA, larvae = NA, pupae = NA, adults) {
obj_mean <- apply(obj, 2, function(x) mean(na.omit(x)))
obj_sd <- apply(obj, 2, function(x) sd(na.omit(x)))
PDprob <- pnorm(adults,
obj_mean["post_diapause_day"], obj_sd["post_diapause_day"],
lower.tail = FALSE)
OVprob <- pnorm(adults,
obj_mean["ovipositing_day"], obj_sd["ovipositing_day"],
lower.tail = FALSE)
AEprob <- pnorm(adults,
obj_mean["adult_emergence_day"], obj_sd["adult_emergence_day"])
PD_points <- PDprob > AEprob
OV_points <- (OVprob > AEprob) & !PD_points
AE_points <- !(PD_points | OV_points)
obs_lengths <- c(length(eggs) + sum(OV_points), length(larvae), length(pupae), sum(PD_points), sum(AE_points))
max_length <- max(obs_lengths)
post_diapause <- c(adults[PD_points], rep(NA, max_length - sum(PD_points)))
eggs <- c(eggs, adults[OV_points], rep(NA, max_length - length(eggs) - sum(OV_points)))
larvae <- c(larvae, rep(NA, max_length - length(larvae)))
pupae <- c(pupae, rep(NA, max_length - length(pupae)))
adults <- c(adults[AE_points], rep(NA, max_length - sum(AE_points)))
observed_data <- data.frame(post_diapause, eggs, larvae, pupae, adults)
return(observed_data)
}
|
test_that("clip_grad_norm_", {
compute_norm <- function(parameters, norm_type) {
if (is.finite(norm_type)) {
total_norm <- 0
for (p in parameters)
total_norm <- total_norm + p$grad$data()$abs()$pow(norm_type)$sum()
total_norm ^ (1 / norm_type)
} else {
torch_tensor(max(sapply(parameters, function(p) p$grad$data()$abs()$max()$item())))
}
}
grads <- list(
torch_arange(start = 1, end = 100)$view(c(10, 10)),
torch_ones(10)$div(1000)
)
l <- nn_linear(10, 10)
max_norm <- 2
for (norm_type in c(0.5, 1.5, 2, 4, Inf)) {
for (i in seq_along(l$parameters)) {
l$parameters[[i]]$set_grad_(grads[[i]]$clone())
}
norm_before <- compute_norm(l$parameters, norm_type)
norm <- nn_utils_clip_grad_norm_(l$parameters, max_norm, norm_type)
norm_after <- compute_norm(l$parameters, norm_type)
expect_equal_to_tensor(norm_before, norm, tolerance = 1e-2)
expect_equal_to_tensor(norm_after, torch_tensor(max_norm), tolerance = 1e-2)
expect_equal_to_r(norm_after < norm_before, TRUE)
}
})
test_that("clip_grad_value_", {
grads <- list(
torch_arange(start = 1, end = 100)$view(c(10, 10)),
torch_ones(10)$div(1000)
)
l <- nn_linear(10, 10)
max_norm <- 2
for (i in seq_along(l$parameters)) {
l$parameters[[i]]$set_grad_(grads[[i]])
}
nn_utils_clip_grad_value_(l$parameters, 2.5)
for (p in l$parameters) {
expect_equal_to_r(p$grad$max() <= 2.5, TRUE)
}
})
|
residuals.BTm <- function(object, type = c("deviance", "pearson", "working",
"response", "partial", "grouped"), by = object$id,
...) {
type <- match.arg(type)
if (type != "grouped") return(NextMethod())
formula <- as.formula(paste("~", by, "- 1"))
mt <- terms(formula)
mf1 <- model.frame(mt, data = c(object$player1, object$data))
X1 <- model.matrix(mt, data = mf1)
mf2 <- model.frame(mt, data = c(object$player2, object$data))
X2 <- model.matrix(mt, data = mf2)
X <- X1 - X2
r <- object$residuals
w <- object$weights
total.resid <- crossprod(X, r * w)
total.weight <- crossprod(abs(X), w)
result <- total.resid / total.weight
attr(result, "weights") <- total.weight
result
}
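# Usage sketch (assumes a fitted BradleyTerry2-style "BTm" model; not run):
# r <- residuals(mod, type = "grouped")  # one aggregated residual per level of 'by'
# attr(r, "weights")                     # the combined weights computed above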
|
knitr::opts_chunk$set(echo = TRUE)
library(spdep)
library(geocmeans)
library(ggplot2)
data("LyonIris")
AnalysisFields <-c("Lden","NO2","PM25","VegHautPrt","Pct0_14",
"Pct_65","Pct_Img","TxChom1564","Pct_brevet","NivVieMed")
Data <- LyonIris@data[AnalysisFields]
for (Col in names(Data)){
Data[[Col]] <- scale(Data[[Col]])
}
Neighbours <- poly2nb(LyonIris,queen = TRUE)
WMat <- nb2listw(Neighbours,style="W",zero.policy = TRUE)
SFCM <- SFCMeans(Data, WMat, k = 4, m = 1.5, alpha = 0.7,
tol = 0.0001, standardize = FALSE,
verbose = FALSE, seed = 456)
consistIndex <- spConsistency(SFCM$Belongings, WMat, nrep = 500)
ggplot() +
geom_histogram(aes(x = consistIndex$samples),
bins = 30, fill = "white", color = "black") +
geom_vline(aes(xintercept = consistIndex$Mean),
color = "red", linetype="dashed", size = 1) +
geom_text(aes(x = consistIndex$Mean+0.0015, y = 43,
label = round(consistIndex$Mean,2))) +
labs(x = "Spatial Inconsistency Index", y = "")
WMat2 <- adjustSpatialWeights(Data, WMat$neighbours, style = "C")
consistIndex2 <- spConsistency(SFCM$Belongings, WMat2, nrep = 500)
ggplot() +
geom_histogram(aes(x = consistIndex2$samples),
bins = 30, fill = "white", color = "black") +
geom_vline(aes(xintercept = consistIndex2$Mean),
color = "red", linetype="dashed", size = 1) +
geom_text(aes(x = consistIndex2$Mean+0.0015, y = 43,
label = round(consistIndex2$Mean,2))) +
labs(x = "Adjusted Spatial Inconsistency Index", y = "")
|
library("quanteda")
MWEcounts <- function(candidate, text, stopword = "xxxx") {
K <- length(candidate)
J <- length(text) - K + 1
count.vectors <- list(
c("00", "01", "10", "11")
)
count.vectors[[2]] <- paste(rep(c("0","1"), each=4), rep(count.vectors[[1]], 2), sep = "")
count.vectors[[3]] <- paste(rep(c("0","1"), each=8), rep(count.vectors[[2]], 2), sep = "")
noyes <- c("no","yes")
array.dims <- list(
list(W2=noyes,W1=noyes),
list(W3=noyes,W2=noyes,W1=noyes),
list(W4=noyes,W3=noyes,W2=noyes,W1=noyes)
)
data.frames <- list(
data.frame(count=NA,W1=gl(2,2,4,labels=noyes),W2=gl(2,1,4,labels=noyes)),
data.frame(count=NA,W1=gl(2,4,8,labels=noyes),W2=gl(2,2,8,labels=noyes),W3=gl(2,1,8,labels=noyes)),
data.frame(count=NA,W1=gl(2,8,16,labels=noyes),W2=gl(2,4,16,labels=noyes),W3=gl(2,2,16,labels=noyes),W4=gl(2,1,16,labels=noyes))
)
counts <- rep(0,2^K)
names(counts) <- count.vectors[[K-1]]
for(j in seq(J)){
text.j <- text[j:(j+K-1)]
if(all(text.j!=stopword)){
agreement <- text.j==candidate
tmp <- paste(as.numeric(agreement),collapse="")
counts[tmp] <- counts[tmp]+1
}
}
counts.table <- array(counts,dim=rep(2,K),dimnames=array.dims[[K-1]])
counts.data.frame <- data.frames[[K-1]]
counts.data.frame$count <- counts
result <- list(expression=paste(candidate,collapse=" "),counts=counts,counts.table=counts.table,counts.data.frame=counts.data.frame)
return(result)
}
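# Toy illustration of MWEcounts(): tally how often the candidate matches each
# agreement pattern ("11" = both words match) along the token stream:
# tt <- c("new", "york", "is", "in", "new", "york")
# MWEcounts(c("new", "york"), tt)$counts  # here "11" is 2 and "00" is 3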
MWEstatistics <- function (counts, smooth=0.5) {
counts.n <- counts$counts
counts.table <- counts$counts.table
counts.df <- counts$counts.data.frame
K <- length(dim(counts.table))
results <- matrix(NA,1,9+(2^K))
colnames(results) <- c("length","lambda","se.lambda","z.lambda","LRtest","smooth","mInfty","Infty","N",names(counts.n))
rownames(results) <- counts$expression
results[,"length"] <- K
results[,"smooth"] <- smooth
results[,-(1:9)] <- counts.n
results[,"N"] <- sum(counts.n)
results[,"mInfty"] <- as.numeric(counts.n[2^K]==0)
results[,"Infty"] <- as.numeric(any(counts.n[-(2^K)]==0))
loglin.margins <- list(
list(1,2),
list(1:2,2:3,c(1,3)),
list(1:3,2:4,c(1,2,4),c(1,3,4))
)[[K-1]]
formula <- list(
count~W1*W2,
count~W1*W2*W3,
count~W1*W2*W3*W4
)[[K-1]]
counts.df$count <- counts.df$count+smooth
suppressWarnings(mod1 <- glm(formula,family=poisson,data=counts.df))
tmp <- length(coef(mod1))
results[,"lambda"] <- coef(mod1)[tmp]
results[,"se.lambda"] <- sqrt(diag(vcov(mod1)))[tmp]
results[,"z.lambda"] <- results[,"lambda"]/results[,"se.lambda"]
counts.table <- counts.table+smooth
mod2 <- loglin(counts.table,loglin.margins,print=F)
results[,"LRtest"] <- mod2$lrt
return(results)
}
test_that("test that collocations do not span texts", {
toks <- quanteda::tokens(c('this is a test', 'this also a test'))
cols <- rbind(textstat_collocations(toks, size = 2, min_count = 1),
textstat_collocations(toks, size = 3, min_count = 1))
expect_false('test this' %in% cols$collocation)
expect_false('test this also' %in% cols$collocation)
expect_true('this also a' %in% cols$collocation)
})
test_that("test that collocations only include selected features", {
toks <- quanteda::tokens(c('This is a Twitter post to @someone on
toks <- quanteda::tokens_select(toks, "^([a-z]+)$", valuetype = "regex")
cols <- textstat_collocations(toks, min_count = 1, size = 2, tolower = FALSE)
expect_true('This is' %in% cols$collocation)
expect_true('a Twitter' %in% cols$collocation)
expect_false('to @someone' %in% cols$collocation)
expect_false('on #something' %in% cols$collocation)
})
test_that("test that extractor works with collocation", {
toks <- quanteda::tokens(quanteda::data_corpus_inaugural[2], remove_punct = TRUE)
toks <- quanteda::tokens_remove(toks, quanteda::stopwords(), padding = TRUE)
cols <- textstat_collocations(toks, size = 2)
cols <- cols[1:5, ]
expect_equal(nrow(cols), 5)
expect_true(quanteda::is.collocations(cols))
})
test_that("bigrams and trigrams are all sorted correctly, issue
toks <- quanteda::tokens(quanteda::data_corpus_inaugural[2], remove_punct = TRUE)
toks <- quanteda::tokens_remove(toks, quanteda::stopwords("english"), padding = TRUE)
cols <- textstat_collocations(toks, method = 'lambda', min_count = 1, size = 2:3)
expect_equal(order(cols$z, decreasing = TRUE), seq_len(nrow(cols)))
})
test_that("test the correctness of significant with smoothing", {
toks <- quanteda::tokens('capital other capital gains other capital word2 other gains capital')
seqs <- textstat_collocations(toks, min_count=1, size = 2)
expect_equal(seqs$collocation[1], 'other capital')
})
test_that("test the correctness of significant", {
toks <- quanteda::tokens('capital other capital gains other capital word2 other gains capital')
seqs <- textstat_collocations(toks, min_count=1, size = 2, smoothing = 0)
expect_equal(seqs$collocation[1], 'other capital')
})
test_that("collocation is counted correctly in racing conditions, issue
n <- 100
txt <- unname(rep(as.character(quanteda::data_corpus_inaugural)[1], n))
toks <- quanteda::tokens(txt)
out1 <- textstat_collocations(toks[1], size = 2, min_count = 1)
out100 <- textstat_collocations(toks, size = 2, min_count = 1)
out1 <- out1[order(out1$collocation),]
out100 <- out100[order(out100$collocation),]
expect_true(all(out1$count * n == out100$count))
})
test_that("lambda & [ function",{
toks <- quanteda::tokens('E E G F a b c E E G G f E E f f G G')
toks_capital <- quanteda::tokens_select(toks, "^[A-Z]$", valuetype="regex",
case_insensitive = FALSE, padding = TRUE)
seqs <- textstat_collocations(toks_capital, min_count = 1)
a_seq <- seqs[1, ]
tt <- as.character(toks_capital)
test2 <- MWEcounts(c("G", "F"), tt)
test2_stat <- suppressWarnings(MWEstatistics(test2))
expect_equal(a_seq$collocation, 'g f')
expect_equal(a_seq$lambda, test2_stat[2])
expect_equal(class(a_seq), c("collocations", "textstat", "data.frame"))
})
test_that("textstat_collocations.tokens works ok with zero-length documents (
txt <- c('I like good ice cream.', 'Me too! I like good ice cream.', '')
toks <- quanteda::tokens(tolower(txt), remove_punct = TRUE, remove_symbols = TRUE)
expect_equal(
textstat_collocations(txt, size = 2, min_count = 2, tolower = TRUE)$collocation,
c("good ice", "i like", "ice cream", "like good")
)
expect_equal(
textstat_collocations(toks, size = 2, min_count = 2)$collocation,
c("good ice", "i like", "ice cream", "like good")
)
})
test_that("textstat_collocations works when texts are shorter than size", {
toks <- quanteda::tokens(c('a', 'bb', ''))
expect_equivalent(
textstat_collocations(toks, size = 2:3, min_count = 1, tolower = TRUE),
data.frame(collocation = character(0),
count = integer(0),
count_nested = integer(0),
length = numeric(0),
lambda = numeric(0),
z = numeric(0),
stringsAsFactors = FALSE))
})
test_that("textstat_collocations error when size = 1 and warn when size > 5", {
toks <- quanteda::tokens('a b c d e f g h a b c d e f')
expect_silent(textstat_collocations(toks, size = 2:5))
expect_error(textstat_collocations(toks, size = 1:5),
"Collocation sizes must be larger than 1")
expect_warning(textstat_collocations(toks, size = 2:6),
"Computation for large collocations may take long time")
})
test_that("textstat_collocations counts sequences correctly when recursive = FALSE", {
txt <- c("a b c . . a b c . . a b c . . . a b c",
"a b . . a b . . a b . . a b . a b")
toks <- quanteda::tokens_keep(quanteda::tokens(txt), c("a", "b", "c"), padding = TRUE)
col1 <- textstat_collocations(toks, size = 2:3)
expect_equal(col1$collocation, c('a b', 'b c', 'a b c'))
expect_equal(col1$count, c(9, 4, 4))
expect_equal(col1$count_nested, c(4, 4, 0))
txt2 <- c(". . . . a b c . . a b c . . .",
"a b . . a b . . a b . . a b . a b",
"b c . . b c . b c . . . b c")
toks2 <- quanteda::tokens_keep(quanteda::tokens(txt2), c("a", "b", "c"), padding = TRUE)
col2 <- textstat_collocations(toks2, size = 2:3, min_count = 1)
expect_equal(col2$collocation, c('a b', 'b c', 'a b c'))
expect_equal(col2$count, c(7, 6, 2))
expect_equal(col2$count_nested, c(2, 2, 0))
txt3 <- c(". . . . a b c d . . a b c d . . .",
"a b . . a b . . a b . . a b . a b",
"b c . . b c . b c . . . b c")
toks3 <- quanteda::tokens_keep(quanteda::tokens(txt3), c("a", "b", "c", "d"), padding = TRUE)
col3 <- textstat_collocations(toks3, size = c(2, 4), min_count = 1)
expect_equal(col3$collocation, c('a b', 'b c', 'c d', 'a b c d'))
expect_equal(col3$count, c(7, 6, 2, 2))
expect_equal(col3$count_nested, c(2, 2, 2, 0))
txt4 <- c(". . . . a b c d . . a b c . . .")
toks4 <- quanteda::tokens_keep(quanteda::tokens(txt4), c("a", "b", "c", "d"), padding = TRUE)
col4 <- textstat_collocations(toks4, size = c(2:4), min_count = 1)
expect_equal(col4$collocation, c('a b', 'b c', 'c d', 'a b c d', 'a b c', 'b c d'))
expect_equal(col4$count, c(2, 2, 1, 1, 2, 1))
expect_equal(col4$count_nested, c(2, 2, 1, 0, 1, 1))
})
|
`betadisper` <-
function(d, group, type = c("median","centroid"), bias.adjust=FALSE,
sqrt.dist = FALSE, add = FALSE)
{
dblcen <- function(x, na.rm = TRUE) {
cnt <- colMeans(x, na.rm = na.rm)
x <- sweep(x, 2L, cnt, check.margin = FALSE)
cnt <- rowMeans(x, na.rm = na.rm)
sweep(x, 1L, cnt, check.margin = FALSE)
}
spatialMed <- function(vectors, group, pos) {
axes <- seq_len(NCOL(vectors))
spMedPos <- ordimedian(vectors, group, choices = axes[pos])
spMedNeg <- ordimedian(vectors, group, choices = axes[!pos])
cbind(spMedPos, spMedNeg)
}
centroidFUN <- function(vec, group) {
cent <- apply(vec, 2,
function(x, group) tapply(x, INDEX = group, FUN = mean),
group = group)
if(!is.matrix(cent)) {
cent <- matrix(cent, nrow = 1,
dimnames = list(as.character(levels(group)),
paste0("Dim", seq_len(NCOL(vec)))))
}
cent
}
Resids <- function(x, c) {
if(is.matrix(c))
d <- x - c
else
d <- sweep(x, 2, c)
rowSums(d^2)
}
TOL <- sqrt(.Machine$double.eps)
if(!inherits(d, "dist"))
stop("distances 'd' must be a 'dist' object")
if (any(d < -TOL, na.rm = TRUE))
stop("dissimilarities 'd' must be non-negative")
if (sqrt.dist)
d <- sqrt(d)
if (is.logical(add) && isTRUE(add))
add <- "lingoes"
if (is.character(add)) {
add <- match.arg(add, c("lingoes", "cailliez"))
if (add == "lingoes") {
ac <- addLingoes(as.matrix(d))
d <- sqrt(d^2 + 2 * ac)
}
else if (add == "cailliez") {
ac <- addCailliez(as.matrix(d))
d <- d + ac
}
}
if(missing(type))
type <- "median"
type <- match.arg(type)
group <- if(!is.factor(group)) {
as.factor(group)
} else {
droplevels(group, exclude = NA)
}
n <- attr(d, "Size")
x <- matrix(0, ncol = n, nrow = n)
x[row(x) > col(x)] <- d^2
labs <- attr(d, "Labels")
if(any(gr.na <- is.na(group))) {
group <- group[!gr.na]
x <- x[!gr.na, !gr.na]
n <- n - sum(gr.na)
labs <- labs[!gr.na]
message("missing observations due to 'group' removed")
}
if(any(x.na <- apply(x, 1, function(x) any(is.na(x))))) {
x <- x[!x.na, !x.na]
group <- group[!x.na]
n <- n - sum(x.na)
labs <- labs[!x.na]
message("missing observations due to 'd' removed")
}
x <- x + t(x)
x <- dblcen(x)
e <- eigen(-x/2, symmetric = TRUE)
vectors <- e$vectors
eig <- e$values
eig <- eig[(want <- abs(eig) > max(TOL, TOL * eig[1L]))]
vectors <- vectors[, want, drop = FALSE] %*% diag(sqrt(abs(eig)),
nrow = length(eig))
pos <- eig > 0
centroids <-
switch(type,
centroid = centroidFUN(vectors, group),
median = spatialMed(vectors, group, pos)
)
dist.pos <- Resids(vectors[, pos, drop=FALSE],
centroids[group, pos, drop=FALSE])
dist.neg <- 0
if(any(!pos))
dist.neg <- Resids(vectors[, !pos, drop=FALSE],
centroids[group, !pos, drop=FALSE])
if (any(dist.neg > dist.pos)) {
warning("some squared distances are negative and changed to zero")
zij <- Re(sqrt(as.complex(dist.pos - dist.neg)))
} else {
zij <- sqrt(dist.pos - dist.neg)
}
if (bias.adjust) {
n.group <- as.vector(table(group))
zij <- zij*sqrt(n.group[group]/(n.group[group]-1))
}
if (any(want))
colnames(vectors) <- names(eig) <-
paste("PCoA", seq_along(eig), sep = "")
if(is.matrix(centroids))
colnames(centroids) <- names(eig)
else
names(centroids) <- names(eig)
rownames(vectors) <- names(zij) <- labs
retval <- list(eig = eig, vectors = vectors, distances = zij,
group = group, centroids = centroids, call = match.call())
class(retval) <- "betadisper"
attr(retval, "method") <- attr(d, "method")
attr(retval, "type") <- type
attr(retval, "bias.adjust") <- bias.adjust
retval
}
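# Usage sketch with vegan-style data (not run):
# data(varespec, package = "vegan")
# d <- vegdist(varespec)                     # Bray-Curtis dissimilarities
# grp <- factor(rep(c("grazed", "ungrazed"), each = 12))
# mod <- betadisper(d, grp, type = "median")
# anova(mod)                                 # test homogeneity of dispersions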
|
gen_points <- function(table=2,modulo=10,r=1){
circleFun(c(0, 0), r, npoints = modulo) %>%
mutate(depart = seq_len(nrow(.)),
destination = (depart * table) %% nrow(.),
destination = case_when(destination == 0 ~ as.numeric(nrow(.)),
TRUE ~ destination)
)
}
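# gen_points() assumes dplyr (mutate, case_when, %>%) and a circleFun()
# helper not shown here; a minimal version consistent with its usage
# (center, radius, npoints equally spaced points on a circle):
circleFun <- function(center = c(0, 0), r = 1, npoints = 100) {
  tt <- seq(0, 2 * pi, length.out = npoints + 1)[-1]  # npoints distinct angles
  data.frame(x = center[1] + r * cos(tt), y = center[2] + r * sin(tt))
}
# e.g. gen_points(table = 2, modulo = 10) joins point i to point 2*i mod 10.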
|
dotPlot(~ prop, width = .005, xlim = c(0.05, 0.5), data = Sampledist.1000)
dotPlot(~ prop, width = .005, xlim = c(0.05, 0.5), data = Sampledist.200)
dotPlot(~ prop, width = .005, xlim = c(0.05, 0.5), data = Sampledist.50)
|
.postInit <- function(initialization,Y,Xnorm,Xpois,Xbin,n,k,start.z){
if (k==1) {
z <- matrix(rep(1,n),ncol=1)
return(z)
}
if(initialization=="mclust"){
data <- cbind(Y[,1],Xnorm,Xpois,Xbin)
z <- Mclust(data=data,modelNames=ifelse(ncol(data)==1,"V","VVV"),G=k)$z
}
if(initialization=="kmeans"){
clusters <- kmeans(x=cbind(Y,Xnorm,Xpois,Xbin),centers=k)
z <- mclust::unmap(clusters$cluster,1:k)
}
if(initialization=="random.soft"){
z <- array(runif(n*k),c(n,k))
z <- z/rowSums(z)
}
if(initialization=="random.hard"){
z <- t(rmultinom(n, size = 1, prob=rep(1/k,k)))
}
if(initialization=="manual"){
z <- start.z
}
z = (z+0.0001)/rowSums(z+0.0001)
z
}
|
.onLoad <- function(libname, pkgname) {
additive_make(modes = c("classification", "regression"))
}
|
ATev <- function(copula, w) {
rho <- copula@parameters[1]
nu <- getdf(copula)
wnu <- (w / (1 - w))^(1 / nu)
x <- (wnu - rho) / sqrt(1 - rho^2) * sqrt(nu + 1)
y <- (1 / wnu - rho) / sqrt(1 - rho^2) * sqrt(nu + 1)
A <- w * pt(x, nu + 1) + (1 - w) * pt(y, nu + 1)
ifelse(w == 0 | w == 1, 1, A)
}
dAduTev <- function(copula, w) {
rho <- copula@parameters[1]
nu <- getdf(copula)
wnu <- (w / (1 - w))^(1 / nu)
x <- (wnu - rho) / sqrt(1 - rho^2) * sqrt(nu + 1)
y <- (1 / wnu - rho) / sqrt(1 - rho^2) * sqrt(nu + 1)
dx <- eval(deriv(~ (w / (1 - w))^(1 / nu) / sqrt(1 - rho^2) * sqrt(nu + 1), "w", hessian=TRUE), list(w = w))
dxdw <- c(attr(dx, "gradient"))
d2xdw2 <- c(attr(dx, "hessian"))
dy <- eval(deriv(~ (w / (1 - w))^( - 1 / nu) / sqrt(1 - rho^2) * sqrt(nu + 1), "w", hessian=TRUE), list(w = w))
dydw <- c(attr(dy, "gradient"))
d2ydw2 <- c(attr(dy, "hessian"))
dens <- ~ gamma(0.5 * (nu + 1)) / gamma(0.5 * nu) / sqrt(nu * pi) * (1 + u^2 / nu)^(-0.5 * (nu + 1))
ddens <- deriv(dens, "u")
ddtx <- c(attr(eval(ddens, list(u = x, nu = nu + 1)), "gradient"))
ddty <- c(attr(eval(ddens, list(u = y, nu = nu + 1)), "gradient"))
der1 <- pt(x, nu + 1) + w * dt(x, nu + 1) * dxdw - pt(y, nu + 1) + (1 - w) * dt(y, nu + 1) * dydw
der2 <- dt(x, nu + 1) * dxdw +
dt(x, nu + 1) * dxdw + w * ddtx * dxdw^2 + w * dt(x, nu + 1) * d2xdw2 +
(- dt(y, nu + 1) * dydw ) +
(- dt(y, nu + 1) * dydw) + (1 - w) * ddty * dydw^2 + (1 - w) * dt(y, nu + 1) * d2ydw2
data.frame(der1 = der1, der2 = der2)
}
tevCopula <- function(param = NA_real_, df = 4, df.fixed = FALSE) {
dim <- 2L
pdim <- length(param)
parameters <- c(param, df)
param.names <- c(paste("rho", 1:pdim, sep="."), "df")
param.lowbnd <- c(rep(-1, pdim), 1e-6)
param.upbnd <- c(rep(1, pdim), Inf)
attr(parameters, "fixed") <- c(!isFreeP(param), df.fixed)
new("tevCopula",
dimension = dim,
parameters = parameters,
df.fixed = df.fixed,
param.names = param.names,
param.lowbnd = param.lowbnd,
param.upbnd = param.upbnd,
fullname = "<deprecated slot>")
}
ptevCopula <- function(u, copula) {
u1 <- u[,1]; u2 <- u[,2]
p <- (r <- uu <- u1 * u2) > 0
p <- p & (nna <- !is.na(p))
logu <- log(uu[p])
r[p ] <- exp(logu * ATev(copula, log(u2[p]) / logu))
r[!p & nna] <- 0
r
}
dtevCopula <- function(u, copula, log=FALSE, ...) {
u1 <- u[,1]; u2 <- u[,2]
C <- ptevCopula(u, copula)
logu <- log(u1 * u2)
w <- log(u2) / logu
wexpr <- ~ log(u2) / log(u1 * u2)
dw <- eval(deriv(wexpr, c("u1", "u2"), hessian=TRUE), list(u1 = u1, u2 = u2))
dwdu1 <- c(attr(dw, "gradient")[,"u1"])
dwdu2 <- c(attr(dw, "gradient")[,"u2"])
d2wdu1du2 <- c(attr(dw, "hessian")[,"u1","u2"])
A <- ATev(copula, w)
Ader <- dAduTev(copula, w)
Ader1 <- Ader$der1; Ader2 <- Ader$der2
dCdu2 <- C * (1 / u2 * A + logu * Ader1 * dwdu2)
pdf <- dCdu2 * (1 / u1 * A + logu * Ader1 * dwdu1) +
C * (1 / u1 * Ader1 * dwdu2 + 1 / u2 * Ader1 * dwdu1 +
logu * Ader2 * dwdu2 * dwdu1 + logu * Ader1 * d2wdu1du2)
if(log) log(pdf) else pdf
}
dCduSymEvCopula <- function(copula, u, ...) {
mat <- matrix(NA, nrow(u), 2)
pcop <- pCopula(u, copula)
loguv <- log(u[,1]) + log(u[,2])
w <- log(u[,2]) / loguv
a <- A(copula, w)
aDer <- dAdu(copula, w)$der1
mat[,1] <- pcop * (a / u[,1] - loguv * aDer * log(u[,2]) / (loguv)^2 / u[,1])
mat[,2] <- pcop * (a / u[,2] + loguv * aDer * log(u[,1]) / (loguv)^2 / u[,2])
mat
}
setMethod("dCdu", signature("tevCopula"), dCduSymEvCopula)
tevTauFun <- function(alpha) {
ss <- .tevTau$ss
forwardTransf <- .tevTau$trFuns$forwardTransf
valFun <- .tevTau$assoMeasFun$valFun
theta <- forwardTransf(alpha, ss)
valFun(theta)
}
tauTevCopula <- function(copula) {
alpha <- copula@parameters[1]
tevTauFun(alpha)
}
iTauTevCopula <- function(copula, tau) {
if (any(neg <- tau < 0)) {
warning("For the t-ev copula, tau must be >= 0. Replacing negative values by 0.")
tau[neg] <- 0
}
tevTauInv <- approxfun(x = .tevTau$assoMeasFun$fm$ysmth,
y = .tevTau$assoMeasFun$fm$x, rule=2)
ss <- .tevTau$ss
theta <- tevTauInv(tau)
.tevTau$trFuns$backwardTransf(theta, ss)
}
tevdTau <- function(alpha) {
ss <- .tevTau$ss
forwardTransf <- .tevTau$trFuns$forwardTransf
forwardDer <- .tevTau$trFuns$forwardDer
valFun <- .tevTau$assoMeasFun$valFun
theta <- forwardTransf(alpha, ss)
valFun(theta, 1) * forwardDer(alpha, ss)
}
dTauTevCopula <- function(copula) {
alpha <- copula@parameters[1]
tevdTau(alpha)
}
tevRhoFun <- function(alpha) {
ss <- .tevRho$ss
forwardTransf <- .tevRho$trFuns$forwardTransf
valFun <- .tevRho$assoMeasFun$valFun
theta <- forwardTransf(alpha, ss)
valFun(theta)
}
rhoTevCopula <- function(copula) {
alpha <- copula@parameters[1]
tevRhoFun(alpha)
}
iRhoTevCopula <- function(copula, rho) {
if (any(neg <- rho < 0)) {
warning("For the t-ev copula, rho must be >= 0. Replacing negative values by 0.")
rho[neg] <- 0
}
tevRhoInv <- approxfun(x = .tevRho$assoMeasFun$fm$ysmth,
y = .tevRho$assoMeasFun$fm$x, rule = 2)
ss <- .tevRho$ss
theta <- tevRhoInv(rho)
.tevRho$trFuns$backwardTransf(theta, ss)
}
tevdRho <- function(alpha) {
ss <- .tevRho$ss
forwardTransf <- .tevRho$trFuns$forwardTransf
forwardDer <- .tevRho$trFuns$forwardDer
valFun <- .tevRho$assoMeasFun$valFun
theta <- forwardTransf(alpha, ss)
valFun(theta, 1) * forwardDer(alpha, ss)
}
dRhoTevCopula <- function(copula) {
alpha <- copula@parameters[1]
tevdRho(alpha)
}
setMethod("pCopula", signature("matrix", "tevCopula"), ptevCopula)
setMethod("dCopula", signature("matrix", "tevCopula"), dtevCopula)
setMethod("A", signature("tevCopula"), ATev)
setMethod("dAdu", signature("tevCopula"), dAduTev)
setMethod("tau", signature("tevCopula"), tauTevCopula)
setMethod("rho", signature("tevCopula"), rhoTevCopula)
setMethod("iTau", signature("tevCopula"), iTauTevCopula)
setMethod("iRho", signature("tevCopula"), iRhoTevCopula)
setMethod("dTau", signature("tevCopula"), dTauTevCopula)
setMethod("dRho", signature("tevCopula"), dRhoTevCopula)
|
size(c(2,3,4,5,6))
size(10)
size(zeros(4,7))
|
"print.trackdata"<- function(x, ...)
{
if(is.null(x$trackname))
cat("trackdata from unknown track.\n")
else
cat("trackdata from track:", x$trackname,"\n")
cat("index:\n")
print(x$index, ...)
cat("ftime:\n")
print(x$ftime, ...)
cat("data:\n")
print(x$data, ...)
}
"[.trackdata" <- function (dataset, i, j, ...)
{
if (missing(i)) {
i <- 1:nrow(dataset$index)
}
ftime <- dataset$ftime[i, , drop = FALSE]
index <- dataset$index[i, , drop = FALSE]
datarows <- NULL
for (ind in 1:nrow(index)) {
datarows <- c(datarows, seq(from = index[ind, 1], to = index[ind,
2]))
}
if (is.matrix(dataset$data)) {
if (missing(j))
data <- dataset$data[datarows, , drop = FALSE]
else data <- dataset$data[datarows, j, drop = FALSE]
} else {
data <- dataset$data[datarows, drop = FALSE]
}
lval <- index[, 2] - index[, 1] + 1
right <- cumsum(lval)
left <- right + 1
left <- left[-length(left)]
left <- c(1, left)
nindex <- cbind(left, right)
dataset$index <- nindex
dataset$ftime <- ftime
dataset$data <- data
return(dataset)
}
"summary.trackdata" <- function(object, ...)
{
if( is.matrix(object$data)){
dimens <- ncol(object$data)
len <- nrow(object$data)
}
else {
dimens <- 1
len <- length(object$data)
}
cat("Emu track data from", nrow(object$index), "segments\n\n")
cat("Data is ", dimens, "dimensional from track",
object$trackname,"\n")
cat("Mean data length is ", len/nrow(object$index), " samples\n")
invisible()
}
"as.trackdata" <- function( data, index, ftime, trackname="" )
{
mat <- list( data=as.matrix(data),
index=index,
ftime=ftime,
trackname=trackname)
if( version$major >= 5 ) {
oldClass(mat) <- "trackdata"
} else {
class(mat) <- "trackdata"
}
mat
}
"is.trackdata" <- function (object)
{
return(inherits(object, "trackdata"))
}
`plot.trackdata` <- function (x, timestart = NULL, xlim = NULL,
ylim = NULL, labels = NULL, col = TRUE,
lty = FALSE, type="p", pch=NULL,
contig = TRUE, ...)
{
trackdata <- x
N <- nrow(trackdata$data)
if(is.logical(col))
{
if (col)
col <- 1:ncol(trackdata)
else
col <- rep(1, ncol(trackdata))
}
else
{
if(length(col)!=ncol(trackdata))
col <- rep(col[1], ncol(trackdata))
}
if(is.logical(lty))
{
if (lty)
lty <- 1:ncol(trackdata)
else
lty <- rep(1, ncol(trackdata))
}
else
{
if(length(lty)!=ncol(trackdata))
lty <- rep(lty[1], ncol(trackdata))
}
if(is.null(pch))
pch <- rep(1, ncol(trackdata))
else
{
if(length(pch)!=ncol(trackdata))
pch <- rep(pch[1], ncol(trackdata))
}
n <- nrow(trackdata)
if (!is.null(xlim))
labels <- NULL
if (!is.null(labels)) {
if (length(labels) != nrow(trackdata))
stop("if labels are supplied, there must be one label per segment")
label.times <- apply(trackdata$ftime, 1, mean)
        boundary.times <- c(trackdata$ftime[, 1], trackdata$ftime[n, 2])
}
if (n > 1 & contig) {
inds <- cbind(1, N)
ftime <- cbind(trackdata$ftime[1, 1], trackdata$ftime[n,
2])
trackdata <- as.trackdata(trackdata$data, inds, ftime)
}
if (!is.null(xlim)) {
if (nrow(trackdata) != 1)
stop("can't specify xlim if there's more than one segment")
}
left <- trackdata$ftime[1]
right <- trackdata$ftime[2]
times <- seq(left, right, length = nrow(trackdata$data))
if (!is.null(timestart)) {
times <- times - left + timestart
if (!is.null(labels)) {
label.times <- label.times - left + timestart
boundary.times <- boundary.times - left + timestart
}
}
data <- trackdata$data
if (nrow(trackdata) == 1) {
if (is.null(xlim))
xlim <- range(times)
if (is.null(ylim))
ylim <- range(data)
for (k in 1:ncol(data)) {
if(k==ncol(data))
graphics::plot(times, data[, k], xlim = xlim, ylim = ylim,
col = col[k], lty = lty[k], pch=pch[k], type=type, ...)
else
graphics::plot(times, data[, k], xlim = xlim, ylim = ylim,
col = col[k], lty = lty[k], pch=pch[k], xlab="", ylab="", main="", axes=FALSE, bty="n", type=type)
graphics::par(new = TRUE)
}
graphics::par(new = FALSE)
if (!is.null(labels)) {
if (length(boundary.times) > 2)
graphics::abline(v = boundary.times)
graphics::mtext(labels, at = label.times)
}
}
else {
if (is.null(labels))
labels <- rep("", nrow(trackdata))
for (j in 1:nrow(trackdata)) {
graphics::plot(trackdata[j, ], timestart = timestart, xlim = xlim,
ylim = ylim, labels = labels[j], col=col, lty=lty, type=type, pch=pch,contig = TRUE, ...)
}
}
}
"bark.trackdata" <- function(f, ...)
{
trackdata = f
if(is.spectral(trackdata$data))
return(bark.spectral(trackdata))
else
{
trackdata$data <- bark(trackdata$data)
return(trackdata)
}
}
"mel.trackdata" <- function(a)
{
trackdata = a
if(is.spectral(trackdata$data))
return(mel.spectral(trackdata))
else
{
trackdata$data <- mel(trackdata$data)
return(trackdata)
}
}
"trackfreq" <- function(specdata){
if(is.trackdata(specdata))
return(attr(specdata$data, "fs"))
else
return(attr(specdata, "fs"))
}
"get.trackkeywrd" <- function (fname)
{
line <- readLines(fname, n = 2)
if (length(line) < 2) {
return(NULL)
}
line <- splitstring(line[2], " ")
if ((length(line) == 3) && (line[2] == "Trackname")) {
trackname <- line[3]
}
else {
return(NULL)
}
if (trackname != "") {
return(trackname)
}
else {
return(NULL)
}
}
"dur.trackdata" <- function (x)
{
x$ftime[,2] - x$ftime[,1]
}
"frames" <- function(trackdata)
{
if(!(is.trackdata(trackdata)))
stop ("Object must be of class trackdata")
trackdata$data
}
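# Example (illustrative sketch, not part of the original source): build a
# small trackdata object by hand and exercise the methods defined above. The
# two segments and their times are made up for illustration.
#   dat   <- matrix(rnorm(20), ncol = 2)   # 10 samples, 2 dimensions
#   index <- rbind(c(1, 5), c(6, 10))      # two segments of 5 samples each
#   ftime <- rbind(c(0, 50), c(60, 110))   # segment start/end times in ms
#   tr <- as.trackdata(dat, index, ftime, trackname = "demo")
#   is.trackdata(tr)    # TRUE
#   dur.trackdata(tr)   # segment durations: 50 50
#   frames(tr)          # the raw data matrix
#   tr[1]               # first segment only, via "[.trackdata"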
|
rand.test <-
function(set1, set2, sims=1000, crit=.95, graph=TRUE, seed=2) {
set1 <- data.frame(set1)
set2 <- data.frame(set2)
  samp.distr = numeric(sims)
  samp.distsig = numeric(sims)
complete = complete.cases(cbind(set1,set2))
set1.set = subset(set1, subset=complete)
set2.set = subset(set2, subset=complete)
n = nrow(set1.set)
critT = qt(.025, n-2, lower.tail=FALSE)
critr = sqrt( critT^2 / (critT^2 + n - 2) )
AbsRObs = mean(abs(cor(set1.set, set2.set)))
SigObs = sum(abs(cor(set1.set, set2.set)) >= critr)
  if (seed != FALSE) {set.seed(seed)}
for (i in 1:sims) {
rand.order = sample(n, n, replace=FALSE)
cor.mat = cor(set1.set[rand.order,],set2.set)
samp.distr[i] = mean(abs(cor.mat))
samp.distsig[i] = sum(abs(cor.mat) >= critr)
}
SimMeanR = mean(samp.distr)
SimSDr = sd(samp.distr)
Crit95r = quantile(samp.distr,crit)
pr = sum(samp.distr >= AbsRObs) / sims
pr.me <- sqrt(pr * (1-pr) / sims) * qnorm(.9995)
SimMeanSig = mean(samp.distsig)
SimSDsig = sd(samp.distsig)
Crit95Sig = quantile(samp.distsig,crit)
pSig = sum(samp.distsig >= SigObs) / sims
pSig.me <- sqrt(pSig * (1-pSig) / sims) * qnorm(.9995)
if(pr + pr.me > 1.00 | pr - pr.me < .00) {warning("Confidence intervals for p-values may be inaccurate. Try a larger number of sims.")}
if(pSig + pSig.me > 1.00 | pSig - pSig.me < .00) {warning("Confidence intervals for p-values may be inaccurate. Try a larger number of sims.")}
if(pr == .00 | pSig == .00) {warning("When p=.00, confidence interval for p not valid.")}
out.AbsR = round(rbind(n, AbsRObs, SimMeanR, SimSDr, pr, pr + pr.me, pr - pr.me, Crit95r),4)
colnames(out.AbsR) = c("Average Absolute r")
rownames(out.AbsR) = c("N", "Observed", "Exp. By Chance", "Standard Error", "p", "99.9% Upperbound p", "99.9% Lowerbound p", "95th %")
out.Sig = round(rbind(n, SigObs, SimMeanSig, SimSDsig, pSig, pSig + pSig.me, pSig - pSig.me, Crit95Sig),4)
colnames(out.Sig) = c("Number Significant")
rownames(out.Sig) = c("N", "Observed", "Exp. By Chance", "Standard Error", "p", "99.9% Upperbound p", "99.9% Lowerbound p", "95th %")
results <- list("AbsR"=out.AbsR, "Sig"=out.Sig)
if (graph == TRUE) {
old.par = par(mfrow=c(2,1))
hist(samp.distr, freq=TRUE, col="cyan",
main="Approximate Sampling Distribution \n For Average Absolute r",
xlab = "Average Absolute r", ylab="Frequency",
xlim= range(min(samp.distr)-.01,AbsRObs+.01) )
abline (v=(Crit95r), col="red")
points(AbsRObs,0, col="red", pch=19)
hist(samp.distsig, freq=TRUE, col="cyan",
main="Approximate Sampling Distribution \n For Number Significant",
xlab = "Number Statistically Significant", ylab="Frequency",
xlim= range(min(samp.distsig)-1,(SigObs+1)))
abline (v=(Crit95Sig), col="red")
points(SigObs,0, col="red", pch=19)
}
return(results)
}
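# Example (illustrative sketch, not part of the original source): two small
# sets of uncorrelated variables, so the observed mean |r| should fall well
# inside the permutation distribution.
#   set1 <- data.frame(a = rnorm(40), b = rnorm(40))
#   set2 <- data.frame(c = rnorm(40), d = rnorm(40))
#   rand.test(set1, set2, sims = 500, graph = FALSE)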
|
summary.lmvar <- function(object, mu = TRUE, sigma = TRUE, ...){
beta_mu = numeric()
beta_sigma = numeric()
if (mu){
beta_mu = coef.lmvar( object, sigma = FALSE)
}
if (sigma){
beta_sigma = coef.lmvar( object, mu = FALSE)
}
beta_sigma_names = beta_sigma_names( names(beta_mu), names(beta_sigma))
beta_names = c( names(beta_mu), beta_sigma_names)
variances_mu = numeric()
variances_sigma = numeric()
if (mu){
M = vcov.lmvar( object, sigma = FALSE)
variances_mu = Matrix::diag(M)
}
if (sigma){
M = vcov.lmvar( object, mu = FALSE)
variances_sigma = Matrix::diag(M)
}
variances = c( variances_mu, variances_sigma)
sterr = sqrt(variances)
z_values = c(beta_mu, beta_sigma) / sterr
pr_z = 2 * stats::pnorm( abs(z_values), lower.tail = FALSE)
estimate = c( beta_mu, beta_sigma)
coeff = cbind( estimate, sterr, z_values, pr_z)
colnames(coeff) = c( "Estimate", "Std. Error", "z value", "Pr(>|z|)")
rownames(coeff) = beta_names
sigma_fit = fitted.lmvar( object, mu=FALSE)
res = residuals.lmvar(object) / sigma_fit
res = stats::quantile( res, c( 0, 0.25, 0.5, 0.75, 1))
names(res) = c( "Min", "1Q", "Median", "3Q", "Max")
sigma_fit = stats::quantile( sigma_fit, c( 0, 0.25, 0.5, 0.75, 1))
names(sigma_fit) = c( "Min", "1Q", "Median", "3Q", "Max")
if (object$intercept_sigma){
df_additional = dfree( object, mu = FALSE) - 1
}
else {
df_additional = NULL
}
if (is.null(df_additional)){
p_value = NULL
}
else {
p_value = stats::pchisq(2 * (object$logLik - object$logLik_lm), df_additional, lower.tail = FALSE)
}
rlist = list( call = object$call,
residuals = res,
coefficients = coeff,
sigma = sigma_fit,
aliased_mu = object$aliased_mu,
aliased_sigma = object$aliased_sigma,
logLik_ratio = object$logLik - object$logLik_lm,
df_additional = df_additional,
p_value = p_value,
nobs = nobs(object),
df = dfree(object),
options = list(mu = mu, sigma = sigma))
class(rlist) = "summary_lmvar"
return(rlist)
}
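# Example (illustrative sketch, not part of the original source; assumes the
# lmvar package, whose fit objects this method summarizes, is installed):
#   library(lmvar)
#   y <- rnorm(100); X <- matrix(rnorm(200), ncol = 2)
#   fit <- lmvar(y, X_mu = X, X_sigma = X)
#   summary(fit)    # dispatches to summary.lmvar()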
|
predict <- function(
.object = NULL,
.benchmark = c("lm", "unit", "PLS-PM", "GSCA", "PCA", "MAXVAR"),
.cv_folds = 10,
.handle_inadmissibles = c("stop", "ignore", "set_NA"),
.r = 10,
.test_data = NULL
) {
.benchmark <- match.arg(.benchmark)
.handle_inadmissibles <- match.arg(.handle_inadmissibles)
if(inherits(.object, "cSEMResults_multi")) {
out <- lapply(.object, predict,
.benchmark = .benchmark,
.cv_folds = .cv_folds,
.handle_inadmissibles = .handle_inadmissibles,
.r = .r,
.test_data = .test_data
)
class(out) <- c("cSEMPredict", "cSEMPredict_multi")
return(out)
} else {
if(inherits(.object, "cSEMResults_2ndorder")) {
stop2('Currently, `predict()` is not implemented for models containing higher-order constructs.')
}
if(all(.object$Information$Model$structural == 0)) {
stop2("`predict()` requires a structural model.")
}
if(.object$Information$Model$model_type != 'Linear'){
stop2('Currently, `predict()` works only for linear models.')
}
if(.object$Information$Type_of_indicator_correlation != 'Pearson'){
stop2('Currently, `predict()` works only in combination with Pearson correlation.')
}
args <- .object$Information$Arguments
indicators <- colnames(.object$Information$Data)
if(.benchmark == args$.approach_weights) {
warning2(
"The following warning occured in the `predict()` function:\n",
"Original estimation is based on the same approach as the benchmark approach.",
" Target and benchmark predicitons are identical."
)
}
if(args$.disattenuate & .benchmark %in% c("unit", "GSCA", "MAXVAR") &
any(.object$Information$Model$construct_type == "Composite")) {
args$.disattenuate <- FALSE
warning2(
"The following warning occured in the `predict()` function:\n",
"Disattenuation only applicable if all constructs are modeled as common factors.",
" Results based on benchmark = `", .benchmark, "` are not disattenuated."
)
}
if(!is.null(.test_data)) {
if(!any(class(.test_data) %in% c("data.frame", "matrix"))) {
stop2("The following error occured in the `predict()` function:\n",
".test_data must be a matrix or a data frame.")
}
.r <- 1
.cv_folds <- NA
dat_train <- args$.data[, indicators]
.test_data = as.matrix(.test_data)
rownames(.test_data) <- 1:nrow(.test_data)
if(is.null(colnames(.test_data))) {
stop2(
"The following error occured in the `predict()` function:\n",
"The test data does not have column names that match the training data. "
)
}
dat_test <- .test_data[, indicators]
dat <- list("test" = dat_test, "train" = dat_train)
}
out_all <- list()
for(i in 1:.r) {
if(is.null(.test_data)) {
dat <- resampleData(
.object = .object,
.resample_method = "cross-validation",
.cv_folds = .cv_folds,
.R = 1,
.seed = NULL
)[[1]]
dat <- lapply(dat, processData, .model = .object$Information$Model)
ii <- length(dat)
} else {
ii <- 1
}
out_cv <- list()
for(j in 1:ii) {
X_train <- as.matrix(do.call(rbind, dat[-j]))[, indicators]
X_test <- dat[[j]][, indicators]
mean_train <- colMeans(X_train)
sd_train <- matrixStats::colSds(as.matrix(X_train))
names(sd_train) <- names(mean_train)
X_test_scaled <- sapply(1:ncol(X_test), function(x){
(X_test[, x] - mean_train[x]) / sd_train[x]
})
colnames(X_test_scaled) <- colnames(X_test)
rownames(X_test_scaled) <- rownames(X_test)
args$.data <- X_train
args_target <- args_benchmark <- args
if(.benchmark %in% c("unit", "PLS-PM", "GSCA", "PCA", "MAXVAR")) {
args_benchmark$.approach_weights <- .benchmark
kk <- 2
} else {
kk <- 1
}
args_list <- list(args_target, args_benchmark)
results <- list()
for(k in 1:kk) {
Est <- do.call(foreman, args_list[[k]])
cons_exo <- Est$Information$Model$cons_exo
cons_endo <- Est$Information$Model$cons_endo
endo_indicators <- colnames(Est$Information$Model$measurement)[colSums(Est$Information$Model$measurement[cons_endo, , drop = FALSE]) != 0]
exo_indicators <- colnames(Est$Information$Model$measurement)[colSums(Est$Information$Model$measurement[cons_exo, , drop = FALSE]) != 0]
W_train <- Est$Estimates$Weight_estimates
loadings_train <- Est$Estimates$Loading_estimates
path_train <- Est$Estimates$Path_estimates
B_train <- path_train[cons_endo, cons_endo, drop = FALSE]
Gamma_train <- path_train[cons_endo, cons_exo, drop = FALSE]
status_code <- sum(unlist(verify(Est)))
if(status_code == 0 | (status_code != 0 & .handle_inadmissibles == "ignore")) {
eta_hat_exo <- X_test_scaled %*% t(W_train[cons_exo, ,drop = FALSE])
eta_hat_endo <- eta_hat_exo %*% t(Gamma_train) %*% t(solve(diag(nrow(B_train)) - B_train))
X_hat <- eta_hat_endo %*% loadings_train[cons_endo, , drop = FALSE]
X_hat_rescaled <- sapply(colnames(X_hat), function(x) {
mean_train[x] + X_hat[, x] * sd_train[x]
})
X_hat_rescaled <- X_hat_rescaled[, endo_indicators]
residuals_target <- X_test[, endo_indicators] - X_hat_rescaled[, endo_indicators]
} else if(status_code != 0 & .handle_inadmissibles == "set_NA"){
X_hat_rescaled <- residuals_target <- X_test[, endo_indicators]
X_hat_rescaled[] <- NA
residuals_target[] <- NA
} else {
stop2("Estimation based on one of the cross-validation folds yielded an inadmissible results.\n",
" Consider setting handle_inadmissibles = 'ignore'.")
}
results[[k]] <- list(X_hat_rescaled, residuals_target)
}
if(.benchmark %in% c("unit", "PLS-PM", "GSCA", "PCA", "MAXVAR")) {
predictions_benchmark <- results[[2]][[1]]
residuals_benchmark <- results[[2]][[2]]
} else if(.benchmark == "lm") {
beta_exo <- solve(t(X_train[, exo_indicators]) %*%
X_train[, exo_indicators]) %*%
t(X_train[, exo_indicators]) %*% X_train[, endo_indicators, drop = FALSE]
predictions_benchmark <- as.matrix(X_test[, exo_indicators]) %*% beta_exo
residuals_benchmark <- X_test[, endo_indicators] - predictions_benchmark
}
residuals_mb <- t(t(X_test[, endo_indicators]) - mean_train[endo_indicators])
out_cv[[j]] <- list(
"Predictions_target" = results[[1]][[1]],
"Residuals_target" = results[[1]][[2]],
"Predictions_benchmark" = predictions_benchmark,
"Residuals_benchmark" = residuals_benchmark,
"Residuals_mb" = residuals_mb
)
}
out_temp <- lapply(purrr::transpose(out_cv), function(x) {
x <- do.call(rbind, x)
x <- x[order(as.numeric(rownames(x))), ]
x
})
out_all[[i]] <- out_temp
}
out_temp <- lapply(purrr::transpose(out_all), function(x) {
a <- apply(abind::abind(x, along = 3), 1:2, function(y) sum(y, na.rm = TRUE))
b <- Reduce("+", lapply(x, function(y) !is.na(y)))
a / b
})
mae_target <- apply(out_temp$Residuals_target, 2, function(x) mean(abs(x - mean(x))))
mae_benchmark <- apply(out_temp$Residuals_benchmark, 2, function(x) mean(abs(x - mean(x))))
rmse_target <- apply(out_temp$Residuals_target, 2, function(x) sqrt(mean((x - mean(x))^2)))
rmse_benchmark<- apply(out_temp$Residuals_benchmark, 2, function(x) sqrt(mean((x - mean(x))^2)))
q2_predict <- c()
for(i in colnames(out_temp$Residuals_target)) {
q2_predict[i] <- 1- sum((out_temp$Residuals_target[, i] - mean(out_temp$Residuals_target[, i]))^2) /
sum((out_temp$Residuals_mb[, i] - mean(out_temp$Residuals_mb[, i]))^2)
}
df_metrics <- data.frame(
"Name" = endo_indicators,
"MAE_target" = mae_target,
"MAE_benchmark" = mae_benchmark,
"RMSE_target" = rmse_target,
"RMSE_benchmark" = rmse_benchmark,
"Q2_predict" = q2_predict,
stringsAsFactors = FALSE
)
rownames(df_metrics) <- NULL
out <- list(
"Actual" = if(is.null(.test_data)) {
.object$Information$Arguments$.data[, endo_indicators]
} else {
.test_data[, endo_indicators]
},
"Predictions_target" = out_temp$Predictions_target,
"Residuals_target" = out_temp$Residuals_target,
"Residuals_benchmark" = out_temp$Residuals_benchmark,
"Prediction_metrics" = df_metrics,
"Information" = list(
"Target" = .object$Information$Arguments$.approach_weights,
"Benchmark" = .benchmark,
"Handle_inadmissibles" = .handle_inadmissibles,
"Number_of_observations_training" = nrow(X_train),
"Number_of_observations_test" = nrow(X_test),
"Number_of_folds" = .cv_folds,
"Number_of_repetitions" = .r
)
)
class(out) <- "cSEMPredict"
out
}
}
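# Example (illustrative sketch, not part of the original source; assumes a
# fitted cSEMResults object `res` produced by the package's estimation
# function, e.g. csem()):
#   pr <- predict(res, .benchmark = "lm", .cv_folds = 10, .r = 5)
#   pr$Prediction_metrics   # MAE/RMSE/Q2_predict per endogenous indicator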
|
acontext("params")
df <- data.frame(z=rnorm(100))
viz <-
list(step=ggplot()+
geom_step(aes(seq_along(z), z),
data=df,
size=3,
color="grey50"))
pattern <-
paste0("(?<name>\\S+?)",
": *",
"(?<value>.+?)",
";")
test_that("transparent does not convert", {
expect_identical(toRGB("transparent"), "transparent")
})
test_that("NA converts to transparent", {
expect_identical(toRGB(NA), "transparent")
})
test_that("grey50 converts", {
expect_identical(toRGB("grey50"), "
})
test_that("color is converted to RGB colour", {
info <- animint2HTML(viz)
expect_equal(length(info$geoms), 1)
g <- info$geoms[[1]]
expected.colour <- as.character(toRGB("grey50"))
expect_identical(g$params$colour, expected.colour)
node.list <- getNodeSet(info$html, '//g[@class="geom1_step_step"]//path')
expect_equal(length(node.list), 1)
node <- node.list[[1]]
attr.vec <- xmlAttrs(node)
style.str <- attr.vec[["style"]]
style.mat <- str_match_all_perl(style.str, pattern)[[1]]
style.vec <- style.mat[, "value"]
expect_identical(style.vec[["fill"]], "none")
expect_match(style.vec[["stroke-width"]], "3")
stroke <- style.vec[["stroke"]]
if(grepl("rgb", stroke)){
expected.regex <- paste(col2rgb(expected.colour), collapse=", *")
expect_match(stroke, expected.regex)
}else{
expect_identical(toupper(stroke), toupper(expected.colour))
}
})
|
GetLogLik = function(fpcaObj, K, Ly = NULL, Lt = NULL){
if(fpcaObj$optns$lean == TRUE && (is.null(Ly) || is.null(Lt))){
stop("Option lean is TRUE, need input data Ly and measurement time list Lt to calculate log-likelihood.")
}
if(fpcaObj$optns$lean == FALSE){
Ly <- fpcaObj$inputData$Ly
Lt <- fpcaObj$inputData$Lt
}
lambda = fpcaObj$lambda[1:K]
sigma2 = fpcaObj$sigma2
if(is.null(sigma2) && fpcaObj$optns$dataType == "Dense"){
ymat = matrix(unlist(Ly),nrow=length(Ly), byrow=TRUE)
sddiag = sqrt(diag(var(ymat)))
sigma2 = sddiag*1e-4
sigma2 = ConvertSupport(fromGrid = fpcaObj$obsGrid, toGrid = fpcaObj$workGrid, mu = sigma2)
}
logLik = 0
phi = fpcaObj$phi[,1:K, drop=FALSE]
  if(fpcaObj$optns$dataType %in% c('Dense')){
if(K == 1){
Sigma_y = phi %*% (lambda*diag(K)) %*% t(phi) + sigma2*diag(rep(1,nrow(phi)))
} else {
Sigma_y = phi %*% diag(lambda, length(lambda)) %*% t(phi) + sigma2*diag(rep(1,nrow(phi)))
}
detSigma_y = prod(c(lambda,rep(0,nrow(phi)-K))[1:length(lambda)]+sigma2)
if(detSigma_y == 0){
logLik = NULL
return(logLik)
}
ymatcenter = matrix(unlist(Ly)-fpcaObj$mu, nrow = length(Ly), byrow = TRUE)
svd_Sigma_y = svd(Sigma_y)
Sigma_y_inv = svd_Sigma_y$v %*% diag(1/svd_Sigma_y$d, length(svd_Sigma_y$d)) %*% t(svd_Sigma_y$u)
logLik = sum(diag(t(Sigma_y_inv %*% t(ymatcenter)) %*% t(ymatcenter))) + length(Ly)*log(detSigma_y)
return(logLik)
} else {
if(is.null(sigma2)){ sigma2 <- fpcaObj$rho }
if(fpcaObj$optns$error == TRUE && sigma2 <= fpcaObj$rho){
sigma2 <- fpcaObj$rho
}
for(i in 1:length(Ly)){
if(length(Lt[[i]]) == 1){
phi_i = t(as.matrix(ConvertSupport(fromGrid = fpcaObj$workGrid, toGrid = Lt[[i]],
phi = phi)))
} else {
phi_i = ConvertSupport(fromGrid = fpcaObj$workGrid, toGrid = Lt[[i]],
phi = phi)
}
mu_i = ConvertSupport(fromGrid = fpcaObj$workGrid, toGrid = Lt[[i]],
mu = fpcaObj$mu)
if(K == 1){
Sigma_yi = phi_i %*% (lambda*diag(K)) %*% t(phi_i) + sigma2 * diag(rep(1,length(mu_i)))
} else{
Sigma_yi = phi_i %*% diag(lambda, length(lambda)) %*% t(phi_i) + sigma2 * diag(rep(1,length(mu_i)))
}
detSigma_yi = det(Sigma_yi)
if(detSigma_yi == 0){
logLik = NULL
return(logLik)
}
invtempi = solve(Sigma_yi, Ly[[i]] - mu_i)
logLik = logLik + log(detSigma_yi) + invtempi %*% (Ly[[i]] - mu_i)
}
return(logLik)
}
}
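# Example (illustrative sketch, not part of the original source; assumes the
# fdapace package, whose FPCA objects this unexported helper consumes):
#   library(fdapace)
#   res <- FPCA(Ly, Lt)                       # Ly/Lt: lists of values/times
#   GetLogLik(res, K = 2)                     # log-likelihood with 2 components
#   GetLogLik(res, K = 2, Ly = Ly, Lt = Lt)   # required when optns$lean is TRUE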
|
test1 <- function(x, y){
x + y
}
test2 <- function(x, y){
x * y
}
test3 <- function(x, y){
x - y
}
|
.defaults <- list(niter = 1000,
standardize.data = TRUE,
prior.level.sd = 0.01,
nseasons = 1,
season.duration = 1,
dynamic.regression = FALSE,
max.flips = -1)
FormatInputData <- function(data) {
assert_that(is.zoo(data) || is.data.frame(data) ||
((is.vector(data) || is.matrix(data)) && is.numeric(data)))
if (is.data.frame(data) && tolower(names(data)[1]) %in% c("date", "time")) {
if (class(data$date) == "Date") {
data <- zoo(data[, -1], data$date)
} else {
warning(paste0("Did you mean: data = zoo(data[, -1], data$",
names(data)[1], ")"))
}
}
data <- TryStop(as.zoo(data), "could not convert input data to zoo object")
assert_that(is.numeric(data))
if (is.null(ncol(data))) {
dim(data) <- c(length(data), 1)
}
assert_that(nrow(data) > 3)
if (ncol(data) >= 2) {
assert_that(!anyNA(data[, -1]), msg = "covariates must not have NA values")
}
if (is.integer(data)) {
data.matrix <- coredata(data)
coredata(data) <- matrix(as.numeric(data.matrix), nrow = nrow(data.matrix),
ncol = ncol(data.matrix),
dimnames = dimnames(data.matrix))
}
return(data)
}
FormatInputPrePostPeriod <- function(pre.period, post.period, data) {
assert_that(!is.null(pre.period))
assert_that(!is.null(post.period))
assert_that(length(pre.period) == 2, length(post.period) == 2)
assert_that(!anyNA(pre.period), !anyNA(post.period))
assert_that(isTRUE(all.equal(class(time(data)), class(pre.period))) ||
(is.numeric(time(data)) && is.numeric(pre.period)),
msg = paste0("pre.period (", class(pre.period)[1], ") ",
"must have the same class as the time points in ",
"the data (", class(time(data))[1], ")"))
assert_that(isTRUE(all.equal(class(time(data)), class(post.period))) ||
(is.numeric(time(data)) && is.numeric(post.period)),
msg = paste0("post.period (", class(post.period)[1], ") ",
"must have the same class as the time points in ",
"the data (", class(time(data))[1], ")"))
if (pre.period[1] < start(data)) {
warning(paste0("Setting pre.period[1] to start of data: ", start(data)))
}
if (pre.period[2] > end(data)) {
warning(paste0("Setting pre.period[2] to end of data: ", end(data)))
}
if (post.period[2] > end(data)) {
warning(paste0("Setting post.period[2] to end of data: ", end(data)))
}
period.indices <- list(
pre.period = GetPeriodIndices(pre.period, time(data)),
post.period = GetPeriodIndices(post.period, time(data)))
assert_that(diff(period.indices$pre.period) >= 2,
msg = "pre.period must span at least 3 time points")
assert_that(period.indices$post.period[1] > period.indices$pre.period[2])
return(period.indices)
}
FormatInputForCausalImpact <- function(data, pre.period, post.period,
model.args, bsts.model,
post.period.response, alpha) {
assert_that(
xor(!is.null(data) && !is.null(pre.period) && !is.null(post.period) &&
is.null(bsts.model) && is.null(post.period.response),
is.null(data) && is.null(pre.period) && is.null(post.period) &&
!is.null(bsts.model) && !is.null(post.period.response)),
msg = paste0("must either provide data, pre.period, post.period, ",
"model.args; or bsts.model and post.period.response"))
if (!is.null(data)) {
data <- FormatInputData(data)
}
if (!is.null(data)) {
checked <- FormatInputPrePostPeriod(pre.period, post.period, data)
pre.period <- checked$pre.period
post.period <- checked$post.period
}
model.args <- ParseArguments(model.args, .defaults)
assert_that(is.scalar(model.args$standardize.data))
assert_that(is.logical(model.args$standardize.data))
assert_that(!is.na(model.args$standardize.data))
if (!is.null(bsts.model)) {
assert_that(class(bsts.model) == "bsts")
}
if (!is.null(bsts.model)) {
assert_that(!is.null(post.period.response),
is.vector(post.period.response),
is.numeric(post.period.response))
}
assert_that(is.numeric(alpha))
assert_that(is.scalar(alpha))
assert_that(!is.na(alpha))
assert_that(alpha > 0, alpha < 1)
return(list(data = data, pre.period = pre.period, post.period = post.period,
model.args = model.args, bsts.model = bsts.model,
post.period.response = post.period.response, alpha = alpha))
}
CausalImpact <- function(data = NULL,
pre.period = NULL,
post.period = NULL,
model.args = NULL,
bsts.model = NULL,
post.period.response = NULL,
alpha = 0.05) {
checked <- FormatInputForCausalImpact(data, pre.period, post.period,
model.args, bsts.model,
post.period.response, alpha)
data <- checked$data
pre.period <- checked$pre.period
post.period <- checked$post.period
model.args <- checked$model.args
bsts.model <- checked$bsts.model
post.period.response <- checked$post.period.response
alpha <- checked$alpha
if (!is.null(data)) {
impact <- RunWithData(data, pre.period, post.period, model.args, alpha)
times <- time(data)
impact$model$pre.period <- times[pre.period]
impact$model$post.period <- times[post.period]
} else {
impact <- RunWithBstsModel(bsts.model, post.period.response, alpha)
}
return(impact)
}
RunWithData <- function(data, pre.period, post.period, model.args, alpha) {
times <- time(data)
time(data) <- seq_len(nrow(data))
pre.period[1] <- max(pre.period[1], which.max(!is.na(data[, 1])))
data.modeling <- window(data, start = pre.period[1])
times.modeling <- window(times, start = pre.period[1])
if (is.null(ncol(data.modeling))) {
dim(data.modeling) <- c(length(data.modeling), 1)
}
UnStandardize <- identity
if (model.args$standardize.data) {
fit.range <- c(1, diff(pre.period) + 1)
sd.results <- StandardizeAllVariables(data.modeling, fit.range)
data.modeling <- sd.results$data
UnStandardize <- sd.results$UnStandardize
}
window(data.modeling[, 1], start = pre.period[2] + 1) <- NA
bsts.model <- ConstructModel(data.modeling, model.args)
if (!is.null(bsts.model)) {
y.cf <- window(data[, 1], start = pre.period[2] + 1)
inferences <- CompilePosteriorInferences(bsts.model, y.cf,
post.period - pre.period[1] + 1,
alpha, UnStandardize)
} else {
inferences <- CompileNaInferences(data[, 1])
}
time(inferences$series) <- times.modeling
empty <- zoo(, times)
inferences$series <- merge(inferences$series, empty, all = TRUE)
assert_that(nrow(inferences$series) == nrow(data))
inferences$series[, 1] <- data[, 1]
names(inferences$series)[1] <- "response"
names(inferences$series)[2] <- "cum.response"
model <- list(pre.period = times[pre.period],
post.period = times[post.period],
model.args = model.args,
bsts.model = bsts.model,
alpha = alpha)
impact <- list(series = inferences$series,
summary = inferences$summary,
report = inferences$report,
model = model)
class(impact) <- "CausalImpact"
return(impact)
}
RunWithBstsModel <- function(bsts.model, post.period.response, alpha = 0.05) {
y <- as.vector(bsts.model$original.series)
indices <- TryStop(InferPeriodIndicesFromData(y),
paste0("bsts.model must have been fitted on data where ",
"the values in the post-intervention period have ",
"been set to NA"))
if (is.integer(time(bsts.model$original.series))) {
indices <- lapply(indices, as.integer)
}
inferences <- CompilePosteriorInferences(bsts.model = bsts.model,
y.cf = post.period.response,
post.period = indices$post.period,
alpha = alpha)
names(inferences$series)[1] <- "response"
names(inferences$series)[2] <- "cum.response"
times <- time(bsts.model$original.series)
model <- list(pre.period = times[indices$pre.period],
post.period = times[indices$post.period],
bsts.model = bsts.model,
alpha = alpha)
impact <- list(series = inferences$series,
summary = inferences$summary,
report = inferences$report,
model = model)
class(impact) <- "CausalImpact"
return(impact)
}
PrintSummary <- function(impact, digits = 2L) {
assert_that(class(impact) == "CausalImpact")
assert_that(is.numeric(digits), is.scalar(digits), as.integer(digits) > 0,
msg = "<digits> must be a positive integer")
summary <- impact$summary
alpha <- impact$model$alpha
assert_that(!is.null(alpha) && alpha > 0,
msg = "invalid <alpha>; <impact> must be a CausalImpact object")
cat("Posterior inference {CausalImpact}\n")
if (is.null(summary)) {
cat("(Inference aborted)\n")
return()
}
FormatNumber <- function(x) format(x, digits = digits, trim = TRUE)
FormatPercent <- function(x) {
paste0(format(x * 100, digits = digits, trim = TRUE), "%")
}
FormatCI <- function(a, b) {
paste0("[", format(a, digits = min(digits, 2), trim = TRUE),
", ", format(b, digits = min(digits, 2), trim = TRUE),
"]")
}
FormatPercentCI <- function(a, b) {
paste0("[", format(a * 100, digits = min(digits, 2), trim = TRUE),
"%, ", format(b * 100, digits = min(digits, 2), trim = TRUE),
"%]")
}
fsummary <- data.frame(
Actual = FormatNumber(summary$Actual),
Pred = paste0(FormatNumber(summary$Pred),
" (", FormatNumber(summary$Pred.sd), ")"),
Pred.ci = FormatCI(summary$Pred.lower, summary$Pred.upper),
Separator1 = c("", ""),
AbsEffect = paste0(FormatNumber(summary$AbsEffect),
" (", FormatNumber(summary$AbsEffect.sd), ")"),
AbsEffect.ci = FormatCI(summary$AbsEffect.lower, summary$AbsEffect.upper),
Separator2 = c("", ""),
RelEffect = paste0(FormatPercent(summary$RelEffect),
" (", FormatPercent(summary$RelEffect.sd), ")"),
RelEffect.ci = FormatPercentCI(summary$RelEffect.lower,
summary$RelEffect.upper))
tsummary <- t(fsummary)
colnames(tsummary) <- c("Average", "Cumulative")
ci.label <- paste0(round((1 - alpha) * 100), "% CI")
row.names(tsummary) <- c("Actual", "Prediction (s.d.)", ci.label,
" ",
"Absolute effect (s.d.)", paste(ci.label, ""),
" ",
"Relative effect (s.d.)", paste(ci.label, " "))
cat("\n")
print.default(tsummary, print.gap = 3L, quote = FALSE)
cat("\n")
p <- summary$p[1]
cat(paste0("Posterior tail-area probability p: ", round(p, 5), "\n"))
cat(paste0("Posterior prob. of a causal effect: ",
round((1 - p) * 100, ifelse(p < 0.01, 5, ifelse(p < 0.05, 3, 0))),
"%\n"))
cat("\n")
cat(paste0("For more details, type: summary(impact, \"report\")\n"))
cat("\n")
}
PrintReport <- function(impact, digits = 2L) {
assert_that(class(impact) == "CausalImpact")
cat("Analysis report {CausalImpact}\n")
if (is.null(impact$report)) {
cat("(Report empty)")
} else {
cat(paste(InterpretSummaryTable(impact$summary, digits), collapse = " "),
"\n")
}
}
.summary.CausalImpact <- function(impact,
output = c("summary", "report"),
...) {
output <- tolower(match.arg(output))
if (output == "summary") {
PrintSummary(impact, ...)
} else if (output == "report") {
PrintReport(impact, ...)
}
}
summary.CausalImpact <- function(object, ...) {
.summary.CausalImpact(object, ...)
}
print.CausalImpact <- function(x, ...) {
.summary.CausalImpact(x, ...)
}
as.CausalImpact <- function(x, ...) {
UseMethod("as.CausalImpact")
}
as.CausalImpact.default <- function(x, ...) {
stop("No method available to coerce an object of class ", class(x)[1],
" to CausalImpact")
}
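# Example (illustrative sketch, not part of the original source; assumes the
# full CausalImpact package so that ConstructModel() etc. are available):
#   set.seed(1)
#   x1 <- 100 + arima.sim(model = list(ar = 0.999), n = 100)
#   y  <- 1.2 * x1 + rnorm(100)
#   y[71:100] <- y[71:100] + 10    # synthetic intervention effect
#   impact <- CausalImpact(cbind(y, x1),
#                          pre.period = c(1, 70), post.period = c(71, 100))
#   summary(impact)                # dispatches to summary.CausalImpact()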
|
context("api_query")
test_that("api_query works", {
skip_on_cran()
expect_is(api_query(api("http://api.plos.org/search")), "req")
aa <- api("http://api.plos.org/search") %>%
api_query(q = ecology, wt = json, fl = 'id,journal') %>%
peep
bb <- api("http://api.plos.org/search") %>%
api_query(q = ecology, wt = json, fl = id, fl = journal) %>%
peep
cc <- api("http://api.plos.org/search") %>%
api_query_(q = "ecology", wt = "json", fl = 'id', fl = 'journal') %>%
peep
expect_is(aa, "req")
expect_is(bb, "req")
expect_is(cc, "req")
expect_is(aa$url, "url")
expect_is(bb$query, "list")
expect_is(aa %>% http, "list")
expect_is(bb %>% http, "list")
expect_is(cc %>% http, "list")
expect_identical(cc %>% http,
api("http://api.plos.org/search") %>%
api_query_(q = "ecology", wt = "json", fl = 'id', fl = 'journal')
)
})
test_that("api_query fails well", {
skip_on_cran()
xx <- api("http://api.plos.org/search")
expect_error(api_query(), "argument \".data\" is missing")
})
|
precintcon.H <- function(beta, gamm, pzero, x) {
if (x > 0)
return(pzero + (1 - pzero) * pgamma(x/beta, gamm))
else
return(pzero)
}
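# Example (illustrative sketch, not part of the original source): the mixed
# CDF above combines a point mass at zero (pzero) with a gamma CDF for
# positive amounts; parameter values are arbitrary.
#   precintcon.H(beta = 2, gamm = 1.5, pzero = 0.1, x = 0)    # returns 0.1
#   precintcon.H(beta = 2, gamm = 1.5, pzero = 0.1, x = 10)   # > 0.1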
|
setaggre <- function(x, values = NULL, by = NULL, breaks = NULL) {
all_names_present(x, c(values, by))
if (!length(by) && length(values)) by <- setdiff(names(x), values)
if (length(by) && !length(values)) values <- setdiff(names(x), by)
if (!inherits(x, "aggre")) {
cl <- class(x)
wh <- which(cl %in% c("data.table", "data.frame"))
wh <- min(wh)
cl <- c(cl[0:(wh-1)], "aggre", cl[wh:length(cl)])
setattr(x, "class", cl)
}
setattr(x, "aggre.meta", list(values = values, by = by, breaks = breaks))
setattr(x, "breaks", breaks)
invisible(x)
}
as.aggre <- function(x, values = NULL, by = NULL, breaks = NULL, ...) {
UseMethod("as.aggre", x)
}
as.aggre.data.frame <- function(x, values = NULL, by = NULL, breaks = NULL, ...) {
x <- copy(x)
setaggre(x, values = values, by = by, breaks = breaks, ...)
setattr(x, "class", c("aggre", "data.frame"))
x[]
}
as.aggre.data.table <- function(x, values = NULL, by = NULL, breaks = NULL, ...) {
x <- copy(x)
setaggre(x, values = values, by = by, breaks = breaks, ...)
setattr(x, "class", c("aggre", "data.table", "data.frame"))
x[]
}
as.aggre.default <- function(x, ...) {
stop(gettextf("cannot coerce class \"%s\" to 'aggre'", deparse(class(x))),
domain = NA)
}
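# Example (illustrative sketch, not part of the original source; assumes the
# package's internal helpers such as all_names_present() and data.table are
# available): coerce a plain data.frame of aggregated counts to class "aggre".
#   df <- data.frame(sex = c("m", "f"), pyrs = c(100.5, 120.2), obs = c(3L, 5L))
#   ag <- as.aggre(df, values = c("pyrs", "obs"), by = "sex")
#   class(ag)   # "aggre" "data.frame"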
aggre <- function(lex, by = NULL, type = c("unique", "full"), sum.values = NULL, subset = NULL, verbose = FALSE) {
allTime <- proc.time()
lex.Cst <- lex.Xst <- lex.id <- at.risk <- NULL
PF <- parent.frame(1L)
TF <- environment()
type <- match.arg(type[1], c("non-empty", "unique", "full", "cartesian"))
if (type == "cartesian") type <- "full"
if (type == "non-empty") type <- "unique"
if (verbose) cat("Aggregation type: '", type, "' \n", sep = "")
checkLexisData(lex)
breaks <- copy(attr(lex, "breaks"))
checkBreaksList(lex, breaks)
allScales <- copy(attr(lex, "time.scales"))
if (length(allScales) == 0 ) {
stop("could not determine names of time scales; ",
"is the data a Lexis object?")
}
subset <- substitute(subset)
subset <- evalLogicalSubset(lex, subset)
sumSub <- substitute(sum.values)
sum.values <- evalPopArg(lex[1:min(nrow(lex), 20L), ], arg = sumSub,
enclos = PF, recursive = TRUE, DT = TRUE)
sumType <- attr(sum.values, "arg.type")
sumVars <- attr(sum.values, "all.vars")
sumSub <- attr(sum.values, "quoted.arg")
if (is.null(sum.values)) {
sumType <- "NULL"
sumVars <- NULL
sumSub <- quote(list())
}
badSum <- names(sum.values)[!sapply(sum.values, is.numeric)]
if (length(badSum) > 0L) {
badSum <- paste0("'", badSum, "'", collapse = ", ")
stop("Following variables resulting from evaluating supplied sum.values ",
"argument are not numeric and cannot be summed: ", badSum,
". Evaluated sum.values: ", deparse(sumSub))
}
ags <- substitute(by)
if (verbose) cat("Used by argument:", paste0(deparse(ags)),"\n")
by <- evalPopArg(data = lex[1:min(nrow(lex), 20),],
arg = ags, DT = TRUE, enclos = PF, recursive = TRUE)
ags <- attr(by, "quoted.arg")
av <- attr(by, "all.vars")
argType <- attr(by, "arg.type")
if (is.null(by)) {
ags <- substitute(list())
av <- NULL
argType <- "NULL"
type <- "unique"
}
if (verbose) cat("Type of by argument:", argType, "\n")
keepVars <- unique(c("lex.id", allScales, "lex.dur",
"lex.Cst", "lex.Xst", av, sumVars))
lex.orig <- lex
lex <- subsetDTorDF(lex, subset = subset, select = keepVars)
lex <- data.table(lex)
forceLexisDT(lex, breaks = breaks, allScales = allScales, key = FALSE)
lex <- intelliDrop(lex, breaks = breaks)
setkeyv(lex, c("lex.id", allScales[1]))
setcolsnull(lex, delete = setdiff(allScales, names(breaks)))
aggScales <- intersect(av, allScales)
if (any(!aggScales %in% names(breaks))) {
aggScales <- paste0("'", setdiff(aggScales, names(breaks)), "'", collapse = ", ")
stop("Requested aggregating by time scale(s) by which data ",
"has not been split: ", aggScales)
}
tmpAtRisk <- makeTempVarName(lex, pre = "at.risk_")
set(lex, j = tmpAtRisk, value = TRUE)
survScale <- NULL
if (length(aggScales) > 0) {
cutTime <- proc.time()
survScale <- aggScales[length(aggScales)]
lex[, c(tmpAtRisk) := lex[[survScale]] %in% breaks[[survScale]] ]
catAggScales <- paste0("'", aggScales, "'", collapse = ", ")
if (verbose) {
cat("Following time scales mentioned in by argument and will be",
"categorized into intervals (defined by breaks in object",
"attributes) for aggregation:", catAggScales, "\n")
}
for (sc in aggScales) {
set(lex, j = sc, value = cutLow(lex[[sc]], breaks = breaks[[sc]]))
}
if (verbose) cat("Time taken by cut()'ting time scales: ", timetaken(cutTime), "\n")
}
othVars <- setdiff(av, aggScales)
if (verbose && length(othVars) > 0) {
catOthVars <- paste0("'", othVars, "'", collapse = ", ")
cat("Detected the following non-time-scale variables to be utilized in aggregating:", catOthVars, "\n")
}
by <- evalPopArg(data = lex, arg = ags, DT = TRUE, enclos = PF, recursive = TRUE)
byNames <- names(by)
pyrsTime <- proc.time()
vdt <- data.table(pyrs = lex$lex.dur, at.risk = lex[[tmpAtRisk]],
lex.id = lex$lex.id)
pyrs <- vdt[, .(pyrs = sum(pyrs),
at.risk = sum(!duplicated(lex.id) & at.risk)),
keyby = by]
setDT(pyrs)
rm(vdt)
sumNames <- NULL
if (sumType != "NULL") {
if (sumType == "character") {
sumNames <- evalPopArg(lex, sumSub, n = 1L, DT = FALSE, recursive = TRUE, enclos = PF)
sum.values <- lex[, lapply(.SD, sum), keyby = by, .SDcols = c(sumNames)]
} else {
sum.values <- evalPopArg(lex, sumSub, n = 1L, enclos = PF)
sumNames <- names(sum.values)
sumTmpNames <- makeTempVarName(lex, pre = sumNames)
set(lex, j = sumTmpNames, value = sum.values)
sum.values <- lex[, lapply(.SD, sum), keyby = by, .SDcols = sumTmpNames]
setnames(sum.values, sumTmpNames, sumNames)
setcolsnull(lex, sumTmpNames)
}
setDT(sum.values)
pyrs <- merge(pyrs, sum.values, all = TRUE)
rm(sum.values)
}
if (verbose) cat("Time taken by aggregating pyrs: ", timetaken(pyrsTime), "\n")
valVars <- setdiff(names(pyrs), byNames)
pyrs[is.na(pyrs), pyrs := 0]
pyrs <- pyrs[pyrs > 0]
aggPyrs <- pyrs[, sum(pyrs)]
lexPyrs <- sum(lex.orig$lex.dur[subset])
pyrsDiff <- aggPyrs - lexPyrs
if (!isTRUE(all.equal(aggPyrs, lexPyrs, scale = NULL))) {
warning("Found discrepancy of ", abs(round(pyrsDiff, 4)), " ",
"in total aggregated pyrs compared to ",
"sum(lex$lex.dur); compare results by hand and make sure ",
"settings are right \n")
}
rm(subset, aggPyrs, lexPyrs)
if (type == "full") {
carTime <- proc.time()
varsUsingScales <- NULL
if (argType == "character") {
varsUsingScales <- intersect(names(by), aggScales)
whScaleUsed <- varsUsingScales
} else if (argType != "NULL") {
whScaleUsed <- lapply(ags[-1], function(x) intersect(all.vars(x), aggScales))
oneScaleTest <- any(sapply(whScaleUsed, function(x) length(x) > 1L))
if (oneScaleTest) stop("Only one Lexis time scale can be used in any one variable in by argument!")
varsUsingScales <- byNames[sapply(whScaleUsed, function (x) length(x) == 1L)]
whScaleUsed <- unlist(whScaleUsed)
}
ceejay <- lapply(by, function(x) if (is.factor(x)) levels(x) else sort(unique(x)))
if (length(aggScales) > 0) {
ceejay[varsUsingScales] <- lapply(breaks[whScaleUsed], function(x) x[-length(x)])
}
ceejay <- do.call(CJ, ceejay)
setkeyv(ceejay, byNames)
setkeyv(pyrs, byNames)
pyrs <- pyrs[ceejay]
rm(ceejay)
if (verbose) cat("Time taken by making aggregated data large in the cartesian product sense: ", timetaken(carTime), "\n")
}
transTime <- proc.time()
if (is.null(by) || (is.data.table(by) && nrow(by) == 0L)) {
by <- quote(list(lex.Cst, lex.Xst))
} else {
for (var in c("lex.Cst", "lex.Xst")) {
set(by, j = var, value = lex[[var]])
}
}
detBr <- breaks[survScale]
if (!length(survScale)) detBr <- NULL
hasEvent <- detectEvents(lex, breaks = detBr, by = "lex.id") %in% 1:2
if (!is.language(by)) by <- by[hasEvent]
trans <- lex[hasEvent, list(obs = .N), keyby = by]
rm(by, lex)
if (verbose) cat("Time taken by aggregating events: ", timetaken(transTime), "\n")
mergeTime <- proc.time()
setDT(trans)
setDT(pyrs)
tmpTr <- makeTempVarName(trans, pre = "trans_")
trans[, c(tmpTr) := paste0("from", lex.Cst, "to", lex.Xst)]
transitions <- sort(unique(trans[[tmpTr]]))
trans[, c("lex.Cst", "lex.Xst") := NULL]
tmpDum <- makeTempVarName(trans)
byNames <- c(byNames, tmpDum)
byNames <- setdiff(byNames, c("lex.Cst", "lex.Xst"))
trans[, c(tmpDum) := 1L]
pyrs[, c(tmpDum) := 1L]
valVars <- unique(c(valVars, transitions))
trans <- cast_simple(trans, rows = byNames, columns = tmpTr, values = "obs")
setkeyv(trans, NULL); setkeyv(pyrs, NULL)
setkeyv(trans, byNames); setkeyv(pyrs, byNames)
trans <- trans[pyrs]; rm(pyrs)
trans[, c(tmpDum) := NULL]
byNames <- setdiff(byNames, tmpDum)
setcolorder(trans, c(byNames, valVars))
if (verbose) cat("Time taken by merging pyrs & transitions: ", timetaken(mergeTime), "\n")
if (length(valVars) > 0L) {
trans[, c(valVars) := lapply(.SD, function(x) {
x[is.na(x)] <- 0
x
}), .SDcols = c(valVars)]
}
trans <- data.table(trans)
setaggre(trans, values = c("pyrs", "at.risk", transitions, sumNames),
by = byNames, breaks = breaks)
if (!return_DT()) setDFpe(trans)
if (verbose) cat("Time taken by aggre(): ", timetaken(allTime), "\n")
trans[]
}
|
print.DS_GF_micro <-
function(x, ...){
cat(paste0("\tPosterior Mean = ",round(x$DS.mean,4), "\n"))
cat(paste0("\tPosterior Mode = ",round(x$DS.mode,4), "\n"))
cat(paste0("Use plot(x) to generate posterior plot\n"))
}
|
evRisk <- function(x, m = 50, r.free = "tbill", ...) {
if (!inherits(x, "evReturn")) stop("Need an object from 'evReturn'.\n")
reg.n <- c("N", "firm", "event.date",
"alpha.c", "alpha.e", "alpha.t", "alpha.p", "alpha.s",
"beta.c", "beta.e", "beta.t", "beta.p", "beta.s",
"gama.c", "gama.e", "gama.t", "gama.p", "gama.s")
reg <- data.frame(matrix(0, nrow=x$N, ncol=length(reg.n)))
colnames(reg) <- reg.n
for (i in 1:x$N ) {
loca <- which(x$y[, x$y.date] == x$event.date[i])
daEst <- x$y[(loca - m):(loca + m), c(x$y.date, x$firm[i], x$index, r.free)]
rownames(daEst) <- 1:length((loca - m):(loca + m))
if (sum(as.numeric(is.na(daEst))) > 0) {
stop(paste("\nSome observations in the data",
"for firm --", x$firm[i], "-- are NA.\n\n", sep=" "))
}
daEst$firm.s <- daEst[, x$firm[i]] - daEst[, r.free]
daEst$index.a <- daEst[, x$index ] - daEst[, r.free]
daEst$dummy <- as.numeric( as.numeric(rownames(daEst)) > m+1 )
daEst$index.b <- daEst$dummy * daEst$index.a
rb <- lm(as.formula(firm.s ~ index.a + index.b), data=daEst)
coe <- bsTab(rb, need = "5", digits = x$digits)
reg[i,"N"] <- i
reg[i,"firm"] <- x$firm[i]
reg[i,"event.date"] <- x$event.date[i]
reg[i, 4:18] <- c(coe[1, -1], coe[2, -1], coe[3, -1])
}
result <- listn(x, daEst, rb, reg)
class(result) <- "evRisk"
return(result)
}
print.evRisk <- function(x, ...) {print(x$reg)}
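# Example (illustrative sketch, not part of the original source; assumes the
# erer package and an event-study object `hh` returned by evReturn()):
#   rk <- evRisk(hh, m = 50, r.free = "tbill")
#   rk    # dispatches to print.evRisk(), i.e. prints rk$reg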
|
SS = function(data, group) {
  n = tapply(data, group, length)
  k = length(n)
  xbar = tapply(data, group, mean)
  var. = tapply(data, group, var) * ((n - 1) / (n - 3))
  SS = sum((n * (xbar - mean(data))^2) / var.)
  pvalue = 1 - pchisq(SS, k)
  result = matrix(c(round(SS, digits = 4), round(k), round(pvalue, digits = 4)))
  rownames(result) = c("Test Statistic", "df", "p-value")
  colnames(result) = c("Scott-Smith")
  return(t(result))
}
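# Example (illustrative sketch, not part of the original source): Scott-Smith
# test for equal group means on made-up data with three groups.
#   x <- c(rnorm(10, 0), rnorm(10, 0.5), rnorm(10, 1))
#   g <- factor(rep(1:3, each = 10))
#   SS(x, g)   # returns the statistic, df and p-value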
|
.joe.12.density <- function(theta, u) {
if (is.null(dim(u))) {
u1 <- u[1]
u2 <- u[2]
} else {
u1 <- u[, 1]
u2 <- u[, 2]
}
u1t <- (-1 + ((1 - u1)^theta))
u2t <- (-1 + ((1 - u2)^theta))
(((1 - u1)^(-1 + theta)) * ((1 - u1t * u2t)^(1 / theta)) *
(theta - u1t * u2t) * ((1 - u2)^(-1 + theta))) /
((((1 - u1)^theta) - u1t * ((1 - u2)^theta))^2)
}
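# Example (illustrative sketch, not part of the original source): evaluate the
# bivariate Joe copula density at a single point and at several points.
#   .joe.12.density(theta = 2, u = c(0.3, 0.6))
#   .joe.12.density(theta = 2, u = cbind(runif(5), runif(5)))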
.joe.12.V <- function(theta, u1, u2) {
u1tt <- (1 - u1)^theta
u2tt <- (1 - u2)^theta
u1t <- (-1 + u1tt)
u2t <- (-1 + u2tt)
-((theta * ((-((-1 + theta)^2) + u1tt) * u1tt + ((-1 + theta) * theta +
(2 + (-1 + theta) * theta) * u1tt - 2 * ((1 - u1)^(2 * theta))) *
u2tt + u1t * (-theta + u1tt) * ((1 - u2)^(2 * theta))) * log(1 - u1) -
(-theta + u1t * u2t) * (-u1tt + u1t * u2tt) * log(1 - u1t * u2t) +
theta * (((-1 + u1tt)^2) * ((1 - u2)^(2 * theta)) * log(1 - u2) +
theta * u1tt * (1 + (-1 + theta + u1tt) * log(1 - u2)) - u1t * u2tt *
(theta + (-((-1 + theta)^2) + (1 + theta) * u1tt) * log(1 - u2)))) /
((theta^2) * (-theta + u1t * u2t) * (u1tt - u1t * u2tt)))
}
.joe.12.S <- function(theta, u1, u2) {
u1tt <- (1 - u1)^theta
u2tt <- (1 - u2)^theta
u1t <- (-1 + u1tt)
u2t <- (-1 + u2tt)
(-((theta^3) * ((u1tt - u1t * u2tt)^2)) + (-1 + theta) * (theta^2) * u1tt *
u2t * (-(theta * u2t * (-((1 - u1)^(2 * theta)) + u1t * (3 + u1tt) *
u2tt)) + 2 * (theta^2) * u2tt + (u1t^2) * (u2t^2) * u2tt) *
((log(1 - u1))^2) + 2 * ((theta - u1t * u2t)^2) * ((u1tt - u1t *
u2tt)^2) * log(1 - u1t * u2t) - 2 * theta * u1tt * log(1 - u1) *
((-2 * theta * u1t * u2t + (u1t^2) * (u2t^2) + (theta^2) * (1 +
u1tt - u1t * u2tt)) * u2t * (-u1tt + u1t * u2tt) + (-1 + theta) *
theta * (-2 * (theta^2) - (u1t^2) * (u2t^2) + theta * u1t * u2t *
(3 - u1tt + u1t * u2tt)) * u2tt * log(1 - u2)) + theta * u1t * u2tt *
log(1 - u2) * (-2 * (u1t^3) * ((1 - u2)^(3 * theta)) + (u1t^2) *
((1 - u2)^(2 * theta)) * (-4 + 2 * theta * (2 + theta) + 6 * u1tt +
(-1 + theta) * theta * (-theta + u1tt) * log(1 - u2)) + u1tt * (2 *
(((-1 + theta)^2) + (-2 + theta * (2 + theta)) * u1tt + ((1 - u1)^(2 *
theta))) + (-1 + theta) * theta * (-1 + theta + u1tt) * (-1 + 2 *
theta + u1tt) * log(1 - u2)) - 2 * u1t * u2tt * (((-1 + theta)^2) +
((1 - u1)^(2 * theta)) * (3 + (-1 + theta) * theta * log(1 - u2)) +
u1tt * (-4 + 2 * theta * (2 + theta) + ((-1 + theta)^2) * theta *
log(1 - u2))))) / ((theta^3) * ((theta - u1t * u2t)^2) *
((u1tt - u1t * u2tt)^2))
}
.joe.123.density <- function(theta, u) {
if (is.null(dim(u))) {
u1 <- u[1]
u2 <- u[2]
u3 <- u[3]
} else {
u1 <- u[, 1]
u2 <- u[, 2]
u3 <- u[, 3]
}
u1tt <- (1 - u1)^theta
u2tt <- (1 - u2)^theta
u3tt <- (1 - u3)^theta
u1t <- -1 + u1tt
u2t <- -1 + u2tt
u3t <- -1 + u3tt
(theta^2) * ((1 - u1)^(-1 + theta)) * ((1 - u2)^(-1 + theta)) *
((1 + (u1t) * (u2t) * (u3t))^(-3 + 1 / theta)) * (2 - u1tt - u2tt +
u1tt * u2tt + (3 * (u1t) * (u2t) * (u3t)) / theta + ((u1t^2) * (u2t^2) *
(u3t^2)) / (theta^2) - (u1t) * (u2t) * u3tt) * ((1 - u3)^(-1 + theta))
}
.joe.123.V <- function(theta, u) {
if (is.null(dim(u))) {
u1 <- u[1]
u2 <- u[2]
u3 <- u[3]
} else {
u1 <- u[, 1]
u2 <- u[, 2]
u3 <- u[, 3]
}
u1tt <- (1 - u1)^theta
u2tt <- (1 - u2)^theta
u3tt <- (1 - u3)^theta
u1t <- -1 + u1tt
u2t <- -1 + u2tt
u3t <- -1 + u3tt
u1t2 <- u1t^2
u2t2 <- u2t^2
u3t2 <- u3t^2
u123t <- u1t * u2t * u3t
t2 <- theta^2
t.i <- 1 / theta
(2 * theta * ((1 + u123t)^2) + 3 * (1 - u1tt) * (1 - u2tt) * (1 + u123t) *
(1 - u3tt) - 6 * (-1 + t.i) * theta * (1 - u1tt) * (1 - u2tt) *
(1 + u123t) * (1 - u3tt) + ((-1 + theta) * u1t2 * u2t2 * u3t2) /
theta + ((-1 + 2 * theta) * u1t2 * u2t2 * u3t2) / theta + (2 *
(-1 + theta) * (-1 + 2 * theta) * u1t2 * u2t2 * u3t2) / theta +
((theta + theta * u123t)^2) * log(1 - u1) - 3 * (-1 + t.i) * t2 *
(1 - u1tt) * (1 - u2tt) * (1 + u123t) * (1 - u3tt) * log(1 - u1) +
3 * (-1 + t.i) * t2 * u1tt * (1 - u2tt) * (1 + u123t) * (1 - u3tt) *
log(1 - u1) + (-1 + theta) * (-1 + 2 * theta) * u1t2 * u2t2 * u3t2 *
log(1 - u1) + 2 * (-1 + theta) * (-1 + 2 * theta) * (u1t) * u1tt * u2t2 *
u3t2 * log(1 - u1) + ((theta + theta * u123t)^2) * log(1 - u2) -
3 * (-1 + t.i) * t2 * (1 - u1tt) * (1 - u2tt) * (1 + u123t) * (1 - u3tt) *
log(1 - u2) + 3 * (-1 + t.i) * t2 * (1 - u1tt) * u2tt * (1 + u123t) *
(1 - u3tt) * log(1 - u2) + (-1 + theta) * (-1 + 2 * theta) * u1t2 * u2t2 *
u3t2 * log(1 - u2) + 2 * (-1 + theta) * (-1 + 2 * theta) * u1t2 * u2t *
u2tt * u3t2 * log(1 - u2) + ((theta + theta * u123t)^2) * log(1 - u3) -
3 * (-1 + t.i) * t2 * (1 - u1tt) * (1 - u2tt) * (1 + u123t) * (1 - u3tt) *
log(1 - u3) + (-1 + theta) * (-1 + 2 * theta) * u1t2 * u2t2 * u3t2 *
log(1 - u3) + 3 * (-1 + t.i) * t2 * (1 - u1tt) * (1 - u2tt) *
(1 + u123t) * u3tt * log(1 - u3) + 2 * (-1 + theta) * (-1 + 2 * theta) *
u1t2 * u2t2 * u3t * u3tt * log(1 - u3) + (1 + u123t) * ((-1 - u123t) *
log(1 + u123t) + (1 - theta) * theta * (u1tt * u2t * u3t * log(1 - u1) +
u1t * (u2tt * u3t * log(1 - u2) + u2t * u3tt * log(1 - u3)))) +
(-2 + t.i) * (-1 + t.i) * t2 * u1t2 * u2t2 * u3t2 *
(-(log(1 + u123t) / t2) + ((-3 + t.i) * (u1tt * u2t * u3t * log(1 - u1) +
u1t * (u2tt * u3t * log(1 - u2) + u2t * u3tt * log(1 - u3)))) /
(1 + u123t)) - 3 * (-1 + t.i) * t2 * (1 - u1tt) * (1 - u2tt) *
(1 + u123t) * (1 - u3tt) * (-(log(1 + u123t) / t2) + ((-2 + t.i) *
(u1tt * u2t * u3t * log(1 - u1) + u1t * (u2tt * u3t * log(1 - u2) + u2t *
u3tt * log(1 - u3)))) / (1 + u123t))) / (t2 * (2 - u1tt - u2tt + u1tt *
u2tt + (3 * u123t) / theta + (u1t2 * u2t2 * u3t2) / t2 -
u1t * u2t * u3tt))
}
.joe.123.S <- function(theta, u) {
if (is.null(dim(u))) {
u1 <- u[1]
u2 <- u[2]
u3 <- u[3]
} else {
u1 <- u[, 1]
u2 <- u[, 2]
u3 <- u[, 3]
}
u1tt <- (1 - u1)^theta
u2tt <- (1 - u2)^theta
u3tt <- (1 - u3)^theta
u1t <- -1 + u1tt
u2t <- -1 + u2tt
u3t <- -1 + u3tt
u1t2 <- u1t^2
u2t2 <- u2t^2
u3t2 <- u3t^2
u123t <- u1t * u2t * u3t
u123t2 <- u123t^2
u123tt <- u1t * u2t * u3tt
t2 <- theta^2
it <- 1 / theta
tm1 <- (-1 + theta)
t2m1 <- (-1 + 2 * theta)
lu1 <- log(1 - u1)
lu2 <- log(1 - u2)
lu3 <- log(1 - u3)
tim1 <- (-1 + it)
u1ttm1 <- 1 - u1tt
u2ttm1 <- 1 - u2tt
u3ttm1 <- 1 - u3tt
aaa <- (u1tt * u2t * u3t * lu1 + u1t * (u2tt * u3t * lu2 + u2t * u3tt * lu3))
bbb <- (u1tt * u2t * u3t * lu1 + u1t * u2tt * u3t * lu2 + u123tt * lu3)
ccc <- tim1 * t2 * u1ttm1
u123tp1 <- 1 + u123t
l1u123t <- log(u123tp1)
(-2 * (2 - u1tt - u2tt + u1tt * u2tt + (3 * u123t) / theta + u123t2 /
t2 - u123tt) * (2 * theta * (u123tp1^2) + 3 * u1ttm1 * u2ttm1 * u123tp1 *
u3ttm1 - 6 * tim1 * theta * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 +
(tm1 * u123t2) / theta + (t2m1 * u123t2) / theta + (2 * tm1 * t2m1 *
u123t2) / theta + ((theta + theta * u123t)^2) * lu1 - 3 * ccc * u2ttm1 *
u123tp1 * u3ttm1 * lu1 + 3 * tim1 * t2 * u1tt * u2ttm1 * u123tp1 *
u3ttm1 * lu1 + tm1 * t2m1 * u123t2 * lu1 + 2 * tm1 * t2m1 * u1t * u1tt *
u2t2 * u3t2 * lu1 + ((theta + theta * u123t)^2) * lu2 - 3 * ccc * u2ttm1 *
u123tp1 * u3ttm1 * lu2 + 3 * ccc * u2tt * u123tp1 * u3ttm1 * lu2 + tm1 *
t2m1 * u123t2 * lu2 + 2 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t2 * lu2 +
((theta + theta * u123t)^2) * lu3 - 3 * ccc * u2ttm1 * u123tp1 * u3ttm1 *
lu3 + tm1 * t2m1 * u123t2 * lu3 + 3 * ccc * u2ttm1 * u123tp1 * u3tt * lu3 +
2 * tm1 * t2m1 * u1t2 * u2t2 * u3t * u3tt * lu3 + u123tp1 * ((-1 - u123t) *
l1u123t + (1 - theta) * theta * aaa) + (-2 + it) * tim1 * t2 * u123t2 *
(-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) - 3 * ccc * u2ttm1 *
u123tp1 * u3ttm1 * (-(l1u123t / t2) + ((-2 + it) * aaa) / u123tp1)) -
theta * ((-3 * u123t) / t2 - (2 * u123t2) / (theta^3) - u1tt * lu1 +
u1tt * u2tt * lu1 + (3 * u1tt * u2t * u3t * lu1) / theta + (2 * u1t *
u1tt * u2t2 * u3t2 * lu1) / t2 - u1tt * u2t * u3tt * lu1 - u2tt * lu2 +
u1tt * u2tt * lu2 + (3 * u1t * u2tt * u3t * lu2) / theta + (2 * u1t2 *
u2t * u2tt * u3t2 * lu2) / t2 - u1t * u2tt * u3tt * lu2 - u123tt * lu3 +
(3 * u123tt * lu3) / theta + (2 * u1t2 * u2t2 * u3t * u3tt * lu3) / t2) *
(2 * theta * (u123tp1^2) + 3 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 -
6 * tim1 * theta * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 + (tm1 * u123t2) /
theta + (t2m1 * u123t2) / theta + (2 * tm1 * t2m1 * u123t2) / theta +
((theta + theta * u123t)^2) * lu1 - 3 * ccc * u2ttm1 * u123tp1 * u3ttm1 *
lu1 + 3 * tim1 * t2 * u1tt * u2ttm1 * u123tp1 * u3ttm1 * lu1 + tm1 *
t2m1 * u123t2 * lu1 + 2 * tm1 * t2m1 * u1t * u1tt * u2t2 * u3t2 * lu1 +
((theta + theta * u123t)^2) * lu2 - 3 * ccc * u2ttm1 * u123tp1 * u3ttm1 *
lu2 + 3 * ccc * u2tt * u123tp1 * u3ttm1 * lu2 + tm1 * t2m1 * u123t2 *
lu2 + 2 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t2 * lu2 +
((theta + theta * u123t)^2) * lu3 - 3 * ccc * u2ttm1 * u123tp1 *
u3ttm1 * lu3 + tm1 * t2m1 * u123t2 * lu3 + 3 * ccc * u2ttm1 * u123tp1 *
u3tt * lu3 + 2 * tm1 * t2m1 * u1t2 * u2t2 * u3t * u3tt * lu3 + u123tp1 *
((-1 - u123t) * l1u123t + (1 - theta) * theta * aaa) + (-2 + it) * tim1 *
t2 * u123t2 * (-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) - 3 * ccc *
u2ttm1 * u123tp1 * u3ttm1 * (-(l1u123t / t2) + ((-2 + it) * aaa) /
u123tp1)) + theta * (2 - u1tt - u2tt + u1tt * u2tt + (3 * u123t) / theta +
u123t2 / t2 - u123tt) * (2 * (u123tp1^2) - 6 * tim1 * u1ttm1 * u2ttm1 *
u123tp1 * u3ttm1 + (6 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1) / theta -
(tm1 * u123t2) / t2 + (3 * u123t2) / theta + (4 * tm1 * u123t2) / theta -
(t2m1 * u123t2) / t2 - (2 * tm1 * t2m1 * u123t2) / t2 + (2 * t2m1 *
u123t2) / theta + 3 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 * lu1 - 6 *
tim1 * theta * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 * lu1 - 6 * u1tt *
u2ttm1 * u123tp1 * u3ttm1 * lu1 + 12 * tim1 * theta * u1tt * u2ttm1 *
u123tp1 * u3ttm1 * lu1 + 2 * tm1 * u123t2 * lu1 + t2m1 * u123t2 * lu1 +
4 * tm1 * u1t * u1tt * u2t2 * u3t2 * lu1 + (2 * tm1 * u1t * u1tt * u2t2 *
u3t2 * lu1) / theta + 2 * t2m1 * u1t * u1tt * u2t2 * u3t2 * lu1 + (2 *
t2m1 * u1t * u1tt * u2t2 * u3t2 * lu1) / theta + (4 * tm1 * t2m1 * u1t *
u1tt * u2t2 * u3t2 * lu1) / theta + 6 * tim1 * t2 * u1tt * u2ttm1 *
u123tp1 * u3ttm1 * (lu1^2) + 4 * tm1 * t2m1 * u1t * u1tt * u2t2 * u3t2 *
(lu1^2) + 2 * tm1 * t2m1 * ((1 - u1)^(2 * theta)) * u2t2 * u3t2 * (lu1^2) +
3 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 * lu2 - 6 * tim1 * theta * u1ttm1 *
u2ttm1 * u123tp1 * u3ttm1 * lu2 - 6 * u1ttm1 * u2tt * u123tp1 * u3ttm1 *
lu2 + 12 * tim1 * theta * u1ttm1 * u2tt * u123tp1 * u3ttm1 * lu2 +
2 * tm1 * u123t2 * lu2 + t2m1 * u123t2 * lu2 + 4 * tm1 * u1t2 * u2t *
u2tt * u3t2 * lu2 + (2 * tm1 * u1t2 * u2t * u2tt * u3t2 * lu2) / theta +
2 * t2m1 * u1t2 * u2t * u2tt * u3t2 * lu2 + (2 * t2m1 * u1t2 * u2t *
u2tt * u3t2 * lu2) / theta + (4 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t2 *
lu2) / theta + 3 * tim1 * t2 * u1tt * u2ttm1 * u123tp1 * u3ttm1 * lu1 *
lu2 + 3 * ccc * u2tt * u123tp1 * u3ttm1 * lu1 * lu2 - 6 * tim1 * t2 *
u1tt * u2tt * u123tp1 * u3ttm1 * lu1 * lu2 + 2 * tm1 * t2m1 * u1t * u1tt *
u2t2 * u3t2 * lu1 * lu2 + 2 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t2 *
lu1 * lu2 + 8 * tm1 * t2m1 * u1t * u1tt * u2t * u2tt * u3t2 * lu1 * lu2 +
6 * ccc * u2tt * u123tp1 * u3ttm1 * (lu2^2) + 4 * tm1 * t2m1 * u1t2 *
u2t * u2tt * u3t2 * (lu2^2) + 2 * tm1 * t2m1 * u1t2 *
((1 - u2)^(2 * theta)) * u3t2 * (lu2^2) + 3 * u1ttm1 * u2ttm1 * u123tp1 *
u3ttm1 * lu3 - 6 * tim1 * theta * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 *
lu3 + 2 * tm1 * u123t2 * lu3 + t2m1 * u123t2 * lu3 - 6 * u1ttm1 * u2ttm1 *
u123tp1 * u3tt * lu3 + 12 * tim1 * theta * u1ttm1 * u2ttm1 * u123tp1 *
u3tt * lu3 + 4 * tm1 * u1t2 * u2t2 * u3t * u3tt * lu3 + (2 * tm1 * u1t2 *
u2t2 * u3t * u3tt * lu3) / theta + 2 * t2m1 * u1t2 * u2t2 * u3t * u3tt *
lu3 + (2 * t2m1 * u1t2 * u2t2 * u3t * u3tt * lu3) / theta + (4 * tm1 *
t2m1 * u1t2 * u2t2 * u3t * u3tt * lu3) / theta + 3 * tim1 * t2 * u1tt *
u2ttm1 * u123tp1 * u3ttm1 * lu1 * lu3 + 2 * tm1 * t2m1 * u1t * u1tt *
u2t2 * u3t2 * lu1 * lu3 + 3 * ccc * u2ttm1 * u123tp1 * u3tt * lu1 * lu3 -
6 * tim1 * t2 * u1tt * u2ttm1 * u123tp1 * u3tt * lu1 * lu3 + 2 * tm1 *
t2m1 * u1t2 * u2t2 * u3t * u3tt * lu1 * lu3 + 8 * tm1 * t2m1 * u1t *
u1tt * u2t2 * u3t * u3tt * lu1 * lu3 + 3 * ccc * u2tt * u123tp1 * u3ttm1 *
lu2 * lu3 + 2 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t2 * lu2 * lu3 + 3 *
ccc * u2ttm1 * u123tp1 * u3tt * lu2 * lu3 - 6 * ccc * u2tt * u123tp1 *
u3tt * lu2 * lu3 + 2 * tm1 * t2m1 * u1t2 * u2t2 * u3t * u3tt * lu2 * lu3 +
8 * tm1 * t2m1 * u1t2 * u2t * u2tt * u3t * u3tt * lu2 * lu3 + 6 * ccc *
u2ttm1 * u123tp1 * u3tt * (lu3^2) + 4 * tm1 * t2m1 * u1t2 * u2t2 * u3t *
u3tt * (lu3^2) + 2 * tm1 * t2m1 * u1t2 * u2t2 * ((1 - u3)^(2 * theta)) *
(lu3^2) + 4 * theta * u123tp1 * (u1tt * u2t * u3t * lu1 + u1t * u2tt *
u3t * lu2 + u123tt * lu3) + 3 * u1ttm1 * u2ttm1 * u3ttm1 * bbb - 6 * tim1 *
theta * u1ttm1 * u2ttm1 * u3ttm1 * (u1tt * u2t * u3t * lu1 + u1t * u2tt *
u3t * lu2 + u123tt * lu3) - 3 * ccc * u2ttm1 * u3ttm1 * lu1 * bbb + 3 *
tim1 * t2 * u1tt * u2ttm1 * u3ttm1 * lu1 * bbb - 3 * ccc * u2ttm1 *
u3ttm1 * lu2 * bbb + 3 * ccc * u2tt * u3ttm1 * lu2 * bbb - 3 * ccc *
u2ttm1 * u3ttm1 * lu3 * bbb + 3 * ccc * u2ttm1 * u3tt * lu3 * bbb + 2 *
(theta + theta * u123t) * lu1 * (u123tp1 + theta * u1tt * u2t * u3t *
lu1 + theta * u1t * u2tt * u3t * lu2 + theta * u123tt * lu3) + 2 *
(theta + theta * u123t) * lu2 * (u123tp1 + theta * u1tt * u2t * u3t *
lu1 + theta * u1t * u2tt * u3t * lu2 + theta * u123tt * lu3) + 2 *
(theta + theta * u123t) * lu3 * (u123tp1 + theta * u1tt * u2t * u3t *
lu1 + theta * u1t * u2tt * u3t * lu2 + theta * u123tt * lu3) + (u1tt *
u2t * u3t * lu1 + u1t * u2tt * u3t * lu2 + u123tt * lu3) * ((-1 - u123t) *
l1u123t + (1 - theta) * theta * aaa) - (-2 + it) * u123t2 *
(-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) - tim1 * u123t2 *
(-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) + 2 * (-2 + it) * tim1 *
theta * u123t2 * (-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) + 2 *
(-2 + it) * tim1 * t2 * u1t * u1tt * u2t2 * u3t2 * lu1 * (-(l1u123t / t2) +
((-3 + it) * aaa) / u123tp1) + 2 * (-2 + it) * tim1 * t2 * u1t2 * u2t *
u2tt * u3t2 * lu2 * (-(l1u123t / t2) + ((-3 + it) * aaa) / u123tp1) + 2 *
(-2 + it) * tim1 * t2 * u1t2 * u2t2 * u3t * u3tt * lu3 * (-(l1u123t / t2) +
((-3 + it) * aaa) / u123tp1) + 3 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 *
(-(l1u123t / t2) + ((-2 + it) * aaa) / u123tp1) - 6 * tim1 * theta *
u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 * (-(l1u123t / t2) + ((-2 + it) * aaa) /
u123tp1) + 3 * tim1 * t2 * u1tt * u2ttm1 * u123tp1 * u3ttm1 * lu1 *
(-(l1u123t / t2) + ((-2 + it) * aaa) / u123tp1) + 3 * ccc * u2tt *
u123tp1 * u3ttm1 * lu2 * (-(l1u123t / t2) + ((-2 + it) * aaa) / u123tp1) +
3 * ccc * u2ttm1 * u123tp1 * u3tt * lu3 * (-(l1u123t / t2) + ((-2 + it) *
aaa) / u123tp1) - 3 * ccc * u2ttm1 * u3ttm1 * bbb * (-(l1u123t / t2) +
((-2 + it) * aaa) / u123tp1) + u123tp1 * (-(u1tt * u2t * u3t * lu1) -
u1t * u2tt * u3t * lu2 - u123tt * lu3 - l1u123t * bbb + (1 - theta) *
(u1tt * u2t * u3t * lu1 + u1t * (u2tt * u3t * lu2 + u2t * u3tt * lu3)) -
theta * aaa + (1 - theta) * theta * (u1tt * u2t * u3t * (lu1^2) + 2 *
u1tt * lu1 * (u2tt * u3t * lu2 + u2t * u3tt * lu3) + u1t * (u2tt * u3t *
(lu2^2) + 2 * u2tt * u3tt * lu2 * lu3 + u2t * u3tt * (lu3^2)))) + (3 *
tim1 * u1ttm1 * u2ttm1 * u123tp1 * u3ttm1 * (-2 * l1u123t + (theta * bbb) /
u123tp1 + (theta * aaa) / u123tp1 - (t2 * t2m1 * ((u1tt * u2t * u3t *
lu1 + u1t * (u2tt * u3t * lu2 + u2t * u3tt * lu3))^2)) / ((u1tt + u2tt -
u1tt * u2tt + u3tt - u1tt * u3tt - u2tt * u3tt + u1tt * u2tt * u3tt)^2) +
(t2 * t2m1 * (u1tt * u2t * u3t * (lu1^2) + 2 * u1tt * lu1 * (u2tt * u3t *
lu2 + u2t * u3tt * lu3) + u1t * (u2tt * u3t * (lu2^2) + 2 * u2tt * u3tt *
lu2 * lu3 + u2t * u3tt * (lu3^2)))) / (u1tt + u2tt - u1tt * u2tt + u3tt -
u1tt * u3tt - u2tt * u3tt + u1tt * u2tt * u3tt))) / theta - ((-2 + it) *
tim1 * u123t2 * (-2 * l1u123t + (theta * bbb) / u123tp1 + (theta * aaa) /
u123tp1 - (t2 * (-1 + 3 * theta) * ((u1tt * u2t * u3t * lu1 + u1t *
(u2tt * u3t * lu2 + u2t * u3tt * lu3))^2)) / ((u1tt + u2tt - u1tt *
u2tt + u3tt - u1tt * u3tt - u2tt * u3tt + u1tt * u2tt * u3tt)^2) + (t2 *
(-1 + 3 * theta) * (u1tt * u2t * u3t * (lu1^2) + 2 * u1tt * lu1 * (u2tt *
u3t * lu2 + u2t * u3tt * lu3) + u1t * (u2tt * u3t * (lu2^2) + 2 * u2tt *
u3tt * lu2 * lu3 + u2t * u3tt * (lu3^2)))) / (u1tt + u2tt - u1tt * u2tt +
u3tt - u1tt * u3tt - u2tt * u3tt + u1tt * u2tt * u3tt))) / theta)) /
((theta^3) * ((-2 + u1tt + u2tt - u1tt * u2tt - (3 * u123t) / theta -
u123t2 / t2 + u123tt)^2))
}
|
is.not.informative.variable <- function(x, includeNA = TRUE){
if(includeNA && sum(is.na(x)) > 0){
# recode missing values as an explicit "NA" factor level
x = as.character(x)
x[is.na(x)] <- "NA"
x = as.factor(x)
}
!(
sum(!is.na(x)) > 0 &&
!(is.numeric(x) && (var(x, na.rm = TRUE) %in% c(0, NA))) &&
!(!is.numeric(x) && (length(levels(factor(x))) < 2))
)
}
emptyVar <- function(dataset){
names(dataset)[unlist(lapply(dataset, function(x){sum(!is.na(x)) == 0}))]
}
uselessVar <- function(dataset){
names(dataset)[sapply(dataset, function(x){length(levels(as.factor(x)))}) == 1]
}
|
omniRLRT_fast = function(y, X,K1, K2, N = 10000, length.rho = 200, length.lambda = 21){
method = "REML"
Lambdas = exp(seq(from = -12, to = 12, length.out = length.lambda))
all_rho = seq(from = 0,to = 1, length.out = length.rho)
n = length(y)
if (is.null(X)){
X1 = matrix(1, nrow=n)
px = 1
}else{
X1 = cbind(1,X)
px = ncol(X1)
}
XX = MatMult_C(t(X1),X1)
P0 = diag(n)- MatMult_C(MatMult_C(X1,ginv(XX)),t(X1))
eP = Eigen_C(P0)
A = eP$vector[,eP$values > 1e-10]
eK1 = Eigen_C(K1)
wK1 = which(eK1$values > 1e-10)
if (length(wK1) == 1){
phi1 = eK1$vectors[,wK1] * sqrt(eK1$values[wK1])
}else{
phi1 = t(t(eK1$vectors[,wK1])*sqrt(eK1$values[wK1]))
}
eK2 = Eigen_C(K2)
wK2 = which(eK2$values > 1e-10)
if (length(wK2) == 1){
phi2 = eK2$vectors[,wK2] * sqrt(eK2$values[wK2])
}else{
phi2 = t(t(eK2$vectors[,wK2])*sqrt(eK2$values[wK2]))
}
group= rep(1,n)
fit1 = lme(y~X, random = list(group=pdIdent(~-1+phi1), group = pdIdent(~-1+phi2)))
fit0 = lm(y~X)
LR = max(0, 2*(logLik(fit1, REML = TRUE) - logLik(fit0, REML = TRUE)))
if (LR <= 0){
p.dir = p.au1 = p.aud = 1
}else{
LR0_allRho = matrix(NA, N, length.rho)
w = matrix(rnorm(N*(n-px)), n-px,N)
LR0_fixRho = matrix(NA, N, length.lambda)
rho = 0
K = K2
k = length(wK2)
xi = eK2$values[wK2]
AKA = MatMult_C(MatMult_C(t(A),K),A)
eV = Eigen_C(AKA)
U_1 = eV$vectors
mu = eV$values[eV$values > 1e-10]
mx = max(mu, xi)  # scale both eigenvalue sets by the same maximum
mu = mu/mx
xi = xi/mx
W1 = MatMult_C(A,U_1)
w.double <- w^2
w1 = w.double[1:k,]
w2 = ColSum_C((w.double)[-(1:k),])
if (length(mu) < k){mu = c(mu,rep(0, k - length(mu)))}
if (length(xi) < k){xi = c(xi,rep(0, k - length(xi)))}
LR0_fixRho <- LR0_fixRho_C(Lambdas,
mu,
w1,
w2,
n-px)
LR0_allRho[,1] = MatrixRowMax_C(LR0_fixRho)
LR0_allRho <- doubleloop(K1,
K2,
P0,
A,
U_1,
w,
Lambdas,
n-px,
all_rho,
LR0_allRho)
LR0 = MatrixRowMax_C(LR0_allRho)
LR0 = ifelse(LR0 > 0, LR0, 0)
p.dir = mean(LR < LR0)
p.au1 = getp_au1(null = LR0, LR = LR)$p
p.aud= getp_aud_estimate_pi_first(null = LR0, LR = LR)$p
}
out = list(p.dir = p.dir, p.au1 = p.au1, p.aud = p.aud, LR = LR)
return(out)
}
|
nobs.pot <- function(object, ...) {
return(object$nat)
}
vcov.pot <- function(object, complete = FALSE, ...) {
vc <- object$var.cov
free_pars <- names(coef(object, complete = FALSE))
dimnames(vc) <- list(free_pars, free_pars)
if (complete) {
all_pars <- names(coef(object, complete = TRUE))
np <- length(all_pars)
dummy <- matrix(NA, np, np)
which_free <- which(all_pars %in% free_pars)
dummy[which_free, which_free] <- vc
vc <- dummy
dimnames(vc) <- list(all_pars, all_pars)
}
return(vc)
}
|
context("PV")
test_that("PV correctly produces values, no fv", {
check <- 68.14
expect_true(PV(0.1, 12, -10) == check)
check <- c(68.14, 134.77)
df <- data.frame(rate = c(0.1, 0.1), nper = c(12, 24), pmt = c(-10, -15))
expect_true(identical(PV(df$rate, df$nper, df$pmt), check))
})
test_that("PV correctly produces values, fv", {
check <- 66.54
expect_true(PV(0.1, 12, -10, 5) == check)
check <- c(66.54, 134.26)
df <- data.frame(rate = c(0.1, 0.1), nper = c(12, 24), pmt = c(-10, -15), fv = c(5, 5))
expect_true(identical(PV(df$rate, df$nper, df$pmt, df$fv), check))
})
test_that("PV errors given incorrect inputs", {
expect_error(PV(0, 12, -10))
expect_error(PV(0.1, 0, -10))
expect_error(PV(0.1, 12, 10))
expect_error(PV("0", 12, -10))
expect_error(PV(0.1, "12", -10))
expect_error(PV(0.1, 12, "-10"))
expect_error(PV(0.1, 12, -10, "5"))
})
|
update_r = function(model){
if(model$update_rule == "sgd"){
update_sgd(model)
}else if(model$update_rule == "adagrad"){
update_adagrad(model)
}else{
stop("update_rule unknown")
}
}
update_sgd = function(model){
if(!is.null(model$clipping)){
clipping = function(x){
x[is.nan(x)] = runif(sum(is.nan(x)),-1,1)
x[x > model$clipping] = model$clipping
x[x < -model$clipping] = - model$clipping
return(x)
}
model$recurrent_synapse_update = lapply(model$recurrent_synapse_update,clipping)
model$time_synapse_update = lapply(model$time_synapse_update,clipping)
model$bias_synapse_update = lapply(model$bias_synapse_update, clipping)
}
for(i in seq(length(model$time_synapse))){
model$time_synapse[[i]] <- model$time_synapse[[i]] + model$time_synapse_update[[i]]
model$bias_synapse[[i]] <- model$bias_synapse[[i]] + model$bias_synapse_update[[i]]
}
for(i in seq(length(model$recurrent_synapse))){
model$recurrent_synapse[[i]] <- model$recurrent_synapse[[i]] + model$recurrent_synapse_update[[i]]
}
model$time_synapse_update = lapply(model$time_synapse_update,function(x){x* model$momentum})
model$bias_synapse_update = lapply(model$bias_synapse_update,function(x){x* model$momentum})
model$recurrent_synapse_update = lapply(model$recurrent_synapse_update,function(x){x* model$momentum})
return(model)
}
update_adagrad = function(model){
if(!is.null(model$clipping)){
clipping = function(x){
x[is.nan(x)] = runif(sum(is.nan(x)),-1,1)
x[x > model$clipping] = model$clipping
x[x < -model$clipping] = - model$clipping
return(x)
}
model$recurrent_synapse_update = lapply(model$recurrent_synapse_update,clipping)
model$time_synapse_update = lapply(model$time_synapse_update,clipping)
model$bias_synapse_update = lapply(model$bias_synapse_update, clipping)
}
if(is.null(model$recurrent_synapse_update_old)){
model$recurrent_synapse_update_old = lapply(model$recurrent_synapse_update,function(x){x*0})
model$time_synapse_update_old = lapply(model$time_synapse_update,function(x){x*0})
}
for(i in seq(length(model$time_synapse))){
model$time_synapse_update_old[[i]] <- model$time_synapse_update_old[[i]] + model$time_synapse_update[[i]]
}
for(i in seq(length(model$recurrent_synapse))){
model$recurrent_synapse_update_old[[i]] <- model$recurrent_synapse_update_old[[i]] + model$recurrent_synapse_update[[i]]
}
for(i in seq_along(model$time_synapse)){
# normalize each step by the running sum of past updates; 1e-9 avoids division by zero
model$time_synapse[[i]] <- model$time_synapse[[i]] + model$learningrate * model$time_synapse_update[[i]] / (model$time_synapse_update_old[[i]] + 1e-9)
model$bias_synapse[[i]] <- model$bias_synapse[[i]] + model$bias_synapse_update[[i]]
}
for(i in seq_along(model$recurrent_synapse)){
model$recurrent_synapse[[i]] <- model$recurrent_synapse[[i]] + model$learningrate * model$recurrent_synapse_update[[i]] / (model$recurrent_synapse_update_old[[i]] + 1e-9)
}
return(model)
}
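# Hedged sketch (not from the original package): a minimal mock "model" list
# carrying only the fields update_sgd() touches, to show one clip/update/
# momentum cycle; wrapped in `if (FALSE)` so it never runs on source.
if (FALSE) {
  model <- list(
    update_rule = "sgd",
    clipping = 1,
    momentum = 0.9,
    time_synapse = list(matrix(0, 2, 2)),
    bias_synapse = list(rep(0, 2)),
    recurrent_synapse = list(matrix(0, 2, 2)),
    time_synapse_update = list(matrix(0.5, 2, 2)),
    bias_synapse_update = list(rep(0.5, 2)),
    recurrent_synapse_update = list(matrix(2, 2, 2))  # clipped to 1 before use
  )
  model <- update_r(model)
  model$time_synapse[[1]]         # weights moved by the (clipped) update
  model$time_synapse_update[[1]]  # update scaled by momentum for the next step
}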
|
NULL
check_annotations <- function(x) {
if (!length(x))
return(x)
if (is.vector(x) && !is.list(x))
x <- cbind(x)
if (!inherits(x, c("data.frame", "matrix")))
stop("-annotations- must be either an object of class -data.frame- or -matrix- (is of class ",
class(x),").")
test <- apply(x, 2, typeof)
test <- which(!sapply(test, `%in%`, table = c("integer", "double")))
if (length(test))
stop("All annotation columns (but the first one) must be either -integer- or -double-.",
" The following columns are not: ", paste(test, collapse=", "), ".")
node_annotations <- apply(x, 2, as.integer)
test <- which(apply(node_annotations, 1, function(y) any(!(y %in% c(0, 1, 9, NA)))))
if (length(test))
stop("The following rows of -annotations- have values different from c(0, 1, 9, NA): ",
paste(test, collapse=", "), ".")
node_annotations[is.na(node_annotations)] <- 9L
fun_names <- colnames(node_annotations)
if (!length(fun_names))
fun_names <- sprintf("fun%03i", seq_len(ncol(node_annotations)))
dimnames(node_annotations) <- list(
1L:nrow(node_annotations),
fun_names
)
node_annotations
}
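# Hedged sketch (not part of the original source): check_annotations() on a
# small annotation matrix; NA entries are recoded as the 9L "missing" state
# and column names are kept (or generated). Wrapped in `if (FALSE)` so it
# never runs on source.
if (FALSE) {
  ann <- cbind(fun001 = c(0, 1, NA), fun002 = c(9, 0, 1))
  check_annotations(ann)  # 3 x 2 integer matrix; the NA becomes 9
}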
new_aphylo <- function(tree, tip.annotation, ...) UseMethod("new_aphylo")
new_aphylo.phylo <- function(
tree,
tip.annotation,
node.annotation = NULL,
tip.type = NULL,
node.type = NULL,
...
) {
tip.annotation <- check_annotations(tip.annotation)
node.annotation <- check_annotations(node.annotation)
if (nrow(tip.annotation) != length(tree$tip.label))
stop("The number of `tip.annotation` differs with the number of tips in `tree`.",
call. = FALSE)
if (length(node.annotation) && (nrow(node.annotation) != tree$Nnode))
stop("The number of `node.annotation` differs with the number of internal nodes in `tree`.",
call. = FALSE)
if (is.null(tip.type)) {
tip.type <- integer(ape::Ntip(tree))
} else {
tip.type <- as.vector(tip.type)
if (length(tip.type) != ape::Ntip(tree))
stop(
"The provided -tip.type- has not the same length as the number of tips",
" in the tree. The actual length should be ", ape::Ntip(tree),
call. = FALSE)
}
if (is.null(node.type)) {
node.type <- integer(ape::Nnode(tree))
} else {
node.type <- as.vector(node.type)
if (length(node.type) != ape::Nnode(tree))
stop(
"The provided -node.type- has not the same length as the number of tips",
" in the tree. The actual length should be ", ape::Nnode(tree),
call. = FALSE)
}
as_aphylo(
tip.annotation = tip.annotation,
node.annotation = node.annotation,
tree = tree,
tip.type = tip.type,
node.type = node.type,
checks = FALSE
)
}
as_aphylo <- function(
tip.annotation,
node.annotation,
tree,
tip.type,
node.type,
checks = TRUE
) {
if (checks) {
stopifnot(is.matrix(tip.annotation))
stopifnot(inherits(tree, "phylo"))
stopifnot(nrow(tip.annotation) == length(tree$tip.label))
if (length(node.annotation)) {
stopifnot(is.matrix(node.annotation))
stopifnot(nrow(node.annotation) == tree$Nnode)
}
if (length(tip.type))
stopifnot(length(tip.type) == ape::Ntip(tree))
if (length(node.type))
stopifnot(length(node.type) == ape::Nnode(tree))
}
if (!length(node.annotation))
node.annotation <- matrix(
9L, nrow = tree$Nnode,
ncol = ncol(tip.annotation),
dimnames = list(
(nrow(tip.annotation) + 1):(nrow(tip.annotation) + tree$Nnode),
colnames(tip.annotation)
)
)
if (!length(tip.type))
tip.type <- integer(ape::Ntip(tree))
if (!length(node.type))
node.type <- integer(ape::Nnode(tree))
offspring <- list_offspring(tree)
pseq <- ape::postorder(tree)
pseq <- c(tree$edge[pseq, 2], length(tree$tip.label) + 1L)
pseq_reduced <- reduce_pseq(pseq, rbind(tip.annotation, node.annotation), offspring)
structure(
c(
list(tree = tree),
list(tip.annotation = tip.annotation),
list(node.annotation = node.annotation),
list(offspring = offspring),
list(pseq = pseq),
list(reduced_pseq = pseq_reduced),
list(Ntips.annotated = length(intersect(1:nrow(tip.annotation), pseq_reduced))),
list(tip.type = tip.type),
list(node.type = node.type)
),
class = c("aphylo")
)
}
print.aphylo <- function(x, ...) {
print(x$tree)
cat("\n Tip (leafs) annotations:\n")
print(utils::head(x$tip.annotation))
if (nrow(x$tip.annotation) > 6)
cat("\n...(", nrow(x$tip.annotation) - 6, " obs. omitted)...\n\n", sep="")
if (length(x$node.annotation)) {
cat("\n Internal node annotations:\n")
print(utils::head(x$node.annotation))
if (nrow(x$node.annotation) > 6)
cat("\n...(", nrow(x$node.annotation) - 6, " obs. omitted)...\n\n", sep="")
} else {
cat("\nNo annotations for internal nodes.")
}
invisible(x)
}
summary.aphylo <- function(object, ...) {
ans <- list()
for (x in c("tip.annotation", "node.annotation")) {
ans[[x]] <- lapply(1:ncol(object[[x]]), function(i) table(object[[x]][,i]))
if (!inherits(ans[[x]], "list"))
ans[[x]] <- list(ans[[x]][,1,drop=TRUE])
ans[[x]] <- do.call(
rbind,
lapply(ans[[x]], function(x)
data.frame(
`0` = unname(x["0"]),
`1` = unname(x["1"]),
`NA` = unname(x["9"]),
check.names = FALSE
)
)
)
ans[[x]][is.na(ans[[x]])] <- 0
rownames(ans[[x]]) <- colnames(object[[x]])
cat("\nDistribution of functions in", x, ":\n")
print(ans[[x]])
}
invisible(ans)
}
is.aphylo <- function(x) inherits(x, "aphylo")
is.multiAphylo <- function(x) inherits(x, "multiAphylo")
|
library(testthat)
test_check("EpiModel")
|
test_that("lm2 works", {
mtcars$cyl <- factor(mtcars$cyl)
m <- lm(mpg ~ hp * cyl, data = mtcars)
x <- model.matrix(m)
y <- mtcars$mpg
m2 <- JWileymisc:::lm2(mpg ~ 1 + cyl + hp:cyl, data = mtcars,
designMatrix = x[, -2, drop = FALSE],
yObserved = y)
expect_true(inherits(m2, "lm"))
expect_null(m2[["x"]])
expect_null(m2[["y"]])
expect_false(is.null(m2[["qr"]]))
m2 <- JWileymisc:::lm2(mpg ~ 1 + cyl + hp:cyl, data = mtcars,
designMatrix = x[, -2, drop = FALSE],
yObserved = y,
x = TRUE, y = TRUE, qr = FALSE)
expect_true(inherits(m2, "lm"))
expect_is(m2[["x"]], "matrix")
expect_is(m2[["y"]], "numeric")
expect_null(m2[["qr"]])
m2 <- JWileymisc:::lm2(mpg ~ 0, data = mtcars,
designMatrix = x[, -2, drop = FALSE],
yObserved = y,
offset = mtcars$hp)
expect_true(inherits(m2, "lm"))
})
|
"ctsub"<-
function(x, y, z)
{
junk <- .Fortran("f_ctsub",
length(x),
as.double(x),
as.double(y),
as.double(z),
ans=double(length(x)),
PACKAGE = "bootstrap")
return(junk$ans)
}
|
diet_score_fun <-
function(FVCDFRU, FVCDSAL, FVCDPOT, FVCDCAR, FVCDVEG, FVCDJUI, DHH_SEX) {
total_fruitveg <-
if_else2(!is.na(FVCDFRU) & !is.na(FVCDSAL) & !is.na(FVCDPOT) &
!is.na(FVCDCAR) & !is.na(FVCDVEG), FVCDFRU + FVCDSAL +
FVCDPOT + FVCDCAR + FVCDVEG, NA)
max_fruitveg <-
if_else2(is.na(total_fruitveg), NA,
if_else2(total_fruitveg>8, 8, total_fruitveg))
daily_pot_limit <-
if_else2(DHH_SEX==1, 1,
if_else2(DHH_SEX==2, 5/7, NA))
FVCDPOT_high <-
if_else2(is.na(FVCDPOT), NA,
if_else2(FVCDPOT>=(daily_pot_limit), 1, 0))
FVCDCAR_nil <-
if_else2(is.na(FVCDCAR), NA,
if_else2(FVCDCAR==0, 1, 0))
FVCDJUI_high <-
if_else2(is.na(FVCDJUI), NA,
if_else2(FVCDJUI <=1, 0, FVCDJUI - 1))
diet_raw_score <- if_else2(!is.na(max_fruitveg) & !is.na(FVCDPOT_high) &
!is.na(FVCDCAR_nil) & !is.na(FVCDJUI_high), 2 +
max_fruitveg - (2*FVCDPOT_high) - (2*FVCDCAR_nil) -
(2*FVCDJUI_high), NA)
diet_score <- if_else2(diet_raw_score < 0, 0,
if_else2(diet_raw_score > 10, 10,
if_else2(!is.na(diet_raw_score), diet_raw_score,
tagged_na("b"))))
return(diet_score)
}
diet_score_fun_cat <-
function(diet_score){
if_else2(diet_score >= 0 & diet_score < 2, 1,
if_else2(diet_score >= 2 & diet_score < 8, 2,
if_else2(diet_score >= 8 & diet_score <= 10, 3,
# `==` against a tagged NA is always NA, so test it with haven::is_tagged_na()
if_else2(haven::is_tagged_na(diet_score, "a"), "NA(a)", "NA(b)"))))
}
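# Hedged usage sketch (not from the original source): scoring one respondent.
# Assumes if_else2() and tagged_na() are available as in the surrounding
# package (e.g. cchsflow and haven export helpers with these names).
# Wrapped in `if (FALSE)` so it never runs on source.
if (FALSE) {
  library(cchsflow)  # for if_else2() (assumption)
  library(haven)     # for tagged_na()/is_tagged_na()
  score <- diet_score_fun(FVCDFRU = 2, FVCDSAL = 1, FVCDPOT = 0.5,
                          FVCDCAR = 1, FVCDVEG = 2, FVCDJUI = 1,
                          DHH_SEX = 2)
  score                      # numeric score on the 0-10 scale
  diet_score_fun_cat(score)  # category 1-3
}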
|
predict.modelKriging <- function(object,x,...){
ret <- modelKrigingInternalPredictor(object,x)
psi <- ret$psi
res <- list(y=ret$y)
if (object$predAll){
Psinv <- object$Psinv
lambda <- object$lambda
SigmaSqr <- object$SSQ
if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","square","diffusion"))){
if(object$isIndefinite){
if(!object$indefiniteRepair){
Psinv <- object$PsinvA
}
}
}
if(object$reinterpolate & lambda > 0){
SigmaSqr <- object$SSQReint
Psinv <- object$PsinvReint
lambda <- 0
}
SSqr <- SigmaSqr*(1+lambda-diag(psi%*%Psinv%*%t(psi)))
s <- sqrt(abs(SSqr))
res$s <- as.numeric(s)
}
res
}
simulate.modelKriging <- function(object,nsim=1,seed=NA,xsim,conditionalSimulation=TRUE,returnAll=FALSE,...){
if (!is.na(seed)){
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
runif(1)
R.seed <- get(".Random.seed", envir = .GlobalEnv)
set.seed(seed)
on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
}
len <- length(xsim)
noise <- matrix(rnorm(len*nsim),len, nsim)
res <- computeCorrelationMatrix(object,xsim)
covar <- res$psi
if(conditionalSimulation){
ret <- modelKrigingInternalPredictor(object,xsim)
y <- ret$y
psi <- ret$psi
covarDifference <- covar - psi %*% object$Psinv %*% t(psi)
eigv <- eigen(object$SSQ * covarDifference, symmetric = TRUE)
covarDecomposed <- eigv$vectors %*% diag(sqrt(abs(eigv$values))) %*% eigv$vectors
ysim <- covarDecomposed %*% noise
y <- matrix(y,len,nsim) + ysim
}else{
eigv <- eigen(object$SSQ * covar, symmetric = TRUE)
covarDecomposed <- eigv$vectors %*% diag(sqrt(abs(eigv$values))) %*% eigv$vectors
y <- object$mu + covarDecomposed %*% noise
}
res$y <- y
if(returnAll)
return(res)
else
return(y)
}
modelKrigingInternalPredictor <- function(object,x){
if(!is.list(x))x<-list(x)
xo <- object$x
Psinv <- object$Psinv
n <- length(xo)
mu <- object$mu
yMu <- object$yMu
psi <- matrix(1,length(x),n)
fundist <- object$distanceFunction
if(is.list(fundist)){
psi <- replicate(length(fundist),psi,simplify=FALSE)
if(object$useDistanceParameters){
indices <- rep(1:length(fundist),sapply(object$distanceParametersLower,length))
}
for(j in 1:length(fundist)){
for (i in 1:n){
if(!object$useDistanceParameters){
psi[[j]][,i] <- distanceVector(xo[[i]],x,fundist[[j]])
}else{
psi[[j]][,i] <- distanceVector(xo[[i]],x,fundist[[j]],object$distanceParameters[indices==j])
}
}
if(object$scaling){
psi[[j]] <- psi[[j]]/object$maximumDistance[[j]]
}
}
psi <- Reduce("+",mapply("*",psi,object$distanceWeights,SIMPLIFY=FALSE))
}else{
for (i in 1:n){
if(!object$useDistanceParameters){
psi[,i] <- distanceVector(xo[[i]],x,fundist)
}else{
psi[,i] <- distanceVector(xo[[i]],x,fundist,object$distanceParameters)
}
}
if(object$scaling){
psi <- psi/object$maximumDistance
}
}
if(object$indefiniteRepair == 3){
rd <- 1/psi
}else if(object$indefiniteRepair == 4){
rd1 <- apply(psi,1,function(x) which(x==min(x)))
}
if(any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){
if(object$indefiniteType=="NSD" & !object$indefiniteRepair){
if(!object$isCNSD)
psi <- psi %*% t(object$A)
}
if(object$indefiniteType=="CNSD" | (object$indefiniteType=="NSD" & object$indefiniteRepair)){
if(!object$isCNSD){
if(object$indefiniteRepair == 1 | object$indefiniteType=="CNSD"){
if(object$indefiniteMethod!="near")
psi <- correctionAugmentedDistanceVector(psi,object,x)
}else if(object$indefiniteRepair == 2){
psi <- psi %*% t(object$A)
add <- diag(psi %*% ginv(object$matNoRep) %*% t(psi))
add <- matrix(add,nrow(psi),nrow(object$matNoRep))
add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T)
psi <- 2*psi - add - add2
rdif <- rowSums(psi<0)>0
if(any(rdif))
psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
}else if(object$indefiniteRepair == 3){
rd1 <- rd / rowSums(rd)
if(any(is.na(rd1))){
nas <- is.na(rd1)
naIndex <- which(nas)
rd1[naIndex] <- (nas / rowSums(nas))[naIndex]
}
psi <- psi %*% t(object$A)
add <- rd1 %*% diag(object$matNoRep)
add <- matrix(add,nrow(psi),nrow(object$matNoRep))
add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T)
psi <- 2*psi - add2 - add
rdif <- rowSums(psi<0)>0
if(any(rdif))
psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
}else if(object$indefiniteRepair == 4){
psi <- psi %*% t(object$A)
diagNN <- diag(object$matNoRep)
meanidx <- function(x){
return(mean(diagNN[x]))
}
meanDiagNN <- sapply(rd1,meanidx)
add <- matrix(meanDiagNN,nrow(psi),nrow(object$matNoRep))
add2 <- matrix(diag(object$matNoRep),nrow(psi),nrow(object$matNoRep),byrow=T)
psi <- 2*psi - add2 - add
rdif <- rowSums(psi<0)>0
if(any(rdif))
psi[rdif,] <- psi[rdif,,drop=FALSE] - apply(psi[rdif,,drop=FALSE],1,min)
}
}
}
}
if((object$indefiniteType=="CNSD" | object$indefiniteType=="NSD") & object$indefiniteMethod=="feature"){
if(!object$isCNSD){
tempx <- split(psi,seq(nrow(psi)))
for (i in 1:n)
psi[,i] <- distanceVector(object$origD[i,],tempx,distanceRealEuclidean)
}
}
if(is.null(object$theta))
psi <- object$corr(psi)
else
psi <- object$corr(psi,object$theta)
if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){
if(object$isIndefinite){
if(!object$indefiniteRepair){
}else{
if(object$indefiniteRepair == 1){
psi <- correctionAugmentedKernelVector(psi,object,x)
}else{
if(object$indefiniteRepair == 2){
div <- diag(psi %*% object$unrepairedAPsinvA %*% t(psi))
div <- diag(1/sqrt(div),nrow(psi))
psi <- div %*% psi %*% object$ADividedSqrtDiagPsi
}else if(object$indefiniteRepair == 3){
rd1 <- rd / rowSums(rd)
if(any(is.na(rd1))){
nas <- is.na(rd1)
naIndex <- which(nas)
rd1[naIndex] <- (nas / rowSums(nas))[naIndex]
}
rd2 <- rd1 %*% object$diagUnrepairedPsi
div <- diag(1/sqrt(as.numeric(rd2)),nrow(psi))
psi <- div %*% psi %*% object$ADividedSqrtDiagPsi
}else if(object$indefiniteRepair == 4){
diagNN <- object$diagUnrepairedPsi
meanidx <- function(x){
return(mean(diagNN[x]))
}
meanDiagNN <- sapply(rd1,meanidx)
div <- diag(1/sqrt(as.numeric(meanDiagNN)),nrow(psi))
psi <- div %*% psi %*% object$ADividedSqrtDiagPsi
}
psi[psi > 1] <- 1
psi[psi < -1] <- -1
}
}
}
}
y <- as.numeric(psi%*%Psinv%*%yMu)+mu
list(y=y,psi=psi)
}
computeCorrelationMatrix <- function(object,x){
if(!is.list(x))x<-list(x)
if(is.null(object$distanceParameters))
object$distanceParameters <- NA
ret <- modelKrigingDistanceCalculation(x,object$distanceFunction,parameters=object$distanceParameters,
NULL,object$scaling,object$combineDistances,object$indefiniteMethod,object$indefiniteType,object$indefiniteRepair,object$distanceParametersLower)
psi <- ret$D
if(is.null(object$theta))
psi <- object$corr(psi)
else
psi <- object$corr(psi,object$theta)
ret$U <- NA
ret$a <- NA
ret$isIndefinite <- NA
ret$origPsi <- NA
if(object$indefiniteType=="PSD" & any(object$indefiniteMethod==c("clip","flip","near","square","diffusion"))){
ret$origPsi <- psi
ret2 <- correctionKernelMatrix(psi,object$indefiniteMethod,object$indefiniteRepair)
ret$a <- ret2$a
ret$U <- ret2$U
ret$A <- ret2$A
ret$isIndefinite <- !ret2$isPSD
psi <- ret2$mat
}
if(object$useLambda){
psi <- psi + diag(object$lambda,length(x))
}
ret$psi <- psi
ret
}
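# Hedged usage sketch (not from the original source): the predict/simulate
# methods above expect a fitted "modelKriging" object; CEGO's modelKriging()
# (assumed API, together with its distancePermutationHamming helper) produces
# one from a list of samples, their observations and a distance function.
# Wrapped in `if (FALSE)` so it never runs on source.
if (FALSE) {
  library(CEGO)
  x <- replicate(20, sample(5), simplify = FALSE)      # random permutations
  y <- sapply(x, function(p) sum(p * 1:5))             # toy objective values
  fit <- modelKriging(x, y, distancePermutationHamming, control = list())
  xnew <- replicate(3, sample(5), simplify = FALSE)
  predict(fit, xnew)                 # kriging predictions at new points
  simulate(fit, nsim = 2, xsim = xnew)  # conditional simulation paths
}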
|
gof.trial.fi <- function(model,breaks=NULL,nc=NULL){
width <- model$meta.data$width
left <- model$meta.data$left
xmat <- model$mr$data
n <- dim(xmat)[1]
xmat$omega <- rep(1,dim(xmat)[1])
xmat$omega[xmat$timesdetected==2] <- 2
if(is.null(nc)){
nc<-round(sqrt(min(length(xmat$distance[xmat$observer==1 &
xmat$detected==1]),
length(xmat$distance[xmat$observer==1 &
xmat$timesdetected==2]))), 0)
}
if(is.null(breaks)){
breaks <- left + ((width-left)/nc)*(0:nc)
}else{
nc <- length(breaks)-1
}
xmat$detected <- 1
p1 <- predict(model,newdata=xmat,compute=TRUE,integrate=FALSE)$fitted
p.omega <- data.frame(object=rep(1:n,2),
omega=c(rep(1,n),rep(2,n)),
distance=rep(xmat$distance,2),
prob=rep(0,2*n))
p.omega$prob[p.omega$omega==1] <- (1-p1)
p.omega$prob[p.omega$omega==2] <- p1
expected.2 <- by(p.omega$prob,list(as.factor(p.omega$omega),
cut(p.omega$distance,breaks,
include.lowest=TRUE)),
sum,na.rm=TRUE)
expected.1 <- rep(0,nc)
for(j in 1:nc){
expected.1[j] <- sum(predict(model,integrate=TRUE,compute=TRUE,
int.range=breaks[j+1])$fitted/model$fitted)
}
n <- expected.1[nc]
expected.1[2:nc] <- expected.1[2:nc]- expected.1[1:(nc-1)]
expected.1 <- n*expected.1/sum(expected.1)
distances <- model$data$distance[model$data$observer==1&model$data$object %in%
as.numeric(names(model$fitted))]
observed.count.1 <- table(cut(distances,breaks,include.lowest=TRUE))
observed.count.2 <- table(as.factor(xmat$omega),
cut(xmat$distance,breaks,include.lowest=TRUE))
chisq.1 <- sum((observed.count.1-expected.1)^2/expected.1,na.rm=TRUE)
chisq.2 <- sum((observed.count.2-expected.2)^2/expected.2,na.rm=TRUE)
df.1 <- NA
p.1 <- NA
df.2 <- NA
p.2 <- NA
return(list(chi1=list(observed=observed.count.1,
expected=expected.1,
chisq=chisq.1,
p=p.1,
df=df.1),
chi2=list(observed=observed.count.2,
expected=expected.2[1:2,],
chisq=chisq.2,
p=p.2,
df=df.2),
pooled.chi=list(chisq=chisq.1+chisq.2,
df=2*nc-length(model$par)-1,
p=1-pchisq(chisq.1+chisq.2,
2*nc-length(model$par)-1))))
}
|
lpr <- function (object, file = "Rplotlpr.ps", ...) {
if (missing(object)) {
current.device <- dev.cur()
dev.off(dev.copy(device = postscript, file = file, ...))
dev.set(current.device)
system(paste("lpr", file))
print(paste(file, "printed."))
}
else {
if (missing(file))
file <- "Robjlpr.txt"
sink(file)
object <- as.character(substitute(object))
print(get(object))
sink()
system(paste("lpr", file))
print(paste(object, "printed."))
}
}
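# Hedged usage notes (not from the original source): lpr() with no argument
# copies the current graphics device to PostScript and pipes the file to the
# system `lpr` spooler; lpr(obj) sinks a printed representation of `obj` to a
# text file first. Both assume a Unix-like system with `lpr` on the PATH.
if (FALSE) {
  plot(1:10)
  lpr()        # print the current plot
  lpr(mtcars)  # print an object's text representation
}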
|
LKrig.sim.conditional <- function(LKrigObj, M = 1, x.grid = NULL,
grid.list = NULL, nx = 80, ny = 80, ..., Z.grid = NULL, seed=42, verbose=FALSE) {
if (is.null(x.grid)) {
if (is.null(grid.list)) {
grid.list <- fields.x.to.grid(LKrigObj$x, nx = nx, ny = ny)
}
x.grid <- make.surface.grid(grid.list)
}
x.grid<- as.matrix( x.grid)
if( verbose){
cat("LKrig.sim.conditional: x.grid")
print( x.grid)
}
if( length(seed)==1){
seeds<- seed + ( 0:(M-1))
}
g.conditional.draw <- matrix(NA, ncol = M, nrow = nrow(x.grid))
N <- nrow(LKrigObj$y)
PHIGrid<- LKrig.basis(x.grid,LKrigObj$LKinfo)
if( verbose){
cat("LKrig.sim.conditional: dim(PHIGrid)", dim(PHIGrid), fill=TRUE)
}
spatialPart<- (PHIGrid%*% LKrigObj$c.coef)
ghat <- spatialPart
if( !is.null(LKrigObj$LKinfo$fixedFunction) ){
d.coef.draw<- matrix(NA, ncol = M, nrow = length( LKrigObj$d.coef) )
}
else{
d.coef.draw<- matrix(NA, ncol = M, nrow=1)
}
ghat<- predict( LKrigObj, x= x.grid, Z = Z.grid )
for (k in 1:M) {
cat(k, " ")
out<- simConditionalDraw( k, LKrigObj, ghat, x.grid, Z.grid, PHIGrid,
seeds, ..., verbose=verbose)
if( !is.null(LKrigObj$LKinfo$fixedFunction) ){
d.coef.draw[,k] <- out$d.coef
}
g.conditional.draw[, k] <- out$g.conditional
}
cat(k, " ", fill=TRUE)
SE<- apply(g.conditional.draw, 1, FUN=sd)
return(list(x.grid = x.grid, ghat = ghat, g.draw = g.conditional.draw,
SE = SE,
d.coef.draw= d.coef.draw))
}
simConditionalDraw <- function(index=1, LKrigObj, ghat, x.grid, Z.grid,
PHIGrid, seeds= 123, verbose=FALSE){
require(LatticeKrig)
set.seed( seeds[index] )
simCoefficients<- LKrig.sim(LKinfo = LKrigObj$LKinfo, just.coefficients=TRUE)
g.unconditional.data <-LKrigObj$wX %*%simCoefficients
g.unconditional.data <- sqrt(LKrigObj$rho.MLE) * g.unconditional.data
g.unconditional.grid <-sqrt(LKrigObj$rho.MLE) *PHIGrid%*%simCoefficients
N<- length( LKrigObj$y)
y.synthetic.data <- g.unconditional.data + LKrigObj$sigma.MLE *
rnorm(N)
y.synthetic.data<- y.synthetic.data / sqrt(LKrigObj$weights)
if(verbose){
cat("simConditionalDraw Call to LKrig: ", fill=TRUE)
}
obj.fit.synthetic <- LKrig(LKrigObj$x, y.synthetic.data,
LKinfo = LKrigObj$LKinfo,
wX = LKrigObj$wX,
wU = LKrigObj$wU,
lambda = LKrigObj$lambda,
Z = LKrigObj$Z,
weights = LKrigObj$weights,
use.cholesky = LKrigObj$Mc,
verbose = verbose)
spatialPart<- (PHIGrid%*% obj.fit.synthetic$c.coef)
ghat.synthetic<- spatialPart
if( !is.null(LKrigObj$LKinfo$fixedFunction) ){
fixedPart<- predict(
obj.fit.synthetic, xnew=x.grid, Znew = Z.grid,
just.fixed=TRUE)
d.coef <- obj.fit.synthetic$d.coef
ghat.synthetic<- ghat.synthetic + fixedPart
}
else{
d.coef<- NA
}
g.conditional <- ghat + (g.unconditional.grid - ghat.synthetic)
return(
list( g.conditional = g.conditional, d.coef = d.coef) )
}
|
sim_mst = function(pars, theta, test_design, routing_rules, routing=c('last','all'))
{
routing_type = match.arg(routing)
dat = dexter::r_score(pars)(theta)
nmod=max(routing_rules$module_nbr)
if(nmod <=2)
routing_type='last'
test_design$item_id = as.character(test_design$item_id)
test_design$module_id = as.character(test_design$module_id)
routing_rules$booklet_id = as.character(routing_rules$booklet_id )
routing_rules$module_id = as.character(routing_rules$module_id)
stopifnot(setequal(test_design$module_id,routing_rules$module_id))
mdlist = split(test_design$item_id, test_design$module_id)
msum = matrix(0L,length(theta), length(mdlist))
mdl = names(mdlist)
colnames(msum) = mdl
for(module_id in mdl)
msum[,module_id] = rowSums(dat[,mdlist[[module_id]]])
if(routing_type=='last')
{
routing_rules$exit_min = coalesce(routing_rules$exit_min ,0L)
routing_rules$exit_max = coalesce(routing_rules$exit_max ,as.integer(1e9))
} else
{
routing_rules = routing_rules %>%
group_by(.data$booklet_id) %>%
arrange(.data$module_nbr) %>%
mutate(exit_min = coalesce(.data$exit_min,lag(.data$exit_min,default=0L)),
exit_max=c(.data$exit_max[-n()],coalesce(.data$exit_max[n()],as.integer(1e9)))) %>%
ungroup()
}
lapply(split(routing_rules, routing_rules$booklet_id), function(rl){
indx = rep(TRUE,length(theta))
if(routing_type == 'last')
{
for(i in 1:nrow(rl))
indx[indx] = between(msum[indx,rl$module_id[i]],rl$exit_min[i],rl$exit_max[i])
} else
{
sm=integer(length(theta))
for(i in 1:nrow(rl))
{
indx[indx] = between(msum[indx,rl$module_id[i]] + sm[indx],rl$exit_min[i],rl$exit_max[i])
sm[indx] = sm[indx] + msum[indx,rl$module_id[i]]
}
}
items = unlist(mdlist[rl$module_id])
persons = which(indx)
tibble(person_id = rep(persons,length(items)),
item_id = rep(items,each=length(persons)),
item_score = as.integer(dat[persons,items]))
}) %>%
bind_rows(.id='booklet_id') %>%
mutate_if(is.factor, as.character)
}
|
beastier_report <- function(
beast2_folder = get_default_beast2_folder(),
os = rappdirs::app_dir()$os
) {
message("***********")
message("* beastier *")
message("***********")
message("OS: ", os)
message("beast2_folder: ", beast2_folder)
beast2_path <- beastier::get_default_beast2_path(
beast2_folder = beast2_folder
)
message("beast2_path: ", beast2_path)
message("****************")
message("* Dependencies *")
message("****************")
message("beautier version: ", utils::packageVersion("beautier"))
message("beastier version: ", utils::packageVersion("beastier"))
message("**********")
message("* BEAST2 *")
message("**********")
message("Java version: ", beastier::get_java_version())
message(
"Is BEAST2 installed: ",
beastier::is_beast2_installed(folder_name = beast2_folder)
)
if (beastier::is_beast2_installed(folder_name = beast2_folder)) {
message(
"BEAST2 version: ",
beastier::get_beast2_version(beast2_path = beast2_path)
)
message(
"BEAST2 default path: ",
beastier::get_default_beast2_bin_path(beast2_folder = beast2_folder)
)
}
message("****************")
message("* session info *")
message("****************")
message(paste0(devtools::session_info(), collapse = "\n"))
}
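# Hedged usage sketch (not from the original source): the report only writes
# diagnostics via message(), so calling it is enough; assumes the beastier
# package (and optionally BEAST2) is installed. Wrapped in `if (FALSE)`.
if (FALSE) {
  beastier_report()
  beastier_report(beast2_folder = "~/beast2")  # hypothetical custom folder
}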
|
translate_SectionTypeCd <- function(code, variant) {
tbl <- .codelist[[glue::glue("SectionTypeCd_{variant}")]]
tbl$label[match(code, tbl$code)]
}
match_A03 <- function(d, id, variant, translate_colnames = TRUE, translate_codelist = TRUE) {
idx_SectionTypeCd <- which(colnames(d) == "A03_006")
idx_SectionCd <- which(colnames(d) == "A03_007")
d <- match_by_name(d, id, translate_colnames = translate_colnames, translate_codelist = translate_codelist)
if (!isTRUE(translate_codelist)) {
return(d)
}
d[[idx_SectionTypeCd]] <- translate_SectionTypeCd(d[[idx_SectionTypeCd]], variant)
d
}
replace_year <- function(d, prefix, format) {
idx <- stringr::str_detect(colnames(d), paste0(prefix, "[12][0-9]{3}"))
year <- stringr::str_sub(colnames(d)[idx], -4L)
colnames(d)[idx] <- glue::glue(format)
d
}
`match_A22-m` <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
old_names <- colnames(d)
d <- match_by_name(d, id,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist,
skip_check = TRUE)
if (!isTRUE(translate_colnames)) {
return(d)
}
d <- replace_year(d, "A22_01", "\u6700\u6df1\u7a4d\u96ea_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_02", "\u7d2f\u8a08\u964d\u96ea\u91cf_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_03", "\u6700\u4f4e\u6c17\u6e29_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_04", "\u5e73\u5747\u98a8\u901f_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_10", "\u6b7b\u8005\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_11", "\u884c\u65b9\u4e0d\u660e\u8005\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_12", "\u91cd\u50b7\u8005\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_13", "\u8efd\u50b7\u8005\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_14", "\u4f4f\u5bb6\u5168\u58ca\u68df\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_15", "\u4f4f\u5bb6\u534a\u58ca\u68df\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_16", "\u4f4f\u5bb6\u4e00\u90e8\u7834\u640d\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_17", "\u9664\u96ea\u30dc\u30e9\u30f3\u30c6\u30a3\u30a2\u56e3\u4f53\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_18", "\u9664\u96ea\u30dc\u30e9\u30f3\u30c6\u30a3\u30a2\u767b\u9332\u4eba\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_19", "\u9664\u96ea\u30dc\u30e9\u30f3\u30c6\u30a3\u30a2\u6d3b\u52d5\u56de\u6570_{year}\u5e74\u5ea6")
d <- replace_year(d, "A22_20", "\u9664\u96ea\u30dc\u30e9\u30f3\u30c6\u30a3\u30a2\u306e\u5ef6\u3079\u53c2\u52a0\u4eba\u6570_{year}\u5e74\u5ea6")
assert_all_translated(colnames(d), old_names, id)
d
}
`match_A37` <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
old_names <- colnames(d)
d <- match_by_name(d, id,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist,
skip_check = TRUE)
if (!isTRUE(translate_colnames)) {
return(d)
}
d <- replace_year(d, "A37_34", "\u6551\u6025\u8eca\u51fa\u52d5\u4ef6\u6570_{year}\u5e74")
d <- replace_year(d, "A37_35", "\u6d88\u9632\u9632\u707d\u30d8\u30ea\u51fa\u52d5\u4ef6\u6570_{year}\u5e74")
d <- replace_year(d, "A37_36", "\u5e73\u5747\u73fe\u5834\u5230\u7740\u6240\u8981\u6642\u9593_{year}\u5e74")
d <- replace_year(d, "A37_37", "\u5e73\u5747\u75c5\u9662\u53ce\u5bb9\u6642\u9593_{year}\u5e74")
assert_all_translated(colnames(d), old_names, id)
d
}
match_C02 <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
colnames(d) <- stringr::str_replace(colnames(d), "^C12_", "C02_")
match_by_name(d, id,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist)
}
match_L01 <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
dc <- .col_info$other[.col_info$other$id == id, ]
old_names <- colnames(d)
d <- match_by_position(d, id, dc = dc,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist,
skip_check = TRUE)
if (!isTRUE(translate_colnames)) {
return(d)
}
nenji_bits <- stringr::str_detect(d[["\u9078\u5b9a\u5e74\u6b21\u30d3\u30c3\u30c8"]], "^[01]+$")
if (!all(nenji_bits)) {
abort("Failed to match colnames")
}
nendo <- as.integer(unique(d[["\u5e74\u5ea6"]]))
if (length(nendo) != 1) {
warn("Data seems to over multiple years, using the latest one to calculate colnames...")
nendo <- max(nendo, na.rm = TRUE)
}
col_price <- paste0("\u8abf\u67fb\u4fa1\u683c_", seq(1983, nendo))
col_move <- paste0("\u5c5e\u6027\u79fb\u52d5_", seq(1984, nendo))
if (!isTRUE(translate_codelist)) {
inserted_rows <- 0
} else {
inserted_rows <- sum(!is.na(dc$codelist_id))
}
idx_col_price <- length(dc$name) + inserted_rows + seq_along(col_price)
idx_col_move <- max(idx_col_price) + seq_along(col_move)
if (max(idx_col_move) != ncol(d) - 1L) {
warn("The number of columns doesn't match with the expectation")
}
is_probably_move <- function(i) {
all(nchar(d[[i]]) == 14L & stringr::str_detect(d[[i]], "^[0124]+$"))
}
if (any(vapply(idx_col_price, is_probably_move, logical(1L))) ||
!all(vapply(idx_col_move, is_probably_move, logical(1L)))) {
abort("The values of columns don't match with the expectation")
}
colnames(d)[idx_col_price] <- col_price
colnames(d)[idx_col_move] <- col_move
assert_all_translated(colnames(d), old_names, id)
d
}
match_L02 <- match_L01
`match_L03-a` <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) d
`match_L03-b` <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) d
`match_L03-b-u` <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) d
match_N04 <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
ncol <- ncol(d) - 1L
dc_N04 <- .col_info$N04
dc <- dc_N04[dc_N04$columns == ncol, ]
if (nrow(dc) == 0) {
abort("Unexpected number of columns")
}
match_by_name(d, id, dc = dc,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist)
}
`match_S05-a` <- match_N04
`match_S05-b` <- match_N04
`match_P02` <- match_N04
match_A42 <- match_N04
match_P17 <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
old_names <- colnames(d)
d <- match_by_name(d, id,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist,
skip_check = TRUE)
if (!isTRUE(translate_colnames)) {
return(d)
}
idx <- which(stringr::str_detect(colnames(d), paste0("^", id)))
if (length(idx) > 0) {
offset <- nchar(id) + 2L
num <- as.integer(stringr::str_sub(colnames(d)[idx], offset))
if (any(diff(num) != 1)) {
colnames_joined <- paste(colnames(d), collapse = ", ")
msg <- glue::glue("Columns are not sequencial: {colnames_joined}")
abort(msg)
}
colnames(d)[idx] <- paste0("\u7ba1\u8f44\u7bc4\u56f2", seq_along(num))
}
assert_all_translated(colnames(d), old_names, id)
d
}
match_P18 <- match_P17
match_P21 <- function(d, id, variant = NULL, translate_colnames = TRUE, translate_codelist = TRUE) {
colnames <- colnames(d)
idx <- stringr::str_detect(colnames, "^P21[A-Z]_00$")
if (any(idx)) {
msg <- glue::glue("Found invalid colname(s): {colnames[idx]}")
warn(msg)
colnames(d)[idx] <- stringr::str_replace(colnames[idx], "\\d+$", sprintf("%03d", which(idx)))
}
match_by_name(d, id,
translate_colnames = translate_colnames,
translate_codelist = translate_codelist)
}
|
s = nint_space()
s
nint_validateSpace(s)
s = nint_space(nint_intvDim(-1, 1))
s
nint_validateSpace(s)
s = nint_space(1)
s
nint_validateSpace(s)
s = nint_space(list(nint_scatDim(c(1, 2)), nint_scatDim(c(1, 2, 3))))
s
nint_validateSpace(s)
s = nint_space(nint_scatDim(c(1, 2)),
nint_scatDim(c(1, 2, 3)))
s
nint_validateSpace(s)
nint_validateSpace(1)
nint_validateSpace(list(nint_space()))
nint_validateSpace(list(1))
s1 = nint_space(nint_gridDim(1:3),
nint_scatDim(c(0, 1)))
s2 = nint_space(s1[[1]])
s1
s2
nint_validateSpace(list(s1, s2))
|
names_as_unique <- function(names, ..., quiet = FALSE) {
check_dots_empty0(...)
.Call(ffi_names_as_unique, names, quiet)
}
names_inform_repair <- function(old, new) {
if (is_null(old)) {
old <- rep_along(new, "")
}
stopifnot(
is_character(old),
is_character(new),
length(old) == length(new)
)
if (peek_name_repair_verbosity() == "quiet") {
return(invisible())
}
old <- old %|% ""
new_names <- new != old
if (!any(new_names)) {
return(invisible())
}
bullets <- paste0(
map_chr(old[new_names], format_var),
" -> ",
map_chr(new[new_names], format_var)
)
message <- c(
"New names:",
set_names(bullets, "*")
)
inform(message = message, class = "rlib_message_name_repair")
}
peek_name_repair_verbosity <- function() {
opt <- "rlib_name_repair_verbosity"
out <- peek_option(opt) %||% "verbose"
out <- arg_match0(out, c("verbose", "quiet"), opt)
out
}
|
plot.new_ezmmek_sat_fit <- function(x, ...) {
columns <- rlang::enquos(...)
point_plot <- plot.new_ezmmek_calibrate(x, columns = columns)
unnest_sat_df <- tidyr::unnest(x, pred_activities)
sat_fit_plot <- point_plot +
ggplot2::geom_line(data = unnest_sat_df,
ggplot2::aes(x = substrate_conc, y = activity_m)) +
ggplot2::facet_wrap(columns)
sat_fit_plot
}
|
AWD_LSTM <- function(vocab_sz, emb_sz, n_hid, n_layers, pad_token = 1,
hidden_p = 0.2, input_p = 0.6, embed_p = 0.1,
weight_p = 0.5, bidir = FALSE) {
if(missing(vocab_sz) & missing(emb_sz) & missing(n_hid) & missing(n_layers)) {
text()$AWD_LSTM
} else {
args <- list(
vocab_sz = vocab_sz,
emb_sz = emb_sz,
n_hid = n_hid,
n_layers = n_layers,
pad_token = as.integer(pad_token),
hidden_p = hidden_p,
input_p = input_p,
embed_p = embed_p,
weight_p = weight_p,
bidir = bidir
)
do.call(text()$AWD_LSTM, args)
}
}
language_model_learner <- function(dls, arch, config = NULL, drop_mult = 1.0,
backwards = FALSE, pretrained = TRUE,
pretrained_fnames = NULL,
opt_func = Adam(), lr = 0.001,
cbs = NULL, metrics = NULL, path = NULL,
model_dir = "models", wd = NULL, wd_bn_bias = FALSE,
train_bn = TRUE, moms = list(0.95, 0.85, 0.95),
...) {
args <- list(
dls = dls,
arch = arch,
config = config,
drop_mult = drop_mult,
backwards = backwards,
pretrained = pretrained,
pretrained_fnames = pretrained_fnames,
opt_func = opt_func,
lr = lr,
cbs = cbs,
metrics = metrics,
path = path,
model_dir = model_dir,
wd = wd,
wd_bn_bias = wd_bn_bias,
train_bn = train_bn,
moms = moms,
...
)
strings = c('config', 'pretrained_fnames', 'cbs', 'metrics', 'path', 'wd')
for(i in 1:length(strings)) {
if(is.null(args[[strings[i]]]))
args[[strings[i]]] <- NULL
}
do.call(text()$language_model_learner, args)
}
get_text_files <- function(path, recurse = TRUE, folders = NULL) {
if(missing(path)) {
text()$get_text_files
} else {
text()$get_text_files(
path = path,
recurse = recurse,
folders = folders
)
}
}
LinearDecoder <- function(n_out, n_hid, output_p = 0.1, tie_encoder = NULL, bias = TRUE) {
text()$LinearDecoder(
n_out = n_out,
n_hid = n_hid,
output_p = output_p,
tie_encoder = tie_encoder,
bias = bias
)
}
SequentialRNN <- function(...) {
args = list(...)
do.call(text()$SequentialRNN, args)
}
get_language_model <- function(arch, vocab_sz, config = NULL, drop_mult = 1.0) {
text()$get_language_model(
arch = arch,
vocab_sz = vocab_sz,
config = config,
drop_mult = drop_mult
)
}
SentenceEncoder <- function(bptt, module, pad_idx = 1, max_len = NULL) {
args = list(
bptt = bptt,
module = module,
pad_idx = as.integer(pad_idx),
max_len = max_len
)
if(is.null(args$max_len))
args$max_len <- NULL
else
args$max_len <- as.integer(args$max_len)
do.call(text()$SentenceEncoder, args)
}
masked_concat_pool <- function(output, mask, bptt) {
text()$masked_concat_pool(
output = output,
mask = mask,
bptt = bptt
)
}
PoolingLinearClassifier <- function(dims, ps, bptt, y_range = NULL) {
args =list(
dims = dims,
ps = ps,
bptt = bptt,
y_range = y_range
)
if(is.null(args$y_range))
args$y_range <- NULL
do.call(text()$PoolingLinearClassifier, args)
}
get_text_classifier <- function(arch, vocab_sz, n_class, seq_len = 72,
config = NULL, drop_mult = 1.0,
lin_ftrs = NULL, ps = NULL,
pad_idx = 1, max_len = 1440,
y_range = NULL) {
args = list(
arch = arch,
vocab_sz = vocab_sz,
n_class = n_class,
seq_len = as.integer(seq_len),
config = config,
drop_mult = drop_mult,
lin_ftrs = lin_ftrs,
ps = ps,
pad_idx = as.integer(pad_idx),
max_len = as.integer(max_len),
y_range = y_range
)
strings = c('config', 'ps', 'lin_ftrs', 'y_range')
for(i in 1:length(strings)) {
if(is.null(args[[strings[i]]]))
args[[strings[i]]] <- NULL
}
do.call(text()$get_text_classifier, args)
}
dropout_mask <- function(x, sz, p) {
text()$dropout_mask(
x = x,
sz = sz,
p = p
)
}
RNNDropout <- function(p = 0.5) {
text()$RNNDropout(
p = p
)
}
WeightDropout <- function(module, weight_p, layer_names = "weight_hh_l0") {
text()$WeightDropout(
module = module,
weight_p = weight_p,
layer_names = layer_names
)
}
EmbeddingDropout <- function(emb, embed_p) {
text()$EmbeddingDropout(
emb = emb,
embed_p = embed_p
)
}
awd_lstm_lm_split <- function(model) {
text()$awd_lstm_lm_split(
model = model
)
}
awd_lstm_clas_split <- function(model) {
text()$awd_lstm_clas_split(
model = model
)
}
AWD_QRNN <- function(vocab_sz, emb_sz, n_hid, n_layers, pad_token = 1,
hidden_p = 0.2, input_p = 0.6, embed_p = 0.1,
weight_p = 0.5, bidir = FALSE) {
text()$AWD_QRNN(
vocab_sz = as.integer(vocab_sz),
emb_sz = as.integer(emb_sz),
n_hid = as.integer(n_hid),
n_layers = as.integer(n_layers),
pad_token = as.integer(pad_token),
hidden_p = hidden_p,
input_p = input_p,
embed_p = embed_p,
weight_p = weight_p,
bidir = bidir
)
}
forget_mult_CPU <- function(x, f, first_h = NULL, batch_first = TRUE, backward = FALSE) {
args= list(
x = x,
f = f,
first_h = first_h,
batch_first = batch_first,
backward = backward
)
if(is.null(args$first_h))
args$first_h <- NULL
do.call(fastai2$text$models$qrnn$forget_mult_CPU, args)
}
ForgetMultGPU <- function(...) {
invisible(fastai2$text$models$qrnn$ForgetMultGPU(...
) )
}
QRNNLayer <- function(input_size, hidden_size = NULL, save_prev_x = FALSE,
zoneout = 0, window = 1, output_gate = TRUE,
batch_first = TRUE, backward = FALSE) {
args = list(
input_size = input_size,
hidden_size = hidden_size,
save_prev_x = save_prev_x,
zoneout = as.integer(zoneout),
window = as.integer(window),
output_gate = output_gate,
batch_first = batch_first,
backward = backward
)
if(is.null(args$hidden_size))
args$hidden_size <- NULL
do.call(fastai2$text$models$qrnn$QRNNLayer, args)
}
QRNN <- function(input_size, hidden_size, n_layers = 1, batch_first = TRUE,
dropout = 0, bidirectional = FALSE, save_prev_x = FALSE,
zoneout = 0, window = NULL, output_gate = TRUE) {
args = list(
input_size = input_size,
hidden_size = hidden_size,
n_layers = as.integer(n_layers),
batch_first = batch_first,
dropout = dropout, # dropout is a probability; integer coercion would truncate it to 0
bidirectional = bidirectional,
save_prev_x = save_prev_x,
zoneout = as.integer(zoneout),
window = window,
output_gate = output_gate
)
if(is.null(args$window))
args$window <- NULL
do.call(fastai2$text$models$qrnn$QRNN, args)
}
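# Hedged usage sketch (not from the original source): building an AWD-LSTM
# language-model learner with the wrappers above; assumes the fastai R
# package with a working Python fastai backend. Wrapped in `if (FALSE)`.
if (FALSE) {
  library(fastai)
  # `dls` stands for a text DataLoaders object built elsewhere (assumption)
  learn <- language_model_learner(dls, AWD_LSTM(), drop_mult = 0.3)
  # AWD_LSTM() with no arguments returns the underlying architecture class,
  # matching the missing() branch in the wrapper above.
}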
|
ft_standard_scaler <- function(x, input_col = NULL, output_col = NULL,
with_mean = FALSE, with_std = TRUE,
uid = random_string("standard_scaler_"), ...) {
check_dots_used()
UseMethod("ft_standard_scaler")
}
ml_standard_scaler <- ft_standard_scaler
ft_standard_scaler.spark_connection <- function(x, input_col = NULL, output_col = NULL,
with_mean = FALSE, with_std = TRUE,
uid = random_string("standard_scaler_"), ...) {
.args <- list(
input_col = input_col,
output_col = output_col,
with_mean = with_mean,
with_std = with_std,
uid = uid
) %>%
c(rlang::dots_list(...)) %>%
validator_ml_standard_scaler()
estimator <- spark_pipeline_stage(
x, "org.apache.spark.ml.feature.StandardScaler",
input_col = .args[["input_col"]], output_col = .args[["output_col"]], uid = .args[["uid"]]
) %>%
invoke(
"%>%",
list("setWithMean", .args[["with_mean"]]),
list("setWithStd", .args[["with_std"]])
) %>%
new_ml_standard_scaler()
estimator
}
ft_standard_scaler.ml_pipeline <- function(x, input_col = NULL, output_col = NULL,
with_mean = FALSE, with_std = TRUE,
uid = random_string("standard_scaler_"), ...) {
stage <- ft_standard_scaler.spark_connection(
x = spark_connection(x),
input_col = input_col,
output_col = output_col,
with_mean = with_mean,
with_std = with_std,
uid = uid,
...
)
ml_add_stage(x, stage)
}
ft_standard_scaler.tbl_spark <- function(x, input_col = NULL, output_col = NULL,
with_mean = FALSE, with_std = TRUE,
uid = random_string("standard_scaler_"), ...) {
stage <- ft_standard_scaler.spark_connection(
x = spark_connection(x),
input_col = input_col,
output_col = output_col,
with_mean = with_mean,
with_std = with_std,
uid = uid,
...
)
if (is_ml_transformer(stage)) {
ml_transform(stage, x)
} else {
ml_fit_and_transform(stage, x)
}
}
new_ml_standard_scaler <- function(jobj) {
new_ml_estimator(jobj, class = "ml_standard_scaler")
}
new_ml_standard_scaler_model <- function(jobj) {
new_ml_transformer(
jobj,
mean = possibly_null(read_spark_vector)(jobj, "mean"),
std = possibly_null(read_spark_vector)(jobj, "std"),
class = "ml_standard_scaler_model"
)
}
validator_ml_standard_scaler <- function(.args) {
.args <- validate_args_transformer(.args)
.args[["with_mean"]] <- cast_scalar_logical(.args[["with_mean"]])
.args[["with_std"]] <- cast_scalar_logical(.args[["with_std"]])
.args
}
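# Hedged usage sketch (not from the original source): standard-scaling an
# assembled feature column on a local Spark connection, following sparklyr's
# usual assemble-then-scale pattern; column names are illustrative.
# Wrapped in `if (FALSE)` so it never runs on source.
if (FALSE) {
  library(sparklyr)
  sc <- spark_connect(master = "local")
  iris_tbl <- copy_to(sc, iris, overwrite = TRUE)
  iris_tbl %>%
    ft_vector_assembler(input_cols = c("Sepal_Length", "Sepal_Width"),
                        output_col = "features") %>%
    ft_standard_scaler(input_col = "features", output_col = "features_scaled",
                       with_mean = TRUE)
  spark_disconnect(sc)
}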
|
library(checkargs)
context("isNonZeroNumberOrInfVector")
test_that("isNonZeroNumberOrInfVector works for all arguments", {
expect_identical(isNonZeroNumberOrInfVector(NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(TRUE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(FALSE, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(NA, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(0, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(-1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(-0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(0.1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(1, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(NaN, stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(-Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(Inf, stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector("", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector("X", stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(TRUE, FALSE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(FALSE, TRUE), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(NA, NA), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(0, 0), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(-1, -2), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(-0.1, -0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(0.1, 0.2), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(1, 2), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(NaN, NaN), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c(-Inf, -Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(Inf, Inf), stopIfNot = FALSE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c("", "X"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_identical(isNonZeroNumberOrInfVector(c("X", "Y"), stopIfNot = FALSE, message = NULL, argumentName = NULL), FALSE)
expect_error(isNonZeroNumberOrInfVector(NULL, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(TRUE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(FALSE, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(NA, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(0, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNonZeroNumberOrInfVector(-1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(-0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(0.1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(1, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNonZeroNumberOrInfVector(NaN, stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNonZeroNumberOrInfVector(-Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(Inf, stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNonZeroNumberOrInfVector("", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector("X", stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(c(TRUE, FALSE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(c(FALSE, TRUE), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(c(NA, NA), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(c(0, 0), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNonZeroNumberOrInfVector(c(-1, -2), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(-0.1, -0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(0.1, 0.2), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(1, 2), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNonZeroNumberOrInfVector(c(NaN, NaN), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_identical(isNonZeroNumberOrInfVector(c(-Inf, -Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_identical(isNonZeroNumberOrInfVector(c(Inf, Inf), stopIfNot = TRUE, message = NULL, argumentName = NULL), TRUE)
expect_error(isNonZeroNumberOrInfVector(c("", "X"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
expect_error(isNonZeroNumberOrInfVector(c("X", "Y"), stopIfNot = TRUE, message = NULL, argumentName = NULL))
})
|
testthat::context("Quick test of pop dyn")
library(MSEtool)
OM <- tinyErr(testOM, silent = TRUE)
OM@Vmaxlen <- c(1,1)
OM@SRrel <- 1
testpopdyn <- function(OM) {
Hist <- Simulate(OM, silent=TRUE)
Data <- Hist@Data
set.seed(OM@seed)
StockPars <- SampleStockPars(OM, OM@nsim, OM@nyears, OM@proyears, list(), msg=FALSE)
FleetPars <- SampleFleetPars(SubOM(OM, "Fleet"), Stock=StockPars, OM@nsim,
OM@nyears, OM@proyears, cpars=list())
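  # Double-normal (dome-shaped) selectivity-at-length: ascending limb below
  # lfs, descending limb above it, with widths controlled by sl and sr.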
dnormal<-function(lens,lfs,sl,sr){
cond<-lens<=lfs
sel<-rep(NA,length(lens))
sel[cond]<-2.0^-((lens[cond]-lfs)/sl*(lens[cond]-lfs)/sl)
sel[!cond]<-2.0^-((lens[!cond]-lfs)/sr*(lens[!cond]-lfs)/sr)
sel
}
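  # Beverton-Holt stock-recruit relationship parameterized by steepness:
  # recruitment at spawning biomass SBcurr given unfished SB0 and R0.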
BHSRR <- function(SBcurr, SB0, R0, steepness) {
(4 * R0 * steepness * SBcurr)/(SB0/R0 * R0 * (1-steepness) + (5*steepness-1)*SBcurr)
}
simpop <- function(logapicF, OM, FleetPars, StockPars, sim=1, opt=1) {
for (X in 1:length(StockPars)) assign(names(StockPars)[X], StockPars[[X]])
for (X in 1:length(FleetPars)) assign(names(FleetPars)[X], FleetPars[[X]])
Len <- Len_age[sim,,1]
Wght <- a * Len^b
MAA <- 1/(1 + exp(-log(19) * ((Len - L50[sim])/(L95[sim]-L50[sim]))))
sl <- (LFS_y[1,sim] - L5_y[1,sim]) /((-log(0.05,2))^0.5)
sr <- (Linf[sim] - LFS_y[1,sim]) / ((-log(Vmaxlen_y[1,sim],2))^0.5)
SAA <- dnormal(Len, LFS_y[1,sim], sl, sr)
M_array <- rep(M[sim],maxage+1)
FAA <- exp(logapicF) * SAA
ZAA <- (FAA * Find[sim,1]) + M_array
ages <- 1:maxage
N <- VB <- matrix(NA, OM@nyears, maxage+1)
Rec <- SB <- rep(NA, OM@nyears)
N[1,1] <- R0[sim]
N[1,2:(maxage+1)] <- R0[sim] * exp(-cumsum(M_array[2:(maxage+1)]))
SB[1] <- sum(N[1,] * MAA * Wght)
SB0 <- sum(SB[1])
Rec[1] <- R0[sim]
for (yr in 2:OM@nyears) {
Rec[yr] <- BHSRR(SB[yr-1], SB0, R0[sim], steepness=hs[sim])
N[yr,1] <- Rec[yr]
ZAA <- (FAA * Find[sim,yr-1]) + M_array
N[yr,2:(maxage+1)] <- N[yr-1, 1:(maxage)] * exp(-ZAA[1:(maxage)])
SB[yr] <- sum(N[yr,]* MAA * Wght)
}
dep <- SB[yr]/SB0
if (opt==1) return((dep-StockPars$D[sim])^2)
return(list(N=N, SAA=SAA, LenCV=LenCV, Len=Len))
}
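  # For each simulation, solve for the apical F that reproduces the sampled
  # depletion, then compare numbers-at-age against MSEtool's Hist object: the
  # product of the rounded ratios is 1 only when the two agree everywhere.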
chk <- rep(NA, OM@nsim)
for (sim in 1:OM@nsim) {
opt <- optimize(simpop, interval=log(c(0.01, 0.9)), OM=OM, FleetPars=FleetPars,
StockPars=StockPars, sim=sim, opt=1)
simple <- simpop(opt$minimum, OM=OM, FleetPars=FleetPars,
StockPars=StockPars, sim=sim, opt=2)
chk[sim] <- prod(round(Hist@AtAge$Nage[sim,,]/t(simple$N) ,0))
}
chk
}
testthat::expect_equal(testpopdyn(OM), rep(1, OM@nsim))
|
library(devtools)
if (Sys.getenv("SL_CRAN") == "true" &&
Sys.getenv("TRAVIS_PULL_REQUEST") %in% c("", "false") &&
Sys.getenv("APPVEYOR_PULL_REQUEST_NUMBER") == "") {
cat("Checking reverse dependencies.\n")
  if (!requireNamespace("BiocInstaller", quietly = TRUE)) {
source('https://bioconductor.org/biocLite.R')
biocLite()
}
print(sessionInfo())
devtools::revdep_check_reset()
ignore_packages = NULL
if (Sys.getenv("TRAVIS") == "true") {
cat("Using R library:", Sys.getenv("R_LIBS_USER"), "\n")
options(devtools.revdep.libpath = Sys.getenv("R_LIBS_USER"))
}
  result = devtools::revdep_check(bioconductor = TRUE, recursive = FALSE,
                                  ignore = ignore_packages,
                                  threads = 1,
                                  quiet_check = FALSE)
if (length(result) > 0) {
devtools::revdep_check_save_summary()
print(devtools::revdep_check_print_problems())
if (Sys.getenv("TRAVIS_R_VERSION") != "") {
      q(status = 1, save = "no")
}
} else {
cat("No reverse dependency problems found. Great job!\n")
}
} else {
cat("Skipping revdep.\n")
}
|
languageserver_remove_from_rprofile <- function(
rlsLib = getOption("langserver_library"),
rprofilePath = locate_rprofile(),
code = append_code(rlsLib = rlsLib),
confirmBeforeChanging = TRUE
) {
filePath <- make_rprofile_path(rprofilePath)
oldContent <- readLines(filePath)
lg("Read oldContent, length: ", length(oldContent), " from: ", filePath)
toRemove <- vapply(
X = oldContent,
FUN = is.element,
set = code,
FUN.VALUE = logical(1)
)
newContent <- oldContent[!toRemove]
if ((length(oldContent) - length(newContent)) != length(code)) {
warning(
"The code to remove is inconsistent with content.",
"Please remove it manually."
)
return(invisible(FALSE))
}
continue <- if (isTRUE(confirmBeforeChanging)) {
try(askYesNo(
paste0(
"This will remove lines: ", toString(which(toRemove)), "\n",
"from: ", filePath, "\n",
"Do you agree?"
),
default = FALSE
))
} else {
TRUE
}
if (!isTRUE(continue)) {
message(confirm_message())
return(invisible(FALSE))
}
lg("Writing newContent, length: ", length(newContent), " to: ", filePath)
writeLines(newContent, filePath)
}
|
plot.refcurve <- function(x, newdata = data.frame(x = seq(0,1,0.01)), tau = seq(0.1,0.9,0.2),...){
mean <- predict(x$modelo_m, newdata = newdata)
sd <- sqrt(exp(predict(x$modelo_v, newdata = newdata)))
epsilon <- quantile(x$res, probs = tau)
cuants = matrix(0, ncol = length(epsilon), nrow = dim(newdata)[1])
for(i in 1:length(epsilon)) cuants[,i] = mean + sd*epsilon[i]
plot(rev(x$modelo_m$model), col = "grey")
invisible(apply(cuants, 2, function(x) lines(x ~ newdata[,1])))
}
|
create.model.list<-function(model)
{
parameters=setup.parameters(model,check=TRUE)
model.list=list()
for(n in parameters)
{
vec=ls(pattern=paste("^",n,"\\.",sep=""),envir=parent.frame())
if(length(vec)>0)
{
for (i in 1:length(vec))
{
if(eval(parse(text=paste("is.list(",vec[i],")",sep="")),envir=parent.frame()))
{
if(eval(parse(text=paste("!is.null(",vec[i],"$formula)",sep="")),envir=parent.frame()) |
(eval(parse(text=paste("is.list(",vec[i],"[[1]])",sep="")),envir=parent.frame())&&
eval(parse(text=paste("!is.null(",vec[i],"[[1]]$formula)",sep="")),envir=parent.frame())))
model.list[[n]]=c(model.list[[n]],vec[i])
}
}
} else
message("Using default formula for ",n,"\n")
}
if(length(model.list)==0)
stop("\nNo model specifications found. Use case sensitive parameter.description notation (e.g., Phi.time)\n")
if(length(model.list)>1)
{
model.list=expand.grid(model.list)
for (j in 1:dim(model.list)[2])
model.list[,j]=as.character(model.list[,j])
}
else
model.list=as.data.frame(model.list)
return(model.list)
}
|
fitpoly2 = function(conc, resp, bidirectional = TRUE, verbose = FALSE, nofit = FALSE){
fenv <- environment()
  pars <- c("a", "b", "er")
sds <- paste0(c("a", "b", "er"), "_sd")
myparams = c("success", "aic", "cov", "rme", "modl", pars, sds, "pars", "sds")
if(nofit){
out = as.list(rep(NA_real_, length(myparams)))
names(out) = myparams
out[["success"]] = out[["cov"]] = NA_integer_
out[["pars"]] = pars
out[["sds"]] = sds
return(out)
}
rmds <- tapply(resp, conc, median)
if(!bidirectional) mmed = rmds[which.max(rmds)] else mmed = rmds[which.max(abs(rmds))]
mmed_conc <- as.numeric(names(mmed))
resp_max <- max(resp)
resp_min <- min(resp)
conc_min <- min(conc)
conc_max <- max(conc)
er_est <- if ((rmad <- mad(resp)) > 0) log(rmad) else log(1e-16)
a0 = mmed
if(a0 == 0) a0 = .01
g <- c(a0/2,
conc_max,
er_est)
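  # constrOptim() enforces Ui %*% par >= Ci: rows 1-2 bound the scale term a
  # (one-sided when bidirectional = FALSE), rows 3-4 keep b within
  # (1e-8, 1e8) * conc_max.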
Ui <- matrix(c( 1, 0, 0,
-1, 0, 0,
0, 1, 0,
0, -1, 0),
byrow = TRUE, nrow = 4, ncol = 3)
if(!bidirectional){
bnds <- c(0, -1e8*abs(a0),
1e-8*conc_max, -1e8*conc_max)
} else {
bnds <- c(-1e8*abs(a0), -1e8*abs(a0),
1e-8*conc_max, -1e8*conc_max)
}
Ci <- matrix(bnds, nrow = 4, ncol = 1)
fit <- try(constrOptim(g,
tcplObj,
ui = Ui,
ci = Ci,
mu = 1e-6,
method = "Nelder-Mead",
control = list(fnscale = -1,
reltol = 1e-10,
maxit = 6000),
conc = conc,
resp = resp,
fname = "poly2"),
silent = !verbose)
if (!is(fit, "try-error")) {
if(verbose) cat("poly2 >>>",fit$counts[1],fit$convergence,"\n")
success <- 1L
aic <- 2*length(fit$par) - 2*fit$value
mapply(assign,
c(pars),
fit$par,
MoreArgs = list(envir = fenv))
modl <- poly2(fit$par,conc)
rme <- sqrt(mean((modl - resp)^2, na.rm = TRUE))
fit$cov <- try(solve(-hessian(tcplObj,
fit$par,
conc = conc,
resp = resp,
fname = "poly2")),
silent = !verbose)
if (!is(fit$cov, "try-error")) {
cov <- 1L
diag_sqrt <- suppressWarnings(sqrt(diag(fit$cov)))
if (any(is.nan(diag_sqrt))) {
mapply(assign,
sds,
NaN,
MoreArgs = list(envir = fenv))
} else {
mapply(assign,
sds,
diag_sqrt,
MoreArgs = list(envir = fenv))
}
} else {
cov <- 0L
mapply(assign,
c(sds),
NA_real_,
MoreArgs = list(envir = fenv))
}
} else {
success <- 0L
aic <- NA_real_
cov <- NA_integer_
rme <- NA_real_
modl = NA_real_
mapply(assign,
c(pars, sds),
NA_real_,
MoreArgs = list(envir = fenv))
}
return(mget(myparams))
}
|
in_range <- function(x, left, right, closed = TRUE){
if(length(closed) == 1) closed <- rep(closed, 2)
n <- length(x)
if(length(left) == 1) left <- rep(left, n)
if(length(right) == 1) right <- rep(right, n)
if(length(right) != n || length(left) != n)
    rlang::abort("left and right need to be of length 1 or the same length as x")
swap <- left > right
if(any(swap)){
left_tmp <- left
left[swap] <- right[swap]
right[swap] <- left_tmp[swap]
}
in_range_impl(x, min=left, max=right, closed=closed)
}
in_range_impl <- function(x, min, max, closed = c(TRUE, TRUE)){
if(!any(closed)) min < x & x < max
else if(all(closed)) min <= x & x <= max
else if(closed[1]) min <= x & x < max
else if(closed[2]) min < x & x <= max
}
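# Example (illustrative): in_range(1:5, 2, 4) gives FALSE TRUE TRUE TRUE FALSE;
# bounds supplied in the wrong order are swapped automatically.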
width <- function(start, end, base=1){
abs(end-start)+base
}
width0 <- function(start, end, base=0){
width(start=start, end=end, base=base)
}
max_width <- function(..., base=1){
diff(range(...))+base
}
as_tibble.GRanges <- function(data){
select(as_tibble(as.data.frame(data)), seq_id=seqnames, start, end, strand, everything())
}
|
dbReadTable_SQLiteConnection_character <- function(conn, name, ...,
row.names = pkgconfig::get_config("RSQLite::row.names.table", FALSE),
check.names = TRUE, select.cols = NULL) {
name <- check_quoted_identifier(name)
row.names <- compatRowNames(row.names)
if ((!is.logical(row.names) && !is.character(row.names)) || length(row.names) != 1L) {
stopc("`row.names` must be a logical scalar or a string")
}
if (!is.logical(check.names) || length(check.names) != 1L) {
stopc("`check.names` must be a logical scalar")
}
if (is.null(select.cols)) {
select.cols <- "*"
} else {
warning_once("`select.cols` is deprecated, use `dbGetQuery()` for complex queries.",
call. = FALSE
)
}
name <- dbQuoteIdentifier(conn, name)
out <- dbGetQuery(conn, paste("SELECT", select.cols, "FROM", name),
row.names = row.names
)
if (check.names) {
names(out) <- make.names(names(out), unique = TRUE)
}
out
}
setMethod("dbReadTable", c("SQLiteConnection", "character"), dbReadTable_SQLiteConnection_character)
|
rowSums.spam <- function(x,...) {
if( getOption("spam.force64") )
SS <- .format64()
else
SS <- .format.spam(x)
return(.C64("rowsums",
SIGNATURE=c("double", SS$signature, SS$signature,
"double"),
x@entries,
x@rowpointers,
x@dimension[1],
rs = vector_dc("double", x@dimension[1]),
INTENT=c("r", "r", "r",
"w"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$rs )
}
colSums.spam <- function(x,...) {
if( getOption("spam.force64") )
SS <- .format64()
else
SS <- .format.spam(x)
return(.C64("colsums",
SIGNATURE=c("double", SS$signature, SS$signature, SS$signature,
"double"),
x@entries,
x@colindices,
x@rowpointers,
x@dimension[1],
cs = vector_dc("double", x@dimension[2]),
INTENT=c("r", "r", "r","r",
"w"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$cs )
}
rowMeans.spam <- function(x,...) {
if( getOption("spam.force64") )
SS <- .format64()
else
SS <- .format.spam(x)
return(.C64("rowmeans",
SIGNATURE=c("double", SS$signature, SS$signature,
SS$signature, SS$signature,
"double"),
x@entries,
x@rowpointers,
x@dimension[1],
x@dimension[2],
getOption("spam.structurebased"),
rm = vector_dc("double", x@dimension[1]),
INTENT=c("r", "r", "r",
"r", "r",
"rw"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$rm )
}
colMeans.spam <- function(x,...) {
if( getOption("spam.force64") )
SS <- .format64()
else
SS <- .format.spam(x)
return(.C64("colmeans",
SIGNATURE=c("double", SS$signature, SS$signature,
SS$signature, SS$signature, SS$signature,
"double", SS$signature),
x@entries,
x@colindices,
x@rowpointers,
x@dimension[1],
x@dimension[2],
getOption("spam.structurebased"),
cm = vector_dc("double", x@dimension[2]),
vector_dc(SS$type, x@dimension[2]),
INTENT=c("r", "r", "r",
"r", "r", "r",
"rw", "rw"),
NAOK = getOption("spam.NAOK"),
PACKAGE = SS$package)$cm )
}
setMethod("rowSums","spam",rowSums.spam)
setMethod("colSums","spam",colSums.spam)
setMethod("rowMeans","spam",rowMeans.spam)
setMethod("colMeans","spam",colMeans.spam)
|
univar_freq <- function(df, var, na.rm = FALSE) {
  quo_var <- rlang::sym(var)
if (isTRUE(na.rm)) df <- filter(df, !is.na(!!quo_var))
df %>%
group_by(!!quo_var) %>%
summarize(NObs = n(),
Percent = round(n() / nrow(.), 3)) %>%
mutate(!!quo_name(quo_var) := as.character(!!quo_var)) %>%
bind_rows(df %>%
summarize(!!quo_name(quo_var) := "Total",
NObs = n(),
Percent = round(n() / nrow(.), 3)))
}
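# Example (illustrative): univar_freq(mtcars, "cyl") tabulates counts and
# percentages per level of cyl and appends a "Total" row.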
|
.rcpp_error_recorder <- function(e) {
invisible(.Call(rcpp_error_recorder, e))
}
.warningsEnv <- new.env()
.warningsEnv$warnings <- character()
.rcpp_warning_recorder <- function(w){
.warningsEnv$warnings <- append(.warningsEnv$warnings, w$message)
invokeRestart("muffleWarning")
}
.rcpp_collect_warnings <- function() {
warnings <- .warningsEnv$warnings
.warningsEnv$warnings <- character()
warnings
}
print.Rcpp_stack_trace <- function(x, ...) {
cat(format(x, ...))
}
str.Rcpp_stack_trace <- function(object, ...) {
cat(format(object, ...))
}
format.Rcpp_stack_trace <- function(x, ...) {
paste0(
if (nzchar(x$file)) paste0(x$file, ":", x$line),
"\n ", paste(collapse = "\n ", seq_along(x$stack), ":", x$stack), "\n")
}
|
.datatable.aware <- TRUE
|
`balanced.specaccum` <-
function (comm, permutations=100, strata=strata, grouped=TRUE, reps=0, scale=NULL) {
accumulator <- function(x, ind) {
rowSums(apply(x[ind, ], 2, cumsum) > 0)
}
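  # Draw an equal number of sites per stratum (the size of the smallest
  # stratum, or `reps` if positive); grouped = FALSE shuffles the result.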
stratified.sample <- function(factor,grouped=TRUE,reps=0) {
n <- length(factor)
levs <- levels(droplevels(factor))
minimum <- min(summary(factor))
if (reps > 0) {
alllevs <- summary(factor)
goodlevs <- alllevs > (reps-1)
levs <- names(alllevs[goodlevs])
minimum <- reps
}
nl <- length(levs)
seq2 <- array(nl*minimum)
seq1 <- sample(n)
strat <- sample(nl)
count <- 0
for (i in 1:nl) {
for (j in 1:n) {
if (factor[seq1[j]]==levs[strat[i]]) {
count <- count+1
if (count > i*minimum) {count <- count-1}
seq2[count] <- seq1[j]
}
}
}
if (grouped==FALSE) {
seq3 <- sample(seq2)
seq2 <- seq3
}
return(seq2)
}
x <- comm
x <- as.matrix(x)
n <- nrow(x)
p <- ncol(x)
if (p == 1) {
x <- t(x)
n <- nrow(x)
p <- ncol(x)
}
specaccum <- sdaccum <- sites <- perm <- NULL
  if (n == 1)
    stop("only 1 site provided")
  if (!is.factor(strata))
    stop("strata should be a categorical variable")
n1 <- length(stratified.sample(strata,grouped,reps))
perm <- array(dim = c(n1, permutations))
for (i in 1:permutations) {
perm[, i] <- accumulator(x, stratified.sample(strata,grouped,reps))
}
sites <- 1:n1
specaccum <- apply(perm, 1, mean)
sdaccum <- apply(perm, 1, sd)
out <- list(call = match.call(), method = "balanced species accumulation", sites = sites,
richness = specaccum, sd = sdaccum, perm = perm)
class(out) <- "specaccum"
if (is.null(scale)!=TRUE) {
n <- length(strata)
levs <- levels(droplevels(strata))
if (reps > 0) {
alllevs <- summary(strata)
goodlevs <- alllevs > (reps-1)
levs <- names(alllevs[goodlevs])
}
nlevs <- length(levs)
tot <- 0
for (i in 1:nlevs) {
ind <- strata==levs[i]
tot <- tot + mean(scale[ind])
}
tot <- tot/nlevs
out$sites <- out$sites * tot
}
out
}
|
pgu.correlator <- R6::R6Class("pgu.correlator",
private = list(
.featureNames = "character",
.method = "character",
.r = "matrix",
.pPearson = "matrix",
.tau = "matrix",
.pKendall = "matrix",
.rho = "matrix",
.pSpearman = "matrix",
.abscissa = "character",
.ordinate = "character",
.test = "htest"
),
active = list(
featureNames = function(){
return(private$.featureNames)
},
setFeatureNames = function(names = "character"){
private$.featureNames <- names
private$.r <- self$resetMatrix(value = 0)
private$.pPearson <- self$resetMatrix(value = 1)
private$.tau <- self$resetMatrix(value = 0)
private$.pKendall <- self$resetMatrix(value = 1)
private$.rho <- self$resetMatrix(value = 0)
private$.pSpearman <- self$resetMatrix(value = 1)
},
method = function(){
return(private$.method)
},
r = function(){
return(private$.r)
},
pPearson = function(){
return(private$.pPearson)
},
tau = function(){
return(private$.tau)
},
pKendall = function(){
return(private$.pKendall)
},
rho = function(){
return(private$.rho)
},
pSpearman = function(){
return(private$.pSpearman)
},
abscissa = function(){
return(private$.abscissa)
},
setAbscissa = function(value = "character"){
private$.abscissa <- value
},
ordinate = function(){
return(private$.ordinate)
},
setOrdinate = function(value = "character"){
private$.ordinate <- value
},
test = function(){
return(private$.test)
}
),
public = list(
initialize = function(data = "tbl_df"){
if(!tibble::is_tibble(data)){
        data <- tibble::tibble(names = "none",
                               values = NA)
}
self$resetCorrelator(data)
},
finalize = function(){
print("Instance of pgu.correlator removed from heap")
},
print = function(){
rString <- sprintf("\npgu.correlator\n")
cat(rString)
print(self$featureNames)
mString <- sprintf("\nmethod: %s\n", self$method[1])
cat(mString)
coefString <- sprintf("\nPearson's r:\n")
cat(coefString)
print(self$r)
pString <- sprintf("\nPearson's p-Value:\n")
cat(pString)
print(self$pPearson)
mString <- sprintf("\nmethod: %s\n", self$method[2])
cat(mString)
coefString <- sprintf("\nKendall's tau:\n")
cat(coefString)
print(self$tau)
pString <- sprintf("\nKendall's p-Value:\n")
cat(pString)
print(self$pKendall)
mString <- sprintf("\nmethod: %s\n", self$method[3])
cat(mString)
coefString <- sprintf("\nSpearman's rho:\n")
cat(coefString)
print(self$rho)
pString <- sprintf("\nSpearman's p-Value:\n")
cat(pString)
      print(self$pSpearman)
cat("\n\n")
invisible(self)
},
resetCorrelator = function(data = "tbl_df", progress = "Progress"){
private$.featureNames <- data %>%
dplyr::select_if(is.numeric) %>%
colnames()
private$.method <- c("pearson", "kendall","spearman")
private$.r <- self$resetMatrix(value = 0)
private$.pPearson <- self$resetMatrix(value = 1)
private$.tau <- self$resetMatrix(value = 0)
private$.pKendall <- self$resetMatrix(value = 1)
private$.rho <- self$resetMatrix(value = 0)
private$.pSpearman <- self$resetMatrix(value = 1)
self$correlate(data, progress)
},
resetMatrix = function(value = "numeric"){
n = length(self$featureNames)
df <- matrix(data = value,
nrow = n,
ncol = n,
dimnames = list(self$featureNames, self$featureNames))
return(df)
},
featureIdx = function(feature = "character"){
idx <- match(feature, self$featureNames)
if(is.na(idx)){
rString <- sprintf("\nWarning in pgu.correlator: feature %s is not known\n",
feature)
cat(rString)
}
return(idx)
},
calcCorrelationNumeric = function(abscissa = "numeric", ordinate = "numeric", method = "character"){
private$.test <- stats::cor.test(x = abscissa,
y = ordinate,
alternative = "two.sided",
exact = FALSE,
method = method)
},
createCorrelationMatrixPearson = function(data = "tbl_df", progress = "Progress"){
for (abs in self$featureNames){
for (ord in self$featureNames){
if(("shiny" %in% (.packages())) & (class(progress)[1] == "Progress")){
progress$inc(1)
}
self$calcCorrelationNumeric(abscissa = data %>%
dplyr::pull(abs),
ordinate = data %>%
dplyr::pull(ord),
method = self$method[1])
private$.r[ord,abs] <-self$test$estimate[[1]]
private$.pPearson[ord, abs] <- self$test$p.value
}
}
},
createCorrelationMatrixKendall = function(data = "tbl_df", progress = "Progress"){
for (abs in self$featureNames){
for (ord in self$featureNames){
if(("shiny" %in% (.packages())) & (class(progress)[1] == "Progress")){
progress$inc(1)
}
self$calcCorrelationNumeric(abscissa = data %>%
dplyr::pull(abs),
ordinate = data %>%
dplyr::pull(ord),
method = self$method[2])
private$.tau[ord,abs] <-self$test$estimate[[1]]
private$.pKendall[ord, abs] <- self$test$p.value
}
}
},
createCorrelationMatrixSpearman = function(data = "tbl_df", progress = "Progress"){
for (abs in self$featureNames){
for (ord in self$featureNames){
if(("shiny" %in% (.packages())) & (class(progress)[1] == "Progress")){
progress$inc(1)
}
self$calcCorrelationNumeric(abscissa = data %>%
dplyr::pull(abs),
ordinate = data %>%
dplyr::pull(ord),
method = self$method[3])
private$.rho[ord,abs] <-self$test$estimate[[1]]
private$.pSpearman[ord, abs] <- self$test$p.value
}
}
},
correlate = function(data = "tbl_df", progress = "Progress"){
self$createCorrelationMatrixPearson(data, progress)
self$createCorrelationMatrixKendall(data, progress)
self$createCorrelationMatrixSpearman(data, progress)
},
printFeature = function(){
df <- data.frame(
abscissa = self$abscissa,
ordinate = self$ordinate,
r = self$r[self$ordinate, self$abscissa],
p.Pearson = self$pPearson[self$ordinate, self$abscissa],
tau = self$tau[self$ordinate, self$abscissa],
        p.Kendall = self$pKendall[self$ordinate, self$abscissa],
rho = self$rho[self$ordinate, self$abscissa],
p.Spearman = self$pSpearman[self$ordinate, self$abscissa]) %>%
t() %>%
as.data.frame() %>%
tibble::rownames_to_column("correlation parameter") %>%
tibble::as_tibble() %>%
dplyr::rename(value = "V1")
return(df)
},
printRTbl = function(){
self$r %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
},
printPPearsonTbl = function(){
self$pPearson %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
},
printTauTbl = function(){
self$tau %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
},
printPKendallTbl = function(){
self$pKendall %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
},
printRhoTbl = function(){
self$rho %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
},
printPSpearmanTbl = function(){
self$pSpearman %>%
tibble::as_tibble() %>%
dplyr::mutate(features = self$featureNames) %>%
dplyr::select(features, dplyr::everything()) %>%
return()
}
)
)
|
WCE.default <- WCE
|
lb_config <- function(type=NULL, rules=list(), probes=list(), ...)
{
rule_probe_names <- sapply(rules, function(x) x$properties$probe$id)
probe_names <- sapply(probes, `[[`, "name")
for(r in rule_probe_names)
{
found <- FALSE
for(p in probe_names)
{
found <- grepl(p, r, fixed=TRUE)
if(found) break
}
if(!found)
stop("Rule with no matching probe: ", r, call.=FALSE)
}
props <- list(
type=type,
rules=rules,
probes=probes,
other=list(...)
)
structure(props, class="lb_config")
}
build_resource_fields.lb_config <- function(config, ...)
{
props <- c(
list(
loadBalancingRules=lapply(config$rules, unclass),
probes=lapply(config$probes, unclass)
),
config$other
)
sku <- list(name=config$type)
utils::modifyList(lb_default, list(properties=props, sku=sku))
}
add_template_variables.lb_config <- function(config, ...)
{
name <- "[concat(parameters('vmName'), '-lb')]"
id <- "[resourceId('Microsoft.Network/loadBalancers', variables('lbName'))]"
ref <- "[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
frontend <- "frontend"
backend <- "backend"
frontend_id <- "[concat(variables('lbId'), '/frontendIPConfigurations/', variables('lbFrontendName'))]"
backend_id <- "[concat(variables('lbId'), '/backendAddressPools/', variables('lbBackendName'))]"
list(
lbName=name,
lbId=id,
lbRef=ref,
lbFrontendName=frontend,
lbBackendName=backend,
lbFrontendId=frontend_id,
lbBackendId=backend_id
)
}
lb_probe <- function(name, port, interval=5, fail_on=2, protocol="Tcp")
{
props <- list(
port=port,
intervalInSeconds=interval,
numberOfProbes=fail_on,
protocol=protocol
)
structure(list(name=name, properties=props), class="lb_probe")
}
lb_rule <- function(name, frontend_port, backend_port=frontend_port, protocol="Tcp", timeout=5,
floating_ip=FALSE, probe_name)
{
frontend_id <- "[variables('lbFrontendId')]"
backend_id <- "[variables('lbBackendId')]"
probe_id <- sprintf("[concat(variables('lbId'), '/probes/%s')]", probe_name)
props <- list(
frontendIpConfiguration=list(id=frontend_id),
backendAddressPool=list(id=backend_id),
protocol=protocol,
frontendPort=frontend_port,
backendPort=backend_port,
enableFloatingIp=floating_ip,
idleTimeoutInMinutes=timeout,
probe=list(id=probe_id)
)
structure(list(name=name, properties=props), class="lb_rule")
}
lb_rule_ssh <- lb_rule("lb-ssh", 22, 22, probe_name="probe-ssh")
lb_rule_http <- lb_rule("lb-http", 80, 80, probe_name="probe-http")
lb_rule_https <- lb_rule("lb-https", 443, 443, probe_name="probe-https")
lb_rule_rdp <- lb_rule("lb-rdp", 3389, 3389, probe_name="probe-rdp")
lb_rule_jupyter <- lb_rule("lb-jupyter", 8000, 8000, probe_name="probe-jupyter")
lb_rule_rstudio <- lb_rule("lb-rstudio", 8787, 8787, probe_name="probe-rstudio")
lb_rule_mssql <- lb_rule("lb-mssql", 1433, 1433, probe_name="probe-mssql")
lb_rule_mssql_browser <- lb_rule("lb-mssql-browser", 1434, 1434, probe_name="probe-mssql-browser")
lb_probe_ssh <- lb_probe("probe-ssh", 22)
lb_probe_http <- lb_probe("probe-http", 80)
lb_probe_https <- lb_probe("probe-https", 443)
lb_probe_rdp <- lb_probe("probe-rdp", 3389)
lb_probe_jupyter <- lb_probe("probe-jupyter", 8000)
lb_probe_rstudio <- lb_probe("probe-rstudio", 8787)
lb_probe_mssql <- lb_probe("probe-mssql", 1433)
lb_probe_mssql_browser <- lb_probe("probe-mssql-browser", 1434)
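# Example (illustrative): pair predefined rules with their matching probes;
# lb_config() errors if a rule references a probe that is not supplied.
# lb_config(type = "standard", rules = list(lb_rule_ssh), probes = list(lb_probe_ssh))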
|
context("Function createCluster")
sapply(studies, function(study) {
setup_study(study, sourcedir)
opts <- antaresRead::setSimulationPath(studyPath, "input")
test_that("Create a new cluster", {
area <- sample(x = getOption("antares")$areaList, size = 1)
createCluster(area = area, cluster_name = "mycluster")
expect_true(paste(area, "mycluster", sep = "_") %in% levels(antaresRead::readClusterDesc()$cluster))
})
test_that("Create a new cluster with bad parameters", {
area <- sample(x = getOption("antares")$areaList, size = 1)
expect_error(createCluster(area = area, cluster_name = "mybadcluster", time_series = matrix(rep(0, 100), ncol = 1)))
expect_error(createCluster(area = area, cluster_name = "mybadcluster", prepro_modulation = matrix(rep(0, 100), ncol = 1)))
expect_error(createCluster(area = area, cluster_name = "mybadcluster", prepro_modulation = matrix(rep(0, 2*8760), ncol = 2)))
})
area2 <- sample(x = getOption("antares")$areaList, size = 1)
test_that("Create a cluster with default properties", {
createCluster(
area = area2,
cluster_name = "mycluster2",
group = "Other",
unitcount = 1,
nominalcapacity = 8000,
`min-down-time` = 0,
`marginal-cost` = 0.010000,
`market-bid-cost` = 0.010000,
time_series = matrix(rep(c(0, 8000), each = 24*364), ncol = 2),
prepro_modulation = matrix(rep(c(1, 1, 1, 0), each = 24*365), ncol = 4)
)
expect_true(paste(area2, "mycluster2", sep = "_") %in% levels(antaresRead::readClusterDesc()$cluster))
})
test_that("Remove a cluster", {
removeCluster(area = area2, cluster_name = "mycluster2")
expect_false(paste(area2, "mycluster2", sep = "_") %in% levels(antaresRead::readClusterDesc()$cluster))
})
test_that("Remove all clusters", {
all_clusters <- antaresRead::readClusterDesc()
for (i in seq_len(nrow(all_clusters))) {
removeCluster(
area = unlist(all_clusters[i, ]$area),
cluster_name = unlist(all_clusters[i, ]$cluster),
add_prefix = FALSE
)
}
expect_error(antaresRead::readClusterDesc())
})
unlink(x = file.path(pathstd, "test_case"), recursive = TRUE)
})
|
setGeneric('depths<-', function(object, value) standardGeneric('depths<-'))
setReplaceMethod("depths", signature(object = "SoilProfileCollection"),
function(object, value) {
message('This is already a SoilProfileCollection-class object, doing nothing.')
object
})
setReplaceMethod("depths", "data.frame",
function(object, value) {
if (inherits(value, "formula")) {
mf <- model.frame(value, object)
res <- .initSPCfromMF(data = object, mf = mf)
} else {
if (inherits(value, "character") &
inherits(object, 'data.frame')) {
mf <- .data.frame.j(object, value, class(object)[1])
res <- .initSPCfromMF(data = object, mf = mf)
} else {
stop('invalid initial object for promotion to SoilProfileCollection', call. = FALSE)
}
}
site.temp <- data.frame(id = profile_id(res), stringsAsFactors = FALSE)
names(site.temp) <- idname(res)
adc <- aqp_df_class(res)
res@site <- .as.data.frame.aqp(site.temp, adc)
res@horizons <- .as.data.frame.aqp(res@horizons, adc)
return(res)
}
)
.checkNAdepths <- function(depth, l) {
if(is.factor(depth)) {
warning(sprintf("Horizon %s depth is a factor! This happens with automatic character to factor conversions!", l), call. = FALSE)
depth <- as.character(depth)
}
if(!is.numeric(depth)) {
depth <- suppressWarnings(try(as.numeric(depth)))
if(!inherits(depth, 'try-error')) {
message(sprintf("Horizon %s depths converted to numeric.", l))
} else stop(sprintf("Unable to convert %s depths to numeric!", l))
}
if(any(is.na(depth)))
warning(sprintf("Horizon %s depths contain NA! Check depth logic with aqp::checkHzDepthLogic()", l), call. = FALSE)
return(depth)
}
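# Build a skeleton SoilProfileCollection: one horizon per profile ID, depths
# filled with the supplied placeholders, plus empty templates of any extra
# horizon/site columns so column names and types are preserved.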
.prototypeSPC <- function(idn, hzd,
hz = data.frame(), st = data.frame(),
fill_top = NA_integer_, fill_bottom = NA_integer_) {
id <- hz[[idn]]
nid <- length(id)
iid <- seq_along(id)
nuhz <- data.frame(id = as.character(id),
hzID = iid,
top = fill_top[nid],
bottom = fill_bottom[nid],
stringsAsFactors = FALSE)
colnames(nuhz) <- c(idn, "hzID", hzd)
nust <- data.frame(id = as.character(id),
stringsAsFactors = FALSE)
colnames(nust) <- idn
if (ncol(hz) > 0) {
hz$.dummyVar <- ""[nrow(hz)]
nuhz <- cbind(nuhz, hz[0, !colnames(hz) %in% colnames(nuhz), drop = FALSE][iid,])
nuhz$.dummyVar <- NULL
}
if (ncol(st) > 0) {
st$.dummyVar <- ""[nrow(st)]
nust <- cbind(nust, st[0, !colnames(st) %in% colnames(nust), drop = FALSE][iid,])
nust$.dummyVar <- NULL
}
return(SoilProfileCollection(idcol = idn,
depthcols = hzd,
horizons = nuhz,
site = nust))
}
.initSPCfromMF <- function(data, mf, use_class, top = NA_integer_, bottom = NA_integer_) {
nm <- names(mf)
if (inherits(data[[nm[1]]], 'factor')) {
message('converting profile IDs from factor to character')
data[[nm[1]]] <- as.character(data[[nm[1]]])
}
if (inherits(data[[nm[1]]], 'integer')) {
message('converting profile IDs from integer to character')
data[[nm[1]]] <- as.character(data[[nm[1]]])
}
depthcols <- c(nm[2], nm[3])
data_id <- data[[nm[1]]]
if (all(is.na(data[[depthcols[1]]]))) {
warning("all top depths missing from input data", call. = FALSE)
return(.prototypeSPC(nm[1], depthcols, hz = data, fill_top = top, fill_bottom = bottom))
} else if(nrow(data) == 0) {
warning("input data have 0 rows", call. = FALSE)
return(.prototypeSPC(nm[1], depthcols, hz = data, fill_top = top, fill_bottom = bottom))
}
data[[depthcols[1]]] <- .checkNAdepths(data[[depthcols[1]]], "top")
data[[depthcols[2]]] <- .checkNAdepths(data[[depthcols[2]]], "bottom")
tdep <- data[[depthcols[1]]]
id_tdep_order <- order(as.character(data_id), tdep)
data <- data[id_tdep_order,]
nusite <- .as.data.frame.aqp(data.frame(.coalesce.idx(data[[nm[1]]]),
stringsAsFactors = FALSE), class(data)[1])
names(nusite) <- nm[1]
res <- SoilProfileCollection(idcol = nm[1],
hzidcol = 'hzID',
depthcols = depthcols,
site = nusite,
horizons = data)
if (hzidname(res) %in% names(data)) {
o.hzid <- hzidname(res)
res.status <- try(hzID(res) <- data[[o.hzid]], silent = TRUE)
if(inherits(res.status, 'try-error')) {
n.hzid <- sprintf("%s_", o.hzid)
res@horizons[[n.hzid]] <- as.character(1:nrow(res))
hzidname(res) <- n.hzid
warning(sprintf('`%s` is not a unique horizon ID, using `%s`', o.hzid, n.hzid), call. = FALSE)
} else {
message(sprintf('using `%s` as a unique horizon ID', o.hzid))
}
} else {
hzID(res) <- as.character(1:nrow(res))
}
return(res)
}
setGeneric('site<-', function(object, value)
standardGeneric('site<-'))
setReplaceMethod("site", signature(object = "SoilProfileCollection"),
function(object, value) {
ids <- as.character(horizons(object)[[idname(object)]])
ids.coalesce <- .coalesce.idx(ids)
if (inherits(value, "formula")) {
mf <- model.frame(value, object@horizons, na.action = na.pass)
nm <- names(mf)
mf <- data.frame(ids, mf, stringsAsFactors = FALSE)
names(mf) <- c(idname(object), nm)
object <- .createSiteFromHorizon(object, mf)
}
if (inherits(value, "data.frame")) {
ns <- names(value)
nh <- horizonNames(object)
if (all(colnames(value) %in% siteNames(object)) &
idname(object) %in% colnames(value) &
nrow(value) == length(object)) {
if (!all(value[[idname(object)]] %in% profile_id(object))) {
message("Some profile IDs in input data are not present in object and no new columns to merge. Doing nothing.")
return(object)
}
sort.idx <- match(profile_id(object), value[[idname(object)]])
object@site <- .as.data.frame.aqp(value, aqp_df_class(object))[sort.idx, , drop = FALSE]
return(object)
}
ID.idx <- match(idname(object), nh)
if(any(ns %in% nh[-ID.idx]))
stop('duplicate names in new site / existing horizon data not allowed', call. = FALSE)
s <- object@site
site.new <- merge(s, value, all.x = TRUE, sort = FALSE)
new.id.order <- site.new[[idname(object)]]
if(length(new.id.order) != length(ids.coalesce) ||
any(new.id.order != ids.coalesce)) {
if (any(is.na(ids.coalesce)))
message("profile IDs derived from horizon data contain NA!")
site.new <- site.new[match(ids.coalesce, new.id.order),]
}
if(nrow(s) != nrow(site.new)) {
message(paste('original data (', nrow(s), ' rows) new data (', nrow(site.new), ' rows)', sep=''))
stop('invalid join condition, site data not changed', call.=FALSE)
}
object@site <- .as.data.frame.aqp(site.new, metadata(object)$aqp_df_class)
}
if(length(object) != nrow(site(object))){
print(paste('pedons (', length(object), ') rows of site data (', nrow(site(object)), ')', sep=''))
stop('invalid site data, non-unique values present in horizon data?', call.=FALSE)
}
return(object)
}
)
.createSiteFromHorizon <- function(object, mf){
names_attr <- names(mf)
idx <- match(names_attr, horizonNames(object))
idx <- idx[-match(idname(object), names_attr)]
.SD <- NULL
dth <- as.data.table(horizons(object))
new_site_data <- .as.data.frame.aqp(unique(dth[, .SD, .SDcols = names_attr]), aqp_df_class(object))
if (nrow(new_site_data) != length(object)) {
warning("One or more horizon columns cannot be normalized to site. Leaving site data unchanged.", call. = FALSE)
return(object)
}
site_data <- merge(object@site, new_site_data, by = idname(object), all.x = TRUE, sort = FALSE)
h <- object@horizons
hnames <- colnames(h)
for(i in idx) {
h[[hnames[i]]] <- NULL
}
object@horizons <- .as.data.frame.aqp(h, aqp_df_class(object))
object@site <- .as.data.frame.aqp(site_data, aqp_df_class(object))
return(object)
}
setGeneric('replaceHorizons<-', function(object, value)
standardGeneric('replaceHorizons<-'))
setReplaceMethod("replaceHorizons",
signature(object = "SoilProfileCollection"),
function(object, value) {
required.columns <- c(idname(object), horizonDepths(object))
required.missing <- !required.columns %in% names(value)
if(any(required.missing))
stop(paste0("required horizon data are missing: ",
paste0(required.columns[required.missing], collapse=", ")), call. = FALSE)
ids.match1 <- all(profile_id(object) %in% value[[idname(object)]])
if(!ids.match1)
stop("profile IDs in site are missing from replacement horizons!", call. = FALSE)
ids.match2 <- all(value[[idname(object)]] %in% profile_id(object))
if(!ids.match2)
stop("profile IDs in replacement are missing from site!", call. = FALSE)
optional.columns <- c(hzidname(object),
hzdesgnname(object),
hztexclname(object))
optional.missing <- !optional.columns %in% names(value)
if(optional.missing[1]) {
value$hzID <- as.character(1:nrow(value))
hzidname(object) <- "hzID"
message("no horizon ID present, defaulting to `hzID`")
}
object@horizons <- .as.data.frame.aqp(value, aqp_df_class(object))
return(object)
})
setGeneric('horizons<-', function(object, value)
standardGeneric('horizons<-'))
setReplaceMethod("horizons", signature(object = "SoilProfileCollection"),
function(object, value) {
if (is.null(value))
stop("new horizon data must not be NULL; to remove a site or horizon attribute use `spc$attribute <- NULL`", call.=FALSE)
if (!inherits(value, "data.frame"))
stop("new horizon data input value must inherit from data.frame", call.=FALSE)
if (all(colnames(value) %in% horizonNames(object)) &
all(c(idname(object), hzidname(object), horizonDepths(object)) %in% colnames(value)) &
nrow(value) == nrow(object)) {
if (!all(value[[idname(object)]] %in% profile_id(object))) {
message("Some profile IDs in input data are not present in object and no new columns to merge. Doing nothing.")
return(object)
}
target.order <- order(object@horizons[[idname(object)]], object@horizons[[horizonDepths(object)[1]]])
input.order <- order(value[[idname(object)]], value[[horizonDepths(object)[1]]])
idx.order <- match(input.order, target.order)
object@horizons <- .as.data.frame.aqp(value, aqp_df_class(object))[idx.order,]
return(object)
}
ids <- as.character(horizons(object)[[idname(object)]])
ns <- names(value)
nh <- siteNames(object)
ID.idx <- match(idname(object), nh)
if(any(ns %in% nh[-ID.idx]))
stop('horizons left join value contains duplicate names', call.=FALSE)
h.id <- as.character(object@horizons[[hzidname(object)]])
original.horizon.order <- 1:length(h.id)
names(original.horizon.order) <- h.id
original.site.order <- match(.coalesce.idx(object@site[[idname(object)]]),
object@site[[idname(object)]])
suppressMessages(horizon.new <- merge(object@horizons,
value,
all.x = TRUE, sort = FALSE))
chnew <- .coalesce.idx(horizon.new[[idname(object)]])
if (length(chnew) != length(original.site.order) |
suppressWarnings(any(original.site.order != chnew))) {
new.horizon.order <- order(horizon.new[[idname(object)]],
horizon.new[[horizonDepths(object)[1]]])
horizon.new <- horizon.new[new.horizon.order,]
}
if(nrow(object@horizons) != nrow(horizon.new)) {
message(paste('original data (', nrow(object@horizons), ' rows) new data (', nrow(horizon.new), ' rows)', sep=''))
stop("invalid horizons left join condition, data not changed", call.=FALSE)
}
object@horizons <- .as.data.frame.aqp(horizon.new, aqp_df_class(object))
if(any(!(ids %in% as.character(object@horizons[[idname(object)]])))) {
print(paste('pedons (', nrow(object), ') rows of horizon data (', nrow(object@horizons), ')', sep=''))
stop('profile IDs are missing from join result, data not changed', call.=FALSE)
}
return(object)
})
setGeneric('diagnostic_hz<-', function(object, value)
standardGeneric('diagnostic_hz<-'))
setReplaceMethod("diagnostic_hz",
signature(object = "SoilProfileCollection"),
function(object, value) {
d <- diagnostic_hz(object)
nm <- names(value)
idn <- idname(object)
pIDs <- profile_id(object)
if (!inherits(value, "data.frame"))
stop("diagnostic horizon data must be a data.frame", call. = FALSE)
if(nrow(d) == 0 & nrow(value) == 0)
return(object)
if(! idn %in% nm)
stop(paste("diagnostic horizon data are missing pedon ID column: ", idn), call.=FALSE)
if(all( ! unique(value[[idn]]) %in% pIDs) )
warning('candidate diagnostic horizon data have NO matching IDs in target SoilProfileCollection object!', call. = FALSE)
if(any( ! unique(value[[idn]]) %in% pIDs) ) {
warning('some records in candidate diagnostic horizon data have no matching IDs in target SoilProfileCollection object')
}
if(nrow(d) > 0)
warning('overwriting existing diagnostic horizon data!', call.=FALSE)
object@diagnostic <- .as.data.frame.aqp(value, metadata(object)$aqp_df_class)
return(object)
})
setGeneric('restrictions<-', function(object, value)
standardGeneric('restrictions<-'))
setReplaceMethod("restrictions", signature(object = "SoilProfileCollection"),
function(object, value) {
d <- restrictions(object)
nm <- names(value)
idn <- idname(object)
pIDs <- profile_id(object)
if (!inherits(value, "data.frame"))
stop("restriction data must be a data.frame", call.=FALSE)
if(nrow(d) == 0 & nrow(value) == 0)
return(object)
if(! idn %in% nm)
stop(paste("restriction data are missing pedon ID column: ", idn), call.=FALSE)
if(all(!unique(value[[idn]]) %in% pIDs) )
warning('restriction data have no matching IDs in target SoilProfileCollection object!', call. = FALSE)
if(any( ! unique(value[[idn]]) %in% pIDs) ) {
warning('some records in restriction data have no matching IDs in target SoilProfileCollection object')
}
if(nrow(d) > 0)
warning('overwriting existing restriction data!', call.=FALSE)
object@restrictions <- .as.data.frame.aqp(value, metadata(object)$aqp_df_class)
return(object)
}
)
|
mediastoredata_delete_object <- function(Path) {
op <- new_operation(
name = "DeleteObject",
http_method = "DELETE",
http_path = "/{Path+}",
paginator = list()
)
input <- .mediastoredata$delete_object_input(Path = Path)
output <- .mediastoredata$delete_object_output()
config <- get_config()
svc <- .mediastoredata$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.mediastoredata$operations$delete_object <- mediastoredata_delete_object
mediastoredata_describe_object <- function(Path) {
op <- new_operation(
name = "DescribeObject",
http_method = "HEAD",
http_path = "/{Path+}",
paginator = list()
)
input <- .mediastoredata$describe_object_input(Path = Path)
output <- .mediastoredata$describe_object_output()
config <- get_config()
svc <- .mediastoredata$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.mediastoredata$operations$describe_object <- mediastoredata_describe_object
mediastoredata_get_object <- function(Path, Range = NULL) {
op <- new_operation(
name = "GetObject",
http_method = "GET",
http_path = "/{Path+}",
paginator = list()
)
input <- .mediastoredata$get_object_input(Path = Path, Range = Range)
output <- .mediastoredata$get_object_output()
config <- get_config()
svc <- .mediastoredata$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.mediastoredata$operations$get_object <- mediastoredata_get_object
mediastoredata_list_items <- function(Path = NULL, MaxResults = NULL, NextToken = NULL) {
op <- new_operation(
name = "ListItems",
http_method = "GET",
http_path = "/",
paginator = list()
)
input <- .mediastoredata$list_items_input(Path = Path, MaxResults = MaxResults, NextToken = NextToken)
output <- .mediastoredata$list_items_output()
config <- get_config()
svc <- .mediastoredata$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.mediastoredata$operations$list_items <- mediastoredata_list_items
mediastoredata_put_object <- function(Body, Path, ContentType = NULL, CacheControl = NULL, StorageClass = NULL, UploadAvailability = NULL) {
op <- new_operation(
name = "PutObject",
http_method = "PUT",
http_path = "/{Path+}",
paginator = list()
)
input <- .mediastoredata$put_object_input(Body = Body, Path = Path, ContentType = ContentType, CacheControl = CacheControl, StorageClass = StorageClass, UploadAvailability = UploadAvailability)
output <- .mediastoredata$put_object_output()
config <- get_config()
svc <- .mediastoredata$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.mediastoredata$operations$put_object <- mediastoredata_put_object
|
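# det.query() evaluates a density estimation tree (DET) at the query points in
# x: each point is rescaled into the unit box, routed down the tree by split
# dimension/position, and assigned its leaf density times a linear correction.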
det.query <- function(det, x, cores = 1) {
errorstrg <- sprintf(
"invalid x, expecting d x n matrix with d = %i dimensions and n query points",
length(det$lb))
if (is.matrix(x)) {
if (nrow(x) != length(det$lb)) {stop(errorstrg)}
} else {stop(errorstrg)}
n <- ncol(x)
x <- (det$A%*%(x-det$mu%*%t(rep(1,n))) - det$lb%*%t(rep(1,n))) /
((det$ub - det$lb)%*%t(rep(1,n)))
x <- split(x, col(x))
ddet <- function(x) {
if (any(x < 0) | any(x > 1)) {p <- 0}
else {
ind <- 1
while (!is.na(det$tree[ind,2])) {
dimens <- det$sd[ind]; pos <- det$sp[ind]
if (x[dimens] <= pos) {
ind <- det$tree[ind,2]
x[dimens] <- x[dimens] / pos
} else {
ind <- det$tree[ind,3]
x[dimens] <- (x[dimens]-pos) / (1-pos)
}
}
p <- det$p[ind]
if (p > 0) {p <- p * prod(((x-1/2)*det$theta[[ind]]+1))}
}
return(p)
}
if (cores > 1) {
if (is.infinite(cores)) {cores <- max(1,ceiling(parallel::detectCores()/2))}
clster <- parallel::makeCluster(cores)
p <- parallel::parLapply(clster, x, ddet)
parallel::stopCluster(clster)
} else {
p <- lapply(x, ddet)
}
p <- unlist(p) / prod(det$ub - det$lb)
return(p)
}
|
if (interactive()) pkgload::load_all()
test_excerption <- function() {
file.copy(system.file("runit_tests", "files", "excerption_file.Rasciidoc",
package = "rasciidoc"), tempdir(), overwrite = TRUE)
path <- rasciidoc:::excerpt_slides(file.path(tempdir(),
"excerption_file.Rasciidoc"))
result <- readLines(path)
expectation <- readLines(system.file("runit_tests", "files",
"expected", "excerpted_slides.txt",
package = "rasciidoc"))
RUnit::checkIdentical(result, expectation)
path <- rasciidoc:::excerpt_no_slides(file.path(tempdir(),
"excerption_file.Rasciidoc")
)
result <- readLines(path)
expectation <- readLines(system.file("runit_tests", "files",
"expected", "excerpted_no_slides.txt",
package = "rasciidoc"))
RUnit::checkIdentical(result, expectation)
}
|
power_standardized_alpha <- function(power_function,
alpha = 0.05,
power = 0.8,
standardize_N = 100,
verbose = Superpower_options("verbose")) {
f = function(x, power_function, power, standardize_N) {
a_stan <- alpha/sqrt(x/standardize_N)
y <- eval(parse(text=paste(power_function)))
max(y - power, power - y)
}
res <- optimize(f,
c(3, 1000),
tol = 0.001,
power_function = power_function,
power = power,
standardize_N = standardize_N)
a_stan <- alpha/sqrt(ceiling(res$minimum)/standardize_N)
if(verbose == TRUE){
    mes = paste0("The standardized alpha is ", a_stan)
print(mes)
}
invisible(list(N = ceiling(res$minimum),
a_stan = a_stan,
objective = res$objective
))
}
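# Example (illustrative): `power_function` is a string evaluated with `x` (the
# candidate sample size) and `a_stan` in scope, e.g.
# power_standardized_alpha("pwr::pwr.t.test(d = 0.5, n = x, sig.level = a_stan)$power")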
|
test_that("country = 'all' returns the expected output", {
expect_equal(assign_country(country = "all"), all_countries)
})
test_that("Arguments other than 'all' are returned as is", {
country <- c("ALB", "CHN")
expect_equal(assign_country(country), country)
})
|
oxfordWeather <- function() {
ox <- cholera::oxford.weather
ox$date <- monthEndDate()
class(ox) <- c("oxfordWeather", class(ox))
ox
}
monthEndDate <- function(start.yr = 1853, end.yr = NULL) {
if (is.null(end.yr)) end.yr <- max(cholera::oxford.weather$year)
start <- paste0(start.yr, "-1-1")
end <- paste0(end.yr, "-12-31")
cal <- seq.Date(as.Date(start), as.Date(end), by = "day")
cal <- data.frame(date = cal, year = as.numeric(format(cal, "%Y")),
month = as.numeric(format(cal, "%m")), day = as.numeric(format(cal, "%d")))
last.day.month <- lapply(start.yr:end.yr, function(yr) {
tmp <- cal[cal$year == yr, ]
out <- lapply(1:12, function(mo) max(tmp[tmp$month == mo, "date"]))
do.call(c, out)
})
do.call(c, last.day.month)
}
plot.oxfordWeather <- function(x, statistic = "temperature",
month = "september", ...) {
outbreak <- as.Date("1854-10-01") - 1
sept <- as.Date(paste0(unique(x$year), "-09-30"))
if (statistic == "temperature") temperaturePlot(x, month)
else if (statistic == "rain") rainPlot(x, month)
else stop('statistic must be "temperature" or "rain".', call. = FALSE)
}
rainPlot <- function(x, month) {
if (month == "august") {
outbreak <- as.Date("1854-09-01") - 1
suffix <- "-08-31"
mo.col <- "blue"
} else if (month == "september") {
outbreak <- as.Date("1854-10-01") - 1
suffix <- "-09-30"
mo.col <- "red"
} else stop('month must be "august" or "september".', call. = FALSE)
mo.sel <- x$date == outbreak
mos <- x$date %in% as.Date(paste0(unique(x$year), suffix))
ttl <- paste0("Monthly Rainfall in Oxford UK (", tools::toTitleCase(month),
")")
plot(x$date, x$rain, xlab = "Year", ylab = "millimeters", col = "gray",
main = ttl)
points(x[mos, "date"], x[mos, "rain"], col = mo.col, pch = 16)
axis(3, at = outbreak, labels = format(outbreak, "%b %Y"), padj = 0.9,
col.ticks = mo.col, cex.axis = 0.9, col.axis = mo.col)
abline(v = outbreak, col = mo.col)
abline(h = x[mo.sel, "rain"], col = mo.col)
axis(4, at = x[mo.sel, "rain"], labels = round(x[mo.sel, "rain"], 1),
col.axis = mo.col, col = mo.col)
rug(x[mos, "rain"], side = 4, col = mo.col)
lines(stats::lowess(x[mos, "date"], x[mos, "rain"]), col = mo.col,
lty = "dashed", lwd = 2)
}
temperaturePlot <- function(x, month) {
if (month == "august") {
outbreak <- as.Date("1854-09-01") - 1
suffix <- "-08-31"
} else if (month == "september") {
outbreak <- as.Date("1854-10-01") - 1
suffix <- "-09-30"
} else stop('month must be "august" or "september".', call. = FALSE)
sel <- x$date %in% as.Date(paste0(unique(x$year), suffix))
mo.data <- x[sel, ]
ttl <- paste0("Monthly High and Low Temperatures in Oxford UK (",
tools::toTitleCase(month), ")")
plot(mo.data$date, mo.data$tmax, pch = NA, xlab = "Year",
ylab = "Celsius", ylim = range(mo.data$tmax, mo.data$tmin), main = ttl)
axis(3, at = outbreak, labels = format(outbreak, "%b %Y"), padj = 0.9)
invisible(lapply(seq_len(nrow(mo.data )), function(i) {
segments(mo.data$date[i], mo.data$tmax[i],
mo.data$date[i], mo.data$tmin[i], col = "gray")
}))
points(mo.data$date, mo.data$tmax, col = "red")
points(mo.data$date, mo.data$tmin, col = "blue")
points(mo.data[mo.data$date == outbreak, "date"],
mo.data[mo.data$date == outbreak, "tmax"], col = "red", pch = 16)
points(mo.data[mo.data$date == outbreak, "date"],
mo.data[mo.data$date == outbreak, "tmin"], col = "blue", pch = 16)
lines(stats::lowess(mo.data$date, mo.data$tmax), col = "red", lty = "dashed",
lwd = 1.5)
lines(stats::lowess(mo.data$date, mo.data$tmin), col = "blue",
lty = "dashed", lwd = 1.5)
rug(mo.data$tmax, side = 4, col = "red")
rug(mo.data$tmin, side = 4, col = "blue")
abline(v = as.Date("1854-09-30"), lty = "dotted")
abline(h = mo.data[mo.data$date == outbreak, "tmax"], col = "red",
lwd = 2)
abline(h = mo.data[mo.data$date == outbreak, "tmin"], col = "blue",
lwd = 2)
}
|
tkdensity <- function(y, n = 1024, log.bw = TRUE, showvalue = TRUE,
xlim = NULL, do.rug = size < 1000, kernels = NULL,
from.f = if(log.bw) -2 else 1/1000,
to.f = if(log.bw) +2.2 else 2, col = 2)
{
requireNamespace("tcltk") || stop("tcltk support is absent")
tclVar <- tcltk::tclVar
tclvalue <- tcltk::tclvalue
tkframe <- tcltk::tkframe
tkpack <- tcltk::tkpack
tklabel <- tcltk::tklabel
tkscale <- tcltk::tkscale
nbw <- xZ <- xM <- NA_real_
dFun <- density.default
all.kerns <- eval(formals(dFun)$kernel)
kernels <-
if(is.null(kernels)) all.kerns
else match.arg(kernels, all.kerns, several.ok = TRUE)
ynam <- deparse(substitute(y))
size <- length(y)
sd.y <- sqrt(var(y))
hi <- sd.y
if ((lo <- min(hi, IQR(y)/1.34)) == 0)
(lo <- hi) || (lo <- abs(y[1])) || (lo <- 1)
bw <- bw0 <- 0.9 * lo * size^(-0.2)
if(log.bw) lbw <- lbw0 <- log10(bw0)
ry <- range(y)
xlim <- if(is.null(xlim)) ry + c(-2,2)* bw0 else as.numeric(xlim)
xlmid <- xm0 <- mean(xlim)
xr0 <- diff(xlim)
xZoom <- tclVar(100)
xlmid <- tclVar(xlmid)
if(log.bw)
Lbw <- tclVar(log10(bw))
else
bw <- tclVar(bw)
kernel <- tclVar("gaussian")
replot <- function(...) {
if (is.null(y)) return()
b <- if(log.bw) 10 ^ (lbw <<- as.numeric(tclvalue(Lbw))) else
nbw <<- as.numeric(tclvalue(bw))
k <- tclvalue(kernel)
xZ <<- as.numeric(tclvalue(xZoom))
xM <<- as.numeric(tclvalue(xlmid))
if(0 > 1)
b <- xlim + b + k
xr.half <- (xr0 / 2) * 100 / xZ
xlim <- xM + c(-xr.half, xr.half)
eval(substitute(plot(density(y, bw = b, kernel = k, n = n),
main = paste("density(",ynam,
", bw = ",format(b, dig = 3),
", kernel = \"", k, "\")", sep=""),
xlim = xlim, col = col)))
if(do.rug) rug(y)
}
replot.maybe <- function(...)
if ((log.bw && !identical(lbw, as.numeric(tclvalue(Lbw)))) ||
(!log.bw && !identical(nbw, as.numeric(tclvalue(bw)))) ||
!identical(xZ, as.numeric(tclvalue(xZoom))) ||
!identical(xM, as.numeric(tclvalue(xlmid)))
)
replot()
base <- tcltk::tktoplevel()
tcltk::tkwm.title(base, paste("Tk Density(",ynam,")"))
base.frame <- tkframe(base, borderwidth = 2)
bw.frame <- tkframe(base.frame, relief = "groove", borderwidth = 3)
kern.frame <- tkframe(base.frame, relief = "groove", borderwidth = 2)
x.frame <- tkframe(base.frame)
xr.frame <- tkframe(x.frame)
xmid.frame <- tkframe(x.frame)
tkpack(xr.frame, xmid.frame, side = "left", anchor = "s")
q.but <- tcltk::tkbutton(base, text = "Quit", command = function() {
par(op)
tcltk::tkdestroy(base) })
tkpack(base.frame,
bw.frame, kern.frame,
x.frame,
q.but)
tkpack(tklabel (bw.frame,
text = if(log.bw)"log10(Bandwidth)" else "Bandwidth"))
tkpack(tkscale (bw.frame, command = replot.maybe,
from = if(log.bw) lbw0 + (from.f) else bw0 * from.f,
to = if(log.bw) lbw0 + (to.f) else bw0 * to.f,
showvalue = showvalue,
variable = if(log.bw) Lbw else bw,
resolution = if(log.bw) lbw0/20 else bw0/4 * from.f,
length = 200,
orient = "horiz"))
tkpack(tklabel(kern.frame, text = "Kernel"))
for (k.name in kernels)
tkpack(tcltk::tkradiobutton(kern.frame, command = replot,
text = k.name, value = k.name, variable=kernel),
anchor = "w")
tkpack(tklabel (xr.frame, text = "x zoom [%]"))
tkpack(tkscale (xr.frame, command = replot.maybe,
from = 5,
to = 500,
showvalue = TRUE, variable = xZoom,
length = 80, orient = "horiz"))
tkpack(tklabel (xmid.frame, text = "x pan"))
tkpack(tkscale (xmid.frame, command = replot.maybe,
from = xm0 - xr0,
to = xm0 + xr0,
showvalue = FALSE, variable = xlmid,
resolution = xr0/2000,
length = 80, orient = "horiz"))
if((op <- par("ask")) || prod(par("mfrow")) > 1)
op <- par(ask = FALSE, mfrow = c(1,1))
replot()
}
|
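# Scan the deparsed left- and right-hand sides of a formula for the column
# names in cn, requiring a non-identifier character (or a string boundary) on
# both sides so that, e.g., "x" does not match inside "x2".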
GetVarsFromFormula = function(formula, cn)
{
cForm = as.character(formula)
vars = c()
for (i in 1:length(cn))
{
notVarString = "((^)|([^\\^_\\.0-9A-Za-z$])|($))"
if (regexpr(paste(notVarString, cn[i], notVarString, sep=''),
cForm[3], perl=TRUE)[1] > 0)
{
vars = c(vars, cn[i])
}
if (regexpr(paste(notVarString, cn[i], notVarString, sep=''), cForm[2],
perl=TRUE) > 0)
{
vars = c(vars, cn[i])
}
}
return(vars)
}
chunkGenerator=function(chunkSize, lastIndex)
{
.chunkSize=chunkSize
.currentIndex=1
.lastIndex=lastIndex
function(reset)
{
if (reset==TRUE)
{
.currentIndex <<- 1
return(NULL)
}
if (.currentIndex > .lastIndex)
{
return(NULL)
}
else
{
newLast = min(.currentIndex+.chunkSize-1,lastIndex)
ret = .currentIndex:newLast
.currentIndex <<- newLast+1
return(ret)
}
}
}
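# Example (illustrative): g <- chunkGenerator(3, 7); g(FALSE) yields 1:3,
# then 4:6, then 7, then NULL; g(TRUE) resets the internal index.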
dataFrameGenerator=function(data, cols, levelList, getNextChunk)
{
  .cols=cols
.data=data
.levelList = levelList
.getNextChunk = getNextChunk
function(reset)
{
nextIndices = .getNextChunk(reset)
if (is.null(nextIndices))
{
return(NULL)
}
df = as.data.frame(data[nextIndices, cols])
if (!is.null(.levelList))
{
for (name in names(.levelList))
{
df[,name] = factor( df[,name], levels=.levelList[[name]] )
}
}
return(df)
}
}
CreateNextDataFrameGenerator = function( formula, data, chunksize, fc,
getNextChunkFunc, ...)
{
if (!is.null(chunksize) && is.null(getNextChunkFunc))
{
getNextChunkFunc = chunkGenerator(chunksize, nrow(data))
}
if (is.null(chunksize) && is.null(getNextChunkFunc))
{
chunksize = max(floor(nrow(data)/ncol(data)^2), 10000)
if (chunksize > nrow(data)) chunksize = nrow(data)
getNextChunkFunc = chunkGenerator(chunksize, nrow(data))
}
levelList=NA
if (!is.null(fc))
{
levelList = list()
{
for (i in 1:length(fc))
{
levelList = append( levelList,
list( sort( unique( as.numeric(data[,fc[i]])) ) ) )
}
}
names(levelList)=fc
}
vars = all.vars(formula)
if (!is.null( list(...)[['weights']]))
vars = c(vars, all.vars(list(...)[['weights']]))
cols = bigmemory:::mmap(vars, colnames(data))
return(dataFrameGenerator(data, cols, levelList, getNextChunkFunc))
}
bigglm.big.matrix = function( formula, data, chunksize=NULL, ..., fc=NULL,
getNextChunkFunc=NULL)
{
getNextDataFrame = CreateNextDataFrameGenerator(formula, data,
chunksize, fc, getNextChunkFunc, ...)
return(bigglm(formula=formula, data=getNextDataFrame, chunksize=chunksize,
... ))
}
biglm.big.matrix = function( formula, data, chunksize=NULL, ..., fc=NULL,
getNextChunkFunc=NULL)
{
getNextDataFrame = CreateNextDataFrameGenerator(formula, data,
chunksize, fc, getNextChunkFunc, ...)
data = getNextDataFrame(FALSE)
blm = biglm(formula=formula, data=data, ...)
data = getNextDataFrame(FALSE)
while(!is.null(data))
{
blm = update(blm, data)
data = getNextDataFrame(FALSE)
}
return(blm)
}
|
output$selectize_casesByCountries_new <- renderUI({
selectizeInput(
"selectize_casesByCountries_new",
label = "Select Country",
choices = c("All", unique(data_evolution$`Country/Region`)),
selected = "All"
)
})
output$case_evolution_new <- renderPlotly({
req(input$selectize_casesByCountries_new)
data <- data_evolution %>%
mutate(var = sapply(var, capFirst)) %>%
filter(if (input$selectize_casesByCountries_new == "All") TRUE else `Country/Region` %in% input$selectize_casesByCountries_new) %>%
group_by(date, var, `Country/Region`) %>%
    summarise(new_cases = sum(value_new, na.rm = TRUE))
if (input$selectize_casesByCountries_new == "All") {
data <- data %>%
group_by(date, var) %>%
      summarise(new_cases = sum(new_cases, na.rm = TRUE))
}
p <- plot_ly(data = data, x = ~date, y = ~new_cases, color = ~var, type = 'bar') %>%
layout(
      yaxis = list(title = "# Cases"),
xaxis = list(title = "Date")
)
})
|
options(rmarkdown.html_vignette.check_title = FALSE)
knitr::opts_chunk$set(eval = F)
|
setClass("DAG",
representation(
alpha = "numeric",
sets = "list",
leaf_based_sets = "list",
allpvalues = "numeric",
implications = "logical",
isadjusted = "logical",
rejected = "logical",
method = "character",
twoway = "logical"
)
)
DAGmethod <- function(DAGstructure, test, alpha_max = 0.05, method = "all", isadjusted = FALSE, optimization = "none", degree = "group", pvalues = NULL, verbose = FALSE)
{
if(optimization != "none" && !DAGstructure@twoway)
stop("Your DAG structure does not allow optimization, because it has no twoway relations.")
if(optimization != "none")
solveLP <- getLPsolver()
sets <- DAGstructure@sets
parents <- DAGstructure@parents
children <- DAGstructure@children
twoway <- DAGstructure@twoway
n <- length(sets)
if(isadjusted)
{
cur_degree <- degree
cur_optim <- optimization
}
else
{
cur_degree <- "group"
cur_optim <- "none"
}
leaf_based_sets <- set_in_leaves(parents,children)
num_rejected <- 0
rejected <- rep(FALSE, n)
to_be_rejected <- rep(FALSE,n)
if(length(pvalues)==0)
pvalues <- rep(NA_real_, n)
adj_pvalues <- rep(NA_real_, n)
ratios <- rep(0,n)
if(method == "any" && !isadjusted && degree == "individual" && optimization != "none")
{
max_weights <- update_max_weights(parents, children, rejected)
}
num_leaves <- 0
for(i in 1:n)
if(length(children[[i]])==0)
num_leaves <- num_leaves + 1
implications <- rep(FALSE,n)
if(isadjusted)
alpha <- 0
else
alpha <- alpha_max
while(num_rejected < n)
{
weights <- update_weights(parents, children, rejected, method)
impl <- leaf_based_sets[implications]
if(cur_optim == "none")
{
unrejected_leaves <- 0
for(i in 1:n)
if(!rejected[i] && length(children[[i]]) == 0)
unrejected_leaves <- unrejected_leaves + 1
}
else
{
if(length(impl)>0)
{
if(cur_degree == "group")
rejected_leaves_easy <- LPdenom(solveLP, impl, NULL, relaxation = (cur_optim == "LP"))
}
else
rejected_leaves_easy <- 0
}
min_alpha <- Inf
for(i in 1:n)
{
if(weights[i] > 0)
{
if(is.na(pvalues[i]))
pvalues[i] <- test(sets[[i]])
if(pvalues[i] <= alpha_max)
{
if(cur_optim == "none")
{
ratios[i] <- weights[i]/unrejected_leaves
}
else if(cur_degree == "group")
{
ratios[i] <- weights[i]/(num_leaves - rejected_leaves_easy)
}
else if(cur_degree == "individual" && (method == "all" || isadjusted) )
{
if(length(impl)>0)
rejected_leaves <- LPdenom(solveLP, impl,leaf_based_sets[[i]], relaxation = (cur_optim == "LP"))
else
rejected_leaves <- 0
ratios[i] <- weights[i] / (num_leaves - rejected_leaves)
}
min_alpha <- min(min_alpha, pvalues[i] / ratios[i])
if(method == "any" && !isadjusted && cur_degree == "individual" && cur_optim != "none")
{
if(length(impl)>0)
{
par <- parents[[i]]
num_par <- length(par)
parentscand <- vector("list", num_par)
counter <- 1
for(j in par)
{
parentscand[[counter]] <- leaf_based_sets[[j]]
counter <- counter + 1
}
g1 <- max_weights[i]/num_par
palpha <- pvalues[i]/alpha_max
res <- LPnumdenom(solveLP, impl, leaf_based_sets[[i]], parentscand, g1, palpha, relaxation = (cur_optim == "LP"))
if(res >= palpha*num_leaves)
{
to_be_rejected[i] <- TRUE
min_alpha <- min(min_alpha, alpha_max)
}
}
else
{
rejected_leaves <- 0
ratios[i] <- weights[i] / (num_leaves - rejected_leaves)
min_alpha <- min(min_alpha, pvalues[i] / ratios[i])
}
}
}
}
}
new_alpha <- max(alpha, min_alpha)
if(new_alpha > alpha_max)
{
if(cur_degree != degree || cur_optim != optimization)
{
cur_degree <- degree
cur_optim <- optimization
next
}
else
break
}
else
alpha <- new_alpha
for(i in 1:n)
{
if(weights[i] > 0 && !rejected[i] && pvalues[i]<=alpha_max)
{
if(alpha >= pvalues[i]/ratios[i] || to_be_rejected[i])
{
if(method == "all")
{
rejected[i] <- TRUE
implications[i] <- TRUE
for(j in parents[[i]])
implications[j] <- FALSE
num_rejected <- num_rejected + 1
adj_pvalues[i] <- alpha
}
if(method == "any")
{
rejected[i] <- TRUE
implications[i] <- TRUE
num_rejected <- num_rejected + 1
adj_pvalues[i] <- alpha
queue <- rep(0, n)
head <- tail <- 1
queue[tail] <- i
tail <- tail + 1
while(head < tail)
{
current <- queue[head]
head <- head + 1
for(j in parents[[current]])
{
if(!rejected[j])
{
queue[tail] <- j
tail <- tail + 1
rejected[j] <- TRUE
num_rejected <- num_rejected + 1
adj_pvalues[j] <- alpha
}
else
{
implications[j] <- FALSE
}
}
}
}
if(verbose)
{
      cat(sprintf("\rNumber of rejected hypotheses: %d", num_rejected))
flush.console()
}
}
}
}
}
if(verbose)
cat("\n")
names(leaf_based_sets) <- names(sets)
names(adj_pvalues) <- names(sets)
names(implications) <- names(sets)
names(rejected) <- names(sets)
out <- new("DAG",
alpha = alpha_max,
sets = sets,
leaf_based_sets = leaf_based_sets,
allpvalues = adj_pvalues,
implications = implications,
isadjusted = isadjusted,
rejected = rejected,
method = method,
twoway = DAGstructure@twoway)
return(out)
}
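## Distributes weight from the leaves upward in topological order: every
## unrejected leaf starts with weight 1, and each processed node passes its
## weight to its unrejected parents -- split evenly over the unrejected
## parents for method "all", or diluted by the total number of parents for
## method "any" (in which case the node keeps the remainder).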
update_weights <- function(parents, children, rejected, method)
{
n <- length(rejected)
queue <- rep(0, n)
head <- tail <- 1
children_left <- rep(NA,n)
for(i in 1:n)
children_left[i] <- length(children[[i]])
weights <- rep(0, n)
for(i in 1:n)
if(!rejected[i] && children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
weights[i] <- 1
}
while(head < tail)
{
current <- queue[head]
head <- head + 1
num_parents <- length(parents[[current]])
num_unrejected <- 0
for(i in parents[[current]])
if(!rejected[i])
num_unrejected <- num_unrejected + 1
if(num_unrejected > 0)
{
for(i in parents[[current]])
if(!rejected[i])
{
if(method == "all")
{
weights[i] <- weights[i] + weights[current] / num_unrejected
}
else if(method == "any")
{
weights[i] <- weights[i] + weights[current] / num_parents
}
children_left[i] <- children_left[i] - 1
if(children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
}
}
if(method == "all")
{
weights[current] <- 0
}
else if(method == "any")
{
weights[current] <- weights[current] - weights[current] * (num_unrejected/ num_parents)
}
}
}
return(weights)
}
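## Computes, for every node, the set of leaf hypotheses lying below it (a
## leaf's set is the leaf itself). Nodes are processed leaves-first, so each
## parent accumulates and finally deduplicates the leaf sets of its children.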
set_in_leaves <- function(parents, children)
{
n <- length(parents)
queue <- rep(0, n)
head <- tail <- 1
children_left <- rep(NA,n)
for(i in 1:n)
children_left[i] <- length(children[[i]])
leaf_based_sets <- vector("list",n)
for(i in 1:n)
if(children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
leaf_based_sets[[i]] <- i
}
while(head < tail)
{
current <- queue[head]
head <- head + 1
for(i in parents[[current]])
{
leaf_based_sets[[i]] <- c(leaf_based_sets[[i]],leaf_based_sets[[current]])
children_left[i] <- children_left[i] - 1
if(children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
leaf_based_sets[[i]] <- unique(leaf_based_sets[[i]])
}
}
}
return(leaf_based_sets)
}
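## Companion to update_weights() for method "any": propagates weights with
## the fixed 1/num_parents dilution and records, for each node, the weight it
## carries at the moment all of its children have been processed.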
update_max_weights <- function(parents, children, rejected)
{
n <- length(rejected)
queue <- rep(0, n)
head <- tail <- 1
children_left <- rep(NA,n)
for(i in 1:n)
children_left[i] <- length(children[[i]])
weights <- rep(0, n)
max_weights <- rep(0,n)
for(i in 1:n)
if(!rejected[i] && children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
weights[i] <- 1
max_weights[i] <- 1
}
while(head < tail)
{
current <- queue[head]
head <- head + 1
num_parents <- length(parents[[current]])
num_unrejected <- 0
for(i in parents[[current]])
if(!rejected[i])
num_unrejected <- num_unrejected + 1
if(num_unrejected > 0)
{
for(i in parents[[current]])
if(!rejected[i])
{
weights[i] <- weights[i] + weights[current] / num_parents
children_left[i] <- children_left[i] - 1
if(children_left[i] == 0)
{
queue[tail] <- i
tail <- tail + 1
max_weights[i] <- weights[i]
}
}
weights[current] <- weights[current] - weights[current] * (num_unrejected/ num_parents)
}
}
return(max_weights)
}
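## Thin wrapper around lpSolve::lp() with constraints in dense triplet form
## (row, column, value); relaxation = TRUE solves the LP relaxation instead
## of the all-binary ILP.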
solveLP_lpsolve <- function(col_index, row_index, val_index, obj, rhs, dir, relaxation, minmax)
{
A <- cbind(row_index,col_index,val_index)
result <- lp(direction = minmax , objective.in = obj, const.dir = dir, const.rhs = rhs, all.bin=!relaxation, dense.const = A)
result <- result$objval
return(result)
}
getLPsolver <- function()
{
solveLP_lpsolve
}
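## Lower-bounds the number of rejected leaves: the program selects the
## smallest set of leaves that hits every current implication at least once
## while avoiding the candidate set entirely (the '=' 0 constraint). A
## fractional LP optimum is rounded up, which keeps the bound valid.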
LPdenom <- function(solveLP, implications, candidate, relaxation = TRUE)
{
maxelem <- max(unlist(implications),unlist(candidate))
n_impl <- length(implications)
num_non_zero <- sum(sapply(implications,length))
if(length(candidate) != 0)
num_non_zero <- num_non_zero + length(candidate)
row_index <- rep(NA,num_non_zero)
col_index <- rep(NA,num_non_zero)
val_index <- rep(NA,num_non_zero)
counter <- 1
for(i in 1:n_impl)
{
for(j in implications[[i]])
{
row_index[counter] <- i
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
}
}
if(length(candidate) != 0)
{
for(j in candidate)
{
row_index[counter] <- n_impl + 1
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
}
}
obj <- rep(1,maxelem)
if(length(candidate) != 0)
rhs <- c(rep(1,n_impl), 0)
else
rhs <- rep(1, n_impl)
if(length(candidate) != 0)
dir <- c(rep('>=', n_impl), '=')
else
dir <- rep('>=', n_impl)
result <- solveLP(col_index=col_index, row_index=row_index, val_index=val_index, obj=obj, rhs=rhs, dir=dir, relaxation=relaxation, minmax="min")
return(ifelse(isTRUE(all.equal(result, round(result))), result, ceiling(result)))
}
LPnumdenom <- function(solveLP, implications, candidate, parentscand, g1, palpha, relaxation = TRUE)
{
maxelem <- max(unlist(implications),unlist(candidate),unlist(parentscand))
n_impl <- length(implications)
n_par <- length(parentscand)
num_non_zero <- sum(sapply(implications,length))
if(n_par>0)
num_non_zero <- num_non_zero + sum(sapply(parentscand,length))*2
num_non_zero <- num_non_zero + length(candidate)
row_index <- rep(NA,num_non_zero)
col_index <- rep(NA,num_non_zero)
val_index <- rep(NA,num_non_zero)
counter <- 1
for(i in 1:n_impl)
{
for(j in implications[[i]])
{
row_index[counter] <- i
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
}
}
if(n_par>0)
{
row <- n_impl + 1
for(i in 1:n_par)
{
for(j in parentscand[[i]])
{
row_index[counter] <- row
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
row_index[counter] <- row
col_index[counter] <- maxelem + i
val_index[counter] <- -1
counter <- counter + 1
row <- row + 1
}
}
}
for(j in candidate)
{
row_index[counter] <- n_impl + sum(sapply(parentscand,length)) + 1
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
}
obj <- c(rep(palpha,maxelem), rep(g1,n_par))
rhs <- c(rep(1,n_impl),rep(0,sum(sapply(parentscand,length))+1))
dir <- c(rep('>=', n_impl),rep('<=',sum(sapply(parentscand,length))+1))
result <- solveLP(col_index=col_index, row_index=row_index, val_index=val_index, obj=obj, rhs=rhs, dir=dir, relaxation=relaxation, minmax="min")
return(result)
}
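## Given a DAG result and candidate hypotheses (`indicators`), solves an
## integer program that covers every relevant implication with leaves and
## counts how many of the candidate sets those leaves must touch; the
## optimum serves as a lower bound for the number of false hypotheses among
## the selected sets.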
DAGpick <- function(DAG, indicators, optimization = "ILP")
{
if(!DAG@twoway)
stop("The DAG you work with has no two-way properties. At the moment it is not yet possible to use a DAG structure without these properties in this function.")
implications <- DAG@leaf_based_sets[DAG@implications]
sets <- DAG@leaf_based_sets[indicators]
unionsets <- Reduce(union, sets, NULL)
implications <- Filter(function(i) all(i %in% unionsets), implications)
maxelem <- max(unlist(implications))
n_impl <- length(implications)
n_sets <- length(sets)
num_non_zero <- sum(sapply(implications,length)) + sum(sapply(sets,length))*2
row_index <- rep(NA,num_non_zero)
col_index <- rep(NA,num_non_zero)
val_index <- rep(NA,num_non_zero)
counter <- 1
for(i in 1:n_impl)
{
for(j in implications[[i]])
{
row_index[counter] <- i
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
}
}
row <- n_impl + 1
for(i in 1:n_sets)
{
for(j in sets[[i]])
{
row_index[counter] <- row
col_index[counter] <- j
val_index[counter] <- 1
counter <- counter + 1
row_index[counter] <- row
col_index[counter] <- maxelem + i
val_index[counter] <- -1
counter <- counter + 1
row <- row + 1
}
}
obj <- c(rep(0,maxelem), rep(1, n_sets))
rhs <- c(rep(1, n_impl), rep(0, sum(sapply(sets,length))))
dir <- c(rep('>', n_impl), rep('<', sum(sapply(sets,length))))
solveLP <- getLPsolver()
result <- solveLP(col_index=col_index, row_index=row_index, val_index=val_index, obj=obj, rhs=rhs, dir=dir, relaxation=(optimization == "LP"), minmax="min")
return(ifelse(isTRUE(all.equal(result, round(result))), result, ceiling(result)))
}
setMethod("show", "DAG", function(object) {
meth <- switch(object@method,
all = "all-parents",
any = "any-parent",
holm = "structured holm")
cat("The ", meth, " method result on ", length(object@sets), " hypotheses.\n", sep="")
cat("There are ", sum(object@rejected), " hypotheses rejected out of a total of ", length(object@sets), "\n", "at an alpha-level of ", object@alpha, ".\n", sep="")
})
setMethod("summary", "DAG", function(object) {
meth <- switch(object@method,
all = "all-parents",
any = "any-parent",
holm = "structured holm")
cat("The ", meth, " method result on ", length(object@sets), " hypotheses.\n", sep="")
cat("There are ", sum(object@rejected), " hypotheses rejected out of a total of ", length(object@sets), "\n", "at an alpha-level of ", object@alpha, ".\n", sep="")
})
setMethod("implications", "DAG", function(object) {
allpvalues <- object@allpvalues
if(length(names(allpvalues))==0)
names(allpvalues) <- 1:length(allpvalues)
impls <- allpvalues[object@implications]
impls
})
setMethod("pvalue", "DAG", function(object, indicator) {
object@allpvalues[indicator]
})
setMethod("alpha", "DAG", function(object) {
object@alpha
})
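## Hypothetical usage sketch (`dagstruct`, a previously constructed
## DAGstructure, and `mytest`, a function mapping a hypothesis set to a
## p-value, are assumptions, not defined in this source):
if (FALSE) {
  res <- DAGmethod(dagstruct, mytest, alpha_max = 0.05, method = "all")
  summary(res)
  implications(res)
}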
|
ts <- ts1
type <- "copula"
N <- 128
R <- 100
freq <- 2*pi*(1:16)/32
levels <- c(0.25, 0.5, 0.75)
J <- length(freq)
K <- length(levels)
csd <- quantileSD(N=2^8, seed.init = 2581, type = type,
ts = ts, levels.1=levels, R = 100)
sims <- array(0, dim=c(4,R,J,K,K))
weight <- kernelWeight(W=W1, bw=0.3)
for (i in 1:R) {
Y <- ts1(N)
CR <- quantilePG(Y, levels.1=levels, type="clipped")
sims[1,i,,,] <- getValues(CR, frequencies=freq)[,,,1]
LP <- quantilePG(Y, levels.1=levels, type="qr")
sims[2,i,,,] <- getValues(LP, frequencies=freq)[,,,1]
sCR <- smoothedPG(CR, weight=weight)
sims[3,i,,,] <- getValues(sCR, frequencies=freq)[,,,1]
sLP <- smoothedPG(LP, weight=weight)
sims[4,i,,,] <- getValues(sLP, frequencies=freq)[,,,1]
}
trueV <- getValues(csd, frequencies=freq)
SqDev <- array(apply(sims, c(1,2),
function(x) {abs(x-trueV)^2}), dim=c(J,K,K,4,R))
rimse <- sqrt(apply(SqDev, c(2,3,4), mean))
rimse
f <- getFrequencies(sCR)
plot(sCR, qsd=csd, frequencies = f[f > 0])
plot(sCR, qsd=csd, plotPG=TRUE, frequencies = f[f > 0])
plot(sLP, qsd=csd, frequencies = f[f > 0],
ptw.CIs = 0, type.scaling="real-imaginary")
plot(sLP, qsd=csd, plotPG=TRUE, frequencies = f[f > 0],
ptw.CIs = 0, type.scaling="real-imaginary")
|
initialize<-function(){
utils::data("envData",package="SubpathwayLNCE")
}
Getenvir<-function(envName){
  # Note: the argument must not be named 'envData', or it would shadow the
  # data environment loaded by initialize() and get() would be handed a
  # character vector as its 'envir' argument.
  if(!exists("envData")) initialize()
  return(get(envName,envir=envData))
}
|
setMethod('boundaries', signature(x='RasterLayer'),
function(x, type='inner', classes=FALSE, directions=8, asNA=FALSE, filename="", ...) {
stopifnot( nlayers(x) == 1 )
stopifnot( hasValues(x) )
filename <- trim(filename)
out <- raster(x)
gll <- as.integer( .isGlobalLonLat(out) )
type <- tolower(type)
if (! type %in% c('inner', 'outer')) {
stop("type must be 'inner', or 'outer'")
}
if (type=='inner') {
type <- FALSE
} else {
type <- TRUE
}
classes <- as.logical(classes)
directions <- as.integer(directions)
stopifnot(directions %in% c(4,8))
datatype <- list(...)$datatype
if (is.null(datatype)) {
datatype <- 'INT2S'
}
if (canProcessInMemory(out, 4)) {
x <- as.matrix(x)
if (gll) {
x <- cbind(x[, ncol(x)], x, x[, 1])
} else {
x <- cbind(x[, 1], x, x[, ncol(x)])
}
x <- rbind(x[1,], x, x[nrow(x),])
paddim <- as.integer(dim(x))
x <- .edge(round(t(x)), paddim, classes[1], type[1], directions[1])
if (asNA) {
x[x==0] <- as.integer(NA)
}
x <- matrix(x, nrow=paddim[1], ncol=paddim[2], byrow=TRUE)
x <- x[2:(nrow(x)-1), 2:(ncol(x)-1)]
x <- setValues(out, as.vector(t(x)))
if (filename != '') {
x <- writeRaster(x, filename, datatype=datatype, ...)
}
return(x)
} else {
out <- writeStart(out, filename, datatype=datatype, ...)
tr <- blockSize(out, minblocks=3, minrows=3)
pb <- pbCreate(tr$n, label='boundaries', ...)
nc <- ncol(out)+2
v <- getValues(x, row=1, nrows=tr$nrows[1]+1)
v <- matrix(v, ncol=tr$nrows[1]+1)
if (gll) {
v <- rbind(v[nrow(v),], v, v[1,])
} else {
v <- rbind(v[1,], v, v[nrow(v),])
}
v <- round(cbind(v[,1], v))
v <- .edge(v, as.integer(c(tr$nrows[1]+2, nc)), classes, type, directions)
if (asNA) {
v[v==0] <- as.integer(NA)
}
v <- matrix(v, ncol=nc, byrow=TRUE)
v <- as.integer(t(v[2:(nrow(v)-1), 2:(ncol(v)-1)]))
out <- writeValues(out, v, 1)
pbStep(pb, 1)
if (tr$n > 2) {
for (i in 2:(tr$n-1)) {
v <- getValues(x, row=tr$row[i]-1, nrows=tr$nrows[i]+2)
				v <- matrix(v, ncol=tr$nrows[i]+2)
if (gll) {
v <- rbind(v[nrow(v),], v, v[1,])
} else {
v <- rbind(v[1,], v, v[nrow(v),])
}
v <- .edge(round(v), as.integer(c(tr$nrows[i]+2, nc)), classes, type, directions)
v <- matrix(v, ncol=nc, byrow=TRUE)
v <- as.integer(t(v[2:(nrow(v)-1), 2:(ncol(v)-1)]))
out <- writeValues(out, v, tr$row[i])
pbStep(pb, i)
}
}
i <- tr$n
v <- getValues(x, row=tr$row[i]-1, nrows=tr$nrows[i]+1)
v <- matrix(v, ncol=tr$nrows[i]+1)
if (gll) {
v <- rbind(v[nrow(v),], v, v[1,])
} else {
v <- rbind(v[1,], v, v[nrow(v),])
}
v <- round(cbind(v, v[,ncol(v)]))
v <- .edge(v, as.integer(c(tr$nrows[i]+2, nc)), classes, type, directions)
v <- matrix(v, ncol=nc, byrow=TRUE)
v <- as.integer(t(v[2:(nrow(v)-1), 2:(ncol(v)-1)]))
out <- writeValues(out, v, tr$row[i])
pbStep(pb, tr$n)
out <- writeStop(out)
pbClose(pb)
}
return(out)
}
)
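## Minimal usage sketch on a synthetic raster (not from the original source):
if (FALSE) {
  library(raster)
  r <- raster(nrows = 18, ncols = 36)
  values(r) <- round(runif(ncell(r)) * 3)
  b <- boundaries(r, type = "inner", classes = TRUE, directions = 8)
  plot(b)
}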
|
knitr::opts_chunk$set(
collapse = TRUE,
  comment = "#>"
)
knitr::include_graphics('MSCE.jpg',dpi=96)
library(msce)
data("lungCancerSmoking")
lungCancerSmoking$laggedAge <- lungCancerSmoking$age-5
lungCancerSmoking$nonSmoke <- (lungCancerSmoking$ageStart==lungCancerSmoking$ageQuit) |
(lungCancerSmoking$ageStart >= lungCancerSmoking$laggedAge)
lungCancerSmoking$exSmoke <- (lungCancerSmoking$ageQuit < lungCancerSmoking$laggedAge) &
!lungCancerSmoking$nonSmoke
lungCancerSmoking$smoke <- !lungCancerSmoking$nonSmoke & !lungCancerSmoking$exSmoke
t<-matrix(0,nrow=NROW(lungCancerSmoking),ncol=3)
t[,3] <- lungCancerSmoking$laggedAge
t[lungCancerSmoking$smoke,2] <-lungCancerSmoking$ageStart[lungCancerSmoking$smoke]
t[lungCancerSmoking$exSmoke,2] <- lungCancerSmoking$ageQuit[lungCancerSmoking$exSmoke]
t[lungCancerSmoking$exSmoke,1] <- lungCancerSmoking$ageStart[lungCancerSmoking$exSmoke]
smInd <- matrix(0,nrow=NROW(lungCancerSmoking),ncol=3)
smInd[lungCancerSmoking$smoke,3] <- 1
smInd[lungCancerSmoking$exSmoke,2] <- 1
wrap <-function(pars)
{
alpha <- matrix(1,nrow=1,ncol=3)
Nnu0 <- matrix(exp(pars[1]),nrow=1,ncol=3)
nu1 <- matrix(exp(pars[2]),nrow=1,ncol=3)
gamma <- matrix(pars[3],nrow=NROW(lungCancerSmoking),ncol=3) +
pars[4]*smInd +
pars[5]*smInd * (lungCancerSmoking$cigsPerDay>5)
parList <- list(Nnu0=Nnu0, alpha=alpha,gamma=gamma,nu1=nu1)
result <- tsce(t,parList)
return (result$hazard)
}
loglik <- function(pars)
{
return (-sum(dpois(lungCancerSmoking$cases, lungCancerSmoking$pyr*wrap(pars), log = TRUE)))
}
minResult <- nlminb(start = c(-3,-14,0.1,0.05,0.0), objective = loglik, upper=c(0,0,0.3,0.3,0.3))
bestEstimates <- minResult$par
bestEstimates
alpha <- matrix(1,nrow=1,ncol=3)
Nnu0 <- matrix(exp(bestEstimates[1]),nrow=1,ncol=3)
nu1 <- matrix(exp(bestEstimates[2]),nrow=1,ncol=3)
t<-matrix(0,nrow=95,ncol=3)
t[,3]<-1:95
gamma <- matrix(bestEstimates[3],nrow=95,ncol=3)
parList <- list(Nnu0=Nnu0, alpha=alpha,gamma=gamma,nu1=nu1)
resultN <- tsce(t,parList)
plot(6:100,resultN$hazard,type="l",log="y",
xlab="age",ylab="hazard [per year]",ylim=c(1e-6,1e-2))
t[20:95,2] <-20
gamma[20:95,3] <- bestEstimates[3] + bestEstimates[4] + bestEstimates[5]
parList <- list(Nnu0=Nnu0, alpha=alpha,gamma=gamma,nu1=nu1)
resultS <- tsce(t,parList)
lines(6:100,resultS$hazard,type="l",lty=2)
t[50:95,1] <-20
t[50:95,2] <-50
gamma[50:95,2] <- bestEstimates[3] + bestEstimates[4] + bestEstimates[5]
gamma[50:95,3] <- bestEstimates[3]
parList <- list(Nnu0=Nnu0, alpha=alpha,gamma=gamma,nu1=nu1)
resultE <- tsce(t,parList)
lines(6:100,resultE$hazard,type="l",lty=3)
legend("topleft", legend=c("Smoking", "Quitting", "Non-Smoking"),
lty=c(2,3,1),cex=0.8)
|
getNextR <- function(X, R.old, ind.train, block.size,
vec.center, vec.scale, use.Eigen) {
n <- length(ind.train)
m <- ncol(X)
L <- nrow(R.old)
R <- matrix(NA, L, m)
G <- matrix(0, L, n)
intervals <- CutBySize(m, block.size)
nb.block <- nrow(intervals)
for (j in 1:nb.block) {
ind <- seq2(intervals[j, ])
tmp <- scaling(X[ind.train, ind], vec.center[ind], vec.scale[ind])
G <- incrMat(G, tcrossprod(R.old[, ind], tmp))
}
for (j in 1:nb.block) {
ind <- seq2(intervals[j, ])
tmp <- scaling(X[ind.train, ind], vec.center[ind], vec.scale[ind])
R[, ind] <- mult(G, tmp, use.Eigen)
}
  R / m
}
BigMult2 <- function(X, mat, ind.train, block.size,
vec.center, vec.scale, use.Eigen) {
res <- matrix(0, length(ind.train), ncol(mat))
intervals <- CutBySize(ncol(X), block.size)
nb.block <- nrow(intervals)
for (j in 1:nb.block) {
ind <- seq2(intervals[j, ])
tmp <- scaling(X[ind.train, ind], vec.center[ind], vec.scale[ind])
if (use.Eigen) {
res <- incrMat(res, multEigen(tmp, mat[ind, ]))
} else {
res <- incrMat(res, tmp %*% mat[ind, ])
}
}
res
}
BigMult3 <- function(mat, X, ind.train, block.size,
vec.center, vec.scale, use.Eigen) {
m <- ncol(X)
res <- matrix(0, nrow(mat), m)
intervals <- CutBySize(m, block.size)
nb.block <- nrow(intervals)
for (j in 1:nb.block) {
ind <- seq2(intervals[j, ])
tmp <- scaling(X[ind.train, ind], vec.center[ind], vec.scale[ind])
res[, ind] <- mult(mat, tmp, use.Eigen)
}
res
}
big_randomSVD2 <- function(X, fun.scaling,
ind.train = seq(nrow(X)),
block.size = 1e3,
K = 10, max.I = 20,
use.Eigen = TRUE) {
check_X(X)
stopifnot((ncol(X) - K) >= ((max.I + 1) * (K + 12)))
L <- K + 12
n <- length(ind.train)
m <- ncol(X)
stats <- fun.scaling(X, ind.train)
means <- stats$mean
sds <- stats$sd
rm(stats)
R <- list()
R[[1]] <- BigMult3(mat = matrix(rnorm(L * n), L, n),
X, ind.train, block.size,
means, sds, use.Eigen)
i <- 1
MSE.old <- Inf
while(i <= max.I) {
R[[i+1]] <- getNextR(X, R[[i]], ind.train,
block.size, means, sds,
use.Eigen)
mylm <- lm(as.numeric(R[[i+1]]) ~ as.numeric(R[[i]]) - 1)
print(r2 <- summary(mylm)$r.squared)
if (r2 > (1 - 1e-8)) {
      cat(sprintf("Stop after %d iterations\n", i))
break
}
i <- i + 1
}
H.svd <- svd(t(do.call(rbind, R)), nv = 0)
rm(R); gc()
T.t <- BigMult2(X, H.svd$u, ind.train, block.size, means, sds,
use.Eigen = use.Eigen)
T.svd <- svd(T.t, nu = K, nv = K)
list(d = T.svd$d[1:K], u = T.svd$u, v = H.svd$u %*% T.svd$v,
means = means, sds = sds)
}
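## Hypothetical usage sketch (`X` is a big.matrix; `myScale`, which must
## return list(mean = ..., sd = ...) over the columns, is an assumption):
if (FALSE) {
  myScale <- function(X, ind.train) {
    m <- s <- numeric(ncol(X))
    for (j in seq_len(ncol(X))) {
      m[j] <- mean(X[ind.train, j])
      s[j] <- sd(X[ind.train, j])
    }
    list(mean = m, sd = s)
  }
  svd10 <- big_randomSVD2(X, fun.scaling = myScale, K = 10)
}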
|
LogNormalVaRETLPlot2DCL<- function(...){
if (nargs() < 4) {
stop("Too few arguments")
}
if (nargs() > 5) {
stop("Too many arguments")
}
args <- list(...)
if (nargs() == 5) {
mu <- args$mu
investment <- args$investment
cl <- args$cl
sigma <- args$sigma
hp <- args$hp
}
if (nargs() == 4) {
mu <- mean(args$returns)
investment <- args$investment
cl <- args$cl
sigma <- sd(args$returns)
hp <- args$hp
}
mu <- as.matrix(mu)
mu.row <- dim(mu)[1]
mu.col <- dim(mu)[2]
if (max(mu.row, mu.col) > 1) {
stop("Mean must be a scalar")
}
sigma <- as.matrix(sigma)
sigma.row <- dim(sigma)[1]
sigma.col <- dim(sigma)[2]
if (max(sigma.row, sigma.col) > 1) {
stop("Standard deviation must be a scalar")
}
cl <- as.matrix(cl)
cl.row <- dim(cl)[1]
cl.col <- dim(cl)[2]
if (min(cl.row, cl.col) > 1) {
stop("Confidence level must be a vector")
}
hp <- as.matrix(hp)
hp.row <- dim(hp)[1]
hp.col <- dim(hp)[2]
if (max(hp.row, hp.col) > 1) {
stop("Holding period must be a scalar")
}
if (cl.row > cl.col) {
cl <- t(cl)
}
if (sigma < 0) {
stop("Standard deviation must be non-negative")
}
if (max(cl) >= 1){
stop("Confidence level(s) must be less than 1")
}
if (min(cl) <= 0){
stop("Confidence level(s) must be greater than 0")
}
if (min(hp) <= 0){
stop("Holding period must be greater than 0")
}
cl.row <- dim(cl)[1]
cl.col <- dim(cl)[2]
VaR <- investment - exp(sigma[1,1] * sqrt(hp[1,1]) * qnorm(1 - cl, 0, 1)+mu[1,1]*hp[1,1]*matrix(1,cl.row,cl.col) + log(investment))
n <- 1000
cl0 <- cl
delta.cl <- (1 - cl) / n
v <- VaR
for (i in 1:(n-1)) {
cl <- cl0 + i * delta.cl
v <- v + investment - exp(sigma[1,1] * sqrt(hp[1,1]) *
qnorm(1 - cl, 0, 1) + mu[1,1] * hp[1,1] *
matrix(1, cl.row, cl.col) + log(investment))
}
v <- v/n
ymin <- min(VaR, v)
ymax <- max(VaR, v)
xmin <- min(cl0)
xmax <- max(cl0)
plot(cl0, VaR, type = "l", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "Confidence level", ylab = "VaR/ETL")
par(new=TRUE)
plot(cl0, v, type = "l", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "Confidence level", ylab = "VaR/ETL")
title("Lognormal VaR and ETL against confidence level")
xmin <- min(cl0)+.3*(max(cl0)-min(cl0))
text(xmin,max(VaR)-.1*(max(VaR)-min(VaR)),
'Input parameters', cex=.75, font = 2)
text(xmin,max(VaR)-.15*(max(VaR)-min(VaR)),
paste('Daily mean geometric return = ',round(mu[1,1],3)),cex=.75)
text(xmin,max(VaR)-.2*(max(VaR)-min(VaR)),
paste('Stdev. of daily geometric returns = ',round(sigma[1,1],3)),cex=.75)
text(xmin,max(VaR)-.25*(max(VaR)-min(VaR)),
paste('Investment size = ',investment),cex=.75)
text(xmin,max(VaR)-.3*(max(VaR)-min(VaR)),
paste('Holding period = ',hp,'days'),cex=.75)
  text(max(cl0)-.4*(max(cl0)-min(cl0)),min(VaR)+.3*(max(VaR)-min(VaR)),'Upper line - ETL',cex=.75)
  text(max(cl0)-.4*(max(cl0)-min(cl0)),min(VaR)+.2*(max(VaR)-min(VaR)),'Lower line - VaR',cex=.75)
}
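## Illustrative call (all parameter values are arbitrary, not prescribed by
## this source): plot lognormal VaR and ETL over a grid of confidence levels.
if (FALSE) {
  LogNormalVaRETLPlot2DCL(mu = 0, sigma = 0.02, investment = 100,
                          cl = seq(0.90, 0.99, 0.001), hp = 1)
}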
|
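## Why the construction below works (sketch): with pm = 1, beta = 1 and scale
## gamma = cos(pi*a/2)^(1/a), z is standard positive a-stable with Laplace
## transform E[exp(-s*z)] = exp(-s^a), so conditionally on x each
## y = x^(1/a) * z satisfies E[exp(-s*y) | x] = exp(-x * s^a).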
n <- 5
x <- rexp(n)
a <- 0.8
sigma <- (cos(pi*a/2))^(1/a)
z <- stabledist::rstable(n = n, alpha = a, beta = 1, gamma = sigma, delta = 0, pm = 1)
y <- x^(1/a) * z
y
|
msc_equity <- function(year,
month,
cod,
matrix_type,
class,
value,
verbose = FALSE) {
if (!(matrix_type %in% c("MSCC", "MSCE"))) {
stop("Argument matrix_type must be 'MSCC' or 'MSCE'")
}
if (!(class %in% 1:4)) {
stop("Argument class must be 1, 2, 3 or 4")
}
if (!(value %in% c("beginning_balance", "period_change", "ending_balance"))) {
    stop("Argument value must be 'beginning_balance', 'period_change' or 'ending_balance'")
}
get(
type = "msc_patrimonial",
an_referencia = year,
me_referencia = month,
id_ente = cod,
co_tipo_matriz = matrix_type,
classe_conta = class,
id_tv = value,
verbose = verbose
)
}
|
get_SDA_interpretation <- function(rulename,
method = c("Dominant Component",
"Dominant Condition",
"Weighted Average",
"None"),
areasymbols = NULL,
mukeys = NULL,
query_string = FALSE,
not_rated_value = NA_real_) {
q <- .constructInterpQuery(
method = method,
interp = rulename,
areasymbols = areasymbols,
mukeys = mukeys
)
if (query_string) return(q)
res <- suppressMessages(soilDB::SDA_query(q))
if (inherits(res, 'try-error')) {
warnings()
stop(attr(res, 'condition'))
}
ratingcols <- colnames(res)[grep("^rating_", colnames(res))]
res[] <- lapply(colnames(res), function(x) {
y <- res[[x]]
if(x %in% ratingcols) {
y[is.na(y) | y == 99] <- not_rated_value
return(y)
}
y
})
return(res)
}
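## Illustrative call (the rule name and survey area are examples only; the
## query goes out to Soil Data Access, hence the `if (FALSE)` guard):
if (FALSE) {
  get_SDA_interpretation("FOR - Potential Seedling Mortality",
                         method = "Dominant Condition",
                         areasymbols = "CA630")
}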
.cointerpRuleNames <- function() {
c("AGR - Barley Yield (MT)", "AGR - Conventional Tillage (TX)",
"AGR - Grape non-irrigated (MO)", "AGR - Nitrate Leaching Potential, Irrigated (WA)",
"AGR - No Till, Tile Drained (TX)", "AGR - Pasture hayland (MO)",
"AGR - Pesticide Loss Potential-Soil Surface Runoff", "AGR - Plant Growth Index PGI no Climate Adj. (TX)",
"AGR - Spring Wheat Yield (MT)", "AGR - Water Erosion Potential Wide Ratings Array (TX)",
"AGR - Wind Erosion Potential Wide Ratings Array (TX)", "AGR-Agronomic Concerns (ND)",
"AGR-Pesticide and Nutrient Leaching Potential, NIRR (ND)", "AGR-Surface Salinity (ND)",
"Alaska Exempt Wetland Potential (AK)", "American Wine Grape Varieties Site Desirability (Short)",
"AWM - Irrigation Disposal of Wastewater (MD)", "AWM - Manure and Food Processing Waste (DE)",
"AWM - Manure Stacking - Site Evaluation (TX)", "AWM - Phosphorus Management (TX)",
"AWM - Slow Rate Process Treatment of Wastewater", "BLM - Pygmy Rabbit Habitat Potential",
"BLM - Rangeland Tillage", "BLM - Site Degradation Susceptibility",
"CA Prime Farmland (CA)", "CLASS RULE - Depth to root limiting layer (5 classes) (NPS)",
"CPI - Alfalfa Hay, NIRR - Palouse, Northern Rocky Mtns. (ID)",
"CPI - Barley, NIRR - Eastern Idaho Plateaus (ID)", "CPI - Grass Hay, IRR - Eastern Idaho Plateaus (ID)",
"CPI - Grass Hay, NIRR - Palouse, Northern Rocky Mtns. (ID)",
"CPI - Potatoes, IRR - Snake River Plains (ID)", "CPI - Small Grains, NIRR - Palouse Prairies (OR)",
"CPI - Small Grains, NIRR - Palouse Prairies (WA)", "CPI - Small Grains, NIRR - Snake River Plains (ID)",
"CPI - Wheat, NIRR - Eastern Idaho Plateaus (ID)", "CPI - Wild Hay, NIRR - Eastern Idaho Plateaus (ID)",
"CPI - Wild Hay, NIRR - Palouse, Northern Rocky Mtns. (ID)",
"CPI - Wild Hay, NIRR - Palouse, Northern Rocky Mtns. (WA)",
"Deep Infiltration Systems", "DHS - Site for Composting Facility - Surface",
"Elevated Sand Mound Septic System (DE)", "ENG - Animal Disposal by Composting (Catastrophic) (WV)",
"ENG - Application of Municipal Sludge (TX)", "ENG - Closed-Loop Horizontal Geothermal Heat Pump (CT)",
"ENG - Construction Materials; Gravel Source (IN)", "ENG - Construction Materials; Gravel Source (NE)",
"ENG - Construction Materials; Reclamation (MD)", "ENG - Construction Materials; Reclamation (MI)",
"ENG - Construction Materials; Roadfill (GA)", "ENG - Construction Materials; Sand Source (CT)",
"ENG - Construction Materials; Sand Source (GA)", "ENG - Construction Materials; Topsoil (ID)",
"ENG - Construction Materials; Topsoil (OH)", "ENG - Daily Cover for Landfill (OH)",
"ENG - Disposal Field (NJ)", "ENG - Disposal Field Type Inst (NJ)",
"ENG - Dwellings W/O Basements", "ENG - Dwellings With Basements",
"ENG - Dwellings without Basements (AK)", "ENG - Lawn and Landscape (OH)",
"ENG - Lawn, Landscape, Golf Fairway", "ENG - Local Roads and Streets (AK)",
"ENG - Local Roads and Streets (GA)", "ENG - On-Site Waste Water Lagoons (MO)",
"ENG - OSHA Soil Types (TX)", "ENG - Pier Beam Building Foundations (TX)",
"ENG - Sanitary Landfill (Area)", "ENG - Sanitary Landfill (Area) (AK)",
"ENG - Septage Application - Incorporation or Injection (MN)",
"ENG - Septic System; Disinfection, Surface Application (TX)",
"ENG - Septic Tank Absorption Fields (FL)", "ENG - Septic Tank Absorption Fields (OH)",
"ENG - Septic Tank Absorption Fields - Trench (MN)", "ENG - Sewage Lagoons (AK)",
"ENG - Shallow Excavations (OH)", "ENG - Stormwater Management / Infiltration (NY)",
"ENG - Stormwater Management / Wetlands (NY)", "FOR - Construction Limitations for Haul Roads/Log Landings",
"FOR - Displacement Hazard", "FOR - Harvest Equipment Operability (DE)",
"FOR - Harvest Equipment Operability (ME)", "FOR - Harvest Equipment Operability (MI)",
"FOR - Log Landing Suitability (ID)", "FOR - Log Landing Suitability (MI)",
"FOR - Log Landing Suitability (OR)", "FOR - Mechanical Planting Suitability (OH)",
"FOR - Mechanical Site Preparation (Surface) (MD)", "FOR - Mechanical Site Preparation (Surface) (OH)",
"FOR - Potential Erosion Hazard (Off-Road/Off-Trail) (MI)", "FOR - Potential Erosion Hazard (Off-Road/Off-Trail) (OH)",
"FOR - Potential Seedling Mortality (FL)", "FOR - Potential Seedling Mortality (OH)",
"FOR - Road Suitability (Natural Surface) (VT)", "FOR - Soil Rutting Hazard",
"FOTG - Indiana Soy Bean Yield Calculation (IN)", "FOTG - Indiana Wheat Yield Calculation (IN)",
"GRL - Fencing, Post Depth =<24 inches", "GRL - Fencing, Post Depth Less Than 24 inches (TX)",
"GRL - Fencing, Post Depth Less Than 36 inches (TX)", "GRL - NV range seeding (Wind C = 10) (NV)",
"GRL - NV range seeding (Wind C = 30) (NV)", "GRL - Rangeland Chaining (TX)",
"GRL - Rangeland Disking (TX)", "GRL - Rangeland Dozing/Grubbing (TX)",
"GRL - Utah Juniper Encroachment Potential", "GRL - Western Juniper Encroachment Potential (OR)",
"Hybrid Wine Grape Varieties Site Desirability (Medium)", "Lined Retention Systems",
"MIL - Trafficability Veh. Type 1 dry season (DOD)", "MIL - Trafficability Veh. Type 3 1-pass wet season (DOD)",
"MIL - Trafficability Veh. Type 3 dry season (DOD)", "MIL - Trafficability Veh. Type 4 dry season (DOD)",
"MIL - Trafficability Veh. Type 5 1-pass wet season (DOD)", "NCCPI - NCCPI Corn Submodel (I)",
"NCCPI - NCCPI Small Grains Submodel (II)", "NCCPI - NCCPI Soybeans Submodel (I)",
"Pressure Dose Full Depth Septic System (DE)", "REC - Camp Areas; Primitive (AK)",
"REC - Paths and Trails (CT)", "SAS - Eastern Oyster Habitat Restoration Suitability",
"SAS - Mooring Anchor - Mushroom", "Septic System CO-OP RFS III w/At-Grade Bed (PA)",
"Septic System Free Access Sand Filter w/At-Grade Bed (PA)",
"Septic System Modified Subsurface Sand Filter (Alt.) (PA)",
"Septic System Shallow In Ground Trench (conventional) (WV)",
"Septic System Subsurface Sand Filter Bed (conventional) (PA)",
"Septic System Subsurface Sand Filter Trench (standard) (PA)",
"URB - Commercial Brick Buildings w/Concrete Slab (TX)", "URB - Commercial Metal Bldg; w/Concrete Slab (TX)",
"URB - Concrete Driveways and Sidewalks (TX)", "URB - Dwellings on Concrete Slab (TX)",
"URB - Dwellings With Basements (TX)", "URB - Lawns and Ornamental Plantings (TX)",
"URB - Rural Residential Development w/Basement (TX)", "URB - Urban Residential Development w/Basement (TX)",
"URB/REC - Paths and Trails", "URB/REC - Paths and Trails (GA)",
"URB/REC - Playgrounds (MI)", "Vinifera Wine Grape Site Desirability (Long)",
"WAQ - Soil Pesticide Leaching Potential (TX)", "WLF - Crawfish Aquaculture (TX)",
"WLF - Desertic Herbaceous Plants (TX)", "WLF - Gopher Tortoise Burrowing Suitability",
"WLF - Grain & Seed Crops for Food and Cover (TX)", "WMS - Constructing Grassed Waterways (OH)",
"WMS - Constructing Terraces & Diversions (TX)", "WMS - Drainage (OH)",
"AGR - Avocado Root Rot Hazard (CA)", "AGR - California Revised Storie Index (CA)",
"AGR - Hops Site Suitability (WA)", "AGR - Map Unit Cropland Productivity (MN)",
"AGR - Nitrate Leaching Potential, Nonirrigated (WA)", "AGR - No Till (TX)",
"AGR - Pesticide Loss Potential-Soil Surface Runoff (NE)", "AGR - Plant Growth Index PGI with Climate Adj. (TX)",
"AGR - Plant Growth Index PGI with Climate Adj. MAP,MAAT (TX)",
"AGR - Ridge Till (TX)", "AGR - Selenium Leaching Potential (CO)",
"AGR - Wind Erosion Potential (TX)", "AGR - Winter Wheat Yield (MT)",
"AGR-Pesticide and Nutrient Runoff Potential (ND)", "AGR-Rooting Depth (ND)",
"American Wine Grape Varieties Site Desirability (Long)", "American Wine Grape Varieties Site Desirability (Medium)",
"American Wine Grape Varieties Site Desirability (Very Long)",
"AWM - Animal Mortality Disposal (Catastrophic) (MO)", "AWM - Irrigation Disposal of Wastewater (OH)",
"AWM - Irrigation Disposal of Wastewater (VT)", "AWM - Land Application of Municipal Biosolids, summer (OR)",
"AWM - Manure and Food Processing Waste (MD)", "AWM - Manure and Food Processing Waste (OH)",
"AWM - Overland Flow Process Treatment of Wastewater (VT)", "AWM - Rapid Infil Disposal of Wastewater (DE)",
"AWM - Sensitive Soil Features (MN)", "BLM - Fencing", "BLM - Fire Damage Susceptibility",
"BLM - Mechanical Treatment, Rolling Drum", "BLM - Rangeland Drill",
"BLM - Rangeland Seeding, Colorado Plateau Ecoregion", "BLM - Rangeland Seeding, Great Basin Ecoregion",
"BLM-Reclamation Suitability (MT)", "CLASS RULE - Depth to lithic bedrock (5 classes) (NPS)",
"CLASS RULE - Soil Inorganic Carbon kg/m2 to 2m (NPS)", "CLASS RULE - Soil Organic Carbon kg/m2 to 2m (NPS)",
"CLR-pastureland limitation (IN)", "CPI - Alfalfa Hay, NIRR - Palouse, Northern Rocky Mtns. (WA)",
"CPI - Barley, IRR - Eastern Idaho Plateaus (ID)", "CPI - Grass Hay, IRR - Klamath Valleys and Basins (OR)",
"CPI - Small Grains, IRR - Snake River Plains (ID)", "CPI - Wheat, IRR - Eastern Idaho Plateaus (ID)",
"DHS - Catastrophic Event, Large Animal Mortality, Burial", "DHS - Catastrophic Mortality, Large Animal Disposal, Pit",
"DHS - Catastrophic Mortality, Large Animal Disposal, Trench",
"DHS - Potential for Radioactive Bioaccumulation", "DHS - Potential for Radioactive Sequestration",
"DHS - Suitability for Composting Medium and Final Cover", "ENG - Construction Materials; Gravel Source",
"ENG - Construction Materials; Gravel Source (AK)", "ENG - Construction Materials; Gravel Source (ID)",
"ENG - Construction Materials; Gravel Source (OH)", "ENG - Construction Materials; Gravel Source (VT)",
"ENG - Construction Materials; Gravel Source (WA)", "ENG - Construction Materials; Roadfill (OH)",
"ENG - Construction Materials; Sand Source (OR)", "ENG - Construction Materials; Sand Source (WA)",
"ENG - Construction Materials; Topsoil (GA)", "ENG - Construction Materials; Topsoil (MD)",
"ENG - Daily Cover for Landfill", "ENG - Daily Cover for Landfill (AK)",
"ENG - Disposal Field Suitability Class (NJ)", "ENG - Dwellings W/O Basements (OH)",
"ENG - Dwellings with Basements (AK)", "ENG - Large Animal Disposal, Pit (CT)",
"ENG - Lawn, landscape, golf fairway (CT)", "ENG - Local Roads and Streets (OH)",
"ENG - On-Site Waste Water Absorption Fields (MO)", "ENG - Septic Tank Absorption Fields",
"ENG - Septic Tank Absorption Fields (MD)", "ENG - Septic Tank Absorption Fields (TX)",
"ENG - Septic Tank, Gravity Disposal (TX)", "ENG - Sewage Lagoons",
"ENG - Small Commercial Buildings (OH)", "ENG - Soil Potential Ratings of SSDS (CT)",
"FOR (USFS) - Road Construction/Maintenance (Natural Surface)",
"FOR - Compaction Potential (WA)", "FOR - Damage by Fire (OH)",
"FOR - General Harvest Season (VT)", "FOR - Hand Planting Suitability",
"FOR - Hand Planting Suitability, MO13 (DE)", "FOR - Hand Planting Suitability, MO13 (MD)",
"FOR - Log Landing Suitability", "FOR - Log Landing Suitability (ME)",
"FOR - Log Landing Suitability (VT)", "FOR - Log Landing Suitability (WA)",
"FOR - Mechanical Planting Suitability (CT)", "FOR - Mechanical Planting Suitability, MO13 (MD)",
"FOR - Mechanical Site Preparation (Deep)", "FOR - Mechanical Site Preparation (Deep) (DE)",
"FOR - Mechanical Site Preparation (Surface) (DE)", "FOR - Mechanical Site Preparation (Surface) (MI)",
"FOR - Mechanical Site Preparation; Surface (ME)", "FOR - Potential Erosion Hazard, Road/Trail, Spring Thaw (AK)",
"FOR - Potential Seedling Mortality (PIA)", "FOR - Potential Seedling Mortality(ME)",
"FOR - Puddling Hazard", "FOR - Road Suitability (Natural Surface) (ME)",
"FOR - Road Suitability (Natural Surface) (WA)", "FOR - Soil Rutting Hazard (OH)",
"FOR - Soil Sustainability Forest Biomass Harvesting (CT)", "FOR - White Oak Suitability (MO)",
"FOR-Biomass Harvest (WI)", "FOTG - Indiana Corn Yield Calculation (IN)",
"GRL - Excavations to 24 inches for Plastic Pipelines (TX)",
"GRL - Fencing, 24 inch Post Depth (MT)", "GRL - NV range seeding (Wind C = 100) (NV)",
"GRL - NV range seeding (Wind C = 40) (NV)", "GRL - NV range seeding (Wind C = 60) (NV)",
"GRL - NV range seeding (Wind C = 80) (NV)", "GRL - NV range seeding (Wind C >= 160) (NV)",
"GRL - Rangeland Planting by Mechanical Seeding (TX)", "GRL - Rangeland Root Plowing (TX)",
"Hybrid Wine Grape Varieties Site Desirability (Long)", "Low Pressure Pipe Septic System (DE)",
"MIL - Bivouac Areas (DOD)", "MIL - Excavations Crew-Served Weapon Fighting Position (DOD)",
"MIL - Excavations for Individual Fighting Position (DOD)", "MIL - Trafficability Veh. Type 1 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 2 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 4 1-pass wet season (DOD)", "MIL - Trafficability Veh. Type 4 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 6 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 7 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 7 dry season (DOD)", "MT - Conservation Tree/Shrub Groups (MT)",
"NCCPI - Irrigated National Commodity Crop Productivity Index",
"Nitrogen Loss Potential (ND)", "REC - Foot and ATV Trails (AK)",
"REC - Playgrounds (AK)", "RSK-risk assessment for manure application (OH)",
"SAS - CMECS Substrate Origin", "SAS - CMECS Substrate Subclass/Group/Subgroup",
"SAS - Mooring Anchor - Deadweight", "Septic System A/B Soil System (Alternate) (PA)",
"Septic System CO-OP RFS III w/Spray Irrigation (PA)", "Septic System Dual Field Trench (conventional) (WV)",
"Septic System Elevated Field (alternative) (WV)", "Septic System In Ground Trench (conventional) (PA)",
"Septic System In Ground Trench (conventional) (WV)", "Septic System Low Pressure Pipe (alternative) (WV)",
"Septic System Mound (alternative) (WV)", "Septic System Peat Based Option2 w/Spray Irrigation (PA)",
"Septic System Steep Slope Mound (alternative) (WV)", "WMS - Excavated Ponds (Aquifer-fed) (OH)",
"WMS - Grape Production with Drip Irrigation (TX)", "WMS - Irrigation, Micro (subsurface drip) (VT)",
"WMS - Irrigation, Surface (level)", "WMS - Pond Reservoir Area (MI)",
"WMS - Sprinkler Irrigation (MT)", "WMS - Sprinkler Irrigation RDC (IL)",
"WMS - Subsurface Drains - Performance (VT)", "WMS - Subsurface Water Management, Outflow Quality",
"WMS - Surface Water Management, System", "WMS-Subsurface Water Management, Performance (ND)",
"AGR - Air Quality; PM10 (TX)", "AGR - Air Quality; PM2_5 (TX)",
"AGR - Index for alfalfa hay, irrigated (NV)", "AGR - Nitrate Leaching Potential, Nonirrigated (MA)",
"AGR - Orchard Groups (TX)", "AGR - Rangeland Grass/Herbaceous Productivity Index (TX)",
"AGR - Rutting Hazard > 10,000 Pounds per Wheel (TX)", "AGR - Water Erosion Potential (TX)",
"AGR - Wine Grape Site Suitability (WA)", "AGR-Natural Fertility (ND)",
"AGR-Subsurface Salinity (ND)", "AGR-Water Erosion (ND)", "AWM - Filter Group (OH)",
"AWM - Irrigation Disposal of Wastewater", "AWM - Land Application of Dry and Slurry Manure (TX)",
"AWM - Land Application of Municipal Biosolids, winter (OR)",
"AWM - Overland Flow Process Treatment of Wastewater", "AWM - Rapid Infiltration Disposal of Wastewater",
"AWM - Vegetated Treatment Area (PIA)", "AWM - Waste Field Storage Area (VT)",
"BLM - Mechanical Treatment, Shredder", "BLM - Medusahead Invasion Susceptibility",
"BLM - Soil Compaction Resistance", "Capping Fill Gravity Septic System (DE)",
"CLASS RULE - Depth to any bedrock kind (5 classes) (NPS)", "CPI - Alfalfa Hay, IRR - Eastern Idaho Plateaus (ID)",
"CPI - Alfalfa Hay, IRR - Klamath Valley and Basins (OR)", "CPI - Alfalfa Hay, IRR - Snake River Plains (ID)",
"CPI - Alfalfa Hay, NIRR- Eastern Idaho Plateaus (ID)", "CPI - Grass Hay, NIRR - Palouse, Northern Rocky Mtns. (WA)",
"CPI - Small Grains Productivity Index (AK)", "DHS - Catastrophic Event, Large Animal Mortality, Incinerate",
"DHS - Emergency Land Disposal of Milk", "DHS - Site for Composting Facility - Subsurface",
"DHS - Suitability for Clay Liner Material", "ENG - Cohesive Soil Liner (MN)",
"ENG - Construction Materials - Sand Source (MN)", "ENG - Construction Materials; Gravel Source (CT)",
"ENG - Construction Materials; Gravel Source (NY)", "ENG - Construction Materials; Reclamation (DE)",
"ENG - Construction Materials; Roadfill", "ENG - Construction Materials; Roadfill (AK)",
"ENG - Construction Materials; Sand Source (NY)", "ENG - Construction Materials; Sand Source (VT)",
"ENG - Construction Materials; Topsoil (AK)", "ENG - Construction Materials; Topsoil (DE)",
"ENG - Construction Materials; Topsoil (MI)", "ENG - Construction Materials; Topsoil (OR)",
"ENG - Disposal Field Gravity (DE)", "ENG - Dwellings With Basements (OH)",
"ENG - Large Animal Disposal, Trench (CT)", "ENG - Lawn, Landscape, Golf Fairway (MI)",
"ENG - Lawn, Landscape, Golf Fairway (VT)", "ENG - Sanitary Landfill (Area) (OH)",
"ENG - Sanitary Landfill (Trench)", "ENG - Sanitary Landfill (Trench) (AK)",
"ENG - Septage Application - Surface (MN)", "ENG - Septic Tank Absorption Fields - At-Grade (MN)",
"ENG - Septic Tank Absorption Fields - Mound (MN)", "ENG - Septic Tank Leaching Chamber (TX)",
"ENG - Septic Tank, Subsurface Drip Irrigation (TX)", "ENG - Shallow Excavations",
"ENG - Small Commercial Buildings", "ENG - Soil Potential of Road Salt Applications (CT)",
"ENG - Source of Caliche (TX)", "ENG - Stormwater Management / Ponds (NY)",
"Farm and Garden Composting Facility - Surface", "FOR - Biomass Harvest (MA)",
"FOR - Black Walnut Suitability Index (KS)", "FOR - Displacement Potential (WA)",
"FOR - General Harvest Season (ME)", "FOR - Harvest Equipment Operability",
"FOR - Mechanical Site Preparation (Deep) (MD)", "FOR - Mechanical Site Preparation (Surface)",
"FOR - Mechanical Site Preparation; Deep (CT)", "FOR - Potential Erosion Hazard (Road/Trail)",
"FOR - Potential Fire Damage Hazard", "FOR - Potential Seedling Mortality",
"FOR - Potential Seedling Mortality (MI)", "FOR - Potential Windthrow Hazard (ME)",
"FOR - Potential Windthrow Hazard (MI)", "FOR - Road Suitability (Natural Surface) (ID)",
"FOR - Rutting Hazard by Month", "FOR - Windthrow Hazard (WA)",
"Fragile Soil Index", "GRL - Juniper Encroachment Potential (NM)",
"GRL - NV range seeding (Wind C = 20) (NV)", "GRL - Pasture and Hayland SG (OH)",
"GRL - Rangeland Prescribed Burning (TX)", "GRL-FSG-NP-W (MT)",
"Ground-based Solar Arrays, Ballast Anchor Systems", "Inland Wetlands (CT)",
"IRR-restrictive features for irrigation (OH)", "MIL - Excavations for Vehicle Fighting Position (DOD)",
"MIL - Trafficability Veh. Type 1 1-pass wet season (DOD)", "MIL - Trafficability Veh. Type 2 dry season (DOD)",
"MIL - Trafficability Veh. Type 3 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 6 1-pass wet season (DOD)", "MIL - Trafficability Veh. Type 6 dry season (DOD)",
"Muscadine Wine Grape Site Desirability (Very Long)", "NCCPI - NCCPI Cotton Submodel (II)",
"Permafrost Sensitivity (AK)", "Pressure Dose Capping Fill Septic System (DE)",
"REC - Camp Areas (CT)", "REC - Off-Road Motorcycle Trails (CT)",
"SAS - CMECS Substrate Class", "SAS - CMECS Substrate Subclass/Group",
"SAS - Eelgrass Restoration Suitability", "SAS - Land Utilization of Dredged Materials",
"SAS - Northern Quahog (Hard Clam) Habitat Suitability", "Septic System At Grade Shallow Field (alternative) (WV)",
"Septic System At-Grade Bed (Alternate) (PA)", "Septic System CO-OP RFS III w/Drip Irrigation (PA)",
"Septic System Drip Irrigation (alternative) (WV)", "Septic System Free Access Sand Filterw/Spray Irrigation (PA)",
"Septic System Peat Based Option1 w/At-Grade Bed (Alt.) (PA)",
"Septic System Spray Irrigation (PA)", "Septic System Steep Slope Sand Mound (Alternate) (PA)",
"Shallow Infiltration Systems", "SOH - Soil Surface Sealing",
"URB - Rural Residential Development on Concrete Slab (TX)",
"URB/REC - Camp Areas (GA)", "URB/REC - Camp Areas (MI)", "URB/REC - Golf Fairways (OH)",
"URB/REC - Off-Road Motorcycle Trails", "URB/REC - Paths and Trails (MI)",
"URB/REC - Playgrounds (OH)", "Vinifera Wine Grape Site Desirability (Long to Medium)",
"WAQ - Soil Pesticide Absorbed Runoff Potential (TX)", "WLF - Chufa for Turkey Forage (LA)",
"WLF - Food Plots for Upland Wildlife < 2 Acres (TX)", "WLF - Freshwater Wetland Plants (TX)",
"WLF - Irrigated Saline Water Wetland Plants (TX)", "WLF - Riparian Herbaceous Plants (TX)",
"WLF - Riparian Shrubs, Vines, & Trees (TX)", "WLF - Saline Water Wetland Plants (TX)",
"WLF - Upland Mixed Deciduous & Coniferous Trees (TX)", "WMS - Constructing Grassed Waterways (TX)",
"WMS - Constructing Terraces and Diversions (OH)", "WMS - Embankments, Dikes, and Levees (VT)",
"WMS - Irrigation, Sprinkler (close spaced outlet drops)", "WMS - Irrigation, Surface (graded)",
"WMS - Subsurface Drains - Installation (VT)", "WMS - Subsurface Drains < 3 Feet Deep (TX)",
"WMS - Subsurface Drains > 3 Feet Deep (TX)", "WMS - Subsurface Water Management, System Performance",
"WMS - Surface Drains (TX)", "WMS - Surface Irrigation Intake Family (TX)",
"SOH - Concentration of Salts- Soil Surface", "SOH - Soil Susceptibility to Compaction",
"Soil Habitat for Saprophyte Stage of Coccidioides", "Soil Vegetative Groups (CA)",
"Unlined Retention Systems", "URB - Commercial Metal Bldg; w/Reinforced Concrete Slab (TX)",
"URB - Commercial Metal Buildings w/o Concrete Slab (TX)", "URB - Urban Residential Development on Concrete Slab (TX)",
"URB/REC - Picnic Areas (GA)", "URB/REC - Picnic Areas (MI)",
"URB/REC - Picnic Areas (OH)", "Vinifera Wine Grape Site Desirability (Short)",
"WAQ - Soil Pesticide Solution Runoff Potential (TX)", "WLF - Burrowing Mammals & Reptiles (TX)",
"WLF - Desert Tortoise (CA)", "WLF - Domestic Grasses & Legumes for Food and Cover (TX)",
"WLF - Irrigated Grain & Seed Crops for Food & Cover (TX)", "WMS - Excavated Ponds (Aquifer-fed)",
"WMS - Excavated Ponds (Aquifer-fed) (VT)", "WMS - Irrigation, General",
"WMS - Irrigation, Micro (above ground)", "WMS - Irrigation, Micro (above ground) (VT)",
"WMS - Irrigation, Micro (subsurface drip)", "WMS - Irrigation, Sprinkler (general) (VT)",
"WMS - Pond Reservoir Area", "WMS - Pond Reservoir Area (OH)",
"WMS - Subsurface Water Management, System Installation", "AGR - Filter Strips (TX)",
"AGR - Mulch Till (TX)", "AGR - Nitrate Leaching Potential, Nonirrigated (MT)",
"AGR - No Till (VT)", "AGR - Oats Yield (MT)", "AGR - Pesticide Loss Potential-Leaching",
"AGR - Pesticide Loss Potential-Leaching (NE)", "AGR - Rutting Hazard =< 10,000 Pounds per Wheel (TX)",
"AGR-Available Water Capacity (ND)", "AGR-Physical Limitations (ND)",
"AGR-Sodicity (ND)", "AGR-Surface Crusting (ND)", "AGR-Wind Erosion (ND)",
"AWM - Irrigation Disposal of Wastewater (DE)", "AWM - Land App of Municipal Sewage Sludge (DE)",
"AWM - Land App of Municipal Sewage Sludge (MD)", "AWM - Land Application of Milk (CT)",
"AWM - Land Application of Municipal Biosolids, spring (OR)",
"AWM - Land Application of Municipal Sewage Sludge", "AWM - Land Application of Municipal Sewage Sludge (OH)",
"AWM - Land Application of Municipal Sewage Sludge (VT)", "AWM - Large Animal Disposal, Pit (MN)",
"AWM - Manure and Food Processing Waste", "AWM - Manure and Food Processing Waste (VT)",
"AWM - Rapid Infil Disposal of Wastewater (MD)", "AWM - Rapid Infiltration Disposal of Wastewater (VT)",
"AWM - Slow Rate Process Treatment of Wastewater (VT)", "BLM - Chaining Suitability",
"BLM - Fugitive Dust Resistance", "BLM - Soil Restoration Potential",
"BLM - Yellow Star-thistle Invasion Susceptibility", "CLASS RULE - Depth to non-lithic bedrock (5 classes) (NPS)",
"CLR-cropland limitation for corn and soybeans (IN)", "Commodity Crop Productivity Index (Corn) (WI)",
"CPI - Grass Hay, NIRR - Klamath Valleys and Basins (OR)", "CPI - Potatoes, IRR - Eastern Idaho Plateaus (ID)",
"CPI - Small Grains, NIRR - Palouse Prairies (ID)", "DHS - Emergency Animal Mortality Disposal by Shallow Burial",
"DHS - Rubble and Debris Disposal, Large-Scale Event", "ENG - Aquifer Assessment - 7081 (MN)",
"ENG - Construction Materials - Gravel Source (MN)", "ENG - Construction Materials; Gravel Source (MI)",
"ENG - Construction Materials; Gravel Source (OR)", "ENG - Construction Materials; Reclamation",
"ENG - Construction Materials; Reclamation (OH)", "ENG - Construction Materials; Sand Source",
"ENG - Construction Materials; Sand Source (AK)", "ENG - Construction Materials; Sand Source (ID)",
"ENG - Construction Materials; Sand Source (IN)", "ENG - Construction Materials; Sand Source (OH)",
"ENG - Construction Materials; Topsoil", "ENG - Construction Materials; Topsoil (WA)",
"ENG - Local Roads and Streets", "ENG - New Ohio Septic Rating (OH)",
"ENG - Sanitary Landfill (Trench) (OH)", "ENG - Septic Tank Absorption Fields (AK)",
"ENG - Septic Tank Absorption Fields (DE)", "ENG - Septic Tank Absorption Fields (NY)",
"ENG - Sewage Lagoons (OH)", "ENG - Shallow Excavations (AK)",
"ENG - Shallow Excavations (MI)", "ENG - Unpaved Local Roads and Streets",
"FOR - Black Walnut Suitability Index (MO)", "FOR - Construction Limitations - Haul Roads/Log Landing (OH)",
"FOR - Construction Limitations For Haul Roads (MI)", "FOR - Hand Planting Suitability (ME)",
"FOR - Harvest Equipment Operability (MD)", "FOR - Harvest Equipment Operability (OH)",
"FOR - Harvest Equipment Operability (VT)", "FOR - Mechanical Planting Suitability",
"FOR - Mechanical Planting Suitability (ME)", "FOR - Mechanical Planting Suitability, MO13 (DE)",
"FOR - Potential Erosion Hazard (Off-Road/Off-Trail)", "FOR - Potential Erosion Hazard (Road/Trail) (PIA)",
"FOR - Potential Seedling Mortality (VT)", "FOR - Potential Windthrow Hazard (NY)",
"FOR - Potential Windthrow Hazard (VT)", "FOR - Puddling Potential (WA)",
"FOR - Road Suitability (Natural Surface)", "FOR - Road Suitability (Natural Surface) (OH)",
"FOR - Road Suitability (Natural Surface) (OR)", "FOR - Rutting Hazard by Season",
"FOR - Shortleaf pine littleleaf disease susceptibility", "FOR - Soil Compactibility Risk",
"FOR - Soil Rutting Hazard (ME)", "FOR - Windthrow Hazard", "FOR-Construction Limitations for Haul Roads/Log Landings(ME)",
"FOTG - Indiana Slippage Potential (IN)", "Gravity Full Depth Septic System (DE)",
"GRL - Fencing, Post Depth =<36 inches", "GRL - NV range seeding (Wind C = 50) (NV)",
"GRL - Ranch Access Roads (TX)", "GRL - Rangeland Roller Chopping (TX)",
"Ground Penetrating Radar Penetration", "Ground-based Solar Arrays, Soil-penetrating Anchor Systems",
"Hybrid Wine Grape Varieties Site Desirability (Short)", "ISDH Septic Tank Interpretation (IN)",
"Land Application of Municipal Sewage Sludge (PA)", "MIL - Helicopter Landing Zones (DOD)",
"MIL - Trafficability Veh. Type 2 1-pass wet season (DOD)", "MIL - Trafficability Veh. Type 5 50-passes wet season (DOD)",
"MIL - Trafficability Veh. Type 5 dry season (DOD)", "MIL - Trafficability Veh. Type 7 1-pass wet season (DOD)",
"NCCPI - National Commodity Crop Productivity Index (Ver 3.0)",
"REC - Camp and Picnic Areas (AK)", "REC - Picnic Areas (CT)",
"REC - Playgrounds (CT)", "Salinity Risk Index, Discharge Model (ND)",
"SAS - CMECS Substrate Subclass", "Septic System Drip Irrigation (Alternate) (PA)",
"Septic System Free Access Sand Filter w/Drip Irrigation (PA)",
"Septic System In Ground Bed (conventional) (PA)", "Septic System Peat Based Option1 (UV & At-Grade Bed)Alt (PA)",
"Septic System Peat Sys Opt3 w/Subsurface Sand Filter (PA)",
"Septic System Sand Mound Bed or Trench (PA)", "Septic System Shallow Placement Pressure Dosed (Alt.) (PA)",
"SOH - Suitability for Aerobic Soil Organisms", "SOH - Agricultural Organic Soil Subsidence",
"SOH - Organic Matter Depletion", "SOIL HEALTH ASSESSMENT (NJ)",
"Surface Runoff Class (CA)", "URB - Commercial Brick Bldg; w/Reinforced Concrete Slab (TX)",
"URB - Reinforced Concrete Slab (TX)", "URB/REC - Camp Areas",
"URB/REC - Camp Areas (HI)", "URB/REC - Camp Areas (OH)", "URB/REC - Off-Road Motorcycle Trails (OH)",
"URB/REC - Paths and Trails (OH)", "URB/REC - Picnic Areas",
"URB/REC - Playgrounds", "URB/REC - Playgrounds (GA)", "Vinifera Wine Grape Site Desirability (Short to Medium)",
"WLF - Irr. Domestic Grasses & Legumes for Food & Cover (TX)",
"WLF - Irrigated Freshwater Wetland Plants (TX)", "WLF - Upland Coniferous Trees (TX)",
"WLF - Upland Deciduous Trees (TX)", "WLF - Upland Desertic Shrubs & Trees (TX)",
"WLF - Upland Native Herbaceous Plants (TX)", "WLF - Upland Shrubs & Vines (TX)",
"WLF-Soil Suitability - Karner Blue Butterfly (WI)", "WMS - Drainage (IL)",
"WMS - Drainage - (MI)", "WMS - Embankments, Dikes, and Levees",
"WMS - Embankments, Dikes, and Levees (OH)", "WMS - Grassed Waterways - (MI)",
"WMS - Irrigation, Sprinkler (general)", "WMS - Pond Reservoir Area (GA)",
"WMS-Subsurface Water Management, Installation (ND)", "WMS-Subsurface Water Management, Outflow Quality (ND)"
)
}
.interpretationAggMethod <- function(method) {
labels <- c("Dominant Component",
"Weighted Average",
"Dominant Condition",
"None")
method <- match.arg(toupper(method), toupper(labels))
suffixes <- c('_dom_comp_',
'_wtd_avg',
'_dom_cond',
'')
modifier <- suffixes[match(method, toupper(labels))]
return(list(method = method,
modifier = modifier))
}
.constructInterpQuery <- function(method, interp, areasymbols = NULL, mukeys = NULL) {
stopifnot(!is.null(areasymbols) | !is.null(mukeys))
if (!is.null(areasymbols))
areasymbols <- soilDB::format_SQL_in_statement(areasymbols)
if (!is.null(mukeys))
mukeys <- soilDB::format_SQL_in_statement(mukeys)
where_clause <- switch(as.character(is.null(areasymbols)),
"TRUE" = sprintf("mu.mukey IN %s", mukeys),
"FALSE" = sprintf("l.areasymbol IN %s", areasymbols))
agg_method <- .interpretationAggMethod(method)
switch(agg_method$method,
"DOMINANT COMPONENT" = .interpretation_aggregation(interp, where_clause, dominant = TRUE),
"DOMINANT CONDITION" = .interpretation_by_condition(interp, where_clause, dominant = TRUE),
"WEIGHTED AVERAGE" = .interpretation_weighted_average(interp, where_clause),
"NONE" = .interpretation_aggregation(interp, where_clause)
)
}
.cleanRuleColumnName <- function(x) gsub("[^A-Za-z0-9]", "", x)
.interpretation_by_condition <- function(interp, where_clause, dominant = TRUE) {
sprintf("SELECT areasymbol, musym, muname, mu.mukey/1 AS mukey,
%s
FROM legend AS l
INNER JOIN mapunit AS mu ON mu.lkey = l.lkey AND %s
INNER JOIN component AS c ON c.mukey = mu.mukey %s
ORDER BY areasymbol, musym, muname, mu.mukey",
paste0(sapply(interp, function(x) sprintf(" (SELECT TOP 1 ROUND (AVG(interphr) OVER (PARTITION BY interphrc), 2)
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey AND ruledepth = 0 AND mrulename LIKE '%s' GROUP BY interphrc, interphr
ORDER BY SUM (comppct_r) DESC) AS [rating_%s],
(SELECT TOP 1 interphrc
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey AND ruledepth = 0 AND mrulename LIKE '%s'
GROUP BY interphrc, comppct_r ORDER BY SUM(comppct_r) OVER (PARTITION BY interphrc) DESC) AS [class_%s],
(SELECT DISTINCT SUBSTRING((SELECT('; ' + interphrc)
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey AND compkind != 'miscellaneous area' AND component.cokey = c.cokey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey
AND ruledepth != 0 AND interphrc NOT LIKE 'Not%%' AND mrulename LIKE '%s' GROUP BY interphrc, interphr
ORDER BY interphr DESC, interphrc
FOR XML PATH('') ), 3, 1000)) AS [reason_%s]",
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x))),
collapse = ", "), where_clause,
ifelse(dominant, "AND c.cokey =
(SELECT TOP 1 c1.cokey FROM component AS c1
INNER JOIN mapunit ON c.mukey = mapunit.mukey AND c1.mukey = mu.mukey ORDER BY c1.comppct_r DESC, c1.cokey)", ""))
}
# Component-level listing: one row per component; when dominant = TRUE, keep
# only the component with the highest comppct_r in each map unit.
.interpretation_aggregation <- function(interp, where_clause, dominant = FALSE) {
sprintf("SELECT areasymbol, musym, muname, mu.mukey/1 AS mukey, c.cokey AS cokey, compname, comppct_r, majcompflag,
%s
FROM legend AS l
INNER JOIN mapunit AS mu ON mu.lkey = l.lkey AND %s
INNER JOIN component AS c ON c.mukey = mu.mukey %s",
paste0(sapply(interp, function(x) sprintf("(SELECT interphr FROM component INNER JOIN cointerp ON component.cokey = cointerp.cokey AND component.cokey = c.cokey AND ruledepth = 0 AND mrulename LIKE '%s') as [rating_%s],
(SELECT interphrc FROM component INNER JOIN cointerp ON component.cokey = cointerp.cokey AND component.cokey = c.cokey AND ruledepth = 0 AND mrulename LIKE '%s') as [class_%s],
(SELECT DISTINCT SUBSTRING( ( SELECT ( '; ' + interphrc)
FROM mapunit
INNER JOIN component ON component.mukey=mu.mukey AND compkind != 'miscellaneous area' AND component.cokey=c.cokey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey
AND ruledepth != 0 AND interphrc NOT LIKE 'Not%%' AND mrulename LIKE '%s' GROUP BY interphrc, interphr
ORDER BY interphr DESC, interphrc
FOR XML PATH('') ), 3, 1000)) as [reason_%s]",
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x))),
collapse = ", "), where_clause,
ifelse(dominant, "AND c.cokey = (SELECT TOP 1 c1.cokey FROM component AS c1
INNER JOIN mapunit ON c.mukey = mapunit.mukey AND c1.mukey = mu.mukey
ORDER BY c1.comppct_r DESC, c1.cokey)", ""))
}
# Weighted average: component-percentage weighted mean rating per map unit,
# staged through the #main temp table on the SDA server.
.interpretation_weighted_average <- function(interp, where_clause) {
sprintf("SELECT areasymbol, musym, muname, mu.mukey/1 AS mukey,
%s
INTO #main
FROM legend AS l
INNER JOIN mapunit AS mu ON mu.lkey = l.lkey AND %s
INNER JOIN component AS c ON c.mukey = mu.mukey
GROUP BY areasymbol, musym, muname, mu.mukey
SELECT areasymbol, musym, muname, mukey,
%s,
%s,
%s
FROM #main
DROP TABLE #main",
paste0(sapply(interp, function(x) sprintf("(SELECT TOP 1 CASE WHEN ruledesign = 1 THEN 'limitation'
WHEN ruledesign = 2 THEN 'suitability' END
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey AND ruledepth = 0 AND mrulename LIKE '%s'
GROUP BY mapunit.mukey, ruledesign) AS [design_%s],
ROUND ((SELECT SUM (interphr * comppct_r)
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey AND ruledepth = 0 AND mrulename LIKE '%s'
GROUP BY mapunit.mukey),2) AS [rating_%s],
ROUND ((SELECT SUM (comppct_r)
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey AND ruledepth = 0 AND mrulename LIKE '%s'
AND (interphr) IS NOT NULL GROUP BY mapunit.mukey),2) AS [sum_com_%s],
(SELECT DISTINCT SUBSTRING((SELECT ( '; ' + interphrc)
FROM mapunit
INNER JOIN component ON component.mukey = mapunit.mukey AND compkind != 'miscellaneous area'
INNER JOIN cointerp ON component.cokey = cointerp.cokey AND mapunit.mukey = mu.mukey
AND ruledepth != 0 AND interphrc NOT LIKE 'Not%%' AND mrulename LIKE '%s' GROUP BY interphrc
ORDER BY interphrc
FOR XML PATH('') ), 3, 1000)) AS [reason_%s]",
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x),
x, .cleanRuleColumnName(x))), collapse=", "),
where_clause,
paste0(sapply(interp,
function(x) sprintf("ISNULL(ROUND(([rating_%s] / [sum_com_%s]),2), 99) AS [rating_%s]",
.cleanRuleColumnName(x), .cleanRuleColumnName(x), .cleanRuleColumnName(x))),
collapse = ", "),
paste0(sapply(interp,
function(x) sprintf(gsub("design", paste0("[design_", .cleanRuleColumnName(x),"]"),
gsub("sum_com", paste0("[sum_com_", .cleanRuleColumnName(x), "]"),
gsub("rating", paste0("[rating_", .cleanRuleColumnName(x), "]"),
"CASE WHEN rating IS NULL THEN 'Not Rated'
WHEN design = 'suitability' AND ROUND((rating/sum_com),2) <= 0 THEN 'Not suited'
WHEN design = 'suitability' AND ROUND((rating/sum_com),2) > 0.001 and ROUND((rating/sum_com),2) <=0.333 THEN 'Poorly suited'
WHEN design = 'suitability' AND ROUND((rating/sum_com),2) > 0.334 and ROUND((rating/sum_com),2) <=0.666 THEN 'Moderately suited'
WHEN design = 'suitability' AND ROUND((rating/sum_com),2) > 0.667 and ROUND((rating/sum_com),2) <=0.999 THEN 'Moderately well suited'
WHEN design = 'suitability' AND ROUND((rating/sum_com),2) = 1 THEN 'Well suited'
WHEN design = 'limitation' AND ROUND((rating/sum_com),2) <= 0 THEN 'Not limited'
WHEN design = 'limitation' AND ROUND((rating/sum_com),2) > 0.001 and ROUND((rating/sum_com),2) <=0.333 THEN 'Slightly limited'
WHEN design = 'limitation' AND ROUND((rating/sum_com),2) > 0.334 and ROUND((rating/sum_com),2) <=0.666 THEN 'Somewhat limited'
WHEN design = 'limitation' AND ROUND((rating/sum_com),2) > 0.667 and ROUND((rating/sum_com),2) <=0.999 THEN 'Moderately limited'
WHEN design = 'limitation' AND ROUND((rating/sum_com),2) = 1 THEN 'Very limited' END AS [class_%s]"))),
.cleanRuleColumnName(x))),
collapse = ", "), paste0(sapply(interp, function(x) sprintf("[reason_%s]", .cleanRuleColumnName(x))), collapse = ", "))
}
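# A minimal end-to-end sketch (hypothetical mukeys; assumes the helpers above
# and network access to Soil Data Access via soilDB):
# q <- .constructInterpQuery(method = "Weighted Average",
#                            interp = c("WMS - Drainage (IL)",
#                                       "URB/REC - Camp Areas"),
#                            mukeys = c(462594, 462595))
# res <- soilDB::SDA_query(q)
# 'res' should contain one row per map unit, with rating_*, class_*, and
# reason_* columns for each requested interpretation.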