# The formula interface to cor() is provided by the mosaic package; the
# ElectionMargin and FloridaLakes data sets ship with Lock5Data.
library(mosaic)
library(Lock5Data)
cor(Margin ~ Approval, data = ElectionMargin)
cor(AvgMercury ~ pH, data = FloridaLakes)
cor(AvgMercury ~ Alkalinity, data = FloridaLakes)
cor(Alkalinity ~ pH, data = FloridaLakes)
cor(AvgMercury ~ ThreeYrStdMercury, data = FloridaLakes)
|
npStochinUnpaired <- function(x1, x2, d = 0,
alternative = "two.sided",
iterations = 5000, alpha = 0.05,
epsilon = 1 * 10^(-6),
ignoreNA = FALSE,
max.iterations = 100000) {
method <- "Nonparametric Test for Stochastic Inequality"
names.x1 <- deparse(substitute(x1))
names.x2 <- deparse(substitute(x2))
DNAME <- paste(names.x1, "and",
names.x2)
if(is.data.frame(x1)) {
if(dim(x1)[2] == 1) {
x1 <- x1[, 1]
}
}
if(is.data.frame(x2)) {
if(dim(x2)[2] == 1) {
x2 <- x2[, 1]
}
}
x1 <- as.vector(x1)
x2 <- as.vector(x2)
if(ignoreNA == TRUE) {
x1 <- x1[!is.na(x1)]
x2 <- x2[!is.na(x2)]
} else if(any(is.na(c(x1, x2))) == TRUE) {
stop("The data contains NA's!")
}
if(alpha >= 1 | alpha <= 0)
stop("Please supply a sensible value for alpha.")
null.value <- d
names(null.value) <- "relation P(x1 > x2) - P(x1 < x2)"
if(alternative == "less") {
names.x1.new <- names.x2
names.x2 <- names.x1
names.x1 <- names.x1.new
x1.new <- x2
x2 <- x1
x1 <- x1.new
d <- -d
}
N1 <- length(x1)
N2 <- length(x2)
min.length <- min(N1, N2)
p <- (1 + d)/2
if(alternative == "less") {
count.x1 <- sum(tapply(x1, 1:N1, function(x.i) sum(x.i < x2)))
count.x2 <- sum(tapply(x1, 1:N1, function(x.i) sum(x.i > x2)))
sample.est <- (count.x1 - count.x2)/(N1 * N2)
stochin.parameter <- paste("P(", names.x1, " < ", names.x2, ") - P(",
names.x1, " > ", names.x2, ")",
sep = "")
} else {
count.x1 <- sum(tapply(x1, 1:N1, function(x.i) sum(x.i > x2)))
count.x2 <- sum(tapply(x1, 1:N1, function(x.i) sum(x.i < x2)))
sample.est <- (count.x1 - count.x2)/(N1 * N2)
stochin.parameter <- paste("P(", names.x1, " > ", names.x2, ") - P(",
names.x1, " < ", names.x2, ")",
sep = "")
}
names(sample.est) <- stochin.parameter
null.hypothesis <- paste("SI",
ifelse(alternative == "greater", " <= ",
ifelse(alternative == "less", " >= ",
" = ")),
null.value, sep = "")
alt.hypothesis <- paste("SI",
ifelse(alternative == "greater", " > ",
ifelse(alternative == "less", " < ", " != ")),
null.value, sep = "")
if(alternative == "two.sided") {
resultsGreater <- doTwoVariablesTest(alpha = alpha / 2,
epsilon = epsilon,
iterations = iterations,
max.iterations = max.iterations,
testFunction = sampleBinomTest,
x1 = x2, x2 = x1,
p = p, n = min.length,
diff = d)
d <- -d
p <- (1 + d)/2
resultsLess <- doTwoVariablesTest(alpha = alpha / 2,
epsilon = epsilon,
theta = resultsGreater[["theta"]],
typeII = resultsGreater[["typeIIerror"]],
d.alternative = resultsGreater[["d.alternative"]],
iterations = iterations,
max.iterations = max.iterations,
testFunction = sampleBinomTest,
x1 = x1, x2 = x2,
p = p, n = min.length,
diff = d)
if(resultsGreater[["rejection"]] == TRUE) {
results <- resultsGreater
theta <- resultsGreater[["theta"]]
} else if(resultsLess[["rejection"]] == TRUE) {
results <- resultsLess
theta <- resultsLess[["theta"]]
} else {
if((sample.est < null.value) &
!is.null(resultsGreater[["theta"]])) {
results <- resultsGreater
theta <- resultsGreater[["theta"]]
} else if((sample.est > null.value) &
!is.null(resultsLess[["theta"]])) {
results <- resultsLess
theta <- resultsLess[["theta"]]
} else {
results <- resultsGreater
theta <- resultsGreater[["theta"]]
}
}
results <- mergeTwoResultSets(results, resultsGreater, resultsLess)
if(results[["rejection"]] == TRUE) {
alt.hypothesis <- paste("SI",
ifelse(resultsGreater[["rejection"]] == TRUE, " > ", " < "),
null.value, sep = "")
}
} else {
results <- doTwoVariablesTest(alpha = alpha,
epsilon = epsilon,
iterations = iterations,
max.iterations = max.iterations,
testFunction = sampleBinomTest,
x1 = x2, x2 = x1,
p = p, n = min.length, diff = d)
theta <- results[["theta"]]
if(alternative == "less" & !is.null(results[["d.alternative"]])) {
results[["d.alternative"]] <- 1 - results[["d.alternative"]]
}
}
if(!is.null(iterations) & results[["iterations.taken"]] < 1000)
warning("Low number of iterations. Results may be inaccurate.")
if(results[["iterations.taken"]] >= max.iterations)
warning(paste("The maximum number of iterations (",
format(max.iterations, scientific = FALSE),
") was reached. Rejection may be very sensible to the choice of the parameters.", sep = ""))
structure(list(method = method,
data.name = DNAME,
alternative = alternative,
stochin.parameter = stochin.parameter,
stochin.estimate = sample.est,
null.hypothesis = null.hypothesis,
alt.hypothesis = alt.hypothesis,
estimate = NULL,
probrej = results[["probrej"]],
rejection = results[["rejection"]],
mc.error = results[["mc.error"]],
alpha = alpha,
theta = theta,
thetaValue = results[["theta"]],
d.alternative = (results[["d.alternative"]] * 2 - 1),
typeIIerror = results[["typeIIerror"]],
iterations = results[["iterations.taken"]],
pseudoalpha = results[["pseudoalpha"]],
bounds = NULL,
null.value = null.value),
class = "nphtest")
}
sampleBinomTest <- function(x1, x2, pseudoalpha, dots) {
n <- dots[["n"]]
p <- dots[["p"]]
d <- dots[["diff"]]
c1 <- sample(x1, n)
c2 <- sample(x2, n)
s1 <- sum(c1 > c2)
s2 <- sum(c1 < c2)
if((s1 + s2) != n) {
if(d > 0) {
q <- runif(sum(c1 == c2))
s1 <- s1 + sum(q < (d/(1 + d)))
} else if(d < 0) {
q <- runif(sum(c1 == c2))
s1 <- s1 + sum(q < (-1) * (d/(1 - d)))
}
}
prob <- sum(dbinom(s2:(s1 + s2), (s1 + s2), p))
res <- 0
if(prob <= pseudoalpha) {
res <- 1
} else {
h2 <- (p^s2) * ((1 - p)^s1) * choose(s1 + s2, s2)
if(prob <= (pseudoalpha + h2)) {
res <- ((pseudoalpha - prob + h2)/h2)
}
}
return(res)
}
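# Usage sketch (hypothetical data; npStochinUnpaired() also needs the internal
# helpers doTwoVariablesTest() and mergeTwoResultSets() defined elsewhere in
# the package, so the call is shown without evaluation):
# set.seed(1)
# x <- rnorm(30)
# y <- rnorm(30, mean = 0.5)
# npStochinUnpaired(x, y, alternative = "two.sided", iterations = 1000)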
|
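# kplot method for 'kselect' objects (adehabitatHS-style K-select analysis):
# it appears to draw one factorial-plane panel per animal, with symbol area
# proportional to the utilisation weight of each resource unit.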
"kplot.kselect" <- function (object, xax = 1, yax = 2, csub = 2,
possub = c("topleft", "bottomleft",
"bottomright", "topright"),
addval=TRUE, cpoint=1, csize=1, clegend=2, ...)
{
possub<-match.arg(possub)
x<-object
if (!inherits(x, "kselect"))
stop("x should be a 'kselect' object")
if (x$nf == 1) {
hist.kselect(x)
return(invisible())
}
Xi<-x$initab
Xrecalc<-t(as.matrix(apply(Xi, 1,
function(y) y*x$lw/sum(x$lw))))%*%as.matrix(x$l1)
rx<-range(Xrecalc[,xax])
ry<-range(Xrecalc[,yax])
li.Xi<-split(as.data.frame(Xrecalc), x$initfac)
li.wei<-split(x$initwei, x$initfac)
li.wei<-lapply(li.wei, function(x) x/sum(x) )
maxsqrtw<-max(sqrt(unlist(li.wei)))
csi<-0
for (i in 1:length(li.wei))
csi[i]<-csize*max(sqrt(li.wei[[i]]))/maxsqrtw
def.par <- par(no.readonly = TRUE)
on.exit(par(def.par))
ngraph<-length(li.Xi)
if (addval) {
par(mfrow = n2mfrow(ngraph+1))
} else {
par(mfrow = n2mfrow(ngraph))
}
for (i in 1:ngraph) {
Xtmp<-li.Xi[[i]]
wgtmp<-li.wei[[i]]
if (addval) {
s.value(Xtmp, wgtmp, xax, yax,
sub=names(li.Xi)[i], cpoint=cpoint, xlim=rx,
ylim=ry, clegend=0,
csub=1.5, cgrid=1.5, csize=csi[i])
}
s.distri(Xtmp, wgtmp, xax, yax,
sub=names(li.Xi)[i], add.plot=addval,
cpoint=cpoint, xlim=rx, ylim=ry,
...)
}
if (addval) {
coo <- scatterutil.base(dfxy = Xtmp, xax = xax, yax = yax,
xlim = rx, ylim = ry, grid = FALSE,
addaxes = FALSE,
cgrid = 0, include.origin = FALSE,
origin = c(0,0),
sub = "", csub = 0,
possub = "bottomleft", pixmap = NULL,
contour = NULL, area = NULL, add.plot = FALSE)
coeff <- diff(range(coo$x))/15
br0<-pretty(unlist(li.wei), 4)
l0 <- length(br0)
br0 <- (br0[1:(l0 - 1)] + br0[2:l0])/2
sq0 <- sqrt(abs(br0))
sq0 <- csize * coeff * sq0/max(sqrt(abs(wgtmp)))
sig0 <- sign(br0)
scatterutil.legend.bw.square(pretty(unlist(li.wei), 4),
sq0, sig0, clegend=clegend)
}
}
|
with.milist <- function(data, expr=NULL, ...)
{
call <- match.call()
if (!class(data)=="milist")
stop("data must be of class 'milist'")
statistics <- as.list(seq_len(length(data)))
for (i in seq_along(statistics)) {
df_m <- data[[i]]
statistics[[i]] <- eval(expr = substitute(expr),
envir = df_m, enclos = parent.frame())
if (is.expression(statistics[[i]])) {
statistics[[i]] <- eval(expr = statistics[[i]],
envir = df_m, enclos = parent.frame())
}
}
obj <- list(call = call, statistics = statistics)
class(obj) <- c("mistats")
return(obj)
}
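# Usage sketch: a 'milist' is taken here to be a list of (multiply imputed)
# data frames; the expression is evaluated once per data set.
imps <- structure(
  list(data.frame(age = c(20, 30, 40)), data.frame(age = c(21, 29, 41))),
  class = "milist"
)
with.milist(imps, mean(age))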
|
library('mfx')
# rnegbin() comes from MASS (loaded via mfx's dependencies); the simulated
# response is deliberately overdispersed relative to the fitted Poisson model.
set.seed(12345)
n <- 1000
x <- rnorm(n)
y <- rnegbin(n, mu = exp(1 + 0.5 * x), theta = 0.5)
data <- data.frame(y, x)
poissonmfx(formula = y ~ x, data = data)
|
CoefTest <- function(x, vcov. = NULL, df = NULL, ...) {
UseMethod("CoefTest")
}
CoefTest.default <- function(x, vcov. = NULL, df = NULL, ...) {
coef0 <- coef
vcov0 <- vcov
est <- coef0(x)
if(is.null(vcov.)) se <- vcov0(x) else {
if(is.function(vcov.)) se <- vcov.(x)
else se <- vcov.
}
se <- sqrt(diag(se))
if(!is.null(names(est)) && !is.null(names(se))) {
anames <- names(est)[names(est) %in% names(se)]
est <- est[anames]
se <- se[anames]
}
tval <- as.vector(est)/se
if(is.null(df)) {
df <- try(df.residual(x), silent = TRUE)
if(inherits(df, "try-error")) df <- NULL
}
if(is.null(df)) df <- 0
if(is.finite(df) && df > 0) {
pval <- 2 * pt(abs(tval), df = df, lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
mthd <- "t"
} else {
pval <- 2 * pnorm(abs(tval), lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
mthd <- "z"
}
rval <- cbind(est, se, tval, pval)
colnames(rval) <- cnames
class(rval) <- "CoefTest"
attr(rval, "method") <- paste(mthd, "test of coefficients")
return(rval)
}
CoefTest.glm <- function(x, vcov. = NULL, df = Inf, ...)
CoefTest.default(x, vcov. = vcov., df = df, ...)
CoefTest.mlm <- function(x, vcov. = NULL, df = NULL, ...) {
v <- if(is.null(vcov.)) vcov(x) else if(is.function(vcov.)) vcov.(x) else vcov.
x$coefficients <- structure(as.vector(x$coefficients), .Names = colnames(vcov(x)))
CoefTest.default(x, vcov. = v, df = df, ...)
}
CoefTest.survreg <- function(x, vcov. = NULL, df = Inf, ...) {
if(is.null(vcov.)) v <- vcov(x) else {
if(is.function(vcov.)) v <- vcov.(x)
else v <- vcov.
}
if(length(x$coefficients) < NROW(x$var)) {
x$coefficients <- c(x$coefficients, "Log(scale)" = log(x$scale))
}
CoefTest.default(x, vcov. = v, df = df, ...)
}
CoefTest.multinom <- function(x, vcov. = NULL, df = NULL, ...)
{
est <- coef(x)
if(!is.null(dim(est))) {
est <- structure(as.vector(t(est)),
names = as.vector(t(outer(rownames(est), colnames(est), paste, sep = ":"))))
}
if(is.null(vcov.)) vc <- vcov(x) else {
if(is.function(vcov.)) vc <- vcov.(x)
else vc <- vcov.
}
se <- sqrt(diag(vc))
tval <- as.vector(est)/se
if(is.null(df)) df <- Inf
if(is.finite(df) && df > 0) {
pval <- 2 * pt(abs(tval), df = df, lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
mthd <- "t"
} else {
pval <- 2 * pnorm(abs(tval), lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
mthd <- "z"
}
rval <- cbind(est, se, tval, pval)
colnames(rval) <- cnames
class(rval) <- "coeftest"
attr(rval, "method") <- paste(mthd, "test of coefficients")
return(rval)
}
CoefTest.polr <- function(x, vcov. = NULL, df = NULL, ...)
{
est <- c(x$coefficients, x$zeta)
if(is.null(vcov.)) vc <- vcov(x) else {
if(is.function(vcov.)) vc <- vcov.(x)
else vc <- vcov.
}
se <- sqrt(diag(vc))
tval <- as.vector(est)/se
if(is.null(df)) df <- Inf
if(is.finite(df) && df > 0) {
pval <- 2 * pt(abs(tval), df = df, lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
mthd <- "t"
} else {
pval <- 2 * pnorm(abs(tval), lower.tail = FALSE)
cnames <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
mthd <- "z"
}
rval <- cbind(est, se, tval, pval)
colnames(rval) <- cnames
class(rval) <- "coeftest"
attr(rval, "method") <- paste(mthd, "test of coefficients")
return(rval)
}
lrtest.fitdistr <- function(object, ..., name = NULL)
{
if(is.null(name)) name <- function(x) if(is.null(names(x$estimate))) {
paste(round(x$estimate, digits = max(getOption("digits") - 3, 2)), collapse = ", ")
} else {
paste(names(x$estimate), "=", round(x$estimate, digits = max(getOption("digits") - 3, 2)), collapse = ", ")
}
lmtest::lrtest.default(object, ..., name = name)
}
print.CoefTest <- function(x, ...) {
mthd <- attr(x, "method")
if(is.null(mthd)) mthd <- "Test of coefficients"
cat(paste("\n", mthd,":\n\n", sep = ""))
printCoefmat(x, ...)
cat("\n")
invisible(x)
}
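# Usage sketch: as with lmtest::coeftest(), `vcov.` accepts either a matrix or
# a function computing one, so robust standard errors can be plugged in from
# the sandwich package (assumed installed).
fit <- lm(mpg ~ wt + hp, data = mtcars)
CoefTest(fit)                            # classical standard errors
CoefTest(fit, vcov. = sandwich::vcovHC)  # heteroskedasticity-consistent SEs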
|
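# The function below appears to implement a single Gibbs-sampler update of the
# fixed-effect coefficients (Beta) and latent-factor loadings (Lambda) in an
# HMSC-style hierarchical model: the latent factors Eta are appended to the
# design matrix X, a Gaussian prior is assembled from Gamma, iV, Psi and Delta,
# and Beta/Lambda are drawn jointly from the conditional posterior via a
# Cholesky factorisation.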
updateBetaLambda = function(Y,Z,Gamma,iV,iSigma,Eta,Psi,Delta,rho, iQ, X,Tr,Pi,dfPi,C,rL){
ny = nrow(Z)
ns = ncol(Z)
nc = nrow(Gamma)
nt = ncol(Tr)
nr = ncol(Pi)
S = Z
Lambda = vector("list", nr)
if(nr > 0){
EtaFull = vector("list", nr)
nf = rep(NA,nr)
ncr = rep(NA,nr)
for(r in seq_len(nr)){
if(rL[[r]]$xDim == 0){
EtaFull[[r]] = Eta[[r]][Pi[,r],]
} else{
EtaFull[[r]] = vector("list", rL[[r]]$xDim)
for(k in 1:rL[[r]]$xDim)
EtaFull[[r]][[k]] = Eta[[r]][Pi[,r],] * rL[[r]]$x[as.character(dfPi[,r]),k]
}
nf[r] = ncol(Eta[[r]])
ncr[r] = max(rL[[r]]$xDim, 1)
}
nfSum = sum(nf*ncr)
EtaSt = matrix(unlist(EtaFull),ny,nfSum)
switch(class(X)[1L],
matrix = {
XEta = cbind(X,EtaSt)
},
list = {
XEta = lapply(X, cbind, EtaSt)
}
)
PsiT = vector("list",nr)
for(r in 1:nr){
if(rL[[r]]$xDim == 0){
PsiT[[r]] = t(Psi[[r]])
} else{
PsiT[[r]] = aperm(Psi[[r]], c(2,1,3))
}
}
psiSt = matrix(unlist(PsiT),nfSum,ns,byrow=TRUE)
Tau = lapply(Delta, function(a) apply(a,2,cumprod))
tauSt = matrix(unlist(Tau),nfSum,1)
priorLambda = psiSt*matrix(tauSt,nfSum,ns)
} else{
nf = c()
ncr = c()
nfSum = 0
XEta = X
priorLambda = matrix(numeric(0),0,ns)
}
Mu = rbind(tcrossprod(Gamma,Tr), matrix(0,nfSum,ns))
switch(class(X)[1L],
matrix = {
XEtaTXEta = crossprod(XEta)
isXTS = crossprod(XEta,S) * matrix(iSigma,nc+nfSum,ns,byrow=TRUE)
},
list = {
XEtaTXEta = lapply(XEta, crossprod)
isXTS = matrix(NA,nc+nfSum,ns)
for(j in 1:ns)
isXTS[,j] = crossprod(XEta[[j]],S[,j]) * iSigma[j]
}
)
if(is.null(C)){
Yx = !is.na(Y)
indColFull = apply(Yx,2,all)
indColNA = !indColFull
diagiV = diag(iV)
P0 = matrix(0,nc+nfSum,nc+nfSum)
P0[1:nc,1:nc] = iV
BetaLambda = matrix(NA, nc+nfSum, ns)
for(j in which(indColFull)){
P = P0
diag(P) = c(diagiV, priorLambda[,j])
switch(class(X)[1L],
matrix = {
iU = P + XEtaTXEta*iSigma[j]
},
list = {
iU = P + XEtaTXEta[[j]]*iSigma[j]
}
)
RiU = chol(iU)
U = chol2inv(RiU)
m = U %*% (P%*%Mu[,j] + isXTS[,j]);
BetaLambda[,j] = m + backsolve(RiU, rnorm(nc+nfSum))
}
for(j in which(indColNA)){
indObs = Yx[,j]
switch(class(X)[1L],
matrix = {
XEtaTXEta = crossprod(XEta[indObs,,drop=FALSE])
isXTS = crossprod(XEta[indObs,,drop=FALSE],S[indObs,j]) * iSigma[j]
},
list = {
XEtaTXEta = crossprod(XEta[[j]][indObs,,drop=FALSE])
isXTS = crossprod(XEta[[j]][indObs,,drop=FALSE],S[indObs,j]) * iSigma[j]
}
)
P = P0
diag(P) = c(diagiV, priorLambda[,j])
iU = P + XEtaTXEta*iSigma[j]
RiU = chol(iU)
U = chol2inv(RiU)
m = U %*% (P%*%Mu[,j] + isXTS);
BetaLambda[,j] = m + backsolve(RiU, rnorm(nc+nfSum))
}
} else{
P = bdiag(kronecker(iV,iQ), Diagonal(x=t(priorLambda)))
switch(class(X)[1L],
matrix = {
RiU = chol(kronecker(XEtaTXEta,diag(iSigma)) + P)
},
list = {
tmp = vector("list",ns)
for(j in 1:ns)
tmp[[j]] = XEtaTXEta[[j]] * iSigma[j]
tmpMat = Reduce(rbind, tmp)
ind1 = rep(rep(1:ns,each=nc+nfSum)+ns*rep(0:(nc+nfSum-1),ns), nc+nfSum)
ind2 = rep(1:((nc+nfSum)*ns), each=nc+nfSum)
mat = sparseMatrix(ind1, ind2, x=as.vector(tmpMat))
RiU = chol(as.matrix(mat) + P)
}
)
m1 = backsolve(RiU, P%*%as.vector(t(Mu)) + as.vector(t(isXTS)), transpose=TRUE)
BetaLambda = matrix(backsolve(RiU, m1 + rnorm(ns*(nc+nfSum))), nc+nfSum, ns, byrow=TRUE)
}
Beta = BetaLambda[1:nc,,drop=FALSE]
nfCumSum = c(0,cumsum(nf*ncr)) + nc
for(r in seq_len(nr)){
if(rL[[r]]$xDim == 0){
Lambda[[r]] = BetaLambda[(nfCumSum[r]+1):(nfCumSum[r+1]),,drop=FALSE]
} else
Lambda[[r]] = aperm(array(BetaLambda[(nfCumSum[r]+1):(nfCumSum[r+1]),,drop=FALSE], c(nf[r],ncr[r],ns)),c(1,3,2))
}
return(list(Beta=Beta, Lambda=Lambda))
}
|
context("cbind_smother")
test_that("cbind_smother works", {
df1 <- data.frame(x=c(1,2,3,NA,4), y=c(5,8,9,10,11), row.names=c("A", "B", "C", "D", "E"))
df2 <- data.frame(z=c(7,8,0,9,10), y=c(6,NA,NA,9,10), row.names=c("A", "B", "F", "C", "D"))
df1n2 <- data.frame(x=c(1,2,3,NA,4,NA), y=c(6,NA,9,10,11,NA), z=c(7,8,9,10,NA,0),
row.names=c("A", "B", "C", "D", "E","F"))
df2n1 <- data.frame(z=c(7,8,0,9,10,NA), y=c(5,8,NA,9,10,11), x=c(1,2,NA,3,NA,4),
row.names=c("A", "B", "F", "C", "D", "E"))
expect_equal(cbind_smother(df1, df2), df1n2)
expect_equal(cbind_smother(df2, df1), df2n1)
expect_equal(cbind_smother(df1, df1), df1)
expect_equal(cbind_smother(df2, df2), df2)
expect_equal(cbind_smother(df1, df1[-3,]), df1)
expect_equal(cbind_smother(df2[-2,], df2), df2[c(1,3,4,5,2),])
})
|
ph_comstruct <- function(sample, phylo, null_model = 0, randomizations = 999,
swaps = 1000, abundance = TRUE) {
assert(sample, c("character", "data.frame"))
assert(phylo, c("character", "phylo"))
assert(null_model, c("numeric", "integer"))
assert(randomizations, c("numeric", "integer"))
assert(swaps, c("numeric", "integer"))
assert(abundance, "logical")
stopifnot(null_model %in% 0:3)
sample <- sample_check(sample)
phylo <- phylo_check(phylo)
cdir <- getwd()
bdir <- dirname(sample)
stopifnot(bdir == dirname(phylo))
setwd(bdir)
on.exit(setwd(cdir))
out <- suppressWarnings(
phylocom(c(
"comstruct",
"-s", basename(sample),
"-f", basename(phylo),
"-m", null_model,
"-w", swaps,
"-r", randomizations,
if (abundance) "-a"
), intern = TRUE)
)
phylocom_error(out)
astbl(utils::read.table(
text = out, skip = 1, header = TRUE,
stringsAsFactors = FALSE
))
}
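# Usage sketch (hypothetical file names; requires the phylocom binary plus a
# sample file and phylogeny in the same directory, so shown without
# evaluation):
# ph_comstruct(sample = "sample.txt", phylo = "phylo.txt", null_model = 2)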
|
.select_bridge_mosaic <- function(paths, aoi, save_path, mode = "mask") {
tmp_load <- raster(paths[1])
src_datatype <- dataType(tmp_load)
srcnodata <- ifelse(src_datatype == INT2S(), "-32768", "-3.3999999521443642e+38")
mos_base <- .make_mosaic(paths, save_path,
mode = mode, srcnodata = srcnodata,
datatype = src_datatype
)
if (inherits(mos_base, RASTER_LAYER())) {
mos_base_mask <- .mask_raster_by_polygon(mos_base, aoi)
mos_base_crop <- .crop_raster_by_polygon(mos_base_mask, aoi)
writeRaster(mos_base_crop, save_path,
overwrite = T,
srcnodata = srcnodata, datatype = src_datatype
)
return(mos_base_crop)
} else {
return(NA)
}
}
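# The function below appears to iterate over candidate cloud-masked records,
# mosaicking each onto a growing base mosaic and keeping a record only when
# the cloud-free coverage of the AOI improves by at least min_improvement
# percent, stopping once satisfaction_value coverage is reached.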
.select_calc_mosaic <- function(records, base_records, aoi, sub_within, cloud_mask_col,
min_improvement, satisfaction_value,
ts, dir_out, identifier, delete_files) {
tmp_dir_orig <- tempdir()
tmp_dir <- .tmp_dir(dir_out, 1, TRUE)
space <- " "
space <- paste0(space, space)
if (is.null(base_records)) out(paste0("Checked records", space, "Cloud-free pixels"))
TMP_CROP <- "crop_tmp.tif"
TMP_CROP_MOS <- "curr_crop_mos_tmp.tif"
TMP_BASE_MOS <- "base_mos_tmp_"
r <- RASTER_LAYER()
mode_aoi <- "aoi"
curr_sensor <- unique(records$product)
le_first_order <- length(sub_within[[1]])
collection <- sapply(sub_within, function(x) cMask_path <- as.character(records[[cloud_mask_col]][x]))
names(collection) <- sapply(sub_within, function(x) {
return(records[x, identifier])
})
collection <- collection[intersect(which(collection != "NONE"), which(!is.na(collection)))]
le_collection <- length(collection)
base_mos_path <- file.path(tmp_dir, "base_mosaic_tmp.tif")
if (is.null(base_records)) {
base_records <- collection[1:le_first_order]
start <- le_first_order + 1
} else {
start <- 1
}
start <- ifelse(start > le_collection, le_collection, start)
base_mos_is_new <- !file.exists(base_mos_path)
if (base_mos_is_new) {
names <- names(base_records)
base_records <- .aggr_rasters(base_records, names, aoi = aoi, dir_out = tmp_dir)
names(base_records) <- names
base_mos <- .select_bridge_mosaic(base_records, aoi, base_mos_path)
# check the mosaic result before discarding it
if (!inherits(base_mos, r)) start <- 1
rm(base_mos)
}
.delete_tmp_files(tmp_dir)
base_coverage <- -1000
n_pixel_aoi <- .calc_aoi_corr_vals(aoi, raster(base_records[1]))
not_more_than_base <- le_collection == length(base_records)
for (i in start:le_collection) {
x <- collection[i]
base_mos <- raster(base_mos_path)
if (base_mos_is_new) {
cov_init <- 0
cov_aft <- .calc_aoi_coverage(base_mos, aoi, n_pixel_aoi)
add_them <- .select_exceeds_improvement(min_improvement, cov_init, cov_aft)
if (add_them) {
base_coverage <- cov_aft
} else {
if (file.exists(base_mos_path)) unlink(base_mos_path)
return(selected(NULL, 0, records))
}
}
if (i == start) {
is_last_record <- i == le_collection
base_coverage <- .calc_aoi_coverage(base_mos, aoi, n_pixel_aoi)
base_coverage_seq <- 0:base_coverage
last_base <- i - 1
cov_seq <- split(
base_coverage_seq,
ceiling(seq_along(base_coverage_seq) / (length(base_coverage_seq) / last_base))
)
for (cov_out in cov_seq) {
Sys.sleep(0.1) # brief pause so successive progress messages stay readable
index <- ifelse(is_last_record, i, last_base)
.select_out_cov(tail(cov_out, n = 1), index, le_collection, curr_sensor)
}
}
if (not_more_than_base && base_mos_is_new) break
name_x <- names(x)
x <- .aggr_rasters(x, name_x, aoi = aoi, dir_out = tmp_dir)
names(x) <- name_x
next_record <- raster(x)
next_record <- .mask_raster_by_polygon(next_record, aoi)
next_record <- .check_crs(next_record)
curr_base_mos_crop <- crop(base_mos, next_record)
aoi_subset <- st_as_sfc(st_bbox(next_record), crs = 4326)
aoi_subset <- .check_crs(aoi_subset)
aoi_union <- st_union(aoi)
aoi_subset <- suppressMessages(st_intersection(aoi_subset, aoi_union))
cov_init <- .calc_aoi_coverage(curr_base_mos_crop, aoi_subset, n_pixel_aoi)
if (round(cov_init) >= 99) {
add_it <- FALSE
} else {
crop_p <- file.path(tmp_dir, TMP_CROP)
curr_mos_tmp_p <- file.path(tmp_dir, TMP_CROP_MOS)
writeRaster(curr_base_mos_crop, crop_p, overwrite = T, datatype = dataType(base_mos))
curr_mos_tmp <- .select_bridge_mosaic(c(crop_p, x), aoi, curr_mos_tmp_p)
if (inherits(curr_mos_tmp, r)) {
cov_aft <- .calc_aoi_coverage(curr_mos_tmp, aoi_subset)
.delete_tmp_files(tmp_dir)
unlink(curr_mos_tmp_p)
rm(next_record, curr_base_mos_crop, curr_mos_tmp, base_mos)
if (((cov_init + (cov_init / 100) * min_improvement)) > 100) {
add_it <- cov_aft > cov_init && cov_init < 99
} else {
add_it <- .select_exceeds_improvement(min_improvement, cov_init, cov_aft)
}
} else {
add_it <- FALSE
}
}
if (add_it) {
save_str <- TMP_BASE_MOS
curr <- paste0(save_str, i, ".tif")
base_mos_path_tmp <- file.path(tmp_dir, curr)
base_records <- c(base_records, x)
base_mos <- .select_bridge_mosaic(base_records, aoi, base_mos_path_tmp)
if (inherits(base_mos, r)) {
rm(base_mos)
base_mos_tmp_files <- list.files(tmp_dir, pattern = save_str)
base_mos_tmp_files <- base_mos_tmp_files[which(base_mos_tmp_files != curr)]
del <- sapply(base_mos_tmp_files, function(del_file) {
del_path <- file.path(tmp_dir, del_file)
if (file.exists(del_path) && del_file != curr) unlink(del_path)
})
rm(del)
base_mos <- raster(base_mos_path_tmp)
base_coverage <- .raster_percent(base_mos, mode = mode_aoi, aoi = aoi, n_pixel_aoi)
base_mos_path <- base_mos_path_tmp
rm(base_mos)
.delete_tmp_files(tmp_dir)
.select_out_cov(base_coverage, i, le_collection, curr_sensor)
}
}
if (base_coverage >= satisfaction_value) {
break
}
}
out("\n")
selected <- selected(names(base_records), base_coverage, records)
if (delete_files) {
.tmp_dir(dir_out, 2, TRUE, tmp_dir_orig)
}
return(selected)
}
.select_save_mosaics <- function(records, selected, aoi,
params, dir_out, save_cmos, save_pmos) {
cols <- c(params$selected_col, params$timestamp_col, params$pmos_col, params$cmos_col)
records <- .select_prep_cols(records, cols, params$selected_col)
ts_seq <- 1:length(selected)
coverage_vector <- c()
n_records_vector <- c()
for (i in ts_seq) {
s <- selected[[i]]
ids_selected <- s$ids
ids_empty <- .is_empty_array(ids_selected)
if (ids_empty) {
save_path_cmos <- NA
save_path_pmos <- NA
} else {
if (save_cmos) {
save_path_cmos <- .select_cmask_mos(s, aoi, dir_out)
}
if (save_pmos) {
save_path_pmos <- .select_preview_mos(records, s, aoi, i, params$identifier, dir_out,
cloud_mask_col = params$cloud_mask_col,
preview_col = params$preview_col,
sensors_given = params$product_group
)
}
}
if (!save_cmos) save_path_cmos <- NA
if (!save_pmos) save_path_pmos <- NA
if (!is.na(save_path_pmos)) save_path_pmos <- normalizePath(save_path_pmos)
if (!is.na(save_path_cmos)) save_path_cmos <- normalizePath(save_path_cmos)
insert <- c(TRUE, s$timestamp, save_path_pmos, save_path_cmos)
for (j in 1:length(cols)) {
records[
which(records[[params$identifier]] %in% ids_selected),
cols[j]
] <- ifelse(.char_can_be_int(insert[j]), as.integer(insert[j]), insert[j])
}
if (!save_cmos) records[[params$cmos_col]] <- NULL
if (!save_pmos) records[[params$pmos_col]] <- NULL
curr_n_records <- length(s$cMask_paths)
curr_n_valid_pixels <- s$valid_pixels
n_records_vector <- append(n_records_vector, curr_n_records)
coverage_vector <- append(coverage_vector, curr_n_valid_pixels)
}
out("Summary by timestamp")
.select_final_info_table(ts_seq, coverage_vector, n_records_vector, params$sep)
return(records)
}
.select_cmask_mos <- function(s, aoi, dir_out) {
save_path_cmos <- file.path(dir_out, paste0(
.create_datetime_string(), "_",
"cloud_mask_mosaic_ts", s$timestamp, ".tif"
))
cMask_mosaic <- .select_bridge_mosaic(s$cMask_paths, aoi, save_path_cmos)
rm(cMask_mosaic)
.delete_tmp_files(tmpDir())
return(save_path_cmos)
}
.select_preview_mos <- function(records, s, aoi, i, identifier, dir_out,
cloud_mask_col, preview_col, sensors_given) {
tmp_dir_orig <- tempdir()
tmp_dir <- .tmp_dir(dir_out, action = 1, TRUE)
r <- RASTER_LAYER()
id_sel <- sapply(s$ids, function(x) which(records[[identifier]] == x))
save_str <- paste0(sample(LETTERS[1:20], 10), "_", collapse = "")
save_pmos <- file.path(tmp_dir, paste0(save_str, "preview_mosaic_timestamp", s$timestamp))
layers <- c("red", "green", "blue")
preview_paths <- lapply(id_sel, function(id_index) {
record <- records[id_index, ]
p_path <- record[[preview_col]]
if (is.na(p_path) || !file.exists(p_path)) {
return(NA)
}
cMask <- raster(records[id_index, cloud_mask_col])
preview <- stack(p_path)
cMask <- .check_crs(cMask)
preview <- .check_crs(preview)
na_mask <- .create_preview_na_mask(preview, record)
preview <- mask(preview, na_mask, maskvalue = 0)
if (is.landsat(record)) {
preview <- .landsat_preview_mask_edges(preview)
} else if (is.sentinel2(record)) {
preview <- .sentinel2_preview_mask_edges(preview)
}
preview_cloud_masked <- mask(preview, cMask, maskvalue = NA)
preview_save <- file.path(tmp_dir, paste0(records[id_index, identifier], "_cmasked"))
paths_sep <- sapply(1:nlayers(preview_cloud_masked), function(j) {
layer_save <- paste0(preview_save, "_", id_index, "_", layers[j], ".tif")
writeRaster(preview_cloud_masked[[j]], layer_save, overwrite = T)
return(layer_save)
})
})
preview_paths <- .gsd_compact(preview_paths)
preview_mos <- lapply(1:length(layers), function(j) {
curr_layers <- lapply(preview_paths, function(x) path <- x[j])
save_path_pmos <- paste0(save_pmos, "_", layers[j], ".grd")
pmos <- try(.select_bridge_mosaic(unlist(curr_layers), aoi, save_path_pmos, mode = "rgb"))
if (!inherits(pmos, r)) {
return(NA)
} else {
return(pmos)
}
})
if (any(sapply(preview_mos, class) != r)) out("Could not create preview RGB mosaic", 2)
preview_mos_stack <- stack(preview_mos)
save_pmos_final <- file.path(dir_out, paste0(
.create_datetime_string(), "_",
"rgb_preview_mosaic_ts", i, ".tif"
))
writeRaster(preview_mos_stack, save_pmos_final, overwrite = T)
.delete_tmp_files(tmp_dir)
.tmp_dir(dir_out, action = 2, TRUE, tmp_dir_orig)
return(save_pmos_final)
}
|
db_download_eurosl <- function(version = 'latest', verbose = TRUE, overwrite = FALSE) {
db_url <- paste('https://euromed.infinitenature.org/EuroSL', version, 'EuroSL.zip', sep='/')
db_path <- file.path(tdb_cache$cache_path_get(), 'euroslSqlite.zip')
db_path_file <- file.path(tdb_cache$cache_path_get(), 'euroslSqlite')
final_file <- file.path(tdb_cache$cache_path_get(), 'eurosl.sqlite')
assert(verbose, "logical")
assert(overwrite, "logical")
if (file.exists(final_file) && !overwrite) {
mssg(verbose, "Database already exists, returning old file")
return(final_file)
}
unlink(final_file, force = TRUE)
tdb_cache$mkdir()
mssg(verbose, 'downloading...')
curl::curl_download(db_url, db_path, quiet = TRUE)
mssg(verbose, 'unzipping...')
utils::unzip(db_path, exdir = db_path_file)
dirs <- list.dirs(db_path_file, full.names = TRUE)
dir_date <- dirs[ dirs != db_path_file ]
sql_path <- list.files(dir_date, pattern = ".sqlite", full.names = TRUE)
file.rename(sql_path, final_file)
mssg(verbose, 'cleaning up...')
unlink(db_path)
unlink(db_path_file, recursive = TRUE)
return(final_file)
}
db_download_germansl <- function(version = 'latest', verbose = TRUE, overwrite = FALSE) {
db_url <- paste('https://germansl.infinitenature.org/GermanSL', version, 'GermanSL.zip', sep='/')
db_path <- file.path(tdb_cache$cache_path_get(), 'germanslSqlite.zip')
db_path_file <- file.path(tdb_cache$cache_path_get(), 'germanslSqlite')
final_file <- file.path(tdb_cache$cache_path_get(), 'germansl.sqlite')
assert(verbose, "logical")
assert(overwrite, "logical")
if (file.exists(final_file) && !overwrite) {
mssg(verbose, "Database already exists, returning old file")
return(final_file)
}
unlink(final_file, force = TRUE)
tdb_cache$mkdir()
mssg(verbose, 'downloading...')
curl::curl_download(db_url, db_path, quiet = TRUE)
mssg(verbose, 'unzipping...')
utils::unzip(db_path, exdir = db_path_file)
dirs <- list.dirs(db_path_file, full.names = TRUE)
dir_date <- dirs[ dirs != db_path_file ]
sql_path <- list.files(dir_date, pattern = ".sqlite", full.names = TRUE)
file.rename(sql_path, final_file)
mssg(verbose, 'cleaning up...')
unlink(db_path)
unlink(db_path_file, recursive = TRUE)
return(final_file)
}
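# db_download_eurosl() and db_download_germansl() differ only in names and URL;
# a shared helper along these lines (a hypothetical, untested sketch) would
# remove the duplication:
db_download_infinitenature <- function(list_name, base_url, version = 'latest',
verbose = TRUE, overwrite = FALSE) {
db_url <- paste(base_url, version, paste0(list_name, '.zip'), sep = '/')
db_path <- file.path(tdb_cache$cache_path_get(), paste0(tolower(list_name), 'Sqlite.zip'))
db_path_file <- file.path(tdb_cache$cache_path_get(), paste0(tolower(list_name), 'Sqlite'))
final_file <- file.path(tdb_cache$cache_path_get(), paste0(tolower(list_name), '.sqlite'))
assert(verbose, "logical")
assert(overwrite, "logical")
if (file.exists(final_file) && !overwrite) {
mssg(verbose, "Database already exists, returning old file")
return(final_file)
}
unlink(final_file, force = TRUE)
tdb_cache$mkdir()
mssg(verbose, 'downloading...')
curl::curl_download(db_url, db_path, quiet = TRUE)
mssg(verbose, 'unzipping...')
utils::unzip(db_path, exdir = db_path_file)
dirs <- list.dirs(db_path_file, full.names = TRUE)
dir_date <- dirs[dirs != db_path_file]
sql_path <- list.files(dir_date, pattern = ".sqlite", full.names = TRUE)
file.rename(sql_path, final_file)
mssg(verbose, 'cleaning up...')
unlink(db_path)
unlink(db_path_file, recursive = TRUE)
return(final_file)
}
# e.g. db_download_eurosl() then becomes:
# db_download_infinitenature('EuroSL', 'https://euromed.infinitenature.org/EuroSL')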
|
wCorrSim <- function(n, rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=FALSE, outstr="") {
len <- max(c(length(n), length(rho), length(ML), length(fast), length(reset), length(usew)))
vec <- c("n", "rho", "ML", "fast", "reset", "usew")
for(i in 1:length(vec)) {
var <- get(vec[i])
if(length(var) != len) {
if(length(var) != 1) {
stop("length of ", sQuote(vec[i]), " must be 1 or the same as the longest vector passed to sim")
} else {
var <- rep(var,len)
}
}
assign(vec[i],var)
}
everusew <- sum(usew)>0
ns <- n
cor0 <- rho
df <- data.frame(n=n,rho=rho, ML=ML, fast=fast, reset=reset, usew=usew)
df$spear <- df$speart <- NA
df$Q <- df$M <- NA
df$pear <- df$peart <- NA
df$pc <- df$pct <- NA
df$ps <- df$pst <- NA
ii <- 1
while(ii <= nrow(df)) {
cori <- df$rho[ii]
n <- df$n[ii]
ML <- df$ML[ii]
fast <- df$fast[ii]
reset <- df$reset[ii]
usew <- df$usew[ii]
if(interactive()) {
cat(outstr,"n=",n,"cori=",cori,"pct=",100*ii/nrow(df),"\n")
cat(" fast=",fast,"ml=",ML,"reset=",reset,"\n")
}
if(reset) {
n <- ifelse(everusew, 10*df$n[ii], df$n[ii])
cr <- cori
x <- y <- w <- M <- Q <- c()
while(length(w) < df$n[ii]) {
xp <- rnorm(n)
yp <- sqrt(1-cr^2)*rnorm(n) + cr*xp
if(everusew) {
wp <- (xp-yp)^2+1
pr <- 1/wp
pr <- df$n[ii] * pr/(sum(pr) * 100)
wp <- 1/pr
samp <- (1:n)[runif(n)<pr]
x <- c(x,xp[samp])
y <- c(y,yp[samp])
w <- c(w,wp[samp])
} else {
x <- xp
y <- yp
w <- rep(1/n, n)
}
}
M <- 1
Q <- 1
nm <- sample(2:5,1)
nq <- sample(2:5,1)
x <- x[1:df$n[ii]]
y <- y[1:df$n[ii]]
w <- w[1:df$n[ii]]
iter <- 1
while( ((length(unique(M)) < 2) | (length(unique(Q)) < 2)) & (iter < 100)) {
iter <- iter + 1
tm <- sort(rnorm(nm))
tq <- sort(rnorm(nq))
theta1 <- c(NA,-Inf,tq,Inf)
theta2 <- c(NA,-Inf,tm,Inf)
Q <- rep(NA,n)
for(i in 2:length(theta1)) {
Q <- ifelse(x>theta1[i], i, Q)
}
Q <- Q - 1
Q <- as.numeric(as.factor(Q))
M <- rep(NA,n)
for(i in 2:length(theta2)) {
M <- ifelse(y>theta2[i], i, M)
}
M <- M - 1
M <- as.numeric(as.factor(M))
}
if(iter >=99) {
cat("could not get multiple bins\n")
cat("x <- c(",paste(x,collapse=","),")\n")
cat("y <- c(",paste(y,collapse=","),")\n")
cat("M <- c(",paste(M,collapse=","),")\n")
cat("Q <- c(",paste(Q,collapse=","),")\n")
}
df$M[ii] <- length(unique(M))
df$Q[ii] <- length(unique(Q))
} else {
df$cor[ii] <- df$cor[ii-1]
df$M[ii] <- length(unique(M))
df$Q[ii] <- length(unique(Q))
}
if(usew) {
wu <- w
} else {
wu <- rep(1,length(x))
}
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Pearson", weights=wu, fast=fast, ML=ML))
df$peart[ii] <- sum(st0[1:2])
df$pear[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Spearman", weights=wu, fast=fast, ML=ML))
df$speart[ii] <- sum(st0[1:2])
df$spear[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(x,M, method="Polyserial", weights=wu, fast=fast, ML=ML))
df$pst[ii] <- sum(st0[1:2])
df$ps[ii] <- fcorp
st0 <- system.time(fcorp <- weightedCorr(M, Q, method="Polychoric", weights=wu, fast=fast, ML=ML))
df$pct[ii] <- sum(st0[1:2])
df$pc[ii] <- fcorp
ii <- ii + 1
}
dfout <- data.frame(n=rep(df$n,4),
rho=rep(df$rho,4),
ML=rep(df$ML,4),
usew=rep(df$usew,4),
fast=rep(df$fast,4),
est=c(df$pear, df$spear, df$ps, df$pc),
t=c(df$peart, df$speart, df$pst, df$pct),
type=rep(c("Pearson", "Spearman", "Polyserial", "Polychoric"),each=nrow(df)))
dfout
}
spearmanSim <- function(n, rho, ML=FALSE, fast=TRUE, reset=TRUE, usew=TRUE, outstr="") {
len <- max(c(length(n), length(rho), length(ML), length(fast), length(reset), length(usew)))
vec <- c("n", "rho", "ML", "fast", "reset", "usew", "Spearman", "N")
N <- n
Spearman <- rho
for(i in 1:length(vec)) {
var <- get(vec[i])
if(length(var) != len) {
if(length(var) != 1) {
stop("length of ", sQuote(vec[i]), " must be 1 or the same as the longest vector passed to sim")
} else {
var <- rep(var,len)
}
}
assign(vec[i],var)
}
everusew <- sum(usew)>0
ns <- n
cor0 <- rho
df <- data.frame(n=n,rho=rho, ML=ML, fast=fast, reset=reset, usew=usew, Spearman = rho)
df$spear <- df$speart <- NA
df$pear <- df$peart <- NA
ii <- 1
while(ii <= nrow(df)) {
cori <- df$rho[ii]
n <- df$n[ii]
ML <- df$ML[ii]
fast <- df$fast[ii]
reset <- df$reset[ii]
usew <- df$usew[ii]
if(interactive()) {
cat(outstr,"n=",n,"cori=",cori,"pct=",100*ii/nrow(df),"\n")
cat(" fast=",fast,"ml=",ML,"reset=",reset,"\n")
}
if(reset) {
n <- ifelse(everusew, 50*df$n[ii], df$n[ii])
cr <- cori
x <- y <- w <- M <- Q <- c()
while(length(w) < df$n[ii]) {
xp <- rnorm(n)
yp <- sqrt(1-cr^2)*rnorm(n) + cr*xp
df$Spearman[ii] <- cor(xp, yp, method="spearman")
if(everusew) {
wp <- (xp-yp)^2+1
pr <- 1/wp
pr <- pr*df$n[ii]/(sum(pr))
wp <- 1/pr
samp <- (1:n)[runif(n)<pr]
df$N[ii] <- length(samp)
x <- c(x,xp[samp])
y <- c(y,yp[samp])
w <- c(w,wp[samp])
} else {
x <- xp
y <- yp
w <- rep(1/n, n)
}
}
x <- x[1:df$n[ii]]
y <- y[1:df$n[ii]]
w <- w[1:df$n[ii]]
} else {
df$cor[ii] <- df$cor[ii-1]
}
if(usew) {
wu <- w
} else {
wu <- rep(1,length(x))
}
st0 <- system.time(fcorp <- weightedCorr(x,y, method="Spearman", weights=wu, fast=fast, ML=ML))
df$speart[ii] <- sum(st0[1:2])
df$spear[ii] <- fcorp
ii <- ii + 1
}
dfout <- data.frame(n=rep(df$n,1),
rho=rep(df$rho,1),
ML=rep(df$ML,1),
usew=rep(df$usew,1),
fast=rep(df$fast,1),
est=c(df$spear),
t=c(df$speart),
SpearmanOrig = df$Spearman,
N = df$N,
type=rep(c("Spearman"),each=nrow(df)))
dfout
}
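# Usage sketch: one row per (rho, method) combination comparing the four
# weightedCorr() estimators (requires the wCorr package).
library(wCorr)
head(wCorrSim(n = 100, rho = c(0, 0.5, 0.9)))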
|
get_release <- function(resource,
limit = 20L,
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE) {
tbl_json <- get(resource_url = resource,
limit = limit,
verbose = verbose,
warnings = warnings,
progress_bar = progress_bar)
tidy_tbls <- as_tidy_tables_releases(tbl_json)
return(tidy_tbls)
}
get_release_by_release_date <-
function(release_date,
limit = 20L,
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE) {
resource <- '/rest/release'
resource_urls <- sprintf("%s/%s", resource, release_date)
purrr::map(
resource_urls,
get_release,
limit = limit,
warnings = warnings,
verbose = verbose,
progress_bar = progress_bar
) %>%
purrr::pmap(dplyr::bind_rows)
}
get_release_current <-
function(limit = 20L,
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE) {
resource_url <- '/rest/release/current'
get_release(resource_url,
limit = limit,
warnings = warnings,
verbose = verbose,
progress_bar = progress_bar)
}
get_release_all <-
function(limit = 20L,
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE) {
resource_url <- '/rest/release/all'
get_release(resource_url,
limit = limit,
warnings = warnings,
verbose = verbose,
progress_bar = progress_bar)
}
get_releases <- function(date = 'latest',
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE) {
if (identical(date, 'latest')) {
tbl <- get_release_current(verbose = verbose,
warnings = warnings,
progress_bar = progress_bar) %>%
coerce_to_s4_releases()
return(tbl)
}
if (identical(date, 'all')) {
tbl <- get_release_all(verbose = verbose,
warnings = warnings,
progress_bar = progress_bar) %>%
coerce_to_s4_releases()
return(tbl)
}
if (all(stringr::str_detect(date, pattern = '^\\d{4}-\\d{2}-\\d{2}$'))) {
tbl <- get_release_by_release_date(
release_date = date,
verbose = verbose,
warnings = warnings,
progress_bar = progress_bar
) %>%
coerce_to_s4_releases()
return(tbl)
}
stop('Argument `date` must be one of:\n',
' - "all": for all releases;\n',
' - "latest": for the most up-to-date release;\n',
' - "YYYY-MM-DD": for a release of a specific date, e.g., "2020-10-19".\n'
)
}
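# Usage sketch (hits the live REST service, so shown without evaluation):
# get_releases()                     # latest release
# get_releases(date = 'all')         # every release
# get_releases(date = '2020-10-19')  # one specific release date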
|
boolSkip <- FALSE
test_that("Check 69.1 - testing calculation of class PerCapitaNucleolus" ,{
if(boolSkip){
skip("Test was skipped")
}
v <- c(0, 0, 0, 0, 9, 10, 12)
result <- perCapitaNucleolus(v)
expect_equal(result, c(4/6, 7/6, 61/6), tolerance = 1e-3)
})
|
print.nonnest.test <- function(x, digits = x$digits, ...)
{
b <- min(x$stat, x$nobs - x$stat)
p <- 2 * pbinom(b, x$nobs, 0.5)
pref <- if (x$stat > x$nobs - x$stat) 1 else 2
testname <- "Clarke"
cat("\n", testname, " test for non-nested models\n", sep = "")
cat("\nModel 1 log-likelihood:", format(sum(x$loglik1), digits = digits))
cat("\nModel 2 log-likelihood:", format(sum(x$loglik2), digits = digits))
cat("\nObservations:", x$nobs)
cat("\nTest statistic:", format(x$stat, digits = digits))
if (x$test == "clarke")
cat(" (", round(100* x$stat / x$nobs), "%)", sep = "")
cat("\n")
fp <- format.pval(p, digits = digits)
if (substr(fp, 1L, 1L) != "<") {
fp <- paste("=", fp)
} else if (substr(fp, 2L, 2L) != " ") {
fp <- paste(substr(fp, 1L, 1L), substr(fp, 2L, nchar(fp)))
}
if (p < x$level) {
cat("\nModel ", pref, " is preferred (p ", fp, ")\n\n", sep = "")
} else {
cat("\nNeither model is significantly preferred (p ", fp, ")\n\n",
sep = "")
}
invisible(x)
}
clarke_test <- function(model1, model2, level = 0.05, digits = 2){
x <- nonnest(model1, model2)
correction <- (x$p1 - x$p2) * (log(x$n) / (2*x$n))
stat <- sum(x$loglik1 - x$loglik2 > correction)
ans <- list(stat = stat,
test = "clarke",
level = level,
digits = digits,
loglik1 = x$loglik1,
loglik2 = x$loglik2,
nparams = c(x$p1, x$p2),
nobs = x$n)
class(ans) <- "nonnest.test"
return(ans)
}
nonnest <- function(model1, model2){
n <- nobs(model1)
if (nobs(model2) != n)
stop("model1 and model2 have different numbers of observations")
y1 <- model.response(model.frame(model1))
y2 <- model.response(model.frame(model2))
if (!isTRUE(all.equal(y1, y2, check.attributes = FALSE)))
stop("models do not have same dependent variable")
anyWeights <- function(x) !is.null(weights(x)) && !all(weights(x)==1)
if (anyWeights(model1) || anyWeights(model2))
stop("'clarke_test' does not yet support models with weights")
loglik1 <- indivLogLiks(model1)
loglik2 <- indivLogLiks(model2)
p1 <- nparams(model1)
p2 <- nparams(model2)
return(list(n = n, loglik1 = loglik1, loglik2 = loglik2, p1 = p1, p2 = p2))
}
indivLogLiks <- function(model){
UseMethod("indivLogLiks")
}
indivLogLiks.glm <- function(model)
{
if(!(family(model)$family %in% c("binomial", "poisson", "gaussian")))
stop("Only gaussian, binomial and poisson families currently supported")
if(family(model)$family == "binomial"){
ans <- ll_fun.binomial(model)
}
if(family(model)$family == "poisson"){
ans <- ll_fun.poisson(model)
}
if(family(model)$family == "gaussian"){
ans <- ll_fun.gaussian(model)
}
return(ans)
}
indivLogLiks.lm <- function(model){
ans <- ll_fun.gaussian(model)
return(ans)
}
indivLogLiks.polr <- function(model){
y <- as.numeric(model.response(model.frame(model)))
probs <- predict(model, type="probs")
probs <- probs[cbind(1:length(y), y)]
ans <- log(probs)
return(ans)
}
indivLogLiks.clm <- function(model){
probs <- predict(model, type="prob")$fit
ans <- log(probs)
return(ans)
}
indivLogLiks.multinom <- function(model){
y <- as.numeric(model.response(model.frame(model)))
probs <- predict(model, type="probs")
probs <- probs[cbind(1:length(y), y)]
ans <- log(probs)
return(ans)
}
indivLogLiks.negbin <- function(model){
y <- model.response(model.frame(model))
yhat <- fitted(model)
probs <- dnbinom(y, size=model$theta, mu=yhat)
ans <- log(probs)
return(ans)
}
ll_fun.binomial <- function(model){
y <- model.response(model.frame(model))
probs <- ifelse(y == 1, fitted.values(model), 1-fitted.values(model))
log(probs)
}
ll_fun.poisson <- function(model){
y <- model.response(model.frame(model))
probs <- dpois(y, fitted.values(model))
log(probs)
}
ll_fun.gaussian <- function(model){
y <- model.response(model.frame(model))
res <- residuals(model)
npar <- length(coef(model))
sigma <- sqrt(var(res)*((nobs(model)-1)/(nobs(model)-npar)))
probs <- dnorm(y, fitted.values(model), sigma)
log(probs)
}
nparams <- function(model){
UseMethod("nparams")
}
nparams.glm <- function(model){
attr(logLik(model), "df")
}
nparams.lm <- function(model){
sum(hatvalues(model))
}
nparams.polr <- function(model){
length(coef(model)) + length(model$zeta)
}
nparams.clm <- function(model){
length(coef(model))
}
nparams.multinom <- function(model){
length(c(coef(model)))
}
nparams.negbin <- function(model){
length(coef(model)) + 1
}
nobs.multinom <- function(object, ...){
length(object$weights)
}
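# Usage sketch: Clarke's paired sign test of individual log-likelihoods for
# two non-nested specifications of the same binary response.
m1 <- glm(am ~ wt, data = mtcars, family = binomial)
m2 <- glm(am ~ hp, data = mtcars, family = binomial)
clarke_test(m1, m2)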
|
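# The Shiny module below comes from a Spanish-language ML dashboard
# ('arbol' = tree, 'reglas' = rules); it appears to wrap traineR::train.rpart()
# for fitting, plotting, predicting and evaluating a decision tree across
# tabbed panels, mirroring each step as editable code in ace editors.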
mod_d_tree_ui <- function(id){
ns <- NS(id)
codigo.dt <- list(conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtPlot'",
codigo.monokai(ns("fieldCodeDtPlot"),height = "10vh")),
conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtPred'",
codigo.monokai(ns("fieldCodeDtPred"),height = "10vh")),
conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtMC'",
codigo.monokai(ns("fieldCodeDtMC"),height = "10vh")),
conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtIndex'",
codigo.monokai(ns("fieldCodeDtIG"),height = "10vh")),
conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtReglas'",
codigo.monokai(ns("fieldCodeDtRule"),height = "10vh")))
codigo.dt.run<- list(conditionalPanel("input['d_tree_ui_1-BoxDt'] == 'tabDtModelo'",
codigo.monokai(ns("fieldCodeDt"),height = "10vh")))
opc_dt <- div(conditionalPanel(
"input['d_tree_ui_1-BoxDt'] == 'tabDtModelo'",
tabsOptions(heights = c(70, 30), tabs.content = list(
list(options.run(ns("runDt")), tags$hr(style = "margin-top: 0px;"),
fluidRow(col_6(numericInput(ns("minsplit.dt"), labelInput("minsplit"), 2, width = "100%",min = 1)),
col_6(numericInput(ns("maxdepth.dt"), labelInput("maxdepth"), 15, width = "100%",min = 0, max = 30, step = 1))),
fluidRow(col_12(selectInput(inputId = ns("split.dt"), label = labelInput("splitIndex"),selected = 1,
choices = list("gini" = "gini", "Entropia" = "information"))))),
codigo.dt.run))),
conditionalPanel(
"input['d_tree_ui_1-BoxDt'] != 'tabDtModelo'",
tabsOptions(botones = list(icon("code")), widths = 100,heights = 55, tabs.content = list(
codigo.dt))))
tagList(
tabBoxPrmdt(
id = ns("BoxDt"), opciones = opc_dt,
tabPanel(title = labelInput("generatem"), value = "tabDtModelo",
withLoader(verbatimTextOutput(ns("txtDt")),
type = "html", loader = "loader4")),
tabPanel(title = labelInput("garbol"), value = "tabDtPlot",
withLoader(plotOutput(ns('plot_dt'), height = "55vh"),
type = "html", loader = "loader4")),
tabPanel(title = labelInput("predm"), value = "tabDtPred",
withLoader(DT::dataTableOutput(ns("dtPrediTable")),
type = "html", loader = "loader4")),
tabPanel(title = labelInput("mc"), value = "tabDtMC",
withLoader(plotOutput(ns('plot_dt_mc'), height = "45vh"),
type = "html", loader = "loader4"),
verbatimTextOutput(ns("txtDtMC"))),
tabPanel(title = labelInput("indices"),value = "tabDtIndex",
fluidRow(col_6(echarts4rOutput(ns("dtPrecGlob"), width = "100%")),
col_6(echarts4rOutput(ns("dtErrorGlob"), width = "100%"))),
fluidRow(col_12(shiny::tableOutput(ns("dtIndPrecTable")))),
fluidRow(col_12(shiny::tableOutput(ns("dtIndErrTable"))))),
tabPanel(title = labelInput("reglas"),value = "tabDtReglas",
withLoader(verbatimTextOutput(ns("rulesDt")),
type = "html", loader = "loader4"))
)
)
}
mod_d_tree_server <- function(input, output, session, updateData, modelos){
ns <- session$ns
nombre.modelo <- rv(x = NULL)
observeEvent(c(updateData$datos.aprendizaje,updateData$datos.prueba), {
updateTabsetPanel(session, "BoxDt",selected = "tabDtModelo")
default.codigo.dt()
})
output$txtDt <- renderPrint({
input$runDt
tryCatch({
default.codigo.dt()
train <- updateData$datos.aprendizaje
test <- updateData$datos.prueba
var <- paste0(updateData$variable.predecir, "~.")
tipo <- isolate(input$split.dt)
minsplit<-isolate(input$minsplit.dt)
maxdepth<-isolate(input$maxdepth.dt)
nombre <- paste0("dtl-",tipo)
modelo <- traineR::train.rpart(as.formula(var), data = train,
control = rpart.control(minsplit = minsplit, maxdepth = maxdepth),parms = list(split = tipo))
pred <- predict(modelo , test, type = 'class')
prob <- predict(modelo , test, type = 'prob')
mc <- confusion.matrix(test, pred)
isolate(modelos$dt[[nombre]] <- list(nombre = nombre, modelo = modelo ,pred = pred , prob = prob, mc = mc))
nombre.modelo$x <- nombre
print(modelo)
},error = function(e){
return(invisible(""))
})
})
output$dtPrediTable <- DT::renderDataTable({
test <- updateData$datos.prueba
var <- updateData$variable.predecir
idioma <- updateData$idioma
obj.predic(modelos$dt[[nombre.modelo$x]]$pred,idioma = idioma, test, var)
},server = FALSE)
output$txtDtMC <- renderPrint({
print(modelos$dt[[nombre.modelo$x]]$mc)
})
output$plot_dt_mc <- renderPlot({
idioma <- updateData$idioma
exe(plot.MC.code(idioma = idioma))
plot.MC(modelos$dt[[nombre.modelo$x]]$mc)
})
output$dtIndPrecTable <- shiny::renderTable({
idioma <- updateData$idioma
indices.dt <- indices.generales(modelos$dt[[nombre.modelo$x]]$mc)
xtable(indices.prec.table(indices.dt,"dt", idioma = idioma))
}, spacing = "xs",bordered = T, width = "100%", align = "c", digits = 2)
output$dtIndErrTable <- shiny::renderTable({
idioma <- updateData$idioma
indices.dt <- indices.generales(modelos$dt[[nombre.modelo$x]]$mc)
# NOTE: the two gauge colour arguments were truncated in the source; the hex
# values below are placeholders.
output$dtPrecGlob <- renderEcharts4r(e_global_gauge(round(indices.dt[[1]],2), tr("precG",idioma), "#B5E391", "#90C226"))
output$dtErrorGlob <- renderEcharts4r(e_global_gauge(round(indices.dt[[2]],2), tr("errG",idioma), "#E39191", "#C22222"))
xtable(indices.error.table(indices.dt,"dt"))
}, spacing = "xs",bordered = T, width = "100%", align = "c", digits = 2)
output$plot_dt <- renderPlot({
tryCatch({
tipo <- isolate(input$split.dt)
datos <- updateData$datos
var <- updateData$variable.predecir
num <- length(levels(datos[,var]))
modelo <- modelos$dt[[nombre.modelo$x]]$modelo
updateAceEditor(session, "fieldCodeDtPlot", value = dt.plot(tipo, num))
prp(modelo, type = 2, extra = 104, nn = T, varlen = 0, faclen = 0,
fallen.leaves = TRUE, branch.lty = 6, shadow.col = 'gray82',
box.col = gg_color_hue(num)[modelo$frame$yval], roundint=FALSE)
},
error = function(e){
output$plot_dt <- renderPlot(NULL)
})
})
output$rulesDt <- renderPrint({
tipo <- isolate(input$split.dt)
model <- modelos$dt[[nombre.modelo$x]]$modelo
var <- model$prmdt$var.pred
updateAceEditor(session, "fieldCodeDtRule", paste0("rpart.rules(modelo.dt.",tipo,", cover = TRUE,nn = TRUE , style = 'tall', digits=3,
response.name ='",paste0("Rule Number - ", var),"')"))
rpart.plot::rpart.rules(model, cover = TRUE,nn = TRUE ,roundint=FALSE, style = "tall", digits=3,
response.name = paste0("Rule Number - ", var))
})
default.codigo.dt <- function() {
tipo <- isolate(input$split.dt)
codigo <- dt.modelo(variable.pr = updateData$variable.predecir,
minsplit = isolate(input$minsplit.dt),
maxdepth = isolate(input$maxdepth.dt),
split = tipo)
updateAceEditor(session, "fieldCodeDt", value = codigo)
updateAceEditor(session, "fieldCodeDtPlot", value = dt.plot(tipo))
codigo <- dt.prediccion(tipo)
updateAceEditor(session, "fieldCodeDtPred", value = codigo)
codigo <- dt.MC(tipo)
updateAceEditor(session, "fieldCodeDtMC", value = codigo)
codigo <- extract.code("indices.generales")
updateAceEditor(session, "fieldCodeDtIG", value = codigo)
}
}
|
geom_labelpath <- function(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...,
lineend = "butt",
linejoin = "round",
linemitre = 10,
text_only = FALSE,
gap = FALSE,
upright = TRUE,
halign = "center",
offset = NULL,
parse = FALSE,
straight = FALSE,
padding = unit(0.05, "inch"),
text_smoothing = 0,
rich = FALSE,
label.padding = unit(0.25, "lines"),
label.r = unit(0.15, "lines"),
arrow = NULL,
remove_long = FALSE
) {
layer(
geom = GeomLabelpath,
mapping = mapping,
data = data,
stat = stat,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = set_params(
na.rm = na.rm,
lineend = lineend,
linejoin = linejoin,
linemitre = linemitre,
text_only = text_only,
gap = gap,
upright = upright,
halign = halign,
offset = offset,
parse = parse,
straight = straight,
padding = padding,
text_smoothing = text_smoothing,
rich = rich,
label.padding = label.padding,
label.r = label.r,
arrow = arrow,
remove_long = remove_long,
...
)
)
}
GeomLabelpath <- ggproto(
"GeomLabelpath", GeomTextpath,
default_aes = aes(
colour = "black",
alpha = 1,
size = 3.88,
hjust = 0.5,
vjust = 0.5,
family = "",
fontface = 1,
lineheight = 1.2,
linewidth = 0.5,
linetype = 1,
spacing = 0,
angle = 0,
fill = "white",
linecolour = NULL,
textcolour = NULL,
boxcolour = NULL,
boxlinetype = 1,
boxlinewidth = NULL
),
extra_params = c("na.rm", names(formals(static_text_params))[-1]),
setup_params = function(data, params) {
update_params(params, type = "label")
},
draw_panel = function(
data, panel_params, coord,
lineend = "butt", linejoin = "round", linemitre = 10,
label.padding = unit(0.25, "lines"),
label.r = unit(0.15, "lines"), arrow = NULL,
text_params = static_text_params("label")
) {
data$group <- discretise(data$group)
if (!all(gapply(data$label, data$group,
function(x) all(x == x[1]), logical(1)))) {
warn(paste("geom_labelpath: Multiple strings found in at",
"least one group. Only the first will be used."))
}
data <- data[order(data$group), , drop = FALSE]
data <- coord_munch(coord, data, panel_params)
first <- run_start(data$group)
text_gp <- data_to_text_gp(data[first, , drop = FALSE])
path_gp <- data_to_path_gp(data[first, , drop = FALSE],
lineend = lineend,
linejoin = linejoin,
linemitre = linemitre)
box_gp <- data_to_box_gp(data[first, , drop = FALSE],
lineend = lineend,
linejoin = linejoin,
linemitre = linemitre)
safe_labels <- if (text_params$parse) {
safe_parse(as.character(data$label[first]))
} else {
data$label[first]
}
textpathGrob(
label = safe_labels,
x = data$x,
y = data$y,
id = data$group,
hjust = data$hjust[first],
vjust = text_params$offset %||% data$vjust[first],
halign = text_params$halign,
gap = text_params$gap,
gp_text = text_gp,
gp_path = path_gp,
gp_box = box_gp,
straight = text_params$straight,
upright = text_params$upright,
default.units = "npc",
angle = data$angle,
polar_params = get_polar_params(coord),
padding = text_params$padding,
text_smoothing = text_params$text_smoothing,
rich = text_params$rich,
label.padding = label.padding,
label.r = label.r,
arrow = arrow,
remove_long = text_params$remove_long,
as_label = TRUE
)
}
)
geom_labelline <- function(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...,
lineend = "butt",
linejoin = "round",
linemitre = 10,
text_only = FALSE,
gap = FALSE,
upright = TRUE,
halign = "center",
offset = NULL,
parse = FALSE,
straight = FALSE,
padding = unit(0.05, "inch"),
label.padding = unit(0.25, "lines"),
label.r = unit(0.15, "lines"),
arrow = NULL,
remove_long = TRUE
) {
layer(
geom = GeomLabelline,
mapping = mapping,
data = data,
stat = stat,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = set_params(
na.rm = na.rm,
lineend = lineend,
linejoin = linejoin,
linemitre = linemitre,
text_only = text_only,
gap = gap,
upright = upright,
halign = halign,
offset = offset,
parse = parse,
straight = straight,
padding = padding,
label.padding = label.padding,
label.r = label.r,
arrow = arrow,
remove_long = remove_long,
...
)
)
}
GeomLabelline <- ggproto("GeomLabelLine", GeomLabelpath,
setup_params = function(data, params) {
params$flipped_aes <- has_flipped_aes(data, params, ambiguous = TRUE)
update_params(params, type = "label")
},
extra_params = c("na.rm", "orientation"),
setup_data = function(data, params) {
data$flipped_aes <- params$flipped_aes
data <- flip_data(data, params$flipped_aes)
data <- data[order(data$PANEL, data$group, data$x), ]
flip_data(data, params$flipped_aes)
}
)
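# Usage sketch (needs the rest of the geomtextpath package, e.g. GeomTextpath,
# set_params() and textpathGrob(), so shown without evaluation):
# library(ggplot2)
# df <- data.frame(x = seq(0, 4 * pi, length.out = 100))
# df$y <- sin(df$x)
# ggplot(df, aes(x, y, label = "a labelled sine curve")) + geom_labelpath()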
|
"nichia"
|
winDialog <- function(type = c("ok", "okcancel", "yesno", "yesnocancel"),
message)
{
if (!interactive())
stop("winDialog() cannot be used non-interactively")
type <- match.arg(type)
res <- .External2(C_winDialog, type, message)
if(res == 10L) return(invisible(NULL))
c("NO", "CANCEL", "YES", "OK")[res+2L]
}
winDialogString <- function(message, default)
{
if (!interactive())
stop("winDialogString() cannot be used non-interactively")
.External2(C_winDialogString, message, default)
}
winMenuDel <- function(menuname)
invisible(.External2(C_winMenuDel, menuname, NULL))
winMenuDelItem <- function(menuname, itemname)
invisible(.External2(C_winMenuDel, menuname, itemname))
winMenuAdd <- function(menuname)
invisible(.External2(C_winMenuAdd, menuname, NULL, NULL))
winMenuAddItem <- function(menuname, itemname, action) {
if (! menuname %in% winMenuNames()) winMenuAdd(menuname)
invisible(.External2(C_winMenuAdd, menuname, itemname, action))
}
winMenuNames <- function() .External2(C_winMenuNames)
winMenuItems <- function(menuname) .External2(C_winMenuItems, menuname)
winProgressBar <- function(title = "R progress bar", label = "",
min = 0, max = 1, initial = 0, width = 300L)
{
res <- .External2(C_winProgressBar, as.integer(width), as.character(title),
as.character(label), as.double(min),
as.double(max), as.double(initial))
structure(list(pb=res), class = "winProgressBar")
}
close.winProgressBar <- function(con, ...)
.External2(C_closeWinProgressBar, con$pb)
setWinProgressBar <- function(pb, value, title=NULL, label=NULL)
{
if(!inherits(pb, "winProgressBar"))
stop(gettextf("'pb' is not from class %s",
dQuote("winProgressBar")),
domain = NA)
if(!is.null(title)) title <- as.character(title)
if(!is.null(label)) label <- as.character(label)
invisible(.External2(C_setWinProgressBar, pb$pb, as.double(value),
title, label))
}
getWinProgressBar <- function(pb)
{
if(!inherits(pb, "winProgressBar"))
stop(gettextf("'pb' is not from class %s",
dQuote("winProgressBar")),
domain = NA)
.External2(C_setWinProgressBar, pb$pb, NULL, NULL, NULL)
}
askYesNoWinDialog <- function(msg, ...) {
flush.console()
ans <- winDialog("yesnocancel", msg)
switch(ans,
YES = TRUE,
NO = FALSE,
NA)
}
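# Usage sketch (Windows-only, so shown without evaluation):
# pb <- winProgressBar(title = "Working...", min = 0, max = 100)
# for (i in 1:100) {
#   Sys.sleep(0.01)
#   setWinProgressBar(pb, i, label = sprintf("%d%% done", i))
# }
# close(pb)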
|
aux.preprocess <- function(data,type=c("center","scale","cscale","decorrelate","whiten")){
if (is.data.frame(data)){
matinput = t(as.matrix(data))
} else if (is.matrix(data)){
matinput = t(data)
} else {
warning("WARNING : input should be either dataframe or matrix.")
}
type = match.arg(type)
if (type=="scale"){
tmpdata = t(matinput)
p = ncol(tmpdata)
multiplier = rep(0,p)
for (i in 1:p){
multiplier[i] = 1/stats::sd(as.vector(tmpdata[,i]))
}
info = list()
info$type = "scale"
info$mean = rep(0,p)
info$multiplier = diag(multiplier)
result = list()
result$pX = tmpdata%*%diag(multiplier)
result$info = info
return(result)
} else if (type=="cscale"){
tmpdata = t(matinput)
p = ncol(tmpdata)
infomean = as.double(colMeans(tmpdata))
infomultiplier = rep(0,p)
for (i in 1:p){
infomultiplier[i] = 1/stats::sd(as.vector(tmpdata[,i]))
}
info = list()
info$type = "cscale"
info$mean = infomean
info$multiplier = diag(infomultiplier)
outdata = array(0,c(nrow(tmpdata),p))
for (i in 1:nrow(tmpdata)){
outdata[i,] = as.vector(tmpdata[i,])-infomean
}
result = list()
result$pX = (outdata%*%diag(infomultiplier))
result$info = info
return(result)
} else {
matoutput = tryCatch(
{
if (type=="center"){
aux_preprocess(matinput,as.integer(1))
} else if (type=="decorrelate"){
aux_preprocess(matinput,as.integer(2))
} else {
aux_preprocess(matinput,as.integer(3))
}
}, error=function(cond){
return(NA)
}, warning=function(cond){
return(NA)
}
)
if (length(matoutput)==1){
if (is.na(matoutput)){
result = NA
return(result)
}
} else {
info = list()
info$type = matoutput$type
info$mean = matoutput$mean
info$multiplier = matoutput$multiplier
result = list()
result$pX = t(matoutput$output)
result$info = info
return(result)
}
}
}
aux.preprocess.hidden <- function(data,type=c("null","center","scale","cscale","decorrelate","whiten"),algtype=c("linear","nonlinear")){
pptype = match.arg(type)
ppalgtype = match.arg(algtype)
n = nrow(data)
p = ncol(data)
if (type=="null"){
info = list()
info$type = "null"
info$mean = rep(0,p)
info$multiplier = 1
result = list()
result$pX = data
result$info = info
result$info$algtype = ppalgtype
return(result)
} else {
result = aux.preprocess(data,type=pptype)
result$info$algtype = ppalgtype
return(result)
}
}
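# Usage sketch: "cscale" centers each column and rescales it to unit variance;
# $pX holds the processed data and $info records the transformation.
X <- matrix(rnorm(50), nrow = 10)
out <- aux.preprocess(X, type = "cscale")
str(out$info)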
|
logtransform <- function(numeric.vector){
  numeric.vector <- as.numeric(numeric.vector)
  zero.present <- 0 %in% numeric.vector
  # log10(0) is undefined, so shift the whole vector by 1 when zeros occur
  if (zero.present){
    numeric.vector <- numeric.vector + 1
    zero <- "Zero was present so 1 was added to each observation"
  } else {
    zero <- "No observation was found to be zero"
  }
  max.to.min.ratio <- max(numeric.vector)/min(numeric.vector)
  logtransformed <- round(log10(numeric.vector), 4)
  my.output <- list(Ratio = max.to.min.ratio,
                    LogTransformedVector = logtransformed,
                    Comment = zero)
  return(my.output)
}
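## Worked sketch (not part of the original source): a zero triggers the +1
## shift before the base-10 log is taken.
if (FALSE) {
  logtransform(c(0, 1, 9, 99))
  # $Ratio is 100, computed on the shifted vector 1, 2, 10, 100
}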
|
outlyingness <- function(x, z=NULL, options=list()) {
if (missing(x)) {
stop("Input argument x is required.")
}
x <- data.matrix(x)
if (!is.numeric(x)) {
stop("The input argument x must be a numeric data matrix.")
}
n1 <- nrow(x)
p1 <- ncol(x)
if (n1 > sum(complete.cases(x))) {
stop("Missing values in x are not allowed.")
}
if (is.null(z)) {
z <- x
}
z <- data.matrix(z)
if (!is.numeric(z)) {
stop("The input argument z must be a numeric data matrix.")
}
n2 <- nrow(z)
p2 <- ncol(z)
if (p1 != p2) {
stop("The data dimension has to be the same for x and z.")
}
if (n2 > sum(complete.cases(z))) {
stop("Missing values in z are not allowed.")
}
if (is.null(options)) {
options <- list()
}
if (!is.list(options)) {
stop("options must be a list")
}
if ("type" %in% names(options)) {
type <- options[["type"]]
} else {
type <- "Affine"
}
if ("ndir" %in% names(options)) {
ndir <- options[["ndir"]]
} else {
ndir <- NULL
}
if ("stand" %in% names(options)) {
stand <- options[["stand"]]
} else {
stand <- "MedMad"
}
if ("centered" %in% names(options)) {
centered <- options[["centered"]]
} else {
centered <- FALSE
}
if ("h" %in% names(options)) {
h <- options[["h"]]
} else {
h <- NULL
}
if ("seed" %in% names(options)) {
seed <- options[["seed"]]
} else {
seed <- NULL
}
type.id <- match(type, c("Affine", "Rotation", "Shift"))[1]
if (is.na(type.id)) {
stop("The input parameter type must be one of: Affine, Rotation or Shift.")
}
if (is.null(ndir)) {
if (type.id == 1) ndir <- 250 * p1
if (type.id == 2) ndir <- 250 * 20
if (type.id == 3) ndir <- 250 * 50
}
calc.all <- 0
if (is.numeric(ndir)) {
if (ndir < 1) {
stop("The number of directions must be a positive integer.")
}
if (type.id == 1) {
ndir0 <- choose(n1, p1)
if (ndir0 <= ndir) {
ndir <- ndir0
calc.all <- 1
}
}
if (type.id == 2) {
ndir0 <- choose(n1, 2)
if (ndir0 <= ndir) {
ndir <- ndir0
calc.all <- 1
}
}
}
if (!is.numeric(ndir)) {
if (ndir == "all") {
if (type.id == 1) {
ndir <- choose(n1, p1)
calc.all <- 1
if (ndir > 1e7) {
stop("ndir is larger than 1e7. Try a smaller value of ndir.")
}
}
if (type.id == 2) {
ndir <- choose(n1, 2)
calc.all <- 1
if (ndir > 1e7) {
stop("ndir is larger than 1e7. Try a smaller value of ndir.")
}
}
if (type.id == 3) {
stop("Cannot compute all directions for type Shift.")
}
} else stop("The input parameter ndir is not recognized.")
}
if (p1 == 1) ndir <- 1
if (n1 < (p1 + 1) & type.id == 1) {
stop("When type is affine, n should be larger than p.")
}
scale.id <- match(stand, c("MedMad", "unimcd"))[1]
if (is.na(scale.id)) {
stop("The input parameter stand should be either MedMad or unimcd.")
}
  if (!(centered %in% c(FALSE, TRUE))) {
stop("The input parameter centered should be TRUE or FALSE.")
}
centered <- !centered
if (is.null(h)) {
h <- floor(n1 / 2) + 1
}
if (!is.numeric(h)) {
stop("The input parameter h should be numeric.")
}
if (h < (floor(n1 / 2) + 1) | h > n1) {
stop("The input parameter h should lie between [(n/2)]+1 and n.")
}
if (is.null(seed)) {
seed <- 10
}
if (!is.numeric(seed)) {
stop("The seed must be a strictly positive integer.")
}
if (seed <= 0) {
stop("The seed must be a strictly positive integer.")
}
tol <- 1e-7
w1 <- try(svd(scale(x) / sqrt(n1 - 1)), silent = TRUE)
if (!is.list(w1)) {
warning("The singular-value decomposition of the data matrix x
could not be computed.")
returned.result <- list(outlyingnessX = NULL,
outlyingnessZ = NULL,
cutoff = NULL,
flagX = NULL,
flagZ = NULL,
singularSubsets = NULL,
dimension = NULL,
hyperplane = NULL,
inSubspace = NULL)
class(returned.result) <- c("mrfDepth", "outlyingness")
return(returned.result)
}
if (min(w1$d) < tol) {
warning("An exact fit was found. Check the output for more details.")
returned.result <- list(outlyingnessX = NULL,
outlyingnessZ = NULL,
cutoff = NULL,
flagX = NULL,
flagZ = NULL,
singularSubsets = NULL,
dimension = sum(w1$d > tol),
hyperplane = w1$v[, which(w1$d == min(w1$d))[1]],
inSubspace = NULL)
class(returned.result) <- c("mrfDepth", "outlyingness")
return(returned.result)
}
x <- rbind(x, z)
n <- nrow(x)
Factor <- ifelse(h < n1, qchisq(h / n1, df = 1), 1)
Result <- .C("projoutlyingness",
as.integer(n),
as.integer(p1),
as.integer(ndir),
as.double(x),
as.double(rep(0, n)),
as.integer(0),
as.integer(type.id),
as.integer(n1),
as.integer(scale.id),
as.integer(h),
as.integer(centered),
as.double(Factor),
as.integer(calc.all),
as.double(rep(0, p1)),
as.integer(seed),
PACKAGE = "mrfDepth")
Outlyingness <- Result[[5]]
LO <- log(0.1 + Outlyingness[1:n1])
cutoff <- exp(median(LO) + mad(LO) * qnorm(0.995)) - 0.1
flag.x <- (Outlyingness[1:n1] <= cutoff)
flag.z <- (Outlyingness[(n1 + 1):(n1 + n2)] <= cutoff)
if (sum(abs(Result[[14]])) > tol) {
warning("A direction was found for which the robust scale equals zero.
See the help page for more details.", call. = FALSE)
returned.result <- list(outlyingnessX = NULL,
outlyingnessZ = NULL,
cutoff = NULL,
flagX = NULL,
flagZ = NULL,
singularSubsets = NULL,
dimension = NULL,
hyperplane = Result[[14]],
inSubspace = as.logical(Outlyingness))
class(returned.result) <- c("mrfDepth", "outlyingness")
return(returned.result)
}
returned.result <- list(outlyingnessX = Outlyingness[1:n1],
outlyingnessZ = Outlyingness[(n1 + 1):(n1 + n2)],
cutoff = cutoff,
flagX = flag.x,
flagZ = flag.z,
singularSubsets = Result[[6]],
dimension = NULL,
hyperplane = NULL,
inSubspace = NULL)
class(returned.result) <- c("mrfDepth", "outlyingness")
return(returned.result)
}
|
scry_sets_impl <- function(endpoint) {
scryfall(paste0("/sets", endpoint), parse_sets)
}
scry_sets <- function() {
scry_sets_impl("/")
}
scry_set <- function(id, source = "code") {
source <- if (source == "tcgplayer") paste0("/", source) else ""
scry_sets_impl(paste0(source, "/", id))
}
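## Usage sketch (not part of the original source; assumes the scryfall()
## helper and network access to the Scryfall API; the TCGplayer id below is
## made up for illustration).
if (FALSE) {
  scry_sets()                          # list all sets
  scry_set("khm")                      # look a set up by its code
  scry_set(1909, source = "tcgplayer") # or by TCGplayer id
}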
|
rescaleRR <- function(tree, RR){
  tree1 <- tree
  rts <- abs(RR$rates[, 1])
  t1ele <- sum(tree1$edge.length) # total edge length, preserved by rescaling
  rts <- rts[-1]                  # drop the root rate
  # tip rates are named by tip label; translate those names to node numbers
  names(rts)[Nnode(tree1):length(rts)] <- match(names(rts)[Nnode(tree1):length(rts)], tree$tip.label)
  # order the rates to match the edge matrix, then rescale branch lengths
  rts <- rts[match(tree1$edge[, 2], names(rts))]
  tree1$edge.length <- tree1$edge.length * rts
  tree1$edge.length <- t1ele / sum(tree1$edge.length) * tree1$edge.length
  return(tree1)
}
|
extdim_to_geotransform <- function(ext, dim) {
c(xmin = raster::xmin(ext),
xres = (raster::xmax(ext) - raster::xmin(ext))/ dim[2],
xsh = 0,
ymax = raster::ymax(ext),
ysh = 0,
yres = -(raster::ymax(ext) - raster::ymin(ext))/ dim[1])
}
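## Worked sketch (not part of the original source; assumes the raster
## package): a 10-row, 20-column grid over the unit square.
if (FALSE) {
  ext <- raster::extent(0, 1, 0, 1)
  extdim_to_geotransform(ext, dim = c(10, 20))
  # xres = 1/20 = 0.05 and yres = -1/10 = -0.1, with origin at (0, 1)
}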
lazywarp <- function(gdalsource, target, band = 1L, a_srs = NULL) {
stop("not implemented")
}
|
par.secr.fit <- function (arglist, ncores = 1, seed = NULL,
trace = TRUE, logfile = "logfile.txt", prefix = "fit.",
LB = FALSE, save.intermediate = FALSE) {
ptm <- proc.time()
if (is.character(arglist))
arglist <- mget(arglist, inherits = TRUE)
arglist <- lapply(arglist, function (x) {x$trace <- trace; x})
if (is.null(names(arglist)))
names(arglist) <- paste0("arg", 1:length(arglist))
getnames <- function(obj = 'capthist') {
tmpnames <- sapply(arglist, function(x) if (is.character(x[[obj]])) x[[obj]] else '')
unique(tmpnames)
}
data <- c(getnames('capthist'), getnames('mask'),getnames('dframe'),getnames('details'))
data <- data[nchar(data)>0]
arglist <- lapply(arglist, function (x) {
if (is.null(x$details))
x$details <- list(savecall = FALSE)
else if (!('savecall' %in% names(x$details))) {
x$details[['savecall']] <- FALSE
}
x
})
run.fit <- function(x) {
fitname <- attr(x,'name')
fit <- do.call("secr.fit", x)
if (save.intermediate){
assign(fitname, fit)
save(list = fitname, file = paste0(fitname, ".RData"))
}
fit
}
nameattr <- function (x,n) {attr(x,'name') <- n; x}
arglist <- mapply(nameattr, arglist, names(arglist), SIMPLIFY = FALSE)
if (ncores > 1) {
clust <- makeCluster(ncores, methods = FALSE, useXDR = .Platform$endian=='big',
outfile = logfile)
clusterSetRNGStream(clust, seed)
clusterExport(clust, c(data, 'secr.fit'), environment())
if (LB)
output <- clusterApplyLB(clust, arglist, run.fit)
else
output <- clusterApply(clust, arglist, run.fit)
stopCluster(clust)
}
else {
set.seed (seed)
output <- lapply(arglist, run.fit)
}
message(paste('Completed in ', round((proc.time() - ptm)[3]/60,3), ' minutes at ',
format(Sys.time(), "%H:%M:%S %d %b %Y"), sep=''))
if (inherits(output[[1]], 'secr'))
output <- secrlist(output)
names(output) <- paste0(prefix, names(arglist))
output
}
par.derived <- function (secrlist, ncores = 1, ...) {
if (!inherits(secrlist, 'secrlist'))
stop("requires secrlist input")
if (ncores > 1) {
clust <- makeCluster(ncores, methods = FALSE, useXDR = .Platform$endian=='big')
output <- parLapply(clust, secrlist, derived, ...)
stopCluster(clust)
}
else {
output <- lapply(secrlist, derived, ...)
}
names(output) <- names(secrlist)
output
}
par.region.N <- function (secrlist, ncores = 1, ...) {
if (!inherits(secrlist, 'secrlist'))
stop("requires secrlist input")
if (ncores > 1) {
clust <- makeCluster(ncores, methods = FALSE, useXDR = .Platform$endian=='big')
output <- parLapply(clust, secrlist, region.N, ...)
stopCluster(clust)
}
else {
output <- lapply(secrlist, region.N, ...)
}
names(output) <- names(secrlist)
output
}
|
test_that("`plot.see_estimate_contrasts()` works", {
if (require("modelbased") && require("rstanarm")) {
model <- stan_glm(Sepal.Width ~ Species, data = iris, refresh = 0)
contrasts <- estimate_contrasts(model)
means <- estimate_means(model)
expect_s3_class(plot(contrasts, means), "gg")
}
})
|
illustrateLLN <- function(Distr = Norm(),
n = c(1,3,5,10,25,50,100,500,1000,10000),
m = 50, step = 1, sleep = 0,
withConf = TRUE, withCover = (length(n)<=12),
withEline = TRUE, withLegend = TRUE,
CLTorCheb = "CLT", coverage = 0.95, ...,
col.Eline = "blue", lwd.Eline = par("lwd"),
lty.Eline = par("lty"),
col.Conf = "red", lwd.Conf = par("lwd"),
lty.Conf = 2, cex.Cover = 0.7, cex.legend = 0.8){
Distrc <- match.call(call = sys.call(sys.parent(1)))$Distr
if(is.null(Distrc)) Distrc <- "Norm()"
else Distrc <- as.character(deparse(Distrc))
if(!is.numeric(coverage) || length(coverage)>1 || any(coverage <= 0)
|| any(coverage >= 1) )
stop("Argument 'coverage' must be a single number in (0,1)")
dots <- match.call(call = sys.call(sys.parent(1)),
expand.dots = FALSE)$"..."
dots.for.lines <- dots[! names(dots) %in% c("col", "lwd", "lty")]
dots.for.matplot <- dots.for.lines[! names(dots.for.lines) %in%
c("ylim", "xlim", "pch", "xlab", "ylab", "axes",
"main")]
dots.for.legend <- dots.for.lines[! names(dots.for.lines) %in%
c("legend", "main", "cex", "sub")]
dots.for.text <- dots[! names(dots) %in% c("cex", "main", "sub")]
confType <- pmatch(CLTorCheb, c("CLT","Chebyshev"), nomatch = 1)
col <- if (!hasArg(col)) par("col") else dots$col
if (hasArg(col) && missing(col.Eline))
col.Eline <- col
if (hasArg(col) && missing(col.Conf))
col.Conf <- col
lwd <- if (!hasArg(lwd)) par("lwd") else dots$lwd
if (hasArg(lwd) && missing(lwd.Eline))
lwd.Eline <- lwd
if (hasArg(lwd) && missing(lwd.Conf))
lwd.Conf <- lwd
lty <- if (!hasArg(lty)) 1 else dots$lty
if (hasArg(lty) && missing(lty.Eline))
lty.Eline <- lty
if (hasArg(lty) && missing(lty.Conf))
lty.Conf <- lty
cex <- if (!hasArg(cex)) 1 else dots$cex
if (hasArg(cex) && missing(cex.Cover))
cex.Cover <- cex
if (hasArg(cex) && missing(cex.legend))
cex.legend <- cex
pch <- if (!hasArg(pch)) 16 else dots$pch
facConf <- switch(confType, qnorm((1+coverage)/2), (1-coverage)^(-.5))
facName <- switch(confType, "CLT", "Chebyshev")
legend.txt <- if (hasArg(legend)) dots$legend else
gettextf("%s-based %3.0f%% (pointwise) confidence interval",
facName, round(100*coverage,0))
da <- matrix(NA,m,length(n))
omar <- par(no.readonly = TRUE)
on.exit(par(omar))
slots <- slotNames(param(Distr))
slots <- slots[slots != "name"]
nrvalues <- length(slots)
if(nrvalues > 0){
values <- numeric(nrvalues)
for(i in 1:nrvalues)
values[i] <- attributes(attributes(Distr)$param)[[slots[i]]]
paramstring <- paste(values, collapse = ", ")
nparamstring <- paste(slots, "=", values, collapse = ", ")
qparamstring <- paste("(",paramstring,")",sep="")
}
else paramstring <- qparamstring <- nparamstring <- ""
.mpresubs <- function(inx)
.presubs(inx, c("%C", "%D", "%N", "%P", "%Q", "%A",
"%X"),
list(as.character(class(Distr)[1]),
as.character(date()),
nparamstring,
paramstring,
qparamstring,
Distrc,
expression(~~~~~bar(X)[n]==~~sum(X[i],i==1,n)/n)))
xlab <- if (!hasArg(xlab)) gettext("Sample size n") else dots$xlab
xlab <- .mpresubs(xlab)
ylab <- if (!hasArg(ylab)) "Realisations of %X" else dots$ylab
ylab <- .mpresubs(ylab)
tit <- c("LLN: Convergence against E[X] -- case %C%Q",
"called with %A")
if ( is.na (E(Distr)))
tit <- c("LLN: Non-Convergence against E[X] -- case %C%Q",
"called with %A")
main <- if ( hasArg(main)) dots$main else tit
main <- .mpresubs(main)
sub <- if ( hasArg(sub)) dots$sub else ""
sub <- .mpresubs(sub)
LLNin <- function(x, n, from, to = from){
for(i in seq(length(n)))
da[from:to,i] <<-
rowMeans(matrix(r(x)(n[i]*(to-from+1)),(to-from+1),n[i]))
}
mE <- if(!is.na(E(Distr))) E(Distr) else median(Distr)
msd <- if(!is.na(sd(Distr))) sd(Distr) else 2 * mad(Distr)
Ns <- seq(length(n))
nn <- if(is(Distr,"Cauchy")) n*0+1 else n
for(j in seq(1, m, by = step))
{LLNin(Distr, n, j, j+step-1)
do.call(matplot, args = c(list(Ns, t(da), pch = pch, col=col,
axes = FALSE, ylim = q.l(Distr)(c(0.02,0.98)),
xlab = xlab, ylab = "", main = main), dots.for.matplot ))
title(ylab = ylab, line = 1.7)
axis(1, at = Ns, labels = n)
axis(2)
if (withEline)
do.call( abline, args= c(list(h = mE, col = col.Eline, lwd = lwd.Eline,
lty = lty.Eline), dots.for.lines))
if (withConf)
do.call( matlines, args = c(list(
Ns, mE + facConf * msd/sqrt(nn)%o%c(-1,1),
lty = lty.Conf, col = col.Conf, lwd = lwd.Conf
), dots.for.lines))
coverage <- colMeans( t(t(da) <= (mE+facConf*msd*1/sqrt(nn)) &
t(da) >= (mE-facConf*msd*1/sqrt(nn))) ,
na.rm= TRUE )
if (withCover && withConf)
do.call(mtext, args = c(list(at = c(0,Ns),
c(gettext("coverage"),sprintf("%3.2f",round(coverage,2))),
cex = cex.Cover), dots.for.text))
if (withLegend && withConf)
do.call(legend, args = c(list("bottomright",
legend = legend.txt, cex = cex.legend,
col = col.Conf, lwd = lwd.Conf, lty = lty.Conf),
dots.for.legend))
Sys.sleep(sleep)
}
}
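## Usage sketch (not part of the original source; assumes the distr class
## constructors such as Pois() are loaded): animate the LLN for a Poisson(2)
## distribution with Chebyshev confidence bands instead of CLT bands.
if (FALSE) {
  illustrateLLN(Distr = Pois(lambda = 2), CLTorCheb = "Chebyshev")
}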
.presubs <- function(inp, frompat, topat){
logic <- FALSE
inCx <- sapply(inp,
function(inpx){
inC <- deparse(inpx)
l <- length(frompat)
for(i in 1:l)
{ if (is.language(topat[[i]])){
totxt <- deparse(topat[[i]])
totxt <- gsub("expression\\(", "\", ", gsub("\\)$",", \"",totxt))
if (length(grep(frompat[i],inC))) logic <<- TRUE
inC <- gsub(frompat[i],totxt,inC)
}else inC <- gsub(frompat[i], topat[[i]], inC)
}
return(inC)
})
if(length(grep("expression",inCx))>0)
inCx <- gsub("expression\\(", "", gsub("\\)$","",inCx))
if (length(inCx) > 1) {
inCx <- paste(inCx, c(rep(",", length(inCx)-1), ""),
sep = "", collapse = "\"\\n\",")
if ( any(as.logical(c(lapply(inp,is.language)))) | logic )
inCx <- paste("expression(paste(", gsub("\\\\n"," ", inCx), "))", sep ="")
else
inCx <- paste("paste(",inCx,")", sep ="")
}else inCx <- paste("expression(paste(",inCx,"))",sep="")
outC <- eval(parse(text = eval(inCx)))
return(outC)
}
|
x <- rnorm(1e3)
X <- matrix(x, ncol = 10)
a1 <- array(rnorm(400), dim = c(100, 2, 2))
a2 <- array(rnorm(800), dim = c(100, 2, 2, 2))
dr <- list(sample(1:length(x), 1),
sample(1:length(x), 10),
sample(1:nrow(X), 1),
sample(1:nrow(X), 10),
11:20,
21:30,
31:40)
tmp <- list(extract_draws_from_array(x, dr[[1]]),
extract_draws_from_array(x, dr[[2]]),
extract_draws_from_array(X, dr[[3]]),
extract_draws_from_array(X, dr[[4]]),
extract_draws(list(x = x, X = X), dr[[5]]),
extract_draws_from_array(a1, dr[[6]]),
extract_draws_from_array(a2, dr[[7]]))
sol_nrow <- c(length(dr[[1]]),
length(dr[[2]]),
length(dr[[3]]) * ncol(X),
length(dr[[4]]) * ncol(X),
length(dr[[5]]) * (1 + ncol(X)),
length(dr[[6]]) * 2 * 2,
length(dr[[7]]) * 2 * 2 * 2)
test_that("extract_draws() (and related) returns dataframe of correct size", {
for (i in 1:length(tmp)) {
    expect_true(is.data.frame(tmp[[i]]))
expect_true(all(colnames(tmp[[i]]) %in% c("Draw", "Index", "Value", "Parameter")))
expect_equal(nrow(tmp[[i]]), sol_nrow[i])
}
})
test_that("extract_draws() (and related) returns correct draws", {
for (i in 1:length(tmp)) {
expect_equal(sort(unique(tmp[[i]][["Draw"]])), sort(dr[[i]]))
expect_true(!any(is.na(tmp[[i]][["Value"]])))
}
})
test_that("extract_draws() (and related) identify incorrect inputs", {
expect_error(extract_draws_from_array(list(1, 2), 1))
expect_error(extract_draws_from_array(rnorm(1e2), 0))
expect_error(extract_draws_from_array(rnorm(1e2), 1e4))
expect_error(extract_draws(data.frame(rnorm(1e2)), 1))
})
|
T2funcrep <-
function(X,n,m,p,r1,r2,r3,start,conv,model,A,B,C,H){
X=as.matrix(X)
if (model==1){
C=diag(r3)
}
if (model==2){
B=diag(r2)
}
if (model==3){
A=diag(r1)
}
cputime=system.time({
ss=sum(X^2)
dys=0
if (start==0){
if (model!=3){
EIG=eigen(X%*%t(X))
A=EIG$vectors[,1:r1]
}
Z=permnew(X,n,m,p)
if (model!=2){
EIG=eigen(Z%*%t(Z))
B=EIG$vectors[,1:r2]
}
Z=permnew(Z,m,p,n)
if (model!=1){
EIG=eigen(Z%*%t(Z))
C=EIG$vectors[,1:r3]
}
}
if (start==1){
if (model!=3){
if (n>=r1){
A=orth(matrix(runif(n*r1,0,1),n,r1)-.5)
} else{
A=orth(matrix(runif(r1*r1,0,1),r1,r1)-.5)
A=A[1:n,]
}
}
if (model!=2){
if (m>=r2){
B=orth(matrix(runif(m*r2,0,1),m,r2)-.5)
} else{
B=orth(matrix(runif(r2*r2,0,1),r2,r2)-.5)
B=B[1:m,]
}
}
if (model!=1){
if (p>=r3){
C=orth(matrix(runif(p*r3,0,1),p,r3)-.5)
} else{
C=orth(matrix(runif(r3*r3,0,1),r3,r3)-.5)
C=C[1:p,]
}
}
}
if (start!=2){
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
H=permnew(t(C)%*%Z,r3,r1,r2)
}
if (start==2){
Z=B%*%permnew(A%*%H,n,r2,r3)
Z=C%*%permnew(Z,m,r3,n)
Z=permnew(Z,p,n,m)
f=sum((X-Z)^2)
} else{
f=ss-sum(H^2)
}
iter=0
fold=f+2*conv*f
while (fold-f>f*conv){
iter=iter+1
fold=f
if (model!=3){
Z=permnew(X,n,m,p)
Z=permnew(t(B)%*%Z,r2,p,n)
Z=permnew(t(C)%*%Z,r3,n,r2)
A=qr.Q(qr(Z%*%(t(Z)%*%A)),complete=FALSE)
}
if (model!=2){
Z=permnew(X,n,m,p)
Z=permnew(Z,m,p,n)
Z=permnew(t(C)%*%Z,r3,n,m)
Z=permnew(t(A)%*%Z,r1,m,r3)
B=qr.Q(qr(Z%*%(t(Z)%*%B)),complete=FALSE)
}
if (model!=1){
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
C=qr.Q(qr(Z%*%(t(Z)%*%C)),complete=FALSE)
}
Z=permnew(t(A)%*%X,r1,m,p)
Z=permnew(t(B)%*%Z,r2,p,r1)
H=permnew(t(C)%*%Z,r3,r1,r2)
f=ss-sum(H^2)
}
})
ss=sum(X^2)
fp=100*(ss-f)/ss
La=H%*%t(H)
Y=permnew(H,r1,r2,r3)
Lb=Y%*%t(Y)
Y=permnew(Y,r2,r3,r1)
Lc=Y%*%t(Y)
out=list()
out$A=A
out$B=B
out$C=C
out$H=H
out$f=f
out$fp=fp
out$iter=iter
out$cputime=cputime[1]
out$La=La
out$Lb=Lb
out$Lc=Lc
return(out)
}
|
RMSEP <- function(object, ...) UseMethod("RMSEP")
RMSEP.default <- function(object, ...)
{
stop("No default method for \"RMSEP\"")
}
RMSEP.mat <- function(object, k, weighted = FALSE, ...) {
  if(!inherits(object, "mat"))
    stop("'object' is not of class \"mat\".")
  if(missing(k))
    k <- getK(object)
  ## 'weighted' selects the weighted-mean predictions; default is standard
  if(weighted)
    rmsep <- object$weighted$rmsep[k]
  else
    rmsep <- object$standard$rmsep[k]
  return(rmsep)
}
RMSEP.bootstrap.mat <- function(object, type = c("birks1990", "standard"), ...) {
if(!inherits(object, "bootstrap.mat"))
stop("'object' is not of class \"bootstrap.mat\".")
if(missing(type))
type <- "birks1990"
type <- match.arg(type)
if(type == "birks1990")
rmsep <- object$bootstrap$rmsep[getK(object)]
else
rmsep <- sqrt(mean(object$bootstrap$residuals[, getK(object)]^2))
return(rmsep)
}
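## Usage sketch (not part of the original source; assumes the analogue
## package with its ImbrieKipp and SumSST example data).
if (FALSE) {
  data(ImbrieKipp, SumSST, package = "analogue")
  m <- analogue::mat(ImbrieKipp, SumSST, method = "chord")
  RMSEP(m)                  # leave-one-out RMSEP at the chosen k
  RMSEP(m, weighted = TRUE) # RMSEP of the weighted-mean predictions
}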
|
lag <- function(vector, lag=1)
{
if(lag<0)
stop("Lag needs to be non-negative")
return(c(rep(NA,lag),vector[1:(length(vector)-lag)]))
}
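## Worked sketch (not part of the original source): shift by two positions,
## padding the front with NA.
if (FALSE) {
  lag(1:5, lag = 2) # NA NA 1 2 3
}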
constant <- function(stateVariable, theta,...)
{
return(min(1,max(theta,0)))
}
logistic_linear <- function(stateVariable, theta,...)
{
if(length(theta)!=2){stop("Wrong dimension of parameter theta for logistic model")}
return(boot::inv.logit(stateVariable*theta[2]+theta[1]))
}
probit_linear <- function(stateVariable, theta,...)
{
if(length(theta)!=2){stop("Wrong dimension of parameter theta for probit linear model")}
return(stats::pnorm(stateVariable*theta[2]+theta[1]))
}
probit_break <- function(stateVariable, theta,...)
{
if(length(theta)!=2){stop("Wrong dimension of parameter theta for probit break model")}
return(stats::pnorm((stateVariable>0)*theta[2]+(stateVariable <= 0)*theta[1]))
}
probit_spline3 <- function(stateVariable, theta,...)
{
if(length(theta)!=4){stop("Wrong dimension of parameter theta for cubic probit model")}
return(stats::pnorm(stateVariable^3*theta[4]+stateVariable^2*theta[3]+stateVariable*theta[2]+theta[1]))
}
probit_spline2 <- function(stateVariable, theta,...)
{
if(length(theta)!=3){stop("Wrong dimension of parameter theta for quadratic probit model")}
return(stats::pnorm(stateVariable^2*theta[3]+stateVariable*theta[2]+theta[1]))
}
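## Worked sketch (not part of the original source; logistic_linear assumes
## the boot package): at state 0 with theta = c(0, 1), both link models
## return the central quantile level 0.5.
if (FALSE) {
  logistic_linear(stateVariable = 0, theta = c(0, 1)) # inv.logit(0) = 0.5
  probit_linear(stateVariable = 0, theta = c(0, 1))   # pnorm(0)     = 0.5
}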
estimate.functional <- function(iden.fct = quantiles,
model = constant,
theta0 = NULL,
Y, X,
stateVariable=NULL,
other_data = NULL,
instruments = c("X","lag(Y)"),
prewhite = F,
kernel="Bartlett",
bw = bwNeweyWest1987,
...)
{
  if(is.character(instruments))
{
w <- rep(1,length(Y))
if(!(length(instruments)==1 & grepl("const",instruments[1])))
{
if(is.null(other_data))
other_data <- data.frame(X=X,Y=Y)
else
other_data <- cbind(data.frame(X=X,Y=Y),other_data)
for(inst_cur in instruments)
{
if(grepl("Y",inst_cur) & !grepl("lag",inst_cur))
warning("Y without lags is not a valid instrument as it is not in the information set of the forecaster.")
w <- cbind(w,eval(parse(text=inst_cur),other_data))
}
compl <- complete.cases(w)
message(paste("Drop ", length(Y)-sum(compl), "case(s) because of chosen instruments"))
w <- w[compl,]
Y <- Y[compl]
X <- X[compl]
stateVariable <- stateVariable[compl]
}
  } else if(is.matrix(instruments) || is.vector(instruments))
{
w <- instruments
} else
{
stop("instruments has to be of class matrix, vector or character")
}
V <- function(theta,x,y,stateVariable,...)
{
return(iden.fct(x=x,y=y,stateVariable=stateVariable, theta=theta,model=model))
}
if(is.matrix(w))
{
if(dim(w)[1]!=length(Y) | dim(w)[1]!=length(X)){stop('Wrong dimensions')}
    # order condition: at least as many instruments (columns of w) as parameters
    if(dim(w)[2]<length(theta0)){stop('Not enough moment conditions')}
if(qr(w)$rank!=ncol(w))
stop("Matrix of instruments does not have full rank. Choice of instruments may be invalid.")
} else
{
if(length(theta0)>1)
stop("Not enough moment conditions")
}
if(is.null(theta0))
{
message("Choose parameter theta0 automatically.")
    if(sum(sapply(c(constant), identical, model))>0) {
      theta0 <- 0.5
    } else if(sum(sapply(c(probit_spline2), identical, model))>0) {
      theta0 <- rep(0,3)
    } else if(sum(sapply(c(probit_spline3), identical, model))>0) {
      theta0 <- rep(0,4)
    } else if(sum(sapply(c(probit_linear,logistic_linear), identical, model))>0) {
      theta0 <- c(0,0)
    } else {
      stop("Model unknown, specify theta0.")
    }
}
if (length(theta0)>1){optfct <- 'optim'} else { optfct <- 'nlminb'}
stateV.cur <- if(is.null(stateVariable)) rep(0,length(X)) else stateVariable
if(is.null(dim(stateV.cur)))
model.dim <- 1
else
model.dim<-dim(stateV.cur)[2]
g <- function(theta, m_data,...)
{
x <- m_data[,1]
y <- m_data[,2]
z <- m_data[,3:(ncol(m_data)-model.dim)]
stateVariable <- m_data[,(ncol(m_data)-model.dim+1):ncol(m_data)]
diag(as.vector(V(theta=theta,x=x,y=y,stateVariable=stateVariable,...)))%*%cbind(z)
}
matrix_data <-cbind(X, Y, w, stateV.cur)
res <- gmm::gmm(g,
x = matrix_data,
t0 = theta0,
optfct = optfct,
prewhite = prewhite,
kernel = kernel,
bw = bw,
...)
return(structure(list(
gmm = res,
iden.fct = iden.fct,
model = model,
instruments = instruments,
stateVariable = stateVariable,
V=V,
call=match.call()
),class="pointfore"))
}
quantiles<- function(x,y,stateVariable,theta,model,...)
{
(y<=x)-model(stateVariable=stateVariable, theta=theta)
}
expectiles<- function(x,y,stateVariable,theta,model,...)
{
abs((y<=x)-model(stateVariable=stateVariable, theta=theta))*(x-y)
}
summary.pointfore <- function(object,...)
{
gmm.sum <- summary(object$gmm)
list(call=object$call,
coefficients=gmm.sum$coefficients,
Jtest = gmm.sum$stest)
}
bwNeweyWest1987 <- function(x,...) {
sandwich::bwNeweyWest(x,lag=nrow(gmm::estfun.gmmFct(x))^(0.2),...)
}
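## End-to-end sketch (not part of the original source; assumes the gmm and
## boot packages): with X the true conditional median of Y, the constant
## model should recover a quantile level near 0.5.
if (FALSE) {
  set.seed(1)
  z <- rnorm(500)
  y <- z + rnorm(500) # X = z is the conditional median forecast of Y
  fit <- estimate.functional(Y = y, X = z, model = constant, theta0 = 0.5)
  summary(fit)
}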
|
tbats <- function(y, use.box.cox=NULL, use.trend=NULL, use.damped.trend=NULL,
seasonal.periods=NULL, use.arma.errors=TRUE, use.parallel=length(y) > 1000, num.cores=2,
bc.lower=0, bc.upper=1, biasadj=FALSE, model=NULL, ...) {
if (!is.numeric(y) || NCOL(y) > 1) {
stop("y should be a univariate time series")
}
seriesname <- deparse(substitute(y))
origy <- y
attr_y <- attributes(origy)
if (is.null(seasonal.periods)) {
if (any(class(y) == "msts")) {
seasonal.periods <- sort(attr(y, "msts"))
} else if (class(y) == "ts") {
seasonal.periods <- frequency(y)
} else {
y <- as.ts(y)
seasonal.periods <- 1
}
}
else {
if (!any(class(y) == "ts")) {
y <- msts(y, seasonal.periods)
}
}
seasonal.periods <- unique(pmax(seasonal.periods, 1))
if (all(seasonal.periods == 1)) {
seasonal.periods <- NULL
}
ny <- length(y)
y <- na.contiguous(y)
if (ny != length(y)) {
warning("Missing values encountered. Using longest contiguous portion of time series")
if (!is.null(attr_y$tsp)) {
attr_y$tsp[1:2] <- range(time(y))
}
}
if (!is.null(model)) {
if (is.element("tbats", class(model))) {
refitModel <- try(fitPreviousTBATSModel(y, model = model), silent = TRUE)
} else if (is.element("bats", class(model))) {
refitModel <- bats(origy, model = model)
}
return(refitModel)
}
if (is.constant(y)) {
fit <- list(
y = y, x = matrix(y, nrow = 1, ncol = ny), errors = y * 0, fitted.values = y, seed.states = matrix(y[1]),
AIC = -Inf, likelihood = -Inf, variance = 0, alpha = 0.9999, method = "TBATS", call = match.call()
)
return(structure(fit, class = "bats"))
}
if (any((y <= 0))) {
use.box.cox <- FALSE
}
non.seasonal.model <- bats(
as.numeric(y), use.box.cox = use.box.cox, use.trend = use.trend,
use.damped.trend = use.damped.trend, use.arma.errors = use.arma.errors,
use.parallel = use.parallel, num.cores = num.cores,
bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...
)
if (is.null(seasonal.periods)) {
non.seasonal.model$call <- match.call()
attributes(non.seasonal.model$fitted.values) <- attributes(non.seasonal.model$errors) <- attributes(origy)
non.seasonal.model$y <- origy
return(non.seasonal.model)
}
else {
seasonal.mask <- (seasonal.periods == 1)
seasonal.periods <- seasonal.periods[!seasonal.mask]
}
if (is.null(use.box.cox)) {
use.box.cox <- c(FALSE, TRUE)
}
if (any(use.box.cox)) {
init.box.cox <- BoxCox.lambda(y, lower = bc.lower, upper = bc.upper)
} else {
init.box.cox <- NULL
}
if (is.null(use.trend)) {
use.trend <- c(FALSE, TRUE)
} else if (use.trend == FALSE) {
use.damped.trend <- FALSE
}
if (is.null(use.damped.trend)) {
use.damped.trend <- c(FALSE, TRUE)
}
model.params <- logical(length = 3)
model.params[1] <- any(use.box.cox)
model.params[2] <- any(use.trend)
model.params[3] <- any(use.damped.trend)
y <- as.numeric(y)
n <- length(y)
k.vector <- rep(1, length(seasonal.periods))
if (use.parallel) {
if (is.null(num.cores)) {
num.cores <- detectCores(all.tests = FALSE, logical = TRUE)
}
clus <- makeCluster(num.cores)
}
best.model <- try(fitSpecificTBATS(
y, use.box.cox = model.params[1], use.beta = model.params[2],
use.damping = model.params[3], seasonal.periods = seasonal.periods,
k.vector = k.vector, init.box.cox = init.box.cox,
bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
), silent = TRUE)
if (is.element("try-error", class(best.model))) {
best.model <- list(AIC = Inf)
}
for (i in 1:length(seasonal.periods)) {
if (seasonal.periods[i] == 2) {
next
}
max.k <- floor(((seasonal.periods[i] - 1) / 2))
if (i != 1) {
current.k <- 2
while (current.k <= max.k) {
if (seasonal.periods[i] %% current.k != 0) {
current.k <- current.k + 1
next
}
latter <- seasonal.periods[i] / current.k
if (any(((seasonal.periods[1:(i - 1)] %% latter) == 0))) {
max.k <- current.k - 1
break
} else {
current.k <- current.k + 1
}
}
}
if (max.k == 1) {
next
}
if (max.k <= 6) {
k.vector[i] <- max.k
best.model$AIC <- Inf
repeat {
new.model <- try(
fitSpecificTBATS(
y, use.box.cox = model.params[1], use.beta = model.params[2],
use.damping = model.params[3], seasonal.periods = seasonal.periods,
k.vector = k.vector, init.box.cox = init.box.cox,
bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
),
silent = TRUE
)
if (is.element("try-error", class(new.model))) {
new.model <- list(AIC = Inf)
}
if (new.model$AIC > best.model$AIC) {
k.vector[i] <- k.vector[i] + 1
break
} else {
if (k.vector[i] == 1) {
break
}
k.vector[i] <- k.vector[i] - 1
best.model <- new.model
}
}
next
} else {
step.up.k <- k.vector
step.down.k <- k.vector
step.up.k[i] <- 7
step.down.k[i] <- 5
k.vector[i] <- 6
if (use.parallel) {
k.control.array <- rbind(step.up.k, step.down.k, k.vector)
models.list <- clusterApplyLB(
clus, c(1:3), parFitSpecificTBATS, y = y,
box.cox = model.params[1], trend = model.params[2],
damping = model.params[3], seasonal.periods = seasonal.periods,
k.control.matrix = k.control.array, init.box.cox = init.box.cox,
bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
)
up.model <- models.list[[1]]
level.model <- models.list[[3]]
down.model <- models.list[[2]]
} else {
up.model <- try(
fitSpecificTBATS(
y, use.box.cox = model.params[1],
use.beta = model.params[2], use.damping = model.params[3],
seasonal.periods = seasonal.periods, k.vector = step.up.k,
init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
),
silent = TRUE
)
if (is.element("try-error", class(up.model))) {
up.model <- list(AIC = Inf)
}
level.model <- try(
fitSpecificTBATS(
y, use.box.cox = model.params[1], use.beta = model.params[2],
use.damping = model.params[3], seasonal.periods = seasonal.periods,
k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
),
silent = TRUE
)
if (is.element("try-error", class(level.model))) {
level.model <- list(AIC = Inf)
}
down.model <- try(
fitSpecificTBATS(
y, use.box.cox = model.params[1], use.beta = model.params[2],
use.damping = model.params[3], seasonal.periods = seasonal.periods,
k.vector = step.down.k, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
),
silent = TRUE
)
if (is.element("try-error", class(down.model))) {
down.model <- list(AIC = Inf)
}
}
aic.vector <- c(up.model$AIC, level.model$AIC, down.model$AIC)
if (min(aic.vector) == down.model$AIC) {
best.model <- down.model
k.vector[i] <- 5
repeat{
k.vector[i] <- k.vector[i] - 1
down.model <- try(
fitSpecificTBATS(
y = y, use.box.cox = model.params[1], use.beta = model.params[2],
use.damping = model.params[3], seasonal.periods = seasonal.periods,
k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj
),
silent = TRUE
)
if (is.element("try-error", class(down.model))) {
down.model <- list(AIC = Inf)
}
if (down.model$AIC > best.model$AIC) {
k.vector[i] <- k.vector[i] + 1
break
} else {
best.model <- down.model
}
if (k.vector[i] == 1) {
break
}
}
} else if (min(aic.vector) == level.model$AIC) {
best.model <- level.model
next
} else {
best.model <- up.model
k.vector[i] <- 7
repeat {
k.vector[i] <- k.vector[i] + 1
up.model <- try(
fitSpecificTBATS(y, model.params[1], model.params[2], model.params[3], seasonal.periods, k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
if (is.element("try-error", class(up.model))) {
up.model <- list(AIC = Inf)
}
if (up.model$AIC > best.model$AIC) {
k.vector[i] <- k.vector[i] - 1
break
} else {
best.model <- up.model
}
if (k.vector[i] == max.k) {
break
}
}
}
}
}
aux.model <- best.model
if (non.seasonal.model$AIC < best.model$AIC) {
best.model <- non.seasonal.model
}
if ((length(use.box.cox) == 1) && use.trend[1] && (length(use.trend) == 1) && (length(use.damped.trend) == 1) && (use.parallel)) {
use.parallel <- FALSE
stopCluster(clus)
} else if ((length(use.box.cox) == 1) && !use.trend[1] && (length(use.trend) == 1) && (use.parallel)) {
use.parallel <- FALSE
stopCluster(clus)
}
if (use.parallel) {
control.array <- NULL
for (box.cox in use.box.cox) {
for (trend in use.trend) {
for (damping in use.damped.trend) {
if (!trend && damping) {
next
}
control.line <- c(box.cox, trend, damping)
if (!is.null(control.array)) {
control.array <- rbind(control.array, control.line)
} else {
control.array <- control.line
}
}
}
}
    models.list <- clusterApplyLB(
      clus, c(1:nrow(control.array)), parFilterTBATSSpecifics, y = y,
      control.array = control.array, model.params = model.params,
      seasonal.periods = seasonal.periods, k.vector = k.vector,
      use.arma.errors = use.arma.errors, aux.model = aux.model,
      init.box.cox = init.box.cox, bc.lower = bc.lower,
      bc.upper = bc.upper, biasadj = biasadj, ...
    )
stopCluster(clus)
aics <- numeric(nrow(control.array))
for (i in 1:nrow(control.array)) {
aics[i] <- models.list[[i]]$AIC
}
best.number <- which.min(aics)
best.seasonal.model <- models.list[[best.number]]
if (best.seasonal.model$AIC < best.model$AIC) {
best.model <- best.seasonal.model
}
} else {
for (box.cox in use.box.cox) {
for (trend in use.trend) {
for (damping in use.damped.trend) {
if (all((model.params == c(box.cox, trend, damping)))) {
new.model <- filterTBATSSpecifics(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, aux.model = aux.model, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...)
} else if (trend || !damping) {
new.model <- filterTBATSSpecifics(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...)
}
if (new.model$AIC < best.model$AIC) {
best.model <- new.model
}
}
}
}
}
best.model$call <- match.call()
attributes(best.model$fitted.values) <- attributes(best.model$errors) <- attr_y
best.model$y <- origy
best.model$series <- seriesname
best.model$method <- "TBATS"
return(best.model)
}
parFilterTBATSSpecifics <- function(control.number, y, control.array, model.params, seasonal.periods, k.vector, use.arma.errors, aux.model=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE, ...) {
box.cox <- control.array[control.number, 1]
trend <- control.array[control.number, 2]
damping <- control.array[control.number, 3]
if (!all((model.params == c(box.cox, trend, damping)))) {
first.model <- try(
fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
} else {
first.model <- aux.model
}
if (is.element("try-error", class(first.model))) {
first.model <- list(AIC = Inf)
}
if (use.arma.errors) {
suppressWarnings(arma <- try(auto.arima(as.numeric(first.model$errors), d = 0, ...), silent = TRUE))
if (!is.element("try-error", class(arma))) {
p <- arma$arma[1]
q <- arma$arma[2]
if ((p != 0) || (q != 0)) {
if (p != 0) {
ar.coefs <- numeric(p)
} else {
ar.coefs <- NULL
}
if (q != 0) {
ma.coefs <- numeric(q)
} else {
ma.coefs <- NULL
}
starting.params <- first.model$parameters
second.model <- try(
fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
if (is.element("try-error", class(second.model))) {
second.model <- list(AIC = Inf)
}
if (second.model$AIC < first.model$AIC) {
return(second.model)
} else {
return(first.model)
}
} else {
return(first.model)
}
} else {
return(first.model)
}
} else {
return(first.model)
}
}
parFitSpecificTBATS <- function(control.number, y, box.cox, trend, damping, seasonal.periods, k.control.matrix, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE) {
k.vector <- k.control.matrix[control.number, ]
model <- try(
fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
if (is.element("try-error", class(model))) {
model <- list(AIC = Inf)
}
return(model)
}
filterTBATSSpecifics <- function(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, aux.model=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE, ...) {
if (is.null(aux.model)) {
first.model <- try(
fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
} else {
first.model <- aux.model
}
if (is.element("try-error", class(first.model))) {
first.model <- list(AIC = Inf)
}
if (use.arma.errors) {
suppressWarnings(arma <- try(auto.arima(as.numeric(first.model$errors), d = 0, ...), silent = TRUE))
if (!is.element("try-error", class(arma))) {
p <- arma$arma[1]
q <- arma$arma[2]
if ((p != 0) || (q != 0)) {
if (p != 0) {
ar.coefs <- numeric(p)
} else {
ar.coefs <- NULL
}
if (q != 0) {
ma.coefs <- numeric(q)
} else {
ma.coefs <- NULL
}
starting.params <- first.model$parameters
second.model <- try(
fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj),
silent = TRUE
)
if (is.element("try-error", class(second.model))) {
second.model <- list(AIC = Inf)
}
if (second.model$AIC < first.model$AIC) {
return(second.model)
} else {
return(first.model)
}
} else {
return(first.model)
}
} else {
return(first.model)
}
} else {
return(first.model)
}
}
makeSingleFourier <- function(j, m, T) {
  # Fourier terms for harmonic j of a period-m season, evaluated at t = 1..T;
  # without the time index t every row would be identical
  frier <- matrix(0, nrow = T, ncol = 2)
  for (t in 1:T) {
    frier[t, 1] <- cos((2 * pi * j * t) / m)
    frier[t, 2] <- sin((2 * pi * j * t) / m)
  }
  return(frier)
}
calcFTest <- function(r.sse, ur.sse, num.restrictions, num.u.params, num.observations) {
  # standard F statistic: the denominator uses the unrestricted SSE
  f.stat <- ((r.sse - ur.sse) / num.restrictions) / (ur.sse / (num.observations - num.u.params))
  p.value <- pf(f.stat, num.restrictions, (num.observations - num.u.params), lower.tail = FALSE)
  return(p.value)
}
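## Worked sketch (not part of the original source): 2 restrictions, 5 free
## parameters, 100 observations; F = (20/2)/(100/95) = 9.5.
if (FALSE) {
  calcFTest(r.sse = 120, ur.sse = 100, num.restrictions = 2,
            num.u.params = 5, num.observations = 100) # small p-value
}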
fitted.tbats <- function(object, h=1, ...) {
if (h == 1) {
return(object$fitted.values)
}
else {
return(hfitted(object = object, h = h, FUN = "tbats", ...))
}
}
print.tbats <- function(x, ...) {
cat(as.character(x))
cat("\n")
cat("\nCall: ")
print(x$call)
cat("\nParameters")
if (!is.null(x$lambda)) {
cat("\n Lambda: ")
cat(round(x$lambda, 6))
}
cat("\n Alpha: ")
cat(x$alpha)
if (!is.null(x$beta)) {
cat("\n Beta: ")
cat(x$beta)
cat("\n Damping Parameter: ")
cat(round(x$damping.parameter, 6))
}
if (!is.null(x$gamma.one.values)) {
cat("\n Gamma-1 Values: ")
cat(x$gamma.one.values)
}
if (!is.null(x$gamma.two.values)) {
cat("\n Gamma-2 Values: ")
cat(x$gamma.two.values)
}
if (!is.null(x$ar.coefficients)) {
cat("\n AR coefficients: ")
cat(round(x$ar.coefficients, 6))
}
if (!is.null(x$ma.coefficients)) {
cat("\n MA coefficients: ")
cat(round(x$ma.coefficients, 6))
}
cat("\n")
cat("\nSeed States:\n")
print(x$seed.states)
cat("\nSigma: ")
cat(sqrt(x$variance))
cat("\nAIC: ")
cat(x$AIC)
cat("\n")
}
plot.tbats <- function(x, main="Decomposition by TBATS model", ...) {
out <- tbats.components(x)
plot.ts(out, main = main, nc = 1, ...)
}
tbats.components <- function(x) {
if (!is.null(x$lambda)) {
y <- BoxCox(x$y, x$lambda)
lambda <- attr(y, "lambda")
} else {
y <- x$y
}
tau <- ifelse(!is.null(x$k.vector), 2 * sum(x$k.vector), 0)
w <- .Call(
"makeTBATSWMatrix", smallPhi_s = x$damping.parameter, kVector_s = as.integer(x$k.vector),
arCoefs_s = x$ar.coefficients, maCoefs_s = x$ma.coefficients, tau_s = as.integer(tau), PACKAGE = "forecast"
)
out <- cbind(observed = c(y), level = x$x[1, ])
if (!is.null(x$beta)) {
out <- cbind(out, slope = x$x[2, ])
}
if (tau > 0) {
nonseas <- 2 + !is.null(x$beta)
nseas <- length(x$seasonal.periods)
seas.states <- cbind(x$seed.states, x$x)[-(1:(1 + !is.null(x$beta))), ]
seas.states <- seas.states[, -ncol(seas.states)]
w <- w$w.transpose[, -(1:(1 + !is.null(x$beta))), drop = FALSE]
w <- w[, 1:tau, drop = FALSE]
j <- cumsum(c(1, 2 * x$k.vector))
for (i in 1:nseas)
out <- cbind(out, season = c(w[, j[i]:(j[i + 1] - 1), drop = FALSE] %*% seas.states[j[i]:(j[i + 1] - 1), ]))
if (nseas > 1) {
colnames(out)[nonseas + 1:nseas] <- paste("season", 1:nseas, sep = "")
}
}
out <- ts(out)
tsp(out) <- tsp(y)
return(out)
}
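## Usage sketch (not part of the original source; assumes the forecast
## package context with fitSpecificTBATS and friends compiled).
if (FALSE) {
  fit <- tbats(USAccDeaths, use.parallel = FALSE)
  print(fit)
  plot(fit) # decomposition via tbats.components()
}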
|
ml_mcmc_summary_print_descriptives <- function(object)
{
cat("Outcome model:", object$outcome, "\n")
cat("Number of observations", "=", object$N, "\n")
ncluster_list <- object$ncluster_list
NCL <- length(ncluster_list)
for (cc in seq_len(NCL)){
cat( paste0("Number of clusters (", names(ncluster_list)[cc],")"), "=", ncluster_list[[cc]], "\n")
}
cat("\n")
}
|
context("svyflow estimates with nonresponse and zero counts: Model A")
set.seed( 123 )
library( survey )
library( surf )
library( testthat )
N <- as.integer( 10^5 )
n <- as.integer( 10^4 )
eta.pop <- c( .40 , .30, .20, .10 )
pij.pop <- matrix( c(.60, .10 , .20, .10,
.30, .50, 0, .20,
.20, .20, .30, .30,
0 , .20 , .30, .50 ) , nrow = 4 , byrow = TRUE )
muij.pop <- N * sweep( pij.pop , 1 , eta.pop , "*" )
psi.pop <- .8
rho.pop <- .9
tau.pop <- .7
poplist <-
list( "psi" = psi.pop ,
"rho" = rho.pop ,
"tau" = tau.pop ,
"eta" = eta.pop ,
"gamma" = colSums( sweep( pij.pop , 1 , eta.pop , "*" ) ) ,
"pij" = pij.pop ,
"muij" = muij.pop )
state.table <- expand.grid( data.frame( v0 = seq_len( nrow(pij.pop) +1 ) , v1 = seq_len( nrow(pij.pop) + 1 ) ) )
state.table <- state.table[ order( state.table$v0 ) , ]
state.table[ ,"k_ij" ] <- as.character( seq_len( nrow( state.table ) ) )
state.table$v0[ state.table$v0 == 5 ] <- NA
state.table$v1[ state.table$v1 == 5 ] <- NA
nipij <- sweep( pij.pop , 1 , eta.pop , "*" )
Part.Nij <- nipij * psi.pop * rho.pop
Part.Cj <- colSums( nipij * ( 1 - psi.pop ) * ( 1 - tau.pop ) )
Part.Ri <- rowSums( nipij * psi.pop * ( 1 - rho.pop ) )
Part.M <- sum( nipij * ( 1 - psi.pop ) * tau.pop )
exp.props <- rbind( cbind( Part.Nij , Part.Ri ) , c( Part.Cj , Part.M ) )
dimnames( exp.props ) <- NULL
N*exp.props # expected population cell counts, printed for reference
smp.df <- t( rmultinom( n , size = 1 , prob = as.numeric( t( exp.props ) ) ) )
smp.df <- apply( smp.df , 1 , function( z ) seq_len( ncol( smp.df ) )[ as.logical( z ) ] )
smp.df <- data.frame( "id" = seq_len( n ) , "k_ij" = smp.df , row.names = NULL , stringsAsFactors = FALSE )
smp.df <- merge( smp.df , state.table , by = c( "k_ij" ) , all.x = TRUE , all.y = FALSE , sort = FALSE )
smp.df <- smp.df[ order( smp.df$id ) , ]
rownames( smp.df ) <- NULL
smp.df[, c( "v0" , "v1" ) ] <- lapply( smp.df[, c( "v0" , "v1" ) ] , factor , levels = c( 1:4 ) , labels = 1:4 )
table( smp.df[ , c("v0","v1")] , useNA = "always" )
smp.df$wgt <- N / n
smp.df$fpcs <- N
des.lin <-
svydesign( ids = ~ 1 ,
weights = ~ wgt ,
fpc = ~fpcs ,
data = smp.df ,
nest = TRUE )
des.rep <- as.svrepdesign( des.lin , "bootstrap" , replicates = 100 )
svytable( ~v0+v1 , des.lin , addNA = TRUE )
suppressWarnings( flow_srs_lin <- svyflow( ~v0+v1 , des.lin , model = "A" , verbose = FALSE , as.zero.flows = TRUE , influence = TRUE ) )
suppressWarnings( flow_srs_rep <- svyflow( ~v0+v1 , des.rep , model = "A" , verbose = FALSE , as.zero.flows = TRUE , influence = TRUE ) )
test_that( "extraction of estimates: linearization" , {
expect_identical( coef( flow_srs_lin$psi ) , survey:::coef.svystat( flow_srs_lin$psi ) )
expect_identical( coef( flow_srs_lin$rho ) , survey:::coef.svystat( flow_srs_lin$rho ) )
expect_identical( coef( flow_srs_lin$tau ) , survey:::coef.svystat( flow_srs_lin$tau ) )
expect_identical( coef( flow_srs_lin$eta ) , survey:::coef.svystat( flow_srs_lin$eta ) )
expect_identical( coef( flow_srs_lin$pij ) , surf:::coef.svymstat( flow_srs_lin$pij ) )
expect_identical( coef( flow_srs_lin$muij ) , surf:::coef.svymstat( flow_srs_lin$muij ) )
expect_identical( vcov( flow_srs_lin$psi ) , survey:::vcov.svystat( flow_srs_lin$psi ) )
expect_identical( vcov( flow_srs_lin$rho ) , survey:::vcov.svystat( flow_srs_lin$rho ) )
expect_identical( vcov( flow_srs_lin$tau ) , survey:::vcov.svystat( flow_srs_lin$tau ) )
expect_identical( vcov( flow_srs_lin$eta ) , survey:::vcov.svystat( flow_srs_lin$eta ) )
expect_identical( vcov( flow_srs_lin$pij ) , surf:::vcov.svymstat( flow_srs_lin$pij ) )
expect_identical( vcov( flow_srs_lin$muij ) , surf:::vcov.svymstat( flow_srs_lin$muij ) )
expect_identical( SE( flow_srs_lin$psi ) , survey:::SE.svystat( flow_srs_lin$psi ) )
expect_identical( SE( flow_srs_lin$rho ) , survey:::SE.svystat( flow_srs_lin$rho ) )
expect_identical( SE( flow_srs_lin$tau ) , survey:::SE.svystat( flow_srs_lin$tau ) )
expect_identical( SE( flow_srs_lin$eta ) , survey:::SE.svystat( flow_srs_lin$eta ) )
expect_identical( SE( flow_srs_lin$pij ) , surf:::SE.svymstat( flow_srs_lin$pij ) )
expect_identical( SE( flow_srs_lin$muij ) , surf:::SE.svymstat( flow_srs_lin$muij ) )
} )
test_that( "extraction of estimates: resampling" , {
expect_identical( coef( flow_srs_rep$psi ) , survey:::coef.svystat( flow_srs_rep$psi ) )
expect_identical( coef( flow_srs_rep$rho ) , survey:::coef.svystat( flow_srs_rep$rho ) )
expect_identical( coef( flow_srs_rep$tau ) , survey:::coef.svystat( flow_srs_rep$tau ) )
expect_identical( coef( flow_srs_rep$eta ) , survey:::coef.svystat( flow_srs_rep$eta ) )
expect_identical( coef( flow_srs_rep$pij ) , surf:::coef.svymstat( flow_srs_rep$pij ) )
expect_identical( coef( flow_srs_rep$muij ) , surf:::coef.svymstat( flow_srs_rep$muij ) )
expect_identical( vcov( flow_srs_rep$psi ) , survey:::vcov.svystat( flow_srs_rep$psi ) )
expect_identical( vcov( flow_srs_rep$rho ) , survey:::vcov.svystat( flow_srs_rep$rho ) )
expect_identical( vcov( flow_srs_rep$tau ) , survey:::vcov.svystat( flow_srs_rep$tau ) )
expect_identical( vcov( flow_srs_rep$eta ) , survey:::vcov.svystat( flow_srs_rep$eta ) )
expect_identical( vcov( flow_srs_rep$pij ) , surf:::vcov.svymstat( flow_srs_rep$pij ) )
expect_identical( vcov( flow_srs_rep$muij ) , surf:::vcov.svymstat( flow_srs_rep$muij ) )
expect_identical( SE( flow_srs_rep$psi ) , survey:::SE.svystat( flow_srs_rep$psi ) )
expect_identical( SE( flow_srs_rep$rho ) , survey:::SE.svystat( flow_srs_rep$rho ) )
expect_identical( SE( flow_srs_rep$tau ) , survey:::SE.svystat( flow_srs_rep$tau ) )
expect_identical( SE( flow_srs_rep$eta ) , survey:::SE.svystat( flow_srs_rep$eta ) )
expect_identical( SE( flow_srs_rep$pij ) , surf:::SE.svymstat( flow_srs_rep$pij ) )
expect_identical( SE( flow_srs_rep$muij ) , surf:::SE.svymstat( flow_srs_rep$muij ) )
} )
test_that("compare point estimates vs population values",{
expect_equivalent( coef( flow_srs_lin$psi ) , psi.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_lin$rho ) , rho.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_lin$tau ) , tau.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_lin$eta ) , eta.pop , tolerance = .10 )
expect_equivalent( coef( flow_srs_lin$pij , to.matrix = TRUE ) , pij.pop , tolerance = .10 )
expect_equivalent( coef( flow_srs_lin$muij , to.matrix = TRUE ) , muij.pop , tolerance = .20 )
expect_equivalent( coef( flow_srs_rep$psi ) , psi.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_rep$rho ) , rho.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_rep$tau ) , tau.pop , tolerance = .30 )
expect_equivalent( coef( flow_srs_rep$eta ) , eta.pop , tolerance = .10 )
expect_equivalent( coef( flow_srs_rep$pij , to.matrix = TRUE ) , pij.pop , tolerance = .10 )
expect_equivalent( coef( flow_srs_rep$muij , to.matrix = TRUE ) , muij.pop , tolerance = .20 )
} )
test_that("compare influence functions estimates",{
expect_identical( attr( flow_srs_lin$eta , "influence" ) , attr( flow_srs_rep$eta , "influence" ) )
expect_identical( attr( flow_srs_lin$pij , "influence" ) , attr( flow_srs_rep$pij , "influence" ) )
expect_identical( attr( flow_srs_lin$psi , "influence" ) , attr( flow_srs_rep$psi , "influence" ) )
expect_identical( attr( flow_srs_lin$rho , "influence" ) , attr( flow_srs_rep$rho , "influence" ) )
expect_identical( attr( flow_srs_lin$tau , "influence" ) , attr( flow_srs_rep$tau , "influence" ) )
} )
|
mtq <- mf_get_mtq()
mf_map(mtq)
expect_silent(mf_label(x = mtq, var = "LIBGEO", halo = TRUE, cex = 0.8,
overlap = FALSE, lines = TRUE))
|
context("Checking grade_level")
test_that("grade_level ...",{
})
|
popproj80l <- read.delim(file='popproj80l.txt', comment.char='#')
|
make_csr_identity_matrix = function(x) {
res = Matrix::.sparseDiagonal(n = x, shape = 'g', kind = 'd')
as(res, "RsparseMatrix")
}
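## Worked sketch (not part of the original source; assumes the Matrix
## package): a 3x3 identity stored in row-compressed (CSR) form.
if (FALSE) {
  I3 <- make_csr_identity_matrix(3)
  class(I3) # an "RsparseMatrix" (dgRMatrix) with unit diagonal
}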
RankMF = R6::R6Class(
inherit = MatrixFactorizationRecommender,
classname = "RankMF",
public = list(
rank = NULL,
lambda_user = NULL,
lambda_item_positive = NULL,
lambda_item_negative = NULL,
learning_rate = NULL,
margin = NULL,
optimizer = NULL,
kernel = NULL,
gamma = NULL,
precision = NULL,
loss = NULL,
progress = NULL,
max_negative_samples = NULL,
item_features_embeddings = NULL,
user_features_embeddings = NULL,
initialize = function(rank = 8L,
learning_rate = 0.01,
optimizer = c("adagrad", "rmsprop"),
lambda = 0,
init = NULL,
gamma = 0,
precision = c("double", "float"),
loss = c("bpr", "warp"),
kernel = c("identity", "sigmoid"),
margin = 0.1,
max_negative_samples = 50L,
progress = 10,
...) {
self$progress = progress
self$rank = rank
self$learning_rate = learning_rate
optimizer = match.arg(optimizer)
allowed_optimizers = c("adagrad" = 0L, "rmsprop" = 1L)
self$optimizer = allowed_optimizers[[optimizer]]
stopifnot(is.numeric(lambda))
if (length(lambda) == 1) {
lambda = c(
lambda_user = lambda,
lambda_item_positive = lambda,
lambda_item_negative = lambda
)
}
self$lambda_user = lambda[["lambda_user"]]
self$lambda_item_positive = lambda[["lambda_item_positive"]]
self$lambda_item_negative = lambda[["lambda_item_negative"]]
self$gamma = gamma
self$precision = match.arg(precision)
allowed_loss = c("bpr" = 0L, "warp" = 1L)
loss = match.arg(loss)
self$loss = allowed_loss[[loss]]
kernel = match.arg(kernel)
allowed_kernel = c("identity" = 0L, "sigmoid" = 1L)
self$kernel = allowed_kernel[[kernel]]
self$margin = margin
self$max_negative_samples = max_negative_samples
},
transform = function(x, user_features = NULL, n_iter = 100, n_threads = getOption("rsparse_omp_threads", 1L)) {
stop("not implemented yet")
},
partial_fit_transform = function(x, item_features = NULL, user_features = NULL, n_iter = 100, n_threads = getOption("rsparse_omp_threads", 1L)) {
private$partial_fit_transform_(x, item_features, user_features, n_iter, n_threads, update_items = TRUE)
}
),
private = list(
item_features = NULL,
user_features_squared_grad = NULL,
item_features_squared_grad = NULL,
partial_fit_transform_ = function(x, item_features = NULL, user_features = NULL, n_iter = 100, n_threads = getOption("rsparse_omp_threads", 1L), update_items = TRUE) {
if (is.null(item_features)) item_features = make_csr_identity_matrix(ncol(x))
if (is.null(user_features)) user_features = make_csr_identity_matrix(nrow(x))
stopifnot(
inherits(x, "RsparseMatrix"),
inherits(item_features, "RsparseMatrix"),
inherits(user_features, "RsparseMatrix"),
nrow(x) == nrow(user_features),
ncol(x) == nrow(item_features)
)
private$item_features = item_features
n_user = nrow(x)
n_item_features = ncol(private$item_features)
n_user_features = ncol(user_features)
rnorm_matrix = function(m, n, mean = 0, sd = 1) {
if (self$precision == "double") {
matrix(rnorm(m * n, mean, sd), nrow = m)
} else {
float::flrnorm(m, n, mean, sd)
}
}
ones = function(m) {
if (self$precision == "double") {
rep(1, m)
} else {
rep(fl(1), m)
}
}
if (is.null(self$user_features_embeddings)) self$user_features_embeddings = rnorm_matrix(self$rank, n_user_features, 0, 1e-3)
if (is.null(private$user_features_squared_grad)) private$user_features_squared_grad = ones(n_user_features)
if (is.null(self$item_features_embeddings)) self$item_features_embeddings = rnorm_matrix(self$rank, n_item_features, 0, 1e-3)
if (is.null(private$item_features_squared_grad)) private$item_features_squared_grad = ones(n_item_features)
n_threads_blas = RhpcBLASctl::blas_get_num_procs()
RhpcBLASctl::blas_set_num_threads(1L)
on.exit(RhpcBLASctl::blas_set_num_threads(n_threads_blas))
SOLVER = if (self$precision == "double") rankmf_solver_double else rankmf_solver_float
SOLVER(
x,
self$user_features_embeddings,
self$item_features_embeddings,
private$user_features_squared_grad,
private$item_features_squared_grad,
user_features,
private$item_features,
rank = self$rank,
n_updates = n_iter * n_user,
self$learning_rate,
gamma = self$gamma,
lambda_user = self$lambda_user,
lambda_item_positive = self$lambda_item_positive,
lambda_item_negative = self$lambda_item_negative,
n_threads = n_threads,
update_items = update_items,
loss = self$loss,
kernel = self$kernel,
max_negative_samples = self$max_negative_samples,
margin = self$margin,
self$optimizer,
self$progress
)
item_embeddings = tcrossprod(private$item_features, self$item_features_embeddings)
private$components_ = t(item_embeddings)
user_embeddings = t(self$user_features_embeddings %*% t(user_features))
invisible(as.matrix(user_embeddings))
}
)
)
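## Usage sketch (not part of the original source; assumes the compiled
## rankmf_solver_* backends and a user-item interaction matrix in CSR form,
## here called `x_csr` purely for illustration).
if (FALSE) {
  model <- RankMF$new(rank = 8L, loss = "warp", precision = "double")
  user_emb <- model$partial_fit_transform(x_csr, n_iter = 10L)
  dim(user_emb) # n_users x rank
}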
|
weighted_var <- function( x, w=rep(1,length(x) ), method="unbiased",
select=NULL )
{
res <- tam_weighted_stats_select(x=x, w=w, select=select)
x <- res$x
w <- res$w
dat <- data.frame("x"=x )
res <- stats::cov.wt( x=dat, wt=w, cor=FALSE, center=TRUE, method=method )
res <- res$cov[1,1]
return(res)
}
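## Worked sketch (not part of the original source; assumes the TAM-internal
## helper tam_weighted_stats_select()): with unit weights the result matches
## stats::var().
if (FALSE) {
  x <- c(1, 2, 4, 8)
  weighted_var(x) # 9.583..., identical to var(x)
}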
|
skip_on_cran()
oldtz <- Sys.getenv('TZ', unset = NA)
Sys.setenv(TZ = 'UTC')
tests.home <- getwd()
setwd(tempdir())
test_that("loadDetections fails with expected message if no detections are present.", {
expect_error(loadDetections(tz = "Europe/Copenhagen"),
"Could not find a 'detections' folder nor a 'detections.csv' file.", fixed = TRUE)
})
dir.create("detections")
test_that("loadDetections stops if detections folder is empty", {
expect_error(loadDetections(tz = "Europe/Copenhagen"),
"A 'detections' folder is present but appears to be empty.", fixed = TRUE)
})
aux <- split(example.detections, example.detections$Receiver)
receiver_aux <- strsplit(as.character(aux[[2]]$Receiver), "-", fixed = TRUE)
aux[[2]] <- data.frame(
`Date and Time UTC` = aux[[2]]$Timestamp,
`TBR Serial Number` = aux[[2]]$Receiver,
`Unix Timestamp UTC` = rep(NA_real_, nrow(aux[[2]])),
Millisecond = rep(NA_real_, nrow(aux[[2]])),
CodeType = rep("R64K", nrow(aux[[2]])),
Id = aux[[2]]$Signal,
Data = rep(NA_real_, nrow(aux[[2]])),
`Signal to Noise Ratio` = rep(NA_real_, nrow(aux[[2]])))
colnames(aux[[2]])[2] <- "TBR Serial Number"
aux[[3]] <- data.frame(
Date.and.Time..UTC. = aux[[3]]$Timestamp,
Unix.Timestamp..UTC. = rep(NA_real_, nrow(aux[[3]])),
ID = aux[[3]]$Signal,
Data = rep(NA_real_, nrow(aux[[3]])),
Protocol = rep("R64K-69kHz", nrow(aux[[3]])),
SNR = rep(NA_real_, nrow(aux[[3]])),
Receiver = aux[[3]]$Receiver)
aux[[4]] <- data.frame(
Date.and.Time..UTC. = aux[[4]]$Timestamp,
Receiver = paste0("VR2W-", aux[[4]]$Receiver),
Transmitter = paste0("A69-1303-", aux[[4]]$Signal),
Sensor.Value = rep(NA_real_, nrow(aux[[4]])),
Sensor.Unit = rep(NA_character_, nrow(aux[[4]])))
aux[[5]] <- data.frame(
Date.and.Time..UTC. = aux[[5]]$Timestamp,
Receiver = paste0("VR2W-", aux[[5]]$Receiver),
Transmitter = paste0("A69-1303-", aux[[5]]$Signal))
for (i in names(aux)[1:5]) {
write.csv(aux[[i]], paste0("detections/", i, ".csv"), row.names = FALSE)
}
test_that("loadDetections output is as expected", {
output <- loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE)
expect_equal(attributes(output$Timestamp)$tz, "Europe/Copenhagen")
expect_equal(colnames(output), c("Timestamp", "Receiver", "CodeSpace", "Signal", 'Sensor.Value', 'Sensor.Unit', "Transmitter", "Valid"))
expect_equal(factor(paste(output$CodeSpace, output$Signal, sep = "-")), output$Transmitter)
expect_equal(nrow(output), 2639)
unlink("detections", recursive = TRUE)
write.csv(aux[[1]], "detections.csv", row.names = FALSE)
output <- loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE)
expect_equal(attributes(output$Timestamp)$tz, "Europe/Copenhagen")
expect_equal(colnames(output), c("Timestamp", "Receiver", "CodeSpace", "Signal", 'Sensor.Value', 'Sensor.Unit', "Transmitter", "Valid"))
expect_equal(factor(paste(output$CodeSpace, output$Signal, sep = "-")), output$Transmitter)
expect_equal(nrow(output), 708)
file.remove("detections.csv")
})
dir.create("detections")
aux <- split(example.detections, example.detections$Receiver)
for (i in names(aux)[1:3]) {
write.csv(aux[[i]], paste0("detections/", i, ".csv"), row.names = FALSE)
}
test_that("loadDetections can handle the presence of a detections folder and detections file", {
output <- loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE)
file.copy("detections/132908.csv", "detections.csv")
expect_warning(output <- loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"Both a 'detections' folder and a 'detections.csv' file are present in the current directory.\n Loading ONLY the files present in the 'detections' folder.", fixed = TRUE)
expect_equal(nrow(output), 1369)
file.remove("detections.csv")
})
test_that("loadDetectons can handle random/empty files", {
write.csv("abc", "detections/bad_file.csv", row.names = FALSE)
expect_warning(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"File 'detections/bad_file.csv' could not be recognized as a valid detections table (ncol < 3), skipping processing. Are you sure it is a comma separated file?", fixed = TRUE)
sink("detections/bad_file.csv")
cat("'abc','def','ghi'\n1,2,3\n")
sink()
expect_warning(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"File 'detections/bad_file.csv' does not match to any of the supported hydrophone file formats!\n If your file corresponds to a hydrophone log and actel did not recognize it, please get in contact through www.github.com/hugomflavio/actel/issues/new", fixed = TRUE)
sink("detections/bad_file.csv")
cat("Date and Time (UTC),Receiver,Transmitter,Transmitter Name,Transmitter Serial,Sensor Value,Sensor Unit,Station Name,Latitude,Longitude\n")
sink()
expect_message(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"File 'detections/bad_file.csv' is empty, skipping processing.", fixed = TRUE)
sink("detections/bad_file2.csv")
cat("Date and Time (UTC),Receiver,Transmitter,Transmitter Name,Transmitter Serial,Sensor Value,Sensor Unit,Station Name,Latitude,Longitude\n")
sink()
expect_message(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"File 'detections/bad_file2.csv' is empty, skipping processing.", fixed = TRUE)
expect_message(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"M: 2 files were excluded from further analyses.", fixed = TRUE)
unlink("detections", recursive = TRUE)
dir.create("detections")
sink("detections/bad_file2.csv")
cat("Date and Time (UTC),Receiver,Transmitter,Transmitter Name,Transmitter Serial,Sensor Value,Sensor Unit,Station Name,Latitude,Longitude\n")
sink()
expect_error(loadDetections(start.time = NULL, stop.time = NULL, tz = "Europe/Copenhagen", force = FALSE),
"No valid detection files were found.", fixed = TRUE)
unlink("detections", recursive = TRUE)
})
dir.create("detections")
aux <- split(example.detections, example.detections$Receiver)
for (i in names(aux)[1:3]) {
write.csv(aux[[i]], paste0("detections/", i, ".csv"), row.names = FALSE)
}
test_that("loadDetections' start.time and stop.time arguments are working", {
expect_message(loadDetections(start.time = "2018-04-15 00:00:00", tz = "Europe/Copenhagen", force = FALSE),
"Discarding detection data previous to 2018-04-15 00:00:00 per user command (243 detections discarded).", fixed = TRUE)
expect_message(loadDetections(stop.time = "2018-05-01 00:00:00", tz = "Europe/Copenhagen", force = FALSE),
"M: Discarding detection data posterior to 2018-05-01 00:00:00 per user command (267 detections discarded).", fixed = TRUE)
output <- loadDetections(start.time = "2018-04-15 00:00:00", stop.time = "2018-05-01 00:00:00", tz = "Europe/Copenhagen", force = FALSE)
expect_equal(nrow(output), 859)
expect_true(output$Timestamp[1] > "2018-04-15 00:00:00")
expect_true(output$Timestamp[859] < "2018-05-01 00:00:00")
unlink("detections", recursive = TRUE)
})
dir.create("detections")
aux <- split(example.detections, example.detections$Receiver)
for (i in names(aux)[1:3]) {
write.csv(aux[[i]], paste0("detections/", i, ".csv"), row.names = FALSE)
}
write.csv(aux[[3]], paste0("detections/test.csv"), row.names = FALSE)
test_that("checkDupDetections kicks in if needed.", {
detections <- loadDetections(start.time = "2018-04-15 00:00:00", tz = "Europe/Copenhagen", force = FALSE)
expect_warning(output <- checkDupDetections(input = detections),
"412 duplicated detections were found. Could an input file be duplicated?", fixed = TRUE)
unlink("detections", recursive = TRUE)
})
dir.create("detections")
aux <- split(example.detections, example.detections$Receiver)
for (i in names(aux)[1:3]) {
write.csv(aux[[i]], paste0("detections/", i, ".csv"), row.names = FALSE)
}
test_that("checkDetectionsBeforeRelease kicks in if needed.", {
detections <- loadDetections(start.time = "2018-04-15 00:00:00", tz = "Europe/Copenhagen", force = FALSE)
bio <- example.biometrics
recipient <- splitDetections(detections = detections, bio = bio, exclude.tags = NULL)
detections.list <- recipient[[1]]
bio <- recipient[[2]]
bio$Release.date[2] <- bio$Release.date[2] + (3600 * 24 * 13)
expect_message(
expect_warning(checkDetectionsBeforeRelease(input = detections.list, bio = bio),
"Tag R64K-4451 was detected before being released!", fixed = TRUE),
"12 detections from tag R64K-4451 were removed per user command", fixed = TRUE)
bio$Release.date[2] <- bio$Release.date[2] + (3600 * 24 * 3)
expect_message(
expect_warning(output <- checkDetectionsBeforeRelease(input = detections.list, bio = bio),
"Tag R64K-4451 was detected before being released!", fixed = TRUE),
"ALL detections from tag R64K-4451 were removed per user command.", fixed = TRUE)
expect_equal(length(output), length(detections.list) - 1)
})
test_that("loadDetections can handle saved detections.", {
output <- loadDetections(tz = "Europe/Copenhagen", save.detections = TRUE)
file.copy("detections/actel.detections.RData", "actel.detections.RData")
expect_warning(output <- loadDetections(tz = "Europe/Copenhagen"),
"Previously compiled detections were found both in the current directory and in a 'detections' folder.
Loading ONLY the compiled detections present in the 'detections' folder.", fixed = TRUE)
file.remove("detections/actel.detections.RData")
expect_message(output <- loadDetections(tz = "Europe/Copenhagen"),
"The detections have been processed on", fixed = TRUE)
file.remove("actel.detections.RData")
})
setwd(tests.home)
if (is.na(oldtz)) Sys.unsetenv("TZ") else Sys.setenv(TZ = oldtz)
rm(list = ls())
|
mod_dxy_table_ui <- function(id){
ns <- NS(id)
f7ExpandableCard(
title = "Cities",
id = "china_card",
subtitle = "Cases by city in China",
uiOutput(ns("table"))
)
}
mod_dxy_table_server <- function(input, output, session, df){
ns <- session$ns
output$table <- renderUI({
df %>%
dplyr::arrange(-confirmedCount) %>%
dplyr::select(
City = cityName,
Confirmed = confirmedCount,
Recovered = curedCount,
Deaths = deadCount
) %>%
as_f7_table(card = TRUE)
})
}
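## A minimal wiring sketch for the module above, kept as comments so the file
## stays source-able. Assumptions (not in the original): a data frame
## `china_df` with the DXY columns used above, and a hypothetical loader
## `get_dxy_data()`.
# app_ui <- f7Page(mod_dxy_table_ui("dxy_table_ui_1"))
# app_server <- function(input, output, session) {
#   china_df <- get_dxy_data()  # hypothetical; any df with the columns above works
#   callModule(mod_dxy_table_server, "dxy_table_ui_1", df = china_df)
# }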
|
library(testthat)
library(parsnip)
library(rlang)
library(survival)
library(tibble)
source(test_path("helper-objects.R"))
basic_form <- Surv(time, status) ~ age
complete_form <- Surv(time) ~ age
test_that('flexsurv execution', {
skip_if_not_installed("flexsurv")
rlang::local_options(lifecycle_verbosity = "quiet")
surv_basic <- surv_reg() %>% set_engine("flexsurv")
expect_error(
res <- fit(
surv_basic,
Surv(time, status) ~ age,
data = lung,
control = ctrl
),
regexp = NA
)
expect_error(
res <- fit(
surv_basic,
Surv(time) ~ age,
data = lung,
control = ctrl
),
regexp = NA
)
expect_false(has_multi_predict(res))
expect_equal(multi_predict_args(res), NA_character_)
expect_error(
res <- fit_xy(
surv_basic,
x = lung[, "age", drop = FALSE],
y = lung$time,
control = ctrl
)
)
})
test_that('flexsurv prediction', {
skip_if_not_installed("flexsurv")
rlang::local_options(lifecycle_verbosity = "quiet")
surv_basic <- surv_reg() %>% set_engine("flexsurv")
res <- fit(
surv_basic,
Surv(time, status) ~ age,
data = lung,
control = ctrl
)
exp_pred <- summary(res$fit, head(lung), type = "mean")
exp_pred <- do.call("rbind", unclass(exp_pred))
exp_pred <- tibble(.pred = exp_pred$est)
expect_equal(exp_pred, predict(res, head(lung)))
})
|
highlod <- function(scans, lod.thr = 0, drop.lod = 1.5,
extend = TRUE, restrict.lod = FALSE, ...)
{
pheno.col <- seq(ncol(scans) - 2)
if(is.null(lod.thr))
lod.thr <- 0
x <- as.matrix(scans[,-(1:2), drop = FALSE])
keep <- apply(x, 2, function(x, lod.thr) any(x >= lod.thr), lod.thr)
x <- x[, keep, drop = FALSE]
if(restrict.lod) {
if(extend)
tmpfn <- function(x, lod.thr, drop.lod) {
maxx <- max(x)
g <- (maxx >= lod.thr) & (maxx <= x + drop.lod)
if(any(g)) {
d <- diff(g)
(g | (c(d,0) == 1) | (c(0,d) == -1)) & (x >= lod.thr)
}
else
g
}
else
tmpfn <- function(x, lod.thr, drop.lod) {
(max(x) <= x + drop.lod) & (x >= lod.thr)
}
}
else {
if(extend)
tmpfn <- function(x, lod.thr, drop.lod) {
maxx <- max(x)
g <- (maxx >= lod.thr) & (maxx <= x + drop.lod)
if(any(g)) {
d <- diff(g)
g | (c(d,0) == 1) | (c(0,d) == -1)
}
else
g
}
else
tmpfn <- function(x, lod.thr, drop.lod) {
maxx <- max(x)
(maxx >= lod.thr) & (maxx <= x + drop.lod)
}
}
lodint.pos <- function(x, chr, lod.thr, drop.lod) {
unlist(tapply(x, chr, tmpfn, lod.thr, drop.lod))
}
wh <- apply(x, 2, lodint.pos, scans$chr, lod.thr, drop.lod)
rr <- row(x)[wh]
cc <- seq(keep)[keep][col(x)[wh]]
lod <- x[wh]
out <- list(highlod = cbind.data.frame(row = rr, phenos = pheno.col[cc], lod = lod),
chr.pos = scans[,1:2],
names = names(scans)[-(1:2)])
class(out) <- c("highlod", "list")
attr(out, "lod.thr") <- lod.thr
attr(out, "drop.lod") <- drop.lod
out
}
print.highlod <- function(x, ...) print(summary(x, ...))
summary.highlod <- function(object, ...)
{
summary(hotsize(object, ...))
}
plot.highlod <- function(x, ..., quant.level = NULL, sliding = FALSE)
{
if(sliding) {
if(is.list(quant.level))
quant.level <- quant.level$max.lod.quant
slidingbar.plot(slidingbar.create(x, quant.level, ...), ...)
}
else
graphics::plot(hotsize(x, ..., quant.level = quant.level), ...)
}
highlod.thr <- function(highobj, lod.thr)
{
if(is.null(lod.thr))
lod.thr <- attr(highobj, "lod.thr")
if(!is.null(lod.thr)) {
highobj$highlod <- highobj$highlod[highobj$highlod$lod >= lod.thr,, drop = FALSE]
attr(highobj, "lod.thr") <- lod.thr
}
highobj
}
cat.scanone <- function(dirpath = ".", filenames = NULL, chr.pos)
{
  ## Default to all saved permutation scan files in dirpath.
  if(is.null(filenames))
    filenames <- list.files(dirpath, paste("per.scan", "*", "RData", sep = "."))
  highobj <- NULL
  for(i in seq_along(filenames)) {
    ## Each file holds a per.scan.hl object saved by scanone.permutations().
    per.scan.hl <- NULL
    load(file.path(dirpath, filenames[i]))
    highobj <- if(i == 1) per.scan.hl else rbind.data.frame(highobj, per.scan.hl)
  }
  cbind.data.frame(chr.pos[highobj$row,], highobj)
}
sexbatch.covar <- function(cross, batch.effect, verbose = FALSE)
{
ic <- qtl::getsex(cross)$sex
if(length(unique(ic)) == 1)
ic <- NULL
if(!is.null(batch.effect)){
batch <- cross$pheno[,batch.effect, drop = FALSE]
tmp <- stats::formula(paste("~ factor(", batch.effect, ")"))
if(verbose)
cat("sexbatch.covar", names(tmp), levels(factor(batch[[1]])), "\n")
if(verbose)
cat("sexbatch.covar", dim(batch), "\n")
batch <- stats::model.matrix(tmp,batch)[,-1, drop = FALSE]
if(verbose)
cat("sexbatch.covar", dim(batch), "\n")
ac <- cbind(batch,ic)
}
else
ac <- ic
list(addcovar = ac, intcovar = ic)
}
scanone.permutations <- function(cross, pheno.col = seq(3, qtl::nphe(cross)),
n.perm, seed=123456789, batch.effect = NULL,
pheno.set = 1,
lod.min, drop.lod = 1.5,
addcovar = NULL, intcovar = NULL, ...)
{
set.seed(seed[[1]])
if(!is.null(batch.effect)) {
cross <- subset(cross, ind = !is.na(cross$pheno[[batch.effect]]))
covars <- sexbatch.covar(cross, batch.effect)
}
else
covars <- list(addcovar = addcovar, intcovar = intcovar)
n.ind <- qtl::nind(cross)
perms <- matrix(NA, n.ind, n.perm)
for(i in 1:n.perm){
perm.cross <- cross
perms[,i] <- tmp <- sample(c(1:n.ind), n.ind, replace=FALSE)
perm.cross$pheno <- cross$pheno[tmp,]
per.scan <- qtl::scanone(perm.cross, pheno.col=pheno.col, method="hk",
addcovar=covars$addcovar, intcovar=covars$intcovar, ...)
per.scan.hl <- highlod(per.scan, lod.thr = lod.min, drop.lod = drop.lod,
restrict.lod = TRUE)$highlod
save(per.scan.hl, perms,
file=paste("per.scan",pheno.set, i,"RData",sep="."))
}
}
pull.highlod <- function(object, chr, pos, ...)
{
pheno.names <- object$names
if(is.null(pheno.names)) {
extra <- list(...)
m <- match("names", names(extra))
if(!is.na(m))
pheno.names <- extra[[m]]
}
wh.chr <- which(object$chr.pos$chr == chr)
wh.pos <- which(object$chr.pos$pos[wh.chr] - min(pos) >= 0 &
object$chr.pos$pos[wh.chr] - max(pos) <= 0)
wh.high <- which(object$highlod$row %in% wh.chr[wh.pos])
wh.row <- object$highlod[wh.high, "row"]
out <- data.frame(object$chr.pos[wh.row,], object$highlod[wh.high, -1])
if(!is.null(pheno.names))
out$phenos <- pheno.names[out$phenos]
out
}
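## Usage sketch for highlod()/pull.highlod(), commented out because it needs
## an R/qtl cross object; `cross` and the phenotype columns are assumptions:
# scan1 <- qtl::scanone(cross, pheno.col = 3:10, method = "hk")
# high <- highlod(scan1, lod.thr = 3, drop.lod = 1.5)
# summary(high)
# pull.highlod(high, chr = 2, pos = c(20, 40))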
|
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(contactdata)
(all_contacts <- list_countries())
library(ggplot2)
wlrd <- map_data("world")
wlrd$region <- countrycode::countryname(wlrd$region)
wlrd$included <- wlrd$region %in% all_contacts
ggplot(wlrd, aes(long, lat, group = group, fill = included)) +
geom_polygon() +
coord_equal() +
theme_bw()
|
mapping_create <- function(conn, index, body, type = NULL, update_all_types = FALSE,
include_type_name = NULL, ...) {
is_conn(conn)
url <- conn$make_url()
url <- file.path(url, esc(index), "_mapping")
if (!is.null(type)) url <- file.path(url, esc(type))
args <- ec(list(include_type_name = as_log(include_type_name)))
if (conn$es_ver() < 603) {
args <- ec(list(update_all_types = as_log(update_all_types)))
}
es_PUT(conn, url, body, args, ...)
}
mapping_get <- function(conn, index = NULL, type = NULL,
include_type_name = NULL, ...) {
is_conn(conn)
url <- conn$make_url()
if (any(index == "_all")) {
url <- file.path(url, "_mapping")
} else {
if (is.null(type)) {
url <- file.path(url, esc(cl(index)), "_mapping")
} else if (is.null(index) && !is.null(type)) {
url <- file.path(url, "_mapping", esc(cl(type)))
} else if (!is.null(index) && !is.null(type)) {
if (length(index) > 1) stop("You can only pass one index if you also pass a type",
call. = FALSE)
url <- file.path(url, esc(index), "_mapping", esc(cl(type)))
}
}
es_GET_(conn, url, ec(list(include_type_name = as_log(include_type_name))),
...)
}
field_mapping_get <- function(conn, index = NULL, type = NULL, field,
include_defaults = FALSE, include_type_name = NULL, ...) {
is_conn(conn)
stopifnot(!is.null(field))
url <- conn$make_url()
if (any(index == "_all")){
conn$stop_es_version(110, "field_mapping_get")
if (!is.null(type))
url <- file.path(url, "_all/_mapping", esc(cl(type)), "field", cl(field))
else
url <- file.path(url, "_all/_mapping/field", cl(field))
} else {
if (is.null(index) && is.null(type)) {
url <- file.path(url, "_mapping/field", cl(field))
} else if (!is.null(index) && is.null(type)) {
url <- file.path(url, esc(cl(index)), "_mapping/field", cl(field))
} else if (is.null(index) && !is.null(type)) {
url <- file.path(url, "_all/_mapping", esc(cl(type)), "field", cl(field))
} else if (!is.null(index) && !is.null(type)) {
if (length(index) > 1)
stop("You can only pass one index if you also pass a type",
call. = FALSE)
url <- file.path(url, esc(index), "_mapping", esc(cl(type)), "field", cl(field))
}
}
args <- ec(list(include_defaults = as_log(include_defaults),
include_type_name = as_log(include_type_name)))
es_GET_(conn, url, query = args, ...)
}
type_exists <- function(conn, index, type, ...) {
is_conn(conn)
if (conn$es_ver() <= 100) {
stop("type exists not available in ES <= v1", call. = FALSE)
}
if (conn$es_ver() >= 800) {
stop("types are defunct in ES >= v8", call. = FALSE)
}
url <- conn$make_url()
if (conn$es_ver() >= 500) {
url <- file.path(url, esc(index), "_mapping", esc(type))
} else {
url <- file.path(url, esc(index), esc(type))
}
res <- conn$make_conn(url, ...)$head()
if (conn$warn) catch_warnings(res)
if (res$status_code == 200) TRUE else FALSE
}
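## Usage sketch (assumes a running Elasticsearch instance reachable through
## elastic::connect(); the index name and mapping body are illustrative):
# x <- connect()
# body <- '{"properties": {"journal": {"type": "keyword"}}}'
# mapping_create(x, index = "plos", body = body)
# mapping_get(x, index = "plos")
# field_mapping_get(x, index = "plos", field = "journal")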
|
log_in <- function(username, password, environment = 1){
rfinanceConnection <- ConnectionHandler$new(username, password)
assign("rfinanceConnection", rfinanceConnection, envir = as.environment(environment))
}
log_out <- function(environment = 1){
  if(exists("rfinanceConnection", envir = as.environment(environment))){
    rm("rfinanceConnection", envir = as.environment(environment))
  }
}
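## Usage sketch (ConnectionHandler is defined elsewhere in this package;
## the credentials below are placeholders):
# log_in("my_username", "my_password")
# exists("rfinanceConnection")  # TRUE after a successful login
# log_out()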
|
GrammaticalEvolution <- function(grammarDef, evalFunc,
numExpr = 1,
max.depth = GrammarGetDepth(grammarDef),
startSymb = GrammarStartSymbol(grammarDef),
seqLen = GrammarMaxSequenceLen(grammarDef, max.depth, startSymb),
wrappings=3,
suggestions=NULL,
optimizer = c("auto", "es", "ga"),
popSize = "auto", newPerGen = "auto", elitism = 2,
mutationChance=NA,
iterations="auto",
terminationCost=NA,
monitorFunc=NULL,
disable.warnings=FALSE,
plapply=lapply, ...){
if (numExpr < 1) {
stop("Number of Expressions (numExpr) has to be at least 1.");
}
chromosomeLen <- seqLen * numExpr
optimizer <- match.arg(optimizer)
if (optimizer == "auto") {
if (numExpr > 1) {
optimizer = "ga"
} else {
optimizer = "es"
}
}
if (popSize == "auto") {
if (optimizer == "ga") {
popSize = 200
} else {
popSize = 8
}
}
if (iterations == "auto") {
iterations = 1000
num.grammar.expr = GrammarNumOfExpressions(grammarDef, max.depth, startSymb)
iterations = round(min(num.grammar.expr / popSize * 2,
iterations))
if (optimizer == "ga") {
iterations = round(iterations/5)
}
}
if (optimizer == "es" && newPerGen == "auto") {
if (GrammarIsRecursive(grammarDef)) {
newPerGen = popSize
popSize = 0
} else {
newPerGen = round(popSize / 4)
popSize = popSize - newPerGen
}
}
if (is.na(mutationChance)) {
if (optimizer == "es") {
mutationChance <- min(0.1, 5 / (1 + chromosomeLen))
} else {
mutationChance <- 1 / (1 + chromosomeLen)
}
}
if (numExpr == 1) {
ind.cut <- 1
geneCrossoverPoints <- NULL
} else {
ind.cut <- as.numeric(cut(1:chromosomeLen, numExpr))
geneCrossoverPoints <- ind.cut
}
chromToExprList <- function(chromosome) {
expr.list = c()
for (i in 1:numExpr) {
ch <- chromosome[ind.cut == i]
tryCatch({
expr <- GrammarMap(ch, grammarDef, wrappings = wrappings)
if (expr$type == "T") {
expr.list <- c(expr.list, as.expression(expr))
}
}, warning = function(w) print(w), error = function(e) print(e))
}
return(expr.list)
}
ga.evalFunc <- function(chromosome) {
expr.list = chromToExprList(chromosome)
if (length(expr.list) == 0) {
return (Inf)
}
if (disable.warnings) {
eval.results = suppressWarnings(evalFunc(expr.list))
} else {
eval.results = evalFunc(expr.list)
}
return (eval.results)
}
add.expression.to.results <- function(ga.result) {
ga.result$best$expressions = chromToExprList(ga.result$best$genome)
class(ga.result) <- "GrammaticalEvolution"
return(ga.result)
}
if (!is.null(monitorFunc)) {
ga.monFunc <- function(result) {
monitorFunc(add.expression.to.results(result))
}
} else {
ga.monFunc <- NULL
}
if (optimizer == "ga") {
result <- GeneticAlg.int(genomeLen=chromosomeLen,
codonMin = 0, codonMax = GrammarMaxRuleSize(grammarDef) - 1,
evalFunc=ga.evalFunc,
suggestions=suggestions,
popSize=popSize, iterations=iterations, elitism=elitism, mutationChance=mutationChance,
geneCrossoverPoints = geneCrossoverPoints,
terminationCost=terminationCost,
monitorFunc=ga.monFunc,
allowrepeat = TRUE,
plapply=plapply, ...)
} else {
result <- EvolutionStrategy.int(genomeLen=chromosomeLen,
codonMin = 0, codonMax = GrammarMaxRuleSize(grammarDef) - 1,
evalFunc=ga.evalFunc,
suggestion=suggestions,
mutationChance=mutationChance,
popSize=popSize, newPerGen = newPerGen,
iterations=iterations, terminationCost=terminationCost,
monitorFunc=ga.monFunc,
allowrepeat = TRUE,
plapply=plapply, ...)
}
return (add.expression.to.results(result))
}
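## A symbolic-regression style usage sketch for GrammaticalEvolution(),
## following the gramEvol-type grammar helpers (grule/CreateGrammar) this
## function is written against; all names below are illustrative:
# ruleDef <- list(expr = grule(op(expr, expr), var),
#                 op   = grule(`+`, `-`, `*`),
#                 var  = grule(x, 1, 2))
# grammarDef <- CreateGrammar(ruleDef)
# x <- 1:10
# target <- x^2 + x
# evalFunc <- function(expr) sum((eval(expr) - target)^2)
# ge <- GrammaticalEvolution(grammarDef, evalFunc,
#                            iterations = 50, terminationCost = 1e-6)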
|
multicomp.mmc <- function(x,
focus=dimnames(attr(x$terms,"factors"))[[2]][1],
comparisons="mca",
lmat,
lmat.rows=lmatRows(x, focus),
lmat.scale.abs2=TRUE,
ry,
plot=TRUE,
crit.point,
iso.name=TRUE,
estimate.sign=1,
x.offset=0,
order.contrasts=TRUE,
main,
main2,
focus.lmat,
...) {
multicomp.lm <- NA
stop("multicomp.mmc works only in S-Plus. Use mmc in R.")
}
"[.mmc.multicomp" <- function(x, ..., drop = TRUE) {
result <- NextMethod("[")
oldClass(result) <- oldClass(x)
result
}
|
pvsRequest3 <-
function (request,inputs) {
pvs.url <- paste("http://api.votesmart.org/",request,"key=",get('pvs.key',envir=.GlobalEnv),inputs,sep="")
httpresp <- try(GET(url=pvs.url, timeout(5)), silent=TRUE)
timedout <- any(grepl("OPERATION_TIMEDOUT", x=class(attributes(httpresp)$condition)))
t <- 0
while (timedout & t<3) {
httpresp <- try(GET(url=pvs.url, timeout(5)), silent=TRUE)
timedout <- any(grepl("OPERATION_TIMEDOUT", x=class(attributes(httpresp)$condition)))
t <- t +1
}
xmltext <- content(x=httpresp, as="text")
errors <- getXMLErrors(xmltext)
if (length(errors) != 0) {
if (names(errors[[1]]$code) == "XML_ERR_CDATA_NOT_FINISHED") {
xmltext <- gsub(pattern="\003", replacement="", x=xmltext, fixed=TRUE)
}
}
output.base <- xmlRoot(xmlTreeParse(xmltext, useInternalNodes=TRUE, error=function(...){}))
output <- xmlSApply(output.base, function(x) data.frame(t(xmlSApply(x, xmlValue))))
output
}
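## Usage sketch: the function expects a Project Vote Smart API key bound to
## 'pvs.key' in the global environment; the method string is illustrative:
# pvs.key <- "YOUR-API-KEY"
# out <- pvsRequest3("State.getStateIDs?", "")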
|
test_that("wkt_wkb", {
str <- "POINT (-116.4 45.2)"
pt1 <- wkt_wkb(str)
expect_is(pt1, "raw")
expect_equal(pt1[1], as.raw("01"))
expect_equal(wkb_wkt(pt1), str)
})
test_that("wkt_wkb works with multipoint", {
mpt <- multipoint(c(100, 3), c(101, 2), c(3, 2), fmt=0)
mpt1 <- wkt_wkb(mpt)
expect_is(mpt1, "raw")
expect_equal(mpt1[1], as.raw("01"))
expect_equal(wkb_wkt(mpt1), mpt)
})
test_that("wkt_wkb works with polygon input", {
ply <- polygon(c(100, 1), c(101, 1), c(101, 1), c(100, 1), fmt = 0)
poly1 <- wkt_wkb(ply)
expect_is(poly1, "raw")
expect_equal(poly1[1], as.raw("01"))
expect_equal(wkb_wkt(poly1), ply)
})
test_that("wkt_wkb works with multipolygon", {
df <- data.frame(long = c(30, 45, 10, 30), lat = c(20, 40, 40, 20))
df2 <- data.frame(long = c(15, 40, 10, 5, 15), lat = c(5, 10, 20, 10, 5))
mpoly <- multipolygon(df, df2, fmt = 0)
mpoly1 <- wkt_wkb(mpoly)
expect_is(mpoly1, "raw")
expect_equal(mpoly1[1], as.raw("01"))
expect_equal(wkb_wkt(mpoly1), mpoly)
})
test_that("wkt_wkb works with linestring", {
line <- linestring("LINESTRING (-116 45, -118 47)")
line1 <- wkt_wkb(line)
expect_is(line1, "raw")
expect_equal(line1[1], as.raw("01"))
expect_equal(wkb_wkt(line1), line)
})
test_that("wkt_wkb works with multilinestring", {
df <- data.frame(long = c(30, 45, 10), lat = c(20, 40, 40))
df2 <- data.frame(long = c(15, 40, 10), lat = c(5, 10, 20))
mline <- multilinestring(df, df2, fmt = 0)
mline1 <- wkt_wkb(mline)
expect_is(mline1, "raw")
expect_equal(mline1[1], as.raw("01"))
expect_equal(wkb_wkt(mline1), mline)
})
test_that("wkt_wkb fails well", {
expect_error(wkt_wkb(), "\"x\" is missing")
expect_error(wkt_wkb(""))
expect_error(wkt_wkb(5))
expect_error(wkt_wkb("foobar"))
})
|
Ffuncshift<-function(time,t,lambda,mu,rho=1) {
1-rho+rho*(Ffuncshifth(time,t,lambda,mu)+1)
}
|
settings <- list(
gen_autoUpdateSettings = TRUE,
block1 = "foo",
block1_2 = TRUE,
willBeDeleted1 = 5,
block1_nameThemWhateverYouWant = "Henry",
anyName = "XXX_1",
block2 = "bar",
block2_2 = "XXX_2",
comboDelete1 = "theo",
block2_oneMoreVariable = FALSE,
block3_giveMeaningfulNames = TRUE,
favouriteColor = "blue",
willBeDeleted2 = "darkred",
comboDelete2 = TRUE,
comboDelete3 = "XXX_3",
block4 = "you get the picture",
blabla = TRUE,
andSoOn = 999999,
petterson = "old",
findus = "cat",
mouse = "grey",
obey = TRUE,
willBeDeleted3 = FALSE,
strength = 5000,
last = 0
)
|
normImp <- function(obsData, M=10, pd=FALSE, steps=100, rseed) {
  if (!is.data.frame(obsData)) {
    stop("obsData argument must be a data frame.")
  }
  if (!is.null(rseed)) {
    norm::rngseed(rseed)
  }
imps <- vector("list", M)
s <- norm::prelim.norm(as.matrix(obsData))
thetahat <- norm::em.norm(s)
if (pd==FALSE) {
for (i in 1:M) {
imps[[i]] <- as.data.frame(norm::imp.norm(s,thetahat,as.matrix(obsData)))
}
} else {
for (i in 1:M) {
theta <- norm::da.norm(s,thetahat,steps=steps)
imps[[i]] <- as.data.frame(norm::imp.norm(s,theta,as.matrix(obsData)))
}
}
if (M==1) {
imps <- data.frame(imps[[1]])
}
attr(imps, "pd") <- pd
imps
}
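## Usage sketch with a small toy data frame (values are arbitrary):
# df <- data.frame(x = c(rnorm(8), NA, NA), y = c(NA, rnorm(9)))
# imps <- normImp(df, M = 5, rseed = 4423)
# length(imps)  # 5 imputed data sets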
|
context("flatten_query")
test_that("flatten_query works", {
expect_equal(
flatten_query(list(statsDataId = "0003103532", cdCat01 = c("010800130", "010800140"))),
list(statsDataId = "0003103532", cdCat01 = "010800130,010800140")
)
})
|
test_that("test ksplot", {
hpp_obj <- pp_hpp(lambda = 1)
events <- pp_simulate(hpp_obj, end = 10)
expect_error(pp_ksplot(r = NULL), "No rescaled interevent times provided")
expect_error(pp_ksplot(r = c(0)), "No rescaled interevent times provided")
expect_error(pp_ksplot(r = c(-1, 0, 1)), "Incorrect interevent times provided")
})
|
hopskel <- function(X) {
stopifnot(is.ppp(X))
n <- npoints(X)
if(n < 2) return(NA)
dX <- nndist(X)
U <- runifpoint(n, Window(X))
dU <- nncross(U, X, what="dist")
A <- mean(dX^2)/mean(dU^2)
return(A)
}
hopskel.test <- function(X, ...,
alternative=c("two.sided", "less", "greater",
"clustered", "regular"),
method=c("asymptotic", "MonteCarlo"),
nsim=999
) {
Xname <- short.deparse(substitute(X))
verifyclass(X, "ppp")
W <- Window(X)
n <- npoints(X)
method <- match.arg(method)
alternative <- match.arg(alternative)
if(alternative == "clustered") alternative <- "less"
if(alternative == "regular") alternative <- "greater"
altblurb <-
switch(alternative,
two.sided="two-sided",
less="clustered (A < 1)",
greater="regular (A > 1)")
statistic <- hopskel(X)
switch(method,
asymptotic = {
nn <- 2 * n
p.value <-
switch(alternative,
less = pf(statistic, nn, nn, lower.tail=TRUE),
greater = pf(statistic, nn, nn, lower.tail=FALSE),
two.sided = 2 *
pf(statistic, nn, nn, lower.tail=(statistic < 1)))
pvblurb <- "using F distribution"
},
         MonteCarlo = {
           sims <- numeric(nsim)
           for(i in 1:nsim) {
             Xsim <- runifpoint(n, win=W)
             sims[i] <- hopskel(Xsim)
           }
           ## p-values are computed once, after all simulations are done.
           p.upper <- (1 + sum(sims >= statistic))/(1 + nsim)
           p.lower <- (1 + sum(sims <= statistic))/(1 + nsim)
           p.value <- switch(alternative,
                             less=p.lower,
                             greater=p.upper,
                             two.sided=2*min(p.lower, p.upper))
           pvblurb <- paste("Monte Carlo test based on",
                            nsim, "simulations of CSR with fixed n")
         })
statistic <- as.numeric(statistic)
names(statistic) <- "A"
out <- list(statistic=statistic,
p.value=p.value,
alternative=altblurb,
method=c("Hopkins-Skellam test of CSR", pvblurb),
data.name=Xname)
class(out) <- "htest"
return(out)
}
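## Usage sketch (the 'redwood' point pattern shipped with spatstat is
## assumed to be available):
# hopskel(redwood)
# hopskel.test(redwood, alternative = "clustered")
# hopskel.test(redwood, method = "MonteCarlo", nsim = 199)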
|
combine.select <-
function(sel1=NULL, sel2=NULL, ..., operator="AND", verbose=TRUE) {
cl <- match.call()
sels <- list(sel1, sel2, ...)
if(any(sapply(sels, function(x) !is.null(x) && !inherits(x, "select"))))
stop("Invalid atom select(s)")
rm.inds = sapply(sels, is.null)
sels = sels[!rm.inds]
if(length(sels) == 0)
return(NULL)
else if(length(sels) == 1)
return(sels[[1]])
op.tbl <- c(rep("AND",3), rep("OR",4), rep("NOT",4))
operator <- op.tbl[match(operator, c("AND","and","&","OR","or","|","+","NOT","not","!","-"))]
if(verbose) {
msg <- switch(operator,
AND = " Intersect of selects",
OR = " Union of selects",
NOT = " Select 2 (, 3, ...) is subtracted from select 1",
stop("Unknown operation") )
cat(msg, "\n", sep="")
}
sel <- sels[[1]]$atom
for(i in 2:length(sels)) {
sel <-
switch(operator,
"AND" = intersect(sel, sels[[i]]$atom),
"OR" = sort(union(sel, sels[[i]]$atom)),
"NOT" = setdiff(sel, sels[[i]]$atom),
stop("Unknown operation") )
}
sel <- list(atom = sel, xyz = atom2xyz(sel), call=cl)
if(verbose) { cat(paste(" * Selected a total of:", length(sel$atom), "atoms *\n")) }
class(sel) = "select"
return(sel)
}
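## Usage sketch in the bio3d style this function belongs to (read.pdb and
## atom.select come from that package; the PDB id is illustrative):
# pdb  <- read.pdb("1hel")
# prot <- atom.select(pdb, "protein")
# back <- atom.select(pdb, "backbone")
# combine.select(prot, back, operator = "AND")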
|
brgan<-function(m,n,n.chains,data)
{
cl<-match.call()
cl1<-as.list(cl[-1])
if(cl1$data==deparse(substitute(agedata))){
data<-agedata[,-1]
colnames(data)<-c("age","1","2","3","4")
}else{
data<-data
}
data1=function(d){
data1=matrix(ncol=ncol(d),nrow=nrow(d))
for(i in 1:nrow(d))
{
for(j in 1:ncol(d)){
      data1[i,j]=ifelse(is.na(d[i,j]), 0.0001, d[i,j])
}
}
data1
}
age<-data$age
time<-as.numeric(colnames(data[,-which(colnames(data)=="age")]))[m:n]
data<-data1(data[-1,])
data<-data[,m:n]
N <- nrow(data)
M <- n-m+1
Y <-data
mdata<-list("N","M","Y",
"age","time"
)
mreg103A<-function(){
for (i in 1:4){beta[i] ~ dnorm(0.0, 0.001)}
for (i in 1:N){age[i]~dnorm(vu, taua)}
vu~dnorm(.001,.001)
taua~dgamma(.001,.001)
sigmaa<-1/taua
for(i in 1:N){for (j in 2:M){Y[i,j]~dnorm(mu[i,j],tau)}}
for(i in 1:N){for (j in 1:M){Z[i,j]~dnorm(mu[i,j],tau)}}
for(i in 1:N){for (j in 2:M){mu[i,j]<-beta[1]*(1-rho)+
beta[2]*(time[j]-rho*time[j-1])+beta[3]*(time[j]*time[j]-rho*
time[j-1]*time[j-1])+rho*Y[i,j-1]+beta[4]*age[i]*(1-rho)}}
for(i in 1:N){Y[i,1]~dnorm(mu[i,1],tau)}
for(i in 1:N){mu[i,1]<-beta[1]+beta[2]*time[1]+beta[3]*
time[1]*time[1]+beta[4]*age[i]}
rho~dbeta(1,1)
tau~dgamma(.001,.001)
sigma<-1/tau
}
jagsfit1 <- jags( model.file=mreg103A,
data =mdata,
n.chains=n.chains,parameters.to.save =c('beta','sigma') )
jagsfit1
}
utils::globalVariables(c("taua","rho","agedata"))
|
context("Queue endpoint")
tenant <- Sys.getenv("AZ_TEST_TENANT_ID")
app <- Sys.getenv("AZ_TEST_APP_ID")
password <- Sys.getenv("AZ_TEST_PASSWORD")
subscription <- Sys.getenv("AZ_TEST_SUBSCRIPTION")
if(tenant == "" || app == "" || password == "" || subscription == "")
skip("Authentication tests skipped: ARM credentials not set")
rgname <- Sys.getenv("AZ_TEST_STORAGE_RG")
storname <- Sys.getenv("AZ_TEST_STORAGE_HNS")
if(rgname == "" || storname == "")
skip("Queue client tests skipped: resource names not set")
sub <- AzureRMR::az_rm$new(tenant=tenant, app=app, password=password)$get_subscription(subscription)
stor <- sub$get_resource_group(rgname)$get_storage_account(storname)
options(azure_storage_progress_bar=FALSE)
qu <- stor$get_queue_endpoint()
test_that("Queue endpoint works",
{
qu2 <- queue_endpoint(stor$properties$primaryEndpoints$queue, key=stor$list_keys()[1])
expect_is(qu, "queue_endpoint")
expect_identical(qu, qu2)
expect_true(is_empty(list_storage_queues(qu)))
name1 <- make_name()
sq <- storage_queue(qu, name1)
create_storage_queue(sq)
create_storage_queue(qu, make_name())
create_storage_queue(paste0(qu$url, make_name()), key=qu$key)
lst <- list_storage_queues(qu)
expect_true(is.list(lst) && inherits(lst[[1]], "StorageQueue") && length(lst) == 3)
expect_identical(sq$name, lst[[name1]]$name)
expect_silent(delete_storage_queue(sq, confirm=FALSE))
})
teardown({
lst <- list_storage_queues(qu)
lapply(lst, delete_storage_queue, confirm=FALSE)
})
|
service <- Sys.getenv("_R_CHECK_HAVE_ODBC_")
if(identical(as.logical(service), TRUE)) {
cat("************** RODBC Examples ******************************\n")
cat("**************************************************************\n")
cat("* WARNING: THIS OVERWRITES TABLES IN TEST DATABASE ON SERVER**\n")
cat("**************************************************************\n")
dbname <- Sys.getenv("ODBC_DATABASE")
if ("" == dbname) dbname <- "test"
require("RODBC")
cat("**********setup 0\n")
user <- Sys.getenv("ODBC_USER")
if ("" != user) {
host <- Sys.getenv("ODBC_HOST")
if ("" == host) host <- Sys.info()["nodename"]
passwd <- Sys.getenv("ODBC_PASSWD")
if ("" == passwd) passwd <- NULL
setup <- RODBC::odbcConnect(dsn=dbname, uid=user, pwd=passwd)
}else
setup <- RODBC::odbcConnect(dsn=dbname)
require("TSodbc")
cat("**********setup 1\n")
TSsql::removeTSdbTables(setup, yesIknowWhatIamDoing=TRUE, ToLower=TRUE)
cat("**********setup 2\n")
TSsql::createTSdbTables(setup, index=FALSE)
cat("**********setup 3\n")
RODBC::odbcClose(setup)
detach(package:RODBC)
require("TSodbc")
con <- if ("" != user)
tryCatch(TSconnect("odbc", dbname=dbname, username=user, password=passwd, host=host))
else tryCatch(TSconnect("odbc", dbname=dbname))
if(inherits(con, "try-error")) cat("CreateTables did not work.\n")
else {
source(system.file("TSsql/Populate.TSsql", package = "TSsql"))
source(system.file("TSsql/TSdbi.TSsql", package = "TSsql"))
source(system.file("TSsql/dbGetQuery.TSsql", package = "TSsql"))
source(system.file("TSsql/HistQuote.TSsql", package = "TSsql"))
cat("************** removing test database tables\n")
TSsql::removeTSdbTables(con, yesIknowWhatIamDoing=TRUE, ToLower=TRUE)
cat("************** disconnecting test\n")
dbDisconnect(con)
}
} else {
cat("ODBC not available. Skipping tests.\n")
cat("_R_CHECK_HAVE_ODBC_ setting ", service, "\n")
}
|
return.mean.enveloppe <- function(
extrema,
data,
zcol = "z",
method = "splines",
n.pts.spline = 3,
verbose = TRUE
){
if (method == "splines") {
min.data <- as.data.frame(list(x=extrema$min$x,y=extrema$min$y,z=extrema$min$value))
max.data <- as.data.frame(list(x=extrema$max$x,y=extrema$max$y,z=extrema$max$value))
names(min.data) <- names(max.data) <- c('x','y','z')
interp.min <- mba.points(min.data, coordinates(data), verbose = FALSE)
interp.max <- mba.points(max.data, coordinates(data), verbose = FALSE)
extrema.min.surf <- as.data.frame(interp.min$xyz.est)
extrema.max.surf <- as.data.frame(interp.max$xyz.est)
names(extrema.min.surf) <- names(extrema.max.surf) <- c('x','y','z')
}
else {
stop("No other interpolation method that multi-level B splines had been implemented for the moment. Please use the splines option.\n")
}
mean.enveloppe <- as.data.frame(coordinates(data))
mean.enveloppe[[zcol]] <- rowMeans(cbind(extrema.max.surf$z,extrema.min.surf$z))
coordinates(mean.enveloppe) <- ~x+y
return(mean.enveloppe)
}
|
lnLCF <- function(x, data, fixed.parameters=NULL, parallel=TRUE, verbose=FALSE) {
x <- c(x, fixed.parameters)
if (verbose) d(x)
  ml <- suppressWarnings(floor(as.numeric(gsub("[a-zA-Z]+", "", names(x)))))
if ((length(ml) == 1) | all(is.na(ml)) | (max(c(0, ml), na.rm=TRUE)==0)) {
mln <- 1
} else {
mln <- max(ml, na.rm=TRUE)
}
MaxNests <- max(dim(data)[c(1, 2)])-1
mu <- x[(substr(names(x), 1, 2)=="mu") & (substr(names(x), 1, 3) != "mu_")]
if (mln > 1) {
if (any(names(mu)=="mu")) {
mu_ref <- mu[names(mu)=="mu"]
mu_ec <- NULL
for (i in 1:mln) {
if (all(!grepl(paste0("mu", i), names(mu)))) {
mu_ec <- c(mu_ec, structure(unname(mu_ref), .Names=paste0("mu", i)))
} else {
mu_ec <- c(mu_ec, mu[grepl(paste0("mu", i), names(mu))])
}
}
mu <- mu_ec
}
if (any(grepl("mu\\.+", names(mu)))) {
mu_ref <- mu[grepl("mu\\.+", names(mu))]
mu_ec <- NULL
for (i in 1:mln) {
if (all(!grepl(paste0("mu", i), names(mu)))) {
mu_ec <- c(mu_ec, structure(unname(mu_ref), .Names=gsub("mu(\\.[0-9]+)", paste0("mu", i, "\\1"), names(mu_ref))))
} else {
mu_ec <- c(mu_ec, mu[grepl(paste0("mu", i), names(mu))])
}
}
mu <- mu_ec
}
} else {
names(mu) <- gsub("mu[0-9]*(\\.*[0-9]*)", "mu1\\1", names(mu))
}
sd <- x[(substr(names(x), 1, 2)=="sd") & (substr(names(x), 1, 3) != "sd_")]
if (identical(sd, structure(numeric(0), .Names = character(0)))) sd <- c(sd=NA)
if (length(sd)>1) sd <- sd[order(as.numeric(gsub("sd([0-9]+)", "\\1", names(sd))))]
sd <- structure(c(sd, rep(sd[length(sd)], mln-length(sd))), .Names=paste0("sd", 1:mln))
mu_season <- x[substr(names(x), 1, 9)=="mu_season"]
if (length(mu_season)>1) mu_season <- mu_season[order(as.numeric(gsub("mu_season([0-9]+)", "\\1", names(mu_season))))]
if (!identical(mu_season, structure(numeric(0), .Names = character(0)))) {
mu_season <- c(mu_season, rep(mu_season[length(mu_season)], mln-length(mu_season)))
names(mu_season) <- paste0("mu_season", 1:mln)
} else {
mu_season <- NA
}
sd_season <- x[substr(names(x), 1, 9)=="sd_season"]
if (length(sd_season)>1) sd_season <- sd_season[order(as.numeric(gsub("sd_season([0-9]+)", "\\1", names(sd_season))))]
if (!identical(sd_season, structure(numeric(0), .Names = character(0)))) {
sd_season <- c(sd_season, rep(sd_season[length(sd_season)], mln-length(sd_season)))
names(sd_season) <- paste0("sd_season", 1:mln)
} else {
sd_season <- NA
}
a <- x[substr(names(x), 1, 1)=="a"]
if (identical(a, structure(numeric(0), .Names = character(0)))) {
a <- structure(rep(Inf, mln), .Names=paste0("a", 1:mln))
}
  if (any(names(a) == "a")) names(a)[names(a) == "a"] <- "a1"
a_int <- structure(rep(Inf, mln), .Names=paste0("a", 1:mln))
a_int[names(a)] <- a
a <- 1/(1 + exp(-a_int))
if (mln>1) {
OTN <- abs(x[substr(names(x), 1, 3)=="OTN"])
if (length(OTN)>1) OTN <- OTN[order(as.numeric(gsub("OTN([0-9]+)", "\\1", names(OTN))))]
OTN <- c(OTN, 1)
OTN <- c(OTN, rep(OTN[length(OTN)], mln-length(OTN)))
names(OTN) <- paste0("OTN", 1:mln)
OTN <- OTN/sum(OTN)
} else {
OTN <- c(OTN1=1)
}
p <- x[substr(names(x), 1, 1)=="p"]
OCFECF <- data
OCFECF[] <- 0
for (j in 1:mln) {
pcommon <- p[(names(p)=="p") | (names(p) == paste0("p", as.character(j)))][1]
if (dim(data)[3]>1) {
p_period <- structure(rep(pcommon, dim(data)[3]-MaxNests),
.Names=paste0("p", as.character(j), ".",
formatC(1:(dim(data)[3]-MaxNests), width=2, flag="0")))
} else {
p_period <- structure(pcommon,
.Names=paste0("p", as.character(j)))
}
m1 <- match(names(p), names(p_period))
if (all(is.na(m1))) m1 <- match(names(p), paste0("p.",
formatC(1:(dim(data)[3]-MaxNests), width=2, flag="0")))
p_period[m1[!is.na(m1)]] <- p[!is.na(m1)]
p_period <- 1/(1+exp(-p_period))
p_period[m1[!is.na(m1)]] <- p_period[m1[!is.na(m1)]] * a[paste0("a", j)]
nm <- paste0("p", as.character(j))
OCFECF <- OCFECF+ ECFOCF_f(mu=mu[grepl(paste0("mu", j), names(mu))],
sd=sd[paste0("sd", j)],
p=p_period[substr(names(p_period), 1, nchar(nm))==nm],
mu_season = mu_season[paste0("mu_season", j)],
sd_season = sd_season[paste0("sd_season", j)],
MaxNests=MaxNests,
length_season=dim(data)[3]-MaxNests,
parallel=parallel) * OTN[paste0("OTN", j)]
}
if (any(is.na(OCFECF[1, 1, ]))) {
return(+Inf)
} else {
OCF0 <- sum(OCFECF[1, 1, ])
OCFECF <- OCFECF[-1, -1, ]/(1-OCF0)
OCFECF <- log(OCFECF)
OCFECF <- OCFECF*data[-1, -1, ]
LnL <- -sum(OCFECF, na.rm = TRUE)
return(LnL)
}
}
|
library(disk.frame)
setup_disk.frame(12)
a = disk.frame("c:/data/airontimecsv.df/")
system.time(b <- a %>%
group_by(YEAR, MONTH, DAY_OF_MONTH) %>%
summarise(sum(DEP_DELAY)) %>%
collect)
path_to_data <- "c:/data/"
rows = 148619656
recommended_nchunks = recommend_nchunks(file.size(file.path(path_to_data, "combined.csv")))
in_chunk_size = ceiling(rows/ recommended_nchunks)
path_to_data = "c:/data/AirOnTimeCSV/"
system.time(a <- csv_to_disk.frame(
list.files(path_to_data, pattern = ".csv$", full.names = TRUE),
outdir = file.path("c:/data/", "airontimecsv.df"),
colClasses = list(character = c("WHEELS_OFF", "WHEELS_ON"))
))
system.time(flights.df <- csv_to_disk.frame(
paste0(path_to_data, "combined.csv"),
outdir = paste0(path_to_data, "combined.laf.df"),
in_chunk_size = in_chunk_size,
backend = "LaF"
))
system.time(a <- csv_to_disk.frame(
file.path(path_to_data, "combined.csv"),
outdir = file.path(path_to_data, "combined.readr.df"),
in_chunk_size = in_chunk_size,
colClasses = list(character = c("WHEELS_OFF","WHEELS_ON")),
chunk_reader = "readr"
))
system.time(a <- csv_to_disk.frame(
file.path(path_to_data, "combined.csv"),
outdir = file.path(path_to_data, "combined.readLines.df"),
in_chunk_size = in_chunk_size,
colClasses = list(character = c("WHEELS_OFF","WHEELS_ON")),
chunk_reader = "readLines"
))
|
CalcGradEtaPersRSInts <- function(d1, d2, d3, Li, Ri, Q, fit.cox.rs.ints, pts.for.ints, tm, n.etas.per.fit)
{
n <- length(Ri)
n.fits <- length(pts.for.ints)
deriv.ell.etas <- matrix(nrow = n, ncol = sum(n.etas.per.fit), 0)
for (j in 1:n.fits)
{
point <- pts.for.ints[j]
fit.cox.int <- fit.cox.rs.ints[[j]]
eta.b <- fit.cox.int$b
eta.g <- fit.cox.int$g
n.g <- length(eta.g)
n.b <- length(eta.b)
knots <- fit.cox.int$knots
order <- fit.cox.int$order
in.risk.set <- tm >= point
n.set <- sum(in.risk.set)
Li.int <- Li[in.risk.set]
Ri.int <- Ri[in.risk.set]
d1.int <- d1[in.risk.set]
d2.int <- d2[in.risk.set]
d3.int <- d3[in.risk.set]
Q.int <- Q[in.risk.set,]
expQb <- as.vector(exp(Q.int%*%eta.b))
ti <- c(Li.int[d1.int == 0], Ri.int[d3.int == 0])
ti.max <- max(ti) + 1e-05
ti.min <- min(ti) - 1e-05
bRi <- t(ICsurv::Ispline(x = Ri.int, order = order, knots = knots))
bLi <- t(ICsurv::Ispline(x = Li.int, order = order, knots = knots))
GRi <- as.vector(bRi %*% eta.g)
GLi <- as.vector(bLi %*% eta.g)
HRi <- as.vector(GRi*expQb)
HLi <- as.vector(GLi*expQb)
SRi <- exp(-HRi)
SLi <- exp(-HLi)
FRi <- 1-SRi
FLi <- 1-SLi
term.deriv.etab.d1 <- Q.int*(SRi*HRi/FRi)
term.deriv.etab.d2 <- Q.int*(SRi*HRi -SLi*HLi)/(SLi-SRi)
term.deriv.etab.d3 <- -Q.int*HLi
term.deriv.etag.d1 <- bRi*(SRi*expQb/FRi)
term.deriv.etag.d2 <- expQb*(SRi*bRi -SLi*bLi)/(SLi-SRi)
term.deriv.etag.d3 <- -bLi*expQb
deriv.ell.etag.int <- matrix(nrow = n.set, ncol = n.g)
deriv.ell.etab.int <- matrix(nrow = n.set, ncol = n.b)
  deriv.ell.etab.int[d1.int==1,] <- term.deriv.etab.d1[d1.int==1,]
  deriv.ell.etab.int[d2.int==1,] <- term.deriv.etab.d2[d2.int==1,]
  deriv.ell.etab.int[d3.int==1,] <- term.deriv.etab.d3[d3.int==1,]
  deriv.ell.etag.int[d1.int==1,] <- term.deriv.etag.d1[d1.int==1,]
  deriv.ell.etag.int[d2.int==1,] <- term.deriv.etag.d2[d2.int==1,]
  deriv.ell.etag.int[d3.int==1,] <- term.deriv.etag.d3[d3.int==1,]
deriv.ell.etas.int <- cbind(deriv.ell.etab.int,deriv.ell.etag.int)
if (j > 1) {
deriv.ell.etas[in.risk.set, (sum(n.etas.per.fit[1:(j-1)]) + 1):sum(n.etas.per.fit[1:j])] <- deriv.ell.etas.int
} else {
deriv.ell.etas[in.risk.set, 1:n.etas.per.fit[1]] <- deriv.ell.etas.int
}
}
return(deriv.ell.etas)
}
|
source("ESEUR_config.r")
library("plyr")
library("vioplot")
normalise_pert=function(df)
{
ao=subset(df, Perturb_str == "AddOne")
perc_succ=trunc(100*ao$Success/ao$Num_Perturb)
perc_succ[is.nan(perc_succ)]=0
perc_succ[is.infinite(perc_succ)]=0
NormLoc=rep(100*ao$IndexLoc/max(ao$IndexLoc),
times=perc_succ)
return(data.frame(Program=df$Program[1], NormLoc))
}
ptb=read.csv(paste0(ESEUR_dir, "reliability/1611-09187a.csv.xz"), as.is=TRUE)
subjects=c("quicksort", "zip", "sudoku", "md5", "rsa", "rc4", "canny", "lcs", "laguerre", "linreg")
ptb=subset(ptb, Program %in% subjects)
vp=ddply(ptb, .(Program), normalise_pert)
pal_col=rainbow(length(unique(vp$Program)))
vioplot(NormLoc ~ Program, data=vp,
h=1.1,
horizontal=TRUE,
col=pal_col, border=pal_col,
xaxs="i",
xlab="Perturbation location (normalised)", ylab="")
|
BuildArrayNumberOfDataItems <- function(x){
n_rows_data <- dim(x)[1]
n_cols_data <- dim(x)[2]
nt <- array(0,c(1,n_rows_data))
for (i in 1:n_rows_data){
nt[i] <- match(0,x[i,])
if(is.na(nt[i])) nt[i] <- n_cols_data
}
return(nt)
}
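## Quick check: match(0, row) yields the position of the first 0 in each
## row, with ncol(x) substituted when a row contains no 0:
# BuildArrayNumberOfDataItems(rbind(c(2, 1, 0, 0), c(3, 3, 3, 3)))  # 3, 4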
BuildContingencyTable <- function(y,order){
n_rows_data <- y@N
l<-list()
for (g in 1:order){
c=array(0,c(y@K,y@K))
for (i in 1:n_rows_data){
for (t in 1:(y@T[i]-g)){
past <- y@y[[i]][t]
present <- y@y[[i]][t+g]
        if(all(c(past,present) >= 1 & c(past,present) <= y@K)){
c[past,present] <- c[past,present] + y@weights[i]
}
}
}
l[[g]]<-c
}
if(y@Ncov>0){
for(j in 1:y@Ncov){
kcov=y@Kcov[j]
CT=matrix(0,kcov,y@K)
for(n in 1:y@N){
for (i in 1:y@T[n]){
row=y@cov[n,i,j]
col=y@y[[n]][i]
CT[row,col]=CT[row,col]+y@weights[n]
}
}
l[[order+j]]<-CT
}
}
return(l)
}
NormalizeTable <- function(x){
nx <- array(NA,dim=dim(x))
m <- dim(x)[1]
sums <- rowSums(x)
for (i in 1:m){
if (sums[i]!=0) nx[i,] <- x[i,]/sums[i]
}
return(nx)
}
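## Quick check: each row of the result is rescaled to sum to 1; rows whose
## sum is 0 are left as NA:
# NormalizeTable(matrix(c(1, 1, 0, 0, 2, 2), nrow = 2, byrow = TRUE))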
CalculateTheilU <- function(y,order,c){
u <- array(data=NA,dim=c(1,order+y@Ncov))
for (g in 1:order){
cg <- c[[g]]
tc <- sum(cg)
sr <- rowSums(cg)
sc <- colSums(cg)
num <- 0
for (i in 1:y@K){
for (j in 1:y@K){
if (cg[i,j]!=0){
num <- num + cg[i,j] * (log2(sc[i]) + log2(sr[j]) - log2(cg[i,j]) - log2(tc))
}
}
}
den <- 0
for (j in 1:y@K){
if(sc[j]!=0 & tc!=0){
den <- den + sc[j] * (log2(sc[j]) - log2(tc))
}
}
if(den != 0) u[g] = num/den
else u[g] = NaN
}
if(y@Ncov>0){
for (g in 1:y@Ncov){
cg <- c[[order+g]]
tc <- sum(cg)
sr <- rowSums(cg)
sc <- colSums(cg)
num <- 0
for (i in 1:y@Kcov[g]){
for (j in 1:y@K){
if (cg[i,j]!=0){
num <- num + cg[i,j] * (log2(sc[j]) + log2(sr[i]) - log2(cg[i,j]) - log2(tc))
}
}
}
den <- 0
for (j in 1:y@K){
if(sc[j]!=0 & tc!=0){
den <- den + sc[j] * (log2(sc[j]) - log2(tc))
}
}
if(den != 0) u[order+g] = num/den else u[order+g] = NaN
}
}
return(u)
}
InitializeParameters <- function(u,init_method,c,is_mtdg,m,order,kcov,ncov){
phi <- u/(sum(u))
if (is_mtdg){
q <- array(NA,c(order,m,m))
for (g in 1:order){
q[g,,] <- NormalizeTable(c[[g]])
}
} else {
if (init_method == "weighted"){
q <- array(0,c(1,m,m))
q_tilde <- array(0,dim=c(1,m,m))
for (g in 1:order){
q_tilde[1,,] <- q_tilde[1,,] + u[g] * c[[g]]
}
q[1,,] <- NormalizeTable(q_tilde[1,,])
} else if (init_method == "best"){
u_tilde<-u[1:order]
k <- which.max(u_tilde)
q <- array(0,c(1,m,m))
q[1,,] <- NormalizeTable(c[[k]])
} else if (init_method == "random"){
q <- 0.1 + array(runif(m*m),c(1,m,m))
q[1,,] <- q/rowSums(q[1,,])
} else{
stop("Init parameter should be either, \"best\", \"random\" or \"weighted\"",call.=FALSE)
}
}
S=list()
if(ncov>0){
for(i in 1:ncov){
S[[i]]=matrix(0,kcov[i],m)
S[[i]]<-NormalizeTable(c[[order+i]])
}
}
return(list(phi=phi,q=q,S=S))
}
BuildArrayCombinations <- function(m,l,kcov,ncov){
tCovar <- 1
if(prod(kcov)>0){
tCovar<-prod(kcov)
}
i0_il <- array(0,c(m^(l+1)*tCovar,l+1+ncov))
values <- 1:m
for(i in 1:(l+1)){
i0_il[,i] <- t(kronecker(values,rep(1,m^(l+1)*tCovar/m^i)))
values <- kronecker(rep(1,m),values)
}
if(ncov>0){
totm <- tCovar
totp <- m^(l+1)
for(i in 1:ncov){
totm <- totm/kcov[i]
values <- 1:kcov[i]
i0_il[,l+1+i] <- kronecker(rep(1,totp),kronecker(values,rep(1,totm)))
totp <- totp*kcov[i]
}
}
return(i0_il)
}
BuildArrayNumberOfSequences <- function(y,order){
if(y@Ncov>0){
tCovar<-prod(y@Kcov)
}else{
tCovar=1
}
n_i0_il <- array(0,dim=c(y@K^(order+1)*tCovar,1))
for(n in 1:y@N){
for(t in march.h.seq(1,y@T[n]-order)){
pos <- y@y[[n]][t:(t+order)]
row1=pos[1]
for(g in 2:(order+1)){
row1=row1+(y@K)^(g-1)*(pos[g]-1)
}
if(y@Ncov>0){
posCov <- y@cov[n,t+order,]
Covar<-tCovar
row2=posCov[y@Ncov]
totKC=1
if(y@Ncov>1){
for(i in (y@Ncov-1):1 ){
totKC=totKC*y@Kcov[i+1]
row2=row2+totKC*(y@cov[n,t+order,i]-1)
}
}
}
if(y@Ncov>0){
row1=(row1-1)*tCovar+row2
}
n_i0_il[row1]<-n_i0_il[row1]+1
}
}
return(n_i0_il)
}
BuildArrayQ <- function(m,l,i0_il,n_i0_il,q,kcov,ncov,S){
if(ncov>0){
tCovar<-prod(kcov)
}else{
tCovar=1
}
q_i0_il <- array(0,c(m^(l+1)*tCovar,l+ncov))
for (i in 1:length(n_i0_il)){
if( dim(q)[1]>1){
for (j in 1:l){
q_i0_il[i,j] <- q[j,i0_il[i,j+1],i0_il[i,1]]
}
}else {
for (j in 1:l){
q_i0_il[i,j] <- q[1,i0_il[i,j+1],i0_il[i,1]]
}
}
if(ncov>0){
for(j in 1:ncov){
q_i0_il[i,l+j] <- S[[j]][i0_il[i,l+1+j],i0_il[i,1]]
}
}
}
return(q_i0_il)
}
CalculateLogLikelihood <- function(n_i0_il,q_i0_il,phi){
ll <- 0
for (i in 1:length(n_i0_il)){
if(n_i0_il[i] > 0){
ll <- ll + n_i0_il[i] * log(q_i0_il[i,]%*%t(phi) )
}
}
ll
}
PartialDerivativesPhi <- function(n_i0_il,q_i0_il,l,phi,ncov){
pd_phi <- rep(0,l+ncov)
for (k in 1:length(n_i0_il)){
for (m in 1:(l+ncov)){
if (n_i0_il[k] > 0 && (q_i0_il[k,] %*% t(phi)) != 0){
pd_phi[m] <- pd_phi[m] + n_i0_il[k] * q_i0_il[k,m] / (q_i0_il[k,] %*% t(phi))
}
}
}
return(pd_phi)
}
PartialDerivativesQ <- function(n_i0_il,i0_il,q_i0_il,m,phi,order){
pd_q <- array(0,dim=c(m,m))
for (i in 1:length(n_i0_il)){
for (j in 1:order){
if (n_i0_il[i] > 0 && (q_i0_il[i,] %*% t(phi)) != 0){
pd_q[i0_il[i,j+1],i0_il[i,1]] <- pd_q[i0_il[i,j+1],i0_il[i,1]] + n_i0_il[i] * phi[j] / ( q_i0_il[i,] %*% t(phi) )
}
}
}
return(pd_q)
}
PartialDerivativesS<- function(CCov,ColVT,k,kcov,n_i0_il,q_i0_il,i0_il,phi){
pd_s<-array(0,dim=c(kcov,k))
nc=length(n_i0_il)
for (k in 1:nc){
if (n_i0_il[k] > 0 && (q_i0_il[k,] %*% t(phi)) != 0){
pd_s[i0_il[k,ColVT],i0_il[k,1]]<-pd_s[i0_il[k,ColVT],i0_il[k,1]]+n_i0_il[k]*phi[CCov]/(q_i0_il[k,] %*% t(phi))
}
}
return(pd_s)
}
OptimizePhi <- function(phi,pd_phi,delta,is_constrained,delta_stop,ll,n_i0_il,q_i0_il,k){
delta_it <- delta[1]
i_inc <- which.max(pd_phi)
i_dec <- which.min(pd_phi)
par_inc <- phi[i_inc]
par_dec <- phi[i_dec]
if (is_constrained){
if (par_inc == 1){
return(list(phi=phi,ll=ll,delta=delta))
}
if ( par_dec == 0 ){
pd_phi_sorted <- sort(pd_phi,index.return=TRUE)
i_dec <- pd_phi_sorted$ix[min(which(phi[pd_phi_sorted$ix]>0))]
par_dec <- phi[i_dec]
}
}
while(TRUE){
if(is_constrained){
delta_it <- min(c(delta_it,1-par_inc,par_dec))
}
new_phi <- phi
new_phi[i_inc] <- par_inc + delta_it
new_phi[i_dec] <- par_dec - delta_it
    ## Compute the candidate log-likelihood first so it is defined before
    ## the (unconstrained-case) early return below can use it.
    new_ll <- CalculateLogLikelihood(n_i0_il,q_i0_il,new_phi)
    if (!is_constrained){
      t <- sum(new_phi[new_phi >= 0])
      for (i in 1:k){
        q_min <- min(q[i,])
        q_max <- max(q[i,])
        if (t*q_min + (1-t)*q_max < 0){
          return(list(phi=new_phi,ll=new_ll,delta=delta))
        }
      }
    }
if (new_ll > ll){
if (delta_it == delta[1]){
delta[1] <- 2*delta[1]
}
return(list(phi=new_phi,ll=new_ll,delta=delta))
} else {
if (delta_it <= delta_stop) {
delta[1] <- 2*delta[1]
return(list(phi=phi,ll=ll,delta=delta))
}
delta_it <- delta_it/2
}
}
}
OptimizeQ <- function(q,j,pd_q,delta,delta_stop,ll,n_i0_il,q_i0_il,phi,i0_il,k,l,g,kcov,ncov,S){
delta_it <- delta[j+1]
i_inc <- which.max(pd_q[j,])
i_dec <- which.min(pd_q[j,])
par_inc <- q[g,j,i_inc]
par_dec <- q[g,j,i_dec]
if (par_inc == 1){
return(list(q=q,q_i0_il=q_i0_il,ll=ll,delta=delta))
}
if ( par_dec == 0 ){
pd_q_sorted <- sort(pd_q[j,],index.return=TRUE)
i_dec <- pd_q_sorted$ix[min(which(q[g,j,pd_q_sorted$ix]>0))]
par_dec <- q[g,j,i_dec]
}
while(TRUE){
delta_it <- min(c(delta_it,1-par_inc,par_dec))
new_q_row <- q[g,j,]
new_q_row[i_inc] <- par_inc + delta_it
new_q_row[i_dec] <- par_dec - delta_it
new_q <- q
new_q[g,j,] <- new_q_row
new_q_i0_il <- BuildArrayQ(k,l,i0_il,n_i0_il,new_q, kcov,ncov,S)
new_ll <- CalculateLogLikelihood(n_i0_il,new_q_i0_il,phi)
if (new_ll > ll){
if (delta_it == delta[j+1]){
delta[j+1] <- 2*delta[j+1]
}
return(list(q=new_q,q_i0_il=new_q_i0_il,ll=new_ll,delta=delta))
} else {
if (delta_it <= delta_stop) {
delta[j+1] <- 2*delta[j+1]
return(list(q=q,q_i0_il=q_i0_il,ll=ll,delta=delta))
}
delta_it <- delta_it/2
}
}
}
OptimizeS <-function(order,k,kcov,ncov,S,Tr,phi,pcol,ll,pd_s,delta,delta_stop,n_i0_il,i0_il,q_i0_il,q,n){
delta_it<-delta
i_inc<-which.max(pd_s)
i_dec<-which.min(pd_s)
par_inc<-S[[n]][Tr,i_inc]
par_dec<-S[[n]][Tr,i_dec]
if(par_inc==1){
return(list(S=S,ll=ll,delta=delta,q_i0_il=q_i0_il))
}
if(par_dec==0){
pd_s_sorted<-sort(pd_s,index.return=TRUE)
i_dec<-pd_s_sorted$ix[min(which(S[[n]][Tr,pd_s_sorted$ix]>0))]
par_dec<-S[[n]][Tr,i_dec]
}
delta_it <- min(c(delta_it,1-par_inc,par_dec))
new_S_row<-S[[n]][Tr,]
while(TRUE){
new_S_row[i_inc]<-par_inc+delta_it
new_S_row[i_dec]<-par_dec-delta_it
new_S<-S
new_S[[n]][Tr,]<-new_S_row
new_q_i0_il<-BuildArrayQ(k,order,i0_il,n_i0_il,q,kcov,ncov,new_S)
new_ll <- CalculateLogLikelihood(n_i0_il,new_q_i0_il,phi)
if(new_ll>ll){
if(delta_it==delta){
delta<-2*delta
}
return(list(S=new_S,ll=new_ll,delta=delta,q_i0_il=new_q_i0_il))
}else{
if(delta_it<=delta_stop){
delta<-2*delta
return(list(S=S,ll=ll,delta=delta,q_i0_il=q_i0_il))
}
delta_it<-delta_it/2
}
}
}
march.mtd.construct <- function(y,order,maxOrder=order,mtdg=FALSE,MCovar=0,init="best", deltaStop=0.0001, llStop=0.01, maxIter=0, seedModel=NULL){
order <- march.h.paramAsInteger(order)
if(order<1){
    stop('Order should be greater than or equal to 1')
}
maxOrder <- march.h.paramAsInteger(maxOrder)
if( order>maxOrder ){
stop("maxOrder should be greater or equal than order")
}
ySave <- y
y <- march.dataset.h.filtrateShortSeq(y,maxOrder+1)
y <- march.dataset.h.cut(y,maxOrder-order)
if(sum(MCovar)>0){
placeCovar <- which(MCovar==1)
y@cov <- y@cov[,,placeCovar,drop=FALSE]
y@Ncov <- as.integer(sum(MCovar))
y@Kcov <- y@Kcov[placeCovar]
}else{
y@cov <- array(0,c(1,1))
y@Ncov <- as.integer(0)
y@Kcov <- 0
}
is_constrained <- TRUE
is_mtdg <- mtdg
init_method <- init
c <- BuildContingencyTable(y,order)
u <- CalculateTheilU(y,order,c)
init_params <- InitializeParameters(u=u,init_method=init_method,c=c,is_mtdg=is_mtdg,m=y@K,order=order,kcov=y@Kcov,ncov=y@Ncov)
phi <- init_params$phi
q <- init_params$q
S <- init_params$S
if (class(seedModel)=="march.Mtd"){
for (i in 1:(order+y@Ncov)){
phi[i] <- seedModel@phi[i]
}
if (is_mtdg==F){
q[1,,] <- seedModel@Q[1,,]
}else{
for (i in 1:order){
q[i,,] <- seedModel@Q[i,,]
}
}
if (sum(MCovar)>0){
for (i in 1:sum(MCovar)){
S[[i]] <- seedModel@S[[i]]
}
}
}
if (class(seedModel)=="march.Dcmm"){
for (i in 1:(order+y@Ncov)){
phi[i] <- seedModel@CPhi[1,i,1]
}
if (is_mtdg==F){
q[1,,] <- seedModel@CQ[1,,,1]
}else{
for (i in 1:order){
q[i,,] <- seedModel@CQ[i,,,1]
}
}
if (sum(MCovar)>0){
for (i in 1:sum(MCovar)){
S[[i]] <- seedModel@CTCovar[[i]][,,1]
}
}
}
if(y@Ncov>0){
maxkcov <- max(y@Kcov)
}else{
maxkcov=0
}
delta <- array(0.1,dim=c(1,y@K+1+maxkcov*y@Ncov))
delta_stop <- deltaStop
ll_stop <- llStop
i0_il <- BuildArrayCombinations(y@K,order,y@Kcov,y@Ncov)
n_i0_il <- BuildArrayNumberOfSequences(y,order)
q_i0_il <- BuildArrayQ(y@K,l=order,i0_il=i0_il,n_i0_il = n_i0_il,q=q,kcov=y@Kcov,ncov=y@Ncov,S=S)
new_ll <- CalculateLogLikelihood(n_i0_il=n_i0_il,q_i0_il=q_i0_il,phi=phi)
iter <- 0
while (TRUE){
if( maxIter>0 && iter>=maxIter ){ break }
else{ iter <- iter+1 }
ll <- new_ll
pd_phi <- PartialDerivativesPhi(n_i0_il=n_i0_il,q_i0_il=q_i0_il,l=order,phi = phi,ncov=y@Ncov)
res_opt_phi <- OptimizePhi(phi=phi,pd_phi=pd_phi,delta=delta,is_constrained=is_constrained,delta_stop=delta_stop,ll=ll,n_i0_il=n_i0_il,q_i0_il=q_i0_il)
phi <- res_opt_phi$phi
new_ll <- res_opt_phi$ll
delta <- res_opt_phi$delta
for (j in 1:y@K){
qTmp <- array(NA,c(dim(q)[1],y@K,y@K))
llTmp <- array(NA,c(dim(q)[1]))
pd_q <- PartialDerivativesQ(n_i0_il,i0_il,q_i0_il,y@K,phi=phi,order=order)
for( g in 1:dim(q)[1] ){
res_opt_q <- OptimizeQ(q=q,j=j,pd_q=pd_q,delta=delta,delta_stop=delta_stop,ll=new_ll,n_i0_il=n_i0_il,q_i0_il=q_i0_il,phi=phi,i0_il = i0_il,k=y@K,l=order,g=g, kcov=y@Kcov, ncov=y@Ncov, S=S)
qTmp[g,,] <- res_opt_q$q[g,,]
q_i0_il <- res_opt_q$q_i0_il
llTmp[g] <- res_opt_q$ll
delta <- res_opt_q$delta
}
llMaxId <- which.max(llTmp)
q[llMaxId,,] <- qTmp[llMaxId,,]
}
new_ll <- llTmp[llMaxId]
if(y@Ncov>0){
tot=0
for (i in 1:y@Ncov){
tot=tot+1
pd_s<-PartialDerivativesS(CCov=order+i,ColVT=order+i+1,k=y@K,kcov=y@Kcov[i],n_i0_il=n_i0_il,q_i0_il=q_i0_il,i0_il=i0_il,phi)
for (g in 1:y@Kcov[i]){
res_opt_S<-OptimizeS(order,y@K,y@Kcov,y@Ncov,S,g,phi,order+i,new_ll,pd_s[g,],delta[1+y@K+(tot-1)*maxkcov+g],delta_stop,n_i0_il,i0_il,q_i0_il,q,i)
new_ll<-res_opt_S$ll
S<-res_opt_S$S
delta[1+y@K+(tot-1)*maxkcov+g]<-res_opt_S$delta
}
}
}
if (new_ll - ll < ll_stop){ break }
}
phi <- round(phi,10)
q <- round(q,10)
if(sum(MCovar)>0){
for(i in 1:sum(MCovar)){
S[[i]] <- round(S[[i]], 10)
}
}
nbZeros <- 0
nbZeros <- length(which(q==0))+length(which(phi==0))
if(sum(MCovar)>0){
for(i in 1:sum(MCovar)){
nbZeros <- nbZeros+length(which(S[[i]]==0))
}
}
ll <- as.numeric(ll)
tmCovar <- 1
if(y@Ncov>0){
for (i in 1:y@Ncov){
tmCovar <- tmCovar*y@Kcov[i]
}
}
NSS <- matrix(0,y@K^(order+1)*tmCovar,1)
ValT <- BuildArrayCombinations(y@K,order,y@Kcov,y@Ncov)
ProbT <- BuildArrayQ(y@K,order,ValT,NSS,q,y@Kcov,y@Ncov,S)
l <- GMTD_tm_cov(order,y@K,phi,matrix(ProbT,y@K^(order+1)*tmCovar,order+y@Ncov))
RA <- l$CRHOQ
new("march.Mtd",RA=RA,order=order,Q=q,phi=phi,S=S,MCovar=MCovar,ll=ll,y=ySave,dsL=sum(y@T-order),nbZeros=nbZeros)
}
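## Usage sketch (the 'pewee' march.Dataset bundled with the march package
## is assumed, following the package documentation):
# model <- march.mtd.construct(pewee, order = 2)
# print(model)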
|
"sde.sim.KPS" <-
function(X0, t0, Dt, N, M, d1, d1.x, d1.xx, s1, s1.x, s1.xx, Z, U){
return( .Call("sde_sim_KPS", X0, t0, Dt, as.integer(N), as.integer(M),
d1, d1.x, d1.xx, s1, s1.x, s1.xx, Z, U, .GlobalEnv, PACKAGE="sde") )
}
|
letterObject <- function(ch, fontfamily="Helvetica", fontsize=576, dim=c(480, 480)) {
fname <- tempfile(pattern = "file", fileext=".jpg")
jpeg(filename=fname, width=dim[1], height=dim[2])
grid.newpage()
grid.rect(x = 0, y=0, width=3, height=3,
gp=gpar(fill="black"), draw = TRUE, vp = NULL)
grid.text(ch, 0.5,0.5, gp=gpar(fontsize=fontsize, fontfamily=fontfamily, col="white"))
dev.off()
readJPEG(fname)
}
scaleTo <- function(x, fromRange=range(x), toRange=c(0,1)) {
x <- as.numeric(as.character(x))
(x-fromRange[1])/diff(fromRange)*diff(toRange) + toRange[1]
}
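## Quick check: linear rescaling between ranges.
# scaleTo(0:10)                      # maps onto [0, 1]
# scaleTo(0:10, toRange = c(-1, 1))  # maps onto [-1, 1]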
fortify<-function(model, data, ...){
UseMethod("fortify")
}
fortify.default <- function(model, data, ...) {
dims <- dim(model)
imdf <- adply(model, .margins=1, function(x) x)
imdf$x <- rep(1:dims[2], length=nrow(imdf))
names(imdf) <- c("y", "red", "green", "blue", "x")
imdf$y <- -as.numeric(as.character(imdf$y))
imdf[,c("x", "y", "red", "green", "blue")]
}
getOutline <- function(imdf, var="red", threshold=0.5) {
stopifnot(c("x", "y") %in% names(imdf))
x <- NA
y <- NA
edgesY <- ddply(imdf, .(y), function(dframe) {
idx <- which(dframe[, var] > threshold)
dx <- diff(sort(dframe$x[idx]))
nintervals <- sum(dx>1)+1
jdx <- which(dx > 1)
start <- idx[c(1, jdx+1)]
end <- idx[c(jdx, length(idx))]
dframe[c(start, end),]
})
edgesX <- ddply(imdf, .(x), function(dframe) {
idx <- which(dframe[, var] > threshold)
dx <- diff(sort(-dframe$y[idx]))
nintervals <- sum(dx>1)+1
jdx <- which(dx > 1)
start <- idx[c(1, jdx+1)]
end <- idx[c(jdx, length(idx))]
dframe[c(start, end),]
})
outline <- na.omit(unique(rbind(edgesX, edgesY)))
outline[,c("x", "y", "red", "green", "blue")]
}
determineOrder <- function (x, y) {
determineNext <- function(now, left) {
x1 <- x[now]
y1 <- y[now]
dists <- (x1-x[left])^2 + (y1-y[left])^2
left[which.min(dists)]
}
order <- 1
leftover <- c(1:length(x))[-order]
now <- order
while (length(leftover) > 0) {
now <- determineNext(now, leftover)
order <- c(order, now)
leftover <- leftover[-which(leftover==now)]
}
order(order)
}
identifyParts <- function(data, tol = NULL) {
group <- NA
order <- NA
data <- data[order(data$order),]
data$d <- 0
data$d[-1] <- diff(data$x)^2 + diff(data$y)^2
if (is.null(tol)) {
dt <- as.numeric(names(table(data$d)))
tol <- dt[min(which(diff(dt)>2))]
}
idx <- which(data$d > tol)
data$group <- rep(1:(length(idx)+1), diff(c(1, idx, nrow(data)+1)))
dg <- table(data$group)
idx <- which(dg < 2)
if (length(idx) > 0) {
data <- subset(data, !(group %in% idx))
}
data
}
determineDirection <- function(x,y) {
  # Shoelace-style signed-area sum: the sign indicates the winding
  # direction (clockwise vs. counter-clockwise) of the polygon.
  sum(diff(x)*(y[-1]+y[-length(y)]))
}
setDirection <- function(polygon, setdir=1) {
getdir <- determineDirection(polygon$x, polygon$y)
if (sign(getdir) != setdir) {
polygon$order <- rev(polygon$order)
}
polygon
}
insertIsland <- function(island, main) {
res <- rbind(main, island, main[nrow(main),])
res$order <- 1:nrow(res)
res
}
completePolygon <- function(polygon) {
polygon <- polygon[order(polygon$order), ]
polygon <- rbind(polygon, polygon[1,])
polygon$order <- 1:nrow(polygon)
polygon
}
mainPlusIslands <- function(imdf) {
group <- NA
main <- completePolygon(unique(subset(imdf, group==1)))
main <- setDirection(main, 1)
main <- main[order(main$order),]
lpath2 <- main
if (max(imdf$group) > 1) {
islands <- llply(2:max(imdf$group), function(i) {
l2 <- completePolygon(unique(subset(imdf, group==i)))
l2 <- setDirection(l2, -1)
l2[order(l2$order),]
})
for (i in 1:length(islands)) {
lpath2 <- insertIsland(islands[[i]], main=lpath2)
}
}
lpath2
}
simplifyPolygon <- function(points, tol=1) {
points[simplify_rec(points, tol=tol),]
}
letterToPolygon <- function(ch, fontfamily="Helvetica", fontsize=576, tol=1, dim=c(480, 480), threshold=0.5, var="red") {
im <- letterObject(ch, fontfamily=fontfamily, fontsize=fontsize, dim=dim)
imdf <- fortify(im)
outline <- getOutline(imdf, threshold=threshold, var=var)
outline$order <- determineOrder(outline$x, outline$y)
letterpath <- identifyParts(outline, tol=5)
group <- NA
letterpath2 <- ddply(letterpath, .(group), simplifyPolygon, tol=tol)
lpath2 <- mainPlusIslands(letterpath2)
lpath2
}
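# Illustrative usage sketch (added; not part of the original source). Converts
# the glyph "R" to a simplified polygon; plotting with ggplot2's standard
# geom_polygon() layer is one way to inspect the result:
#   rpoly <- letterToPolygon("R")
#   ggplot2::ggplot(rpoly, ggplot2::aes(x = x, y = y)) + ggplot2::geom_polygon()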
|
print.summary.maxim <- function( x,
max.rows=getOption("max.rows", 20),
max.cols=getOption("max.cols", 7),
... ) {
summary <- x
cat("--------------------------------------------\n")
cat(summary$type, "\n")
cat("Number of iterations:", summary$iterations, "\n")
cat("Return code:", summary$code, "\n")
cat(summary$message, "\n")
if(!is.null(summary$unsucc.step)) {
cat("Last (unsuccessful) step: function value", summary$unsucc.step$value,
"\n")
print(summary$unsucc.step$parameters)
}
if(!is.null(summary$estimate)) {
cat("Function value:", summary$maximum, "\n")
cat("Estimates:\n")
printRowColLimits(summary$estimate, max.rows, max.cols, ...)
if(!is.null(summary$hessian)) {
cat("Hessian:\n")
printRowColLimits(summary$hessian, max.rows, max.cols, ...)
}
}
if(!is.null(summary$constraints)) {
cat("\nConstrained optimization based on", summary$constraints$type,
"\n")
if(!is.null(summary$constraints$code))
cat("Return code:", summary$constraints$code, "\n")
if(!is.null(summary$constraints$message))
cat(summary$constraints$message, "\n")
cat(summary$constraints$outer.iterations,
" outer iterations, barrier value",
summary$constraints$barrier.value, "\n")
}
cat("--------------------------------------------\n")
}
summary.maxim <- function(object, hessian=FALSE, unsucc.step=FALSE,
... ) {
nParam <- length(object$estimate)
if(object$code == 3 & unsucc.step) {
a <- cbind(object$last.step$theta0, object$last.step$theta1)
dimnames(a) <- list(parameter=object$names,
c("current par", "new par"))
unsucc.step <- list(value=object$last.step$f0,
parameters=a)
} else {
unsucc.step <- NULL
}
estimate <- cbind("estimate"=object$estimate, "gradient"=object$gradient)
if(hessian) {
H <- object$hessian
}
else {
H <- NULL
}
summary <- list(maximum=object$maximum,
type=object$type,
iterations=object$iterations,
code=object$code,
message=object$message,
unsucc.step=unsucc.step,
estimate=estimate,
hessian=H,
constraints=object$constraints)
class(summary) <- c("summary.maxim", class(summary))
summary
}
|
dlaplace <-
function(x, mean=0, sd=1) {
if (!is.numeric(x)) stop("'x' must be numeric.")
if (!is.numeric(mean)) stop("'mean' must be numeric.")
if (!is.numeric(sd)) stop("'sd' must be numeric.")
if (sd<0) stop("'sd' cannot be negative.")
if (sd==0) return( dnorm(x, mean, 0) )
exp(-abs(x-mean)*sqrt(2)/sd)/(sd*sqrt(2))
}
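# Quick sanity check (added; not part of the original source). Here 'sd' is
# the standard deviation, i.e. the Laplace scale parameter is b = sd/sqrt(2),
# so the density integrates to 1 over the real line:
#   integrate(dlaplace, -Inf, Inf, mean = 0, sd = 1)$value
#   #> 1 (up to numerical error)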
|
rvn_rvh_blankHRUdf <- function(nHRUs = 1, subbasinIDs=NULL) {
if (is.null(subbasinIDs)) {
subbasinIDs <- rep(1, nHRUs)
}
if (nHRUs < length(subbasinIDs)) {
warning("nHRUs is less than the subbasinIDs specified, table will need to be modified for hydrologic consistency.")
}
df <- data.frame('ID' = 1:nHRUs,
'Area' = 0.0,
'Elevation' = 0.0,
'Latitude' = 0.0,
'Longitude' = 0.0,
'SBID' = subbasinIDs,
'LandUse' = NA,
'Vegetation' = NA,
'SoilProfile'= NA,
'Terrain' = "[NONE]",
'Aquifer' = "[NONE]",
'Slope' = 0.0,
'Aspect' = 0.0)
return(df)
}
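# Illustrative usage sketch (added; not part of the original source): build a
# blank three-HRU table, all assigned to subbasin 1 by default.
#   hru_tbl <- rvn_rvh_blankHRUdf(nHRUs = 3)
#   nrow(hru_tbl)   #> 3
#   hru_tbl$SBID    #> 1 1 1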
|
coef.tvgarch <- function (object, spec = c("sigma2", "tv", "garch"), ...)
{
if (!is.null(object$order.g) && object$order.g[1] != 0) {
spec <- match.arg(spec)
if (spec == "sigma2") results <- c(object$par.g, object$par.h)
else if (spec == "tv") results <- object$par.g
else if (spec == "garch") results <- object$par.h
}
if (is.null(object$order.g) || object$order.g[1] == 0) results <- object$par.h
return(results)
}
|
powM2 <- function(z) z^(-2)
powM1 <- function(z) z^(-1)
powM0.5 <- function(z) z^(-0.5)
pow2 <- function(z) z^2
pow3 <- function(z) z^3
RI <- function(z) z * log(z)
RpowM2 <- function(z) z^(-2) * log(z)
RpowM1 <- function(z) z^(-1) * log(z)
RpowM0.5 <- function(z) z^(-0.5) * log(z)
Rlog <- function(z) log(z) * log(z)
Rsqrt <- function(z) sqrt(z) * log(z)
Rpow2 <- function(z) z^2 * log(z)
Rpow3 <- function(z) z^3 * log(z)
PT <- function(z) {
obj <- fp.scale(z)
(z + obj$shift) / obj$scale
}
fp.power <- function(z, a, b = NULL) {
  # Evaluate fractional-polynomial transform(s) of z for power a (and optionally b).
  # By FP convention, power 0 denotes log(z) and a repeated power multiplies by log(z).
  allowed <- c(-2, -1, -0.5, 0, 0.5, 1, 2, 3)
  if (!is.null(b)) {
    if (b > a) {          # order the powers so that a >= b
      he <- b
      b <- a
      a <- he
    }
    if (!(b %in% allowed)) stop("wrong power, must be one of c(-2,-1, -0.5, 0, 0.5, 1,2,3)\n")
  }
  if (!(a %in% allowed)) stop("wrong power, must be one of c(-2,-1, -0.5, 0, 0.5, 1,2,3)\n")
  l <- length(z)
  v1 <- if (a == 0) log(z) else z^a
  v <- v1
  if (!is.null(b)) {
    if (b != a) {
      v2 <- if (b == 0) log(z) else z^b
    } else {
      v2 <- log(z) * v1   # repeated power
    }
    v <- c(v1, v2)
  }
  v <- matrix(v, l, length(v)/l)
  if (!is.null(b)) {
    dimnames(v)[[2]] <- c(paste("fp(", a, ")", sep = ""),
                          paste("fp(", if (b == a) paste("R"), b, ")", sep = ""))
  } else {
    dimnames(v)[[2]] <- paste("fp(", a, ")", sep = "")
  }
  dimnames(v)[[1]] <- z
  v
}
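# Illustrative usage sketch (added; not part of the original source): powers
# (2, 0) give the columns z^2 and log(z); a repeated power (1, 1) gives z and
# z*log(z).
#   fp.power(1:3, 2, 0)    # columns "fp(2)" and "fp(0)"
#   fp.power(1:3, 1, 1)    # columns "fp(1)" and "fp(R1)"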
|
sup <- function(m){
indice<-NULL
for(i in 1:m){
for(j in i:m){
indice <- c(indice,(j-1)*j/2+i)
}
}
return(indice)
}
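# Illustrative note (added; not part of the original source): sup(m) maps the
# row-wise traversal of the upper triangle (i <= j) of an m x m symmetric
# matrix to positions in column-major packed storage, via (j-1)*j/2 + i.
#   sup(3)
#   #> 1 2 4 3 5 6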
|
pkgname <- "tidyquant"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
library('tidyquant')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
cleanEx()
nameEx("av_api_key")
flush(stderr()); flush(stdout())
cleanEx()
nameEx("coord_x_date")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
AAPL <- tq_get("AAPL", from = "2013-01-01", to = "2016-12-31")
AAPL %>%
ggplot(aes(x = date, y = adjusted)) +
geom_line() +
geom_ma(n = 50) +
geom_ma(n = 200, color = "red") +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125))
time_index <- seq(from = as.POSIXct("2012-05-15 07:00"),
to = as.POSIXct("2012-05-17 18:00"),
by = "hour")
set.seed(1)
value <- rnorm(n = length(time_index))
hourly_data <- tibble(time.index = time_index,
value = value)
hourly_data %>%
ggplot(aes(x = time.index, y = value)) +
geom_point() +
coord_x_datetime(xlim = c("2012-05-15 07:00:00", "2012-05-15 16:00:00"))
cleanEx()
nameEx("excel_date_functions")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
library(lubridate)
AS_DATE("2011 Jan-01")
YMD("2011 Jan-01")
MDY("01-02-20")
DMY("01-02-20")
WEEKDAY("2020-01-01")
WEEKDAY("2020-01-01", label = FALSE)
WEEKDAY("2020-01-01", label = FALSE, week_start = 1)
MONTH("2020-01-01")
QUARTER("2020-01-01")
YEAR("2020-01-01")
NOW()
TODAY()
EOMONTH("2020-01-01")
EOMONTH("2020-01-01", months = 1)
NET_WORKDAYS("2020-01-01", "2020-07-01")
NET_WORKDAYS("2020-01-01", "2020-07-01",
holidays = HOLIDAY_SEQUENCE("2020-01-01", "2020-07-01",
calendar = "NYSE"))
DATE_SEQUENCE("2020-01-01", "2020-07-01")
WORKDAY_SEQUENCE("2020-01-01", "2020-07-01")
HOLIDAY_SEQUENCE("2020-01-01", "2020-07-01", calendar = "NYSE")
WORKDAY_SEQUENCE("2020-01-01", "2020-07-01",
holidays = HOLIDAY_SEQUENCE("2020-01-01", "2020-07-01",
calendar = "NYSE"))
FLOOR_DATE(AS_DATE("2020-01-15"), by = "month")
CEILING_DATE(AS_DATE("2020-01-15"), by = "month")
CEILING_DATE(AS_DATE("2020-01-15"), by = "month") - ddays(1)
FANG %>%
pivot_table(
.rows = c(symbol, ~ QUARTER(date)),
.columns = ~ YEAR(date),
.values = ~ PCT_CHANGE_FIRSTLAST(adjusted)
)
cleanEx()
nameEx("excel_financial_math_functions")
flush(stderr()); flush(stdout())
cleanEx()
nameEx("excel_if_functions")
flush(stderr()); flush(stdout())
library(tidyverse)
library(tidyquant)
library(stringr)
library(lubridate)
SUM_IFS(x = 1:10, x > 5)
COUNT_IFS(x = letters, str_detect(x, "a|b|c"))
SUM_IFS(-10:10, x > 8 | x < -5)
Q75_IFS <- CREATE_IFS(.f = quantile, probs = 0.75, na.rm = TRUE)
Q75_IFS(1:10, x > 5)
FANG %>%
group_by(symbol) %>%
summarise(
high_volume_in_2015 = COUNT_IFS(volume,
year(date) == 2015,
volume > quantile(volume, 0.75))
)
FANG %>%
mutate(symbol = as_factor(symbol)) %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.by = "month",
adjusted = FIRST(adjusted)
) %>%
group_by(symbol) %>%
mutate(
returns = PCT_CHANGE(adjusted, fill_na = 0)
) %>%
summarise(
negative_monthly_returns = COUNT_IFS(returns, returns < 0)
)
cleanEx()
nameEx("excel_pivot_table")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
FANG %>%
pivot_table(
.rows = c(symbol, ~ QUARTER(date)),
.columns = ~ YEAR(date),
.values = ~ PCT_CHANGE_FIRSTLAST(adjusted)
)
cleanEx()
nameEx("excel_ref_functions")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
lookup_table <- tibble(
stock = c("FB", "AMZN", "NFLX", "GOOG"),
company = c("Facebook", "Amazon", "Netflix", "Google")
)
VLOOKUP("NFLX",
.data = lookup_table,
.lookup_column = stock,
.return_column = company)
FANG %>%
mutate(company = VLOOKUP(symbol, lookup_table, stock, company))
cleanEx()
nameEx("excel_stat_mutation_functions")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
library(forcats)
CUMULATIVE_SUM(1:10)
PCT_CHANGE(c(21, 24, 22, 25), fill_na = 0)
FANG %>%
mutate(symbol = as_factor(symbol)) %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.time_unit = "month",
adjusted = FIRST(adjusted)
) %>%
group_by(symbol) %>%
mutate(
returns = PCT_CHANGE(adjusted, fill_na = 0),
growth = CUMULATIVE_SUM(returns) + 1
)
cleanEx()
nameEx("excel_stat_summary_functions")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
library(forcats)
SUM(1:10)
PCT_CHANGE_FIRSTLAST(c(21, 24, 22, 25))
FANG %>%
mutate(symbol = as_factor(symbol)) %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.time_unit = "month",
adjusted = FIRST(adjusted)
)
cleanEx()
nameEx("geom_bbands")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
AAPL <- tq_get("AAPL", from = "2013-01-01", to = "2016-12-31")
AAPL %>%
ggplot(aes(x = date, y = close)) +
geom_line() +
geom_bbands(aes(high = high, low = low, close = close), ma_fun = SMA, n = 50) +
coord_x_date(xlim = c(as_date("2016-12-31") - dyears(1), as_date("2016-12-31")),
ylim = c(75, 125))
AAPL %>%
ggplot(aes(x = date, y = close)) +
geom_line() +
geom_bbands(aes(high = high, low = low, close = close),
ma_fun = EMA, wilder = TRUE, ratio = NULL, n = 50) +
coord_x_date(xlim = c(as_date("2016-12-31") - dyears(1), as_date("2016-12-31")),
ylim = c(75, 125))
AAPL %>%
ggplot(aes(x = date, y = close)) +
geom_line() +
geom_bbands(aes(high = high, low = low, close = close, volume = volume),
ma_fun = VWMA, n = 50) +
coord_x_date(xlim = c(as_date("2016-12-31") - dyears(1), as_date("2016-12-31")),
ylim = c(75, 125))
cleanEx()
nameEx("geom_chart")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
AAPL <- tq_get("AAPL", from = "2013-01-01", to = "2016-12-31")
AAPL %>%
ggplot(aes(x = date, y = close)) +
geom_barchart(aes(open = open, high = high, low = low, close = close)) +
geom_ma(color = "darkgreen") +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125))
AAPL %>%
ggplot(aes(x = date, y = close)) +
geom_candlestick(aes(open = open, high = high, low = low, close = close)) +
geom_ma(color = "darkgreen") +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125))
cleanEx()
nameEx("geom_ma")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
AAPL <- tq_get("AAPL", from = "2013-01-01", to = "2016-12-31")
AAPL %>%
ggplot(aes(x = date, y = adjusted)) +
geom_line() +
geom_ma(ma_fun = SMA, n = 50) +
geom_ma(ma_fun = SMA, n = 200, color = "red") +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125))
AAPL %>%
ggplot(aes(x = date, y = adjusted)) +
geom_line() +
geom_ma(aes(volume = volume), ma_fun = EVWMA, n = 50) +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125))
cleanEx()
nameEx("palette_tq")
flush(stderr()); flush(stdout())
library(scales)
scales::show_col(palette_light())
cleanEx()
nameEx("quandl_api_key")
flush(stderr()); flush(stdout())
cleanEx()
nameEx("quandl_search")
flush(stderr()); flush(stdout())
cleanEx()
nameEx("scale_manual")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
stocks <- c("AAPL", "FB", "NFLX") %>%
tq_get(from = "2013-01-01",
to = "2017-01-01")
g <- stocks %>%
ggplot(aes(date, adjusted, color = symbol)) +
geom_line() +
labs(title = "Multi stock example",
xlab = "Date",
ylab = "Adjusted Close")
g +
theme_tq() +
scale_color_tq()
cleanEx()
nameEx("summarise_by_time")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
FANG %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.by = "month",
adjusted = FIRST(adjusted)
)
FANG %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.by = "month",
adjusted = LAST(adjusted),
.type = "ceiling")
FANG %>%
group_by(symbol) %>%
summarise_by_time(
.date_var = date,
.by = "year",
adjusted = SUM(volume))
cleanEx()
nameEx("theme_tq")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
library(ggplot2)
AAPL <- tq_get("AAPL", from = "2013-01-01", to = "2016-12-31")
AAPL %>% ggplot(aes(x = date, y = close)) +
geom_line() +
geom_bbands(aes(high = high, low = low, close = close),
ma_fun = EMA,
wilder = TRUE,
ratio = NULL,
n = 50) +
coord_x_date(xlim = c("2016-01-01", "2016-12-31"),
ylim = c(75, 125)) +
labs(title = "Apple BBands",
x = "Date",
y = "Price") +
theme_tq()
cleanEx()
nameEx("tiingo_api_key")
flush(stderr()); flush(stdout())
cleanEx()
nameEx("tq_get")
flush(stderr()); flush(stdout())
library(tidyquant)
library(tidyverse)
tq_get_options()
aapl_stock_prices <- tq_get("AAPL")
mult_stocks <- tq_get(c("FB", "AMZN"),
get = "stock.prices",
from = "2016-01-01",
to = "2017-01-01")
cleanEx()
nameEx("tq_index")
flush(stderr()); flush(stdout())
library(tidyquant)
tq_index_options()
tq_exchange_options()
cleanEx()
nameEx("tq_mutate")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
fb_stock_prices <- tq_get("FB",
get = "stock.prices",
from = "2016-01-01",
to = "2016-12-31")
fb_stock_prices %>%
tq_mutate(select = close, mutate_fun = periodReturn,
period = "daily", type = "log")
fb_stock_prices %>%
tq_mutate_xy(x = close, y = volume, mutate_fun = EVWMA,
col_rename = "EVWMA")
tq_get("DCOILWTICO", get = "economic.data") %>%
tq_mutate(select = price, mutate_fun = lag.xts, k = 1, na.pad = TRUE)
fb_returns <- fb_stock_prices %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "fb.returns")
xlk_returns <- tq_get("XLK", from = "2016-01-01", to = "2016-12-31") %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "xlk.returns")
returns_combined <- left_join(fb_returns, xlk_returns, by = "date")
regr_fun <- function(data) {
coef(lm(fb.returns ~ xlk.returns, data = as_tibble(data)))
}
returns_combined %>%
tq_mutate(mutate_fun = rollapply,
width = 6,
FUN = regr_fun,
by.column = FALSE,
col_rename = c("coef.0", "coef.1"))
col_name <- "adjusted"
mutate <- c("MACD", "SMA")
tq_mutate_xy_(fb_stock_prices, x = col_name, mutate_fun = mutate[[1]])
cleanEx()
nameEx("tq_performance")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
data(FANG)
Ra <- FANG %>%
group_by(symbol) %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Ra")
Rb <- "^GSPC" %>%
tq_get(get = "stock.prices",
from = "2010-01-01",
to = "2015-12-31") %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Rb")
RaRb <- left_join(Ra, Rb, by = c("date" = "date"))
tq_performance_fun_options()
RaRb %>%
tq_performance(Ra = Ra, performance_fun = SharpeRatio, p = 0.95)
RaRb %>%
tq_performance(Ra = Ra, Rb = Rb, performance_fun = table.CAPM)
cleanEx()
nameEx("tq_portfolio")
flush(stderr()); flush(stdout())
library(tidyquant)
library(dplyr)
data(FANG)
monthly_returns_stocks <- FANG %>%
group_by(symbol) %>%
tq_transmute(adjusted, periodReturn, period = "monthly")
weights <- c(0.50, 0.25, 0.25, 0)
tq_portfolio(data = monthly_returns_stocks,
assets_col = symbol,
returns_col = monthly.returns,
weights = weights,
col_rename = NULL,
wealth.index = FALSE)
weights_df <- tibble(symbol = c("FB", "AMZN", "NFLX"),
weights = c(0.50, 0.25, 0.25))
tq_portfolio(data = monthly_returns_stocks,
assets_col = symbol,
returns_col = monthly.returns,
weights = weights_df,
col_rename = NULL,
wealth.index = FALSE)
mult_monthly_returns_stocks <- tq_repeat_df(monthly_returns_stocks, n = 4)
weights <- c(0.50, 0.25, 0.25, 0.00,
0.00, 0.50, 0.25, 0.25,
0.25, 0.00, 0.50, 0.25,
0.25, 0.25, 0.00, 0.50)
stocks <- c("FB", "AMZN", "NFLX", "GOOG")
weights_table <- tibble(stocks) %>%
tq_repeat_df(n = 4) %>%
bind_cols(tibble(weights)) %>%
group_by(portfolio)
tq_portfolio(data = mult_monthly_returns_stocks,
assets_col = symbol,
returns_col = monthly.returns,
weights = weights_table,
col_rename = NULL,
wealth.index = FALSE)
cleanEx()
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
quit('no')
|
InitErgmReference.CompleteOrder <- function(nw, arglist, ...){
a <- check.ErgmTerm(nw, arglist)
list(name="CompleteOrder", init_methods = c("CD","zeros"))
}
|
unnest_tokens <- function(tbl, output, input, token = "words",
format = c(
"text", "man", "latex",
"html", "xml"
),
to_lower = TRUE, drop = TRUE,
collapse = NULL, ...) {
output <- enquo(output)
input <- enquo(input)
format <- arg_match(format)
  tokenfunc <- find_function(token, format, to_lower, collapse, ...)
if (!is_null(collapse)) {
if (is_logical(collapse)) {
lifecycle::deprecate_stop(
"0.2.7",
"tidytext::unnest_tokens(collapse = 'must be `NULL` or a character vector')"
)
}
if (is_grouped_df(tbl)) {
rlang::abort(
paste0("Use the `collapse` argument or grouped data, but not both.")
)
}
if (any(!purrr::map_lgl(tbl, is_atomic))) {
rlang::abort(
paste0("If collapse != NULL (such as for unnesting by sentence or paragraph),\n",
"unnest_tokens needs all input columns to be atomic vectors (not lists)")
)
}
tbl <- group_by(tbl, !!!syms(collapse))
}
if (is_grouped_df(tbl)) {
tbl <- tbl %>%
ungroup() %>%
mutate(new_groups = cumsum(c(1, diff(group_indices(tbl)) != 0))) %>%
group_by(new_groups, !!!groups(tbl)) %>%
summarise(!!input := stringr::str_c(!!input, collapse = "\n")) %>%
group_by(!!!groups(tbl)) %>%
dplyr::select(-new_groups)
if(!is_null(collapse)) {
tbl <- ungroup(tbl)
}
}
col <- pull(tbl, !!input)
output_lst <- tokenfunc(col, ...)
if (!(is.list(output_lst) && length(output_lst) == nrow(tbl))) {
rlang::abort(sprintf(
"Expected output of tokenizing function to be a list of length %d",
nrow(tbl)
))
}
output <- quo_name(output)
input <- quo_name(input)
tbl_indices <- vec_rep_each(seq_len(nrow(tbl)), lengths(output_lst))
ret <- vec_slice(tbl, tbl_indices)
ret[[output]] <- flatten_chr(output_lst)
if (to_lower) {
if (!is_function(token))
if(token == "tweets") {
rlang::inform("Using `to_lower = TRUE` with `token = 'tweets'` may not preserve URLs.")
}
ret[[output]] <- stringr::str_to_lower(ret[[output]])
}
if (drop && output != input) {
ret[[input]] <- NULL
}
ret
}
find_function <- function(token, format, to_lower, collapse, ...) {
if (is_function(token)) {
tokenfunc <- token
} else if (token %in% c(
"word", "character",
"character_shingle", "ngram",
"skip_ngram", "sentence", "line",
"paragraph", "tweet"
)) {
rlang::abort(paste0(
"Error: Token must be a supported type, or a function that takes a character vector as input",
"\nDid you mean token = ", token, "s?"
))
} else if (format != "text") {
if (token != "words") {
rlang::abort("Cannot tokenize by any unit except words when format is not text")
}
tokenfunc <- function(col, ...) hunspell::hunspell_parse(col,
format = format
)
} else {
if (is_null(collapse) && token %in% c(
"ngrams", "skip_ngrams", "sentences",
"lines", "paragraphs", "regex",
"character_shingles"
)) {
lifecycle::deprecate_warn(
"0.2.7",
"tidytext::unnest_tokens(collapse = 'changed its default behavior for `NULL`')"
)
}
tf <- get(paste0("tokenize_", token))
if (token %in% c(
"characters", "words", "ngrams", "skip_ngrams",
"tweets", "ptb"
)) {
tokenfunc <- function(col, ...) tf(col, lowercase = to_lower, ...)
} else {
tokenfunc <- tf
}
}
tokenfunc
}
|
read_genome <-
function(file,
format = "fasta",
obj.type = "Biostrings",
...) {
if (!is.element(format, c("fasta", "gbk")))
stop("Please choose a file format that is
supported by this function.",
call. = FALSE)
if (!file.exists(file))
stop("The file path you specified does not seem to exist: '", file,"'.", call. = FALSE)
if (!is.element(obj.type, c("Biostrings", "data.table")))
            stop("Please specify a valid object type: obj.type = 'Biostrings' (default) or obj.type = 'data.table'.",
                 call. = FALSE)
geneids <- NULL
if (obj.type == "Biostrings") {
tryCatch({
genome <-
Biostrings::readDNAStringSet(filepath = file,
format = format, ...)
}, error = function(e) {
stop(
paste0(
"File ",
file,
" could not be read properly. \n",
"Please make sure that ",
file,
" contains only DNA sequences and is in ",
format,
" format."
),
call. = FALSE
)
})
return(genome)
}
if (obj.type == "data.table") {
tryCatch({
genome <-
Biostrings::readDNAStringSet(filepath = file,
format = format, ...)
genome_names <-
as.vector(unlist(lapply(genome@ranges@NAMES, function(x) {
return(strsplit(x, " ")[[1]][1])
})))
genome.dt <-
data.table::data.table(geneids = genome_names,
seqs = tolower(as.character(genome)))
data.table::setkey(genome.dt, geneids)
}, error = function(e) {
stop(
paste0(
"File ",
file,
" could not be read properly. \n",
"Please make sure that ",
file,
" contains only DNA sequences and is in ",
format,
" format."
),
call. = FALSE
)
})
return(genome.dt)
}
}
|
expected <- eval(parse(text="c(3L, 1L, 0L)"));
test(id=0, code={
argv <- eval(parse(text="list(c(0.099999994, 0.2), 6L, 0L)"));
.Internal(format.info(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected);
|
context("Testing {dprw}LambertW functions\n")
set.seed(10)
cauchy.samples <- rcauchy(1000)
beta.list <- list("normal" = c(1, 2),
"t" = c(1, 10, 5),
"exp" = 10,
"gamma" = c(2, 3),
"chisq" = 5)
dist.names <- names(beta.list)
beta.list <- lapply(names(beta.list),
function(nn) {
x <- beta.list[[nn]]
names(x) <- get_beta_names(nn)
return(x)
})
names(beta.list) <- dist.names
heavy.theta.list <- lapply(beta.list,
function(x) {
return(list(beta = x, delta = 0.1))
})
names(heavy.theta.list) <- names(beta.list)
for (nn in names(heavy.theta.list)) {
info.txt <- paste0("Testing '", nn, "' distribution\n")
context(info.txt)
theta.tmp <- heavy.theta.list[[nn]]
tau.tmp <- theta2tau(theta.tmp, distname = nn)
theta.zero.tmp <- theta.tmp
theta.zero.tmp[["delta"]] <- 0
tau.zero.tmp <- theta2tau(theta.zero.tmp, distname = nn)
auxD <- function(x) {
dU(x, beta = theta.tmp$beta, distname = nn)
}
auxP <- function(q) {
pU(q, beta = theta.tmp$beta, distname = nn)
}
auxR <- function(n) {
rU(n = n, beta = theta.tmp$beta, distname = nn)
}
auxQ <- function(x) {
qU(x, beta = theta.tmp$beta, distname = nn)
}
dist.family <- get_distname_family(nn)
ib <- c(-Inf, Inf)
if (nn %in% c("unif", "beta")) {
ib <- theta.tmp$beta
}
support.dist <- get_support(tau.tmp,
is.non.negative = dist.family$is.non.negative,
input.bounds = ib)
test_that("dLamberW is a density", {
area.curve <- integrate(auxD, support.dist[1], support.dist[2])$value
expect_equal(area.curve, 1,
info = info.txt, tol = 1e-4)
expect_true(all(auxD(cauchy.samples) >= 0))
})
test_that("pLamberW is a cdf", {
expect_equivalent(auxP(support.dist[1]), 0,
info = info.txt)
expect_equivalent(auxP(support.dist[2]), 1,
info = info.txt)
expect_true(all(auxP(cauchy.samples) >= 0))
expect_true(all(auxP(cauchy.samples) <= 1))
expect_true(all(diff(auxP(sort(cauchy.samples))) >= 0))
expect_equal(integrate(auxD, 0, 1)$value,
auxP(1) - auxP(0), tol = 1e-4)
})
test_that("qLambertW is a quantile function", {
expect_true(auxQ(0) == support.dist[1],
info = paste0(info.txt, ": at 0 it equals lower bound."))
expect_true(auxQ(1) == support.dist[2],
info = paste0(info.txt, ": at 1 it equals upper bound."))
samples.from.dist <- auxR(n = 1e2)
expect_equivalent(auxQ(auxP(samples.from.dist)), samples.from.dist)
expect_true(all(diff(auxQ(seq(0, 1, by = 0.1))) >= 0))
dist.family <- get_distname_family(nn)
if (dist.family$is.non.negative) {
expect_equivalent(auxQ(0), 0)
}
})
test_that("rLamberW is a random number generator", {
expect_true(is.numeric(auxR(n = 100)),
info = info.txt)
expect_equal(length(auxR(n = 100)), 100,
info = info.txt)
samples.from.dist <- auxR(n = 1e3)
kde.est <- density(samples.from.dist)
expect_gt(cor(kde.est$y, auxD(kde.est$x)), 0.9)
})
}
|
source("ESEUR_config.r")
library("plyr")
par(mar=MAR_default+c(0.0, 0.7, 0, 0))
pal_col=rainbow(4)
probs_found=function(df)
{
t=apply(df, 1, function(X) as.numeric(sum(X) != 0))
return(sum(t))
}
plot_group=function(grp_size)
{
g=combn(num_subj, grp_size, function(X) probs_found(usis[ , X]))
totals=count(g)
lines(totals$x, totals$freq/length(g), col=pal_col[grp_size-1])
}
usis=read.csv(paste0(ESEUR_dir, "reliability/24b1e.csv.xz"), as.is=TRUE)
usis$Prob=NULL
usis$Impact=NULL
num_subj=ncol(usis)
plot(1, type="n", log="y",
xaxs="i", yaxs="i",
xlim=c(15, 100), ylim=c(3e-4, 1e-1),
xlab="Issues found", ylab="Probability\n\n")
plot_group(5)
plot_group(4)
plot_group(3)
plot_group(2)
legend(x="bottomright", legend=paste0("Reviewers=", 2:5),
inset=c(0.03, 0), bty="n", fill=pal_col, cex=1.2)
|
dat <- load_edge_assignment("../test_data/HMP_stagger/allReads-diamond.list.txt", type = 'diamond')
tmp_folder <- file.path(getwd(), "sandbox")
dir.create(path = tmp_folder, recursive = TRUE, showWarnings = FALSE)
pdf_name <- file.path(tmp_folder, "test_pdf.pdf")
png_name <- file.path(tmp_folder, "test_png.png")
gplot <- plot_edge_assignment(dat, "superkingdom", "Test Plot",
                              "HMP stagger", file.path(tmp_folder, "test_pdf"))
Cairo::Cairo(width = 500, height = 500,
file = png_name, type = "png", pointsize = 18,
bg = "white", canvas = "white", dpi = 86)
print(gplot)
dev.off()
expect_that(file.exists(png_name), is_true())
expect_that(file.exists(pdf_name), is_true())
expect_that(file.info(png_name)$size > 0, is_true())
expect_that(file.info(pdf_name)$size > 0, is_true())
unlink(tmp_folder, recursive = TRUE)
|
vcov.twinSIR <- function (object, ...)
{
solve(object$fisherinfo)
}
logLik.twinSIR <- function (object, ...)
{
r <- object$loglik
attr(r, "df") <- length(coef(object))
class(r) <- "logLik"
r
}
.OSAICpenalty <- function (twinSIRobject, k = 2, nsim = 1e3)
{
theta <- coef(twinSIRobject)
npar <- length(theta)
pz <- length(grep("cox\\([^)]+\\)", names(theta), ignore.case = FALSE,
perl = FALSE, fixed = FALSE, useBytes = FALSE,
invert = FALSE))
px <- npar - pz
penalty <- if (px == 0L) {
k * pz
} else if (px == 1L) {
k * (pz + 0.5)
} else if (px == 2L) {
Sigma <- vcov(twinSIRobject)
rho <- cov2cor(Sigma[1:2,1:2])[1,2]
as <- acos(rho)/2/pi
w <- c(as, 0.5, 0.5-as)
k * sum(w * (pz + 0:2))
} else {
message("Computing OSAIC weights for ", px,
" epidemic covariates based on ", nsim, " simulations ...")
W <- vcov(twinSIRobject)[1:px,1:px]
w.sim <- w.chibarsq.sim(p=px, W=W, N=nsim)
k * sum(w.sim * (pz + 0:px))
}
attr(penalty, "exact") <- px <= 2
penalty
}
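# Note (added; not part of the original source): for px == 2 the chi-bar-square
# weights have the closed form (acos(rho)/(2*pi), 1/2, 1/2 - acos(rho)/(2*pi)),
# so e.g. with rho = 0 the weights are (1/4, 1/2, 1/4) and the penalty reduces
# to k * (pz + 1).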
AIC.twinSIR <- function (object, ..., k = 2, one.sided = NULL, nsim = 1e3)
{
AIC.default <- match.call()
AIC.default$one.sided <- NULL
AIC.default$nsim <- NULL
AIC.default[[1]] <- call(":::", as.name("stats"), as.name("AIC.default"))
if (is.null(one.sided)) {
one.sided <- object$method == "L-BFGS-B"
}
if (one.sided) {
penalty <- .OSAICpenalty(object, k = k, nsim = nsim)
edf <- length(coef(object))
AIC.default$k <- penalty/edf
}
res <- eval(AIC.default, parent.frame())
attr(res, "type") <- if (one.sided) "One-sided AIC" else "Standard AIC"
attr(res, "exact") <- if (one.sided) attr(penalty, "exact") else TRUE
res
}
extractAIC.twinSIR <- function (fit, scale = 0, k = 2, one.sided = NULL,
nsim = 1e3, ...)
{
if (is.null(one.sided)) {
one.sided <- fit$method == "L-BFGS-B"
}
loglik <- logLik(fit)
edf <- attr(loglik, "df")
penalty <- if (one.sided) {
.OSAICpenalty(fit, k = k, nsim = nsim)
} else {
k * edf
}
res <- c(edf = edf, AIC = -2 * c(loglik) + penalty)
attr(res, "type") <- if (one.sided) "One-sided AIC" else "Standard AIC"
attr(res, "exact") <- if (one.sided) attr(penalty, "exact") else TRUE
res
}
print.twinSIR <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("\nCall:\n")
print.default(x$call)
cat("\nCoefficients:\n")
print.default(format(coef(x), digits=digits), print.gap = 2, quote = FALSE)
cat("\nLog-likelihood: ", format(logLik(x), digits=digits), "\n", sep = "")
if (!x$converged) {
cat("\nWARNING: OPTIMIZATION DID NOT CONVERGE!\n")
}
cat("\n")
invisible(x)
}
summary.twinSIR <- function (object,
correlation = FALSE, symbolic.cor = FALSE, ...)
{
ans <- object[c("call", "converged", "counts", "intervals", "nEvents")]
ans$cov <- vcov(object)
est <- coef(object)
se <- sqrt(diag(ans$cov))
zval <- est/se
pval <- 2 * pnorm(abs(zval), lower.tail = FALSE)
ans$coefficients <- cbind(est, se, zval, pval)
dimnames(ans$coefficients) <- list(names(est),
c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))
if (correlation) {
ans$correlation <- cov2cor(ans$cov)
ans$symbolic.cor <- symbolic.cor
}
ans$loglik <- logLik(object)
aic <- extractAIC(object, ...)
ans$aic <- as.vector(aic[2L])
attributes(ans$aic) <- attributes(aic)[c("type", "exact")]
class(ans) <- "summary.twinSIR"
ans
}
print.summary.twinSIR <- function (x,
digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor,
signif.stars = getOption("show.signif.stars"), ...)
{
cat("\nCall:\n")
print.default(x$call)
cat("\nCoefficients:\n")
coefs <- x$coefficients
printCoefmat(coefs, digits = digits, signif.stars = signif.stars,
na.print = "NA", ...)
nEvents <- x$nEvents
nh0 <- length(nEvents)
if (nh0 < 2L) {
cat("\nTotal number of infections: ", nEvents, "\n")
} else {
cat("\nBaseline intervals:\n")
intervals <- character(nh0)
for(i in seq_len(nh0)) {
intervals[i] <-
paste("(",
paste(format(x$intervals[c(i,i+1L)],trim=TRUE), collapse=";"),
"]", sep = "")
}
names(intervals) <- paste("logbaseline", seq_len(nh0), sep=".")
print.default(rbind("Time interval" = intervals,
"Number of events" = nEvents),
quote = FALSE, print.gap = 2)
}
cat("\n", attr(x$aic, "type"), ": ", format(x$aic, digits=max(4, digits+1)),
if (!attr(x$aic, "exact")) "\t(simulated penalty weights)" else "",
sep = "")
cat("\nLog-likelihood:", format(x$loglik, digits = digits))
cat("\nNumber of log-likelihood evaluations:", x$counts[1], "\n")
correl <- x$correlation
if (!is.null(correl)) {
p <- NCOL(correl)
if (p > 1L) {
cat("\nCorrelation of Coefficients:\n")
if (is.logical(symbolic.cor) && symbolic.cor) {
correl <- symnum(correl, abbr.colnames = NULL)
correlcodes <- attr(correl, "legend")
attr(correl, "legend") <- NULL
print(correl)
cat("---\nCorr. codes: ", correlcodes, "\n", sep="")
} else {
correl <- format(round(correl, 2), nsmall = 2, digits = digits)
correl[!lower.tri(correl)] <- ""
print(correl[-1, -p, drop = FALSE], quote = FALSE)
}
}
}
if (!x$converged) {
cat("\nWARNING: OPTIMIZATION DID NOT CONVERGE!\n")
}
cat("\n")
invisible(x)
}
plot.twinSIR <- function (x, which, ...)
{
cl <- match.call()
cl[[1]] <- as.name("intensityplot")
eval(cl, envir = parent.frame())
}
formals(plot.twinSIR)$which <- formals(intensityplot.twinSIR)$which
residuals.twinSIR <- function(object, ...)
{
eventTimes <- attr(object$model$survs,"eventTimes")
sortedStop <- sort(unique(object$model$survs[,"stop"]))
eventTimesIdx <- match(eventTimes, sortedStop)
nTimes <- nrow(object$model$X)
zerovec <- numeric(nTimes)
px <- ncol(object$model$X)
pz <- ncol(object$model$Z)
theta <- coef(object)
alpha <- theta[seq_len(px)]
beta <- theta[px+seq_len(pz)]
if (px > 0) { e <- as.vector(object$model$X %*% as.matrix(alpha)) } else { e <- zerovec }
if (pz > 0) { h <- as.vector(exp(object$model$Z %*% as.matrix(beta))) } else { h <- zerovec }
lambda <- (e + h)
BLOCK <- as.numeric(factor(object$model$survs$start))
dt <- object$model$survs[,"stop"] - object$model$survs[,"start"]
intlambda <- tapply(object$model$weights * lambda* dt, BLOCK, sum)
tau <- cumsum(intlambda)[eventTimesIdx]
tau
}
|
"nbasal"
|
convert_from_rds <- function(language, in_direc, out_direc){
  # Requires the 'foreign' package for write.foreign() and write.dta().
  dir.create(out_direc, showWarnings = FALSE)
  filenames <- list.files(path = in_direc, pattern = ".*\\.rds$")
  for(i in 1:length(filenames)){
    tempdataframename <- readRDS(paste(in_direc, "/", filenames[i], sep = ""))
    if(language == "SPSS"){
      write.foreign(tempdataframename, paste(out_direc, "/", filenames[i], ".txt", sep = ""),
                    paste(out_direc, "/", filenames[i], ".sps", sep = ""), package = "SPSS")
    }
    if(language == "SAS"){
      write.foreign(tempdataframename, paste(out_direc, "/", filenames[i], ".csv", sep = ""),
                    paste(out_direc, "/", filenames[i], ".sas", sep = ""),
                    dataname = gsub(" ", "", filenames[i]), package = "SAS")
    }
    if(language == "STATA"){
      write.dta(tempdataframename, paste(out_direc, "/", filenames[i], ".dta", sep = ""))
    }
    message(paste(filenames[i], "Exported"))
  }
}
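# Illustrative usage sketch (added; not part of the original source); the
# directory paths below are hypothetical:
#   library(foreign)
#   convert_from_rds("STATA", in_direc = "data/rds", out_direc = "data/stata")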
|
context("testing results with varrious link functions agianst `glm.fit`")
to_check <- c(
"coefficients", "residuals", "fitted.values", "rank",
"family", "linear.predictors", "deviance", "aic", "null.deviance",
"prior.weights", "df.residual", "df.null",
"boundary", "formula", "terms", "data",
"offset", "contrasts", "xlevels")
sim_func <- function(family, n, p){
nam <- paste0(family$family, family$link)
x_vals <- seq(-pi, pi, length.out = n)
X <- outer(x_vals, 1:p, function(x, y) sin(x * y))
rg <- range(rowSums(X))
X <- X * 2 / diff(rg)
set.seed(77311413)
if(nam %in% c("binomiallogit", "binomialprobit", "binomialcauchit",
"binomialcloglog")){
inter <- 1.
y <- family$linkinv(rowSums(X) + inter) > runif(n)
} else if(nam %in% "binomiallog"){
inter <- -.5
X <- -abs(X)
X <- X * .25 / diff(range(rowSums(X)))
y <- family$linkinv(rowSums(X) + inter) > runif(n)
} else if(nam %in% c("gaussianidentity", "gaussianlog")){
inter <- 0
y <- rnorm(n, family$linkinv(rowSums(X)), sd = .1)
} else if(nam %in% c("gaussianinverse")){
inter <- 1.
X <- abs(X)
y <- rnorm(n, family$linkinv(rowSums(X) + inter), sd = .1)
} else if(nam %in% c("Gammainverse", "Gammaidentity", "Gammalog")){
inter <- .5
X <- abs(X)
y <- rgamma(n, shape = 1, rate = 1 / family$linkinv(rowSums(X) + inter))
} else if(nam %in% c("poissonlog", "poissonidentity", "poissonsqrt")){
inter <- 1.5
X <- abs(X)
y <- rpois(n, family$linkinv(rowSums(X) + inter))
} else if(nam %in% c("inverse.gaussian1/mu^2", "inverse.gaussianinverse",
"inverse.gaussianidentity", "inverse.gaussianlog")){
inter <- 1.5
X <- abs(X)
y <- SuppDists::rinvGauss(n, family$linkinv(rowSums(X) + inter), 1)
} else stop("family not implemented")
list(X = X, y = y, inter = inter)
}
test_expr <- expression({
is_FAST <- method == "FAST"
tol <- if(is_FAST) .Machine$double.eps^(1/5) else
.Machine$double.eps^(1/4)
expect_equal(f1[to_check], f2[to_check], label = lab,
tolerance = tol)
s2 <- summary(f2)
s1 <- summary(f1)
excl <- c("call", "coefficients", "cov.unscaled", "cov.scaled",
"dispersion", "iter")
expect_equal(s1[!names(s1) %in% excl], s2[!names(s2) %in% excl],
label = lab, tolerance = tol)
na <- rownames(s1$coefficients)
expect_equal(s1$coefficients[na, 1:2], s2$coefficients[na, 1:2],
label = lab, tolerance = tol)
expect_equal(s1$dispersion, s2$dispersion, label = lab,
tolerance = tol)
})
test_that("works with different families", {
skip_if_not_installed("SuppDists")
n <- 500L
p <- 2L
for(method in c("LAPACK", "LINPACK", "FAST"))
for(fa in list(
binomial("logit"), binomial("probit"), binomial("cauchit"),
binomial("cloglog"),
gaussian("identity"), gaussian("inverse"),
gaussian("log"),
Gamma("log"),
poisson("log"), poisson("sqrt")))
{
tmp <- sim_func(fa, n, p)
df <- data.frame(y = tmp$y, tmp$X)
lab <- paste0(fa$family, "_", fa$link, "_", method)
frm <- y ~ X1 + X2
glm_control <- list(maxit = 25L, epsilon = .Machine$double.xmin)
parglm_control <- parglm.control(
nthreads = 2, method = method, maxit = 25L,
epsilon = .Machine$double.xmin)
suppressWarnings({
f2 <- glm(frm, family = fa, data = df, control = glm_control)
f1 <- parglm(frm, family = fa, data = df,
control = parglm_control)
})
eval(test_expr)
lab <- paste0(fa$family, "_", fa$link, "_", method, " w/ offset")
offs <- seq(0, .05, length.out = n)
suppressWarnings({
f2 <- glm(frm, family = fa, offset = offs, data = df,
control = glm_control)
f1 <- parglm(frm, family = fa, offset = offs, data = df,
control = parglm_control)
})
eval(test_expr)
    lab <- paste0(fa$family, "_", fa$link, "_", method, " w/ weights")
df$w <- seq(.5, 1.5, length.out = n)
suppressWarnings({
f2 <- glm(frm, family = fa, weights = w, data = df,
control = glm_control)
f1 <- parglm(frm, family = fa, weights = w, data = df,
control = parglm_control)
})
eval(test_expr)
}
})
test_that("works with different families w/ starting values", {
skip_if_not_installed("SuppDists")
n <- 500L
p <- 2L
set.seed(77311413)
for(method in c("LAPACK", "LINPACK", "FAST"))
for(fa in list(
binomial("logit"), binomial("probit"), binomial("cauchit"),
binomial("cloglog"),
gaussian("identity"), gaussian("inverse"),
gaussian("log"),
Gamma("log"),
poisson("log"), poisson("sqrt"), poisson("identity"),
inverse.gaussian("1/mu^2"), inverse.gaussian("inverse"),
inverse.gaussian("identity")))
{
tmp <- sim_func(fa, n, p)
df <- data.frame(y = tmp$y, tmp$X)
df$INTER <- 1.
lab <- paste0(fa$family, "_", fa$link, "_", method)
sta <- rep(1, p + 1L)
sta[1] <- tmp$inter
frm <- y ~ X1 + X2 + INTER - 1
glm_control <- list(maxit = 25L, epsilon = .Machine$double.xmin)
parglm_control <- parglm.control(
nthreads = 2, method = method, maxit = 25L,
epsilon = .Machine$double.xmin)
suppressWarnings({
f2 <- glm(frm, family = fa, start = sta, data = df,
control = glm_control)
f1 <- parglm(frm, family = fa, data = df,
control = parglm_control, start = sta)
})
eval(test_expr)
lab <- paste0(fa$family, "_", fa$link, "_", method, " w/ offset")
sta <- rep(1, p)
sta[1] <- tmp$inter
frm_off <- update(frm, . ~ . - X1)
suppressWarnings({
f2 <- glm(frm_off, family = fa, offset = X1, start = sta, data = df,
control = glm_control)
f1 <- parglm(frm_off, family = fa, offset = X1, data = df,
control = parglm_control, start = sta)
})
eval(test_expr)
    lab <- paste0(fa$family, "_", fa$link, "_", method, " w/ weights")
w <- runif(n)
df$w <- n * w / sum(w)
sta <- rep(1, p + 1L)
sta[1] <- tmp$inter
suppressWarnings({
f2 <- glm(frm, family = fa, weights = w, start = sta, data = df,
control = glm_control)
f1 <- parglm(frm, family = fa, weights = w, start = sta, data = df,
control = parglm_control)
})
eval(test_expr)
}
})
test_that("'method' equal to 'LINPACK' behaves as 'glm'", {
set.seed(73640893)
n <- 500
p <- 5
X <- matrix(nrow = n, ncol = p)
for(i in 1:p)
X[, i] <- rnorm(n, sd = sqrt(p - i + 1L))
y <- rnorm(n) + rowSums(X)
glm_control <- list(maxit = 25L, epsilon = .Machine$double.xmin)
parglm_control <- parglm.control(
nthreads = 2, maxit = 25L,
epsilon = .Machine$double.xmin, method = "LINPACK")
f1 <- glm(y ~ X, control = glm_control)
f2 <- parglm(y ~ X, control = parglm_control)
expect_equal(f1[to_check], f2[to_check])
s1 <- summary(f1)
s2 <- summary(f2)
excl <- c("call", "coefficients", "cov.unscaled", "cov.scaled",
"dispersion", "iter")
expect_equal(s1[!names(s1) %in% excl], s2[!names(s2) %in% excl])
expect_equal(s1$coefficients, s2$coefficients)
expect_equal(s1$dispersion, s2$dispersion)
})
test_that("'FASTs' fail when design matrix is singular", {
set.seed(73640893)
n <- 1000
p <- 5
X <- matrix(nrow = n, ncol = p)
for(i in 1:p)
X[, i] <- rnorm(n, sd = sqrt(p - i + 1L))
y <- rnorm(n) + rowSums(X)
X <- cbind(X[, 1:3], X[, 3:p])
X <- cbind(X, X)
suppressMessages(expect_error(
f2 <- parglm(y ~ X, control = parglm.control(method = "FAST"))))
})
test_that("'parglm' yields the same as 'glm' also when one observations is not 'good'", {
phat <- seq(.01, .99, by = .01)
X <- log(phat / (1 - phat)) - 2
set.seed(47313714)
Y <- phat > runif(length(phat))
W <- rep(1, length(Y))
W[1] <- 0
glm_control <- list(maxit = 25L, epsilon = .Machine$double.xmin)
parglm_control <- parglm.control(
nthreads = 2, maxit = 25L, epsilon = .Machine$double.xmin)
fit <- suppressWarnings(glm(Y ~ X, binomial(), weights = W,
control = glm_control))
pfit <- parglm(Y ~ X, binomial(), weights = W,
control = parglm_control)
expect_equal(fit[to_check], pfit[to_check])
Y <- rev(Y)
X <- rev(X)
W <- rev(W)
fit <- suppressWarnings(glm(Y ~ X, binomial(), weights = W,
control = glm_control))
pfit <- parglm(Y ~ X, binomial(), weights = W,
control = parglm_control)
expect_equal(fit[to_check], pfit[to_check])
})
test_that("'stop's when there are more variables than observations", {
set.seed(1)
n <- 20L
dframe <- cbind(data.frame(y = 1:n), replicate(n, rnorm(n)))
expect_error(
parglm(y ~ ., gaussian(), dframe),
"not implemented with more variables than observations", fixed = TRUE)
dframe <- dframe[, 1:n]
fpar <- parglm(y ~ ., gaussian(), dframe, nthreads = 2)
fglm <- glm(y ~ ., gaussian(), dframe)
expect_equal(coef(fpar), coef(fglm))
dframe <- dframe[, 1:(n - 3L)]
fpar <- parglm(y ~ ., gaussian(), dframe, nthreads = 2)
fglm <- glm(y ~ ., gaussian(), dframe)
expect_equal(coef(fpar), coef(fglm))
})
|
test_immediate_substitution <- function() {
if(requireNamespace('rqdatatable', quietly = TRUE)) {
library("rqdatatable")
d <- data.frame(
x = c(1, 1, 2),
y = c(5, 4, 3),
z = c(6, 7, 8)
)
condition_variable <- as.name('x')
new_value_variable <- as.name('y')
old_value_variable <- as.name('z')
res <- d %.>%
select_rows(.,
.(condition_variable) == 1) %.>%
extend(.,
.(new_value_variable) := .(old_value_variable) + 1) %.>%
order_rows(.,
c('x', 'y', 'z'))
expect <- wrapr::build_frame(
"x" , "y", "z" |
1 , 7 , 6 |
1 , 8 , 7 )
expect_true(wrapr::check_equiv_frames(res, expect))
}
invisible(NULL)
}
test_immediate_substitution()
|
workContext <-
function(list){
if(length(list$work_context) > 0){
work_context <- ldply((lapply(list$work_context, function(x){t(unlist(x))})))
return(work_context)
}
else{
message("Warning: This type of data is missing or incomplete for this occupation.")
}
}
|
library(tryCatchLog)
library(testthat)
context("test_build_log_output.R")
source("init_unit_test.R")
options("width" = 2000)
source("disable_logging_output.R")
test_that("log output is correct", {
load("stack_trace.RData")
log.entry <- tryCatchLog:::build.log.entry(Sys.time(), "ERROR", "msg", "", stack.trace, "", 0)
out1 <- tryCatchLog::build.log.output(log.entry, include.full.call.stack = FALSE)
expected1 <- "[ERROR] msg\n\nCompact call stack:\n 1 tryLog(log(\"abc\"))\n 2 tryLog.R
expect_equal(out1, expected1, info = "include.full.call.stack = FALSE")
out2 <- tryCatchLog::build.log.output(log.entry, include.full.call.stack = TRUE)
expected2 <- paste(readLines("expected_log_output.txt"), collapse = "\n")
expect_equal(out2, expected2, info = "include.full.call.stack = TRUE")
out3 <- tryCatchLog::build.log.output(log.entry)
expect_equal(out3, expected2, info = "default value of include.full.call.stack")
log.entry$dump.file.name <- "my.dump.file123.rda"
out4 <- tryCatchLog::build.log.output(log.entry)
expect_match(out4, "Created dump file: my.dump.file123.rda", fixed = TRUE, info = "dump file name is in output")
expect_equal(tryCatchLog::build.log.output(data.frame()), "", info = "empty log -> empty output")
expect_error(tryCatchLog::build.log.output(""), "\"data.frame\" %in% class(log.results) is not TRUE", fixed = TRUE)
})
test_that("multiple log entry rows work", {
expect_error(expect_warning(tryCatchLog({
log(-1); log("abc")
})))
log.entries <- last.tryCatchLog.result()
expect_equal(NROW(log.entries), 2)
out1 <- build.log.output(log.entries)
expect_match(out1, ".*\\[WARN\\] NaNs produced.*\\[ERROR\\] non-numeric argument to mathematical function.*")
})
test_that("include args do work", {
timestamp <- as.POSIXct("12/31/2010 9:00", format = "%m/%d/%Y %H:%M")
log.entry <- tryCatchLog:::build.log.entry(timestamp, "ERROR", "MESSAGE", "", NULL, "dump_123.rda", 0)
out <- build.log.output(log.entry, include.severity = TRUE, include.timestamp = TRUE)
expect_equal(out, "2010-12-31 09:00:00 [ERROR] MESSAGE\n\nCreated dump file: dump_123.rda\n\nCompact call stack:\n\n\nFull call stack:\n\n\n")
out <- build.log.output(log.entry, include.severity = FALSE, include.timestamp = TRUE)
expect_equal(out, "2010-12-31 09:00:00 MESSAGE\n\nCreated dump file: dump_123.rda\n\nCompact call stack:\n\n\nFull call stack:\n\n\n")
out <- build.log.output(log.entry)
expect_equal(out, "[ERROR] MESSAGE\n\nCreated dump file: dump_123.rda\n\nCompact call stack:\n\n\nFull call stack:\n\n\n")
out <- build.log.output(log.entry, include.severity = FALSE)
expect_false(grepl("ERROR", out, fixed = TRUE))
out <- build.log.output(log.entry, include.timestamp = TRUE)
expect_true(grepl("2010-12-31 09:00:00", out, fixed = TRUE))
})
test_that("platform-specific newline works", {
log.entry <- tryCatchLog:::build.log.entry(Sys.time(), "ERROR", "MESSAGE", "stack.trace", "dump.file.name", 0)
out <- build.log.output(log.entry)
expect_true(grepl("\n", out, fixed = TRUE))
with_mock(
platform.NewLine = function() return("<platform_newline>"),
out <- build.log.output(log.entry, use.platform.newline = TRUE),
expect_false(grepl("\n", out, fixed = TRUE)),
expect_true(grepl("<platform_newline>", out, fixed = TRUE))
)
})
test_that("log output includes execution.context.msg", {
load("stack_trace.RData")
log.entry <- tryCatchLog:::build.log.entry(Sys.time(), "ERROR", "msg", "ctx1", stack.trace, "", 0)
out1 <- tryCatchLog::build.log.output(log.entry, include.full.call.stack = FALSE)
expected1 <- "[ERROR] msg {execution.context.msg: ctx1}\n\nCompact call stack:\n 1 tryLog(log(\"abc\"))\n 2 tryLog.R
expect_equal(out1, expected1)
log.entry <- tryCatchLog:::build.log.entry(Sys.time(), "ERROR", "msg", NA_character_, stack.trace, "", 0)
out2 <- tryCatchLog::build.log.output(log.entry, include.full.call.stack = FALSE)
expected2 <- "[ERROR] msg\n\nCompact call stack:\n 1 tryLog(log(\"abc\"))\n 2 tryLog.R
expect_equal(out2, expected2)
})
test_that("log output contains execution.context.msg", {
expect_error(expect_warning(tryCatchLog({
log(-1); log("abc")
}, execution.context.msg = "my context")
)
)
log.entries <- last.tryCatchLog.result()
expect_equal(NROW(log.entries), 2)
out1 <- build.log.output(log.entries)
expect_match(out1, paste0(".*\\[WARN\\] NaNs produced \\{execution\\.context\\.msg: my context\\}",
".*",
"\\[ERROR\\] non-numeric argument to mathematical function \\{execution\\.context\\.msg: my context\\}.*"))
})
|
xP <- function(x, d=NULL, unit=NULL, semi=FALSE) {
if (is.null(d))
digits_d <- getOption("digits_d")
else
digits_d <- d
if (!is.na(x)) {
neg.flag <- FALSE
if (!is.null(unit)) {
if (unit == "dollar"){
digits_d <- 2
if (x < 0) {
neg.flag <- TRUE
x <- abs(x)
}
}
}
tx <- formatC(x, digits=digits_d, big.mark=",", format="f")
if (!is.null(unit)) {
if (unit != "dollar") {
if (!semi)
tx <- paste(tx, unit)
else
tx <- paste(tx, "\\:", unit)
}
else {
if (!neg.flag)
tx <- paste("$", tx, sep="")
else
tx <- paste("-$", tx, sep="")
}
}
}
else
tx <- ""
return(tx)
}
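# Illustrative usage sketch (added; not part of the original source); assumes
# the "digits_d" option is set, as the surrounding package does:
#   options(digits_d = 2)
#   xP(1234.5)                    #> "1,234.50"
#   xP(-3.5, unit = "dollar")     #> "-$3.50"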
|
setClass("pim.poset",
slots = c(compare="character",
nobs="integer"),
contains = "environment",
validity= function(object){
out <- TRUE
if(length(object@compare) !=1 ){
out <- "Compare should be a single value."
} else if(length(nobs) !=1){
out <- "nobs should be a single value."
} else if(!object@compare %in% c("unique",
"all",
"custom")){
out <- "Compare should be any of unique, all or custom."
}
out
})
|
HqzBeta <-
function(NorP, NorPexp = NULL, q = 1, Z = diag(length(NorP)), ...)
{
UseMethod("HqzBeta")
}
HqzBeta.ProbaVector <-
function(NorP, NorPexp = NULL, q = 1, Z = diag(length(NorP)), ..., CheckArguments = TRUE, Ps = NULL, Pexp = NULL)
{
if (missing(NorP)){
if (!missing(Ps)) {
NorP <- Ps
} else {
stop("An argument NorP or Ps must be provided.")
}
}
if (missing(NorPexp)){
if (!missing(Pexp)) {
NorPexp <- Pexp
} else {
stop("An argument NorPexp or Pexp must be provided.")
}
}
if (CheckArguments)
CheckentropartArguments()
if (length(NorP) != length(NorPexp)) {
stop("NorP and NorPexp should have the same length.")
}
if (is.null(colnames(Z)) | is.null(names(NorP))) {
if (ncol(as.matrix(Z)) != length(NorP))
stop("The matrix dimension must equal the probability vector length.")
} else {
if (!setequal(names(NorP), names(NorPexp)))
stop("NorP and NorPexp should have the same names.")
NorPexp <- NorPexp[names(NorP)]
if (length(setdiff(names(NorP), colnames(Z))) != 0)
stop("Some species are missing in the similarity matrix.")
Z <- as.matrix(Z)[names(NorP), names(NorP)]
}
Zps <- Z %*% NorP
Zpexp <- Z %*% NorPexp
Zps <- Zps[NorP != 0]
Zpexp <- Zpexp[NorP != 0]
NorPexp <- NorPexp[NorP != 0]
NorP <- NorP[NorP != 0]
dataBeta <- NorP * (lnq(1/Zpexp, q)-lnq(1/Zps, q))
entropy <- sum(dataBeta)
names(entropy) <- "None"
return (entropy)
}
HqzBeta.AbdVector <-
function(NorP, NorPexp = NULL, q = 1, Z = diag(length(NorP)), Correction = "Best", ..., CheckArguments = TRUE, Ns = NULL, Nexp = NULL)
{
if (missing(NorP)){
if (!missing(Ns)) {
NorP <- Ns
} else {
stop("An argument NorP or Ns must be provided.")
}
}
if (missing(NorPexp)){
if (!missing(Nexp)) {
NorPexp <- Nexp
} else {
stop("An argument NorPexp or Nexp must be provided.")
}
}
return (bcHqzBeta(Ns=NorP, Nexp=NorPexp , q=q, Z=Z, Correction=Correction, CheckArguments=CheckArguments))
}
HqzBeta.integer <-
function(NorP, NorPexp = NULL, q = 1, Z = diag(length(NorP)), Correction = "Best", ..., CheckArguments = TRUE, Ns = NULL, Nexp = NULL)
{
if (missing(NorP)){
if (!missing(Ns)) {
NorP <- Ns
} else {
stop("An argument NorP or Ns must be provided.")
}
}
if (missing(NorPexp)){
if (!missing(Nexp)) {
NorPexp <- Nexp
} else {
stop("An argument NorPexp or Nexp must be provided.")
}
}
return (bcHqzBeta(Ns=NorP, Nexp=NorPexp, q=q, Z=Z, Correction=Correction, CheckArguments=CheckArguments))
}
HqzBeta.numeric <-
function(NorP, NorPexp = NULL, q = 1, Z = diag(length(NorP)), Correction = "Best", ..., CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
if (missing(NorP)){
if (!missing(Ps)) {
NorP <- Ps
} else {
if (!missing(Ns)) {
NorP <- Ns
} else {
stop("An argument NorP or Ps or Ns must be provided.")
}
}
}
if (missing(NorPexp)){
if (!missing(Pexp)) {
NorPexp <- Pexp
} else {
if (!missing(Nexp)) {
        NorPexp <- Nexp
} else {
stop("An argument NorPexp or Pexp or Nexp must be provided.")
}
}
}
if (abs(sum(NorP) - 1) < length(NorP)*.Machine$double.eps) {
return (HqzBeta.ProbaVector(NorP, NorPexp, q=q, Z=Z, CheckArguments=CheckArguments))
} else {
return (HqzBeta.AbdVector(NorP, NorPexp, q=q, Z=Z, Correction=Correction, CheckArguments=CheckArguments))
}
}
bcHqzBeta <-
function(Ns, Nexp = NULL, q = 1, Z = diag(length(Ns)), Correction = "Best", CheckArguments = TRUE)
{
if (CheckArguments)
CheckentropartArguments()
if (length(Ns) != length(Nexp)) {
stop("Ns and Nexp should have the same length.")
}
if (Correction == "None" | Correction == "Best") {
return (HqzBeta.ProbaVector(Ns/sum(Ns), Nexp/sum(Nexp), q, Z, CheckArguments=FALSE))
}
warning("Correction was not recognized")
return (NA)
}
|
library(projmgr)
library(gt)
library(tidyverse)
library(png)
myrepo <- create_repo_ref("rich-iannone", "pointblank")
issues <- get_issues(myrepo, state = "open")
issues_df <- dplyr::as_tibble(parse_issues(issues))
tbl <-
issues_df %>%
dplyr::filter(!is.na(milestone_title)) %>%
dplyr::select(number, title, labels_name, milestone_title) %>%
tidyr::unnest(labels_name) %>%
dplyr::filter(grepl("(Type|Difficulty|Effort|Priority|Release)", labels_name)) %>%
dplyr::mutate(labels_name = ifelse(labels_name == "Release", "Type: ✈ Release", labels_name)) %>%
dplyr::filter(!grepl("Question", labels_name)) %>%
tidyr::separate(labels_name, into = c("category", "value"), sep = ": ") %>%
tidyr::pivot_wider(names_from = category, values_from = value) %>%
dplyr::rename(
type = Type,
difficulty = Difficulty,
effort = Effort,
priority = Priority
) %>%
tidyr::separate(
col = milestone_title,
into = c("major", "minor", "patch"),
sep = "\\.",
remove = FALSE
) %>%
dplyr::select(
number, title, milestone_title, major, minor, patch, type, difficulty, effort, priority
) %>%
dplyr::mutate(major = gsub("v", "", major)) %>%
dplyr::mutate_at(.vars = vars(major, minor, patch), .funs = as.integer) %>%
dplyr::mutate(difficulty = gsub(".*?([1-3]).*", "\\1", difficulty)) %>%
dplyr::mutate(effort = gsub(".*?([1-3]).*", "\\1", effort)) %>%
dplyr::mutate(priority = gsub(".*?([1-3]).*", "\\1", priority)) %>%
dplyr::mutate(priority = ifelse(grepl("[^1-3]", priority), 4, priority)) %>%
dplyr::mutate(type = gsub(".*?Type: (.*?)\\\"\\)", "\\1", type)) %>%
dplyr::mutate_at(.vars = vars(priority, difficulty, effort), .funs = as.numeric) %>%
dplyr::arrange(
major,
minor,
dplyr::desc(priority),
type,
dplyr::desc(difficulty),
dplyr::desc(effort)
) %>%
  dplyr::mutate(number = paste0("#", number)) %>%  # "#" issue-number prefix assumed; literal lost in extraction
dplyr::select(-c(major, minor, patch))
gt_tbl <-
tbl %>%
gt(
rowname_col = "number",
groupname_col = "milestone_title",
id = "report"
) %>%
tab_header(title = md("Upcoming Tasks and Milestones for **pointblank**")) %>%
fmt_markdown(columns = title) %>%
fmt_missing(columns = c(priority, difficulty, effort), missing_text = "") %>%
data_color(
columns = c(priority, difficulty, effort),
colors = scales::col_numeric(
palette = c("
domain = c(1, 4),
na.color = "
),
alpha = 0.6,
apply_to = "fill"
) %>%
tab_style(
style = cell_text(color = "black", weight = "600"),
locations = cells_body(columns = c(priority, difficulty, effort))
) %>%
tab_style(
style = cell_borders(
sides = "left", style = "dashed", weight = px(2), color = "
locations = cells_body(columns = type)
) %>%
tab_style(
style = cell_borders(
sides = "right", style = "solid", weight = px(1), color = "
locations = cells_body(columns = c(priority, difficulty))
) %>%
cols_label(
title = "",
type = ""
) %>%
cols_align("center", columns = c(priority, difficulty, effort)) %>%
cols_width(
1 ~ px(60),
title ~ px(400),
c(priority, difficulty, effort) ~ px(75),
everything() ~ px(140)
) %>%
opt_align_table_header(align = "left") %>%
opt_all_caps() %>%
opt_table_outline(style = "none") %>%
tab_options(
heading.border.bottom.style = "none",
column_labels.border.top.style = "none",
column_labels.font.size = px(11),
data_row.padding = px(4)
) %>%
text_transform(
locations = cells_body(columns = priority),
fn = function(x) {
ifelse(x == "4", "♨︎", x)
}
) %>%
tab_style(
style = "height: 50px",
locations = cells_body(
columns = everything(),
rows = !grepl("Release", type)
)
) %>%
tab_style(
style = "
height: 75px;
    background: linear-gradient(180deg, #e8f5e9, #c8e6c9); /* gradient color stops assumed; originals lost */
background-size: 100% 100%;
-webkit-animation: AnimationName 5s ease infinite;
-moz-animation: AnimationName 5s ease infinite;
-o-animation: AnimationName 5s ease infinite;
animation: AnimationName 5s ease infinite;",
locations = cells_body(
columns = c(title, type, priority, difficulty, effort),
rows = grepl("Release", type)
)
) %>%
tab_style(
style = cell_borders(
sides = c("left", "right"),
style = "solid",
weight = "0"
),
locations = cells_body(
columns = c(title, type, priority, difficulty, effort),
rows = grepl("Release", type)
)
)
temp_file <- tempfile(fileext = ".png")
gtsave(gt_tbl, filename = temp_file)
tbl_img <- png::readPNG(temp_file, native = TRUE, info = TRUE)
tbl_dim <- attr(tbl_img, "info")[["dim"]]
tbl_w <- ceiling(tbl_dim[1] / 2)
tbl_h <- ceiling(tbl_dim[2] / 2) + 5
svg_object <-
glue::glue(
"<svg fill=\"none\" viewBox=\"0 0 {tbl_w} {tbl_h}\" width=\"{tbl_w}\" height=\"{tbl_h}\" xmlns=\"http://www.w3.org/2000/svg\">
<foreignObject width=\"100%\" height=\"100%\">
<div xmlns=\"http://www.w3.org/1999/xhtml\">
{gt::as_raw_html(gt_tbl)}
</div>
</foreignObject>
</svg>
"
) %>%
as.character() %>%
gsub("style>", ">", ., fixed = TRUE) %>%
gsub("<p>", "<p style='margin:0'>", ., fixed = TRUE) %>%
gsub(
"; width: 0px\">",
"; width: 0px;
@-webkit-keyframes AnimationName {0% {background-position:50% 0%} 50% {background-position:51% 100%} 100% {background-position:50% 0%}}
@-moz-keyframes AnimationName {0% {background-position:50% 0%} 50% {background-position:51% 100%} 100% {background-position:50% 0%}}
@-o-keyframes AnimationName {0% {background-position:50% 0%} 50% {background-position:51% 100%} 100% {background-position:50% 0%}}
@keyframes AnimationName {0% {background-position:50% 0%} 50% {background-position:51% 100%} 100% {background-position:50% 0%}}
\">
",
., fixed = TRUE)
cat(svg_object, file = "./man/figures/pointblank-milestones.svg")
|
as.gganim <- function(x) {
if (is.gganim(x)) return(x)
if (!is.ggplot(x)) stop('Only knows how to convert ggplot to gganim', call. = FALSE)
class(x) <- c('gganim', class(x))
# Keep 'ggraph' ahead of 'gganim' in the class vector so ggraph's plot
# methods still take precedence for graph plots.
if (inherits(x, 'ggraph')) {
cl <- class(x)
ggraph_ind <- cl == 'ggraph'
cl <- c(cl[ggraph_ind], cl[!ggraph_ind])
class(x) <- cl
}
x$transition <- transition_null()
x$view <- view_static()
x$shadow <- shadow_null()
x$transmuters <- transmuter_list()
x$ease <- ease_aes('linear')
x
}
is.gganim <- function(x) inherits(x, 'gganim')
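# A minimal usage sketch for the coercion above (assumes ggplot2 and gganimate
# are attached, since as.gganim() calls transition_null(), view_static(), etc.):
p <- ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) + ggplot2::geom_point()
anim <- as.gganim(p)
stopifnot(is.gganim(anim), is.gganim(as.gganim(anim)))  # coercion is idempotent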
plot.gganim <- function(x, frame = 50, total = 100, detail = 1, newpage = is.null(vp), vp = NULL, ...) {
plot <- prerender(x, total * detail)
plot$scene$plot_frame(plot, frame * detail, newpage = newpage, vp = vp)
invisible(x)
}
print.gganim <- function(x, ...) {
anim <- animate(x, ...)
print(anim, info = FALSE)
}
knit_print.gganim <- function(x, options, ...) {
knitr_options <- get_knitr_options(options)
if (knitr::is_latex_output()) {
knitr_options$device <- 'current'
do.call(animate, c(list(plot = x), knitr_options))
} else {
anim <- do.call(animate, c(list(plot = x), knitr_options))
knitr::knit_print(anim, options, ...)
}
}
get_knitr_options <- function(options, unlist = TRUE) {
opt <- options$gganimate
opt$device <- opt$device %||% options$dev
if (is.null(opt$width) || is.null(opt$height)) {
opt$width <- options$fig.width
opt$height <- options$fig.height
opt$units <- 'in'
opt$res <- options$dpi
}
if (unlist) {
c(opt, options$dev.args)
} else {
opt$dev_args <- options$dev.args %||% list()
opt$dev_args <- modifyList(opt$dev_args, opt[c('width', 'height', 'units', 'res')])
opt
}
}
set_nframes <- function(plot, n) {
plot$nframes <- n
plot
}
get_nframes <- function(plot) {
if (is.null(plot$scene)) plot$nframes
else plot$scene$nframes
}
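# Continuing the sketch above: the frame count sits on the plot object until a
# scene exists, at which point get_nframes() defers to scene$nframes.
anim <- set_nframes(anim, 60)
get_nframes(anim)  # 60 while anim$scene is still NULL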
|
new_ml_model_als <- function(pipeline_model,
formula,
dataset) {
m <- new_ml_model_recommendation(
pipeline_model, formula,
dataset = dataset,
class = "ml_model_als"
)
m$`.jobj` <- spark_jobj(m)
m
}
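# Hypothetical calling context for the constructor above: in sparklyr,
# ml_als() fits the ALS pipeline and ultimately builds this model object.
# The connection and data names below are assumptions, hence left commented out.
# sc <- sparklyr::spark_connect(master = "local")
# ratings <- dplyr::copy_to(sc, ratings_df, "ratings")
# als_model <- sparklyr::ml_als(ratings, rating ~ user + item)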
|
library(OpenMx)
options(width=120)
got <- suppressWarnings(try(load("models/nightly/data/mlcfa.xxm.RData")))
if (is(got, "try-error")) load("data/mlcfa.xxm.RData")  # fall back when run from a different working directory
indicators <- colnames(mlcfa.student)[3:6]
teacherModel <- mxModel(
"teacherModel", type="RAM",
latentVars="psi",
mxData(mlcfa.student[!duplicated(mlcfa.student$teacher),], "raw",
primaryKey="teacher"),
mxPath("psi", arrows=2, values=1, lbound=1e-2, ubound=3))
studentModel <- mxModel(
"studentModel", type="RAM", teacherModel,
latentVars="psi", manifestVars=indicators,
mxData(mlcfa.student, "raw"),
mxPath("psi", arrows=2, values=1, lbound=1e-2, ubound=5),
mxPath(indicators, arrows=2, values=1, lbound=1e-4, ubound=3),
mxPath("psi", indicators, free=c(FALSE, rep(TRUE, 3)),
values=1, lbound=0, ubound=5),
mxPath("teacherModel.psi", indicators,
free=c(FALSE, rep(TRUE, 3)), values=1, lbound=0, ubound=5,
joinKey="teacher"),
mxPath('one', indicators))
studentModel$expectation$.useSufficientSets <- FALSE
studentModel <- mxRun(studentModel)
summary(studentModel)
omxCheckCloseEnough(studentModel$output$fit, 14463.33, 1e-2)
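# Re-score the fitted model with sufficient sets enabled; the fit statistic
# should reproduce the value checked above.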
studentModel$expectation$.useSufficientSets <- TRUE
altFit <- mxRun(mxModel(studentModel,
mxComputeSequence(list(
mxComputeOnce('fitfunction', 'fit'),
mxComputeReportExpectation()))))
omxCheckCloseEnough(altFit$output$fit, 14463.33, 1e-2)
if (FALSE) {  # manual debugging aid: inspect the per-group analysis layout
layout <- studentModel$expectation$debug$layout
head(layout[layout$group==2,],n=20)
}
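# Disabling rampart (OpenMx's multilevel rotation speedup) must not change the fit either.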
f1 <- studentModel
f1$expectation$.rampartCycleLimit <- 0L
f1 <- mxRun(mxModel(f1, mxComputeOnce('fitfunction', 'fit')))
omxCheckCloseEnough(f1$output$fit, 14463.33, 1e-2)
f2 <- omxSetParameters(studentModel, labels=names(coef(studentModel)),
values=c(0.8838, 1.0983, 0.7628,
0.7915, 1.2093, 0.8297, 0.5558,
1.3788,
1.1687, 1.2239, 0.6455,
46.0726, 47.0572, 46.4774, 48.0057,
0.5651))
omxCheckCloseEnough(max(abs(coef(f2) - coef(altFit))), 0, 1e-2)
|
context("Test: getGenome()")
test_that("The getGenome() interface works properly for NCBI RefSeq (including when command is repeated)..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "refseq",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "refseq",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for NCBI RefSeq using taxid..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "refseq",
organism = "559292",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for NCBI RefSeq using assembly id..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "refseq",
organism = "GCF_000146045.2",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for NCBI Genbank (including when command is repeated)..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for NCBI Genbank using taxid (including when command is repeated)..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "559292",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "559292",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for NCBI Genbank using accession ids (including when command is repeated)..", {
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "GCA_000146045.2",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "genbank",
organism = "GCA_000146045.2",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for ENSEMBL (including repeating function call)..",{
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "Saccharomyces cerevisiae",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for ENSEMBL using taxid (including repeating function call)..",{
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "4932",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "4932",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() interface works properly for ENSEMBL using accession id (including repeating function call)..",{
skip_on_cran()
skip_on_travis()
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "GCA_000146045.2",
path = tempdir()
),
format = "fasta"
))
expect_output(read_genome(
getGenome(
db = "ensembl",
organism = "GCA_000146045.2",
path = tempdir()
),
format = "fasta"
))
})
test_that("The getGenome() error messages work properly for ENSEMBL..", {
skip_on_cran()
skip_on_travis()
expect_output(getGenome(
db = "ensembl",
organism = "Saccharomyces cerevisi",
path = tempdir()
))
})
test_that("The getGenome() error messages work properly for NCBI RefSeq", {
skip_on_cran()
skip_on_travis()
expect_equal(getGenome(
db = "refseq",
organism = "Saccharomycesi",
path = tempdir()
), "Not available")
})
|
calculate_sleep_wake = function(data, FUN, sleep_start = 0, sleep_end = 6, calculate = c("sleep", "wake", "both"), ...) {
append_str = function(str_vec, to_append) {
sapply(str_vec, FUN = function (x) {paste0(x, to_append)})
}
FUN = match.fun(FUN)
calculate = match.arg(tolower(calculate), c("sleep", "wake", "both"))  # pick one option; bare defaults otherwise leave a length-3 vector in the if() below
if (sleep_start == sleep_end) {
stop("Sleep start cannot equal sleep end, please change one of the inputs")
}
if (sleep_start > sleep_end) {
filter_gate = (lubridate::hour(data$time) >= sleep_start | lubridate::hour(data$time) < sleep_end)
} else {
filter_gate = (lubridate::hour(data$time) >= sleep_start & lubridate::hour(data$time) < sleep_end)
}
if (calculate == "sleep") {
filtered_data = dplyr::filter(data, filter_gate)
out = FUN(filtered_data, ...)
} else if (calculate == "wake") {
filtered_data = dplyr::filter(data, !filter_gate)
out = FUN(filtered_data, ...)
} else if (calculate == "both") {
filtered_data = dplyr::filter(data, filter_gate)
inside = FUN(filtered_data, ...)
filtered_data = dplyr::filter(data, !filter_gate)
outside = FUN(filtered_data, ...)
innames = colnames(inside)
outnames = colnames(outside)
if (is.vector(data)) {
out = inside
out$outside = outside
colnames(out) = c(append_str(innames[2:length(innames)]," sleep"), append_str(outnames[2:length(outnames)]," wake"))
return(out)
}
if (any(colnames(outside) == "roc")) {
inside$row = outside$row = 1:length(inside$id)
out = dplyr::left_join(inside, outside[, 2:3], by = "row") %>%
dplyr::select(-row)
} else {out = dplyr::left_join(inside, outside, by = "id")}
colnames(out) = c("id", append_str(innames[2:length(innames)]," sleep"), append_str(outnames[2:length(outnames)]," wake"))
} else {
stop("Please enter one of 'sleep', 'wake', 'both' for calculate.")
}
return(out)
}
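# A minimal sketch of calling calculate_sleep_wake(); the toy glucose data and
# the summary function here are hypothetical, not part of the original code.
demo_data <- data.frame(
id = "subject-1",
time = as.POSIXct("2021-01-01 00:00:00", tz = "UTC") + 3600 * 0:23,
gl = rnorm(24, mean = 110, sd = 15)
)
mean_gl <- function(d, ...) data.frame(id = d$id[1], mean_gl = mean(d$gl))
calculate_sleep_wake(demo_data, mean_gl, sleep_start = 0, sleep_end = 6,
calculate = "both")  # returns id, "mean_gl sleep", "mean_gl wake"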
|
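# Auto-generated round-trip test: the parenthesis primitive `(` returns its
# argument unchanged, so the Latin-1 character vector must survive intact.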
expected <- eval(parse(text="c(\"«L\", \"tin-1 \", \"\", \"\", \"ented \", \"h\", \"rs»: éè øØ å<Å æ<Æ é éè\")"));
test(id=0, code={
argv <- eval(parse(text="list(c(\"«L\", \"tin-1 \", \"\", \"\", \"ented \", \"h\", \"rs»: éè øØ å<Å æ<Æ é éè\"))"));
do.call(`(`, argv);
}, o=expected);
|