# obtain predicted probabilities for each class from the trained classifier
sms_test_prob <- predict(sms_classifier, sms_test, type = "raw")
head(sms_test_prob)
sms_results <- data.frame(actual_type = sms_test_labels,
predict_type = sms_test_pred,
prob_spam = round(sms_test_prob[ , 2], 5),
prob_ham = round(sms_test_prob[ , 1], 5))
# alternatively, load the same evaluation results from a pre-computed CSV
sms_results <- read.csv("sms_results.csv", stringsAsFactors = TRUE)
head(sms_results)
head(subset(sms_results, prob_spam > 0.40 & prob_spam < 0.60))
head(subset(sms_results, actual_type != predict_type))
table(sms_results$actual_type, sms_results$predict_type)
xtabs(~ actual_type + predict_type, sms_results)
library(gmodels)
CrossTable(sms_results$actual_type, sms_results$predict_type)
# accuracy and error rate from the confusion matrix cells
(152 + 1203) / (152 + 1203 + 4 + 31)
(4 + 31) / (152 + 1203 + 4 + 31)
1 - 0.9748201
library(caret)
confusionMatrix(sms_results$predict_type, sms_results$actual_type, positive = "spam")
# kappa by hand: observed agreement pr_a versus chance agreement pr_e
pr_a <- 0.865 + 0.109
pr_a
pr_e <- 0.868 * 0.888 + 0.132 * 0.112
pr_e
k <- (pr_a - pr_e) / (1 - pr_e)
k
library(vcd)
Kappa(table(sms_results$actual_type, sms_results$predict_type))
library(irr)
kappa2(sms_results[1:2])
# sensitivity (true positive rate) and specificity (true negative rate)
sens <- 152 / (152 + 31)
sens
spec <- 1203 / (1203 + 4)
spec
library(caret)
sensitivity(sms_results$predict_type, sms_results$actual_type, positive = "spam")
specificity(sms_results$predict_type, sms_results$actual_type, negative = "ham")
# precision (positive predictive value) and recall
prec <- 152 / (152 + 4)
prec
rec <- 152 / (152 + 31)
rec
library(caret)
posPredValue(sms_results$predict_type, sms_results$actual_type, positive = "spam")
sensitivity(sms_results$predict_type, sms_results$actual_type, positive = "spam")
# F-measure: harmonic mean of precision and recall (two equivalent forms)
f <- (2 * prec * rec) / (prec + rec)
f
f <- (2 * 152) / (2 * 152 + 4 + 31)
f
# ROC curve and AUC from the predicted spam probabilities
library(pROC)
sms_roc <- roc(sms_results$actual_type, sms_results$prob_spam)
plot(sms_roc, main = "ROC curve for SMS spam filter", col = "blue", lwd = 2, legacy.axes = TRUE)
sms_results_knn <- read.csv("sms_results_knn.csv")
sms_roc_knn <- roc(sms_results$actual_type, sms_results_knn$p_spam)
plot(sms_roc_knn, col = "red", lwd = 2, add = TRUE)
auc(sms_roc)
auc(sms_roc_knn)
library(caret)
credit <- read.csv("credit.csv", stringsAsFactors = TRUE)
# simple random holdout: 50% train, 25% validation, 25% test
random_ids <- order(runif(1000))
credit_train <- credit[random_ids[1:500],]
credit_validate <- credit[random_ids[501:750], ]
credit_test <- credit[random_ids[751:1000], ]
in_train <- createDataPartition(credit$default, p = 0.75, list = FALSE)
credit_train <- credit[in_train, ]
credit_test <- credit[-in_train, ]
# stratified 10-fold cross-validation folds
folds <- createFolds(credit$default, k = 10)
str(folds)
credit01_test <- credit[folds$Fold01, ]
credit01_train <- credit[-folds$Fold01, ]
library(caret)
library(C50)
library(irr)
credit <- read.csv("credit.csv", stringsAsFactors = TRUE)
RNGversion("3.5.2")
set.seed(123)
folds <- createFolds(credit$default, k = 10)
# run 10-fold CV: train C5.0 on 9 folds, compute kappa on the held-out fold
cv_results <- lapply(folds, function(x) {
credit_train <- credit[-x, ]
credit_test <- credit[x, ]
credit_model <- C5.0(default ~ ., data = credit_train)
credit_pred <- predict(credit_model, credit_test)
credit_actual <- credit_test$default
kappa <- kappa2(data.frame(credit_actual, credit_pred))$value
return(kappa)
})
str(cv_results)
mean(unlist(cv_results))
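# A hedged companion sketch: the same folds can report accuracy alongside
# kappa (uses credit, folds, and C5.0() exactly as defined above).
cv_acc <- sapply(folds, function(x) {
model <- C5.0(default ~ ., data = credit[-x, ])
mean(predict(model, credit[x, ]) == credit[x, ]$default)
})
mean(cv_acc)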
setGeneric("qoffset_z", function(object) standardGeneric("qoffset_z"))
setMethod("qoffset_z", "nifti", function(object) object@"qoffset_z")
setGeneric("qoffset_z<-", function(object, value) standardGeneric("qoffset_z<-"))
setMethod("qoffset_z<-",
signature(object="nifti"),
function(object, value) {
if ( "qoffset_z" %in% slotNames(object) ){
object@"qoffset_z" <- value
audit.trail(object) <-
niftiAuditTrailEvent(object, "modification", match.call(),
paste("qoffset_z <-", value))
} else {
warning("qoffset_z is not in slotNames of object")
}
return(object)
})
setGeneric("qoffset.z", function(object) standardGeneric("qoffset.z"))
setMethod("qoffset.z", "nifti", function(object) object@"qoffset_z")
setGeneric("qoffset.z<-", function(object, value) standardGeneric("qoffset.z<-"))
setMethod("qoffset.z<-",
signature(object="nifti"),
function(object, value) {
if ( "qoffset_z" %in% slotNames(object) ){
object@"qoffset_z" <- value
audit.trail(object) <-
niftiAuditTrailEvent(object, "modification", match.call(),
paste("qoffset_z <-", value))
} else {
warning("qoffset_z is not in slotNames of object")
}
return(object)
})
undo_interleave <- function(x){
# positions of the non-missing values
l <- seq(1, length(x[!is.na(x)]), 1)
evens <- l[l %% 2 != 1]
odds <- l[l %% 2 == 1]
# x holds the odd-position values first, then the even-position values;
# this index restores the original interleaved order
resort_index <- order(c(seq_along(odds), seq_along(evens)))
if(anyNA(x)){
# trailing NAs keep their original positions
pad <- seq(max(c(odds, evens)) + 1, length(x), 1)
resort_index <- c(resort_index, pad)
}
x <- x[resort_index]
return(x)
}
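# quick check: values stored odds-first then evens-after are restored to
# their original interleaved order
undo_interleave(c("a", "c", "e", "b", "d", "f"))
# "a" "b" "c" "d" "e" "f"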
context("Test date_between")
test_that("date_between works as expected", {
date1 <- as.Date("2016-02-22")
date2 <- as.Date("2016-02-11")
date_column <- "STD_1"
expect_identical(
date_between(date_column, date1),
"STD_1 between to_date('2016-02-22', 'yyyy-mm-dd') and to_date('2016-02-22', 'yyyy-mm-dd')")
expect_identical(
date_between(date_column, c(date1, date2)),
"STD_1 between to_date('2016-02-11', 'yyyy-mm-dd') and to_date('2016-02-22', 'yyyy-mm-dd')")
})
test_that("date_between checks dates", {
date1 <- as.Date("2016-02-22")
date_column <- "STD"
expect_error(date_between(date_column, as.POSIXct(date1)))
expect_error(date_between(date_column, as.Date(NA)))
expect_error(date_between(date_column, date1 + 1:11))
expect_error(date_between(date_column, date1[0]))
expect_error(date_between(date_column, NULL))
})
test_that("date_between checks column_names", {
date1 <- as.Date("2016-02-22")
expect_error(date_between("", date1))
expect_error(date_between(NA, date1))
expect_error(date_between(1L, date1))
expect_error(date_between("'wrong'", date1))
expect_error(date_between("wrong wrong", date1))
expect_error(date_between("123", date1))
expect_error(date_between("ABC$", date1))
})
plotscore <-
function(param=c(2,.5), fam="pow", bounds, reverse=FALSE, legend=TRUE, ...){
if(length(param) > 2) stop("plotscore is only for two-alternative rules.\n")
dots <- list(...)
if(exists("dots$scaling")){
if(dots$scaling) bounds <- c(0,1)
}
p <- seq(.01,.99,.01)
if(missing(bounds)) bounds <- NULL
sc1 <- calcscore(p, rep(1,length(p)), fam, param, bounds=bounds, reverse=reverse)
sc0 <- calcscore(p, rep(0,length(p)), fam, param, bounds=bounds, reverse=reverse)
ymin <- min(sc1,sc0)
ymax <- max(sc1,sc0)
yl <- c(ymin - .05*(ymax - ymin), ymax + .05*(ymax - ymin))
main.arg <- list(x=p, y=sc1)
supplied <- list(...)
default <- list(type="l", ylim=yl, xlab="Forecast", ylab="Score")
nomatch <- setdiff(c("type","xlab","ylab","ylim"), names(supplied))
plot.args <- c(main.arg, supplied, default[nomatch])
do.call(plot, plot.args)
lines(p, sc0, lty=2)
if(legend) legend(.8, yl[2] - .1, c("d=1","d=0"), lty=c(1,2))
}
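# Hedged usage sketch (assumes calcscore(), from the same scoring package,
# is available): plot a two-alternative rule from the power family.
# plotscore(param = c(2, .5), fam = "pow")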
plot.lacfCI <-
function (x, plotcor = TRUE, type = "line", lags = 0:as.integer(10 *
log10(nrow(x$lacf))), tcex = 1, lcol = 1, llty = 1,
ylim = NULL, segwid = 1, segandcross = TRUE, conf.level = 0.95,
plot.it=TRUE, xlab, ylab, sub, ...)
{
if (conf.level < 0 || conf.level > 1)
stop("conf.level has to be between 0 and 1")
siz <- 1 - conf.level
qval <- qnorm(1 - siz/2)
XCI <- x
x <- x$the.lacf
nlags <- length(lags)
ntime <- nrow(x$lacf)
if (max(lags) + 1 > ncol(x$lacf))
stop("Maximum lag is too high")
if (length(lcol) == 1)
lcol <- rep(lcol, length(lags))
if (length(llty) == 1)
llty <- rep(llty, length(lags))
if (length(lcol) != length(lags))
stop("Length of lcol vector has to be 1 or the same as the length of the lags vector")
if (length(llty) != length(lags))
stop("Length of llty vector has to be 1 or the same as the length of the lags vector")
if (type == "line") {
if (plotcor == TRUE) {
if (plot.it==TRUE) {
if (missing(xlab))
xlab <- "Time"
if (missing(ylab))
ylab <- "Autocorrelation"
plot(c(1, max(ntime)), c(-1, 1), type = "n", xlab = xlab,
ylab = ylab, ...)
for (i in 1:nlags) {
lines(1:ntime, x$lacr[, 1 + lags[i]], col = lcol[i],
lty = llty[i])
pp <- seq(from = 1, to = ntime, length = 5)
text(pp, x$lacr[pp, 1 + lags[i]], labels = lags[i],
cex = tcex)
}
}
}
else {
yl <- range(x$lacf[,1+lags])
if (plot.it==TRUE) {
if (missing(xlab))
xlab <- "Time"
if (missing(ylab))
ylab <- "Autocovariance"
plot(c(1, max(ntime)), c(yl[1], yl[2]), type = "n", xlab = xlab,
ylab = ylab, ...)
for (i in 1:nlags) {
lines(1:ntime, x$lacf[, 1 + lags[i]], col = lcol[i],
lty = llty[i])
pp <- seq(from = 1, to = ntime, length = 5)
text(pp, x$lacf[pp, 1 + lags[i]], labels = lags[i],
cex = tcex)
}
}
ans <- x$lacf[, 1+ lags]
dimnames(ans) <- list(NULL, as.character(lags))
return(invisible(ans))
}
}
else if (type == "persp") {
if (plotcor == TRUE) {
m <- x$lacr[, lags + 1]
zlab <- "Autocorrelation"
}
else {
m <- x$lacf[, lags + 1]
zlab <- "Autocovariance"
}
if (plot.it==TRUE) {
if (missing(xlab))
xlab <- "Time"
if (missing(ylab))
ylab <- "Lag"
persp(x = 1:ntime, y = lags, z = m, xlab = xlab, # m is already the lag-subset matrix
ylab = ylab, zlab = zlab, ...)
}
}
else if (type == "acf") {
the.time <- XCI$nz
if (missing(sub))
sub <- paste("c(", the.time, ", lag)")
if (plotcor == TRUE) {
acfvals <- x$lacr[the.time, lags + 1]
if (missing(ylab))
ylab <- "Autocorrelation"
}
else {
acfvals <- x$lacf[the.time, lags + 1]
if (missing(ylab))
ylab <- "Autocovariance"
}
vlags <- XCI$lag
acvvar <- XCI$cvar
sv <- match(vlags, lags)
sw <- 0.2
x0v <- x1v <- yuv <- ylv <- NULL
for (i in 1:length(vlags)) {
if (!is.na(sv[i])) { # match() marks "no match" with NA, not NULL
x0v <- c(x0v, vlags[i] - sw/2)
x1v <- c(x1v, vlags[i] + sw/2)
yuv <- c(yuv, x$lacf[the.time, vlags[i] + 1] +
qval * sqrt(acvvar[i]))
ylv <- c(ylv, x$lacf[the.time, vlags[i] + 1] -
qval * sqrt(acvvar[i]))
}
else {
x0v <- c(x0v, NA) # keep vectors aligned with vlags
x1v <- c(x1v, NA)
yuv <- c(yuv, NA)
ylv <- c(ylv, NA)
}
}
if (is.null(ylim)) {
if (plotcor == FALSE) {
ylim <- range(c(yuv, ylv, min(acfvals, 0)), na.rm = TRUE)
}
else ylim <- range(min(acfvals, 0), 1)
}
if (plot.it==TRUE) {
if (missing(xlab))
xlab <- "Lag"
plot(c(0, max(lags)), c(min(acfvals, 0), 1), type = "n",
xlab = xlab, ylab = ylab, ylim = ylim, sub=sub, ...)
segments(x0 = lags, y0 = 0, x1 = lags, y1 = acfvals,
lwd = segwid)
abline(h = 0)
if (segandcross == TRUE)
points(lags, acfvals, pch = 18)
if (plotcor == FALSE) {
for (i in 1:length(vlags)) {
if (!is.na(sv[i])) {
polygon(x = c(x0v[i], x1v[i], x1v[i], x0v[i]),
y = c(ylv[i], ylv[i], yuv[i], yuv[i]), density = 50,
col = rgb(red = 0.9, green = 0.6, blue = 0.6))
}
}
}
}
return(invisible(acfvals))
}
}
lmnet <- function(Y, X, directed=TRUE, tmax=1, nodes=NULL, reweight=FALSE, type="exchangeable", tol=1e-6, maxit=1e4, ndstop=TRUE, verbose=FALSE)
{
tmax <- as.numeric(tmax)
directed <- as.logical(directed)
if(tmax == 1){
temp <- node_preprocess(Y,X,directed,nodes)
} else {
temp <- node_preprocess_time(Y,X,directed,nodes,tmax,type,subtract=NULL)
}
Y <- temp$Y ; X <- temp$X ; missing <- temp$missing ; row_list <- temp$row_list ; dyads <- temp$dyads ; n <- temp$n ; type <- temp$type
rm(temp)
reweight <- as.logical(reweight)
tol <- as.numeric(tol)
maxit <- as.numeric(maxit)
verbose <- as.logical(verbose)
if(sum(is.na(X))!=0){warning("NAs in X; no action taken.")}
if(missing & tmax > 1){
stop("Missing data not yet implemented for temporal data")
}
fit <- lm(Y ~ X - 1)
beta_ols <- coef(fit)
X <- model.matrix(fit)
p <- ncol(X)
XX <- solve(crossprod(X))
e <- Y - X %*% beta_ols
meat <- meat.E.row(row_list, X, e)
phi_ols <- meat$phi
v0 <- make.positive.var( XX %*% meat$M %*% XX )
Vhat_ols <- v0$V
Vflag_ols <- v0$flag
if(reweight){
if(tmax ==1){
fit_weighted <- GEE.est(row_list, Y, X, n, directed, tol.in=tol, beta_start=beta_ols, missing=missing, dyads=dyads, ndstop=ndstop, verbose=verbose)
} else if (tmax > 1){
fit_weighted <- GEE_est_time(Y, X, n, tmax, directed, type, write_dir=NULL, missing=missing, tol.in=tol, maxit=maxit, verbose=verbose)
} else {
stop("tmax must be a positive integer")
}
beta_weighted <- fit_weighted$beta
v0 <- make.positive.var( solve( fit_weighted$bread ) )
e <- fit_weighted$residuals
Vhat_weighted <- v0$V
Vflag_weighted <- v0$flag
phiout <- as.numeric(fit_weighted$phi)
nit <- as.numeric(fit_weighted$nit)
conv <- as.logical(fit_weighted$convergence)
if(!conv){warning("Iteratively reweighted least squares procedure stopped based on maximum number of iterations (did not converge)\n")}
betaout <- beta_weighted
Vout <- Vhat_weighted
flagout <- as.logical(Vflag_weighted == 1)
bread = Vout
W <- fit_weighted$W
} else {
beta_weighted <- Vhat_weighted <- Vflag_weighted <- nit <- tol <- conv <- NA
betaout <- beta_ols
Vout <- Vhat_ols
flagout <- as.logical(Vflag_ols == 1)
phiout=phi_ols
W <- diag(nrow(X))
bread <- XX
}
df <- nrow(X) - length(betaout) - 1
if(length(betaout) == ncol(X)){names(betaout) <- colnames(X)}
fitout <- list(call=match.call(), coefficients=betaout, residuals=e, vcov=Vout, fitted.values=X %*% betaout,
df=df, sigma=sqrt(phiout[1]),
reweight=reweight,
corrected=flagout, phi_hat=phiout, nit=nit, converged=conv,
X=X, nodes=nodes,
bread=bread, W=W,
tmax=tmax, type=type, ndstop=ndstop)
class(fitout) <- "lmnet"
return(fitout)
}
print.lmnet <- function(x, ...)
{
cat("\nCall: \n")
print(x$call)
cat("\nCoefficients:\n")
print(x$coefficients)
cat("\n")
}
coef.lmnet <- function(object, ...)
{
object$coefficients
}
vcov.lmnet <- function(object, ...)
{
object$vcov
}
summary.lmnet <- function(object, ...)
{
x <- object
out <- matrix(coef(x), ncol=1)
out <- cbind(out, sqrt(diag(vcov(x))))
out <- cbind(out, out[,1] / out[,2])
out <- cbind(out, 2 * pt(abs(out[,3]), df=x$df, lower.tail=FALSE)) # two-sided p-value
rownames(out) <- names(coef(x))
colnames(out) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
listout <- list(coefficients=out, call=x$call)
class(listout) <- "summary.lmnet"
return(listout)
}
print.summary.lmnet <- function(x, ...)
{
cat("\nCall:\n")
print(x$call)
cat("\nCoefficients:\n")
printCoefmat(x$coefficients)
}
plot.lmnet <- function(x, ...)
{
hist(scale(resid(x)), freq=F, xlab="standardized residuals", main="")
plot(fitted.values(x), scale(resid(x)), xlab="fitted values", ylab="standardized residuals", main="")
qqnorm(scale(resid(x)), main="Normal Q-Q Plot for residuals")
abline(0,1, col="red")
}
model.matrix.lmnet <- function(object, ...)
{
object$X
}
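# Hedged usage sketch (hypothetical data; assumes the package internals
# node_preprocess() and meat.E.row() used by lmnet() are loaded):
# n <- 10
# d <- n * (n - 1)                 # one row per directed dyad
# X <- cbind(1, rnorm(d))
# Y <- X %*% c(1, 2) + rnorm(d)
# fit <- lmnet(Y, X)               # OLS fit with exchangeable-error variance
# summary(fit)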
plotPostPredStats <- function(data,
prob = c(0.9, 0.95),
col = NULL,
side = "both") {
if (is.list(data) == FALSE)
stop("Argument data must be a list.")
if ("simulated" %in% names(data) == FALSE)
stop("Argument data must be a contain an element called simulated.")
if ("observed" %in% names(data) == FALSE)
stop("Argument data must be a contain an element called observed.")
if (is.data.frame(data$simulated) == FALSE)
stop("data$simulated must be a data.frame.")
if (is.data.frame(data$observed) == FALSE)
stop("data$observed must be a data.frame.")
if (side %in% c("both", "left", "right") == FALSE)
stop("Invalid side argument.")
if (is.null(col)) {
col <- grDevices::colorRampPalette(colFun(2))(length(prob))
}
if (length(col) != length(prob))
stop("Number of colors does not match the number of quantiles.")
prob <- sort(prob)
sim <- data$simulated
obs <- data$observed
sim_stats <- colnames(sim)
obs_stats <- colnames(obs)
names <- intersect(sim_stats, obs_stats)
if (length(names) == 0) {
stop("data$simulated and data$observed do not contain the same statistics.")
}
if (length(setdiff(obs_stats, sim_stats)) > 0) {
warning(
"data$simulated and data$observed do not share all the same statistics.
Only the shared statistics will be plotted."
)
}
plots <- vector("list", length(names))
for (i in seq_len(length(names))) {
min_value <- min(sim[, i], obs[[i]])
max_value <- max(sim[, i], obs[[i]])
spread_value <- max_value - min_value
kde <- density(sim[, i])
pdf <- approxfun(kde)
if (side == "both") {
dens <- pdf(obs[, i])
if (is.na(dens)) {
p_value <- 0.0
} else {
p_value <- mean(pdf(sim[, i]) <= dens)
}
} else if (side == "left") {
p_value <- mean(sim[, i] <= obs[, i])
} else if (side == "right") {
p_value <- mean(sim[, i] >= obs[, i])
}
df <- data.frame((kde)[c("x", "y")])
p_lab <- paste0("p=", sprintf("%.3f", p_value))
p_x <- max_value + 0.25 * spread_value
p_y <- max(df$y)
p <- ggplot2::ggplot(df, ggplot2::aes(x, y))
for (q in seq_len(length(prob))) {
this_q <- prob[q]
if (side == "left") {
l <- 1 - this_q
p <-
p +
ggplot2::geom_area(data = df[df$x <= quantile(sim[, i], prob = l), ],
fill = col[q])
} else if (side == "right") {
u <- this_q
p <-
p +
ggplot2::geom_area(data = df[df$x >= quantile(sim[, i], prob = u), ],
fill = col[q])
} else {
l <- (1 - this_q) / 2
u <- 1 - l
p <-
p +
ggplot2::geom_area(data = df[df$x <= quantile(sim[, i], prob = l), ],
fill = col[q])
p <-
p +
ggplot2::geom_area(data = df[df$x >= quantile(sim[, i], prob = u), ],
fill = col[q])
}
}
p <- p + ggplot2::geom_line() +
ggplot2::xlim(c(
min_value - 0.25 * spread_value,
max_value + 0.25 * spread_value
)) +
ggplot2::geom_vline(xintercept = obs[[i]],
linetype = "dashed") +
ggplot2::xlab(names[i]) +
ggplot2::ylab("Density") +
ggplot2::theme_bw() +
ggplot2::theme(
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank()
) +
ggplot2::annotate(
"text",
x = p_x,
y = p_y,
label = p_lab,
size = 3,
hjust = 1
)
plots[[i]] <- p
}
names(plots) <- colnames(data[[2]])
return(plots)
}
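# Hedged usage sketch (hypothetical data; assumes colFun() from the same
# package is available): one ggplot per statistic shared by both frames.
# pps <- list(simulated = data.frame(stat = rnorm(100)),
#             observed  = data.frame(stat = 0.5))
# plots <- plotPostPredStats(pps)
# plots[["stat"]]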
rbfKernDiagGradX <-
function(kern, X) {
gX = array(0,dim(as.array(X)))
return (gX)
}
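# The diagonal of an RBF kernel, k(x, x), is constant in x, so its gradient
# with respect to X is identically zero; e.g.
# rbfKernDiagGradX(kern, matrix(rnorm(6), 3, 2)) returns a 3 x 2 array of zeros.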
testthat::context("Testing the primary mtlr function.")
testthat::test_that("mtlr function is consistent for basic survival dataset",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
expect_equal_to_reference(mtlr(formula,data),"mtlr_leuk.rds")
})
testthat::test_that("mtlr function doesn't fail for 0 varaince features",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
data$x <- 1
expect_warning(mtlr(formula,data))
})
testthat::test_that("mtlr function is consistent for more complex survival dataset",{
formula <- survival::Surv(time,status)~.
data <- survival::lung
expect_equal_to_reference(mtlr(formula,data),"mtlr_lung.rds")
})
testthat::test_that("mtlr function is consistent for more complex survival dataset - no extra bias training",{
formula <- survival::Surv(time,status)~.
data <- survival::lung
expect_equal_to_reference(mtlr(formula,data, train_biases = F),"mtlr_lung_nobias.rds")
})
testthat::test_that("mtlr function is consistent for all censored survival dataset",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
data <- data[data$status == 0,]
expect_equal_to_reference(mtlr(formula,data),"mtlr_censored.rds")
})
testthat::test_that("mtlr function is consistent for all uncensored survival dataset",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
data <- data[data$status == 1,]
expect_equal_to_reference(mtlr(formula,data),"mtlr_uncensored.rds")
})
testthat::test_that("mtlr function is consistent for basic survival dataset UNNORMALIZED",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
expect_equal_to_reference(mtlr(formula,data, normalize = F),"mtlr_leukUNNORMALIZED.rds")
})
testthat::test_that("mtlr function is consistent for basic survival dataset for chosen nintervals",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
expect_equal_to_reference(mtlr(formula,data, nintervals = 3),"mtlr_leuk_timepoints.rds")
})
testthat::test_that("mtlr function works with left censoring",{
formula <- survival::Surv(time,status, type = "left")~.
data <- survival::lung
expect_equal_to_reference(mtlr(formula,data),"mtlr_leuk_left.rds")
})
testthat::test_that("mtlr function works with multiple types of censoring",{
time1 <- c(NA, 4, 7, 12, 10, 6, NA, 3,5,9,10,12,NA,4,6,2,NA,16,15,11)
time2 <- c(14, 4, 10, 12, NA, 9, 5, NA, NA, NA, NA, 15,22,4,8,6,2,20,23,11)
set.seed(42)
dat <- cbind.data.frame(time1, time2, importantfeature1 = rnorm(20),importantfeature2 = rnorm(20),
importantfeature3 = rnorm(20),importantfeature4 = rnorm(20),importantfeature5 = rnorm(20),
importantfeature6 = rbinom(20,1,.3),importantfeature7 = rbinom(20,1,.3))
formula <- survival::Surv(time1,time2,type = "interval2")~.
expect_equal_to_reference(mtlr(formula, dat),"mtlr_mixed_censoring.rds", tolerance = 1e-3)
})
testthat::test_that("mtlr argument specifications are working.",{
formula <- survival::Surv(time,status)~.
data <- survival::leukemia
data$time[1] <- -10
expect_error(mtlr(formula,data),"All event times must be non-negative")
data$time[1] <- 10
formula <- time~.
expect_error(mtlr(formula,data),"The response must be a Surv object.")
formula <- survival::Surv(time,status)~.
expect_error(mtlr(formula,data, C1 = -10),"C1 must be non-negative.")
expect_error(mtlr(formula,data, C1 = -1e-10),"C1 must be non-negative.")
expect_error(mtlr(formula,data, threshold = -1e-10),"The threshold must be positive.")
expect_error(mtlr(formula,data, threshold = 0),"The threshold must be positive.")
expect_error(mtlr(formula,data.frame()),"Dimensions of the dataset must be non-zero.")
})
testthat::test_that("when training mtlr fails optim error is caught",{
formula <- survival::Surv(time,status)~.
data <- survival::lung
data$meal.cal <- data$meal.cal*1e100
expect_error(mtlr(formula,data,normalize = F))
})
gen_fmridata = function(
signal = 1.5,
noise = 20,
arfactor = .3
){
gkernsm <- function(y,h=1) {
grid <- function(d) {
d0 <- d%/%2+1
gd <- seq(0,1,length=d0)
if (2*d0==d+1) gd <- c(gd,-gd[d0:2]) else gd <- c(gd,-gd[(d0-1):2])
gd
}
dy <- dim(y)
if (is.null(dy)) dy<-length(y)
ldy <- length(dy)
if (length(h)!=ldy) h <- rep(h[1],ldy)
kern <- switch(ldy,dnorm(grid(dy),0,2*h/dy),
outer(dnorm(grid(dy[1]),0,2*h[1]/dy[1]),
dnorm(grid(dy[2]),0,2*h[2]/dy[2]),"*"),
outer(outer(dnorm(grid(dy[1]),0,2*h[1]/dy[1]),
dnorm(grid(dy[2]),0,2*h[2]/dy[2]),"*"),
dnorm(grid(dy[3]),0,2*h[3]/dy[3]),"*"))
kern <- kern/sum(kern)
kernsq <- sum(kern^2)
list(gkernsm=convolve(y,kern,conj=TRUE),kernsq=kernsq)
}
create.mask <- function(){
mask <- array(0,dim=c(65,65,26))
mask[5:10,5:10,] <- 1
mask[7:8,7:8,] <- 0
mask[8:10,8:10,] <- 0
mask[14:17,14:17,] <- 1
mask[16:17,16:17,] <- 0
mask[21:23,21:23,] <- 1
mask[22:23,23,] <- 0
mask[23,22,] <- 0
mask[27:28,27:28,] <- 1
mask[28,28,] <- 0
mask[5:7,29:33,] <- 1
mask[7,32:33,] <- 0
mask[14:15,30:33,] <- 1
mask[15,30,] <- 0
mask[21,31:33,] <- 1
mask[22,33,] <- 1
mask[27,32:33,] <- 1
mask[29:33,5:7,] <- 1
mask[32:33,7,] <- 0
mask[30:33,14:15,] <- 1
mask[30,15,] <- 0
mask[31:33,21,] <- 1
mask[33,22,] <- 1
mask[32:33,27,] <- 1
mask[34:65,1:33,] <- mask[32:1,1:33,]
mask[1:33,34:65,] <- mask[1:33,32:1,]
mask[34:65,34:65,] <- mask[32:1,32:1,]
mask
}
create.sig <- function(signal=1.5,efactor=1.2){
sig <- array(0,dim=c(65,65,26))
sig[29:37,38:65,] <- signal
sig[38:65,38:65,] <- signal * efactor
sig[38:65,29:37,] <- signal * efactor^2
sig[38:65,1:28,] <- signal * efactor^3
sig[29:37,1:28,] <- signal * efactor^4
sig[1:28,1:28,] <- signal * efactor^5
sig[1:28,29:37,] <- signal * efactor^6
sig[1:28,38:65,] <- signal * efactor^7
sig * create.mask()
}
i <- 65
j <- 65
k <- 26
scans <- 107
ttt <- array(0,dim=c(i,j,k,scans))
sig <- array(0,dim=c(i,j,k))
mask <- create.mask()
sig <- create.sig(signal)
hrf <- signal * fmri.stimulus(scans, c(18, 48, 78), 15, 2)
dim(sig) <- c(i*j*k,1)
dim(hrf) <- c(1,scans)
sig4 <- sig %*% hrf
dim(sig) <- c(i,j,k)
dim(sig4) <- c(i,j,k,scans)
set.seed(1)
noisy4 <- rnorm(i*j*k*scans,0,noise)
dim(noisy4) <- c(i,j,k,scans)
for (t in 2:scans) noisy4[,,,t] <- noisy4[,,,t] + arfactor*noisy4[,,,t-1]
for (t in 1:scans) noisy4[,,,t] <- gkernsm(noisy4[,,,t],c(0.8,0.8,0.4))$gkernsm
ttt <- sig4 + noisy4
ex_fmridata <- list(ttt=writeBin(as.numeric(ttt),raw(),4),dim=c(i,j,k,scans),weights=c(1,1,2),
mask=array(1,c(i,j,k)),
delta = rep(1, 4))
class(ex_fmridata) <- "fmridata"
return(ex_fmridata)
}
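# Hedged example call (requires the fmri package for fmri.stimulus()):
# ex <- gen_fmridata(signal = 1.5, noise = 20, arfactor = .3)
# str(ex)   # "fmridata" list with a raw-encoded 65 x 65 x 26 x 107 array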
library(shiny)
library(shinydashboard)
library(shinyBS)
library(ggplot2)
ui <- dashboardPage(
dashboardHeader(title = "Demo - add popover to infoBox", titleWidth = 400),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard"))
)
),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom3.css")
),
bsPopover(id="q1", title = "Mean",
content = "Mean price of diamonds",
trigger = "hover",
placement = "right",
options = list(container = "body")),
bsPopover(id="info2", title = "Median",
content = "Median price of diamonds",
trigger = "hover",
placement = "right",
options = list(container="body")),
tabItems(
tabItem(tabName = "dashboard",
fluidRow(
infoBoxOutput("info1"),
infoBoxOutput("info2")
)
))))
server <- function(input, output, session) {
output$info1 <- renderInfoBox({
infoBox("Mean", round(mean(diamonds$price), 2),
icon = icon("usd"),
subtitle = tags$a(icon("question-circle"), id="q1"))
})
output$info2 <- renderInfoBox({
infoBox("Median", median(diamonds$price), icon = icon("usd"))
})
}
shinyApp(ui, server)
pkg_data <- new.env(parent = emptyenv())
.onLoad <- function(libname, pkgname) {
utils::data(
pak_sitrep_data,
package = pkgname,
envir = environment(.onLoad)
)
if (Sys.getenv("_R_CHECK_PACKAGE_NAME_", "") == "") {
check_platform(libname, pkgname)
}
pkg_data$ns <- list()
worker <- Sys.getenv("R_PKG_PKG_WORKER", "")
if (worker == "") {
fix_macos_path_in_rstudio()
} else if (worker == "true") {
Sys.setenv("R_PKG_PKG_WORKER" = "false")
options(
cli.num_colors = as.numeric(Sys.getenv("R_PKG_NUM_COLORS", "1")),
rlib_interactive = (Sys.getenv("R_PKG_INTERACTIVE") == "TRUE"),
cli.dynamic = (Sys.getenv("R_PKG_DYNAMIC_TTY") == "TRUE")
)
ca_path <- system.file(package = "pak", "curl-ca-bundle.crt")
if (ca_path != "") options(async_http_cainfo = ca_path)
use_private_lib()
} else {
use_private_lib()
}
invisible()
}
check_platform <- function(libname = dirname(find.package("pak")),
pkgname = "pak", data = pak_sitrep_data) {
if (!file.exists(file.path(libname, pkgname, "help"))) return(TRUE)
if (Sys.getenv("R_PACKAGE_DIR", "") != "") return(TRUE)
current <- R.Version()$platform
install <- data$platform
if (!platform_match(install, current)) {
warning(
"! Wrong OS or architecture, pak is probably dysfunctional.\n",
" Call `pak_update()` to fix this.",
call. = FALSE
)
}
}
platform_match <- function(install, current) {
os_ins <- get_os_from_platform(install)
os_cur <- get_os_from_platform(current)
arch_ins <- get_arch_from_platform(install)
arch_cur <- get_arch_from_platform(current)
if (os_ins != os_cur) return(FALSE)
if (os_ins == "windows") return(TRUE)
if (os_ins == "macos") return(arch_ins == arch_cur)
if (os_ins == "solaris") return(arch_ins == arch_cur)
if (os_ins == "linux") {
if (arch_ins != arch_cur) return(FALSE)
libc_ins <- get_libc_from_platform(install)
libc_cur <- get_libc_from_platform(current)
same <- !is.na(libc_ins) && !is.na(libc_cur) && libc_ins == libc_cur
return(same || identical(libc_ins, "musl"))
}
install == current
}
get_os_from_platform <- function(x) {
pcs <- strsplit(x, "-", fixed = TRUE)[[1]]
if (pcs[3] == "mingw32") return("windows")
if (pcs[2] == "apple") return("macos")
if (pcs[3] == "linux") return("linux")
if (grepl("^solaris", pcs[3])) return("solaris")
sub("[0-9.]*$", "", pcs[3])
}
get_arch_from_platform <- function(x) {
pcs <- strsplit(x, "-", fixed = TRUE)[[1]]
pcs[1]
}
get_libc_from_platform <- function(x) {
pcs <- strsplit(x, "-", fixed = TRUE)[[1]]
if (pcs[3] != "linux") return(NA_character_)
pcs[4]
}
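# Illustrative checks for platform_match(), runnable with the helpers above:
platform_match("x86_64-w64-mingw32", "x86_64-w64-mingw32")        # TRUE: Windows ignores arch
platform_match("aarch64-apple-darwin20", "x86_64-apple-darwin17") # FALSE: macOS arch mismatch
platform_match("x86_64-pc-linux-musl", "x86_64-pc-linux-gnu")     # TRUE: musl build runs on any libc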
read_copynumber <- function(input,
pattern = NULL,
ignore_case = FALSE,
seg_cols = c("Chromosome", "Start.bp", "End.bp", "modal_cn"),
samp_col = "sample",
add_loh = FALSE,
loh_min_len = 1e4,
loh_min_frac = 0.05,
join_adj_seg = TRUE,
skip_annotation = FALSE,
use_all = add_loh,
min_segnum = 0L,
max_copynumber = 20L,
genome_build = c("hg19", "hg38", "mm10", "mm9"),
genome_measure = c("called", "wg"),
complement = FALSE,
...) {
stopifnot(
is.character(samp_col),
length(samp_col) == 1,
min_segnum >= 0
)
timer <- Sys.time()
send_info("Started.")
on.exit(send_elapsed_time(timer))
genome_build <- match.arg(genome_build)
genome_measure <- match.arg(genome_measure)
send_info("Genome build : ", genome_build, ".")
send_info("Genome measure: ", genome_measure, ".")
if (add_loh) {
use_all <- TRUE
send_info("When add_loh is TRUE, use_all is forced to TRUE.
Please drop columns you don't want to keep before reading.")
}
if (genome_build %in% c("mm10", "mm9")) {
valid_chr <- c(paste0("chr", 1:19), "chrX", "chrY")
} else {
valid_chr <- c(paste0("chr", 1:22), "chrX", "chrY")
}
chrlen <- get_genome_annotation(
data_type = "chr_size",
chrs = valid_chr,
genome_build = genome_build
)
data.table::setDT(chrlen)
send_success("Chromosome size database for build obtained.")
send_info("Reading input.")
if (tryCatch(dir.exists(input), error = function(e) FALSE)) {
send_success("A directory as input detected.")
if (length(input) != 1) {
send_stop("Only can take one directory as input!")
}
all.files <- list.files(
path = input,
pattern = pattern,
all.files = FALSE,
recursive = FALSE,
ignore.case = ignore_case
)
files <- all.files[!file.info(file.path(input, all.files))$isdir]
if (length(files) == 0) {
send_stop("No files exist, please check!")
}
files_path <- file.path(input, files)
data_list <- list()
dropoff_list <- list()
sb <- cli::cli_status("{symbol$arrow_right} About to read files.")
Sys.sleep(0.5)
for (i in seq_along(files_path)) {
cli::cli_status_update(id = sb, "{symbol$arrow_right} Reading file {files_path[i]}.")
temp <- data.table::fread(files_path[i], ...)
if (!all(seg_cols %in% colnames(temp))) {
send_stop("Not all seg_cols are in file, please check.")
}
# fall back to file names when no sample column exists
if (!(samp_col %in% colnames(temp))) {
cli::cli_status_update(id = sb, "{symbol$arrow_right} Select file names as sample names.")
temp[, "sample"] <- files[i]
samp_col <- "sample" # was `sample_col`, a typo that left the new column unused
}
tempName <- unique(temp[[samp_col]])
if (length(tempName) > 1) {
send_stop("When input is a directory, a file can only contain one sample.")
}
data.table::setcolorder(temp, neworder = c(seg_cols, samp_col))
new_cols <- c("chromosome", "start", "end", "segVal", "sample")
colnames(temp)[1:5] <- new_cols
if (any(is.na(temp$segVal))) {
temp <- temp[!is.na(temp$segVal)]
}
cli::cli_status_update(id = sb, "{symbol$arrow_right} Checking chromosome names.")
temp[, chromosome := sub(
pattern = "chr",
replacement = "chr",
x = as.character(chromosome),
ignore.case = TRUE
)]
temp$chromosome <- ifelse(startsWith(temp$chromosome, "chr"),
temp$chromosome,
paste0("chr", temp$chromosome)
)
temp[, chromosome := sub(
pattern = "x",
replacement = "X",
x = chromosome
)]
temp[, chromosome := sub(
pattern = "y",
replacement = "Y",
x = chromosome
)]
temp[["chromosome"]] <- sub("23", "X", temp[["chromosome"]])
temp[["chromosome"]] <- sub("24", "Y", temp[["chromosome"]])
if (complement) {
cli::cli_status_update(
id = sb,
"{symbol$arrow_right} Fill value 2 (normal copy) to uncalled chromosomes."
)
miss_index <- !valid_chr %in% unique(temp[["chromosome"]])
miss_index[length(miss_index)] <- FALSE
if (any(miss_index)) {
comp_df <- temp[rep(1, sum(miss_index))]
comp_df[, c("chromosome", "start", "end", "segVal") := .(
chrlen[["chrom"]][miss_index],
1,
chrlen[["size"]][miss_index],
2
)]
comp_df[, setdiff(
colnames(comp_df),
c("chromosome", "start", "end", "segVal", "sample")
) := NA]
temp <- rbind(temp, comp_df, fill = TRUE)
}
}
if (!use_all) temp <- temp[, new_cols, with = FALSE]
if (nrow(temp) < min_segnum) {
dropoff_list[[tempName]] <- temp
} else {
data_list[[tempName]] <- temp
}
}
cli::cli_status_clear(sb)
if (length(data_list) >= 1) {
data_df <- data.table::rbindlist(data_list, use.names = TRUE, fill = TRUE)
} else {
data_df <- data.table::data.table()
}
if (length(dropoff_list) >= 1) {
dropoff_df <- data.table::rbindlist(dropoff_list, use.names = TRUE, fill = TRUE)
} else {
dropoff_df <- data.table::data.table()
}
} else if (all(is.character(input)) | is.data.frame(input)) {
if (!is.data.frame(input)) {
send_success("A file as input detected.")
if (length(input) > 1) {
send_stop("Muliple files are not a valid input, please use directory as input.")
}
if (!file.exists(input)) {
send_stop("Input file not exists.")
}
temp <- data.table::fread(input, ...)
} else {
send_success("A data frame as input detected.")
temp <- data.table::as.data.table(input)
}
if (is.null(samp_col)) {
send_stop("'samp_col' parameter must set!")
}
if (!all(seg_cols %in% colnames(temp))) {
send_stop("Not all seg_cols are in file, please check.")
}
if (!(samp_col %in% colnames(temp))) {
send_stop("Column ", samp_col, " does not exist.")
}
send_success("Column names checked.")
data.table::setcolorder(temp, neworder = c(seg_cols, samp_col))
new_cols <- c("chromosome", "start", "end", "segVal", "sample")
colnames(temp)[1:5] <- new_cols
send_success("Column order set.")
if (is.factor(temp$sample)) {
temp$sample <- as.character(temp$sample)
}
if (any(is.na(temp$segVal))) {
temp <- temp[!is.na(temp$segVal)]
send_success("Rows with NA copy number removed.")
}
temp[, chromosome := sub(
pattern = "chr",
replacement = "chr",
x = as.character(chromosome),
ignore.case = TRUE
)]
if (any(!grepl("chr", temp$chromosome))) {
temp$chromosome[!grepl("chr", temp$chromosome)] <-
paste0("chr", temp$chromosome[!grepl("chr", temp$chromosome)])
}
temp[, chromosome := sub(
pattern = "x",
replacement = "X",
x = chromosome
)]
temp[, chromosome := sub(
pattern = "y",
replacement = "Y",
x = chromosome
)]
temp[["chromosome"]] <- sub("23", "X", temp[["chromosome"]])
temp[["chromosome"]] <- sub("24", "Y", temp[["chromosome"]])
send_success("Chromosomes unified.")
if (complement) {
comp <- data.table::data.table()
for (i in unique(temp[["sample"]])) {
tmp_sample <- temp[i, on = "sample"]
miss_index <- !valid_chr %in% unique(tmp_sample[["chromosome"]])
miss_index[length(miss_index)] <- FALSE
if (any(miss_index)) {
comp_df <- tmp_sample[rep(1, sum(miss_index))]
comp_df[, c("chromosome", "start", "end", "segVal") := .(
chrlen[["chrom"]][miss_index],
1,
chrlen[["size"]][miss_index],
2
)]
comp <- rbind(comp, comp_df, fill = TRUE)
}
}
comp[, setdiff(
colnames(comp),
c("chromosome", "start", "end", "segVal", "sample")
) := NA]
temp <- rbind(temp, comp, fill = TRUE)
send_success("Value 2 (normal copy) filled to uncalled chromosomes.")
}
if (!use_all) temp <- temp[, new_cols, with = FALSE]
dropoff_samples <- temp[, .N, by = .(sample)][N < min_segnum][["sample"]]
keep_samples <- base::setdiff(unique(temp[["sample"]]), dropoff_samples)
data_df <- temp[sample %in% keep_samples]
dropoff_df <- temp[sample %in% dropoff_samples]
} else {
send_stop("Invalid input.")
}
send_success("Data imported.")
if (!all(data_df$chromosome %in% valid_chr)) {
data_drop <- data_df[!chromosome %in% valid_chr]
if (nrow(dropoff_df) >= 1) {
dropoff_df <- base::rbind(dropoff_df, data_drop)
} else {
dropoff_df <- data_drop
}
data_df <- data_df[chromosome %in% valid_chr]
send_success("Some invalid segments (not 1:22 and X, Y) dropped.")
}
send_info("Segments info:")
send_info(" Keep - ", nrow(data_df))
send_info(" Drop - ", nrow(dropoff_df))
data_df$segVal[data_df$segVal > max_copynumber] <- max_copynumber
data_df[["segVal"]] <- as.integer(round(data_df[["segVal"]]))
data_df$start <- as.numeric(data_df$start)
data_df$end <- as.numeric(data_df$end)
data.table::setorderv(data_df, c("sample", "chromosome", "start"))
send_success("Segments sorted.")
if (add_loh) {
send_info("Adding LOH labels...")
if (!"minor_cn" %in% colnames(data_df)) {
send_stop("When you want to add LOH infor, a column named as 'minor_cn' should exist!")
}
data_df$minor_cn <- pmin(
data_df$segVal - data_df$minor_cn,
data_df$minor_cn
)
data_df$loh <- data_df$segVal >= 1 & data_df$minor_cn == 0 &
(data_df$end - data_df$start > loh_min_len - 1)
data_df$loh[data_df$chromosome %in% c("chrX", "chrY")] <- FALSE
}
if (join_adj_seg) {
send_info("Joining adjacent segments with same copy number value. Be patient...")
data_df <- helper_join_segments2(data_df,
add_loh = add_loh,
loh_min_frac = loh_min_frac
)
send_success(nrow(data_df), " segments left after joining.")
} else {
send_info("Skipped joining adjacent segments with same copy number value.")
}
data.table::setorderv(data_df, c("sample", "chromosome", "start"))
data.table::setcolorder(data_df, c("chromosome", "start", "end", "segVal", "sample"))
if ("groups" %in% names(attributes(data_df))) {
attr(data_df, "groups") <- NULL
}
send_success("Segmental table cleaned.")
if (skip_annotation) {
annot <- data.table::data.table()
send_info("Annotation skipped.")
} else {
send_info("Annotating.")
annot <- get_LengthFraction(data_df,
genome_build = genome_build,
seg_cols = new_cols[1:4],
samp_col = new_cols[5]
)
send_success("Annotation done.")
}
send_info("Summarizing per sample.")
sum_sample <- get_cnsummary_sample(data_df,
genome_build = genome_build,
genome_measure = genome_measure
)
send_success("Summarized.")
send_info("Generating CopyNumber object.")
res <- CopyNumber(
data = data_df,
summary.per.sample = sum_sample,
genome_build = genome_build,
genome_measure = genome_measure,
annotation = annot,
dropoff.segs = dropoff_df
)
send_success("Generated.")
send_info("Validating object.")
res <- validate_segTab(res)
send_success("Done.")
res
}
utils::globalVariables(
c(
".",
"N",
".N",
".SD",
"flag",
"p_start",
"p_end",
"q_start",
"q_end",
"total_size"
)
)
ReadCodeChunks <- function(path) {
checkmate::assertFileExists(path, extension=c("rnw", "rmd", "r"))
ext <- tools::file_ext(path)
src <- readLines(path)
if (tolower(ext) %in% c("rnw", "rmd"))
src <- strsplit(knitr::purl(text=src, quiet=TRUE), "\n")[[1]]
lin <- grep("
nam <- gsub("^
is.unnamed <- which(grepl("=", nam) | nam == "")
nam[is.unnamed] <- paste0("unnamed-chunk-", is.unnamed)
m <- cbind(from = lin, to = c(lin[-1] - 1, length(src)))
chunks <- apply(m, 1, function(x) src[x[1]:x[2]])
chunks <- lapply(chunks, function(x) x[1:max(which(x != ""))])
names(chunks) <- nam
attr(chunks, "path") <- path
chunks
}
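# Hypothetical usage: split a knitr document into its chunks.
# chunks <- ReadCodeChunks("analysis.Rmd")
# names(chunks)       # chunk labels; unlabelled ones become "unnamed-chunk-<i>"
# chunks[["setup"]]   # code lines of the chunk labelled "setup"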
DataFrame2Matrix4Regression <- function(X, last=TRUE, Intercept=FALSE){
if (!is.data.frame(X)) stop("You must provide a data frame to prepare for regression")
n=dim(X)[1]
p=dim(X)[2]
rnames=rownames(X)
names=colnames(X)
print(names)
XN=NULL
newnames=NULL
for (i in 1:p){
if (is.numeric(X[[i]])) {
XN=cbind(XN,X[[i]])
newnames=c(newnames, names[i])
}
if (is.factor(X[[i]])){
Z=Factor2Binary(X[[i]], Name=names[i])
pp=dim(Z)[2]
nn=colnames(Z)
if (last) {Z=as.matrix(Z[,-pp])
nn=nn[-pp]}
else {Z=as.matrix(Z[,-1])
nn=nn[-1]}
newnames=c(newnames, nn)
XN=cbind(XN,Z)
}
}
if (Intercept){
XN=cbind(rep(1,n),XN)
newnames=c("Intercept", newnames)
}
colnames(XN)<- newnames
rownames(XN) =rnames
return(XN)
}
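# Hedged example (assumes Factor2Binary() from the same package, which
# returns one indicator column per factor level):
# df <- data.frame(y = rnorm(4), g = factor(c("a", "b", "a", "b")))
# DataFrame2Matrix4Regression(df, last = TRUE, Intercept = TRUE)
# -> columns: Intercept, y, and dummy columns for g (last level dropped)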
est.h <-
function(introgress.data=NULL, loci.data=NULL, ind.touse=NULL,
fixed=FALSE, p1.allele=NULL, p2.allele=NULL){
if (is.null(dim(loci.data))==TRUE)
stop("Locus information was not supplied")
else if (is.null(introgress.data)==TRUE)
stop("The count was not supplied")
if (fixed==FALSE & is.list(introgress.data)==FALSE)
stop("introgress.data must be a list if fixed=FALSE")
cat("est.h is working; this may take a few minutes", fill=TRUE)
if (is.list(introgress.data)==TRUE) admix.gen<-as.matrix(introgress.data$Admix.gen)
if (fixed==TRUE & (sum(loci.data[,2]=="D") + sum(loci.data[,2]=="d")) > 0)
stop("dominant data can not be modeled as fixed")
if (fixed==TRUE & is.null(admix.gen)==TRUE){
if (is.null(p1.allele)==TRUE | is.null(p2.allele)==TRUE)
stop("parental alleles must be provided if fixed==TRUE")
admix.gen<-array(dim=dim(introgress.data))
for (i in 1:dim(admix.gen)[1]){
if (loci.data[i,2]=="C" | loci.data[i,2]=="c"){
for (j in 1:dim(admix.gen)[2]){
if (is.na(introgress.data[i,j])==TRUE) admix.gen[i,j]=NA
else {
if (as.numeric(introgress.data[i,j])==2)
admix.gen[i,j]<-as.character(paste(p1.allele,"/",p1.allele))
else if (as.numeric(introgress.data[i,j])==1)
admix.gen[i,j]<-as.character(paste(p1.allele,"/",p2.allele))
else if (as.numeric(introgress.data[i,j])==0)
admix.gen[i,j]<-as.character(paste(p2.allele,"/",p2.allele))
}
}
}
else if (loci.data[i,2]=="H" | loci.data[i,]=="h"){
for (j in 1:dim(admix.gen)[2]){
if (is.na(introgress.data[i,j])==TRUE) admix.gen[i,j]<-NA
else{
if (as.numeric(introgress.data[i,j])==1) admix.gen[i,j]<-p1.allele
else if (as.numeric(introgress.data[i,j])==0) admix.gen[i,j]<-p2.allele
}
}
}
}
p1.freq<-cbind(rep(1,dim(admix.gen)[1]),rep(0,dim(admix.gen)[1]))
p2.freq<-cbind(rep(0,dim(admix.gen)[1]),rep(1,dim(admix.gen)[1]))
alleles<-cbind(rep(p1.allele,dim(admix.gen)[1]),rep(p2.allele,dim(admix.gen)[1]))
introgress.data<-list(NULL,introgress.data,NULL,p1.freq,p2.freq,alleles)
names(introgress.data)<-c("Individual.data","Count.matrix","Combos.to.use",
"Parental1.allele.freq","Parental2.allele.freq","Alleles")
}
if (fixed==TRUE & is.null(admix.gen)==FALSE){
if (is.null(p1.allele)==TRUE | is.null(p2.allele)==TRUE)
stop("parental alleles must be provided if fixed==TRUE")
p1.freq<-cbind(rep(1,dim(admix.gen)[1]),rep(0,dim(admix.gen)[1]))
p2.freq<-cbind(rep(0,dim(admix.gen)[1]),rep(1,dim(admix.gen)[1]))
alleles<-cbind(rep(p1.allele,dim(admix.gen)[1]),rep(p2.allele,dim(admix.gen)[1]))
introgress.data[[4]]<-p1.freq
introgress.data[[5]]<-p2.freq
introgress.data[[6]]<-alleles
}
if (is.null(ind.touse)==FALSE) {
if (is.character(ind.touse)==TRUE & is.character(colnames(introgress.data[[2]]))==FALSE){
stop ("individual names were not supplied for subsetting")
}
admix.gen<-admix.gen[,ind.touse]
}
hi<-data.frame(lower=numeric(ncol(admix.gen)),
h=numeric(ncol(admix.gen)),
upper=numeric(ncol(admix.gen)))
for(i in 1:ncol(admix.gen)){
hi[i, ] <- h.func(geno=admix.gen[,i],
locustype=loci.data[,"type"],
r=introgress.data$Parental2.allele.freq,
s=introgress.data$Parental1.allele.freq,
alleles=introgress.data$Alleles)
}
return(zapsmall(hi))
}
simPtsOptNet <- function(formula, loc=NULL, data, fitmodel, BLUE=FALSE, n, popSize, generations, xmin, ymin, xmax, ymax, plotMap=FALSE, spMap=NULL, ...){
evaluate <- function(string=c()) {
returnVal = NA;
pts2 <- as.data.frame(matrix(0, ncol=ncol(as.data.frame(data)), nrow=n))
names(pts2) <- colnames(as.data.frame(data))
if(is.data.frame(data)) {
if(is.null(loc)) stop(paste("loc must be provided"))
x1 <- all.vars(loc)[1]
y1 <- all.vars(loc)[2]
}
if(class(data)=="SpatialPointsDataFrame") {
x1 <- colnames(coordinates(data))[1]
y1 <- colnames(coordinates(data))[2]
}
for (i in 1:n){
pts2[i,x1] <- round(string[i], 1)
}
for (j in 1:n){
pts2[j,y1] <- round(string[n + j], 1)
}
coordinates(pts2) = c(x1, y1)
if (plotMap==TRUE) {
if(is.null(spMap)) stop(paste("if plotMap=TRUE, spMap must also be provided"))
plot(spMap, xlim=c(bbox(spMap)[1],bbox(spMap)[3]), ylim=c(bbox(spMap)[2],bbox(spMap)[4]), ...)
plot(pts2, add=TRUE)
}
g <- gstat(formula=formula, locations= loc, data=data, model = fitmodel, ...)
interp <- predict(g, newdata = pts2, BLUE = BLUE)
returnVal <- sum(sqrt(interp[["var1.var"]]))/n
returnVal
}
results <- rbga(as.matrix(c(rep(xmin,n), rep(ymin, n))), as.matrix(c(rep(xmax,n), rep(ymax, n))), popSize=popSize, evalFunc=evaluate, verbose=TRUE, iters=generations, ...)
return(results)
}
construct.ilab<-function(org, item, measurand, x, u, df, k, U, U.lower, U.upper, distrib=NULL, distrib.pars=NULL, study=NA, title=NA, p=0.95, ...) {
rv<-list()
rv$title <- title
rv$subset <- NA
L <- length(x)
org<-rep(org, length.out=L)
if(is.character(org)) org <- factor(org)
item<-rep(item, length.out=L)
measurand<-rep(measurand, length.out=L)
study<-rep(study, length.out=L)
l. <- as.data.frame(list(...))
if(missing(df)) df <- rep(NA, L)
if( !missing(U) ) {
if( is.factor(U) ) U <- as.character(U)
if( is.character(U) ) {
U.l <- U.r <- U.lower <- U.upper <- rep(NA, L)
AtoB <- grep("[-+.0-9]+ *- *[-+.0-9]+",U)
U.l[AtoB] <- as.numeric(gsub("([-+]?[.0-9]+) *- *[-+]?[.0-9]+","\\1", U[AtoB]))
U.r[AtoB] <- as.numeric(gsub("[-+]?[.0-9]+ *- *([-+]?[.0-9]+)","\\1", U[AtoB]))
U.lower[AtoB] <- x[AtoB] - pmin(U.l[AtoB], U.r[AtoB])
U.upper[AtoB] <- pmax(U.l[AtoB], U.r[AtoB]) - x[AtoB]
if( any( c(U.lower[AtoB], U.upper[AtoB]) <0 ) )
stop("Some x values outside range given by U=\"a-b\"")
AslashB <- grep("[-+.0-9]+ */ *[-+.0-9]+",U)
U.l[AslashB] <- as.numeric(gsub("([-+]?[.0-9]+) */ *[-+]?[.0-9]+","\\1", U[AslashB]))
U.r[AslashB] <- as.numeric(gsub("[-+]?[.0-9]+ */ *([-+]?[.0-9]+)","\\1", U[AslashB]))
U.lower[AslashB] <- - pmin(U.l[AslashB], U.r[AslashB])
U.upper[AslashB] <- pmax(U.l[AslashB], U.r[AslashB])
simple <- (1:L)[- c(AtoB, AslashB)]
U.l[simple] <- U.lower[simple] <- U.upper[simple] <- as.numeric(U[simple])
U <- rep(NA, L)
U[simple] <- U.l[simple]
} else {
U.lower <- U.upper <- U
}
} else {
if(!missing(u) && !missing(k) && missing(U.lower) && missing(U.upper) ) {
U <- k * u
} else
U <- rep(NA, L)
}
if(missing(U.lower)) U.lower <- if(!missing(U)) U else rep(NA, L)
if(missing(U.upper)) U.upper <- if(!missing(U)) U else rep(NA, L)
if(missing(u)) {
if(!missing(U) && !missing(k) ) {
u <- U / k
} else
u <- rep(NA, L)
}
if(missing(k)) {
if(!missing(U) && !missing(u) ) {
k <- U / u
} else
k <- rep(NA, L)
}
rv$data <- data.frame(
org=org, item=item, measurand=measurand, x=x, u=u, df=df, k=k,
U=U, U.lower=U.lower, U.upper=U.upper, study=study
)
l. <- list(...)
if( length(l.) > 0) rv$data <- cbind(rv$data, as.data.frame(l.))
if(!is.null(distrib) ) {
if(is.list(distrib)) {
rv$distrib<-distrib
} else {
if(length(distrib) < L ) distrib <- rep(distrib, length.out=L)
rv$distrib<-as.list(distrib)
}
for(n in 1:L) {
if(is.na(rv$data$df[n]) && rv$distrib[[n]] %in% c("t", "t.scaled")) {
if(!is.na(rv$data$k[n])) rv$data$df[n] <- .get.df(rv$data$k[n], p)
}
}
if(!is.null(distrib.pars)) {
rv$distrib.pars<-as.list(distrib.pars)
} else {
rv$distrib.pars<-list()
for( n in 1:L ) {
rv$distrib.pars[[n]]<-.get.pars(distrib[[n]], rv$data$x[n], rv$data$u[n], rv$data$df[n])
}
}
} else {
rv$distrib<-as.list(rep(NA, L))
rv$distrib.pars<-as.list(rep(NA, L))
}
class(rv) <- "ilab"
return(rv)
}
print.ilab <- function(x, ..., digits=NULL, right=FALSE) {
maxwidth<-12L
if(!is.na(x$title[1])) {
for(s in x$title) cat(sprintf("%s\n", s))
} else {
cat("Interlaboratory study:\n")
}
if(!is.na(x$subset)) {
cat(sprintf("Subset: %s\n", x$subset))
}
dp<-x$data
if(!is.null(x[["distrib", exact=TRUE]]) ) {
fdp<-function(x) { if(is.function(x)) deparse(x)[1] else paste(x) }
distrib.labels<- as.vector( sapply(x$distrib, fdp ) )
dp$distrib<-sub(paste("(.{",maxwidth,",",maxwidth,"})(.+)", sep=""),
"\\1...",distrib.labels)
}
if(!is.null(x$distrib.pars)) {
dp$distrib.pars <- vector("character", length=nrow(x$data) )
for(nn in 1:nrow(x$data) ) {
dp[nn,"distrib.pars"]<-
paste(names(x$distrib.pars[[nn]]),
format(x$distrib.pars[[nn]], digits=digits),
sep="=", collapse=", ")
}
}
print.data.frame(dp,digits=digits, right=right, ...)
}
plot.ilab <- function(x, ...) {
pars<-c(list(x=x), list(...) )
do.call("kplot", pars)
}
subset.ilab <- function(x, subset, drop=FALSE, ...) {
if (!missing(subset)) {
e <- substitute(subset)
r <- eval(e, x$data, parent.frame())
if (!is.logical(r))
stop("'subset' must evaluate to logical")
r <- r & !is.na(r)
x$subset <- sprintf("subset(%s, %s)", deparse(substitute(x)), deparse(substitute(subset)))
x$data<-x$data[r, ,drop=drop]
if(!is.null(x$distrib)) x$distrib <- x$distrib[r]
if(!is.null(x$distrib.pars)) x$distrib.pars <- x$distrib.pars[r]
}
return(x)
}
'[.ilab' <- function(x, i, j) {
x$subset <- sprintf("%s[%s, %s]", deparse(substitute(x)),
deparse(substitute(i)), deparse(substitute(j)))
x$data <- x$data[i,j, drop=FALSE]
if( !is.null(x$distrib) ) x$distrib <- x$distrib[i]
if( !is.null(x$distrib.pars) ) x$distrib.pars <- x$distrib.pars[i]
return(x)
}
rbind<-function(..., deparse.level = 1) UseMethod("rbind")
rbind.default <- function(..., deparse.level=1) base::rbind(..., deparse.level=deparse.level)
rbind.ilab<-function(..., deparse.level = 1) {
ilab.list <- list(...)
il.classes <- sapply(ilab.list, function(x) class(x)[1])
if(any(il.classes != "ilab"))
stop("All objects must be of class 'ilab'", call.=TRUE)
if(length(ilab.list) == 0 ) {
return(NULL)
} else if(length(ilab.list) == 1) {
return(ilab.list[[1]])
} else {
rv <- ilab.list[[1]]
if(is.null(rv$distrib)) rv$distrib<-rep(NA, nrow(rv$data))
if(is.null(rv$distrib.pars)) rv$distrib.pars<-as.list(rep(NA, nrow(rv$data)))
for( i in 2:length(ilab.list) ) {
if(!isTRUE(all.equal(sort(names(rv)), sort(names(ilab.list[[i]])) ))) {
stop(sprintf("Names in %s do not match previous names.", names(ilab.list)[i]), call.=TRUE)
} else {
print(paste("Binding ", i, "\n"))
rv$data<-rbind(rv$data, ilab.list[[i]]$data, deparse.level=deparse.level)
if(is.null(ilab.list[[i]]$distrib)) ilab.list[[i]]$distrib<-rep(NA, nrow(ilab.list[[i]]$data))
if(is.null(ilab.list[[i]]$distrib.pars)) ilab.list[[i]]$distrib.pars<-as.list(rep(NA, nrow(ilab.list[[i]]$data)))
rv$distrib<-c(rv$distrib, ilab.list[[i]]$distrib)
rv$distrib.pars<-c(rv$distrib.pars, ilab.list[[i]]$distrib.pars)
}
}
}
return(rv)
}
c.ilab<-function(..., recursive=FALSE) {
rbind.ilab(...)
}
cbind<-function(..., deparse.level = 1) UseMethod("cbind")
cbind.default <- function(..., deparse.level=1) base::cbind(..., deparse.level=deparse.level)
cbind.ilab<-function(..., deparse.level = 1) {
l<-list(...)
L<-length(l)
i.ilab <- which( sapply( l, function(x) class(x)[1] ) =="ilab")
if(length(i.ilab) == 0)
stop("Only one ilab object permitted in cbind.ilab", call.=TRUE)
if(length(i.ilab) > 1)
stop("cbind.ilab requires one ilab object", call.=TRUE)
i.args <- (1:L)[-i.ilab]
args.ok <- sapply(l[i.args], is.atomic) | sapply(l[i.args], is.data.frame)
if( any(!args.ok) )
stop("Arguments to cbind.ilab must be atomic, data frame or class 'ilab'", call.=TRUE)
ilab<-l[[i.ilab]]
for(i in i.args) {
nm <- names(l)[i]
if(is.null(nm)) nm <- sprintf("argument %d", i+1)
if(length(dim(l[[i]]))>2)
stop(sprintf("Number of dimensions of parameter %s exceeds 2", nm), call.=TRUE)
if(length(dim(l[[i]]))==2) {
if( nrow(l[[i]])>nrow(ilab$data) )
stop(sprintf("Number of rows in %s exceeds rows in %s", nm, deparse(substitute(ilab))), call.=TRUE)
} else {
if(length(l[[i]])>nrow(ilab$data))
stop(sprintf("Length of %s exceeds rows in %s", nm, deparse(substitute(ilab))), call.=TRUE)
}
}
ilab$data <- do.call(base::cbind, c(list(ilab$data), l[i.args], list(deparse.level = deparse.level)))
return(ilab)
}
GEX_cluster_genes <- function(GEX,
min.pct,
filter,
base,
platypus.version){
platypus.version <- "does not matter"
automate_GEX.output <- GEX
GEX <- NULL
if(missing(min.pct)) min.pct <- 0.25
if (missing(filter)) {filter <- c("MT-", "RPL", "RPS")}
if(missing(base)){base <- 2}
Seurat::Idents(automate_GEX.output) <- automate_GEX.output$seurat_clusters
number_of_clusters <- length(unique(automate_GEX.output$seurat_clusters))
cluster_markers <- list()
for(i in 1:number_of_clusters){
cluster_markers[[i]] <- Seurat::FindMarkers(automate_GEX.output, ident.1 = i-1, min.pct = min.pct, base=base)
colnames(cluster_markers[[i]])[2] <- "avg_logFC"
cluster_markers[[i]]$SYMBOL <- rownames(cluster_markers[[i]])
cluster_markers[[i]]$cluster <- rep((i-1), nrow(cluster_markers[[i]]))
exclude <- c()
for (j in filter) {
exclude <- c(exclude, stringr::str_which(rownames(cluster_markers[[i]]), j))
}
if (length(exclude) > 0) { # guard: x[-integer(0), ] would drop every row
cluster_markers[[i]] <- cluster_markers[[i]][-exclude,]
}
}
return(cluster_markers)
}
SL.polymars <- function(Y, X, newX, family, obsWeights, ...){
.SL.require('polspline')
if(family$family == "gaussian") {
fit.mars <- polspline::polymars(Y, X, weights = obsWeights)
pred <- predict(fit.mars, x = newX)
fit <- list(object = fit.mars)
}
if(family$family == "binomial") {
fit.mars <- polspline::polyclass(Y, X, cv = 5, weight = obsWeights)
pred <- polspline::ppolyclass(cov = newX, fit = fit.mars)[, 2]
fit <- list(fit = fit.mars)
}
out <- list(pred = pred, fit = fit)
class(out$fit) <- c("SL.polymars")
return(out)
}
predict.SL.polymars <- function(object, newdata, family, ...) {
.SL.require('polspline')
if(family$family=="gaussian"){
pred <- predict(object = object$object, x = newdata)
}
if(family$family=="binomial"){
pred <- polspline::ppolyclass(cov=newdata, fit=object$fit)[, 2]
}
return(pred)
}
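# Hedged usage within SuperLearner (assumes the SuperLearner and polspline
# packages are installed):
# library(SuperLearner)
# n <- 100; X <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
# Y <- X$x1 + rnorm(n)
# SuperLearner(Y, X, family = gaussian(),
#              SL.library = c("SL.polymars", "SL.mean"))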
print.gv <-
function(x, ...) {
multi <- FALSE
if ("gamma.mat" %in% names(x)) multi <- TRUE
line1 <- "Variogram"
if (multi) {
line1 <- paste(line1, "with multiple genetic distance matrices.")
} else {
line1 <- paste(line1, "with a single genetic distance matrix.")
}
n.obs <- nrow(x$x)
n.dstcl <- length(x$lag)
line2 <- paste(n.obs, "observations and", n.dstcl,
"distance classes (from", round(min(x$x), 2), "to",
round(max(x$x), 2), ")")
line3 <- paste(round(x$param$lag, 2), "lag size with",
round(x$param$tol, 2), "tolerance.")
mtest <- mtest.gv(x)
if (mtest) {
line4 <- paste("A", x$model$type, "model is fitted with", x$model$sill,
"sill,", x$model$range, "range and", x$model$nugget,
"nugget.")
} else {
line4 <- "No model fitted"
}
cat(line1, "\n", line2, "\n", line3, "\n", line4, "\n")
}
context("xmlConverter unit tests")
test_that("XML Strings can be imported", {
doc <- parse.xmlstring("<foo><bar><baz val='the baz attribute'/></bar></foo>")
expect_equal(doc$toString(), "<foo><bar><baz val='the baz attribute'></baz></bar></foo>")
})
test_that("XML files can be imported", {
doc <- parse.xmlfile("pom.xml")
root <- doc$getRootElement()
expect_equal(root$getName(), "project")
findByArtifactId <- function(dependencies, artifactId) {
for (dependency in dependencies$getChildren()) {
if (dependency$getChild("artifactId")$getText() == artifactId) {
return (dependency)
}
}
}
dep <- findByArtifactId(root$getChild("dependencies"), "testthat")
groupId <- dep$getChild("groupId")$getText()
expect_equal("org.renjin.cran", groupId)
})
.readRasterCellsNetCDF <- function(x, cells) {
if (canProcessInMemory(x, 2)) {
r <- getValues(x)
r <- r[cells]
return(r)
}
row1 <- rowFromCell(x, min(cells))
row2 <- rowFromCell(x, max(cells))
if ((row2 - row1) < 10 ) {
ncl <- (row2 - row1 + 1) * x@ncols
r <- raster(nrow=1, ncol=ncl)
v <- getValues(x, row1, row2-row1+1)
v <- v[cells-cellFromRowCol(x, row1, 1)+1]
return(v)
}
colrow <- matrix(ncol=3, nrow=length(cells))
colrow[,1] <- colFromCell(x, cells)
colrow[,2] <- rowFromCell(x, cells)
colrow[,3] <- NA
rows <- sort(unique(colrow[,2]))
readrows <- rows
if ( x@file@toptobottom ) {
readrows <- x@nrows - readrows + 1
}
zvar = x@data@zvar
time = x@data@band
nc <- ncdf4::nc_open(x@file@name, suppress_dimvals = TRUE)
on.exit( ncdf4::nc_close(nc) )
if (nc$var[[zvar]]$ndims == 1) {
ncx <- x@ncols
count <- ncx
for (i in 1:length(rows)) {
start <- (readrows[i]-1) * ncx + 1
v <- as.vector(ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count))
thisrow <- subset(colrow, colrow[,2] == rows[i])
colrow[colrow[,2]==rows[i], 3] <- v[thisrow[,1]]
}
} else if (nc$var[[zvar]]$ndims == 2) {
count <- c(x@ncols, 1)
for (i in 1:length(rows)) {
start <- c(1, readrows[i])
v <- as.vector(ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count))
thisrow <- subset(colrow, colrow[,2] == rows[i])
colrow[colrow[,2]==rows[i], 3] <- v[thisrow[,1]]
}
} else if (nc$var[[zvar]]$ndims == 3) {
count <- c(x@ncols, 1, 1)
for (i in 1:length(rows)) {
start <- c(1, readrows[i], time)
v <- as.vector(ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count))
thisrow <- subset(colrow, colrow[,2] == rows[i])
colrow[colrow[,2]==rows[i], 3] <- v[thisrow[,1]]
}
} else {
if (x@data@dim3 == 4) {
count <- c(x@ncols, 1, 1, 1)
for (i in 1:length(rows)) {
start <- c(1, readrows[i], x@data@level, time)
v <- as.vector(ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count))
thisrow <- subset(colrow, colrow[,2] == rows[i])
colrow[colrow[,2]==rows[i], 3] <- v[thisrow[,1]]
}
} else {
count <- c(x@ncols, 1, 1, 1)
for (i in 1:length(rows)) {
start <- c(1, readrows[i], time, x@data@level)
v <- as.vector(ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count))
thisrow <- subset(colrow, colrow[,2] == rows[i])
colrow[colrow[,2]==rows[i], 3] <- v[thisrow[,1]]
}
}
}
colrow <- colrow[,3]
colrow[colrow == x@file@nodatavalue] <- NA
return(colrow)
}
.readBrickCellsNetCDF <- function(x, cells, layer, nl) {
i <- which(!is.na(cells))
if (length(cells) > 1000) {
if (canProcessInMemory(x, 2)) {
endlayer <- layer+nl-1
r <- getValues(x)
r <- r[cells, layer:endlayer]
return(r)
}
}
zvar <- x@data@zvar
dim3 <- x@data@dim3
cols <- colFromCell(x, cells)
rows <- rowFromCell(x, cells)
if ( x@file@toptobottom ) {
rows <- x@nrows - rows + 1
}
nc <- ncdf4::nc_open(x@file@name, suppress_dimvals = TRUE)
on.exit( ncdf4::nc_close(nc) )
j <- which(!is.na(cells))
if (nc$var[[zvar]]$ndims == 2) {
count <- c(1, 1)
res <- matrix(NA, nrow=length(cells), ncol=1)
for (i in j) {
start <- c(cols[i], rows[i])
res[i] <- ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count)
}
} else if (nc$var[[zvar]]$ndims == 3) {
count <- c(1, 1, nl)
res <- matrix(NA, nrow=length(cells), ncol=nl)
for (i in j) {
start <- c(cols[i], rows[i], layer)
res[i,] <- ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count)
}
} else {
if (x@data@dim3 == 4) {
count <- c(1, 1, 1, nl)
res <- matrix(NA, nrow=length(cells), ncol=nl)
for (i in j) {
start <- c(cols[i], rows[i], x@data@level, layer)
res[i,] <- ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count)
}
} else {
count <- c(1, 1, nl, 1)
res <- matrix(nrow=length(cells), ncol=nl)
for (i in j) { # skip NA cells, matching the other branches
start <- c(cols[i], rows[i], layer, x@data@level)
res[i,] <- ncdf4::ncvar_get(nc, varid=zvar, start=start, count=count)
}
}
}
res[res == x@file@nodatavalue] <- NA
return(res)
} |
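# Usage sketch (assumptions: "sst.nc" and varname "sst" are illustrative, and
# these are internal raster helpers normally reached via raster::extract()
# rather than called directly):
# b <- raster::brick("sst.nc", varname = "sst")
# .readBrickCellsNetCDF(b, cells = c(1, 2500, 10000), layer = 1, nl = 3)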
simplex<-function(x, dist="weibull", tz=0, debias="none", optcontrol=NULL) {
default_tz=0
default_sign=1
if(!is.data.frame(x)) {stop("mlefit takes a structured dataframe input, use mleframe")}
if(ncol(x)!=3) {stop("mlefit takes a structured dataframe input, use mleframe")}
xnames<-names(x)
if(xnames[1]!="left" || xnames[2]!="right"||xnames[3]!="qty") {
stop("mlefit takes a structured dataframe input, use mleframe") }
if(tolower(dist) %in% c("weibull","weibull2p","weibull3p")){
fit_dist<-"weibull"
}else{
if(tolower(dist) %in% c("lnorm", "lognormal","lognormal2p", "lognormal3p")){
fit_dist<-"lnorm"
}else{
stop(paste0("dist argument ", dist, " is not recognized for mle fitting"))
}
}
if(tolower(dist) %in% c("weibull3p", "lognormal3p")){
npar<-3
}
Nf=0
Ns=0
Nd=0
Ni=0
failNDX<-which(x$right==x$left)
suspNDX<-which(x$right<0)
Nf_rows<-length(failNDX)
if(Nf_rows>0) {
Nf<-sum(x[failNDX,3])
}
Ns_rows<-length(suspNDX)
if(Ns_rows>0) {
Ns<-sum(x[suspNDX,3])
}
discoveryNDX<-which(x$left==0)
Nd_rows<-length(discoveryNDX)
if(Nd_rows>0) {
Nd<-sum(x[discoveryNDX,3])
}
testint<-x$right-x$left
intervalNDX<-which(testint>0)
interval<-x[intervalNDX,]
intervalsNDX<-which(interval$left>0)
Ni_rows<-length(intervalsNDX)
if(Ni_rows>0) {
Ni<-sum(interval[intervalsNDX,3])
}
fsiq<-rbind(x[failNDX,], x[suspNDX,], x[discoveryNDX,], interval[intervalsNDX,])
fsd<-NULL
if((Nf+Ns)>0) {
fsd<-fsiq$left[1:(Nf_rows + Ns_rows)]
}
if(Nd>0) {
fsd<-c(fsd,fsiq$right[(Nf_rows + Ns_rows + 1):(Nf_rows + Ns_rows + Nd_rows)])
}
if(Ni>0) {
fsdi<-c(fsd, fsiq$left[(Nf_rows + Ns_rows + Nd_rows + 1):nrow(fsiq)],
fsiq$right[(Nf_rows + Ns_rows + Nd_rows + 1):nrow(fsiq)])
}else{
fsdi<-fsd
}
q<-fsiq$qty
N<-c(Nf_rows,Ns_rows,Nd_rows,Ni_rows)
mrr_fail_data<- c(rep(x[failNDX,1],x[failNDX,3]),
rep( x[discoveryNDX,2]/2, x[discoveryNDX,3]),
rep((interval[intervalsNDX,1]+(interval[intervalsNDX,2]-interval[intervalsNDX,1])/2), interval[intervalsNDX,3])
)
mrr_susp_data<-rep(x[suspNDX,1], x[suspNDX,3])
if(fit_dist=="weibull"){
dist_num=1
if(Nf==1 && Nd+Ni==0) {
weibayes_scale <-x[failNDX,1]+sum(x[suspNDX,1])
vstart<- c(1, weibayes_scale)
warning("single failure data set may be candidate for weibayes fitting")
}else{
mrr_fit<-lslr(getPPP(mrr_fail_data, mrr_susp_data), abpval=FALSE)
shape<-mrr_fit[2]
scale<- mrr_fit[1]
vstart <- c(shape, scale)
}
}else{
if(fit_dist=="lnorm"){
dist_num=2
mrr_fit<-lslr(getPPP(mrr_fail_data, mrr_susp_data), dist="lognormal", abpval=FALSE)
ml<- mrr_fit[1]
sdl<- mrr_fit[2]
vstart<-c(ml,sdl)
}else{
stop("distribution not resolved for mle fitting")
}
}
limit<-1e-6
maxit<-100
listout<-FALSE
if(length(optcontrol)>0) {
if(length(optcontrol$vstart)>0) {
vstart<-optcontrol$vstart
}
if(length(optcontrol$limit)>0) {
limit<-optcontrol$limit
}
if(length(optcontrol$maxit)>0) {
maxit<-optcontrol$maxit
}
if(length(optcontrol$listout)>0) {
listout<-optcontrol$listout
}
}
pos<-1
Q<-sum(q)
for(j in seq(1,4)) {
if(N[j]>0) {
Q<-c(Q, sum(q[pos:(pos+N[j]-1)]))
pos<-pos+N[j]
}else{
Q<-c(Q, 0)
}
}
names(Q)<-c("n","fo", "s", "d", "i")
MLEclassList<-list(fsdi=fsdi,q=q,N=N,dist_num=dist_num)
LLtest<-.Call(MLEloglike,MLEclassList,vstart, default_sign, default_tz)
if(!is.finite(LLtest)) {
stop("Cannot start mle optimization with given parameters")
}
ControlList<-list(limit=limit,maxit=maxit)
if(debias!="none" && dist_num==1) {
if(!(tolower(debias) %in% c("rba", "mean", "hrbu"))) {
stop("debias method not resolved")
}
}
listout_int<-0
result_of_simplex_call<-.Call(MLEsimplex,MLEclassList, ControlList, vstart, default_tz, listout_int)
if(result_of_simplex_call[4]>0) {
warning("simplex does not converge")
}
result_of_simplex_call
} |
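# Usage sketch. mleframe() is the companion constructor named in the error
# messages above; the failure/suspension values are illustrative:
# dat <- mleframe(c(90, 96, 30, 49, 82), s = c(100, 45, 10))
# simplex(dat, dist = "weibull", debias = "none")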
"to_real" <- function(o){
out <- c(rbind(Re(o),Im(o)))
if(!is.null(names(o))){
names(out) <-
apply(expand.grid(c("_real","_imag"),names(o))[,2:1],1,paste,collapse="")
} else {
names(out) <- NULL
}
return(out)
}
"to_complex" <- function(p){
if(is.vector(p)){
jj <- Recall(t(p))
out <- c(jj)
names(out) <- colnames(jj)
return(out)
}
out <- (
p[,seq(from=1,by=2,to=ncol(p)),drop=FALSE] +
1i*p[,seq(from=2,by=2,to=ncol(p)),drop=FALSE]
)
f <- function(string){sub("_real","",string)}
colnames(out) <- sapply(colnames(out),f)
return(out)
}
"complex_ode" <- function(y, times, func, parms=NA, method=NULL, u, udash, ...){
out <-
ode(y=to_real(y), times=times, func=func, parms=to_real(parms), method, u=u, udash=udash, ...)
out <- cbind(z=u(out[,1]),to_complex(out[,-1]))
class(out) <- c("deSolve", "matrix")
return(out)
}
hypergeo_press <- function(A,B,C,z, ...){
if(Re(z)<=0){
startz <- -0.5
} else if( (Re(z)<=0.5)){
startz <- 0.5
} else if(Im(z)>=0){
startz <- 0.5i
} else if(Im(z)<0){
startz <- -0.5i
}
initial_value <- hypergeo(A,B,C,z=startz)
initial_deriv <- (A*B)/C*hypergeo(A+1,B+1,C+1,z=startz)
complex_ode(y = c(F=initial_value, Fdash=initial_deriv),
times = seq(0,1,by=0.05),
func = hypergeo_func,
parms = c(A=A, B=B, C=C)+0i,
u = function(u){startz + (z-startz)*u},
udash = function(u){z-startz},
...)
}
"hypergeo_func" <- function(Time, State, Pars, u, udash) {
with(as.list(c(to_complex(State), to_complex(Pars))), {
z <- u(Time)
dz <- udash(Time)
dF <- dz * Fdash
dFdash <- dz * (A*B*F -(C-(A+B+1)*z)*Fdash)/(z*(1-z))
out <- to_real(c(dF,dFdash))
names(out) <- names(State)
return(list(out))
})
}
f15.5.1 <- function(A,B,C,z,startz,u,udash,give=FALSE, ...){
out <-
complex_ode(y = c(F=hypergeo(A,B,C,startz), Fdash=hypergeo(A+1,B+1,C+1,startz)*A*B/C),
times = seq(0,1,by=0.1),
func = hypergeo_func,
parms = c(A=A, B=B, C=C)+0i,
u = u,
udash = udash,
...)
if(give){
return(out)
} else {
return(unname(out[11,2]))
}
}
"semicircle" <- function(t,z0,z1,clockwise=TRUE){
if(clockwise){m <- -1} else {m <- 1}
center <- (z0+z1)/2
center + (z0-center)*exp(1i*t*pi*m)
}
"semidash" <- function(t,z0,z1,clockwise=TRUE){
if(clockwise){m <- -1} else {m <- 1}
center <- (z0+z1)/2
(z0-center)*(1i*pi*m)*exp(1i*t*pi*m)
}
"straight" <- function(t,z0,z1){ z0 + t*(z1-z0) }
"straightdash" <- function(t,z0,z1){ (z1-z0) } |
context("commands - server")
test_that("CLIENT KILL", {
expect_equal(redis_cmds$CLIENT_KILL(ID = "12", SKIPME = "yes"),
list("CLIENT", "KILL", NULL, list("ID", "12"),
NULL, NULL, list("SKIPME", "yes")))
expect_equal(redis_cmds$CLIENT_KILL(ID = "11", SKIPME = "no"),
list("CLIENT", "KILL", NULL, list("ID", "11"),
NULL, NULL, list("SKIPME", "no")))
})
test_that("CLIENT LIST", {
expect_equal(redis_cmds$CLIENT_LIST(), list("CLIENT", "LIST"))
})
test_that("CLIENT GETNAME", {
expect_equal(redis_cmds$CLIENT_GETNAME(), list("CLIENT", "GETNAME"))
})
test_that("CLIENT PAUSE", {
expect_equal(redis_cmds$CLIENT_PAUSE(1000),
list("CLIENT", "PAUSE", 1000))
})
test_that("CLIENT REPLY", {
expect_error(redis_cmds$CLIENT_REPLY("SKIP"),
"Do not use CLIENT_REPLY")
})
test_that("CLIENT SETNAME", {
name <- rand_str()
expect_equal(redis_cmds$CLIENT_SETNAME(name),
list("CLIENT", "SETNAME", name))
})
test_that("COMMAND", {
expect_equal(redis_cmds$COMMAND(), list("COMMAND"))
})
test_that("COMMAND COUNT", {
expect_equal(redis_cmds$COMMAND_COUNT(),
list("COMMAND", "COUNT"))
})
test_that("COMMAND GETKEYS", {
cmd <- redis_cmds$MSET(letters[1:3], 1:3)
expect_equal(redis_cmds$COMMAND_GETKEYS(cmd),
c(list("COMMAND", "GETKEYS"), cmd))
})
test_that("COMMAND INFO", {
cmds <- c("get", "set", "eval")
expect_equal(redis_cmds$COMMAND_INFO(cmds),
list("COMMAND", "INFO", cmds))
})
test_that("CONFIG GET", {
query <- "*max-*-entries*"
expect_equal(redis_cmds$CONFIG_GET(query),
list("CONFIG", "GET", query))
})
test_that("DBSIZE", {
expect_equal(redis_cmds$DBSIZE(), list("DBSIZE"))
})
test_that("FLUSHALL", {
expect_equal(redis_cmds$FLUSHALL(), list("FLUSHALL"))
})
test_that("FLUSHDB", {
expect_equal(redis_cmds$FLUSHDB(), list("FLUSHDB"))
})
test_that("INFO", {
expect_equal(redis_cmds$INFO(), list("INFO", NULL))
})
test_that("LASTSAVE", {
expect_equal(redis_cmds$LASTSAVE(), list("LASTSAVE"))
})
test_that("ROLE", {
expect_equal(redis_cmds$ROLE(), list("ROLE"))
})
test_that("SLOWLOG", {
expect_equal(redis_cmds$SLOWLOG("LEN"),
list("SLOWLOG", "LEN", NULL))
expect_equal(redis_cmds$SLOWLOG("GET", "1"),
list("SLOWLOG", "GET", "1"))
})
test_that("TIME", {
expect_equal(redis_cmds$TIME(), list("TIME"))
})
test_that("BGREWRITEAOF", {
expect_equal(redis_cmds$BGREWRITEAOF(), list("BGREWRITEAOF"))
})
test_that("BGSAVE", {
expect_equal(redis_cmds$BGSAVE(), list("BGSAVE"))
})
test_that("CONFIG REWRITE", {
expect_equal(redis_cmds$CONFIG_REWRITE(), list("CONFIG", "REWRITE"))
})
test_that("CONFIG SET", {
expect_equal(redis_cmds$CONFIG_SET("SAVE", "900 1 300 10"),
list("CONFIG", "SET", "SAVE", "900 1 300 10"))
})
test_that("CONFIG RESETSTAT", {
expect_equal(redis_cmds$CONFIG_RESETSTAT(),
list("CONFIG", "RESETSTAT"))
})
test_that("DEBUG OBJECT", {
expect_equal(redis_cmds$DEBUG_OBJECT("key"),
list("DEBUG", "OBJECT", "key"))
})
test_that("DEBUG SEGFAULT", {
expect_equal(redis_cmds$DEBUG_SEGFAULT(),
list("DEBUG", "SEGFAULT"))
})
test_that("MONITOR", {
expect_equal(redis_cmds$MONITOR(),
list("MONITOR"))
})
test_that("SAVE", {
expect_equal(redis_cmds$SAVE(), list("SAVE"))
})
test_that("SHUTDOWN", {
expect_equal(redis_cmds$SHUTDOWN("SAVE"), list("SHUTDOWN", "SAVE"))
expect_equal(redis_cmds$SHUTDOWN("NOSAVE"), list("SHUTDOWN", "NOSAVE"))
})
test_that("SLAVEOF", {
expect_equal(redis_cmds$SLAVEOF("NO", "ONE"), list("SLAVEOF", "NO", "ONE"))
})
test_that("SYNC", {
expect_equal(redis_cmds$SYNC(), list("SYNC"))
}) |
mipplot_autofill_color <- function(rule_table_without_colors) {
random_colors_for_the_not_matched <-
c("#a6cee3", "#1f78b4", "#b2df8a",
"#33a02c", "#fb9a99", "#e31a1c",
"#fdbf6f", "#ff7f00", "#cab2d6")
# assumption: the nine hex codes above are stand-ins; the original colour
# literals are not recoverable from this source.
rule_table_with_colors <- rule_table_without_colors
ith_LHS <- NA
n_rule <- nrow(rule_table_with_colors)
for (i_rule in 1:n_rule) {
ith_rule <- rule_table_with_colors[i_rule, ]
if (has_LHS_in(ith_rule)) {
ith_LHS <- extract_LHS_from_rule(ith_rule)
}
if (nchar(ith_rule$Right_side) == 0) next
if (nchar(ith_rule$Color_code) > 0) next
ith_rule$Left_side <- ith_LHS
V <- extract_specific_category_from_rule(ith_rule)
standard_color_scheme_table <- mipplot::mipplot_default_color_palette[[1]]
n_standard_color_scheme <- length(standard_color_scheme_table)
distance_list <- numeric(n_standard_color_scheme)
for (i_standard_color_scheme in 1:n_standard_color_scheme) {
V_prime <- names(standard_color_scheme_table)[i_standard_color_scheme]
distance <- levenshtein_distance(tolower(V), tolower(V_prime))
distance_list[i_standard_color_scheme] <- distance
}
minimum_distance <- min(distance_list)
DISTANCE_THRESHOLD <- as.integer(max(nchar(V), nchar(V_prime)) * 0.8)
if (minimum_distance < DISTANCE_THRESHOLD) {
i_minimum_distance <- which.min(distance_list)
rule_table_with_colors[i_rule, INDEX_COL_COLOR_CODE] <-
standard_color_scheme_table[i_minimum_distance]
print(paste(
'[message] ',
"'", V, "'", ' matched to ',
"'", names(standard_color_scheme_table)[i_minimum_distance], "'",
sep = ''))
}else{
random_color_code <- random_colors_for_the_not_matched[1]
random_colors_for_the_not_matched <-
random_colors_for_the_not_matched[
2:length(random_colors_for_the_not_matched)]
if (length(random_colors_for_the_not_matched) == 0) {
random_colors_for_the_not_matched <- c("#808080") # assumption: stand-in refill colour; original literal not recoverable
}
rule_table_with_colors[i_rule, INDEX_COL_COLOR_CODE] <-
random_color_code
print(paste(
'[message] ',
'Similar name of variable to ',
"'", V, "'",' is not found. ',
'random color code ', random_color_code, ' is inserted.',
sep = ''))
}
}
return(rule_table_with_colors)
}
INDEX_COL_LHS <- 2
INDEX_COL_RHS <- 3
INDEX_COL_COLOR_CODE <- 4
extract_LHS_from_rule <- function(rule) {
return(rule[1, INDEX_COL_LHS])
}
has_LHS_in <- function(rule) {
if (nchar(rule[1, INDEX_COL_LHS]) == 0) {
return(FALSE)
}else{
return(TRUE)
}
}
extract_specific_category_from_rule <- function(rule) {
LHS <- rule[1, INDEX_COL_LHS]
RHS <- rule[1, INDEX_COL_RHS]
category <- LHS
specific_category <- gsub(paste(category, "|", sep=""), "", RHS, fixed = TRUE)
return(specific_category)
}
levenshtein_distance <- function(s, t) {
m <- nchar(s)
n <- nchar(t)
d <- matrix(0, nrow = m + 1, ncol = n + 1)
for (i in 1:m) {
d[i+1, 0+1] <- i
}
for (j in 1:n) {
d[0+1, j+1] <- j
}
for (j in 1:n) {
for (i in 1:m) {
if (substr(s, i, i) == substr(t, j, j)) {
substitution_cost <- 0
} else {
substitution_cost <- 1
}
d[i+1, j+1] <- min(
d[i-1+1, j+1]+1,
d[i+1, j-1+1]+1,
d[i-1+1, j-1+1] + substitution_cost)
}
}
return(d[m+1, n+1])
} |
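# Sanity check for the edit-distance helper above:
# levenshtein_distance("kitten", "sitting") # expected 3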
as_tcclimate <- function(x, varnames = NULL) {
msg1 <- "Format of climate data was not recognized. It is absolutely necessary that only complete years (months 1-12) are provided."
if (any(class(x) == "list")) {
n <- length(x)
minyrs <- maxyrs <- numeric(n)
for (i in 1:n) {
y <- x[[i]]
if (dim(y)[2] == 13) {
perf_seq <- seq(y[1,1], y[dim(y)[1],1], 1)
if (length(y[,1]) != length(perf_seq)) {
stop(msg1)
}
if (!all(y[,1] == perf_seq)) {
stop(msg1)
} else {
minyrs[i] <- min(y[,1])
maxyrs[i] <- max(y[,1])
}
}
}
yrs <- max(minyrs):min(maxyrs)
nyrs <- length(yrs)
output_matrix <- matrix(NA, ncol = n + 2, nrow = nyrs*12)
output_matrix[,1] <- rep(yrs, each = 12)
output_matrix[,2] <- rep(1:12, nyrs)
for (i in 1:n) {
y <- x[[i]]
for (j in 1:nyrs) {
if (any(y[,1] == yrs[j])) {
output_matrix[which(output_matrix[,1] == yrs[j]), 2+i] <-
unlist(y[which(y[,1] == yrs[j]), 2:13])
}
}
}
} else {
if (dim(x)[2] == 13) {
perf_seq <- seq(x[1,1], x[dim(x)[1],1], 1)
if (length(x[,1]) != length(perf_seq)) {
stop(msg1)
}
if (!all(x[,1] == perf_seq)) {
stop(msg1)
} else {
yrs <- unique(x[,1])
nyrs <- length(yrs)
output_matrix <- matrix(NA, ncol = 3, nrow = nyrs*12)
output_matrix[,1] <- rep(yrs, each = 12)
output_matrix[,2] <- rep(1:12, nyrs)
for (i in 1:nyrs) {
output_matrix[which(output_matrix[,1] == yrs[i]), 3] <-
unlist(x[which(x[,1] == yrs[i]), 2:13])
}
}
} else {
perf_seq <- rep(x[1,1]:x[dim(x)[1],1], each = 12)
if (length(x[,1]) != length(perf_seq)) {
stop(msg1)
}
if (!all(x[,1] == perf_seq)) {
stop(msg1)
} else {
if (!all(x[,2] == rep(1:12, length(unique(x[,1]))))) {
stop(msg1)
} else {
output_matrix <- x
}
}
}
}
output <- data.frame(output_matrix)
if (!is.null(varnames)) {
if (length(varnames) == (dim(output)[2] - 2)) {
colnames(output)[-c(1,2)] <- varnames
} else {
stop("`varnames` has to be of the same length as the number of parameters.")
}
}
if (is.null(varnames) && !is.null(names(x)) && inherits(x, "list")) {
colnames(output)[-c(1,2)] <- names(x)
}
class(output) <- c("tc_climate", "data.frame")
output
} |
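# Usage sketch (hypothetical input: a year column followed by twelve monthly
# values, i.e. the 13-column layout handled above):
# prec <- data.frame(year = 1950:2000,
#                    matrix(rgamma(51 * 12, shape = 2, scale = 30), ncol = 12))
# as_tcclimate(prec, varnames = "prec")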
library(ggplot2)
library(patchwork)
pie_sales = data.frame(
ratio = c(0.12, 0.3, 0.26, 0.16, 0.04, 0.12),
name = c("蓝莓", "樱桃", "苹果", "波士顿奶油", "其它", "香草奶油"))
pie_sales = pie_sales[order(-pie_sales$ratio), ]
pie_sales$name = factor(
pie_sales$name, levels = pie_sales$name[order(pie_sales$ratio)])
pie1 = ggplot(pie_sales, aes(x = "", y = ratio, fill = name)) +
geom_bar(width = 1, stat = "identity", color = "white") +
coord_polar("y", start = 0) +
labs(fill = "口味") +
theme_void()
dot1 = ggplot(pie_sales, aes(name, ratio, color = name)) +
geom_point() +
coord_flip() +
theme(legend.position = "", axis.title = element_blank())
col1 = ggplot(pie_sales, aes(name, ratio, fill = name)) +
geom_col() +
coord_flip() +
theme(legend.position = "", axis.title = element_blank())
print(pie1 / dot1 / col1) |
context("Test tk_tbl")
FB_tbl <- FANG %>% filter(symbol == "FB")
test_that("tbl tot tbl test returns tibble with correct rows and columns.", {
test_tbl_1 <- tk_tbl(FB_tbl, preserve_index = F, rename_index = "date")
expect_is(test_tbl_1, "tbl")
expect_equal(nrow(test_tbl_1), 1008)
expect_equal(ncol(test_tbl_1), 8)
expect_equal(colnames(test_tbl_1)[[2]], "date")
expect_warning(tk_tbl(FB_tbl, preserve_index = T))
})
FB_xts <- tk_xts(FB_tbl, select = -c(date, symbol), date_var = date)
test_that("xts to tbl test returns tibble with correct rows and columns.", {
test_tbl_2 <- tk_tbl(FB_xts, preserve_index = T, rename_index = "date")
expect_equal(nrow(test_tbl_2), 1008)
expect_equal(ncol(test_tbl_2), 7)
expect_equal(colnames(test_tbl_2)[[1]], "date")
expect_equal(ncol(tk_tbl(FB_xts, preserve_index = F, rename_index = "date")), 6)
})
FB_zoo <- tk_zoo(FB_tbl, silent = TRUE)
test_that("zoo to tbl test returns tibble with correct rows and columns.", {
test_tbl_3 <- tk_tbl(FB_zoo, preserve_index = T, rename_index = "date")
expect_equal(nrow(test_tbl_3), 1008)
expect_equal(ncol(test_tbl_3), 7)
expect_equal(colnames(test_tbl_3)[[1]], "date")
expect_equal(ncol(tk_tbl(FB_zoo, preserve_index = F, rename_index = "date")), 6)
})
FB_zooreg <- tk_zooreg(FB_tbl, start = 2015, frequency = 250, silent = TRUE)
test_that("zooreg to tbl test returns tibble with correct rows and columns.", {
test_tbl_3a <- tk_tbl(FB_zooreg, preserve_index = T, rename_index = "date")
expect_equal(nrow(test_tbl_3a), 1008)
expect_equal(ncol(test_tbl_3a), 7)
expect_equal(colnames(test_tbl_3a)[[1]], "date")
expect_equal(ncol(tk_tbl(FB_zooreg, preserve_index = F, rename_index = "date")), 6)
test_tbl_3b <- FB_zooreg %>%
tk_tbl(rename_index = "date", timetk_idx = TRUE)
expect_identical(test_tbl_3b, FB_tbl %>% select(-symbol))
zooreg_1 <- zoo::zooreg(1:5, start = as.Date("2000-01-01"))
expect_true(inherits(tk_tbl(zooreg_1)$index, "Date"))
zooreg_2 <- zoo::zooreg(1:5, end = zoo::yearmon(2000))
expect_true(inherits(tk_tbl(zooreg_2)$index, "yearmon"))
zooreg_3 <- zoo::zooreg(1:5, start = zoo::yearqtr(2000), frequency = 4)
expect_true(inherits(tk_tbl(zooreg_3)$index, "yearqtr"))
})
FB_mts <- tk_ts(FB_tbl, select = -c(date, symbol), start = 2015, frequency = 252)
test_that("mts to tbl test returns tibble with correct rows and columns.", {
test_tbl_4 <- tk_tbl(FB_mts, preserve_index = T, rename_index = "date")
expect_equal(nrow(test_tbl_4), 1008)
expect_equal(ncol(test_tbl_4), 7)
expect_equal(colnames(test_tbl_4)[[1]], "date")
expect_equal(ncol(tk_tbl(FB_mts, preserve_index = F, rename_index = "date")), 6)
expect_warning(tk_tbl(tk_ts(FB_mts, start = 1), select = -date, preserve_index = T))
expect_warning(
WWWusage %>%
tk_tbl(timetk_idx = TRUE)
)
test_tbl_4b <- FB_mts %>%
tk_tbl(rename_index = "date", timetk_idx = TRUE)
expect_identical(test_tbl_4b, FB_tbl %>% select(-symbol))
})
FB_matrix <- FB_xts %>% as.matrix()
test_that("matrix to tbl test returns tibble with correct rows and columns.", {
test_tbl_5 <- tk_tbl(FB_matrix, preserve_index = T, rename_index = "date")
expect_equal(nrow(test_tbl_5), 1008)
expect_equal(ncol(test_tbl_5), 7)
expect_equal(colnames(test_tbl_5)[[1]], "date")
expect_equal(ncol(tk_tbl(FB_matrix, preserve_index = F, rename_index = "date")), 6)
rownames(FB_matrix) <- NULL
expect_warning(tk_tbl(FB_matrix))
})
test_timeSeries <- timeSeries::timeSeries(1:100, timeDate::timeSequence(length.out = 100, by = "sec"))
test_that("timeSeries to tbl test returns tibble with correct rows and columns.", {
test_tbl_6 <- tk_tbl(test_timeSeries, preserve_index = T, rename_index = "date-time")
expect_equal(nrow(test_tbl_6), 100)
expect_equal(ncol(test_tbl_6), 2)
expect_equal(colnames(test_tbl_6)[[1]], "date-time")
})
n <- 10
t <- cumsum(rexp(n, rate = 0.1))
v <- rnorm(n)
test_tseries <- tseries::irts(t, v)
test_that("tseries to tbl test returns tibble with correct rows and columns.", {
test_tbl_7 <- tk_tbl(test_tseries, preserve_index = T, rename_index = "date-time")
expect_equal(nrow(test_tbl_7), 10)
expect_equal(ncol(test_tbl_7), 2)
expect_equal(colnames(test_tbl_7)[[1]], "date-time")
})
test_msts <- forecast::msts(forecast::taylor, seasonal.periods=c(48,336), start=2000+22/52)
test_that("forecast::msts to tbl test returns tibble with correct rows and columns.", {
test_tbl_8 <- tk_tbl(test_msts, preserve_index = T, rename_index = "index")
expect_equal(nrow(test_tbl_8), 4032)
expect_equal(ncol(test_tbl_8), 2)
expect_equal(colnames(test_tbl_8)[[1]], "index")
})
test_that("forecast::msts to tbl test returns tibble with correct rows and columns.", {
test_default <- 4
expect_warning(
tk_tbl(test_default, preserve_index = T, rename_index = "index")
)
test_tbl_9 <- tk_tbl(test_default, preserve_index = F, rename_index = "index")
expect_equal(nrow(test_tbl_9), 1)
expect_equal(ncol(test_tbl_9), 1)
}) |
library(hamcrest)
expected <- c(0x1.654e88411a9ccp+4 + -0x1.7p-47i, 0x1.5e242f0d84897p+4 + -0x1.8p-50i,
0x1.6d062ceb53381p+4 + 0x1p-46i, 0x1.91d15da3a3161p+4 + 0x1.f8p-46i,
0x1.cac2d754c9d36p+4 + 0x1.4cp-45i, 0x1.0a47b94df6325p+5 + 0x1.8a5981113790ep-47i,
0x1.354d3c5e5ea19p+5 + 0x1.315996a08e8f8p-46i, 0x1.63a3c3ee53814p+5 + 0x1.6dc867ee77cdp-46i,
0x1.9230c6ffbac1dp+5 + 0x1.ecbeea8c701f6p-46i, 0x1.bdc782af897a2p+5 + 0x1.c398a8a7dce9ep-46i,
0x1.e365417b2cd7bp+5 + 0x1.0b933bdc0d397p-45i, 0x1.00359059b7d81p+6 + 0x1.1630c8580c19fp-45i,
0x1.096869dca93fdp+6 + 0x1.ca513fb84dfd2p-46i, 0x1.0ca63500727bcp+6 + 0x1.0b14fe82b0144p-45i,
0x1.09b5f96e00eccp+6 + 0x1.e4f08fbb2b031p-46i, 0x1.00cf7815fab35p+6 + 0x1.442734d0d2c58p-45i,
0x1.e52ec9a9e289fp+5 + 0x1.52ed79f90004ep-45i, 0x1.c026c47135e7ep+5 + 0x1.6162cb3b791d6p-45i,
0x1.952d4b63c5d6cp+5 + 0x1.a568e1a8f9d27p-45i, 0x1.67517b03eee11p+5 + 0x1.d9680e84495e5p-45i,
0x1.39d0cf4dee4d2p+5 + 0x1.1e0a3f86e9328p-45i, 0x1.0fd9713a3994cp+5 + 0x1.54c3d2e17628ap-45i,
0x1.d89bce5007f8ap+4 + 0x1.c6efa7e2ee849p-45i, 0x1.a31da36b5087ep+4 + 0x1.1215d3d3aca7bp-44i,
0x1.829dce69ce6a4p+4 + 0x1.3aa8af688dfe2p-44i, 0x1.78e927fa53076p+4 + 0x1.05a9bff0bfde6p-45i,
0x1.86172a2875094p+4 + 0x1.4e3caba1f2803p-45i, 0x1.a88f44580d41bp+4 + 0x1.ae97873918978p-45i,
0x1.dd2fc9998a4cdp+4 + 0x1.05235bdc2bbc8p-44i, 0x1.0fc9a795d31bfp+5 + 0x1.1ca50dc5935eap-44i,
0x1.3537747edd9d6p+5 + 0x1.57e2e30bd4fedp-45i, 0x1.5c00dc5517199p+5 + 0x1.9649bfb646d26p-45i,
0x1.81467082f7ce6p+5 + 0x1.9558f57cfdecp-45i, 0x1.a26520df7ebc9p+5 + 0x1.ffddb964a97e6p-45i,
0x1.bd2c392bd4c38p+5 + 0x1.18895ba82b4a7p-44i, 0x1.d009a4d177605p+5 + 0x1.03775b0ef7229p-44i,
0x1.da28e383d1cfep+5 + 0x1.251f4a70e6062p-44i, 0x1.db8227b156efcp+5 + 0x1.1957c1c86c666p-44i,
0x1.d4d85b51fd97fp+5 + 0x1.5c542378bfbb8p-44i, 0x1.c7a627602a482p+5 + 0x1.8e542b43bf5ffp-44i,
0x1.b5fb7a7fcfef9p+5 + 0x1.53a9faf0c4adap-44i, 0x1.a24e501a95402p+5 + 0x1.7f9e566edbe65p-44i,
0x1.8f428884d1bacp+5 + 0x1.94da4f3c929a7p-44i, 0x1.7f6d668a158b4p+5 + 0x1.c60ce6dda154dp-44i,
0x1.7519aeb529ab7p+5 + 0x1.f33e8b28515fp-44i, 0x1.72116780c822fp+5 + 0x1.6c1fd99e21b54p-44i,
0x1.7771d5ee0d04cp+5 + 0x1.90cd080fa488dp-44i, 0x1.858d8e6005077p+5 + 0x1.ae4e15ae223e2p-44i,
0x1.9bdf5c48e5be6p+5 + 0x1.ad58f11f523e1p-44i, 0x1.b90f70fb5c4adp+5 + 0x1.ad1290bb4cc7ep-44i,
0x1.db0ad4a96b7a5p+5 + 0x1.50c0ff1a12dc1p-44i, 0x1.ff2b9932159c5p+5 + 0x1.5c511e0fa5aefp-44i,
0x1.113775542118p+6 + 0x1.5353bc7d2ca93p-44i, 0x1.20da74ee27f21p+6 + 0x1.441957df6adb3p-44i,
0x1.2d02ad333db9ep+6 + 0x1.37290bbfce092p-44i, 0x1.3469d56fb8571p+6 + 0x1.35c30b3e10b48p-44i,
0x1.361e638caf46bp+6 + 0x1.3edc38b408b3ep-44i, 0x1.319cbe8966cf2p+6 + 0x1.238a848e5eb6bp-44i,
0x1.26e0809cd50d3p+6 + 0x1.545d6a0b7e6ep-44i, 0x1.166c20222276p+6 + 0x1.856fb793a23efp-44i,
0x1.01460be7152c6p+6 + 0x1.4c8ded54aa9d3p-44i, 0x1.d1d628714ec94p+5 + 0x1.7d348a21e4a21p-44i,
0x1.9e6d9da017542p+5 + 0x1.8bed066e2de7dp-44i, 0x1.6c86e0ad836f1p+5 + 0x1.00f5408023e51p-43i,
0x1.4086957f5319bp+5 + 0x1.367956c6d5921p-43i, 0x1.1ead5bbd989bp+5 + 0x1.93612771eb7dp-44i,
0x1.0ac287a924a41p+5 + 0x1.e654f9bd0253fp-44i, 0x1.07c449f6f834fp+5 + 0x1.0c287548b79e8p-43i,
0x1.17a38681ca916p+5 + 0x1.36639cdabbfe4p-43i, 0x1.3b10e7e6e5077p+5 + 0x1.5791d8ea6da96p-43i,
0x1.715f9d05b471ap+5 + 0x1.d6e2642bc7132p-44i, 0x1.b880b033d396p+5 + 0x1.07ba7d034b901p-43i,
0x1.068c14aff537dp+6 + 0x1.0a6614c10d418p-43i, 0x1.3555a622757d8p+6 + 0x1.f622dad8e946ep-44i,
0x1.65f2b29902998p+6 + 0x1.d95e2c6da8bc2p-44i, 0x1.95776ea23b8f7p+6 + 0x1.e410ef3c8ee37p-44i,
0x1.c0e86dcae5159p+6 + 0x1.de4b8087ff6f9p-44i, 0x1.e56f5442c5a42p+6 + 0x1.992ad0c15c27fp-44i,
0x1.00475c31c9e54p+7 + 0x1.48904c661657ap-44i, 0x1.0828bd23eb073p+7 + 0x1.1df003bb78305p-44i,
0x1.09b909f55debp+7 + 0x1.b9e73e01faf1ap-44i, 0x1.04bb712da63e8p+7 + 0x1.a52e9edfa2bdp-44i,
0x1.f2c120600f642p+6 + 0x1.671faf87b0c5ap-44i, 0x1.d08d3025427ep+6 + 0x1.7070e8da905e2p-44i,
0x1.a4e3d4667d378p+6 + 0x1.a49e14686272ap-44i, 0x1.727d07c8a860dp+6 + 0x1.8a43938c940f4p-44i,
0x1.3c96278d94d18p+6 + 0x1.afdc315f7ba86p-44i, 0x1.06be1284e2858p+6 + 0x1.c94974904f496p-44i,
0x1.a93539cdb1ae4p+5 + 0x1.2295e9268d75p-43i, 0x1.53567eb868df3p+5 + 0x1.60700f949631cp-43i,
0x1.121a513ca122ap+5 + 0x1.89350848bfcap-44i, 0x1.d513419870bd4p+4 + 0x1.e69f7b4e823dfp-44i,
0x1.c0435e670553bp+4 + 0x1.14afc569a6ccbp-43i, 0x1.e925a7993d394p+4 + 0x1.49a5f9bf01bacp-43i,
0x1.279d04806898cp+5 + 0x1.6c5afc5efca38p-43i, 0x1.770ce8ec0265p+5 + 0x1.ba2b9533b603cp-44i,
0x1.deda075e8252p+5 + 0x1.00384dc1f1615p-43i, 0x1.2cb22c013bad4p+6 + 0x1.fa4da68889c72p-44i,
0x1.6fe6b07e3fe5dp+6 + 0x1.dff25849b3608p-44i, 0x1.b5338ac52eb56p+6 + 0x1.bf382cfe4ebf3p-44i,
0x1.f89896678ab3ap+6 + 0x1.ec9950fff5972p-44i, 0x1.1b1727aaa9267p+7 + 0x1.ea0f25525763fp-44i,
0x1.353320243993p+7 + 0x1.89b93c41b726fp-44i, 0x1.4922e898303ap+7 + 0x1.2352bd0d482dap-44i,
0x1.55cbc25e2ae0fp+7 + 0x1.f6f9b5a89e54ep-45i, 0x1.5a87ac6b1b7bcp+7 + 0x1.eb4e51cb6065fp-44i,
0x1.572fee5f7ea76p+7 + 0x1.cc6002893d37ep-44i, 0x1.4c1f4c82bf23ap+7 + 0x1.7be99f83cc8fep-44i,
0x1.3a2bc39fcda2ap+7 + 0x1.5ae1849beba7p-44i, 0x1.22983e8a9e894p+7 + 0x1.89f3c03b15d2ap-44i,
0x1.06ff41af0c6bap+7 + 0x1.b05f49733fd18p-44i, 0x1.d27003961cea8p+6 + 0x1.bf081fab6dbcap-44i,
0x1.966f7318ceca4p+6 + 0x1.cb43be517a80cp-44i, 0x1.5de29f7ff1346p+6 + 0x1.0b53189206e76p-43i,
0x1.2c6af1e88886dp+6 + 0x1.3a3e34f237f54p-43i, 0x1.052ae4b4085dep+6 + 0x1.6af8f15cb15c3p-44i,
0x1.d52905e585829p+5 + 0x1.ac6e0946f7434p-44i, 0x1.bc88431d02184p+5 + 0x1.dbb9d2b9f3133p-44i,
0x1.c1d34974b8289p+5 + 0x1.1501a670a2ce6p-43i, 0x1.e483401f34307p+5 + 0x1.281cacf7ae922p-43i,
0x1.111e879c04b21p+6 + 0x1.5171a7056b576p-44i, 0x1.3b810fc119e27p+6 + 0x1.89454030c797dp-44i,
0x1.6ebdd4b4cbd8fp+6 + 0x1.77a502fe3f634p-44i, 0x1.a7ad82c2f5c4ep+6 + 0x1.6995c9c18b2f8p-44i,
0x1.e2e40d4a249f3p+6 + 0x1.4eb30a48be2d3p-44i, 0x1.0e758b90a5efep+7 + 0x1.6c64883ea09c2p-44i,
0x1.293dc51f92d5p+7 + 0x1.74026a6ac7ef8p-44i, 0x1.40593fd4d3cccp+7 + 0x1.21a33a29349f3p-44i,
0x1.529f00254310dp+7 + 0x1.afb8e01ee132ap-45i, 0x1.5f3f656ca35fp+7 + 0x1.a02f8e1f4f63p-45i,
0x1.65ce8f03c8cbp+7 + 0x1.8e6bb8b287b66p-44i, 0x1.6647bc5cee78ep+7 + 0x1.7f751b65d7464p-44i,
0x1.6109a14439f87p+7 + 0x1.40915a748e292p-44i, 0x1.56cc254492771p+7 + 0x1.16616edcabda6p-44i,
0x1.489077cd1ee82p+7 + 0x1.476f3960aa1a3p-44i, 0x1.378cc2112c6cap+7 + 0x1.808aea9ff0b4ap-44i,
0x1.2515054908667p+7 + 0x1.82f92ecd7f134p-44i, 0x1.1282db7d9475ep+7 + 0x1.7bed26bb69959p-44i,
0x1.011dd82dbf6bcp+7 + 0x1.86266f44fa626p-44i, 0x1.e40c5a2a2d881p+6 + 0x1.b19675f1f610ep-44i,
0x1.cc46015a77adap+6 + 0x1.37f8b435d84d9p-44i, 0x1.bc2b28aea8ad9p+6 + 0x1.4be4120ee7debp-44i,
0x1.b463eba520708p+6 + 0x1.493d0f9d7a171p-44i, 0x1.b4fb14c70d7b3p+6 + 0x1.522ad0308e199p-44i,
0x1.bd66a43f6dbd1p+6 + 0x1.42ea9ef306a6fp-44i, 0x1.cc9aaf06feceap+6 + 0x1.c1a37e2fc5e0ep-45i,
0x1.e124c5184411fp+6 + 0x1.dd1f33103e149p-45i, 0x1.f94d7b31d1702p+6 + 0x1.8addd35b9fff1p-45i,
0x1.099eaa4d01f9p+7 + 0x1.59502f9db416ap-45i, 0x1.1691108e98543p+7 + 0x1.1ddc54db6c2adp-45i,
0x1.22a8fa66651b1p+7 + 0x1.668c23368c0a1p-45i, 0x1.2d34939605f5ep+7 + 0x1.6d5be3cbc64c6p-45i,
0x1.35b09fd7bb9bdp+7 + 0x1.0a7ccf459c371p-45i, 0x1.3bcfa9bbe3345p+7 + 0x1.8de5b23897becp-46i,
0x1.3f7c8785880ccp+7 + 0x1.e85b940fe160cp-46i, 0x1.40d833f3a10d2p+7 + 0x1.7a07123179bbfp-45i,
0x1.4033570317cccp+7 + 0x1.8579137dcb533p-45i, 0x1.3e042f2db703p+7 + 0x1.5fb3024617a74p-45i,
0x1.3ad9d022d03cp+7 + 0x1.35b2435639865p-45i, 0x1.374ddb704ff3ep+7 + 0x1.9e6ae15ca41b5p-45i,
0x1.33f5eec5057bdp+7 + 0x1.a831c9e719f8fp-45i, 0x1.31560106d75c3p+7 + 0x1.b1a936c611624p-45i,
0x1.2fd4cc157c526p+7 + 0x1.97ef6b1f1651ap-45i, 0x1.2fb32f57f5959p+7 + 0x1.5febefb5ebe01p-45i,
0x1.31073371ae71cp+7 + 0x1.66bba9ab29c93p-45i, 0x1.33bb075eece66p+7 + 0x1.8035b2bd08856p-45i,
0x1.378ff94ee4dbbp+7 + 0x1.6533b9a7e23f3p-45i, 0x1.3c2518caef7c6p+7 + 0x1.01a494d7fd189p-45i,
0x1.4100e2d80a3d4p+7 + 0x1.303f02eafcce8p-46i, 0x1.459d19d01fc29p+7 + 0x1.477005e7228e8p-48i,
0x1.4973be96c52c5p+7 + 0x1.c5a9030932282p-46i, 0x1.4c0c0ae19245dp+7 + 0x1.532e29dafd5a8p-46i,
0x1.4d064e1d57394p+7 + 0x1.2efb3d8a72afap-48i, 0x1.4c25a7afc3683p+7 + -0x1.11a5e3874c608p-47i,
0x1.4956c7be19d0ep+7 + -0x1.252e0bfef5305p-46i, 0x1.44b31f5f726b5p+7 + 0x1.0b2de321a285dp-48i,
0x1.3e8034fb54044p+7 + -0x1.55f2d5a693c8p-54i, 0x1.372b23648f2fbp+7 + -0x1.7b690f68f8d38p-48i,
0x1.2f409b6afdd3fp+7 + -0x1.7b26e74e4d15ep-47i, 0x1.276209a020e6p+7 + -0x1.8bbaa4be143fcp-48i,
0x1.2038bef54afc7p+7 + -0x1.43d34ca4d4136p-47i, 0x1.1a6825cfc500dp+7 + -0x1.1b92d5ab9c004p-47i,
0x1.168022c222c0dp+7 + -0x1.16a0786193a9ep-48i, 0x1.14f0be905c977p+7 + -0x1.ae82448b0316fp-48i,
0x1.16001e9b1a9bbp+7 + 0x1.d544a50554d6p-50i, 0x1.19c3a335edeadp+7 + -0x1.3c31414739762p-47i,
0x1.201cc45f4aeffp+7 + -0x1.29f7cce584204p-47i, 0x1.28b9f9cd490c5p+7 + -0x1.673bc742a8999p-47i,
0x1.331ba941e9a83p+7 + -0x1.3b06a3bef46e8p-46i, 0x1.3e9cc41bd8eefp+7 + -0x1.837c563db35bep-46i,
0x1.4a7e719da9921p+7 + -0x1.2ad9d75ce2937p-48i, 0x1.55f5e46c6fe56p+7 + -0x1.5f9b0ae1b7dd9p-47i,
0x1.603b4c5404df3p+7 + -0x1.af3c7cb1ee55ep-46i, 0x1.6898bd184da71p+7 + -0x1.544be35a789e4p-45i,
0x1.6e77e1fba13dcp+7 + -0x1.c3141a660f99p-45i, 0x1.716d6bd5187b3p+7 + -0x1.1b87c0d11561dp-47i,
0x1.7141536ad56cfp+7 + -0x1.36e8081b92ed2p-46i, 0x1.6df347e3bc8dcp+7 + -0x1.1c5ccc8f84205p-45i,
0x1.67baecb751c8fp+7 + -0x1.952ce248837cp-45i, 0x1.5f03dda511192p+7 + -0x1.db526d08e428ep-45i,
0x1.5465c36dfd0dp+7 + -0x1.b8e3adb069694p-46i, 0x1.489912b04fa6dp+7 + -0x1.1d6e7f74e5ae3p-45i,
0x1.3c69512d86874p+7 + -0x1.2d5a847213929p-45i, 0x1.30a5efa465f3ep+7 + -0x1.8457f25b2c46p-45i,
0x1.2612e34a79c8bp+7 + -0x1.91edf0f7ce102p-45i, 0x1.1d5a2f7cafd43p+7 + -0x1.9c532ec419155p-45i,
0x1.16ff7df0fa5cbp+7 + -0x1.c3347df4205bcp-45i, 0x1.1356bae6081d4p+7 + -0x1.97a7a3ae471f8p-45i,
0x1.127e6ea1b6b05p+7 + -0x1.e66684f152a35p-45i, 0x1.145e42aaf32a6p+7 + -0x1.042e379e0c73p-44i,
0x1.18a9cd64ddf0ap+7 + -0x1.08d091f65abbap-44i, 0x1.1ee76894e5d3p+7 + -0x1.20452494db4d9p-44i,
0x1.267a85b062c92p+7 + -0x1.2bca0cabccb6ep-44i, 0x1.2eb0ac036673cp+7 + -0x1.5227a7b2b4406p-44i,
0x1.36d0167ad6032p+7 + -0x1.74e7bebde3e42p-44i, 0x1.3e26c573c204fp+7 + -0x1.06e28ab172edp-44i,
0x1.4418cf27e6fap+7 + -0x1.25722590ccd65p-44i, 0x1.482cc80aa772dp+7 + -0x1.51d755056c194p-44i,
0x1.4a1542a5bd11bp+7 + -0x1.692741dd75213p-44i, 0x1.49b6a1615f6a4p+7 + -0x1.7da22019567b5p-44i,
0x1.4728bf9c1c72dp+7 + -0x1.d56595787c838p-45i, 0x1.42b44cb95eb78p+7 + -0x1.0375f28304914p-44i,
0x1.3ccc0cadeb169p+7 + -0x1.19f7a0a823bf8p-44i, 0x1.3602870bc88bp+7 + -0x1.2d2c8365046ebp-44i,
0x1.2efcf8d4273efp+7 + -0x1.33aaad0b51954p-44i, 0x1.28649854b2c53p+7 + -0x1.dcf7b44dd799ep-45i,
0x1.22d76035c4a51p+7 + -0x1.05520b3df82bbp-44i, 0x1.1ed9a4644eaa5p+7 + -0x1.f28202a52b8cep-45i,
0x1.1cc9a7528ececp+7 + -0x1.313a2bc74be05p-44i, 0x1.1cd640cc76c1fp+7 + -0x1.58b51b0fa63a5p-44i,
0x1.1ef96bf09573p+7 + -0x1.2b5b8941292acp-44i, 0x1.22f7442f6eb67p+7 + -0x1.579e573e71468p-44i,
0x1.28619f647f0b8p+7 + -0x1.596d8cf005f55p-44i, 0x1.2ea0141791ccap+7 + -0x1.b8f870aad357ep-44i,
0x1.34fbde24e84a7p+7 + -0x1.055c7f2776639p-43i, 0x1.3aaec0c7f56fap+7 + -0x1.809d9de22bafep-44i,
0x1.3ef3c204a7006p+7 + -0x1.c1dc51834705p-44i, 0x1.41186e393a203p+7 + -0x1.e4f1a5dd3ae82p-44i,
0x1.408d3f2ce9693p+7 + -0x1.135ca9d88e775p-43i, 0x1.3cf3ca351e52p+7 + -0x1.2f9d7ccc9fdfp-43i,
0x1.36297cebee84ap+7 + -0x1.b36e5d3497966p-44i, 0x1.2c4ded2cffce6p+7 + -0x1.e150489d70044p-44i,
0x1.1fc413603336ap+7 + -0x1.f11a9dd65d33dp-44i, 0x1.112e278fe4c37p+7 + -0x1.dcea66e6a07f3p-44i,
0x1.0164432b12582p+7 + -0x1.cb5aecfebfb52p-44i, 0x1.e2cca55cf0111p+6 + -0x1.a80b8ed1c7f1ep-44i,
0x1.c4948756e7557p+6 + -0x1.a88a145b751d7p-44i, 0x1.aa4f5a45fb74dp+6 + -0x1.7f3f0e8063f78p-44i,
0x1.9604d6c199bb6p+6 + -0x1.4c5eeed376864p-44i, 0x1.896b90e93ec39p+6 + -0x1.2db3384234ee1p-44i,
0x1.85c3982abc8d4p+6 + -0x1.7cad8ffea3271p-44i, 0x1.8bb860bd66a04p+6 + -0x1.75f228c0ae8aep-44i,
0x1.9b4c7c08ac438p+6 + -0x1.490267e93ee1bp-44i, 0x1.b3d0fc2767718p+6 + -0x1.69276d260e81ap-44i,
0x1.d3e98def54ec4p+6 + -0x1.9e07f9f21bcbbp-44i, 0x1.f99d6b194711bp+6 + -0x1.6bea26a2e32bep-44i,
0x1.113a2a1ec0c6bp+7 + -0x1.99f3452f711aap-44i, 0x1.25ceed0c1cec4p+7 + -0x1.b13ca8b65b35p-44i,
0x1.391038292bfb4p+7 + -0x1.18d8a16222096p-43i, 0x1.498690c14c4d4p+7 + -0x1.55b2b2c340218p-43i,
0x1.55da28836ed38p+7 + -0x1.9372f147a04e1p-44i, 0x1.5cebe64c16764p+7 + -0x1.f0986fc75b0ecp-44i,
0x1.5deb44b1a2e1ep+7 + -0x1.1739572471896p-43i, 0x1.586763298a446p+7 + -0x1.4916e5d0ce49cp-43i,
0x1.4c59f4bb2464cp+7 + -0x1.6ce7382a3c621p-43i, 0x1.3a2b1f5cd129fp+7 + -0x1.d706934034ed8p-44i,
0x1.22adda30c5e99p+7 + -0x1.0c6b9ea67450cp-43i, 0x1.0714de4ce3b54p+7 + -0x1.0ba8046833f4ep-43i,
0x1.d1c1883905a64p+6 + -0x1.f774728cb2d42p-44i, 0x1.9390ca37cb9eep+6 + -0x1.d61a6749964e3p-44i,
0x1.573a0b1a56284p+6 + -0x1.fbeb9206489dcp-44i, 0x1.205837ca315ep+6 + -0x1.f5846ba69e0c8p-44i,
0x1.e482f773f524fp+5 + -0x1.9d4976192f10ap-44i, 0x1.9f9b25866231ap+5 + -0x1.3a982c0b6572p-44i,
0x1.76472242ac93fp+5 + -0x1.0ca734719e21dp-44i, 0x1.6b255367be2cep+5 + -0x1.e2283b68af042p-44i,
0x1.7ef187d5af1bcp+5 + -0x1.c4b9fd69d1559p-44i, 0x1.b0718f77060bep+5 + -0x1.79b400d1e05fep-44i,
0x1.fc82ed9ba6234p+5 + -0x1.6b6918b2eee15p-44i, 0x1.2f24898b4554ap+6 + -0x1.9ccc1b71e7641p-44i,
0x1.67bca8f83280dp+6 + -0x1.a3aa36579a0e4p-44i, 0x1.a45fd75a349bcp+6 + -0x1.bdb2dca97fb64p-44i,
0x1.e11afc2e2a5d6p+6 + -0x1.d2d141d18eee4p-44i, 0x1.0cf951c5f276cp+7 + -0x1.1dd70f111b7c2p-43i,
0x1.259250c484ed4p+7 + -0x1.56966716c968cp-43i, 0x1.38b31b311e012p+7 + -0x1.7afb926e9110cp-44i,
0x1.450d44e0025f2p+7 + -0x1.ce81eb383422cp-44i, 0x1.49beeb3c1ffedp+7 + -0x1.0647e5172c9a4p-43i,
0x1.466131d54dcdep+7 + -0x1.37d0aef32a56dp-43i, 0x1.3b0eaf38e494cp+7 + -0x1.54e11f95404b9p-43i,
0x1.28615fb8ff58fp+7 + -0x1.8a301442402ffp-44i, 0x1.0f6846e5a4df9p+7 + -0x1.cd78a9760bff5p-44i,
0x1.e32af0e5ac4c2p+6 + -0x1.c2b60956b1419p-44i, 0x1.a14b882cfd45cp+6 + -0x1.aff100740f49ep-44i,
0x1.5d0759099998ap+6 + -0x1.92598d4bff217p-44i, 0x1.1a4f414696a6dp+6 + -0x1.b9b3e6edacf96p-44i,
0x1.b9de67ee2fe4cp+5 + -0x1.bce54e4c42278p-44i, 0x1.50a126290413ap+5 + -0x1.5f4b1340afa74p-44i,
0x1.fd0ee08e24d87p+4 + -0x1.029f40b73504cp-44i, 0x1.8f50dcb8d836cp+4 + -0x1.d16d748d024acp-45i,
0x1.5caa8d12502dp+4 + -0x1.cf5aade2362a2p-44i, 0x1.65f6a2bcddf9ep+4 + -0x1.b5fa68f2078b5p-44i,
0x1.a84d01c5d53f4p+4 + -0x1.6998ffbca5333p-44i, 0x1.0ea34bed3ab7p+5 + -0x1.3ed6bec0006ep-44i,
0x1.5dbd4d5337a3bp+5 + -0x1.6dc2d3f6f3685p-44i, 0x1.bb95dd778c167p+5 + -0x1.a731d4be37795p-44i,
0x1.10c62f4759547p+6 + -0x1.ad81350bef409p-44i, 0x1.44586d9f187f9p+6 + -0x1.af4dfc3b50837p-44i,
0x1.751c8307621eap+6 + -0x1.d8f2acebe0f95p-44i, 0x1.9ff7d36346a64p+6 + -0x1.0ef56607e3a0bp-43i,
0x1.c24b87859b189p+6 + -0x1.56ab8d86b6364p-44i, 0x1.da1c1ec005f29p+6 + -0x1.8182a19635762p-44i,
0x1.e62ce85e1e86ep+6 + -0x1.99ed31104d3f7p-44i, 0x1.e60ddaa85e3e9p+6 + -0x1.c83c160b6651ap-44i,
0x1.da1b38df540f6p+6 + -0x1.d4890114d0c7dp-44i, 0x1.c36f7a08b019ep+6 + -0x1.17d60c2fa46f3p-44i,
0x1.a3c8d50a683p+6 + -0x1.3cce025003ad1p-44i, 0x1.7d64aa5d4d738p+6 + -0x1.20e2e4b40e78fp-44i,
0x1.52d3aa0c80dc1p+6 + -0x1.111be438697dbp-44i, 0x1.26ca02bf6f2d4p+6 + -0x1.ecbda25e5bcf4p-45i,
0x1.f7de2aeb0ec11p+5 + -0x1.10e3dda717b04p-44i, 0x1.a9602ebf20226p+5 + -0x1.18fa9c117b5efp-44i,
0x1.663175476a913p+5 + -0x1.ad44ff60b7814p-45i, 0x1.3168de315e37ep+5 + -0x1.454470b89c64ap-45i,
0x1.0cf43090ac609p+5 + -0x1.59154b3a26715p-45i, 0x1.f30d11ef026a2p+4 + -0x1.2f170d47a32acp-44i,
0x1.ed3c4518150edp+4 + -0x1.2bcd9e07553f5p-44i, 0x1.02a01b786ae8p+5 + -0x1.0172dc5fb66adp-44i,
0x1.1b0742fd71932p+5 + -0x1.c03519884fd0ep-45i, 0x1.3ca1b0bcd897dp+5 + -0x1.13f18d3288098p-44i,
0x1.63d7f0cbc1223p+5 + -0x1.3938bf0c5d005p-44i, 0x1.8cf806d8a739ep+5 + -0x1.3bd5a71be4e76p-44i,
0x1.b47e73e4c761cp+5 + -0x1.2fb7c07ec1e06p-44i, 0x1.d757c4b850e11p+5 + -0x1.21f5a5cdd6268p-44i,
0x1.f315f1c1f3be4p+5 + -0x1.36e1ac5320c77p-44i, 0x1.030af920a62eap+6 + -0x1.079a421098176p-44i,
0x1.07c99e7bedc87p+6 + -0x1.0800d87f3f466p-44i, 0x1.07d43629829d3p+6 + -0x1.d617590a1d7eep-45i,
0x1.039ede8b1eaa2p+6 + -0x1.a132a4bafe392p-45i, 0x1.f7e7872a7a00ap+5 + -0x1.526d731a1c22p-45i,
0x1.e3b804f9a95f9p+5 + -0x1.5601c4f739996p-45i, 0x1.cd08a7bb75786p+5 + -0x1.42d572ddaff7fp-45i,
0x1.b63fc59c92b86p+5 + -0x1.a9d44b6f70d18p-46i, 0x1.a197985cd94d2p+5 + -0x1.0e970a449db49p-46i,
0x1.90eb468a52dedp+5 + -0x1.07c0c7a184fc4p-47i, 0x1.858e158401ed2p+5 + -0x1.721b1950e46abp-46i,
0x1.802ff4eeb5a2p+5 + -0x1.5b0fd93332e44p-46i, 0x1.80d1696e6e8dbp+5 + -0x1.a0d4d9431a7ecp-47i,
0x1.86c78cfd125bdp+5 + -0x1.c53c3a960b11cp-48i, 0x1.90cf825d9ef8ep+5 + -0x1.bc3dc598c93acp-47i,
0x1.9d2f72988581cp+5 + -0x1.18a5a36d5877dp-46i, 0x1.a9e21103446d3p+5 + -0x1.3c77ee8071232p-46i,
0x1.b4c8ca2bee5dap+5 + -0x1.472e28893ad2cp-46i, 0x1.bbe03cb886ce2p+5 + -0x1.1a49ab3a59fcap-46i,
0x1.bd7272c6c9aebp+5 + -0x1.d79e7df14fceep-46i, 0x1.b842890d10417p+5 + -0x1.685e46ed1e7fdp-46i,
0x1.abae01c5f01ffp+5 + -0x1.7efd0c096a972p-46i, 0x1.97c0e00c9563cp+5 + -0x1.54e5036e1a49cp-46i,
0x1.7d3ac26353d14p+5 + -0x1.9de0388834c0ap-47i, 0x1.5d8471ad4bd2cp+5 + -0x1.553345b7bc8cep-47i,
0x1.3a96a32be2ba7p+5 + -0x1.81ba3cf2b2a61p-46i, 0x1.16d3e591717afp+5 + -0x1.2b8f30d9ef1dep-46i,
0x1.e9b1839fb216bp+4 + -0x1.a2e0520ad148p-49i, 0x1.ae8bd44b18ddap+4 + 0x1.a0b2fb730c445p-47i,
0x1.8111a896cdb87p+4 + 0x1.c4a3ce4b38c4ep-46i)
assertThat(stats:::fft(inverse=TRUE,z=c(101.119971428571+0i, -29.0928136430285+4.7733132342952i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, -9.96827469726697-3.36903966620577i,
0+0i, 10.5831624331481-2.3897986984906i, 0+0i, -10.916223351644+6.3427574475901i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, -10.916223351644-6.3427574475901i, 0+0i, 10.5831624331481+2.3897986984906i,
0+0i, -9.96827469726698+3.36903966620576i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, -29.0928136430286-4.7733132342952i
))
, identicalTo( expected, tol = 1e-6 ) ) |
setOldClass("POSIXt")
setClassUnion("DateTime", "POSIXt")
setClass("GPSTrack",
representation(latitude = "numeric", longitude = "numeric",
elevation = "numeric", time = "DateTime")
)
scanGPSTrack <- function(con, ...) {
fields <- list(date = "", time = "",
x = 0., y = 0., z = 0.)
data <- scan(con, fields)
dateTime <- scanDateTime(textConnection(
paste(data$date, data$time)))
new("GPSTrack", latitude = data$x, longitude = data$y,
elevation = data$z, time = dateTime)
}
scanDateTime <- function(con) {
as.POSIXct(strptime(readLines(con), "20%y-%m-%d %H:%M:%S"))
}
geoCoords <- function(latitude, longitude, origin = c(40.7, -74.4)) {
n = length(latitude)
if(length(longitude) != n)
stop("required equal length of latitude, longitude, got ", n,
", ", length(longitude))
x = geoDist(latitude, longitude, latitude, rep(origin[[2]], n)) *
sign(longitude - origin[[2]])
y = geoDist(latitude, longitude, rep(origin[[1]], n), longitude) *
sign(latitude - origin[[1]])
list(x=x, y=y)
} |
phrase.matrix = function( rules, n ) {
if ( is.textreg.result( rules ) ) {
n = rules$notes$n
rules = rules$rules
}
mat = matrix( 0, nrow=n, ncol=length(rules) )
colnames(mat) = sapply( rules, function(x) { x$ngram } )
for ( i in 1:length(rules) ) {
rl = rules[[i]]$support + 1
mat[ rl, i ] = rules[[i]]$weight
}
mat[ , "*intercept*"] = 1
mat
}
predict.textreg.result = function( object, new.text= NULL, return.matrix=FALSE, ... ) {
stopifnot( is.textreg.result( object ) )
model = object
if ( !is.null( new.text ) ) {
keyphrase.mat = make.phrase.matrix( model$model$ngram, new.text )
keyphrase.mat[ , "*intercept*" ] = 1
} else {
keyphrase.mat = phrase.matrix( model )
}
model = model$model
kp = sweep( keyphrase.mat, 2, model$Z, FUN="/" )
rsp = as.numeric( kp %*% model$beta )
if ( return.matrix ) {
attr( rsp, "keyphrase.matrix" ) <- keyphrase.mat
}
rsp
}
calc.loss = function( model.blob, new.text=NULL, new.labeling=NULL, loss=c( "square.hinge", "square", "hinge") ) {
loss = match.arg( loss )
model = model.blob$model
pd = predict( model.blob, new.text )
if ( is.null( new.labeling ) ) {
if ( !is.null( new.text ) ) {
stop( "New text without new labeling" )
}
new.labeling = model.blob$labeling
}
if ( loss =="square.hinge" ) {
loss = sum( pmax( (1 - new.labeling*pd), 0 )^2 )
} else if ( loss == "square" ) {
loss = sum( (pd-new.labeling)^2 )
} else if ( loss == "hinge" ) {
loss = sum( pmax( (1 - pd* new.labeling), 0 ) )
}
pen = model.blob$notes$C * sum( abs( model$beta[ -1 ] ) )
c( tot.loss=loss+pen, loss=loss, penalty=pen )
}
reformat.textreg.model = function( model, short=TRUE ) {
stopifnot( is.textreg.result( model ) )
npos = model$model$posCount[[1]]
nneg = model$model$negCount[[1]]
mod = model$model
mod$per = mod$posCount / mod$totalDocs
mod$perPos = mod$posCount / npos
mod$perNeg = mod$negCount / nneg
if ( !short ) {
mod[ c( "ngram", "beta", "Z", "support", "totalDocs", "posCount", "negCount", "per", "perPos", "perNeg" ) ]
} else {
mod = mod[ c("ngram", "support", "totalDocs", "posCount", "per", "perPos") ]
names(mod) = c("phrase", "num.phrase", "num.reports", "num.tag", "per.tag", "per.phrase" )
mod$per.tag = round( 100 * mod$per.tag )
mod$per.phrase = round( 100 * mod$per.phrase )
mod = mod[ order( mod$per.phrase, decreasing=TRUE ), ]
mod
}
} |
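# Usage sketch (res is a hypothetical textreg.result, e.g. from
# textreg(corpus, labeling, C = 4)):
# preds <- predict(res)          # fitted values on the training text
# calc.loss(res)                 # total loss = empirical loss + L1 penalty
# reformat.textreg.model(res)    # compact table of the selected phrases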
climate <- function(series, first.yr=NULL, last.yr=NULL, max.perc.missing)
{
if(is.null(first.yr)) first.yr <- min(series$year)
if(is.null(last.yr)) last.yr <- max(series$year)
series_period<-series[series$year>=first.yr & series$year<=last.yr,]
series_cli_med<-aggregate(series_period, by=list(series_period$month), FUN=mean, na.rm=T)[-(1:2)]
if(sum(!is.na(series_period$Tn)) >0 & "Tn" %in% names(series_period))
series_abs_Tn<-aggregate(data.frame(series_period$month, series_period$Tn), by=list(series_period$month), FUN=min, na.rm=T)[3] else
series_abs_Tn<-as.numeric(rep(NA,12))
names(series_abs_Tn)<-"AbsTn"
missing<-aggregate(series_period, by=list(series_period$month), FUN=function(x)
{count<-sum(is.na(x))
return(count)} )[-(1:3)]
series_cli_med[-1][missing > max.perc.missing/100*(last.yr - first.yr +1)] <- NA
if("Tn" %in% names(series) & "Tx" %in% names(series) & !"Tm" %in% names(series))
series_cli<-round(data.frame(series_cli_med, Tm=(series_cli_med$Tn + series_cli_med$Tx)/2, AbsTn=series_abs_Tn), 1) else
series_cli<-round(data.frame(series_cli_med, AbsTn=series_abs_Tn), 1)
if("Tn" %in% names(series)) series_cli$AbsTn[is.na(series_cli$Tn)]<-NA
return(series_cli)
} |
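# Usage sketch (series is assumed to be a data frame with year, month and
# variable columns such as Tn / Tx / P, as referenced above):
# climate(series, first.yr = 1981, last.yr = 2010, max.perc.missing = 20)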
setGeneric("as.trip",
function(x, ...) standardGeneric("as.trip"))
ltraj2trip <- function (ltr)
{
requireNamespace("adehabitatLT") ||
stop("adehabitatLT package is required, but unavailable")
if (!inherits(ltr, "ltraj"))
stop("ltr should be of class \"ltraj\"")
ltr <- lapply(ltr, function(x) {
x$id=attr(x, "id")
x$burst=attr(x, "burst")
x})
tr <- do.call("rbind", ltr)
class(tr) <- "data.frame"
xy <- tr[!is.na(tr$x), c("x", "y")]
tr <- tr[!is.na(tr$x), ]
tr$y <- tr$x <- NULL
res <- SpatialPointsDataFrame(xy, tr)
trip(res, c("date", "id"))
}
telemetry2trip <- function(x) {
dat <- as.data.frame(setNames(x@.Data, x@names), stringsAsFactors = FALSE)
if (!is.null(x@info$timezone) && !x@info$timezone == "UTC") warning("non-UTC timezone in telemetry (ctmm) object")
dat[["identity"]] <- x@info$identity
if (is.null(x@info$projection)) stop("variant of telemetry object not yet understood (gazelle)")
if (!is.null(x@info$projection)) print(sprintf("nominal projection?? %s in telemetry (ctmm) object", x@info$projection))
sp::coordinates(dat) <- c("longitude", "latitude")
sp::proj4string(dat) <- sp::CRS(.llproj(), doCheckCRSArgs = FALSE)
tname <- "timestamps"
if (!inherits(dat[[tname]], "POSIXt")) dat[[tname]] <- dat[[tname]] + ISOdatetime(1970, 1, 1, 0, 0, 0, tz = "UTC")
trip(dat, c(tname,"identity"))
}
setMethod("as.trip", signature(x="ltraj"),
function(x, ...) ltraj2trip(x))
setMethod("as.trip", signature(x = "track_xyt"),
function(x, ...) trip(x))
setAs("ltraj", "trip", function(from) as.trip(from))
setAs("track_xyt", "trip",
function(from) trip(from)) |
Rcpp::sourceCpp("cpp/RcppMisc.cpp")
fun <- RcppFrameFunc
dframe <- data.frame(fun()[[1]])
expect_equal(dframe, data.frame(A=c(1.23,4.56), B=c(42,21), C=c(FALSE,TRUE)), info = "RcppFrame")
fun <- RcppListFunc
expect_equal(fun(), list(foo=1L, bar=2, biz="xyz"), info="RcppList")
fun <- RcppParams_Double
expect_equal(fun(list(val=1.234)), 2*1.234, info="RcppParams.getDoubleValue")
fun <- RcppParams_Int
expect_equal(fun(list(val=42)), 2*42, info="RcppParams.getIntValue")
fun <- RcppParams_String
expect_equal(fun(list(val="a test string")), "a test stringa test string",
info = "RcppParams.getStringValue")
fun <- RcppParams_Bool
expect_equal(fun(list(val=FALSE)), FALSE, info = "RcppParams.getBoolValue")
fun <- RcppParams_Date
expect_equal(fun(list(val=as.Date("2000-01-01")))[[1]], as.Date("2000-01-01"),
info = "RcppParams.getDateValue")
fun <- RcppParams_Datetime
posixt <- as.POSIXct(strptime("2000-01-02 03:04:05.678", "%Y-%m-%d %H:%M:%OS"))
attr(posixt, "tzone") <- NULL
result <- fun(list(val=posixt))[[1]]
expect_true( (result-posixt) == 0.0 , info = "RcppParams.getDatetimeValue") |
rev.logit <- function(x) 1/(1+exp(-x)) |
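# rev.logit() is the inverse-logit (logistic) map from the real line to (0, 1):
# rev.logit(0)            # 0.5
# rev.logit(c(-Inf, Inf)) # 0 and 1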
explore.influence=function(x,cut.offs="default",plot=TRUE,cook=FALSE,...)
{
if ( (length(cut.offs) )==1 && (cut.offs=="default")){
q25=quantile(x,.25,na.rm=TRUE)
q75=quantile(x,.75,na.rm=TRUE)
if(q75<q25){
cut.low=q75-(q25-q75)*1.5
cut.upp=q25+(q25-q75)*1.5
}
else{cut.low=q25-(q75-q25)*1.5
cut.upp=q75+(q75-q25)*1.5}
}
else if (is.numeric(cut.offs) && (length(cut.offs)==2) && (sum(is.na(cut.offs))==0) && (cut.offs[1]<cut.offs[2])){
cut.low=cut.offs[1]
cut.upp=cut.offs[2]
}
else stop("\"cut.offs\" must be a vector of 2 numeric elements, with the first element less than the second element")
if (cook) cut.low=max(0,cut.low)
if (plot) {
plot(x,xlab="observations",ylab="influence",
ylim=c(min(cut.low,min(x,na.rm=TRUE)),max(cut.upp,max(x,na.rm=TRUE))),...)
if (!cook) abline(h=cut.low,lty=2)
if (cook && (cut.low>0)) abline(h=cut.low,lty=2)
abline(h=cut.upp,lty=2)
}
ris=NULL
n=length(x)
id.row=c(1:n)
not.allowed=id.row[is.na(x)]
less.cut.low=id.row[!is.na(x) & (x<=cut.low)]
greater.cut.upp=id.row[!is.na(x) & (x>=cut.upp)]
ris=list(n=n,cook=cook,cut.low=as.numeric(cut.low),cut.upp=as.numeric(cut.upp),not.allowed=not.allowed,less.cut.low=less.cut.low,greater.cut.upp=greater.cut.upp)
return(ris)
} |
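# Usage sketch: flag influential observations via Cook's distances from a
# fitted model (the lm() call is illustrative):
# fit <- lm(mpg ~ wt + hp, data = mtcars)
# explore.influence(cooks.distance(fit), cook = TRUE)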
makeRLearner.classif.nnTrain = function() {
makeRLearnerClassif(
cl = "classif.nnTrain",
package = "deepnet",
par.set = makeParamSet(
makeNumericVectorLearnerParam(id = "initW"),
makeNumericVectorLearnerParam(id = "initB"),
makeIntegerVectorLearnerParam(id = "hidden", default = 10, lower = 1),
makeIntegerLearnerParam("max.number.of.layers", lower = 1L),
makeDiscreteLearnerParam(id = "activationfun", default = "sigm", values = c("sigm", "linear", "tanh")),
makeNumericLearnerParam(id = "learningrate", default = 0.8, lower = 0),
makeNumericLearnerParam(id = "momentum", default = 0.5, lower = 0),
makeNumericLearnerParam(id = "learningrate_scale", default = 1, lower = 0),
makeIntegerLearnerParam(id = "numepochs", default = 3, lower = 1),
makeIntegerLearnerParam(id = "batchsize", default = 100, lower = 1),
makeDiscreteLearnerParam(id = "output", default = "sigm", values = c("sigm", "linear", "softmax")),
makeNumericLearnerParam(id = "hidden_dropout", default = 0, lower = 0, upper = 1),
makeNumericLearnerParam(id = "visible_dropout", default = 0, lower = 0, upper = 1)
),
par.vals = list(output = "softmax"),
properties = c("twoclass", "multiclass", "numerics", "prob"),
name = "Training Neural Network by Backpropagation",
short.name = "nn.train",
note = "`output` set to `softmax` by default. `max.number.of.layers` can be set to control and tune the maximal number of layers specified via `hidden`.",
callees = "nn.train"
)
}
trainLearner.classif.nnTrain = function(.learner, .task, .subset, .weights = NULL, max.number.of.layers = Inf, hidden = 10, ...) {
d = getTaskData(.task, .subset, target.extra = TRUE)
y = as.numeric(d$target)
dict = sort(unique(y))
onehot = matrix(0, length(y), length(dict))
for (i in seq_along(dict)) {
ind = which(y == dict[i])
onehot[ind, i] = 1
}
deepnet::nn.train(x = data.matrix(d$data), y = onehot, hidden = head(hidden, max.number.of.layers), ...)
}
predictLearner.classif.nnTrain = function(.learner, .model, .newdata, ...) {
type = switch(.learner$predict.type, response = "class", prob = "raw")
pred = deepnet::nn.predict(.model$learner.model, data.matrix(.newdata))
colnames(pred) = .model$factor.levels[[1]]
if (type == "class") {
classes = colnames(pred)[max.col(pred)]
return(as.factor(classes))
}
return(pred)
} |
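# Usage sketch with mlr (iris.task ships with mlr; layer sizes and epoch
# count are illustrative):
# library(mlr)
# lrn <- makeLearner("classif.nnTrain", hidden = c(10, 10), numepochs = 5)
# mod <- train(lrn, iris.task)
# predict(mod, iris.task)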
data(andalusia)
o <- loca.p(x=andalusia$x[1:8], y=andalusia$y[1:8])
xmin <- min(andalusia$x)
ymin <- min(andalusia$y)
xmax <- max(andalusia$x)
ymax <- max(andalusia$y)
library(png) # readPNG() below is provided by the png package
file = system.file('img', 'andalusian_provinces.png', package='orloca')
img = readPNG(file)
plot(o, img=img, main=gettext('Andalucia'), xleft=xmin, ybottom=ymin, xright=xmax, ytop=ymax)
contour(o, img=img, main=gettext('Andalusia'), xleft=xmin, ybottom=ymin, xright=xmax, ytop=ymax)
andalusia.loca.p <- loca.p(andalusia$x[1:8], andalusia$y[1:8])
sol <- distsummin(andalusia.loca.p)
sol
points(sol[1], sol[2], type='p', col='red') |
sig <- matrix(c(1.0, 0.8, 0.5, 0.2,
0.8, 1.0, 0.5, 0.5,
0.5, 0.5, 1.0, 0.5,
0.2, 0.5, 0.5, 1.0), nrow = 4)
sig
library(MASS)
df.4 <- data.frame(mvrnorm(n = 1000, mu = rep(0, 4), Sigma = sig, empirical = TRUE))
detach("package:MASS")
summary(df.4)
ncol(df.4)
nrow(df.4)
head(df.4)
round(sig, 2)
round(cor(df.4), 2) |
fv_ecdf_single_budget_box <- function(width = 12, collapsible = T, collapsed = T) {
box(
title = HTML('<p style="font-size:120%;">Empirical Cumulative Distribution of
the Fixed-Budget Values: Single Budgets</p>'),
width = width, collapsible = collapsible, collapsed = collapsed,
solidHeader = TRUE, status = "primary",
sidebarLayout(
sidebarPanel(
width = 3,
selectInput('FCEECDF.Single.Algs', label = 'Select which IDs to include:',
multiple = T, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
custom_icon() %>%
bs_embed_popover(
title = "ID selection", content = alg_select_info,
placement = "auto"
)
),
HTML('Select the budgets for which ECDF curves are displayed '),
textInput('FCEECDF.Single.Target', label = HTML('<p>\\(B_1\\)</p>'), value = ''),
checkboxInput('FCEECDF.Single.Logx', label = 'Scale x axis \\(\\log_{10}\\)', value = F)
),
mainPanel(
width = 9,
column(
width = 12, align = "center",
HTML_P('Each ECDF curve shows the proportion of the runs that have found
within the given budget B a solution of at least the required target
value given by the x-axis. The displayed curves can be selected
by clicking on the legend on the right. A <b>tooltip</b> and <b>toolbar</b>
appears when hovering over the figure.</p>'),
plotlyOutput.IOHanalyzer("FCE_ECDF_PER_TARGET")
)
)
)
)
}
fv_ecdf_agg_budgets_box <- function(width = 12, collapsible = T, collapsed = T) {
box(
title = HTML('<p style="font-size:120%;">Empirical Cumulative Distribution
of the Fixed-Budget Values: Aggregation</p>'),
width = width, collapsible = collapsible, collapsed = collapsed,
solidHeader = T, status = "primary",
sidebarPanel(
width = 3,
selectInput('FCEECDF.Mult.Algs', label = 'Select which IDs to include:',
multiple = T, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
custom_icon() %>%
bs_embed_popover(
title = "ID selection", content = alg_select_info,
placement = "auto"
)
),
HTML('<p align="justify">Set the range and the granularity of the budgets
taken into account in the ECDF curve. The plot will show the ECDF curves
for evenly spaced budgets.</p>'),
textInput('FCEECDF.Mult.Min', label = RT_MIN_LABEL, value = ''),
textInput('FCEECDF.Mult.Max', label = RT_MAX_LABEL, value = ''),
textInput('FCEECDF.Mult.Step', label = RT_STEP_LABEL, value = ''),
checkboxInput('FCEECDF.Mult.Logx',
label = 'Scale x axis \\(\\log_{10}\\)',
value = F),
hr(),
selectInput('FCEECDF.Mult.Format', label = 'Select the figure format',
choices = supported_fig_format, selected = supported_fig_format[[1]]),
downloadButton('FCEECDF.Mult.Download', label = 'Download the figure')
),
mainPanel(
width = 9,
column(
width = 12, align = "center",
HTML_P('The evenly spaced budget values are:'),
verbatimTextOutput('FCE_RT_GRID'),
HTML_P('The fraction of (run,budget) pairs \\((i,B)\\) satisfying that the best
solution that the algorithm has found in the \\(i\\)-th run within the
first \\(B\\) evaluations has quality at <b>most</b> \\(v\\) is plotted
against the target value \\(v\\). The displayed elements can be switched
on and off by clicking on the legend on the right. A <b>tooltip</b> and
<b>toolbar</b> appears when hovering over the figure.'),
plotlyOutput.IOHanalyzer('FCE_ECDF_AGGR')
)
)
)
}
fv_ecdf_auc_box <- function(width = 12, collapsible = T, collapsed = T) {
box(
title = HTML('<p style="font-size:120%;">Area Under the ECDF</p>'),
width = width, collapsible = collapsible, collapsed = collapsed,
solidHeader = T, status = "primary",
sidebarPanel(
width = 3,
selectInput('FCEECDF.AUC.Algs', label = 'Select which IDs to include:',
multiple = T, selected = NULL, choices = NULL) %>% shinyInput_label_embed(
custom_icon() %>%
bs_embed_popover(
title = "ID selection", content = alg_select_info,
placement = "auto"
)
),
HTML('<p align="justify">Set the range and the granularity of the evenly spaced budgets.</p>'),
textInput('FCEECDF.AUC.Min', label = RT_MIN_LABEL, value = ''),
textInput('FCEECDF.AUC.Max', label = RT_MAX_LABEL, value = ''),
textInput('FCEECDF.AUC.Step', label = RT_STEP_LABEL, value = ''),
hr(),
selectInput('FCEECDF.AUC.Format', label = 'Select the figure format',
choices = supported_fig_format, selected = supported_fig_format[[1]]),
downloadButton('FCEECDF.AUC.Download', label = 'Download the figure')
),
mainPanel(
width = 9,
column(
width = 12, align = "center",
HTML_P('The <b>area under the ECDF</b> is
calculated for the sequence of budget values specified on the left. The displayed
values are normalized against the maximal target value recorded for
each algorithm. Intuitively, the <b>smaller</b> the area, the <b>better</b> the algorithm.
The displayed IDs can be selected by clicking on the legend on the right.
A <b>tooltip</b> and <b>toolbar</b> appears when hovering over the figure.'),
plotlyOutput.IOHanalyzer("FCE_AUC")
)
)
)
} |
CreateLinePoints<-function(P1, P2){
if (P1[1]==P2[1]){
Line=c("Inf",P1[1])
names(Line)=c("slope","x-value")
}
else{
m=(P2[2]-P1[2])/(P2[1]-P1[1])
n=P1[2]-m*P1[1]
Line=c(m,n)
names(Line)=c("slope","intercept")
}
class(Line) <- append(class(Line),"Line")
return(Line)
} |
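# Examples: a line through (0,0) and (1,2) has slope 2 and intercept 0; a
# vertical line is flagged with slope "Inf" and its x-value:
# CreateLinePoints(c(0, 0), c(1, 2))   # slope 2, intercept 0
# CreateLinePoints(c(3, 1), c(3, 5))   # slope "Inf", x-value 3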
ergm.getnetwork <- function (formula, loopswarning=TRUE){
nw <- eval_lhs.formula(formula)
nw <- ensure_network(nw)
if (loopswarning) {
e <- as.edgelist(nw)
if(any(e[,1]==e[,2])) {
print("Warning: This network contains loops")
} else if (has.loops(as.network(nw,populate=FALSE))) {
print("Warning: This network is allowed to contain loops")
}
}
nw
}
ensure_network <- function(nw){
if(!is.network(nw) && !is.ergm_state(nw)){
nw <- ERRVL(
try(as.network(nw)),
abort("A network object on the LHS of the formula or as a basis argument must be given")
)
}
nw
} |
set.seed(1)
n <- 10 * 1000 * 1000
gshape <- 1.5
rate <- 0.0004
SlicePoint <- 400
shape <- 1.5
# draw from the gamma body; above SlicePoint, replace the draw with a Pareto
# tail sample via the inverse CDF: SlicePoint / U^(1/shape)
x <- rgamma(n, gshape, rate)
x <- ifelse(x < SlicePoint, x, SlicePoint/(runif(n)^(1/shape)))
# empirical histogram against the sliced gamma-Pareto density
hist(x, breaks = 200000, xlim = c(0, 1.5e4), probability = TRUE)
lines(0:1e4, dSlicedGammaPareto(0:1e4, gshape, rate, SlicePoint, shape), col = "red")
# P-P style check: model CDF at empirical quantiles (black) against the
# uniform reference (red)
step <- 0.001
plot(log(quantile(x,seq(0+step, 1-step, step)))
,pSlicedGammaPareto(quantile(x,seq(0+step, 1-step, step)),gshape,rate,SlicePoint,shape)
,type = "l"
)
lines(log(quantile(x,seq(0+step, 1-step, step))),seq(0+step, 1-step, step), type = "l", col="red")
plot(log(qSlicedGammaPareto(seq(0+step, 1-step, step),gshape,rate,SlicePoint,shape))
,seq(0+step, 1-step, step)
,type = "l"
)
lines(log(quantile(x,seq(0+step, 1-step, step))),seq(0+step, 1-step, step), type = "l", col="red") |
fitVoigtPeaksSMC <- function(wl, spc, lPriors, conc=rep(1.0,nrow(spc)), npart=10000, rate=0.9, mcAR=0.234, mcSteps=20, minESS=npart/2, destDir=NA, minPart=npart) {
N_Peaks <- length(lPriors$loc.mu)
N_WN_Cal <- length(wl)
N_Obs_Cal <- nrow(spc)
lPriors$noise.SS <- lPriors$noise.nu * lPriors$noise.sd^2
print(paste("SMC with",N_Obs_Cal,"observations at",length(unique(conc)),"unique concentrations,",npart,"particles, and",N_WN_Cal,"wavenumbers."))
ptm <- proc.time()
Knots<-seq(min(wl),max(wl), length.out=lPriors$bl.knots)
r <- max(diff(Knots))
NK<-lPriors$bl.knots
X_Cal <- bs(wl, knots=Knots, Boundary.knots = range(Knots) + c(-r,+r),
intercept = TRUE)
class(X_Cal) <- "matrix"
XtX <- Matrix(crossprod(X_Cal), sparse=TRUE)
NB_Cal<-ncol(X_Cal)
FD2_Cal <- diff(diff(diag(NB_Cal)))
Pre_Cal <- Matrix(crossprod(FD2_Cal), sparse=TRUE)
  R <- chol(XtX + Pre_Cal*1e-9)
Rinv <- solve(R)
Rsvd <- svd(crossprod(Rinv, Pre_Cal %*% Rinv))
Ru <- Rinv %*% Rsvd$u
A <- X_Cal %*% Rinv %*% Rsvd$u
lPriors$bl.basis <- X_Cal
lPriors$bl.precision <- as(Pre_Cal, "dgCMatrix")
lPriors$bl.XtX <- as(XtX, "dgCMatrix")
lPriors$bl.orthog <- as.matrix(A)
lPriors$bl.Ru <- as.matrix(Ru)
lPriors$bl.eigen <- Rsvd$d
print(paste0("Step 0: computing ",NB_Cal," B-spline basis functions (r=",r,") took ",(proc.time() - ptm)[3],"sec."))
ptm <- proc.time()
Sample<-matrix(numeric(npart*(4*N_Peaks+3+N_Obs_Cal)),nrow=npart)
Sample[,1:N_Peaks] <- rlnorm(N_Peaks*npart, lPriors$scaG.mu, lPriors$scaG.sd)
Sample[,(N_Peaks+1):(2*N_Peaks)] <- rlnorm(N_Peaks*npart, lPriors$scaL.mu, lPriors$scaL.sd)
for (k in 1:npart) {
propLoc <- rtruncnorm(N_Peaks, a=min(wl), b=max(wl), mean=lPriors$loc.mu, sd=lPriors$loc.sd)
Sample[k,(2*N_Peaks+1):(3*N_Peaks)] <- sort(propLoc)
}
exp_pen <- 15
if (exists("beta.mu", lPriors) && exists("beta.sd", lPriors)) {
for (j in 1:N_Peaks) {
Sample[,3*N_Peaks+j] <- rtruncnorm(npart, a=0, b=max(spc)/max(conc), mean=lPriors$beta.mu[j], sd=lPriors$beta.sd[j])
}
} else {
if (exists("beta.exp", lPriors)) exp_pen <- lPriors$beta.exp
Sample[,(3*N_Peaks+1):(4*N_Peaks)] <- rexp(N_Peaks*npart, max(conc)*exp_pen/diff(range(spc)))
lPriors$beta.rate <- max(conc)*exp_pen/diff(range(spc))
}
Offset_1<-4*N_Peaks
Offset_2<-Offset_1 + N_Obs_Cal + 1
Cal_I <- 1
Sample[,Offset_2+1] <- 1/rgamma(npart, lPriors$noise.nu/2, lPriors$noise.SS/2)
Sample[,Offset_2+2] <- Sample[,Offset_2+1]/lPriors$bl.smooth
print(paste("Mean noise parameter sigma is now",mean(sqrt(Sample[,Offset_2+1]))))
print(paste("Mean spline penalty lambda is now",mean(Sample[,Offset_2+1]/Sample[,Offset_2+2])))
g0_Cal <- N_WN_Cal * lPriors$bl.smooth * Pre_Cal
gi_Cal <- XtX + g0_Cal
a0_Cal <- lPriors$noise.nu/2
ai_Cal <- a0_Cal + N_WN_Cal/2
b0_Cal <- lPriors$noise.SS/2
for(k in 1:npart) {
Sigi <- conc[Cal_I] * mixedVoigt(Sample[k,2*N_Peaks+(1:N_Peaks)], Sample[k,(1:N_Peaks)],
Sample[k,N_Peaks+(1:N_Peaks)], Sample[k,3*N_Peaks+(1:N_Peaks)], wl)
Obsi <- spc[Cal_I,] - Sigi
lambda <- lPriors$bl.smooth
L_Ev <- computeLogLikelihood(Obsi, lambda, lPriors$noise.nu, lPriors$noise.SS,
X_Cal, Rsvd$d, lPriors$bl.precision, lPriors$bl.XtX,
lPriors$bl.orthog, lPriors$bl.Ru)
Sample[k,Offset_1+2]<-L_Ev
}
Sample[,Offset_1+1]<-rep(1/npart,npart)
T_Sample<-Sample
T_Sample[,1:N_Peaks]<-log(T_Sample[,1:N_Peaks])
T_Sample[,(N_Peaks+1):(2*N_Peaks)]<-log(T_Sample[,(N_Peaks+1):(2*N_Peaks)])
T_Sample[,(3*N_Peaks+1):(4*N_Peaks)]<-log(T_Sample[,(3*N_Peaks+1):(4*N_Peaks)])
iTime <- proc.time() - ptm
ESS<-1/sum(Sample[,Offset_1+1]^2)
MC_Steps<-numeric(1000)
MC_AR<-numeric(1000)
ESS_Hist<-numeric(1000)
ESS_AR<-numeric(1000)
Kappa_Hist<-numeric(1000)
Time_Hist<-numeric(1000)
MC_Steps[1]<-0
MC_AR[1]<-1
ESS_Hist[1]<-ESS
ESS_AR[1]<-npart
Kappa_Hist[1]<-0
Time_Hist[1]<-iTime[3]
print(paste("Step 1: initialization for",N_Peaks,"Voigt peaks took",iTime[3],"sec."))
i<-1
Cal_I <- 1
MADs<-numeric(4*N_Peaks)
Alpha<-rate
MC_AR[1]<-mcAR
MCMC_MP<-1
repeat{
i<-i+1
iTime<-system.time({
ptm <- proc.time()
Min_Kappa<-Kappa_Hist[i-1]
Max_Kappa<-1
Kappa<-1
Temp_w<-Sample[,Offset_1+1]*exp((Kappa-Kappa_Hist[i-1])*(Sample[,Offset_1+Cal_I+1]-max(Sample[,Offset_1+Cal_I+1])))
Temp_W<-Temp_w/sum(Temp_w)
US1<-unique(Sample[,1])
N_UP<-length(US1)
Temp_W2<-numeric(N_UP)
for(k in 1:N_UP){
Temp_W2[k]<-sum(Temp_W[which(Sample[,1]==US1[k])])
}
Temp_ESS<-1/sum(Temp_W2^2)
if(Temp_ESS<(Alpha*ESS_AR[i-1])){
while(abs(Temp_ESS-((Alpha*ESS_AR[i-1])))>1 & !isTRUE(all.equal(Kappa, Min_Kappa))){
if(Temp_ESS<((Alpha*ESS_AR[i-1]))){
Max_Kappa<-Kappa
} else{
Min_Kappa<-Kappa
}
Kappa<-0.5*(Min_Kappa+Max_Kappa)
Temp_w<-Sample[,Offset_1+1]*exp((Kappa-Kappa_Hist[i-1])*(Sample[,Offset_1+Cal_I+1]-max(Sample[,Offset_1+Cal_I+1])))
Temp_W<-Temp_w/sum(Temp_w)
US1<-unique(Sample[,1])
N_UP<-length(US1)
Temp_W2<-numeric(N_UP)
for(k in 1:N_UP){
Temp_W2[k]<-sum(Temp_W[which(Sample[,1]==US1[k])])
}
Temp_ESS<-1/sum(Temp_W2^2)
}
}
Sample[,Offset_1+1]<-Temp_W
Kappa_Hist[i]<-Kappa
ESS_Hist[i]<-Temp_ESS
print(paste0("Reweighting took ",format((proc.time()-ptm)[3],digits=3),
"sec. for ESS ",format(Temp_ESS,digits=6)," with new kappa ",Kappa,"."))
Acc<-0
Prop_Info<-cov.wt(T_Sample[,1:(4*N_Peaks)],wt=Sample[,Offset_1+1])
Prop_Mu<-Prop_Info$center
Prop_Cor<-cov2cor(Prop_Info$cov)
if(ESS_Hist[i] < minESS){
ptm <- proc.time()
ReSam<-sample(1:npart,size=npart,replace=T,prob=Sample[,Offset_1+1])
Sample<-Sample[ReSam,]
T_Sample<-T_Sample[ReSam,]
Sample[,Offset_1+1]<-rep(1/npart,npart)
T_Sample[,Offset_1+1]<-rep(1/npart,npart)
print(paste("*** Resampling with",length(unique(Sample[,1])),"unique indices took",
format((proc.time()-ptm)[3],digits=6),"sec ***"))
}
for(j in 1:(4*N_Peaks)){
Prop_Mu[j]<-median(T_Sample[,j])
MADs[j]<-median(abs((T_Sample[,j])-median(T_Sample[,j])))
}
Prop_Cov<-(1.4826*MADs)%*%t(1.4826*MADs)*Prop_Cor
US1<-unique(Sample[,1])
N_UP<-length(US1)
Temp_W<-numeric(N_UP)
for(k in 1:N_UP){
Temp_W[k]<-sum(Sample[which(Sample[,1]==US1[k]),Offset_1+1])
}
Temp_ESS<-1/sum(Temp_W^2)
ESS_AR[i]<-Temp_ESS
if(!is.na(MC_AR[i-1]) && MC_Steps[i-1] > 0){
MCMC_MP<-2^(-5*(mcAR-MC_AR[i-1]))*MCMC_MP
if (MC_AR[i-1] < 0.15) {
print(paste("WARNING: M-H Acceptance Rate",MC_AR[i-1],"has fallen below minimum threshold."))
MCMC_MP <- MCMC_MP * MC_AR[i-1]^3
}
}
mhCov <- MCMC_MP*(2.38^2/(4*N_Peaks))*Prop_Cov
ch <- try(chol(mhCov, pivot = FALSE))
if (inherits(ch, "try-error")) {
v <- apply(T_Sample[,1:(4*N_Peaks)],2,var)
mhCov <- (MCMC_MP/(4*N_Peaks))*diag(v, nrow=4*N_Peaks) + diag(1e-12, nrow=4*N_Peaks)
ch <- chol(mhCov, pivot = FALSE)
}
mhChol <- t(ch)
mcr <- 0
MC_AR[i] <- MC_AR[i-1]
while(mcr < mcSteps && N_UP < minPart) {
MC_Steps[i]<-MC_Steps[i]+1
mh_acc <- mhUpdateVoigt(spc, Cal_I, Kappa_Hist[i], conc, wl, Sample, T_Sample, mhChol, lPriors)
Acc <- Acc + mh_acc
mcr <- mcr + 1
US1<-unique(Sample[,1])
N_UP<-length(US1)
Temp_W<-numeric(N_UP)
for(k in 1:N_UP){
Temp_W[k]<-sum(Sample[which(Sample[,1]==US1[k]),Offset_1+1])
}
Temp_ESS<-1/sum(Temp_W^2)
print(paste(mh_acc,"M-H proposals accepted. Temp ESS is",format(Temp_ESS,digits=6),
"with",N_UP,"unique particles."))
ESS_AR[i]<-Temp_ESS
MC_AR[i]<-Acc/(npart*MC_Steps[i])
}
})
Time_Hist[i]<-iTime[3]
if (!is.na(destDir) && file.exists(destDir)) {
iFile<-paste0(destDir,"/Iteration_",i,"/")
dir.create(iFile)
save(Sample,file=paste0(iFile,"Sample.rda"))
print(paste("Interim results saved to",iFile))
}
print(paste0("Iteration ",i," took ",format(iTime[3],digits=6),"sec. for ",
MC_Steps[i]," MCMC loops (acceptance rate ",format(MC_AR[i],digits=5),")"))
if (Kappa >= 1 || MC_AR[i] < 1/npart) {
break
}
}
if (Kappa < 1 && MC_AR[i] < 1/npart) {
print(paste("SMC collapsed due to MH acceptance rate",
Acc,"/",(npart*MC_Steps[i]),"=", MC_AR[i]))
}
return(list(priors=lPriors, ess=ESS_Hist[1:i], weights=Sample[,Offset_1+1], kappa=Kappa_Hist[1:i],
accept=MC_AR[1:i], mhSteps=MC_Steps[1:i], essAR=ESS_AR[1:i], times=Time_Hist[1:i],
scale_G=Sample[,1:N_Peaks], scale_L=Sample[,(N_Peaks+1):(2*N_Peaks)],
location=Sample[,(2*N_Peaks+1):(3*N_Peaks)], beta=Sample[,(3*N_Peaks+1):(4*N_Peaks)],
sigma=sqrt(Sample[,Offset_2+1]), lambda=Sample[,Offset_2+1]/Sample[,Offset_2+2]))
} |
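# Hedged sketch of a call: lPriors bundles peak-location, broadening,
# amplitude, noise, and baseline priors; the field values below are
# illustrative placeholders, not defaults
# priors <- list(loc.mu = c(1033, 1174), loc.sd = c(50, 50),
#                scaG.mu = log(10), scaG.sd = 1, scaL.mu = log(10), scaL.sd = 1,
#                noise.nu = 5, noise.sd = 50, bl.smooth = 10^11, bl.knots = 50)
# fit <- fitVoigtPeaksSMC(wavenumbers, spectra, priors, npart = 2000)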
install_local <- function(path = ".", subdir = NULL,
dependencies = NA,
upgrade = c("default", "ask", "always", "never"),
force = FALSE,
quiet = FALSE,
build = !is_binary_pkg(path),
build_opts = c("--no-resave-data", "--no-manual", "--no-build-vignettes"),
build_manual = FALSE, build_vignettes = FALSE,
repos = getOption("repos"),
type = getOption("pkgType"),
...) {
remotes <- lapply(path, local_remote, subdir = subdir)
install_remotes(remotes,
dependencies = dependencies,
upgrade = upgrade,
force = force,
quiet = quiet,
build = build,
build_opts = build_opts,
build_manual = build_manual,
build_vignettes = build_vignettes,
repos = repos,
type = type,
...)
}
local_remote <- function(path, subdir = NULL, branch = NULL, args = character(0), ...) {
remote("local",
path = normalizePath(path),
subdir = subdir
)
}
remote_download.local_remote <- function(x, quiet = FALSE) {
bundle <- tempfile()
dir.create(bundle)
suppressWarnings(
res <- file.copy(x$path, bundle, recursive = TRUE)
)
if (!all(res)) {
stop("Could not copy `", x$path, "` to `", bundle, "`", call. = FALSE)
}
dir(bundle, full.names = TRUE)[1]
}
remote_metadata.local_remote <- function(x, bundle = NULL, source = NULL, sha = NULL) {
list(
RemoteType = "local",
RemoteUrl = x$path,
RemoteSubdir = x$subdir
)
}
remote_package_name.local_remote <- function(remote, ...) {
is_tarball <- !dir.exists(remote$path)
if (is_tarball) {
return(sub("_.*$", "", basename(remote$path)))
}
description_path <- file.path(remote$path, "DESCRIPTION")
read_dcf(description_path)$Package
}
remote_sha.local_remote <- function(remote, ...) {
is_tarball <- !dir.exists(remote$path)
if (is_tarball) {
return(NA_character_)
}
read_dcf(file.path(remote$path, "DESCRIPTION"))$Version
}
format.local_remote <- function(x, ...) {
"local"
} |
generate_results_table <- function(sk_result, stations, param) {
  # keep the requested stations, order by station, and return the requested
  # column(s); all_of() avoids dplyr's note about selecting with an external
  # character vector
  sk_result <- sk_result %>% filter(.data$station %in% stations)
  sk_result <- sk_result[order(sk_result$station), ]
  sk_result <- sk_result %>% select(dplyr::all_of(param))
  return(sk_result)
}
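# Illustrative only: a toy sk_result-like data frame with hypothetical
# columns; filter()/select() require dplyr
library(dplyr)
sk <- data.frame(station = c("S2", "S1", "S3"),
                 tau = c(0.21, 0.45, -0.10),
                 p.value = c(0.04, 0.01, 0.62))
generate_results_table(sk, stations = c("S1", "S2"), param = c("station", "tau"))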
simCovariate <- function(
cov.list=NULL,
...,
n,
add.yr = TRUE
) {
if (is.null(cov.list)) {
cov.list <- list(...)
if (length(cov.list) == 1) {
if (is.null(names(cov.list))) {
cov.list <- cov.list[[1]]
}
}
}
datalist <- list()
distribution <- c('uniform','normal','beta','binomial', 'poisson', 'bernoulli')
seeds.used <- vector()
for (i in 1:length(cov.list)) {
cov <- cov.list[[i]]
cov.name <- names(cov.list[i])
distr <- cov[['dist']]
if (!is.function(distr)) {
if (distr == "bernoulli") {dist <- 'binomial'}
distr <- tolower(distr)
if (substr(distr,1,1) == 'r') distr <- substr(distr,2,nchar(distr))
distr <- match.arg(distr,distribution)
distr <- switch(distr, uniform=stats::runif, normal = stats::rnorm, beta = stats::rbeta, binomial = stats::rbinom, poisson = stats::rpois, bernoulli = stats::rbinom, sample = sample, multinomial = stats::rmultinom)
}
arg.names <- names(formals(fun = distr))
keepers <- names(cov) %in% arg.names
rand.args <- cov[keepers == TRUE]
rand.args$n <- n
if(is.null(cov[['seed']])) {seed <- sample(.Random.seed,1)
warning(paste("You did not provide a random seed for the simulation for covariate named ", cov.name , ". Data have been simulated using", seed, "as the random seed.", sep = ""))} else {seed <- cov[['seed']]}
seed <- as.integer(seed)
set.seed(seed)
seeds.used <- append(seeds.used, values = seed)
dataset <- do.call(what = distr, args = rand.args)
if (!is.null(cov[['round']])) dataset <- round(dataset, cov[['round']])
datalist[[cov.name]] <- dataset
}
dataset <- as.data.frame(datalist)
if (add.yr == TRUE) {dataset$yr <- 1:nrow(dataset)}
attr(dataset, which = 'seeds.used') <- seeds.used
attr(dataset, which = 'cov.list') <- cov.list
attr(dataset, which = 'n') <- n
return(dataset)
}
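# A minimal sketch: each covariate is a named list giving its distribution,
# the arguments of the matching r* function, and a seed (values illustrative)
covs <- list(
  temp = list(dist = "normal", mean = 15, sd = 3, seed = 104),
  rain = list(dist = "bernoulli", size = 1, prob = 0.3, seed = 207)
)
simCovariate(covs, n = 5, add.yr = TRUE)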
|
req_suggested_packages <- c("emmeans", "multcomp", "ggplot2")
pcheck <- lapply(req_suggested_packages, requireNamespace,
quietly = TRUE)
if (any(!unlist(pcheck))) {
message("Required package(s) for this vignette are not available/installed and code will not be executed.")
knitr::opts_chunk$set(eval = FALSE)
}
op <- options(width = 90)
knitr::opts_chunk$set(dpi=72)
library("afex")
library("emmeans")
library("multcomp")
library("ggplot2")
data(sk2011.1)
str(sk2011.1)
with(sk2011.1, table(inference, id, plausibility))
a1 <- aov_ez("id", "response", sk2011.1, between = "instruction",
within = c("inference", "plausibility"))
a1
knitr::kable(nice(a1))
print(xtable::xtable(anova(a1), digits = c(rep(2, 5), 3, 4)), type = "html")
m1 <- emmeans(a1, ~ inference)
m1
pairs(m1)
summary(as.glht(pairs(m1)), test=adjusted("free"))
m2 <- emmeans(a1, "inference", by = "instruction")
m2
pairs(m2)
m3 <- emmeans(a1, c("inference", "instruction"))
m3
pairs(m3)
c1 <- list(
v_i.ded = c(0.5, 0.5, -0.5, -0.5, 0, 0, 0, 0),
v_i.prob = c(0, 0, 0, 0, 0.5, 0.5, -0.5, -0.5)
)
contrast(m3, c1, adjust = "holm")
summary(as.glht(contrast(m3, c1)), test = adjusted("free"))
afex_plot(a1, x = "inference", trace = "instruction", panel = "plausibility")
afex_plot(a1, x = "inference", trace = "instruction", panel = "plausibility",
error = "within")
afex_plot(a1, x = "inference", trace = "instruction", panel = "plausibility",
error = "none")
p1 <- afex_plot(a1, x = "inference", trace = "instruction",
panel = "plausibility", error = "none",
mapping = c("color", "fill"),
data_geom = geom_boxplot, data_arg = list(width = 0.4),
point_arg = list(size = 1.5), line_arg = list(size = 1))
p1
p1 + theme_light()
theme_set(theme_light())
a2 <- aov_ez("id", "response", sk2011.1, between = "instruction",
within = c("validity", "plausibility", "what"))
a2
afex_plot(a2, x = c("plausibility", "validity"),
trace = "instruction", panel = "what",
error = "none")
(m4 <- emmeans(a2, ~instruction+plausibility+validity|what))
c2 <- list(
diff_1 = c(1, -1, 0, 0, 0, 0, 0, 0),
diff_2 = c(0, 0, 1, -1, 0, 0, 0, 0),
diff_3 = c(0, 0, 0, 0, 1, -1, 0, 0),
diff_4 = c(0, 0, 0, 0, 0, 0, 1, -1),
val_ded = c(0.5, 0, 0.5, 0, -0.5, 0, -0.5, 0),
val_prob = c(0, 0.5, 0, 0.5, 0, -0.5, 0, -0.5),
plau_ded = c(0.5, 0, -0.5, 0, -0.5, 0, 0.5, 0),
plau_prob = c(0, 0.5, 0, -0.5, 0, 0.5, 0, -0.5)
)
contrast(m4, c2, adjust = "holm")
summary(as.glht(contrast(m4, c2)), test = adjusted("free"))
options(op) |
confIntBootLogConROC_t0 <- function(controls, cases, grid = c(0.2, 0.8), conf.level = 0.95, M = 1000, smooth = TRUE, output = TRUE){
alpha <- 1 - conf.level
boot.mat <- matrix(NA, nrow = length(grid), ncol = M)
boot.mat.smooth <- boot.mat
for (m in 1:M){
con.m <- sample(controls, replace = TRUE)
cas.m <- sample(cases, replace = TRUE)
roc <- logConROC(cas.m, con.m, grid, smooth = smooth)
boot.mat[, m] <- roc$fROC
if (identical(smooth, TRUE)){boot.mat.smooth[, m] <- roc$fROC.smooth}
if (identical(output, TRUE)){print(paste(m, " of ", M, " runs done", sep = ""))}
}
qs <- data.frame(cbind(grid, t(apply(boot.mat, 1, quantile, c(alpha / 2, 1 - alpha / 2)))))
colnames(qs) <- c("t", "CIlow", "CIup")
qs.smooth <- NA
if (identical(smooth, TRUE)){
qs.smooth <- data.frame(cbind(grid, t(apply(boot.mat.smooth, 1, quantile, c(alpha / 2, 1 - alpha / 2)))))
colnames(qs.smooth) <- c("t", "CIlow", "CIup")
}
res <- list("quantiles" = qs, "boot.samples" = boot.mat, "quantiles.smooth" = qs.smooth, "boot.samples.smooth" = boot.mat.smooth)
return(res)
} |
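# A quick sketch, assuming logConROC() (log-concave ROC estimation from the
# same package) is available; a small M keeps the bootstrap fast
set.seed(1)
controls <- rnorm(40)
cases <- rnorm(40, mean = 1)
ci <- confIntBootLogConROC_t0(controls, cases, grid = c(0.2, 0.8),
                              M = 50, smooth = FALSE, output = FALSE)
ci$quantiles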
context("test-move_to")
test_that("move_to works", {
x <- list("a" = c("A" = 0.1, "B" = 0.5), "b" = c("A" = 0.2, "B" = 1))
a <- tidySet(x)
a <- mutate_element(a, b = runif(2))
b <- move_to(a, from = "elements", to = "relations", "b")
expect_equal(as.data.frame(a), as.data.frame(b))
expect_equal(ncol(elements(a)), 2)
expect_equal(ncol(relations(a)), 3)
expect_equal(ncol(elements(b)), 1)
expect_equal(ncol(relations(b)), 4)
}) |
Votes.getBillActionVoteByOfficial <-
function (actionId, candidateId) {
Votes.getBillActionVoteByOfficial.basic <- function (.actionId, .candidateId) {
request <- "Votes.getBillActionVoteByOfficial?"
inputs <- paste("&actionId=",.actionId,"&candidateId=",.candidateId,sep="")
output <- pvsRequest(request,inputs)
output$actionId <- .actionId
output$candidateId <- .candidateId
output
}
output.list <- lapply(actionId, FUN= function (y) {
lapply(candidateId, FUN= function (s) {
Votes.getBillActionVoteByOfficial.basic(.actionId=y, .candidateId=s)
}
)
}
)
output.list <- do.call("c",output.list)
  # which.is.max() comes from the nnet package: index of the widest result
  # (ties broken at random), whose names are used to pad narrower ones
  coln <- which.is.max(sapply(output.list, ncol))
  max.cols <- max(sapply(output.list, ncol))
  output.list2 <- lapply(output.list, function(x){
    if (ncol(x) < max.cols) x <- data.frame(cbind(matrix(NA, ncol = max.cols - ncol(x), nrow = 1), x), row.names = NULL)
    names(x) <- names(output.list[[coln]])
    x
  })
output <- do.call("rbind",output.list2)
output
} |
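# Illustrative call (requires a Project Vote Smart API key configured for
# pvsRequest(); the IDs below are placeholders, not real identifiers)
# Votes.getBillActionVoteByOfficial(actionId = c("2342", "2343"), candidateId = "9490")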
EM2partial <-
function(tms, cens, pars, maxiter = 1000, tol = 1e-8,
h.fn = function(x,p) dexp(x, rate = 1 / p),
mu.fn = function(x, p){
exp(dweibull(x, shape = p[1], scale = p[2], log = TRUE) -
pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE, log.p = TRUE))
},
H.fn = function(x, p) pexp(x, rate = 1 / p),
logg.fn = function(x, p){
dweibull(x, shape = p[1], scale = p[2], log = TRUE) -
pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE, log.p = TRUE)
- (x / p[2]) ^ p[1]
},
Mu.fn=function(x, p){
- pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE, log.p = TRUE)
}){
p.mu <- pars[1:2]
p.h <- pars[3]
eta <- pars[4]
n <- length(tms)
  iter <- 1; diff <- 10000
  # EM loop: the E-step computes responsibilities (pik, pi, omega) for whether
  # each event is a renewal event or a self-excited offspring; the M-step
  # maximises Qmu (renewal density p.mu) and Qh (excitation p.h and eta)
  while(diff > tol & iter <= maxiter) {
mu <- function(t) mu.fn(t, p.mu)
Mu <- function(t) Mu.fn(t, p.mu)
h <- function(t) h.fn(t, p.h)
H <- function(t) H.fn(t, p.h)
phi <- function(s) eta * sum(h(s - tms[tms < s]))
mustar <- function(i,omega) sum(omega[i, 1:(i - 1)] *
mu(tms[i] - tms[tms < tms[i]]))
phi.fn <- function(s, p,eta) eta * sum(h.fn(s - tms[tms < s], p))
pik <- matrix(NA, nrow = n, ncol = n)
for(i in 2:n) for(k in 1:(i - 1)) {
pik[i, k] <- mu(tms[i] - tms[k]) / (mu(tms[i] - tms[k]) + phi(tms[i]))
}
pi <- numeric(n)
pi[1] <- 1
pi[2] <- mu(tms[2] - tms[1]) / (mu(tms[2] - tms[1]) + phi(tms[2]))
omega <- matrix(NA, nrow = n, ncol = n)
omega[2, 1] <- 1
i <- 3
while(i <=n ){
c <- 1 - pik[i-1, 1:(i - 2)]
omega[i, 1:(i - 2)] <- omega[i-1, 1:(i - 2)] * c
omega[i, i-1] <- pi[i-1]
pi[i] <- mustar(i, omega) / (mustar(i, omega) + phi(tms[i]))
i <- i + 1
}
c <- 1 - pik[n, 1:(n - 1)]
wnp1 <- c(omega[n, 1:(n - 1)] * c, pi[n])
Qmu <- function(p.mu){
sum <- logg.fn(tms[1], p.mu)
for(i in 2:n) for(j in 1:(i - 1)){
sum <- sum + omega[i, j] * pik[i, j] * log(mu.fn(tms[i] - tms[j], p.mu)) -
omega[i, j] * pik[i, j] * Mu.fn(tms[i] - tms[j], p.mu)
}
sum <- sum - sum(wnp1 * Mu.fn(cens - tms, p.mu))
return(sum)
}
temp <- optim(par = p.mu, fn = Qmu, control = list(fnscale = -1))
pars.mu <- temp$par
Qh <- function(pars){
tau0 <- pars[1]; eta <- pars[2]
sum <- 0
for(i in 2:n){
sum <- sum + (1 - pi[i]) * log(phi.fn(tms[i], tau0, eta))
}
sum <- sum - eta * sum(H.fn(cens - tms, tau0))
return(sum)
}
temp <- optim(par = c(p.h, eta), fn = Qh, control = list(fnscale = -1))
pars.h <- temp$par[1]
pars.eta <- temp$par[2]
diff <- sum(abs(c(pars.mu, pars.h, pars.eta)-c(p.mu ,p.h ,eta)))
p.mu <- pars.mu
p.h <- pars.h
eta <- pars.eta
print(c(p.mu, p.h, eta))
iter <- iter + 1
}
list(iterations = iter-1, diff = diff, pars = c(p.mu, p.h, eta))
} |
area.graph.statistics <- function(...) {
  .Deprecated("graph.statistics", package="GeNetIt",
    msg="this function is deprecated, please use graph.statistics with buffer argument")
  graph.statistics(...)
}
workingDirectoryPopulate <- function (directoryName=".")
{
directoryName = sub("(.*)/$","\\1",directoryName)
if(!file.exists(c(directoryName))[1]){
dir.create(directoryName,recursive=TRUE)
}
wkdir = configFilesDirectoryNameGet()
if(is_absolute_path(wkdir))
{
if(!file.exists(wkdir))
dir.create(wkdir, recursive=TRUE)
}
else
{
txdir = paste0(directoryName,"/",configFilesDirectoryNameGet())
if(!file.exists(c(txdir))) {
dir.create(txdir,recursive=TRUE)
}
}
localCopy<-function(n, fileType="text") {
thisFile = paste0(directoryName,"/",n)
if(file.exists(thisFile))
{
timeLt = as.POSIXlt(Sys.time())
expandedName = paste0(thisFile,".",as.character(julian(Sys.Date())),as.character(timeLt$hour),as.character(timeLt$min),as.character(timeLt$sec))
if(!file.exists(expandedName))
{
          # compare full file contents; readBin() reads a single element by default
          templateFile <- system.file("templates", n, package=packageName(), mustWork=TRUE)
          if(!isTRUE(all.equal(readBin(thisFile, "raw", n = file.size(thisFile)),
                               readBin(templateFile, "raw", n = file.size(templateFile)))))
{
file.copy(thisFile,expandedName,overwrite=TRUE)
warning("existing file ", thisFile, " saved as ", expandedName)
}
}
else
{
warning("file ", thisFile, " could not be saved as ", expandedName, " because it already exists, file overwritten")
}
}
print(paste("file to be copied ", n))
file.copy(system.file("templates", n, package=packageName(),mustWork=TRUE), directoryName)
}
localCopy("makerpt.ps1")
localCopy("makerpt.sh")
localCopy("logo.eps")
localCopy("webanalytics.cls")
localCopy("sampleRfile.R")
localCopy("sample.config")
} |
itrax_image <- function(file = "optical.tif",
meta = "document.txt",
plot = FALSE,
trim = TRUE){
image <- tiff::readTIFF(file)
meta <- itrax_meta(meta)
image <- aperm(image, c(2, 1, 3))
image <- image[c(dim(image)[1]: 1), , ]
row.names(image) <- seq(from = as.numeric(meta[ 9, 2]),
to = as.numeric(meta[10, 2]),
length.out = dim(image)[1])
colnames(image) <- seq(from = 0,
by = (as.numeric(meta[10, 2]) - as.numeric(meta[9, 2])) / dim(image)[1],
length.out = dim(image)[2])
if(length(trim) == 2){
image <- image[ which(as.numeric(rownames(image)) >= trim[1] & as.numeric(rownames(image)) <= trim[2]) , , ]
}
else if(trim == TRUE){
image <- image[ which(as.numeric(rownames(image)) >= as.numeric(meta[6, 2]) & as.numeric(rownames(image)) <= as.numeric(meta[7, 2])) , , ]
}
else if(trim == FALSE){
}
else{stop("If you define trim parameters, pass a two element numeric vector of the start and stop positions.")}
if(plot == TRUE){
print(ggplot() +
ylim(rev(range(as.numeric(rownames(image))))) +
scale_x_continuous(limits = range(as.numeric(colnames(image))),
breaks = range(as.numeric(colnames(image))),
labels = round(range(as.numeric(colnames(image))),1)) +
labs(y = "Position [mm]", x = "[mm]") +
coord_fixed(ratio = 1) +
annotation_custom(rasterGrob(image,
width = unit(1, "npc"),
height = unit(1, "npc")),
ymax = max(as.numeric(rownames(image)))/-1,
ymin = min(as.numeric(rownames(image)))/-1,
xmin = min(as.numeric(colnames(image))),
xmax = max(as.numeric(colnames(image))))
)
}
return(list(image = image, meta = meta[6:11, ]))
} |
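# Hedged example: the file paths are placeholders for Itrax core-scanner output
# img <- itrax_image(file = "optical.tif", meta = "document.txt",
#                    plot = TRUE, trim = c(10, 80))
# str(img$image); img$meta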
get_lang <- function(query, api_key = NULL){
check_internet()
check_api_key(api_key)
queryurl <- URLencode(query)
url <- GET(paste0("http://apilayer.net/api/detect", "?access_key=", api_key, "&query=", queryurl))
check_status_code(url)
content <- json_raw_to_char(url)
check_success(content)
content <- content$results
res <- do.call(rbind, lapply(content, function(obj){
data.frame(query = query,
language_code = obj$language_code %||% NA,
language_name = obj$language_name %||% NA,
probability = obj$probability %||% NA,
percentage = obj$percentage %||% NA,
reliable_result = obj$reliable_result %||% NA,
stringsAsFactors = FALSE)
}))
as_tbl(res)
} |
gibble.POINT <- function(x, ...) {x <- tibble::as_tibble(ibble(x)); dplyr::mutate(x, type = names(types)[x$type])}
gibble.MULTIPOINT <- function(x, ...) {dm <- dim(unclass(x)); tibble::tibble(nrow = dm[1], ncol = dm[2])} %>% dplyr::mutate(type = "MULTIPOINT")
gibble.LINESTRING <- function(x, ...) {dm <- dim(unclass(x)); tibble::tibble(nrow = dm[1], ncol = dm[2])} %>% dplyr::mutate(type = "LINESTRING")
gibble.MULTILINESTRING <- function(x, ...) lapply(unclass(x), gibble.MULTIPOINT) %>% dplyr::bind_rows() %>% dplyr::mutate(type = "MULTILINESTRING")
gibble.POLYGON <- function(x, ...) lapply(unclass(x), gibble.MULTIPOINT) %>% dplyr::bind_rows() %>% dplyr::mutate(type = "POLYGON")
gibble.POLYPART <- function(x, subobject = 1L, ...) {
lapply(x, gibble.MULTIPOINT) %>%
dplyr::bind_rows() %>%
dplyr::mutate(subobject = subobject)
}
gibble.MULTIPOLYGON <- function(x, ...) {
x <- unclass(x)
lapply(seq_along(x), function(a) gibble.POLYPART(x[[a]], subobject = a)) %>%
dplyr::bind_rows() %>%
dplyr::mutate(type = "MULTIPOLYGON")
}
gibble.list <- function(x, ...) {
out <- try(ibble.sfc(x), silent = TRUE)
if (inherits(out, "try-error")) stop("we tried to interpret as an sf/sfc list-column but failed")
dplyr::mutate(tibble::as_tibble(out), type = names(types)[out[, "type", drop = TRUE]])
}
gibble.sfc <- function(x, ...) {
xout <- tibble::as_tibble(ibble(x))
if (xout[["type"]][1L] == 11L) {
classes <- unlist(lapply(x, function(xa) lapply(xa, function(xb) rev(class(xb))[2L])))
if (length(classes) == dim(xout)[1L]) {
xout[["type"]] <- classes
}
}
if (is.numeric(xout[["type"]][1L])) {
xout[["type"]] <- names(types)[xout[["type"]]]
}
xout
}
gibble.sf <- function(x, ...) {
gibble(x[[attr(x, "sf_column")]])
} |
context("plot.word_coverage")
test_that("plot() produces no error", {
c <- word_coverage(twitter_dict, twitter_test)
expect_error(plot(c, include_EOS = FALSE), NA)
expect_error(plot(c, include_EOS = TRUE), NA)
expect_error(plot(c, show_limit = FALSE), NA)
}) |
trtcombo.std.order = function(n)
{
fact = letters[1:n]
control = '1'
trt.combo = array(dim=1)
ini.1 = array(dim=1)
trt.combo[1] = control
for(i in 1:length(fact))
{
v = fact[i]
for(j in 1:length(trt.combo))
{
if(trt.combo[j] == '1')
{
ini.1[j] = paste(v)
}else
{
ini.1[j] = paste(trt.combo[j],v)
}
}
trt.combo = c(trt.combo,ini.1)
}
return(trt.combo)
} |
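# Worked example: for n = 3 factors the treatment combinations come out in
# standard (Yates) order, with '1' denoting the control
trtcombo.std.order(3)
# [1] "1"     "a"     "b"     "a b"   "c"     "a c"   "b c"   "a b c"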
test_that("correct number of rows", {
expect_equal(nrow(imd_wales_msoa), 410)
})
test_that("Welsh MSOAs", {
expect_equal(unique(substr(imd_wales_msoa$msoa_code, 1, 1)), "W")
}) |
SP <- function(x = 1, Data, AddInd = "B", rescale = "mean1", start = NULL, fix_dep = TRUE, fix_n = TRUE, LWT = NULL,
n_seas = 4L, n_itF = 3L, use_r_prior = FALSE, r_reps = 1e2, SR_type = c("BH", "Ricker"),
silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
control = list(iter.max = 5e3, eval.max = 1e4), ...) {
SP_(x = x, Data = Data, AddInd = AddInd, state_space = FALSE, rescale = rescale, start = start, fix_dep = fix_dep, fix_n = fix_n, fix_sigma = TRUE,
fix_tau = TRUE, LWT = LWT, n_seas = n_seas, n_itF = n_itF, use_r_prior = use_r_prior, r_reps = r_reps, SR_type = SR_type, integrate = FALSE,
silent = silent, opt_hess = opt_hess, n_restart = n_restart, control = control, inner.control = list(), ...)
}
class(SP) <- "Assess"
SP_SS <- function(x = 1, Data, AddInd = "B", rescale = "mean1", start = NULL, fix_dep = TRUE, fix_n = TRUE, fix_sigma = TRUE,
fix_tau = TRUE, LWT = NULL, early_dev = c("all", "index"), n_seas = 4L, n_itF = 3L,
use_r_prior = FALSE, r_reps = 1e2, SR_type = c("BH", "Ricker"), integrate = FALSE,
silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
control = list(iter.max = 5e3, eval.max = 1e4), inner.control = list(), ...) {
SP_(x = x, Data = Data, AddInd = AddInd, state_space = TRUE, rescale = rescale, start = start, fix_dep = fix_dep, fix_n = fix_n, fix_sigma = fix_sigma,
fix_tau = fix_tau, early_dev = early_dev, LWT = LWT, n_seas = n_seas, n_itF = n_itF, use_r_prior = use_r_prior, r_reps = r_reps,
SR_type = SR_type, integrate = integrate, silent = silent, opt_hess = opt_hess, n_restart = n_restart,
control = control, inner.control = inner.control, ...)
}
class(SP_SS) <- "Assess"
SP_Fox <- function(x = 1, Data, ...) {
SP_args <- c(x = x, Data = Data, list(...))
SP_args$start$n <- 1
SP_args$fix_n <- TRUE
do.call(SP, SP_args)
}
class(SP_Fox) <- "Assess"
SP_ <- function(x = 1, Data, AddInd = "B", state_space = FALSE, rescale = "mean1", start = NULL, fix_dep = TRUE, fix_n = TRUE, fix_sigma = TRUE,
fix_tau = TRUE, early_dev = c("all", "index"), LWT = NULL, n_seas = 4L, n_itF = 3L,
use_r_prior = FALSE, r_reps = 1e2, SR_type = c("BH", "Ricker"), integrate = FALSE,
silent = TRUE, opt_hess = FALSE, n_restart = ifelse(opt_hess, 0, 1),
control = list(iter.max = 5e3, eval.max = 1e4), inner.control = list(), ...) {
dependencies = "Data@Cat, Data@Ind"
dots <- list(...)
start <- lapply(start, eval, envir = environment())
early_dev <- match.arg(early_dev)
if(any(names(dots) == "yind")) {
yind <- eval(dots$yind)
} else {
ystart <- which(!is.na(Data@Cat[x, ]))[1]
yind <- ystart:length(Data@Cat[x, ])
}
Year <- Data@Year[yind]
C_hist <- Data@Cat[x, yind]
if(any(is.na(C_hist))) stop('Model is conditioned on complete catch time series, but there is missing catch.')
ny <- length(C_hist)
if(rescale == "mean1") rescale <- 1/mean(C_hist)
Ind <- lapply(AddInd, Assess_I_hist, Data = Data, x = x, yind = yind)
I_hist <- vapply(Ind, getElement, numeric(ny), "I_hist")
I_sd <- vapply(Ind, getElement, numeric(ny), "I_sd")
if(is.null(I_hist)) stop("No indices found.")
nsurvey <- ncol(I_hist)
if(state_space) {
if(early_dev == "all") est_B_dev <- rep(1, ny)
if(early_dev == "index") {
first_year_index <- which(apply(I_hist, 1, function(x) any(!is.na(x))))[1]
est_B_dev <- ifelse(1:ny < first_year_index, 0, 1)
}
} else {
if(nsurvey == 1 && (AddInd == 0 | AddInd == "B")) {
fix_sigma <- FALSE
}
est_B_dev <- rep(0, ny)
}
if(is.null(LWT)) LWT <- rep(1, nsurvey)
if(length(LWT) != nsurvey) stop("LWT needs to be a vector of length ", nsurvey)
data <- list(model = "SP", C_hist = C_hist, rescale = rescale, I_hist = I_hist, I_sd = I_sd, I_lambda = LWT,
fix_sigma = as.integer(fix_sigma), nsurvey = nsurvey, ny = ny,
est_B_dev = est_B_dev, nstep = n_seas, dt = 1/n_seas, n_itF = n_itF)
if(use_r_prior) {
if(!is.null(start$r_prior) && length(start$r_prior) == 2) {
rp <- data$r_prior <- start$r_prior
} else {
rp <- r_prior_fn(x, Data, r_reps = r_reps, SR_type = SR_type)
data$r_prior <- c(mean(rp), max(sd(rp), 0.1 * mean(rp)))
}
} else {
rp <- data$r_prior <- c(0, 0)
}
params <- list()
if(!is.null(start)) {
if(!is.null(start$FMSY) && is.numeric(start$FMSY)) params$log_FMSY <- log(start$FMSY[1])
if(!is.null(start$MSY) && is.numeric(start$MSY)) params$MSYx <- log(start$MSY[1])
if(!is.null(start$dep) && is.numeric(start$dep)) params$log_dep <- log(start$dep[1])
if(!is.null(start$n) && is.numeric(start$n)) params$log_n <- log(start$n[1])
if(!is.null(start$sigma) && is.numeric(start$sigma)) params$log_sigma <- log(start$sigma)
if(!is.null(start$tau) && is.numeric(start$tau)) params$log_tau <- log(start$tau[1])
}
if(is.null(params$log_FMSY)) params$log_FMSY <- ifelse(is.na(Data@Mort[x]), 0.2, 0.5 * Data@Mort[x]) %>% log()
if(is.null(params$MSYx)) params$MSYx <- mean(3 * C_hist * rescale) %>% log()
if(is.null(params$log_dep)) params$log_dep <- log(1)
if(is.null(params$log_n)) params$log_n <- log(2)
if(is.null(params$log_sigma)) params$log_sigma <- rep(log(0.05), nsurvey)
if(is.null(params$log_tau)) params$log_tau <- log(0.1)
params$log_B_dev <- rep(0, ny)
map <- list()
if(fix_dep) map$log_dep <- factor(NA)
if(fix_n) map$log_n <- factor(NA)
if(fix_sigma) map$log_sigma <- factor(rep(NA, nsurvey))
if(fix_tau) map$log_tau <- factor(NA)
if(any(!est_B_dev)) map$log_B_dev <- factor(ifelse(est_B_dev, 1:sum(est_B_dev), NA))
random <- NULL
if(integrate) random <- "log_B_dev"
info <- list(Year = Year, data = data, params = params, rp = rp, control = control, inner.control = inner.control)
obj <- MakeADFun(data = info$data, parameters = info$params, hessian = TRUE,
map = map, random = random, DLL = "SAMtool", silent = silent)
high_F <- try(obj$report(c(obj$par, obj$env$last.par[obj$env$random]))$penalty > 0 ||
any(is.na(obj$report(c(obj$par, obj$env$last.par[obj$env$random]))$F)), silent = TRUE)
if(!is.character(high_F) && !is.na(high_F) && high_F) {
for(ii in 1:10) {
obj$par["MSYx"] <- 0.5 + obj$par["MSYx"]
if(all(!is.na(obj$report(obj$par)$F)) &&
obj$report(c(obj$par, obj$env$last.par[obj$env$random]))$penalty == 0) break
}
}
mod <- optimize_TMB_model(obj, control, opt_hess, n_restart)
opt <- mod[[1]]
SD <- mod[[2]]
report <- obj$report(obj$env$last.par.best)
Yearplusone <- c(Year, max(Year) + 1)
nll_report <- ifelse(is.character(opt), ifelse(integrate, NA, report$nll), opt$objective)
report$dynamic_SSB0 <- SP_dynamic_SSB0(obj, data = info$data, params = info$params, map = map) %>%
structure(names = Yearplusone)
Assessment <- new("Assessment", Model = ifelse(state_space, "SP_SS", "SP"),
Name = Data@Name, conv = SD$pdHess,
FMSY = report$FMSY, MSY = report$MSY, BMSY = report$BMSY, VBMSY = report$BMSY,
B0 = report$K, VB0 = report$K, FMort = structure(report$F, names = Year),
F_FMSY = structure(report$F/report$FMSY, names = Year),
B = structure(report$B, names = Yearplusone),
B_BMSY = structure(report$B/report$BMSY, names = Yearplusone),
B_B0 = structure(report$B/report$K, names = Yearplusone),
VB = structure(report$B, names = Yearplusone),
VB_VBMSY = structure(report$B/report$BMSY, names = Yearplusone),
VB_VB0 = structure(report$B/report$K, names = Yearplusone),
SSB = structure(report$B, names = Yearplusone),
SSB_SSBMSY = structure(report$B/report$BMSY, names = Yearplusone),
SSB_SSB0 = structure(report$B/report$K, names = Yearplusone),
Obs_Catch = structure(C_hist, names = Year), Obs_Index = structure(I_hist, dimnames = list(Year, paste0("Index_", 1:nsurvey))),
Catch = structure(report$Cpred, names = Year), Index = structure(report$Ipred, dimnames = list(Year, paste0("Index_", 1:nsurvey))),
NLL = structure(c(nll_report, report$nll_comp, report$penalty, report$prior),
names = c("Total", paste0("Index_", 1:nsurvey), "Dev", "Penalty", "Prior")),
info = info, obj = obj, opt = opt, SD = SD, TMB_report = report,
dependencies = dependencies)
if(state_space) {
Assessment@Dev <- structure(report$log_B_dev, names = Year)
Assessment@Dev_type <- "log-Biomass deviations"
Assessment@NLL <- structure(c(nll_report, report$nll_comp, report$penalty, report$prior),
names = c("Total", paste0("Index_", 1:nsurvey), "Dev", "Penalty", "Prior"))
} else {
Assessment@NLL <- structure(c(nll_report, report$nll_comp[1:nsurvey], report$penalty, report$prior),
names = c("Total", paste0("Index_", 1:nsurvey), "Penalty", "Prior"))
}
if(Assessment@conv) {
if(state_space) {
SE_Dev <- as.list(SD, "Std. Error")$log_B_dev
Assessment@SE_Dev <- structure(ifelse(is.na(SE_Dev), 0, SE_Dev), names = Year)
}
Assessment@SE_FMSY <- SD$sd[names(SD$value) == "FMSY"]
Assessment@SE_MSY <- SD$sd[names(SD$value) == "MSY"]
Assessment@SE_F_FMSY <- SD$sd[names(SD$value) == "F_FMSY_final"] %>% structure(names = max(Year))
Assessment@SE_B_BMSY <- Assessment@SE_SSB_SSBMSY <- Assessment@SE_VB_VBMSY <-
SD$sd[names(SD$value) == "B_BMSY_final"] %>% structure(names = max(Year))
Assessment@SE_B_B0 <- Assessment@SE_SSB_SSB0 <- Assessment@SE_VB_VB0 <-
SD$sd[names(SD$value) == "B_K_final"] %>% structure(names = max(Year))
catch_eq <- function(Ftarget) {
projection_SP(Assessment, Ftarget = Ftarget, p_years = 1, p_sim = 1, obs_error = list(matrix(1, 1, 1), matrix(1, 1, 1)),
process_error = matrix(1, 1, 1)) %>% slot("Catch") %>% as.vector()
}
Assessment@forecast <- list(catch_eq = catch_eq)
}
return(Assessment)
}
r_prior_fn <- function(x = 1, Data, r_reps = 1e2, SR_type = c("BH", "Ricker"), seed = x) {
SR_type <- match.arg(SR_type)
set.seed(x)
M <- trlnorm(r_reps, Data@Mort[x], Data@CV_Mort[x])
steep <- sample_steepness3(r_reps, Data@steep[x], Data@CV_steep[x], SR_type)
max_age <- Data@MaxAge
a <- Data@wla[x]
b <- Data@wlb[x]
Linf <- Data@vbLinf[x]
K <- Data@vbK[x]
t0 <- Data@vbt0[x]
La <- Linf * (1 - exp(-K * (c(1:max_age) - t0)))
Wa <- a * La ^ b
A50 <- min(0.5 * max_age, iVB(t0, K, Linf, Data@L50[x]))
A95 <- max(A50+0.5, iVB(t0, K, Linf, Data@L95[x]))
mat_age <- 1/(1 + exp(-log(19) * (c(1:max_age) - A50)/(A95 - A50)))
mat_age <- mat_age/max(mat_age)
log_r <- vapply(1:r_reps, function(y) uniroot(Euler_Lotka_fn, c(-6, 2), M = M[y], h = steep[y], weight = Wa,
mat = mat_age, maxage = max_age, SR_type = SR_type)$root, numeric(1))
return(exp(log_r))
}
Euler_Lotka_fn <- function(log_r, M, h, weight, mat, maxage, SR_type) {
M <- rep(M, maxage)
NPR <- calc_NPR(exp(-M), maxage)
SBPR <- sum(NPR * weight * mat)
CR <- ifelse(SR_type == "BH", 4*h/(1-h), (5*h)^1.25)
alpha <- CR/SBPR
EL <- alpha * sum(NPR * weight * mat * exp(-exp(log_r) * c(1:maxage)))
return(EL - 1)
}
SP_dynamic_SSB0 <- function(obj, par = obj$env$last.par.best, ...) {
dots <- list(...)
dots$data$C_hist <- rep(1e-8, dots$data$ny)
dots$params$log_dep <- log(1)
obj2 <- MakeADFun(data = dots$data, parameters = dots$params, map = dots$map,
random = obj$env$random, DLL = "SAMtool", silent = TRUE)
obj2$report(par)$B
} |
library(simstudy)
library(data.table)
library(ggplot2)
library(knitr)
library(data.table)
options(digits = 3)
opts_chunk$set(tidy.opts=list(width.cutoff=55), tidy=TRUE)
plotcolors <- c("
cbbPalette <- c("
"
ggtheme <- function(panelback = "white") {
ggplot2::theme(
panel.background = element_rect(fill = panelback),
panel.grid = element_blank(),
axis.ticks = element_line(colour = "black"),
panel.spacing =unit(0.25, "lines"),
panel.border = element_rect(fill = NA, colour="grey90"),
plot.title = element_text(size = 8,vjust=.5,hjust=0),
axis.text = element_text(size=8),
axis.title = element_text(size = 8)
)
}
def <- defData(varname="age", dist="normal", formula=10, variance = 2)
def <- defData(def, varname="female", dist="binary",
formula="-2 + age * 0.1", link = "logit")
def <- defData(def,varname="visits", dist="poisson",
formula="1.5 - 0.2 * age + 0.5 * female", link="log")
knitr::kable(def)
def <- defData(varname="age", dist="normal", formula=10, variance = 2)
def <- defData(def, varname="female", dist="binary",
formula="-2 + age * 0.1", link = "logit")
def <- defData(def,varname="visits", dist="poisson",
formula="1.5 - 0.2 * age + 0.5 * female", link="log")
set.seed(87261)
dd <- genData(1000, def)
dd
genData(1000)
study1 <- trtAssign(dd , n=3, balanced = TRUE, strata = c("female"), grpName = "rx")
study1
study1[, .N, keyby = .(female, rx)]
def <- defData(varname = "age", dist = "normal", formula=10, variance = 2)
def <- defData(def, varname="female", dist="binary",
formula="-2 + age * 0.1", link = "logit")
def <- defData(def,varname="visits", dist="poisson",
formula="1.5 - 0.2 * age + 0.5 * female", link="log")
myinv <- function(x) {
1/x
}
def <- defData(varname = "age", formula=10, variance = 2, dist = "normal")
def <- defData(def, varname="loginvage", formula="log(myinv(age))",
variance = 0.1, dist="normal")
genData(5, def)
def10 <- updateDef(def, changevar = "loginvage", newformula = "log10(myinv(age))")
def10
genData(5, def10)
age_effect <- 3
def <- defData(varname = "age", formula=10, variance = 2, dist = "normal")
def <- defData(def, varname="agemult",
formula="age * ..age_effect", dist="nonrandom")
def
genData(2, def)
age_effects <- c(0, 5, 10)
list_of_data <- list()
for (i in seq_along(age_effects)) {
age_effect <- age_effects[i]
list_of_data[[i]] <- genData(2, def)
}
list_of_data
d <- list()
d[[1]] <- data.table("beta", "mean", "both", "-", "dispersion", "X", "-", "X")
d[[2]] <- data.table("binary", "probability", "both", "-", "-", "X", "-", "X")
d[[3]] <- data.table("binomial", "probability", "both", "-", "
d[[4]] <- data.table("categorical", "probability", "string", " p_1;p_2;...;p_n", "a;b;c", "X", "-", "-")
d[[5]] <- data.table("exponential", "mean", "both", "-", "-", "X", "X", "-")
d[[6]] <- data.table("gamma", "mean", "both", "-", "dispersion", "X", "X", "-")
d[[7]] <- data.table("mixture", "formula", "string", "x_1 | p_1 + ... + x_n | p_n", "-", "X", "-", "-")
d[[8]] <- data.table("negBinomial", "mean", "both", "-", "dispersion", "X", "X", "-")
d[[9]] <- data.table("nonrandom", "formula", "both", "-", "-", "X", "-", "-")
d[[10]] <- data.table("normal", "mean", "both", "-", "variance", "X", "-", "-")
d[[11]] <- data.table("noZeroPoisson", "mean", "both", "-", "-", "X", "X", "-")
d[[12]] <- data.table("poisson", "mean", "both", "-", "-", "X", "X", "-")
d[[13]] <- data.table("trtAssign", "ratio", "string", "r_1;r_2;...;r_n", "stratification", "X", "X", "-")
d[[14]] <- data.table("uniform", "range", "string", "from ; to", "-", "X", "-", "-")
d[[15]] <- data.table("uniformInt", "range", "string", "from ; to", "-", "X", "-", "-")
d <- rbindlist(d)
setnames(d, c("name", "formula", "string/value", "format", "variance", "identity", "log", "logit"))
knitr::kable(d, align = "lllllccc")
def <- defRepeat(nVars = 4, prefix = "g", formula = "1/3;1/3;1/3",
variance = 0, dist = "categorical")
def <- defData(def, varname = "a", formula = "1;1", dist = "trtAssign")
def <- defRepeat(def, 3, "b", formula = "5 + a", variance = 3, dist = "normal")
def <- defData(def, "y", formula = "0.10", dist = "binary")
def
d1 <- defData(varname = "x1", formula = 0, variance = 1, dist = "normal")
d1 <- defData(d1, varname = "x2", formula = 0.5, dist = "binary")
d2 <- defRepeatAdd(nVars = 2, prefix = "q", formula = "5 + 3*rx",
variance = 4, dist = "normal")
d2 <- defDataAdd(d2, varname = "y", formula = "-2 + 0.5*x1 + 0.5*x2 + 1*rx",
dist = "binary", link = "logit")
dd <- genData(5, d1)
dd <- trtAssign(dd, nTrt = 2, grpName = "rx")
dd
dd <- addColumns(d2, dd)
dd
d <- defData(varname = "x", formula = 0, variance = 9, dist = "normal")
dc <- defCondition(condition = "x <= -2", formula = "4 + 3*x",
variance = 2, dist = "normal")
dc <- defCondition(dc, condition = "x > -2 & x <= 2", formula = "0 + 1*x",
variance = 4, dist = "normal")
dc <- defCondition(dc, condition = "x > 2", formula = "-5 + 4*x",
variance = 3, dist = "normal")
dd <- genData(1000, d)
dd <- addCondition(dc, dd, newvar = "y")
ggplot(data = dd, aes(y = y, x = x)) +
geom_point(color = " grey60", size = .5) +
geom_smooth(se = FALSE, size = .5) +
ggtheme("grey90") |
descript_d <- function(data, latex = FALSE){
if(is.na(class(data)[2])) {
stop("data is not an object of class dscore or dsciat")
} else if (class(data)[2] == "dscore" | class(data)[2] == "dsciat"){
if (class(data)[2] == "dscore"){
sel_var <- c(grep("dscore", colnames(data)),
grep("d_practice", colnames(data)),
grep("d_test", colnames(data)))
names_table <- c("D-score", "D-practice", "D-test")
} else if (class(data)[2] == "dsciat"){
sel_var <- c(grep("d_sciat", colnames(data)),
grep("RT_mean.mappingA", colnames(data)),
grep("RT_mean.mappingB", colnames(data)))
names_table <- c("D-Sciat", "RT.MappingA", "RT.MappingB")
}
data <- data[ , sel_var]
mean_all <- c(mean(data[,1]), mean(data[,2]), mean(data[,3]))
sd_all <- c(sd(data[,1]), sd(data[,2]), sd(data[,3]))
min_all <- c(min(data[,1]), min(data[,2]), min(data[,3]))
max_all <- c(max(data[,1]), max(data[,2]), max(data[,3]))
table_d <- data.frame(Mean = mean_all,
SD = sd_all,
Min = min_all,
Max = max_all)
rownames(table_d) <- names_table
table_d <- round(table_d, 2)
if (latex == TRUE){
return(xtable::xtable(table_d))
} else {
return(table_d)
}
} else {stop("data is not an object of class dscore or dsciat")}
} |
"cdfkap" <-
function(x,para) {
if(! are.parkap.valid(para)) return()
SMALL <- 1e-15
U <- para$para[1]
A <- para$para[2]
G <- para$para[3]
H <- para$para[4]
f <- sapply(1:length(x), function(i) {
Y <- (x[i]-U)/A
if(G == 0) {
Y <- exp(-Y)
} else {
ARG <- 1-G*Y
if(ARG > SMALL) {
Y <- exp(-1*(-log(ARG)/G))
} else {
if(G < 0) return(0)
if(G > 0) return(1)
stop("should not be here in execution")
}
}
if(H == 0) return(exp(-Y))
ARG <- 1-H*Y
if(ARG > SMALL) return(exp(-1*(-log(ARG)/H)))
return(0) })
names(f) <- NULL
return(f)
} |
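# Hedged example following lmomco conventions, where vec2par() builds the
# kappa parameter list with para = c(xi, alpha, kappa, h)
para <- lmomco::vec2par(c(1000, 15000, 0.5, -0.1), type = "kap")
cdfkap(c(5000, 20000), para)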
ss.aipe.cv <- function(C.of.V=NULL, width=NULL, conf.level=.95, degree.of.certainty=NULL, assurance=NULL, certainty=NULL, mu=NULL, sigma=NULL, alpha.lower=NULL, alpha.upper=NULL, Suppress.Statement=TRUE, sup.int.warns=TRUE, ...)
{
    if(!is.null(certainty) && is.null(degree.of.certainty) && is.null(assurance)) degree.of.certainty <- certainty
    if(is.null(assurance) && !is.null(degree.of.certainty) && is.null(certainty)) assurance <- degree.of.certainty
    if(!is.null(assurance) && is.null(degree.of.certainty) && is.null(certainty)) degree.of.certainty <- assurance
if(C.of.V<=0) stop("The 'C.of.V' value should be positive (see Chattopadhyaya and Kelley, 2016)")
if(!is.null(assurance) && !is.null (degree.of.certainty) && assurance!=degree.of.certainty)
stop("The arguments 'assurance' and 'degree.of.certainty' must have the same value.")
if(!is.null(assurance) && !is.null (certainty) && assurance!=certainty)
stop("The arguments 'assurance' and 'certainty' must have the same value.")
if(!is.null(degree.of.certainty) && !is.null (certainty) && degree.of.certainty!=certainty)
stop("The arguments 'degree.of.certainty' and 'certainty' must have the same value.")
if(sup.int.warns==TRUE) options(warn=-1)
if(is.null(conf.level))
{
if(alpha.lower>=1 | alpha.lower<0) stop("\'alpha.lower\' is not correctly specified.")
if(alpha.upper>=1 | alpha.upper<0) stop("\'alpha.upper\' is not correctly specified.")
}
if(is.null(width)) stop("A value for \'width\' must be specified.")
if(!is.null(conf.level))
{
if(!is.null(alpha.lower) | !is.null(alpha.upper)) stop("Since \'conf.level\' is specified, \'alpha.lower\' and \'alpha.upper\' should be \'NULL\'.")
alpha.lower <- (1-conf.level)/2
alpha.upper <- (1-conf.level)/2
}
if(!is.null(degree.of.certainty))
{
if((degree.of.certainty <= 0) | (degree.of.certainty >= 1)) stop("The 'degree.of.certainty' must either be NULL or some value greater than .50 and less than 1.", call.=FALSE)
if(degree.of.certainty <= .50) stop("The 'degree.of.certainty' should be > .5 (but less than 1).", call.=FALSE)
}
minimal.N <- 4
Lim.0 <- ci.cv(cv=cv(C.of.V=C.of.V, N=minimal.N, unbiased=TRUE), n=minimal.N, alpha.lower=alpha.lower, alpha.upper=alpha.upper, conf.level=NULL)
Current.Width <- Lim.0$Upper - Lim.0$Lower
dif <- Current.Width - width
N.0 <- minimal.N
while(dif > 0)
{
N <- N.0+1
CI.for.CV <- ci.cv(cv=cv(C.of.V=C.of.V, N=N, unbiased=TRUE), n=N, alpha.lower=alpha.lower, alpha.upper=alpha.upper, conf.level=NULL)
Current.Width <- CI.for.CV$Upper - CI.for.CV$Lower
dif <- Current.Width - width
N.0 <- N
}
if(!is.null(degree.of.certainty))
{
beyond.CV.NCP <- qt(p=1-degree.of.certainty, df=N-1, ncp = sqrt(N)/C.of.V, lower.tail = TRUE, log.p = FALSE)
Lim.for.Certainty <- sqrt(N)/beyond.CV.NCP
N.gamma <- ss.aipe.cv(C.of.V=cv(C.of.V=Lim.for.Certainty, N=N, unbiased=TRUE), width=width, alpha.lower=alpha.lower, alpha.upper=alpha.upper, conf.level=NULL, degree.of.certainty=NULL, Suppress.Statement=TRUE)
}
if(is.null(degree.of.certainty))
{
if(Suppress.Statement==FALSE) print(paste("In order the the expected confidence interval width to be no larger than", width, ",the sample size that should be used is:", N))
return(N)
}
if(!is.null(degree.of.certainty))
{
if(Suppress.Statement==FALSE) print(paste("In order the the confidence interval width to be no less than", width, "with no less than", degree.of.certainty*100, "certainty, the sample size that should be used is:", N.gamma))
return(N.gamma)
}
if(sup.int.warns==TRUE) options(warn=1)
} |
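# A sketch of typical usage (MBESS-style), assuming the supporting cv() and
# ci.cv() functions are available: sample size so that the 95% CI for a
# coefficient of variation of .25 has expected width no larger than .10
ss.aipe.cv(C.of.V = .25, width = .10, conf.level = .95)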
SensTimePlot <- function(object, fdata = NULL, date.var = NULL, facet = FALSE,
smooth = FALSE, nspline = NULL, ...) {
if (is.HessMLP(object)) {
object <- HessToSensMLP(object)
}
if (!is.SensMLP(object)) {
if (is.null(fdata)) {
stop("Must be passed fdata to calculate sensitivities of the model")
}
SensMLP <- NeuralSens::SensAnalysisMLP(object,
trData = fdata,
plot = FALSE,
...)
rawSens <- SensMLP$raw_sens
} else if(is.SensMLP(object)){
SensMLP <- object
rawSens <- SensMLP$raw_sens
fdata <- SensMLP$trData
} else {
stop(paste0("Class ", class(object)," is not accepted as object"))
}
if (is.null(date.var)) {
if (any(apply(fdata, 2, function(x){inherits(x,"POSIXct") || inherits(x,"POSIXlt")}))) {
date.var <- fdata[,sapply(fdata, function(x){
inherits(x,"POSIXct") || inherits(x,"POSIXlt")})]
} else {
date.var <- seq_len(dim(rawSens[[1]])[1])
}
}
if (is.null(nspline)) {
nspline <- floor(sqrt(dim(rawSens[[1]])[1]))
}
plot_for_output <- function(rawSens, out, smooth, facet, SensMLP) {
plotdata <- cbind(date.var,as.data.frame(rawSens[[out]]))
plotdata <- reshape2::melt(plotdata,id.vars = names(plotdata)[1])
p <- ggplot2::ggplot(plotdata, ggplot2::aes(x = plotdata[,1], y = plotdata$value,
group = plotdata$variable, color = plotdata$variable)) +
ggplot2::geom_line() +
ggplot2::labs(color = "Inputs") +
ggplot2::xlab("Time") +
ggplot2::ylab(NULL)
if (smooth) p <- p + ggplot2::geom_smooth(method = "lm", color = "blue", formula = y ~ splines::bs(x, nspline), se = FALSE)
if (facet) {
args <- list(...)
outname <- SensMLP$output_name
labsvect <- c()
for(ii in levels(plotdata$variable)) {
labsvect <- c(labsvect, paste0("frac(partialdiff~",outname,",partialdiff~",ii,")"))
}
levels(plotdata$variable) <- labsvect
p <- p + ggplot2::facet_wrap(plotdata$variable~.,
scales = "free_y",
nrow = length(levels(plotdata$variable)),
strip.position = "left",
labeller = ggplot2::label_parsed) +
ggplot2::theme(strip.background = ggplot2::element_blank(),
strip.placement = "outside",
legend.position = "none")
}
print(p)
return(p)
}
plotlist <- list()
for (out in 1:length(rawSens)) {
plotlist[[out]] <- plot_for_output(rawSens, out, smooth, facet, SensMLP)
}
return(invisible(plotlist))
} |
test_that("Password hidden", {
db <- rocker::newDB(verbose = FALSE)
expect_true(is.environment(db$.__enclos_env__))
expect_true(is.environment(db$.__enclos_env__$private))
expect_null(db$.__enclos_env__$private$key)
expect_null(db$.__enclos_env__$private$settings)
db$setupSQLite()
expect_null(db$.__enclos_env__)
expect_null(db$.__enclos_env__$private)
expect_null(db$.__enclos_env__$private$key)
expect_null(db$.__enclos_env__$private$settings)
db$unloadDriver()
expect_true(is.environment(db$.__enclos_env__))
expect_true(is.environment(db$.__enclos_env__$private))
expect_null(db$.__enclos_env__$private$key)
expect_null(db$.__enclos_env__$private$settings)
rm(db)
}) |
context("set_label.data.frame")
test_that(
"cast an error if x is not a data frame",
{
expect_error(
set_label.data.frame(letters, a = "A")
)
}
)
test_that(
"Cast an error if any element in vars is not a column in x",
{
expect_error(
set_label(mtcars,
abc = "ABC")
)
}
)
test_that(
"Cast an error if any element in vars is not an atomic vector",
{
A <- data.frame(abc = rep(NA, 3))
A[[1]][1] <- list(letters)
A[[1]][2] <- list(LETTERS)
A[[1]][3] <- list(months)
expect_error(
set_label(A,
abc = "lists")
)
}
)
test_that(
"set_label works for data frames",
{
expect_silent(set_label(mtcars,
am = "Automatic",
mpg = "Miles per gallon"))
}
)
test_that(
"set_label casts and error when given an unnamed vector",
{
expect_error(
set_label(mtcars, "")
)
}
) |
context("get_hydro")
test_that("get_hydro works", {
vcr::use_cassette("get_hydro_works_single", {
x <- get_hydro(dbkey = "15081", date_min = "2013-01-01",
date_max = "2013-02-02")
})
expect_is(x, "data.frame")
vcr::use_cassette("get_hydro_works_multiple", {
x <- get_hydro(dbkey = c("15081", "15069"), date_min = "2013-01-01",
date_max = "2013-02-02")
})
expect_is(x, "data.frame")
})
vcr::use_cassette("get_hydro_fails", {
test_that("get_hydro fails well", {
expect_error(
get_hydro(dbkey = "15081", date_min = "1980-01-01",
date_max = "1980-02-02"),
"No data found")
})
})
vcr::use_cassette("non-character_dates", {
test_that("non-character dates are handled", {
expect_error(get_hydro(dbkey = "15081", date_min = 1980-01-01,
date_max = "1980-02-02"),
"Enter dates as quote-wrapped character strings in YYYY-MM-DD format")
})
})
test_that("get_hydro retrieves dbkeys on-the-fly", {
vcr::use_cassette("fly_dbykeys_single", {
x <- get_hydro(stationid = "C-54", category = "GW",
freq = "DA", date_min = "1990-01-01", date_max = "1990-02-02",
longest = TRUE)
})
expect_equal(ncol(x), 2)
vcr::use_cassette("fly_dbykeys_multiple", {
x <- get_hydro(stationid = c("C-54", "G-561"),
category = "GW", freq = "DA",
date_min = "1990-01-01", date_max = "1990-02-02",
longest = TRUE)
})
expect_equal(ncol(x), 3)
}) |
library("testthat")
library("arules")
data(Groceries)
itset <- new("itemsets",
items = encode(c('whole milk', 'soda'), itemLabels = Groceries))
supp <- support(itset, Groceries, type = "absolute")
expect_equal(crossTable(Groceries, measure='count')['whole milk', 'soda'],
supp)
expect_equal(crossTable(Groceries, measure='support')['whole milk', 'soda'],
supp / length(Groceries))
expect_equal(crossTable(Groceries, measure='lift')['whole milk', 'soda'],
supp / length(Groceries) / prod(itemFrequency(Groceries)[c('whole milk', 'soda')])) |
"run09" |
Matern52 <- R6::R6Class(classname = "GauPro_kernel_Matern52",
inherit = GauPro_kernel_beta,
public = list(
sqrt5 = sqrt(5),
k = function(x, y=NULL, beta=self$beta, s2=self$s2, params=NULL) {
if (!is.null(params)) {
lenparams <- length(params)
if (self$beta_est) {
beta <- params[1:self$beta_length]
} else {
beta <- self$beta
}
if (self$s2_est) {
logs2 <- params[lenparams]
} else {
logs2 <- self$logs2
}
s2 <- 10^logs2
} else {
if (is.null(beta)) {beta <- self$beta}
if (is.null(s2)) {s2 <- self$s2}
}
theta <- 10^beta
if (is.null(y)) {
if (is.matrix(x)) {
val <- s2 * corr_matern52_matrix_symC(x, theta)
return(val)
} else {
return(s2 * 1)
}
}
if (is.matrix(x) & is.matrix(y)) {
s2 * corr_matern52_matrixC(x, y, theta)
} else if (is.matrix(x) & !is.matrix(y)) {
s2 * corr_matern52_matvecC(x, y, theta)
} else if (is.matrix(y)) {
s2 * corr_matern52_matvecC(y, x, theta)
} else {
self$kone(x, y, theta=theta, s2=s2)
}
},
kone = function(x, y, beta, theta, s2) {
if (missing(theta)) {theta <- 10^beta}
r <- sqrt(sum(theta * (x-y)^2))
t1 <- self$sqrt5 * r
s2 * (1 + t1 + t1^2 / 3) * exp(-t1)
},
dC_dparams = function(params=NULL, X, C_nonug, C, nug) {
n <- nrow(X)
lenparams <- length(params)
if (lenparams > 0) {
if (self$beta_est) {
beta <- params[1:self$beta_length]
} else {
beta <- self$beta
}
if (self$s2_est) {
logs2 <- params[lenparams]
} else {
logs2 <- self$logs2
}
} else {
beta <- self$beta
logs2 <- self$logs2
}
theta <- 10^beta
log10 <- log(10)
s2 <- 10 ^ logs2
if (missing(C_nonug)) {
C_nonug <- self$k(x=X, params=params)
C <- C_nonug + diag(nug*s2, nrow(C_nonug))
}
lenparams_D <- self$beta_length*self$beta_est + self$s2_est
dC_dparams <- array(dim=c(lenparams_D, n, n), data = 0)
if (self$s2_est) {
dC_dparams[lenparams_D,,] <- C * log10
}
if (self$beta_est) {
for (i in seq(1, n-1, 1)) {
for (j in seq(i+1, n, 1)) {
tx2 <- sum(theta * (X[i,]-X[j,])^2)
t1 <- sqrt(5 * tx2)
t3 <- C[i,j] * ((1+2*t1/3)/(1+t1+t1^2/3) - 1) * self$sqrt5 * log10
half_over_sqrttx2 <- .5 / sqrt(tx2)
for (k in 1:length(beta)) {
dt1dbk <- half_over_sqrttx2 * (X[i,k] - X[j,k])^2
dC_dparams[k,i,j] <- t3 * dt1dbk * theta[k]
dC_dparams[k,j,i] <- dC_dparams[k,i,j]
}
}
}
for (i in seq(1, n, 1)) {
for (k in 1:length(beta)) {
dC_dparams[k,i,i] <- 0
}
}
}
return(dC_dparams)
},
dC_dx = function(XX, X, theta, beta=self$beta, s2=self$s2) {
if (missing(theta)) {theta <- 10^beta}
if (!is.matrix(XX)) {stop()}
d <- ncol(XX)
if (ncol(X) != d) {stop()}
n <- nrow(X)
nn <- nrow(XX)
dC_dx <- array(NA, dim=c(nn, d, n))
for (i in 1:nn) {
for (j in 1:d) {
for (k in 1:n) {
r <- sqrt(sum(theta * (XX[i,] - X[k,]) ^ 2))
dC_dx[i, j, k] <- (-5*r/3 - 5/3*self$sqrt5*r^2) * s2 * exp(-self$sqrt5 * r) * theta[j] * (XX[i, j] - X[k, j]) / r
}
}
}
dC_dx
}
)
) |
check_augment_newdata_precedence <- function(aug, model, data, strict = TRUE) {
expect_true(TRUE)
if (!strict)
return(invisible())
if (nrow(data) < 6)
stop(
"Data for checking newdata predence must have at least 6 rows.",
call. = FALSE
)
newdata <- tail(data, 5)
au_data <- aug(model, data = data)
au_newdata <- aug(model, newdata = newdata)
au_data_newdata <- aug(model, data = data, newdata = newdata)
  expect_true(
    isTRUE(all.equal(au_newdata, au_data_newdata)),
    info = "`newdata` should take precedence when both `data` and `newdata` are passed."
  )
  expect_false(
    isTRUE(all.equal(au_data, au_newdata)),
    info = "Augmenting `data` and `newdata` should give different results here."
  )
  expect_false(
    isTRUE(all.equal(au_data, au_data_newdata)),
    info = "`newdata` should take precedence when both `data` and `newdata` are passed."
  )
} |
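# Hedged usage sketch: broom::augment() follows the aug(model, data, newdata)
# contract assumed above; strict = FALSE runs only the trivial check
library(testthat)
fit <- lm(mpg ~ wt, data = mtcars)
check_augment_newdata_precedence(broom::augment, fit, data = mtcars, strict = FALSE)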
test_that("resolve", {
skip_on_cran()
skip_if_offline()
conf <- current_config()
tt <- dirname(dirname(attr(packageDescription("testthat"), "file")))
cache <- list(
package = NULL,
metadata = pkgcache::get_cranlike_metadata_cache(),
installed = make_installed_cache(dirname(tt)))
ref <- paste0("installed::", tt)
res <- synchronise(
resolve_remote_installed(parse_pkg_refs(ref)[[1]], TRUE, conf, cache,
dependencies = "Imports")
)
unun <- function(x) {
attr(x, "unknown_deps") <- NULL
x
}
expect_equal(
unun(as.list(res[c("ref", "type", "direct", "status", "package", "version")])),
list(ref = ref, type = "installed", direct = TRUE, status = "OK",
package = "testthat",
version = as.character(packageVersion("testthat")))
)
expect_true("crayon" %in% attr(res, "unknown_deps"))
expect_false(is.null(res$extra[[1]]$repotype))
})
test_that("download", {
skip_if_offline()
skip_on_cran()
dir.create(tmp <- tempfile())
on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
dir.create(tmp2 <- tempfile())
on.exit(unlink(tmp2, recursive = TRUE), add = TRUE)
tt <- dirname(dirname(attr(packageDescription("testthat"), "file")))
ref <- paste0("installed::", tt)
r <- pkg_plan$new(
ref, library = dirname(tt),
config = list(dependencies = FALSE, cache_dir = tmp))
expect_error(suppressMessages(r$resolve()), NA)
expect_error(suppressMessages(r$download_resolution()), NA)
dl <- r$get_resolution_download()
expect_equal(dl$download_status, "Had")
})
test_that("satisfy", {
expect_true(satisfy_remote_installed())
}) |
DSBNormalizeProtein = function(cell_protein_matrix, empty_drop_matrix, denoise.counts = TRUE,
use.isotype.control = TRUE, isotype.control.name.vec = NULL,
define.pseudocount = FALSE, pseudocount.use, quantile.clipping = FALSE,
quantile.clip = c(0.001, 0.9995), return.stats = FALSE){
a = isotype.control.name.vec
b = rownames(empty_drop_matrix)
c = rownames(cell_protein_matrix)
if (!isTRUE(all.equal(rownames(cell_protein_matrix), rownames(empty_drop_matrix)))){
stopifnot(isTRUE(all.equal(nrow(cell_protein_matrix), nrow(empty_drop_matrix))))
diff = c(setdiff(c,b), setdiff(b,c))
if (length(diff) > 0) {
stop(paste0('rows of cell and background matrices have mis-matching names: \n', diff))
}
    if (length(diff) == 0) {
warning('rows (proteins) of cell_protein_matrix and empty_drop_matrix are not in the same order')
rmatch = match(x = rownames(cell_protein_matrix), table = rownames(empty_drop_matrix) )
empty_drop_matrix = empty_drop_matrix[rmatch, ]
print('reordered empty_drop_matrix rows to match cell_protein_matrix rows')
}
}
if (!is.null(a) & !isTRUE(all(a %in% b)) & !isTRUE(all(a %in% c))){
stop(paste0("some elements of isotype.control.name.vec are not in input data rownames: \n",
'cell_protein_matrix - ', setdiff(a,b), ' \nempty_drop_matrix - ', setdiff(a,c))
)
}
if (isFALSE(denoise.counts)) {
print(paste0("Running step I ambient correction and log transformation, not running step II removal of cell to cell technical noise.",
" Setting use.isotype.control and isotype.control.name.vec to FALSE and NULL"))
use.isotype.control = FALSE
isotype.control.name.vec = NULL
}
iso_detect = rownames(cell_protein_matrix)[grepl('sotype|Iso|iso|control|CTRL|ctrl|Ctrl', rownames(cell_protein_matrix))]
if (isTRUE(use.isotype.control) & is.null(isotype.control.name.vec)) {
stop('if use.isotype.control = TRUE, set isotype.control.name.vec to names of isotype control rows')
if (length(iso_detect) > 0) {
print('potential isotype controls detected: ')
print(iso_detect)
}
}
if (isTRUE(denoise.counts) & isFALSE(use.isotype.control)) {
warning('denoise.counts = TRUE with use.isotype.control = FALSE not recommended if isotype controls are available.\n',
' If data include isotype controls, set `denoise.counts` = TRUE `use.isotype.control` = TRUE\n',
' and set `isotype.control.name.vec` to a vector of isotype control rownames from cell_protein_matrix'
)
if (length(iso_detect) > 0) {
print('potential isotype controls detected: ')
print(iso_detect)
}
}
adt = cell_protein_matrix %>% as.matrix()
adtu = empty_drop_matrix %>% as.matrix()
if(isTRUE(define.pseudocount)) {
adtu_log = log(adtu + pseudocount.use)
adt_log = log(adt + pseudocount.use)
} else {
adtu_log = log(adtu + 10)
adt_log = log(adt + 10)
}
print("correcting ambient protein background noise")
mu_u = apply(adtu_log, 1 , mean)
sd_u = apply(adtu_log, 1 , sd)
norm_adt = apply(adt_log, 2, function(x) (x - mu_u) / sd_u)
if(isTRUE(denoise.counts)){
    print(paste0('calculating dsb technical component for each cell to remove cell to cell technical noise'))
cellwise_background_mean = apply(norm_adt, 2, function(x) {
g = mclust::Mclust(x, G=2, warn = FALSE, verbose = FALSE)
return(g$parameters$mean[1])
})
gc()
if (isTRUE(use.isotype.control)) {
noise_matrix = rbind(norm_adt[isotype.control.name.vec, ], cellwise_background_mean)
get_noise_vector = function(noise_matrix) {
g = stats::prcomp(t(noise_matrix), scale = TRUE)
return(g$x[ ,1])
}
noise_vector = get_noise_vector(noise_matrix)
norm_adt = limma::removeBatchEffect(norm_adt, covariates = noise_vector)
} else {
noise_vector = cellwise_background_mean
norm_adt = limma::removeBatchEffect(norm_adt, covariates = noise_vector)
}
}
if (isTRUE(quantile.clipping)) {
ql = apply(norm_adt, 1, FUN = stats::quantile, quantile.clip[1])
qh = apply(norm_adt, 1, FUN = stats::quantile, quantile.clip[2])
for (i in 1:nrow(norm_adt)) {
norm_adt[i, ] = ifelse(norm_adt[i, ] < ql[i], ql[i], norm_adt[i, ])
norm_adt[i, ] = ifelse(norm_adt[i, ] > qh[i], qh[i], norm_adt[i, ])
}
}
if(isTRUE(return.stats) & isTRUE(denoise.counts)) {
print('returning list; access normalized matrix with x$dsb_normalized_matrix, protein stats list with x$protein_stats')
technical_stats = cbind(t(noise_matrix), dsb_technical_component = noise_vector)
protein_stats = list('background matrix stats' = data.frame(background_mean = mu_u, background_sd = sd_u),
'cell matrix stats' = data.frame(cell_mean = apply(adt_log, 1 , mean), cell_sd = apply(adt_log, 1 , sd)),
                         'dsb normalized stats' = data.frame(dsb_mean = apply(norm_adt, 1 , mean), dsb_sd = apply(norm_adt, 1 , sd))
)
ret_obj = list(
'dsb_normalized_matrix' = norm_adt,
'technical_stats' = technical_stats,
'protein_stats' = protein_stats
)
return(ret_obj)
}
if(isTRUE(return.stats) & isFALSE(denoise.counts)) {
print('returning list; access normalized matrix with x$dsb_normalized_matrix, dsb and protein stats with x$protein_stats')
protein_stats = list('background matrix stats' = data.frame(background_mean = mu_u, background_sd = sd_u),
'cell matrix stats' = data.frame(cell_mean = apply(adt_log, 1 , mean), cell_sd = apply(adt_log, 1 , sd)),
                         'dsb normalized stats' = data.frame(dsb_mean = apply(norm_adt, 1 , mean), dsb_sd = apply(norm_adt, 1 , sd))
)
ret_obj = list(
'dsb_normalized_matrix' = norm_adt,
'protein_stats' = protein_stats
)
return(ret_obj)
}
if(isFALSE(return.stats)) {
return(norm_adt)
}
} |
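# Usage sketch with toy counts (hypothetical names and sizes; real data have
# many more proteins and cells, and the function assumes magrittr's %>% is
# available and that mclust and limma are installed):
set.seed(1)
prots <- c(paste0("prot", 1:8), "isotype_ctrl")
cells <- matrix(rpois(9 * 100, 20), 9, 100, dimnames = list(prots, NULL))
empties <- matrix(rpois(9 * 500, 5), 9, 500, dimnames = list(prots, NULL))
norm_adt_example <- DSBNormalizeProtein(
  cell_protein_matrix = cells, empty_drop_matrix = empties,
  denoise.counts = TRUE, use.isotype.control = TRUE,
  isotype.control.name.vec = "isotype_ctrl")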
library(hadron)
data <- matrix(rnorm(120), ncol = 10)
data[, 3] <- NA
print(data)
cov(data)
jackknife_cov(data)
data <- matrix(rnorm(120), ncol = 10)
data[2, ] <- NA
print(data)
cov(data)
jackknife_cov(data)
cov(data, use = 'complete')
all(cov(data, use = 'complete') == cov(data[complete.cases(data), ]))
jackknife_cov(data, na.rm = TRUE) |
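# Sketch of the jackknife idea behind jackknife_cov(): covariance of the
# sample mean from leave-one-out replicates (hypothetical helper; hadron's
# implementation also handles the NA policies exercised above):
jackknife_cov_mean <- function(data) {
  data <- data[complete.cases(data), , drop = FALSE]
  n <- nrow(data)
  # leave-one-out column means: (n * mean - x_i) / (n - 1)
  loo <- (n * matrix(colMeans(data), n, ncol(data), byrow = TRUE) - data) / (n - 1)
  d <- sweep(loo, 2, colMeans(loo))
  (n - 1) / n * crossprod(d)
}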
library(MixMatrix)
context("Testing matrixmixture")
test_that("Testing bad input", {
set.seed(20180221)
a_mat <- rmatrixnorm(15, mean = matrix(0, nrow = 3, ncol = 4))
b_mat <- rmatrixnorm(15, mean = matrix(2, nrow = 3, ncol = 4))
c_mat <- array(c(a_mat, b_mat), dim = c(3, 4, 30))
prior <- c(.5, .5)
init <- list(
centers = array(c(rep(0, 12), rep(2, 12)), dim = c(3, 4, 2)),
U = array(c(diag(3), diag(3)), dim = c(3, 3, 2)),
V = array(c(diag(4), diag(4)), dim = c(4, 4, 2))
)
expect_error(matrixmixture(c_mat, init, prior = c(.1, .1)))
expect_error(matrixmixture(c_mat, init, prior = 0))
expect_error(matrixmixture(c_mat, init, prior = c(5, .1)))
expect_error(matrixmixture(c_mat, init, prior = c(-1, .1)))
expect_error(matrixmixture(c_mat, init))
expect_error(matrixmixture(list(),
prior = c(.5, .5),
model = "t", nu = 10
))
expect_error(matrixmixture(numeric(0),
prior = c(.5, .5),
model = "t", nu = 10
))
})
test_that("Bad results warn or stop", {
set.seed(20180221)
a_mat <- rmatrixnorm(15, mean = matrix(0, nrow = 3, ncol = 4))
b_mat <- rmatrixnorm(15, mean = matrix(2, nrow = 3, ncol = 4))
c_mat <- array(c(a_mat, b_mat), dim = c(3, 4, 30))
prior <- c(.5, .5)
init <- list(
centers = array(c(rep(0, 12), rep(2, 12)), dim = c(3, 4, 2)),
U = array(c(diag(3), diag(3)), dim = c(3, 3, 2)),
V = array(c(diag(4), diag(4)), dim = c(4, 4, 2))
)
expect_warning(capture.output(matrixmixture(c_mat, init,
prior = c(.5, .5),
iter = 1, verbose = 100
),
type = "output"
))
expect_warning(matrixmixture(c_mat, init,
prior = 2,
model = "t", nu = 10, iter = 1
))
expect_warning(matrixmixture(c_mat,
K = 2, model = "t",
nu = 10, iter = 1
))
})
test_that("Mean restrictions work", {
test_allequal <- function(x) all(abs(c(x) - c(x)[1]) < 1e-6)
set.seed(20180221)
a_mat <- rmatrixnorm(15, mean = matrix(0, nrow = 3, ncol = 4))
b_mat <- rmatrixnorm(15, mean = matrix(1, nrow = 3, ncol = 4))
c_mat <- array(c(a_mat, b_mat), dim = c(3, 4, 30))
prior <- c(.5, .5)
expect_true(test_allequal(c(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = TRUE,
row.mean = TRUE
)$centers[, , 1])))
expect_true(test_allequal(c(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = FALSE,
row.mean = TRUE
)$centers[1, , 1])))
expect_true(test_allequal(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = TRUE,
row.mean = FALSE
)$centers[, 1, 1]))
expect_true(!test_allequal(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = FALSE,
row.mean = FALSE
)$centers[1, , 1]))
expect_true(test_allequal(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = TRUE,
row.mean = TRUE, model = "t", nu = 5
)$centers[, , 1]))
expect_true(test_allequal(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = FALSE,
row.mean = TRUE, model = "t", nu = 5
)$centers[1, , 1]))
expect_true(test_allequal(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = TRUE,
row.mean = FALSE, model = "t", nu = 5
)$centers[, 1, 1]))
expect_true(!test_allequal(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = FALSE,
row.mean = FALSE, model = "t", nu = 5
)$centers[, 1, 1]))
llrcmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = TRUE, row.mean = TRUE
))
llrmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = FALSE, row.mean = TRUE
))
llcmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = TRUE, row.mean = FALSE
))
llmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5),
col.mean = FALSE, row.mean = FALSE
))
lltrcmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = TRUE,
row.mean = TRUE, model = "t", nu = 5
))
lltrmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = FALSE,
row.mean = TRUE, model = "t", nu = 5
))
lltcmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = TRUE,
row.mean = FALSE, model = "t", nu = 5
))
lltmix <- logLik(matrixmixture(c_mat,
prior = c(.5, .5), col.mean = FALSE,
row.mean = FALSE, model = "t", nu = 5
))
expect_equal(attributes(llrcmix)$df, attributes(lltrcmix)$df)
expect_equal(attributes(llmix)$df, attributes(lltmix)$df)
expect_equal(attributes(llcmix)$df, attributes(lltcmix)$df)
expect_equal(attributes(llrmix)$df, attributes(lltrmix)$df)
expect_lt(attributes(llrcmix)$df, attributes(llcmix)$df)
expect_lt(attributes(llcmix)$df, attributes(llmix)$df)
expect_lt(attributes(llrmix)$df, attributes(llmix)$df)
})
test_that("Predict Mix Model works", {
set.seed(20180221)
a_mat <- rmatrixnorm(15, mean = matrix(0, nrow = 3, ncol = 4))
b_mat <- rmatrixnorm(15, mean = matrix(1, nrow = 3, ncol = 4))
c_mat <- array(c(a_mat, b_mat), dim = c(3, 4, 30))
prior <- c(.5, .5)
mix <- matrixmixture(c_mat, prior = c(.5, .5))
mixt <- matrixmixture(c_mat, prior = c(.5, .5), model = "t", nu = 5)
expect_error(
predict(mix, newdata = matrix(0, nrow = 3, ncol = 2)),
"dimension"
)
expect_error(
predict(mix, newdata = (matrix(0, nrow = 2, ncol = 3))),
"dimension"
)
expect_equal(sum(predict(mix, newdata = matrix(
0,
nrow = 3, ncol = 4
))$posterior), 1)
expect_equal(sum(predict(mix, prior = c(.7, .3))$posterior[1, ]), 1)
expect_equal(sum(predict(mixt, newdata = matrix(
0,
nrow = 3, ncol = 4
))$posterior), 1)
expect_equal(sum(predict(mixt, prior = c(.7, .3))$posterior[1, ]), 1)
})
test_that("Init function works", {
set.seed(20180221)
a_mat <- rmatrixnorm(15, mean = matrix(0, nrow = 3, ncol = 4))
b_mat <- rmatrixnorm(15, mean = matrix(1, nrow = 3, ncol = 4))
c_mat <- array(c(a_mat, b_mat), dim = c(3, 4, 30))
prior <- c(.5, .5)
testinit <- init_matrixmixture(c_mat,
K = 2, centers = matrix(7, 3, 4),
U = 4 * diag(3), V = 3 * diag(4)
)
testinit_two <- init_matrixmixture(c_mat,
K = 2,
init = list(
centers = matrix(7, 3, 4),
U = 4 * diag(3),
V = 3 * diag(4)
)
)
expect_equal(testinit$U[1, 1, 1], 4)
expect_equal(testinit$U[2, 2, 2], 4)
expect_equal(testinit$V[2, 2, 2], 3)
expect_equal(testinit$centers[1, 1, 2], 7)
expect_equal(testinit_two$U[1, 1, 1], 4)
expect_equal(testinit_two$U[2, 2, 2], 4)
  expect_equal(testinit_two$V[2, 2, 2], 3)
expect_equal(testinit_two$centers[1, 1, 2], 7)
}) |
library(testthat)
context('Spec v1.1, delimiters')
test_that( "Pair Behavior", {
template <- "{{=<% %>=}}(<%text%>)"
data <- list(text = "Hey!")
str <- whisker.render(template, data=data)
expect_equal(str, "(Hey!)", label=deparse(str), info="The equals sign (used on both sides) should permit delimiter changes.")
})
test_that( "Special Characters", {
template <- "({{=[ ]=}}[text])"
data <- list(text = "It worked!")
str <- whisker.render(template, data=data)
expect_equal(str, "(It worked!)", label=deparse(str), info="Characters with special meaning regexen should be valid delimiters.")
})
test_that( "Sections", {
template <- "[\n{{
data <- list(section = TRUE, data = "I got interpolated.")
str <- whisker.render(template, data=data)
expect_equal(str, "[\n I got interpolated.\n |data|\n\n {{data}}\n I got interpolated.\n]\n", label=deparse(str), info="Delimiters set outside sections should persist.")
})
test_that( "Inverted Sections", {
template <- "[\n{{^section}}\n {{data}}\n |data|\n{{/section}}\n\n{{= | | =}}\n|^section|\n {{data}}\n |data|\n|/section|\n]\n"
data <- list(section = FALSE, data = "I got interpolated.")
str <- whisker.render(template, data=data)
expect_equal(str, "[\n I got interpolated.\n |data|\n\n {{data}}\n I got interpolated.\n]\n", label=deparse(str), info="Delimiters set outside inverted sections should persist.")
})
test_that( "Partial Inheritence", {
template <- "[ {{>include}} ]\n{{= | | =}}\n[ |>include| ]\n"
data <- list(value = "yes")
partials <- list(include = ".{{value}}.")
str <- whisker.render(template, partials=partials, data=data)
expect_equal(str, "[ .yes. ]\n[ .yes. ]\n", label=deparse(str), info="Delimiters set in a parent template should not affect a partial.")
})
test_that( "Post-Partial Behavior", {
template <- "[ {{>include}} ]\n[ .{{value}}. .|value|. ]\n"
data <- list(value = "yes")
partials <- list(include = ".{{value}}. {{= | | =}} .|value|.")
str <- whisker.render(template, partials=partials, data=data)
expect_equal(str, "[ .yes. .yes. ]\n[ .yes. .|value|. ]\n", label=deparse(str), info="Delimiters set in a partial should not affect the parent template.")
})
test_that( "Surrounding Whitespace", {
template <- "| {{=@ @=}} |"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "| |", label=deparse(str), info="Surrounding whitespace should be left untouched.")
})
test_that( "Outlying Whitespace (Inline)", {
template <- " | {{=@ @=}}\n"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, " | \n", label=deparse(str), info="Whitespace should be left untouched.")
})
test_that( "Standalone Tag", {
template <- "Begin.\n{{=@ @=}}\nEnd.\n"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "Begin.\nEnd.\n", label=deparse(str), info="Standalone lines should be removed from the template.")
})
test_that( "Indented Standalone Tag", {
template <- "Begin.\n {{=@ @=}}\nEnd.\n"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "Begin.\nEnd.\n", label=deparse(str), info="Indented standalone lines should be removed from the template.")
})
test_that( "Standalone Line Endings", {
template <- "|\r\n{{= @ @ =}}\r\n|"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "|\r\n|", label=deparse(str), info="\"\\r\\n\" should be considered a newline for standalone tags.")
})
test_that( "Standalone Without Previous Line", {
template <- " {{=@ @=}}\n="
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "=", label=deparse(str), info="Standalone tags should not require a newline to precede them.")
})
test_that( "Standalone Without Newline", {
template <- "=\n {{=@ @=}}"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "=\n", label=deparse(str), info="Standalone tags should not require a newline to follow them.")
})
test_that( "Pair with Padding", {
template <- "|{{= @ @ =}}|"
data <- list()
str <- whisker.render(template, data=data)
expect_equal(str, "||", label=deparse(str), info="Superfluous in-tag whitespace should be ignored.")
}) |
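# Minimal delimiter-change example using the same whisker.render() API
# exercised by the spec tests above:
template <- "{{=<% %>=}}Hello, <%name%>!"
whisker.render(template, data = list(name = "world"))  # "Hello, world!"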
rvmedian <- function (x) {
UseMethod("rvmedian")
}
rvmedian.rv <- function (x) {
rvsimapply(x, median, na.rm=TRUE)
}
rvmedian.rvsummary <- function (x) {
rvquantile(x, probs=0.50)
} |
require(rgdal)
library(raster)
library(sp)
library(RgoogleMaps)
library(maptools)
library(ggplot2)
library(car)
library(spatstat)  # provides disc(), used below
calib_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry"
calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T)
coords<-data.frame(calib_infile$easting,calib_infile$northing)
Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
Sr1_4google <- spTransform(Sr1,epsg4326String)
wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_"))
Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef)
writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML")
disturbance_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry"
disturbance_infile <-read.csv(file.path(disturbance_inpath,"Cheas_coordinates_disturbance_year.csv"), sep=",", header=T)
disturbance_coords<-data.frame(cbind(-1*disturbance_infile$dec_lon,disturbance_infile$dec_lat))
dist_df<-data.frame(disturbance_infile$distyr)
coordinates(dist_df)<-disturbance_coords
disturbance_Sr1<- SpatialPoints(dist_df,CRS(as.character(NA)))
proj4string(disturbance_Sr1) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
writeOGR(dist_df, layer=1, "landtrendr_disturbances.kml", driver="KML")
lakes <-file.path("/Users/hardimanb/Desktop/data.remote/plot_coords/Lakes.kml")
lake_coord_list<-getKMLcoordinates(lakes,ignoreAltitude=FALSE)
lake_pts<-data.frame()
for(i in 1:length(lake_coord_list)){
lake_pts<-rbind(lake_pts,lake_coord_list[[i]])
}
lake_pts<-lake_pts[,1:2]
lake_coords<-coordinates(lake_pts)
lake_Sr1<- SpatialPoints(lake_coords,CRS(as.character(NA)))
proj4string(lake_Sr1) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma")
file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t")
date.time<-as.vector(substr(file.info$scndate,1,8))
col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_")))
pol_bands<-c("HH", "HV")
numfiles<-length(date.time)
lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles)
disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles)
colnames(lake_extracted)<-col_names
colnames(disturbance_extracted)<-col_names
colnames(disturbance_extracted_40m)<-col_names
extracted_7m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
extracted_40m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
colnames(extracted_7m)<-col_names
colnames(extracted)<-col_names
colnames(extracted_40m)<-col_names
for(i in 1:numfiles){
for(j in 1:2){
filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern=".tif" ,recursive=F))
inpath<-file.path(palsar_inpath,pol_bands[j],filelist[i])
rast<-raster(inpath)
disturbance_data<-extract(rast, disturbance_Sr1, method="simple",buffer=40, small=T, fun=mean)
disturbance_cols<-seq(j,ncol(disturbance_extracted_40m),by=2)
disturbance_extracted_40m[,disturbance_cols[i]]<-disturbance_data
print(paste("i=",i,sep=""))
print(paste("j=",j,sep=""))
}
}
write.table(extracted,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T)
write.table(lake_extracted,file="/Users/hardimanb/Desktop/data.remote/output/data/lake_extracted.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T)
write.table(disturbance_extracted_40m,file="/Users/hardimanb/Desktop/data.remote/output/data/disturbance_extracted_40m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T)
write.table(extracted_7m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_7m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T)
write.table(extracted_40m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T)
extracted <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted.csv",sep="\t", header=T)
lake_extracted <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/lake_extracted.csv",sep="\t", header=T)
disturbance_extracted <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/disturbance_extracted.csv",sep="\t", header=T)
sd_10m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
sd_20m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
sd_40m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
sd_60m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
sd_80m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles)
colnames(sd_10m_extracted)<-col_names
colnames(sd_20m_extracted)<-col_names
colnames(sd_40m_extracted)<-col_names
colnames(sd_60m_extracted)<-col_names
colnames(sd_80m_extracted)<-col_names
coords<-Sr1@coords
for(i in 1:numfiles){
for(j in 1:2){
for(k in 1:nrow(coords)){
filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern=".tif" ,recursive=F))
inpath<-file.path(palsar_inpath,pol_bands[j],filelist[i])
rast<-raster(inpath)
if(as.numeric(substr(projection(rast),17,18)) == as.numeric(substr(projection(Sr1),17,18))){
radius<-10
buffext<-as.vector(disc(radius=radius, centre=coords[k,]))
ext<-extent(c(buffext[[2]],buffext[[3]]))
cellnums<-cellsFromExtent(rast,ext)
cols<-seq(j,ncol(sd_10m_extracted),by=2)
sd_10m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))
radius<-20
buffext<-as.vector(disc(radius=radius, centre=coords[k,]))
ext<-extent(c(buffext[[2]],buffext[[3]]))
cellnums<-cellsFromExtent(rast,ext)
cols<-seq(j,ncol(sd_20m_extracted),by=2)
sd_20m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))
radius<-40
buffext<-as.vector(disc(radius=radius, centre=coords[k,]))
ext<-extent(c(buffext[[2]],buffext[[3]]))
cellnums<-cellsFromExtent(rast,ext)
cols<-seq(j,ncol(sd_40m_extracted),by=2)
sd_40m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))
radius<-60
buffext<-as.vector(disc(radius=radius, centre=coords[k,]))
ext<-extent(c(buffext[[2]],buffext[[3]]))
cellnums<-cellsFromExtent(rast,ext)
cols<-seq(j,ncol(sd_60m_extracted),by=2)
sd_60m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))
radius<-80
buffext<-as.vector(disc(radius=radius, centre=coords[k,]))
ext<-extent(c(buffext[[2]],buffext[[3]]))
cellnums<-cellsFromExtent(rast,ext)
cols<-seq(j,ncol(sd_80m_extracted),by=2)
sd_80m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))
print(paste("i=",i,sep=""))
print(paste("j=",j,sep=""))
print(paste("k=",k,sep=""))
}
}
}
}
par(mfrow=c(1,1))
for(i in 1:nrow(coords)){
plot(c(10,20,40,60,80),c(sd_10m_extracted[i,1],sd_20m_extracted[i,1],sd_40m_extracted[i,1], sd_60m_extracted[i,1], sd_80m_extracted[i,1]), xlim=c(10,80),ylim=c(0,3), xlab="plot radius (m)",ylab="STDEV of extracted PALSAR returns (HH, gamma (dB))",type="n")
lines(c(10,20,40,60,80),c(sd_10m_extracted[i,1],sd_20m_extracted[i,1],sd_40m_extracted[i,1], sd_60m_extracted[i,1], sd_80m_extracted[i,1]), type="b")
par(new=TRUE)
}
plot(sd_10m_extracted[,1],sd_20m_extracted[,1], xlim=c(0,2), ylim=c(0,2))
plot(sd_20m_extracted[,1],sd_40m_extracted[,1], xlim=c(0,2), ylim=c(0,2))
plot(sd_10m_extracted[,1],sd_40m_extracted[,1], xlim=c(0,2), ylim=c(0,2))
extracted <- extracted[ , !apply(is.na(extracted), 2, all)]
lake_extracted <- lake_extracted[ , !apply(is.na(lake_extracted), 2, all)]
disturbance_extracted <- disturbance_extracted[ , !apply(is.na(disturbance_extracted), 2, all)]
head(extracted)
head(lake_extracted)
head(disturbance_extracted)
extracted_7m
extracted
extracted_40m
sd_10m_extracted
sd_20m_extracted
sd_40m_extracted
odds<-seq(1,ncol(extracted),by=2)
evens<-seq(2,ncol(extracted),by=2)
HHscn.dates<-as.Date(substr(col_names[odds],1,8),"%Y%m%d")
HVscn.dates<-as.Date(substr(col_names[evens],1,8),"%Y%m%d")
HH_wlef<-extracted[,odds]
HV_wlef<-extracted[,evens]
par(mfrow=c(2,1))
boxplot(HV_wlef,ylab="HV_gamma",main='WLEF_plots (n=609)',xaxt="n")
axis(1, at=seq(1, ncol(HV_wlef), by=1), labels = F)
text(seq(1, ncol(HV_wlef), by=1),par("usr")[3]-0.02,labels = HVscn.dates, srt = 45, pos = 1, xpd = TRUE)
boxplot(HH_wlef, ylab="HH_gamma", xaxt="n")
axis(1, at=seq(1, ncol(HH_wlef), by=1), labels = F)
text(seq(1, ncol(HH_wlef), by=1),par("usr")[3]-0.15,labels = HHscn.dates, srt = 45, pos = 1, xpd = TRUE)
HH_lakes<-lake_extracted[,odds]
HV_lakes<-lake_extracted[,evens]
par(mfrow=c(2,1))
boxplot(HV_lakes,ylab="HV_gamma",main='lakes (n=25)',xaxt="n")
axis(1, at=seq(1, 12, by=1), labels = F)
text(seq(1, 12, by=1),par("usr")[3]-0.2,labels = HVscn.dates, srt = 45, pos = 1, xpd = TRUE)
boxplot(HH_lakes, ylab="HH_gamma", xaxt="n")
axis(1, at=seq(1, 12, by=1), labels = F)
text(seq(1, 12, by=1),par("usr")[3]-0.2,labels = HHscn.dates, srt = 45, pos = 1, xpd = TRUE)
dist_odds<-seq(1,ncol(disturbance_extracted),by=2)
dist_evens<-seq(2,ncol(disturbance_extracted),by=2)
HH_disturb<-disturbance_extracted[,dist_odds]
HV_disturb<-disturbance_extracted[,dist_evens]
par(mfrow=c(2,1))
boxplot(HV_disturb,ylab="HV_gamma",main='LandTrendr-Disturbance Plots (n=?)',xaxt="n")
axis(1, at=seq(1, ncol(HV_disturb), by=1), labels = F)
text(seq(0, ncol(HV_disturb)-1, by=1),par("usr")[3]-0.02,labels = HVscn.dates, srt = 45, pos = 1, xpd = TRUE)
boxplot(HH_disturb, ylab="HH_gamma", xaxt="n")
axis(1, at=seq(1, ncol(HH_disturb), by=1), labels = F)
text(seq(0, ncol(HH_disturb)-1, by=1),par("usr")[3]-0.075,labels = HHscn.dates, srt = 45, pos = 1, xpd = TRUE)
wlef_abg<-read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep="\t", header=T)
HVcol_names<-col_names[evens]
HHcol_names<-col_names[odds]
par(mfrow=c(3,length(odds)/3))
for(i in 1:ncol(extracted)){
  if(i%%2==0){
    plot(wlef_abg$ABG_biomass,extracted[,i], ylim=c(0,0.18), xlab="ABG-biomass", ylab="HV", main=col_names[i])
    par(new=F)
  }
}
par(mfrow=c(3,length(odds)/3))
for(i in 1:ncol(extracted)){
if(i%%2!=0){
plot(wlef_abg$ABG_biomass,extracted[,i], ylim=c(0,1), xlab="ABG-biomass", ylab="HH", main=col_names[i])
par(new=F)
}
}
noise <- colMeans(lake_extracted,na.rm=TRUE,1)
signal_extracted<-matrix(NA,nrow(extracted),ncol(extracted))
colnames(signal_extracted)<-colnames(extracted)
for(i in 1:ncol(extracted)){
signal_extracted[,i]<-extracted[,i]-noise[i]
}
HH_signal<-signal_extracted[,odds]
HV_signal<-signal_extracted[,evens]
par(mfrow=c(2,1))
boxplot(HV_signal,ylab="HV_gamma",main='Corrected WLEF returns (n=609 plots)',xaxt="n")
axis(1, at=seq(1, 12, by=1), labels = F)
text(seq(1, 12, by=1),par("usr")[3]-0.025,labels = HVscn.dates, srt = 45, pos = 1, xpd = TRUE)
boxplot(HH_signal, ylab="HH_gamma", xaxt="n")
axis(1, at=seq(1, 12, by=1), labels = F)
text(seq(1, 12, by=1),par("usr")[3]-0.2,labels = HHscn.dates, srt = 45, pos = 1, xpd = TRUE)
HVcol_names<-col_names[evens]
HHcol_names<-col_names[odds]
par(mfrow=c(2,ncol(HV_signal)/2))
for(i in 1:ncol(HV_signal)){
plot(wlef_abg$ABG_biomass,HV_signal[,i], ylim=c(0,0.2),xlab="ABG-biomass", ylab="HV_signal", main=HVcol_names[i])
par(new=F)
}
par(mfrow=c(2,ncol(HH_signal)/2))
for(i in 1:ncol(HH_signal)){
plot(wlef_abg$ABG_biomass,HH_signal[,i], ylim=c(0,1),xlab="ABG-biomass", ylab="HH_signal", main=HHcol_names[i])
par(new=F)
}
par(mfrow=c(1,2))
plot(wlef_abg$ABG_biomass,HH_signal[,1], ylim=c(0,1),xlab="ABG-biomass", ylab="HV_signal", main=HHcol_names[1])
plot(wlef_abg$ABG_biomass,HV_signal[,1], ylim=c(0,0.2),xlab="ABG-biomass", ylab="HH_signal", main=HVcol_names[1])
par(mfrow=c(1,2))
scatter.smooth(wlef_abg$ABG_biomass,HH_signal[,1],col="
scatter.smooth(wlef_abg$ABG_biomass,HV_signal[,1],col="
k<-100
HVmax<-.07
sd<-sd(HV_signal[,1])
params<-c(k,HVmax,sd)
y<-HV_signal[,1]
x<-wlef_abg$ABG_biomass
sel = which(x>0)
x = x[sel];y=y[sel]
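# ll.monod() and ll.monod2() are called below but never defined in this
# script; minimal sketches are assumed here (Monod/Michaelis-Menten mean with
# Gaussian errors), matching the parameter order of the optim() calls and the
# prediction lines drawn afterwards:
ll.monod <- function(params, x, y) {
  mu <- params[2] * x / (x + params[1])            # half-saturation k, asymptote HVmax
  -sum(dnorm(y, mean = mu, sd = params[3], log = TRUE))
}
ll.monod2 <- function(params, x, y) {
  mu <- params[2] * x / (x + params[1]) + params[3]  # adds an intercept term
  -sum(dnorm(y, mean = mu, sd = params[4], log = TRUE))
}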
ll.monod(params,x,y)
fit1 = optim(par=params,ll.monod,x=x,y=y)
fit1
params = fit1$par
plot(x,y,ylim=c(0,max(y)))
xseq = seq(min(x),max(x),length=1000)
lines(xseq,params[2]*xseq/(xseq+params[1]),col=2,lwd=3)
lines(cbind(biomass,HVvals),col=3,lwd=3)
params2 = c(50,0.7,0.2,1)
fit2 = optim(par=params2,ll.monod2,x=x,y=y)
fit2
params2 = fit2$par
lines(xseq,params2[2]*xseq/(xseq+params2[1])+params2[3],col=4,lwd=3)
lines(lowess(x,y),col=5,lwd=3)
bin.size = 25
xbin = seq(0,450,bin.size)
bin = findInterval(x,xbin)
bin.mu = tapply(y,bin,mean,na.rm=TRUE)
bin.sd = tapply(y,bin,sd,na.rm=TRUE)
points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu,col="orange",cex=3,pch=18)
points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu+bin.sd,col="orange",cex=3,pch="_")
points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu-bin.sd,col="orange",cex=3,pch="_")
biomass<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$x
HVvals<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$y
par(mfrow=c(1,1))
plot(cbind(biomass,HVvals))
plot(loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1]))
par(mfrow=c(2,ncol(HH_signal)/2))
for(i in 1:ncol(HH_signal)){
scatter.smooth(wlef_abg$ABG_biomass,HH_signal[,i],ylim=c(0,1),col="
par(new=F)
}
x<-loess.smooth(wlef_abg$ABG_biomass,HH_signal[,i])$x
y<-loess.smooth(wlef_abg$ABG_biomass,HH_signal[,i])$y
par(mfrow=c(2,ncol(HV_signal)/2))
for(i in 1:ncol(HV_signal)){
scatter.smooth(wlef_abg$ABG_biomass,HV_signal[,i],ylim=c(0,0.2),col="
par(new=F)
}
x<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,i])$x
y<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,i])$y
disturbance_signal<-matrix(NA,nrow(disturbance_extracted),ncol(disturbance_extracted))
colnames(disturbance_signal)<-colnames(disturbance_extracted)
HH_noise<-mean(noise[seq(1,length(noise),by=2)])
HV_noise<-mean(noise[seq(2,length(noise),by=2)])
noise_constant<-c(HH_noise,HV_noise)
for(i in 1:ncol(disturbance_extracted)){
if(i%%2==0){
disturbance_signal[,i]<-disturbance_extracted[,i]-noise_constant[1]
}else{
disturbance_signal[,i]<-disturbance_extracted[,i]-noise_constant[2]
}
}
HH_disturb<-disturbance_signal[,dist_odds]
HV_disturb<-disturbance_signal[,dist_evens]
scn.dates<-as.Date(substr(colnames(disturbance_extracted),2,9),"%Y%m%d")
scn.yr<-substr(colnames(disturbance_extracted),2,5)
scn.yr<-as.numeric(scn.yr[dist_odds])
colnames(HH_disturb)<-as.character(scn.dates[dist_odds])
colnames(HV_disturb)<-as.character(scn.dates[dist_evens])
disturbance_ages<-matrix(NA,nrow(HH_disturb),length(scn.yr))
colnames(disturbance_ages)<-as.character(scn.dates[dist_evens])
for(i in 1:length(scn.yr)){
disturbance_ages[,i]<-scn.yr[i]-disturbance_infile$distyr
}
disturbance_ages[is.na(HH_disturb)]=NA
par(mfrow=c(1,1))
for(i in 1:ncol(HH_disturb)){
plot(disturbance_ages[,i]>0, HH_disturb[,i], pch=i)
par(new=T)
}
ltzero<-HH_disturb[disturbance_ages[,1]<=0,1]
ltfive<-HH_disturb[disturbance_ages[,1]>0 & disturbance_ages[,1]<=5,1]
ltten<-HH_disturb[disturbance_ages[,1]>5 & disturbance_ages[,1]<=10,1]
ltfifteen<-HH_disturb[disturbance_ages[,1]>10 & disturbance_ages[,1]<=15,1]
lttwenty<-HH_disturb[disturbance_ages[,1]>15 & disturbance_ages[,1]<=20,1]
lttwentyfive<-HH_disturb[disturbance_ages[,1]>20 & disturbance_ages[,1]<=25,1]
n <- max(length(ltzero), length(ltfive), length(ltten) , length(ltfifteen) , length(lttwenty), length(lttwentyfive))
length(ltzero) <- n
length(ltfive) <- n
length(ltten) <- n
length(ltfifteen) <- n
length(lttwenty) <- n
length(lttwentyfive) <- n
binned<-cbind(ltfive,ltten,ltfifteen,lttwenty,lttwentyfive)
par(mfrow=c(1,1))
boxplot(binned)
par(mfrow=c(1,1))
plot(c(5,10,15,20,25),colMeans(binned,na.rm=TRUE))  # original plotted 6 x-values against the 5-column matrix; bin means assumed
neg<-mean(HH_disturb[disturbance_ages<=0],na.rm=T)
five<-mean(HH_disturb[disturbance_ages>0 & disturbance_ages<=5],na.rm=T)
ten<-mean(HH_disturb[disturbance_ages>5 & disturbance_ages<=10],na.rm=T)
fif<-mean(HH_disturb[disturbance_ages>10 & disturbance_ages<=15],na.rm=T)
twen<-mean(HH_disturb[disturbance_ages>15 & disturbance_ages<=20],na.rm=T)
twenfi<-mean(HH_disturb[disturbance_ages>20 & disturbance_ages<=25],na.rm=T)
binned<-cbind(c(0,5,10,15,20,25),c(neg,five,ten,fif,twen,twenfi))
par(mfrow=c(1,1))
plot(binned[,1],binned[,2], ylim=c(0,.2))
scatter.smooth(disturbance_ages[disturbance_ages>0 & disturbance_ages<=5],HH_disturb[disturbance_ages>0 & disturbance_ages<=5],col="
scatter.smooth(disturbance_ages[disturbance_ages>5 & disturbance_ages<=10],HH_disturb[disturbance_ages>5 & disturbance_ages<=10],col="
scatter.smooth(disturbance_ages[disturbance_ages>10 & disturbance_ages<=15],HH_disturb[disturbance_ages>10 & disturbance_ages<=15],col="
plot(disturbance_infile$distyr,HH_disturb[,1])
boxplot(HH_disturb, xlab="Time since disturbance (years)",ylab="HH_gamma", xaxt="n")
axis(1, at=seq(1, length(age)-1, by=1), labels = F)
text(seq(min(age), max(age), by=1),par("usr")[3]-0.01,labels = age, srt = 0, pos = 1, xpd = TRUE)
Boxplot(HH_disturb,disturbance_ages,labels=NULL)
Boxplot(HV_disturb,disturbance_ages,labels=NULL)
par(mfrow=c(1,1))
for(i in 1:ncol(HH_disturb)){  # loop header was missing in source; reconstructed from the parallel loops below
  x<-disturbance_ages[!is.na(HH_disturb[,i]),i]
  y<-HH_disturb[!is.na(HH_disturb[,i]),i]
  if(i==1){
    plot(x,y, xlab="Disturbance Age", ylab="HH",main="PALSAR returns from disturbed plots", pch=i)
  }
  else{plot(x,y,axes=F, xlab="",ylab="",pch=i)
  }
  par(new=T)
}
par(mfrow=c(1,2))
par(mfg=c(2,1))
for(i in 1:ncol(HH_disturb)){
if(i==1){plot(disturbance_ages[!is.na(HH_disturb[,i]),i],HH_disturb[!is.na(HH_disturb[,i]),i], xlab="Time since disturbance", ylab="HH",main="PALSAR returns from disturbed plots", pch=i)
}
else{plot(disturbance_ages[!is.na(HH_disturb[,i]),i],HH_disturb[!is.na(HH_disturb[,i]),i],axes=F, xlab="",ylab="",pch=i)
}
par(new=T)
}
par(mfg=c(1,2))
for(i in 1:ncol(HV_disturb)){
if(i==1){plot(disturbance_ages[!is.na(HV_disturb[,i]),i],HV_disturb[!is.na(HV_disturb[,i]),i], xlab="Time since disturbance", ylab="HV",main="PALSAR returns from disturbed plots", pch=i)
}
else{plot(disturbance_ages[!is.na(HV_disturb[,i]),i],HV_disturb[!is.na(HV_disturb[,i]),i],axes=F, xlab="",ylab="",pch=i)
}
par(new=T)
}
mean(HV_disturb[disturbance_ages[,1]<0,1])
plot(disturbance_ages[!is.na(HV_disturb[,1])],HV_disturb[!is.na(HV_disturb[,1]),1], xlab="Disturbance Date", ylab="HV",main=paste("scn date",scn.dates[1],sep=" "))
lm_data<-cbind(wlef_abg$ABG_biomass,extracted)
summary(lm(wlef_abg$ABG_biomass ~ extracted[,1]+extracted[,2]))
for(i in 1:nrow(extracted)){  # 'HH'/'HV' were undefined in source; assumed to be HH_wlef/HV_wlef from above
  plot(as.numeric(HH_wlef[i,]),NULL, axes=F,ylim=c(0,max(HH_wlef)), main="HH",type="n")
  lines(as.numeric(HH_wlef[i,]),NULL,'b')
  par(new=T)
}
par(mfrow=c(1,1), new=F)
for(i in 1:nrow(extracted)){
  plot(as.numeric(HV_wlef[i,]),NULL, axes=F,ylim=c(0,max(HV_wlef)), main="HV", type="n")
  lines(as.numeric(HV_wlef[i,]),NULL,'b')
  par(new=T)
}
par(mfrow=c(2,1))
plot(extracted[,1],extracted[,3], xlim=c(0,.4), ylim=c(0,.6),xlab="2007 HH gamma", ylab="2010 HH gamma",main="2007 vs 2010 WLEF plots")
abline(0,1,col="red")
plot(extracted[,2],extracted[,4], xlab="2007 HV gamma", ylab="2010 HV gamma")
abline(0,1,col="red")
par(mfrow=c(2,1))
plot(calib_infile$ANPPW,extracted[,1], xlab="ABG-biomass", ylab="HH")  # 'infile' was undefined; calib_infile assumed
plot(calib_infile$ANPPW,extracted[,2], xlab="ABG-biomass", ylab="HV")
seg.glm.fit.boot<-function(y, XREG, Z, PSI, w, offs, opz, n.boot=10, size.boot=NULL, jt=FALSE,
nonParam=TRUE, random=FALSE, break.boot=n.boot){
extract.psi<-function(lista){
dev.values<-lista[[1]][-1]
psi.values<-lista[[2]][-1]
dev.ok<-min(dev.values)
id.dev.ok<-which.min(dev.values)
if(is.list(psi.values)) psi.values<-matrix(unlist(psi.values),
nrow=length(dev.values), byrow=TRUE)
if(!is.matrix(psi.values)) psi.values<-matrix(psi.values)
psi.ok<-psi.values[id.dev.ok,]
r<-list(dev.no.gap=dev.ok, psi=psi.ok)
r
}
if(!nonParam){
nonParam<-TRUE
warning("`nonParam' set to TRUE for segmented glm..", call.=FALSE)
}
visualBoot<-opz$visualBoot
opz.boot<-opz
opz.boot$pow=c(1,1)
opz1<-opz
opz1$it.max <-1
n<-length(y)
o0<-try(suppressWarnings(seg.glm.fit(y, XREG, Z, PSI, w, offs, opz)), silent=TRUE)
rangeZ <- apply(Z, 2, range)
if(!is.list(o0)) {
o0<- suppressWarnings(seg.glm.fit(y, XREG, Z, PSI, w, offs, opz, return.all.sol=TRUE))
o0<-extract.psi(o0)
if(!nonParam) {warning("using nonparametric boot");nonParam<-TRUE}
}
if(is.list(o0)){
est.psi00<-est.psi0<-o0$psi
ss00<-o0$dev.no.gap
if(!nonParam) fitted.ok<-fitted(o0)
} else {
if(!nonParam) stop("semiparametric boot requires reasonable fitted values. try a different psi or use nonparam boot")
if(random) {
est.psi00<-est.psi0<-apply(rangeZ,2,function(r)runif(1,r[1],r[2]))
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))), ncol = length(est.psi0))
o0<-try(suppressWarnings(seg.glm.fit(y, XREG, Z, PSI1, w, offs, opz1)), silent=TRUE)
ss00<-o0$dev.no.gap
} else {
est.psi00<-est.psi0<-apply(PSI,2,mean)
ss00<-opz$dev0
}
}
n.intDev0<-nchar(strsplit(as.character(ss00),"\\.")[[1]][1])
all.est.psi.boot<-all.selected.psi<-all.est.psi<-matrix(, nrow=n.boot, ncol=length(est.psi0))
all.ss<-all.selected.ss<-rep(NA, n.boot)
if(is.null(size.boot)) size.boot<-n
Z.orig<-Z
count.random<-0
for(k in seq(n.boot)){
n.boot.rev<- 3
diff.selected.ss <- rev(diff(na.omit(all.selected.ss)))
if(length(diff.selected.ss)>=(n.boot.rev-1) && all(round(diff.selected.ss[1:(n.boot.rev-1)],6)==0)){
qpsi<-sapply(1:ncol(Z),function(i)mean(est.psi0[i]>=Z[,i]))
qpsi<-ifelse(abs(qpsi-.5)<.1,.8,qpsi)
est.psi0<-sapply(1:ncol(Z),function(i)quantile(Z[,i],probs=1-qpsi[i],names=FALSE))
}
PSI <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))), ncol = length(est.psi0))
if(jt) Z<-apply(Z.orig,2,jitter)
if(nonParam){
id<-sample(n, size=size.boot, replace=TRUE)
o.boot<-try(suppressWarnings(seg.glm.fit(y[id], XREG[id,,drop=FALSE], Z[id,,drop=FALSE], PSI[id,,drop=FALSE],
w[id], offs[id], opz)), silent=TRUE)
} else {
yy<-fitted.ok+sample(residuals(o0),size=n, replace=TRUE)
o.boot<-try(suppressWarnings(seg.glm.fit(yy, XREG, Z.orig, PSI, weights, offs, opz)), silent=TRUE)
}
if(is.list(o.boot)){
all.est.psi.boot[k,]<-est.psi.boot<-o.boot$psi
} else {
est.psi.boot<-apply(rangeZ,2,function(r)runif(1,r[1],r[2]))
}
PSI <- matrix(rep(est.psi.boot, rep(nrow(Z), length(est.psi.boot))), ncol = length(est.psi.boot))
opz$h<-max(opz$h*.9, .2)
opz$it.max<-opz$it.max+1
o<-try(suppressWarnings(seg.glm.fit(y, XREG, Z.orig, PSI, w, offs, opz)), silent=TRUE)
if(!is.list(o) && random){
est.psi00<-est.psi0<-apply(rangeZ,2,function(r)runif(1,r[1],r[2]))
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))), ncol = length(est.psi0))
o<-try(suppressWarnings(seg.glm.fit(y, XREG, Z, PSI1, w, offs, opz1)), silent=TRUE)
count.random<-count.random+1
}
if(is.list(o)){
if(!"coefficients"%in%names(o$obj)) o<-extract.psi(o)
all.est.psi[k,]<-o$psi
all.ss[k]<-o$dev.no.gap
if(o$dev.no.gap<=ifelse(is.list(o0), o0$dev.no.gap, 10^12)) o0<-o
est.psi0<-o0$psi
all.selected.psi[k,] <- est.psi0
all.selected.ss[k]<-o0$dev.no.gap
}
if (visualBoot) {
flush.console()
cat(paste("boot sample = ", sprintf("%2.0f",k),
" opt.dev = ", sprintf(paste("%", n.intDev0+6, ".5f",sep=""), o0$dev.no.gap),
" n.psi = ",formatC(length(unlist(est.psi0)),digits=0,format="f"),
" est.psi = ",paste(formatC(unlist(est.psi0),digits=3,format="f"), collapse=" "),
sep=""), "\n")
}
asss<-na.omit(all.selected.ss)
if(length(asss)>break.boot){
if(all(rev(round(diff(asss),6))[1:(break.boot-1)]==0)) break
}
}
all.selected.psi<-rbind(est.psi00,all.selected.psi)
all.selected.ss<-c(ss00, all.selected.ss)
ris<-list(all.selected.psi=drop(all.selected.psi),all.selected.ss=all.selected.ss, all.psi=all.est.psi, all.ss=all.ss)
if(is.null(o0$obj)){
PSI1 <- matrix(rep(est.psi0, rep(nrow(Z), length(est.psi0))), ncol = length(est.psi0))
o0<-try(suppressWarnings(seg.glm.fit(y, XREG, Z, PSI1, w, offs, opz1)), silent=TRUE)
}
if(!is.list(o0)) return(0)
o0$boot.restart<-ris
rm(.Random.seed, envir=globalenv())
return(o0)
} |
context('.json.tabular.to.data.frame')
source('utilities.R')
test_that('edge cases are handled correctly', {
.json.tabular.to.data.frame <- RPresto:::.json.tabular.to.data.frame
expect_equal_data_frame(
.json.tabular.to.data.frame(list(), character(0)),
data.frame()
)
expect_error(
.json.tabular.to.data.frame(1, c(some_type='a')),
'Unexpected data class',
label='Unexpected data class'
)
expect_error(
.json.tabular.to.data.frame(
list(list(1)),
c(unsupported_presto_type=NA_character_)
),
'Unsupported column type',
label='Unsupported column type'
)
expect_warning(
.json.tabular.to.data.frame(
list(
list(a=1L),
list(b=1L)
),
c(integer='integer')
),
'Item .*, column names differ across rows',
label='Different column names'
)
expect_error(
.json.tabular.to.data.frame(
list(list(a=1)),
c(integer='integer', varbinary='raw')
),
'Item .*,.+expected: 2 columns,.+received: 1',
label='Not enough columns'
)
e <- data.frame(
logical=TRUE,
integer=1L,
numeric=0.0,
character='',
Date=as.Date('2014-03-01'),
POSIXct_no_time_zone=as.POSIXct(
'2015-03-01 12:00:00',
tz=test.timezone()
),
POSIXct_with_time_zone=as.POSIXct('2015-03-01 12:00:00', tz='UTC'),
stringsAsFactors=FALSE)
e[['list_unnamed']] <- list(list(1))
e[['list_named']] <- list(list(a=1))
e[['raw']] <- list(charToRaw('abc'))
attr(e[['POSIXct_with_time_zone']], 'tzone') <- NULL
column.types <- c(
boolean='logical',
integer='integer',
double='numeric',
varchar='character',
date='Date',
timestamp='POSIXct_no_time_zone',
'timestamp with time zone'='POSIXct_with_time_zone',
array='list_unnamed',
map='list_named',
varbinary='raw'
)
r <- .json.tabular.to.data.frame(
list(),
column.types,
timezone=test.timezone()
)
colnames(r) <- column.types
expect_equal_data_frame(r, e[FALSE, ])
r <- .json.tabular.to.data.frame(
rep(list(list()), 3),
character(0),
timezone=test.timezone()
)
expect_equal_data_frame(r, data.frame(rep(NA, 3))[, FALSE, drop=FALSE])
})
with_locale(test.locale(), test_that)('regular data is converted correctly', {
.json.tabular.to.data.frame <- RPresto:::.json.tabular.to.data.frame
input <- list(
list(
TRUE,
1L,
0.0,
'0',
'',
'YQ==',
'2015-03-01',
'2015-03-01 12:00:00',
'2015-03-01 12:00:00 Europe/Paris',
iconv('\xFD\xDD\xD6\xF0', localeToCharset(test.locale()), 'UTF-8'),
list(1, 2),
list(a=1, b=2)
),
list(
FALSE,
2L,
1.0,
'1.414',
'z',
'YmM=',
'2015-03-02',
'2015-03-02 12:00:00.321',
'2015-03-02 12:00:00.321 Europe/Paris',
{ x <- '\xE1\xBD\xA0\x32'; Encoding(x) <- 'UTF-8'; x},
list(),
structure(list(), names=character(0))
)
)
column.classes <- c(
boolean='logical',
integer='integer',
double='numeric',
varchar='character',
varchar='character',
varbinary='raw',
date='Date',
timestamp='POSIXct_no_time_zone',
'timestamp with time zone'='POSIXct_with_time_zone',
varchar='character',
array='list_unnamed',
map='list_named'
)
column.names <- column.classes
column.names[length(column.names) - 2] <- '<odd_name>'
e <- data.frame.with.all.classes()
r <- .json.tabular.to.data.frame(
input,
column.classes,
timezone=test.timezone()
)
colnames(r) <- column.names
expect_equal_data_frame(r, e, label='unnamed items')
old.locale <- Sys.getlocale('LC_CTYPE')
tryCatch({
if (.Platform[['OS.type']] == 'windows') {
Sys.setlocale('LC_CTYPE', 'French_France.1252')
} else {
Sys.setlocale('LC_CTYPE', 'fr_FR.iso8859-15@euro')
}
},
warning=function(cond) {
Sys.setlocale('LC_CTYPE', 'fr_FR.iso8859-15')
}
)
if (Sys.getlocale('LC_CTYPE') != old.locale) {
expect_false(isTRUE(all.equal(r, e)))
}
input.with.names <- lapply(input,
function(x) { names(x) <- column.names; return(x) }
)
Sys.setlocale('LC_CTYPE', test.locale())
r <- .json.tabular.to.data.frame(
input.with.names,
column.classes,
timezone=test.timezone()
)
expect_equal_data_frame(r, e, label='auto parse names')
})
test_that('NAs are handled correctly', {
.json.tabular.to.data.frame <- RPresto:::.json.tabular.to.data.frame
expect_equal_data_frame(
.json.tabular.to.data.frame(list(list(A=NULL)), c(boolean='logical')),
data.frame(A=NA)
)
e <- data.frame(A=as.Date(NA), B=3L, C=as.POSIXct(NA))
attr(e[['C']], 'tzone') <- NULL
expect_equal_data_frame(
.json.tabular.to.data.frame(
list(list(A=NULL, B=3L, C=NULL)),
c(
date='Date',
integer='integer',
'timestamp with time zone'='POSIXct_with_time_zone'
),
timezone=test.timezone()
),
e
)
column.classes <- c(
boolean='logical',
integer='integer',
double='numeric',
varchar='character',
varbinary='raw',
date='Date',
timestamp='POSIXct_no_time_zone',
'timestamp with time zone'='POSIXct_with_time_zone',
array='list_unnamed',
map='list_named'
)
r <- .json.tabular.to.data.frame(
list(rep(list(NULL), length(column.classes))),
column.classes,
timezone=test.timezone()
)
colnames(r) <- column.classes
e <- data.frame(NA, NA_integer_, NA_real_, NA_character_, NA,
as.Date(NA), as.POSIXct(NA_character_), as.POSIXct(NA_character_),
NA, NA, stringsAsFactors=FALSE)
colnames(e) <- column.classes
e[['raw']] <- list(NA)
e[['list_unnamed']] <- list(NA)
e[['list_named']] <- list(NA)
attr(e[['POSIXct_no_time_zone']], 'tzone') <- test.timezone()
attr(e[['POSIXct_with_time_zone']], 'tzone') <- NULL
expect_equal_data_frame(r, e)
input <- list(
list(
logical=NULL,
integer=1L,
numeric=NULL,
character='',
raw=NULL,
Date='2015-03-01',
POSIXct_no_time_zone=NULL,
POSIXct_with_time_zone='2015-04-01 01:02:03.456 Europe/Paris',
list_unnamed=NULL,
list_named=list(A=1)
),
list(
logical=TRUE,
integer=NULL,
numeric=0.0,
character=NULL,
raw='YQ==',
Date=NULL,
POSIXct_no_time_zone='2015-04-01 01:02:03.456',
POSIXct_with_time_zone=NULL,
list_unnamed=list(1),
list_named=NULL
)
)
e <- data.frame(
logical=c(NA, TRUE),
integer=c(1L, NA),
numeric=c(NA, 0.0),
character=c('', NA),
raw=NA,
Date=as.Date(c('2015-03-01', NA)),
POSIXct_no_time_zone
=as.POSIXct(c(NA, '2015-04-01 01:02:03.456'), tz=test.timezone()),
POSIXct_with_time_zone=as.POSIXct(
c('2015-04-01 01:02:03.456', NA),
tz='Europe/Paris'
),
list_unnamed=NA,
list_named=NA,
stringsAsFactors=FALSE
)
e[['raw']] <- list(NA, charToRaw('a'))
e[['list_unnamed']] <- list(NA, list(1))
e[['list_named']] <- list(list(A=1), NA)
r <- .json.tabular.to.data.frame(
input,
column.classes,
timezone=test.timezone()
)
expect_equal_data_frame(r, e)
e.reversed <- e[c(2, 1), ]
rownames(e.reversed) <- NULL
r <- .json.tabular.to.data.frame(
input[c(2, 1)],
column.classes,
timezone=test.timezone()
)
expect_equal_data_frame(r, e.reversed)
})
test_that('Inf, -Inf and NaN are handled correctly', {
.json.tabular.to.data.frame <- RPresto:::.json.tabular.to.data.frame
expect_equal_data_frame(
.json.tabular.to.data.frame(
list(list(A='Infinity', B='-Infinity', C='NaN')),
c(double='numeric', double='numeric', double='numeric')
),
data.frame(A=Inf, B=-Inf, C=NaN)
)
expect_equal_data_frame(
.json.tabular.to.data.frame(
list(list(A='Infinity', B='-Infinity', C='NaN')),
c(varchar='character', varchar='character', varchar='character')
),
data.frame(A='Infinity', B='-Infinity', C='NaN', stringsAsFactors=FALSE)
)
expect_equal(
.json.tabular.to.data.frame(
list(
list(A=1.0, B=1.0, C=1.0),
list(A='Infinity', B='-Infinity', C='NaN'),
list(A=1.0, B=1.0, C=1.0)
),
c(double='numeric', double='numeric', double='numeric')
),
data.frame(A=c(1.0, Inf, 1.0), B=c(1.0, -Inf, 1.0), C=c(1.0, NaN, 1.0))
)
}) |
nsk <- function(x, df=NULL, knots=NULL, intercept=FALSE, b=.05,
Boundary.knots = quantile(x, c(b, 1-b), na.rm=TRUE)) {
if (is.logical(Boundary.knots) || length(Boundary.knots) == 0)
kx <- range(x, na.rm=TRUE)
else if (length(knots) == 0) kx <- Boundary.knots
else {
if (Boundary.knots[2] <= max(knots)) Boundary.knots <- Boundary.knots[1]
if (Boundary.knots[1] >= min(knots)) Boundary.knots <- Boundary.knots[-1]
kx <- sort(c(knots, Boundary.knots))
}
j <- c(1, length(kx))
bknot <- kx[j]
iknot <- kx[-j]
if (length(iknot) ==0)
basis <- ns(x, df=df, intercept=intercept,
Boundary.knots = bknot)
else basis <- ns(x, df=df, knots= iknot, intercept=intercept,
Boundary.knots = bknot)
iknot <- attr(basis, "knots")
kx <- c(bknot[1], iknot, bknot[2])
kbasis <- ns(kx, df=df, knots=iknot, intercept=intercept,
Boundary.knots = bknot)
if (intercept) ibasis <- basis %*% solve(kbasis)
else ibasis <- (cbind(1, basis) %*% solve(cbind(1, kbasis)))[, -1]
attributes(ibasis) <- attributes(basis)
class(ibasis) <- c("nsk", class(basis))
ibasis
}
makepredictcall.nsk <- function(var, call)
{
if(as.character(call)[1L] == "nsk" ||
(is.call(call) && identical(eval(call[[1L]]), nsk))) {
at <- attributes(var)[c("knots", "Boundary.knots", "intercept")]
call <- call[1L:2L]
call[names(at)] <- at
}
call
} |
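# Usage sketch (nsk() builds on splines::ns(), so splines must be attached):
# with this basis, regression coefficients map directly to fitted values at
# the knots (differences from the first knot when intercept = FALSE).
library(splines)
set.seed(1)
xx <- runif(200); yy <- sin(2 * pi * xx) + rnorm(200, sd = 0.2)
fit <- lm(yy ~ nsk(xx, df = 4))
coef(fit)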
predict.miss.lm <- function(object, newdata = NULL, seed = NA, ...)
{
if (!is.na(seed))
set.seed(seed)
X.new = newdata
mu.em = object$mu.X
sig2.em = object$Sig.X
beta.em = object$coef
if (is(X.new, "data.frame")){
X.new <- as.matrix(X.new)
}
  if (!is.matrix(X.new)){
    stop("Error: parameter 'newdata' should be either a matrix or a data frame.")
  }
  if (sum(sapply(X.new, is.numeric)) < ncol(X.new)) {
    stop("Error: parameter 'newdata' should be numeric.")
  }
X.prep <- cbind(rep(NA, nrow(X.new)), X.new)
X.prep <- t(t(X.prep) - mu.em)
Inv.Sigma.tmp <- solve(sig2.em)
X.pred <- t(apply(X.prep, 1, imputeEllP, Inv.Sigma.tmp))
X.pred <- t(t(X.pred) + mu.em)
  return(X.pred[, 1])
} |
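# imputeEllP() is not defined in this excerpt. A minimal sketch consistent
# with its use above (fill the NA entries of one centered row with their
# Gaussian conditional expectation, given the precision matrix) would be:
imputeEllP_sketch <- function(x, InvSigma) {
  miss <- is.na(x)
  if (any(miss)) {
    # conditional mean of x_mis given x_obs for centered Gaussian data:
    # -Lambda_mm^{-1} %*% Lambda_mo %*% x_obs, with Lambda the precision matrix
    x[miss] <- -solve(InvSigma[miss, miss, drop = FALSE],
                      InvSigma[miss, !miss, drop = FALSE] %*% x[!miss])
  }
  x
}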
expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(\"'drop' argument will be ignored\", quote(`[.data.frame`(women, \"height\", drop = FALSE)))"));
.Internal(`.dfltWarn`(argv[[1]], argv[[2]]));
}, o=expected); |
NULL
is_matrix <- function(x) {
is.matrix(x)
}
is_numeric_matrix <- function(x) {
if (!is.matrix(x)) return(FALSE)
is.numeric(x)
}
is_string_matrix <- function(x) {
if (!is.matrix(x)) return(FALSE)
is.character(x)
}
is_logical_matrix <- function(x) {
if (!is.matrix(x)) return(FALSE)
is.logical(x)
}
is_not_matrix <- function(x) {
!is_matrix(x)
} |
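# Quick checks of the predicates above:
m <- matrix(1:4, 2)
c(is_matrix(m), is_numeric_matrix(m), is_string_matrix(m), is_not_matrix(1:4))
# TRUE TRUE FALSE TRUE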
library(TMB)
dyn.load(dynlib("linreg_parallel"))
set.seed(123)
x <- seq(0, 10, length = 50001)
data <- list(Y = rnorm(length(x)) + x, x = x)
parameters <- list(a=0, b=0, logSigma=0)
obj <- MakeADFun(data, parameters, DLL="linreg_parallel")
opt <- nlminb(obj$par, obj$fn, obj$gr) |
estimate.expression.cna.correlation <- function(exp.data = NULL, cna.data.log2 = NULL, corr.threshold = 0.3, corr.direction = "two.sided", subtypes.metadata = NULL, feature.ids = NULL, cancer.type = NULL, data.dir = NULL, graphs.dir = NULL) {
if (!identical(colnames(exp.data), colnames(cna.data.log2))) {
stop("\nDieing gracefully bcoz colnames(exp.data) != colnames(cna.data.log2)");
}
if (!identical(rownames(exp.data), rownames(cna.data.log2))) {
stop("\nDieing gracefully bcoz rownames(exp.data) != rownames(cna.data.log2)");
}
if (!file.exists(data.dir)) {
dir.create(data.dir, recursive = TRUE);
}
if (!file.exists(graphs.dir)) {
dir.create(graphs.dir, recursive = TRUE);
}
plot.venn <- TRUE;
subtype.samples.list <- subtypes.metadata[["subtype.samples.list"]];
corr.data <- matrix(
data = NA, nrow = length(feature.ids), ncol = 3,
dimnames= list(feature.ids, c("rho", "P", "Q"))
);
corr.data.subtypes <- list();
corr.threshold.genes <- vector();
corr.threshold.genes.subtypes <- list();
for (subtype.name in names(subtype.samples.list)) {
corr.threshold.genes.subtypes[[subtype.name]] <- vector();
}
if (length(feature.ids) > 0) {
for (subtype.name in names(subtype.samples.list)) {
cat("\n[Correlation] mRNA v CNA: ", cancer.type, subtype.name);
corr.data.subtype <- corr.data;
for (gene.name in feature.ids) {
corr.tmp <- cor.test(
x = exp.data[gene.name, subtype.samples.list[[subtype.name]]],
y = cna.data.log2[gene.name, subtype.samples.list[[subtype.name]]],
method = "spearman"
);
corr.data.subtype[gene.name, "rho"] <- corr.tmp$estimate;
corr.data.subtype[gene.name, "P"] <- corr.tmp$p.value;
}
corr.data.subtype[, "rho"] <- round(corr.data.subtype[, "rho"], digits = 3);
corr.data.subtype[, "Q"] <- p.adjust(corr.data.subtype[, "P"], method = "BH");
corr.data.subtypes[[subtype.name]] <- corr.data.subtype;
if (corr.direction == "two.sided") {
which.genes <- which(abs(corr.data.subtype[, "rho"]) > corr.threshold);
}
else if (corr.direction == "greater") {
which.genes <- which(corr.data.subtype[, "rho"] > corr.threshold);
}
else if (corr.direction == "less") {
which.genes <- which(corr.data.subtype[, "rho"] < corr.threshold);
}
else {
stop("\nDieing gracefully bcoz corr.direction is invalid");
}
if (length(which.genes) > 0) {
corr.threshold.genes <- union(
corr.threshold.genes,
rownames(corr.data.subtype)[which.genes]
);
corr.threshold.genes.subtypes[[subtype.name]] <- rownames(corr.data.subtype)[which.genes];
}
else {
plot.venn <- FALSE;
corr.threshold.genes.subtypes[[subtype.name]] <- NULL;
cat("\n\tvenn diagram will be not be plotted as empty correlation set for this subtype");
}
write.table(
x = corr.data.subtype[order(corr.data.subtype[, "rho"], decreasing = TRUE), , drop = FALSE],
file = paste(data.dir, "mRNA_abundance_cna_correlation__", subtype.name, ".txt", sep = ""),
row.names = TRUE,
col.names = NA,
sep = "\t",
quote = FALSE
);
}
if (length(corr.threshold.genes) > 1 && cancer.type == "Metabric" && plot.venn) {
venn.diagram(
x = list(
"Normal-like" = corr.threshold.genes.subtypes[["Normal"]],
"LuminalA" = corr.threshold.genes.subtypes[["LumA"]],
"LuminalB" = corr.threshold.genes.subtypes[["LumB"]],
"Basal" = corr.threshold.genes.subtypes[["Basal"]],
"Her2" = corr.threshold.genes.subtypes[["Her2"]]
),
imagetype = "png",
filename = paste(graphs.dir, "mRNA_abundance_cna_correlation__venn_PAM50.png", sep = ""),
col = "black",
fill = c("forestgreen", "dodgerblue3", "lightskyblue2", "red", "pink"),
alpha = 0.50,
fontface = "bold",
cex = c(1.5, 1.5, 1.5, 1.5, 1.5, 1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8,
1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8, 1, 1, 1, 1, 1, 1.5),
cat.col = "black",
cat.cex = 1.5,
cat.fontface = "bold",
margin = 0.23,
cat.dist = 0.32
);
}
}
return (
list(
"corr.threshold.genes" = corr.threshold.genes,
"correlated.genes.subtypes" = corr.threshold.genes.subtypes
)
);
} |
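# Usage sketch (toy data; directory arguments need a trailing separator
# because the function builds output paths with paste(..., sep = "")):
genes <- paste0("g", 1:50); samples <- paste0("s", 1:20)
exp.data <- matrix(rnorm(1000), 50, 20, dimnames = list(genes, samples))
cna.data.log2 <- 0.5 * exp.data + matrix(rnorm(1000, sd = 0.5), 50, 20,
                                         dimnames = list(genes, samples))
res <- estimate.expression.cna.correlation(
  exp.data = exp.data, cna.data.log2 = cna.data.log2,
  subtypes.metadata = list(subtype.samples.list = list(All = samples)),
  feature.ids = genes, cancer.type = "Toy",
  data.dir = paste0(tempdir(), "/"), graphs.dir = paste0(tempdir(), "/"))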
context("Studentize")
test_that("Studentize", {
N <- 100
declaration <- randomizr::declare_ra(N = N, m = 50)
Z <- randomizr::conduct_ra(declaration)
X <- rnorm(N)
Y <- .9 * X + .2 * Z + rnorm(N)
W <- runif(N)
df <- data.frame(Y, X, Z, W)
ri_out <-
conduct_ri(
formula = Y ~ Z,
declaration = declaration,
assignment = "Z",
sharp_hypothesis = 0,
studentize = TRUE,
data = df, sims = 100
)
plot(ri_out)
summary(ri_out)
ri_out <-
conduct_ri(
formula = Y ~ Z + X,
declaration = declaration,
assignment = "Z",
sharp_hypothesis = 0,
studentize = TRUE,
data = df, sims = 100
)
plot(ri_out)
summary(ri_out)
expect_true(TRUE)
}) |
sigma2_DML <- function(all_residuals, betahat) {
n <- length(all_residuals[[1]]$rY)
d <- nrow(as.matrix(betahat))
q <- ncol(all_residuals[[1]]$rA)
K <- length(all_residuals)
Jzerohat <- matrix(0, nrow = d, ncol = q)
cov_loss <- matrix(0, nrow = q, ncol = q)
for (k in seq_len(K)) {
mat_1 <- crossprod(all_residuals[[k]]$rA, all_residuals[[k]]$rX) / n
mat_2_inv_mat_1 <-
qr.solve(crossprod(all_residuals[[k]]$rA, all_residuals[[k]]$rA) / n, mat_1)
Jzerohat <-
Jzerohat + qr.solve(crossprod(mat_2_inv_mat_1, mat_1), t(mat_2_inv_mat_1))
loss <- sweep(all_residuals[[k]]$rA,
1,
all_residuals[[k]]$rY - all_residuals[[k]]$rX %*% betahat,
FUN = "*")
cov_loss <- cov_loss + crossprod(loss, loss) / n
}
Jzerohat <- Jzerohat / K
Jzerohat %*% tcrossprod(cov_loss / K, Jzerohat) / (n * K)
}
sigma2_DML_stable <- function(all_residuals, betahat) {
n <- length(all_residuals[[1]]$rY)
d <- nrow(as.matrix(betahat))
q <- ncol(all_residuals[[1]]$rA)
K <- length(all_residuals)
cov_loss <- matrix(0, nrow = q, ncol = q)
mat_2_full <- matrix(0, nrow = q, ncol = q)
mat_1_full <- matrix(0, nrow = d, ncol = q)
for (k in seq_len(K)) {
mat_1_full <- mat_1_full + crossprod(all_residuals[[k]]$rX, all_residuals[[k]]$rA) / n
mat_2_full <- mat_2_full + crossprod(all_residuals[[k]]$rA, all_residuals[[k]]$rA) / n
loss <- sweep(all_residuals[[k]]$rA,
1,
all_residuals[[k]]$rY - all_residuals[[k]]$rX %*% betahat,
FUN = "*")
cov_loss <- cov_loss + crossprod(loss, loss) / n
}
mat_1_full <- t(mat_1_full) / K
mat_2_full <- mat_2_full / K
mat_2_full_inf_mat_1_full <- qr.solve(mat_2_full, mat_1_full)
Jzerohat <- qr.solve(crossprod(mat_2_full_inf_mat_1_full, mat_1_full),
t(mat_2_full_inf_mat_1_full))
warning("Essentially perfect fit: DML summary may be unreliable.")
Jzerohat %*% tcrossprod(cov_loss / K, Jzerohat) / (n * K)
}
sigma2_gamma <- function(all_residuals, betahat, gamma) {
n <- length(all_residuals[[1]]$rY)
d <- length(betahat)
q <- ncol(all_residuals[[1]]$rA)
K <- length(all_residuals)
D1 <- matrix(0, nrow = d, ncol = d)
D2 <- matrix(0, nrow = d, ncol = d)
D4 <- matrix(0, nrow = d, ncol = d)
for (k in seq_len(K)) {
res <- all_residuals[[k]]$rY - all_residuals[[k]]$rX %*% betahat
losstilde <- sweep(all_residuals[[k]]$rX, 1, res, FUN = "*")
loss <- sweep(all_residuals[[k]]$rA, 1, res, FUN = "*")
loss_mean <- colSums(loss) / n
loss1 <- array(apply(cbind(all_residuals[[k]]$rX, all_residuals[[k]]$rA), 1,
function(x) outer(x[seq_len(d)], x[(d + 1):(d + q)])),
dim = c(d, q, n))
loss1_mean <- apply(loss1, c(1, 2), mean)
loss2 <- t(apply(as.matrix(all_residuals[[k]]$rA), 1, function(x) crossprod(rbind(x))))
loss2_mean <- if (q == 1) {
mean(loss2)
} else {
colSums(loss2) / n
}
loss2_mean_inv <- qr.solve(matrix(loss2_mean, nrow = q, ncol = q))
loss3_mean <- crossprod(all_residuals[[k]]$rX, all_residuals[[k]]$rX) / n
D1 <- D1 + loss3_mean
D2 <- D2 +
rbind(loss1_mean, deparse.level = 0) %*%
tcrossprod(loss2_mean_inv, rbind(loss1_mean, deparse.level = 0))
D3 <- rbind(loss1_mean, deparse.level = 0) %*% loss2_mean_inv
D5 <- loss2_mean_inv %*% cbind(loss_mean, deparse.level = 0)
lossBarPrime <- losstilde + mu * tcrossprod(loss, D3) +
mu * if (q >= 2) {
intermediate <- sweep(sweep(loss2, 2, loss2_mean, FUN = "-"), 2,
rep(D5, each = q), FUN = "*")
intermediate_summed <- matrix(0, nrow = n, ncol = q)
for (i in seq_len(q)) {
intermediate_summed[, i] <- rowSums(intermediate[, seq(i, q ^ 2, by = q)])
}
t(apply(sweep(sweep(loss1, c(1, 2), loss1_mean, FUN = "-"),
c(2, 3), as.vector(D5), FUN = "*"), c(1, 3), sum)) -
tcrossprod(intermediate_summed, D3)
} else {
matrix(sweep(loss1, 3, as.vector(loss1_mean), FUN = "-"),
nrow = n, byrow = TRUE) * as.vector(D5) -
crossprod((loss2 - loss2_mean) * as.vector(D5), t(D3))
}
D4 <- D4 + crossprod(lossBarPrime, lossBarPrime) / n
}
D1 <- D1 / K
D2 <- D2 / K
D1plusD2inv <- qr.solve(D1 + mu * D2)
D4 <- D4 / K
D1plusD2inv %*% tcrossprod(D4, D1plusD2inv) / (n * K)
} |
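
# --- Hedged usage sketch (not part of the original source): illustrates the
# shape of 'all_residuals' that the estimators above expect, inferred from the
# code: a list of K cross-fitting folds, each with residualised outcome rY
# (n x 1), regressors rX (n x d), and instruments rA (n x q). Data simulated
# purely for illustration.
set.seed(1)
n <- 200; d <- 2; q <- 3; K <- 2
betahat <- c(0.5, -0.25)
all_residuals <- lapply(seq_len(K), function(k) {
  rA <- matrix(rnorm(n * q), n, q)
  rX <- rA %*% matrix(rnorm(q * d), q, d) + matrix(rnorm(n * d), n, d)
  rY <- rX %*% betahat + rnorm(n)
  list(rY = rY, rX = rX, rA = rA)
})
Vhat <- sigma2_DML(all_residuals, betahat)
sqrt(diag(Vhat))                                  # standard errors of betahat
sigma2_gamma(all_residuals, betahat, gamma = 1)   # gamma = 1, i.e. mu = 0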
tidy.TMB <- function(x, effects = c("fixed", "random"),
conf.int = FALSE,
conf.level = 0.95,
conf.method = c("wald", "uniroot", "profile"), ...) {
assert_dependency("TMB")
branch <- v <- param <- value <- zeta <- Estimate <- estimate <- std.error <- NULL
sdr <- TMB::sdreport(x)
retlist <- list()
if ("fixed" %in% effects) {
ss <- summary(sdr, select = "fixed") %>%
as.data.frame() %>%
tibble::rownames_to_column("term") %>%
rename(estimate = Estimate, std.error = "Std. Error")
if (conf.int) {
      if (conf.method == "wald") {
qval <- qnorm((1 + conf.level) / 2)
ss <- mutate(ss,
conf.low = estimate - qval * std.error,
conf.high = estimate + qval * std.error
)
} else if (conf.method == "uniroot") {
tt <- do.call(
rbind,
lapply(seq(nrow(ss)),
TMB::tmbroot,
obj = x,
...
)
)
ss$conf.low <- tt[, "lwr"]
ss$conf.high <- tt[, "upr"]
} else if (conf.method == "profile") {
all_vars <- names(x$env$last.par.best)
if (!is.null(rnd <- x$env$random)) {
all_vars <- all_vars[-rnd]
}
prof0 <- purrr::map_dfr(seq_along(all_vars),
~ setNames(TMB::tmbprofile(x,name=.,trace=FALSE),c("focal","value")),
.id="param")
prof1 <- (prof0
%>% group_by(param)
%>% mutate(zeta=sqrt(2*(value-min(value))),
branch=ifelse(cumsum(zeta==0)<1, "lwr", "upr"))
%>% ungroup()
)
bad_prof_flag <- FALSE
critval <- qnorm((1+conf.level)/2)
interp_fun <- function(dd) {
        bakspl <- tryCatch(
          splines::backSpline(
            forspl <- splines::interpSpline(dd$focal, dd$zeta, na.action = na.omit)),
          error = function(e) e)
if (inherits(bakspl, "error")) {
bad_prof_flag <<- TRUE
res <- approx(dd$zeta, dd$focal, xout=critval)$y
} else {
res <- predict(bakspl, critval)$y
}
return(res)
}
tt <- prof1 %>% group_by(param, branch) %>% unique() %>% summarise(v=interp_fun(.data))
ss$conf.low <- filter(tt, branch=="lwr") %>% pull(v)
ss$conf.high <- filter(tt, branch=="upr") %>% pull(v)
} else {
stop(sprintf("conf.method=%s not implemented", conf.method))
}
    }
    retlist$fixed <- ss
  }
ret <- dplyr::bind_rows(retlist, .id = "type")
return(ret)
} |
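
# --- Hedged sketch (not part of the original source): the "wald" branch of
# tidy.TMB() in isolation. Given a data frame of estimates and standard
# errors (values here are made up), the interval is
# estimate +/- qnorm((1 + conf.level) / 2) * std.error.
library(dplyr)
ss <- data.frame(term = c("beta0", "beta1"),
                 estimate = c(1.20, -0.45),
                 std.error = c(0.30, 0.10))
conf.level <- 0.95
qval <- qnorm((1 + conf.level) / 2)
mutate(ss,
       conf.low = estimate - qval * std.error,
       conf.high = estimate + qval * std.error)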
function() {
plot(1:10)
}
function() {
plot(1:10)
}
function() {
plot(1:10)
}
function() {
  plot(1:10)
}
function() {
  plot(1:10)
}
function() {
warning("Should not test. Image size does not decrease with dimension decrease")
plot(1:10)
} |
print.abnDag <- function(x, ...){
print(x$dag)
cat("Class 'abnDag'.\n")
invisible(x)
}
summary.abnDag <- function(object, ...) {
su <- infoDag(object$dag)
return(su)
}
plot.abnDag <- function(x, new=TRUE, ...){
if (new) dev.new()
on.exit(dev.flush())
mygraph <- new("graphAM", adjMat = t(x$dag), edgemode = "directed")
g <- Rgraphviz::plot(x = mygraph)
invisible(g)
}
print.abnCache <- function(x, ...){
cat("Number of nodes in the network:",max(x$children), "\n\n")
if(x$method=="bayes"){
cat("Distribution of the marginal likelihood: \n")
print(summary(x[["mlik"]]), digits=3)
}
if(x$method=="mle"){
cat(" Distribution of the aic: \n")
print(summary(x[["aic"]]), digits=3)
cat("\n Distribution of the bic: \n")
print(summary(x[["bic"]]), digits=3)
cat("\n Distribution of the mdl: \n")
print(summary(x[["mdl"]]), digits=3)
}
invisible(x)
}
print.abnHeuristic <- function(x, ...){
cat("Best DAG' score found with",x$algo,"algorithm with", x$num.searches,"different searches limited to" , x$max.steps,"steps:\n")
print(max(unlist(x$scores)), digits=2)
cat("\n Score distribution: \n")
print(summary(unlist(x[["scores"]])), digits=2)
invisible(x)
}
plot.abnHeuristic <- function(x, ...){
df <- unlist(x$scores)
par(mfrow=c(1,2))
plot(NULL, lty=1, xlab="Index of heuristic search", ylab="BN score", ylim = range(df), xlim = c(1,length(df)))
for(i in 1:length(df)){
    if (i %in% order(df, decreasing = FALSE)[1:10]) {
points(x=i,y=df[i], type="p", pch=19, col=rgb(0,0,1, 0.8),lwd = 2)
} else {
points(x=i,y=df[i], type="p", pch=19, col=rgb(0,0,0, 0.3))
}
}
points(x = which.max(df), y = df[which.max(df)], col="red", pch=19)
title("Networks final score")
L <- (x$detailed.score)
test <- array(unlist(L), dim = c(nrow(L[[1]]), ncol(L[[1]]), length(L)))
plot(NULL,lty=1, xlab="Number of Steps",ylab="BN score", ylim = range(test), xlim = c(1,length(test[,,1])))
for(i in 1:length(L)){
    if (i %in% order(df, decreasing = FALSE)[1:10]) {
points(x=1:(length(test[,,1])),y=test[1,,i], type="l", lty=1, col=rgb(0,0,1, 0.8),lwd = 2)
} else {
points(x=1:(length(test[,,1])),y=test[1,,i], type="l", lty=1, col=rgb(0,0,0, 0.17))
}
}
lines(x=1:(length(test[,,1])),y=test[1,,which.max(df)], type="l", col="red", lwd=3)
title("Networks score trajectory")
invisible(x)
}
print.abnHillClimber <- function(x, ...){
print(x$consensus)
cat("Consensus DAG from 'searchHillClimber' (class 'abnHillClimber').\n")
invisible(x)
}
plot.abnHillClimber <- function(x, new=TRUE, ...){
if (new) dev.new()
on.exit(dev.flush())
mygraph <- new("graphAM", adjMat = x$consensus, edgemode = "directed")
g <- Rgraphviz::plot(x = mygraph)
invisible(g)
}
print.abnMostprobable <- function(x, ...){
print(x$dag)
cat("Consensus DAG from 'mostProbable', can be use with 'fitAbn'.\n")
invisible(x)
}
summary.abnMostprobable <- function(object, ...){
cat("Optimal DAG from 'mostProbable':\n")
print(object$dag)
cat( paste0("Calculated on ", dim(object$score.cache$data.df)[1], " observations.\n"))
cat( paste0("(Cache length ", length(object$score.cache$mlik), '.)\n'))
invisible( object)
}
plot.abnMostprobable <- function(x, new=TRUE, ...){
if (new) dev.new()
on.exit(dev.flush())
mygraph <- new("graphAM", adjMat = t(x$dag), edgemode = "directed")
g <- Rgraphviz::plot(x = mygraph)
invisible(g)
}
print.abnFit <- function(x, ...){
if(x$method=="mle"){
cat("The ABN model was fitted using an mle approach. The estimated coefficients are:\n\n")
print(x$coef, digits=3)
cat(paste0("Number of nodes in the network:",length(x$coef), ".\n"))
}
if(x$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. The estimated modes are:\n\n")
print(x$modes, digits=3)
cat(paste0("Number of nodes in the network: ",length(x$modes), ".\n"))
}
invisible(x)
}
summary.abnFit <- function(object, ...){
if(object$method=="mle"){
cat("The ABN model was fitted using an mle approach. The estimated coefficients are:\n")
print(object$coef, digits=3)
cat("Number of nodes in the network:",length(object$modes), ".\n")
cat("The AIC network score per node is: \n")
print(unlist(object[["aicnode"]]), digits=3)
cat("\n The BIC network score per node is: \n")
print(unlist(object[["bicnode"]]), digits=3)
cat("\n The MDL network score per node is: \n")
print(unlist(object[["mdlnode"]]), digits=3)
}
if(object$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. The estimated modes are:\n")
print(object$modes, digits=3)
cat("Number of nodes in the network:",length(object$modes), ".\n\n")
cat("The network score per node is:\n")
print(unlist(object[1:length(object$modes)]))
}
invisible(object)
}
coef.abnFit <- function(object, ...){
if(object$method=="mle"){
cat("The ABN model was fitted using an mle approach. The estimated coefficients are:\n")
print(object$coef, digits=3)
}
if(object$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. The estimated modes are:\n")
print(object$modes, digits=3)
}
invisible(object)
}
AIC.abnFit <- function(object, ...){
if(object$method=="mle"){
cat("The ABN model was fitted using an mle approach. The AIC network score per node is: \n")
print(unlist(object[["aicnode"]]), digits=3)
}
if(object$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. AIC does not make sense but the network score per node is is is:\n")
print(unlist(object[1:length(object$modes)]))
}
invisible(object)
}
BIC.abnFit <- function(object, ...){
if(object$method=="mle"){
cat("The ABN model was fitted using an mle approach. The BIC network score per node is: \n")
print(unlist(object[["bicnode"]]), digits=3)
}
if(object$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. BIC does not make sense but the network score per node is is is:\n")
print(unlist(object[1:length(object$modes)]))
}
invisible(object)
}
logLik.abnFit <- function(object, ...){
if(object$method=="mle"){
cat("The ABN model was fitted using an mle approach. The loglikelihood network score per node is: \n")
print(unlist(object[["mliknode"]]), digits=3)
}
if(object$method=="bayes"){
cat("The ABN model was fitted using a Bayesian approach. Loglikelihood does not make sense but the network score per node is is is:\n")
print(unlist(object[1:length(object$modes)]))
}
invisible(object)
}
family.abnFit <- function(object, ...){
cat("All link functions are canonical: \n
gaussian node = identy, binomial node = logit, Poisson node = log and multinomial node = logit.\n\n")
print(unlist(object$abnDag$data.dists))
invisible(object)
}
nobs.abnFit <- function(object, ...){
nrow(object$abnDag$data.df)
}
plot.abnFit <- function(x, which ="abnFit", ...){
if (which != "abnFit") stop('Function type not implemented yet. Use which="abnFit"')
if(x$method=="mle"){
g <- plotAbn(x$abnDag$dag, data.dists = x$abnDag$data.dists, fitted.values = x$coef, ...)
} else {
g <- plotAbn(x$abnDag$dag, data.dists = x$abnDag$data.dists, fitted.values = x$modes, ...)
}
invisible(g)
} |
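
# --- Hedged sketch (not part of the original source): the S3 pattern the abn
# methods above rely on. A method named print.<class> is dispatched by the
# print() generic; returning the object invisibly avoids double printing.
# 'abnDagToy' is a hypothetical class used only for illustration.
toy <- structure(list(dag = matrix(0, 2, 2)), class = "abnDagToy")
print.abnDagToy <- function(x, ...) {
  print(x$dag)
  cat("Class 'abnDagToy'.\n")
  invisible(x)
}
print(toy)  # dispatches to print.abnDagToy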
nearest.neighbour.distribution <- function(X, Y, Z, X2 = X, Y2 = Y, Z2 = Z,
                                           same = TRUE, psz = 25,
                                           main = "Nearest neighbour distribution",
                                           file = NULL, return = FALSE) {
  # Optionally write the histogram to a PNG file
  if (!is.null(file)) png(file)
  nn <- nearest.neighbours(X, Y, Z, X2, Y2, Z2, same = same, psz = psz)
  # 'n' is partially matched to hist()'s 'nclass' argument (about 100 bins)
  hist.nn <- hist(nn, freq = FALSE, n = 100, xlab = "Distance", main = main)
  graphics::lines(density(nn, na.rm = TRUE))
  if (!is.null(file)) dev.off()
  if (return) return(hist.nn)
}
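
# --- Hedged usage sketch: nearest.neighbours() is defined elsewhere in the
# source. A minimal Euclidean stand-in (ignoring 'psz') is supplied here
# purely so the example runs; it is not the original implementation.
nearest.neighbours <- function(X, Y, Z, X2, Y2, Z2, same = TRUE, psz = 25) {
  d2 <- outer(X, X2, "-")^2 + outer(Y, Y2, "-")^2 + outer(Z, Z2, "-")^2
  if (same) diag(d2) <- Inf  # a point is not its own neighbour
  sqrt(apply(d2, 1, min))
}
set.seed(42)
n <- 500
h <- nearest.neighbour.distribution(runif(n), runif(n), runif(n), return = TRUE)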
write_gitignore <- function(path) {
writeLines(
c(".Rproj.user", ".Rhistory", ".RData", ".Ruserdata"),
file.path(path, ".gitignore")
)
} |
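
# Usage sketch: write a minimal .gitignore into a temporary project directory.
proj <- tempfile("demo_proj_")
dir.create(proj)
write_gitignore(proj)
readLines(file.path(proj, ".gitignore"))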
library(micompr)
context("grpoutputs")
test_that("grpoutputs constructs the expected objects", {
outputs <- c("PopSheep", "PopWolf", "QtyGrass",
"EnSheep", "EnWolf", "EnGrass",
"All")
dir_nl_ok <- system.file("extdata", "nl_ok", package = "micompr")
dir_jex_ok <- system.file("extdata", "j_ex_ok", package = "micompr")
dir_jex_noshuff <- system.file("extdata", "j_ex_noshuff", package = "micompr")
dir_jex_diff <- system.file("extdata", "j_ex_diff", package = "micompr")
dir_na <- system.file("extdata", "testdata", "NA", package = "micompr")
files <- glob2rx("stats400v1*.tsv")
filesA_na <- glob2rx("stats400v1*n20A.tsv")
filesB_na <- glob2rx("stats400v1*n20B.tsv")
go_ok <- grpoutputs(outputs, c(dir_nl_ok, dir_jex_ok),
c(files, files),
lvls = c("NLOK", "JEXOK"), concat = T)
go_noshuff <- grpoutputs(outputs, c(dir_nl_ok, dir_jex_noshuff),
c(files, files),
lvls = c("NLOK", "JEXNOSHUF"), concat = T)
go_diff <- grpoutputs(outputs, c(dir_nl_ok, dir_jex_diff),
c(files, files),
lvls = c("NLOK", "JEXDIFF"), concat = T)
go_tri <- grpoutputs(6,
c(dir_nl_ok, dir_jex_noshuff, dir_jex_diff),
c(files, files, files))
go_1out <- grpoutputs("OnlyOne", c(dir_nl_ok, dir_jex_ok),
c(files, files), concat = F)
go_1lvl <- grpoutputs(3, dir_nl_ok, files)
go_diflencatT <- grpoutputs(7, dir_na, c(filesA_na, filesB_na), concat = T)
go_diflencatF <- grpoutputs(6, dir_na, c(filesA_na, filesB_na), concat = F)
expect_is(go_ok, "grpoutputs")
expect_is(go_noshuff, "grpoutputs")
expect_is(go_diff, "grpoutputs")
expect_is(go_tri, "grpoutputs")
expect_is(go_1out, "grpoutputs")
expect_is(go_1lvl, "grpoutputs")
expect_is(go_diflencatT, "grpoutputs")
expect_is(go_diflencatF, "grpoutputs")
expect_equal(names(go_ok$data), outputs)
expect_equal(names(go_noshuff$data), outputs)
expect_equal(names(go_diff$data), outputs)
expect_equal(names(go_tri$data),
c("out1", "out2", "out3", "out4", "out5", "out6"))
expect_equal(names(go_1out$data), "OnlyOne")
expect_equal(names(go_1lvl$data), c("out1", "out2", "out3"))
expect_equal(names(go_diflencatT$data),
c("out1", "out2", "out3", "out4", "out5", "out6", "out7"))
expect_equal(names(go_diflencatF$data),
c("out1", "out2", "out3", "out4", "out5", "out6"))
expect_equal(sum(sapply(go_ok$data[1:6], function(x) dim(x)[2])),
dim(go_ok$data[[7]])[2])
expect_equal(sum(sapply(go_noshuff$data[1:6], function(x) dim(x)[2])),
dim(go_noshuff$data[[7]])[2])
expect_equal(sum(sapply(go_diff$data[1:6], function(x) dim(x)[2])),
dim(go_diff$data[[7]])[2])
expect_equal(sum(sapply(go_diflencatT$data[1:6], function(x) dim(x)[2])),
dim(go_diflencatT$data[[7]])[2])
expect_equal(go_ok$groupsize, c(10, 10))
expect_equal(go_noshuff$groupsize, c(10, 10))
expect_equal(go_diff$groupsize, c(10, 10))
expect_equal(go_tri$groupsize, c(10, 10, 10))
expect_equal(go_1out$groupsize, c(10, 10))
expect_equal(go_1lvl$groupsize, 10)
expect_equal(go_diflencatT$groupsize, c(3, 3))
expect_equal(go_diflencatF$groupsize, c(3, 3))
expect_equal(go_ok$lvls, c("NLOK", "JEXOK"))
expect_equal(go_noshuff$lvls, c("NLOK", "JEXNOSHUF"))
expect_equal(go_diff$lvls, c("NLOK", "JEXDIFF"))
expect_equal(go_tri$lvls, c(1, 2, 3))
expect_equal(go_1out$lvls, c(1, 2))
expect_equal(go_1lvl$lvls, 1)
expect_equal(go_diflencatT$lvls, c(1, 2))
expect_equal(go_diflencatF$lvls, c(1, 2))
expect_true(go_ok$concat)
expect_true(go_noshuff$concat)
expect_true(go_diff$concat)
expect_false(go_tri$concat)
expect_false(go_1out$concat)
expect_false(go_1lvl$concat)
expect_true(go_diflencatT$concat)
expect_false(go_diflencatF$concat)
})
test_that("grpoutputs throws errors when improperly invoked", {
fs <- .Platform$file.sep
expect_error(
grpoutputs(4, c("dir1", "dir2"), glob2rx("*.tsv"), lvls = c("A", "B")),
"Number of file sets is not the same as the given number of factor levels.",
fixed = TRUE
)
expect_error(
grpoutputs(4, "some_fake_folder",
c(glob2rx("fake_files*.csv"), glob2rx("also_fakes*.csv"))),
paste("No files were found: some_fake_folder", fs,
glob2rx("fake_files*.csv"),
sep = ""),
fixed = TRUE
)
expect_error(
grpoutputs(7, system.file("extdata", "nl_ok", package = "micompr"),
"stats400v1r1.tsv", lvls = "just_the_one", concat = F),
paste("Specified number of outputs is larger than the number ",
"of outputs in file '",
system.file("extdata", "nl_ok", package = "micompr"), fs,
"stats400v1r1.tsv'.", sep = ""),
fixed = TRUE
)
expect_error(
grpoutputs(4, c(system.file("extdata", "nl_ok", package = "micompr"),
system.file("extdata", "testdata", "n50",
package = "micompr")),
c("stats400v1r1.tsv", "stats400v1r1n50.tsv")),
paste("Length of outputs in file '",
system.file("extdata", "testdata", "n50", package = "micompr"),
fs, "stats400v1r1n50.tsv",
"' does not match the length of outputs in file '",
system.file("extdata", "nl_ok", package = "micompr"),
fs, "stats400v1r1.tsv",
"'.", sep = ""),
fixed = TRUE
)
expect_error(
grpoutputs(2, c(system.file("extdata", "nl_ok", package = "micompr"),
system.file("extdata", "j_ex_ok", package = "micompr")),
c(glob2rx("stats400v1*.tsv"), glob2rx("stats400v1*.tsv")),
concat = T),
paste("A minimum of 3 outputs must be specified in order to use ",
"output concatenation.", sep = ""),
fixed = TRUE
)
expect_error(
grpoutputs(6, c(system.file("extdata", "nl_ok", package = "micompr"),
system.file("extdata", "testdata", "NA",
package = "micompr")),
c(glob2rx("stats400v1*.tsv"), glob2rx("stats400v1*A.tsv"))),
paste("Length of outputs in file '",
system.file("extdata", "testdata", "NA", package = "micompr"), fs,
"stats400v1r[0-9]+n20A.tsv' ",
"does not match the length of outputs in file '",
system.file("extdata", "nl_ok", package = "micompr"), fs,
"stats400v1r[0-9]+.tsv'.", sep = "")
)
}) |
# Fit a 3PL mixture model: optionally run NSTARTS[1] short random starts
# (NSTARTS[2] iterations each), keep the best-deviance solution, and refit
# it fully with tam.mml.3pl().
tamaan.3pl.mixture <- function( res0, anal.list, con, ... )
{
if ( ! is.null( anal.list$NSTARTS ) ){
NSTARTS <- anal.list$NSTARTS
} else {
NSTARTS <- c(0,0)
}
con0 <- con
con0$maxiter <- NSTARTS[2]
con0$progress <- FALSE
devmin <- 1E100
itempartable <- res0$itempartable_MIXTURE
itempartable.int <- itempartable[ itempartable$int==1, ]
itempartable.slo <- itempartable[ itempartable$slo==1, ]
gammaslope0 <- itempartable$val
resp <- res0$resp
items0 <- res0$items
I <- ncol(resp)
beta0 <- sapply( 1:I, FUN=function(ii){
ncat.ii <- items0[ii, "ncat"] - 1
l1 <- rep(0,ncat.ii)
for (hh in 1:ncat.ii){
l1[hh] <- stats::qlogis( mean( resp[,ii] >=hh, na.rm=TRUE ) / ncat.ii )
}
return(l1)
} )
beta0 <- unlist( beta0)
B0 <- length(beta0)
ncl <- anal.list$NCLASSES
if (NSTARTS[1] > 0 ){
for (nn in 1:(NSTARTS[1]) ){
gammaslope <- gammaslope0
gammaslope[ itempartable.int$index ] <- rep( beta0, ncl ) +
stats::rnorm( ncl*B0, mean=0, sd=log(1+nn^(1/5) ) )
N0 <- nrow(itempartable.slo)
if ( ! res0$raschtype ){
gammaslope[ itempartable.slo$index ] <- stats::runif( N0, max(.2,1-nn/5), min( 1.8, 1+nn/5) )
}
if (nn==1){ delta.inits <- NULL }
res <- tam.mml.3pl(resp=res0$resp, E=res0$E, skillspace="discrete",
theta.k=res0$theta.k, gammaslope=gammaslope,
gammaslope.constr.V=res0$gammaslope.constr.V,
gammaslope.constr.c=res0$gammaslope.constr.c,
notA=TRUE, control=con0, delta.inits=delta.inits,
delta.designmatrix=res0$delta.designmatrix,
delta.fixed=res0$delta.fixed,
gammaslope.fixed=res0$gammaslope.fixed,
... )
if (con$progress){
cat( paste0( "*** Random Start ", nn,
" | Deviance=", round( res$deviance, 2 ), "\n") )
utils::flush.console()
}
if ( res$deviance < devmin ){
devmin <- res$deviance
gammaslope.min <- res$gammaslope
delta.min <- res$delta
}
}
}
if (NSTARTS[1] > 0 ){
gammaslope <- gammaslope.min
delta.inits <- delta.min
} else {
gammaslope <- NULL
delta.inits <- NULL
}
res <- tam.mml.3pl(resp=res0$resp, E=res0$E, skillspace="discrete",
theta.k=res0$theta.k, gammaslope=gammaslope,
gammaslope.fixed=res0$gammaslope.fixed,
gammaslope.constr.V=res0$gammaslope.constr.V,
gammaslope.constr.c=res0$gammaslope.constr.c,
notA=TRUE, delta.inits=delta.inits,
delta.fixed=res0$delta.fixed,
control=con,
delta.designmatrix=res0$delta.designmatrix,
... )
itempartable <- res0$itempartable_MIXTURE
theta_MIXTURE <- res0$theta_MIXTURE
TG <- nrow(theta_MIXTURE)
TP <- ncl*TG
pi.k <- res$pi.k
D <- ncol(theta_MIXTURE )
G <- 1
probs_MIXTURE <- rep(NA,ncl)
names(probs_MIXTURE) <- paste0("Cl", 1:ncl )
moments_MIXTURE <- as.list( 1:ncl )
for (cl in 1:ncl){
cl.index <- 1:TG + (cl-1)*TG
probs_MIXTURE[cl] <- sum(pi.k[ cl.index, 1 ] )
pi.ktemp <- pi.k[ cl.index,,drop=FALSE]
pi.ktemp <- pi.ktemp / colSums( pi.ktemp)
moments_MIXTURE[[cl]] <- tam_mml_3pl_distributionmoments( D=D,
G=G, pi.k=pi.ktemp, theta.k=theta_MIXTURE )
}
res$probs_MIXTURE <- probs_MIXTURE
res$moments_MIXTURE <- moments_MIXTURE
ipar <- res0$itempartable_MIXTURE
p11 <- strsplit( paste(ipar$parm), split="_Cl" )
ipar$parm0 <- unlist( lapply( p11, FUN=function(pp){ pp[1] } ) )
ipar$est <- gammaslope[ ipar$index ]
res$gammaslope <- gammaslope
ipar2 <- ipar[ ipar$Class==1, c("item", "parm0")]
colnames(ipar2)[2] <- "parm"
for (cl in 1:ncl){
ipar2[, paste0("Cl", cl ) ] <- ipar[ ipar$Class==cl, "est" ]
}
res$itempartable_MIXTURE <- ipar2
res$ind_classprobs <- tamaan_3pl_mixture_individual_class_probabilities(hwt=res$hwt,
NCLASSES=anal.list$NCLASSES)
res$tamaan.method <- "tam.mml.3pl"
return(res)
} |
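
# --- Hedged sketch (not part of TAM): the random-start pattern used above,
# in isolation. fit_short() and fit_full() are hypothetical stand-ins for the
# tam.mml.3pl() calls; fit_short(nn) is assumed to return
# list(deviance = ..., start = ...).
multi_start <- function(NSTARTS, fit_short, fit_full) {
  best <- list(deviance = Inf, start = NULL)
  if (NSTARTS[1] > 0) {
    for (nn in seq_len(NSTARTS[1])) {
      res <- fit_short(nn)  # short exploratory fit from random start nn
      if (res$deviance < best$deviance) best <- res
    }
  }
  fit_full(best$start)  # full fit from the best start (or defaults if none)
}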