context("Privacy controls")
test_that("Privacy control list created correctly", {
pc <- make_privacy_controls(list(suppression = 10))
expect_equal(
names(pc),
c("round", "suppression_matrix", "suppress",
"suppress_quantile",
"rse_matrix", "markup", "has", "get")
)
expect_is(pc$round, "function")
expect_is(pc$suppression_matrix, "function")
expect_is(pc$suppress, "function")
})
test_that("Suppression matrix created correctly", {
pc <- make_privacy_controls(list(suppression = 10))
tab <- as.table(c(A = 20, B = 30, C = 5))
st <- pc$suppression_matrix(tab)
expect_equivalent(st, c(FALSE, FALSE, TRUE, TRUE))
expect_equivalent(
pc$suppress(c(tab, sum(tab)), st),
c("20", "30", "S", "S")
)
tab <- as.table(cbind(c(20, 30, 5), c(30, 40, 8), c(5, 20, 40)))
st <- pc$suppression_matrix(tab)
expect_equivalent(rowSums(st), c(2, 0, 2))
expect_equivalent(
pc$suppress(cbind(tab, rowSums(tab)), st),
cbind(c(20, 30, "S"), c(30, 40, "S"), c("S", 20, 40), c("S", 90, 53))
)
})
test_that("Rounding is correct", {
pc <- make_privacy_controls(list(rounding = "RR3"))
expect_true(
all(pc$round(sample(0:10, 100L, TRUE)) %% 3L == 0)
)
pc <- make_privacy_controls(list(rounding = "GRR"))
expect_true(
all(
pc$round(c(0, 5, 18, 25, 100, 123, 898, 2041)) %%
c(3, 3, 3, 5, 10, 10, 10, 100) == 0L
)
)
pc <- make_privacy_controls(list(rounding = 100L))
expect_true(
all(
pc$round(sample(1e5, 100)) %% 100L == 0L
)
)
})
test_that("RSE checking/annotation/suppression", {
pc <- make_privacy_controls(
list(
check_rse = list(
cut = c(50, 100),
output = c(" *", "suppress")
)
)
)
x <- matrix(c(50, 5, 10, 200), nrow = 2L)
e <- matrix(c(20, 4, 12, 20), nrow = 2L)
m <- pc$rse_matrix(x, e)
expect_equal(
as.character(m),
c(NA_character_, " *", "suppress", NA_character_, NA_character_, NA_character_)
)
ms <- pc$suppression_matrix(x) | !(is.na(m) | m != "suppress")
x <- cbind(x, rowSums(x))
x <- pc$suppress(x, ms)
r <- pc$markup(x, m)
expect_equal(
as.character(r),
c("50", "5 *", "S", "200", "60", "205")
)
})
library(survey)
data(api)
dclus2 <- svydesign(~dnum+snum, fpc=~fpc1+fpc2, data = apiclus2)
test_that("Unweighted survey counts", {
inf <- inzsummary(sch.wide ~ both,
data = apiclus2,
privacy_controls = list(rounding = "RR3")
)
expect_match(
inf[grep("Table of Counts", inf) + 4L],
"Yes\\s+0"
)
tb <- read.table(
textConnection(inf[grep("Table of Counts", inf) + 3:4]),
)
expect_true(all(tb[,-1] %% 3L == 0L))
})
test_that("Weighted survey counts", {
inf <- inzsummary(~ stype,
design = dclus2,
privacy_controls = list(
rounding = 100L,
suppression = 700L,
secondary_suppression = FALSE
)
)
tbl <- read.table(
textConnection(
inf[grep("Population Estimates", inf) + 3L]
)
)
expect_true(all(tbl[c(2, 4:5)] %% 100 == 0L))
expect_equivalent(tbl[3], "S")
inf <- inzsummary(sch.wide ~ stype,
design = dclus2,
table.direction = "v",
privacy_controls = list(
check_rse = list(
cut = c(20, 30, 50),
output = c(" *", " **", "suppress")
)
)
)
expect_match(inf, "242 **", fixed = TRUE, all = FALSE)
expect_match(inf, "689 *", fixed = TRUE, all = FALSE)
inf <- inzsummary(sch.wide ~ stype,
design = dclus2,
privacy_controls = list(
suppression_raw_counts = 6L
)
)
})
test_that("Value magnitudes (cell totals and means)", {
inf <- inzsummary(api00 ~ stype | awards + both,
g1.level = "Yes",
g2.level = "Yes",
design = dclus2,
privacy_controls = list(
suppression = 500L,
suppression_magnitude = 200L
)
)
popest <- read.table(
textConnection(
inf[grep("Population estimates", inf) + 3:5]
)
)
expect_equal(popest[2, 5], "S")
expect_equal(popest[2, 7], "S")
expect_equal(popest[2, 8], "S")
expect_equal(popest[3, 8], "S")
})
test_that("Medians, quantiles, and percentiles", {
inf <- inzsummary(api00 ~ stype | awards + both,
g1.level = "Yes",
g2.level = "Yes",
design = dclus2,
privacy_controls = list(
suppression_quantiles = list(
p = c(0.25, 0.5, 0.75),
n = c(20, 10, 20)
)
)
)
popest <- read.table(
textConnection(
inf[grep("Population estimates", inf) + 3:5]
)
)
expect_equal(popest[2, 2], "S")
})
test_that("Percentages, proportions, and ratios", {
inf <- inzsummary(~stype, design = dclus2, round_percent = 1L)
pr <- inf[grep("Population Estimates", inf) + 5]
prs <- strsplit(pr, "\\s+")[[1]][3:5]
expect_match(
prs,
"[0-9]+\\.[0-9]\\%"
)
})
context("plotAlgoPerfDensities")
test_that("plotAlgoPerfDensities", {
# smoke test: the call should complete without raising an error
expect_error(plotAlgoPerfDensities(testscenario1), NA)
})
rssSplit <- function(fit, df0 = max(1, floor(fit$df / 10)), seed = -71407) {
  # Split the residual sum of squares of a least-squares fit into two
  # independent components: ss0, carried by a random df0-dimensional
  # subspace orthogonal to the column space of the design matrix, and
  # ss1, the remainder on the remaining residual degrees of freedom.
  e <- fit$res
  U <- qr.Q(fit$qr)
  n <- nrow(U)
  set.seed(seed)
  Z <- matrix(rnorm(n * df0), n, df0)
  # orthonormal basis for the component of Z orthogonal to the columns of U
  V <- svd(Z - U %*% crossprod(U, Z))$u
  ss0 <- sum((t(V) %*% e)^2)
  ss1 <- sum(e^2) - ss0
  ss <- c(ss0, ss1)
  names(ss) <- paste0("df", c(df0, n - ncol(U) - df0))
  ss
}
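# Minimal usage sketch for rssSplit(), assuming the fit is a list with
# components $res (residuals), $qr (QR decomposition of the design matrix),
# and $df (residual degrees of freedom); a plain lm() fit can be adapted:
#   fit0 <- lm(dist ~ speed, data = cars)
#   fit  <- list(res = resid(fit0), qr = fit0$qr, df = fit0$df.residual)
#   rssSplit(fit)  # named vector: ss0 on df0 dfs, ss1 on the remainder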
check_missing <- function(data, ..., ret_prop = TRUE) {
  # Tally missing values in the selected columns; returns the proportion
  # of NAs per column by default, or raw counts when ret_prop = FALSE.
  check_missing_(data, .dots = lazyeval::lazy_dots(...),
                 ret_prop = ret_prop)
}
check_missing_ <- function(data, ..., .dots, ret_prop = TRUE) {
  dots <- lazyeval::all_dots(.dots, ...)
  vars <- dplyr::select_vars_(names(data), dots)
  if (ret_prop) {
    fun <- dplyr::funs_(quote(sum(is.na(.)) / n()))
  } else {
    fun <- dplyr::funs_(quote(sum(is.na(.))))
  }
  # select_vars_()/funs_() are superseded lazyeval-era dplyr APIs;
  # `.vars` is the current name of the argument formerly called `.cols`
  dplyr::summarise_at(data, .vars = vars, .funs = fun)
}
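# Minimal usage sketch for check_missing(), assuming a toy data frame:
#   df <- data.frame(a = c(1, NA, 3), b = c(NA, NA, 6))
#   check_missing(df, a, b)                    # proportions: 1/3 and 2/3
#   check_missing(df, a, b, ret_prop = FALSE)  # counts: 1 and 2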
context("Ranges")
test_that("R6 inheritance works", {
expect_error(ContinuousRange$new(), NA)
expect_error(DiscreteRange$new(), NA)
expect_true(R6::is.R6(ContinuousRange$new()))
expect_true(R6::is.R6(DiscreteRange$new()))
})
test_that("Mutable ranges work", {
x <- ContinuousRange$new()
x$train(c(-1, 45, 10))
expect_equal(x$range, c(-1, 45))
x$train(c(1000))
expect_equal(x$range, c(-1, 1000))
x$reset()
expect_equal(x$range, NULL)
x <- DiscreteRange$new()
x$train(factor(letters[1:3]))
expect_equal(x$range, c("a", "b", "c"))
x$train(factor("a", "h"))
expect_equal(x$range, c("a", "b", "c", "h"))
x$reset()
expect_equal(x$range, NULL)
})
test_that("starting with NULL always returns new", {
expect_equal(discrete_range(NULL, 1:3), 1:3)
expect_equal(discrete_range(NULL, 3:1), 1:3)
expect_equal(discrete_range(NULL, c("a", "b", "c")), c("a", "b", "c"))
expect_equal(discrete_range(NULL, c("c", "b", "a")), c("a", "b", "c"))
f1 <- factor(letters[1:3], levels = letters[1:4])
expect_equal(discrete_range(NULL, f1, drop = FALSE), letters[1:4])
expect_equal(discrete_range(NULL, f1, drop = TRUE), letters[1:3])
f2 <- factor(letters[1:3], levels = letters[4:1])
expect_equal(discrete_range(NULL, f2, drop = FALSE), letters[4:1])
expect_equal(discrete_range(NULL, f2, drop = TRUE), letters[3:1])
})
test_that("factor discrete ranges stay in order", {
f <- factor(letters[1:3], levels = letters[3:1])
expect_equal(discrete_range(f, f), letters[3:1])
expect_equal(discrete_range(f, "c"), letters[3:1])
expect_equal(discrete_range(f, c("a", "b", "c")), letters[3:1])
})
context("Visualize clinical data with error bars")
library(plotly)
test_that("The data is correctly displayed in vertical error bars", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2"),
Mean = c(25.6, 40),
SE = c(2, 3),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE"
)
expect_s3_class(pl, "plotly")
plData <- plotly_build(pl)$x$data
plDataScatter <- plData[sapply(plData, function(x) x$type == "scatter")]
plDataScatterDf <- do.call(rbind,
lapply(plDataScatter, function(x)
data.frame(
x = as.character(x[["x"]]),
y = x[["y"]],
yError = x[["error_y"]]$array,
stringsAsFactors = FALSE
)
)
)
expect_equivalent(
object = plDataScatterDf,
expected = data[, c("AVISIT", "Mean", "SE")]
)
})
test_that("The data is correctly displayed in horizontal error bars", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2"),
Mean = c(25.6, 40),
SE = c(2, 3),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = data,
yVar = "AVISIT",
xVar = "Mean",
xErrorVar = "SE"
)
expect_s3_class(pl, "plotly")
plData <- plotly_build(pl)$x$data
plDataScatter <- plData[sapply(plData, function(x) x$type == "scatter")]
plDataScatterDf <- do.call(rbind,
lapply(plDataScatter, function(x)
data.frame(
x = x[["x"]],
xError = x[["error_x"]]$array,
y = as.character(x[["y"]]),
stringsAsFactors = FALSE
)
)
)
expect_equal(
object = plDataScatterDf,
expected = data[, c("Mean", "SE", "AVISIT")],
check.attributes = FALSE
)
})
test_that("Vertical error bars are correctly colored based on a specified variable", {
data <- data.frame(
AVISIT = factor(
c("Baseline", "Screening", "Baseline", "Screening"),
levels = c("Screening", "Baseline")
),
Mean = c(25.6, 40, 12, 5),
SE = c(2, 3, 1, 2),
TRT = c("A", "A", "B", "B"),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
colorVar = "TRT"
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plData <- do.call(rbind,
lapply(plData, function(x)
data.frame(
x = round(x[["x"]], 0),
y = x[["y"]],
yError = x[["error_y"]]$array,
color = as.character(x[["error_y"]]$color),
group = x[["name"]],
stringsAsFactors = FALSE
)
)
)
colors <- with(plData, tapply(color, group, unique))
expect_type(colors, "character")
expect_length(colors, 2)
plData <- plData[, c("x", "y", "yError", "group")]
dataReference <- data[, c("AVISIT", "Mean", "SE", "TRT")]
dataReference$AVISIT <- as.numeric(as.factor(dataReference$AVISIT))
expect_equivalent(
object = plData[do.call(order, plData), ],
expected = dataReference[do.call(order, dataReference), ]
)
plXAxis <- plotly_build(pl)$x$layout$xaxis
plXTickLab <- plXAxis$ticktext
plXTickLab <- plXTickLab[order(plXAxis$tickvals, decreasing = FALSE)]
expect_equal(object = unname(plXTickLab), c("Screening", "Baseline"))
})
test_that("Horizontal error bars are correctly colored based on a specified variable", {
data <- data.frame(
AVISIT = factor(
c("Baseline", "Screening", "Baseline", "Screening"),
levels = c("Screening", "Baseline")
),
Mean = c(25.6, 40, 12, 5),
SE = c(2, 3, 1, 2),
TRT = c("A", "A", "B", "B"),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = data,
yVar = "AVISIT",
xVar = "Mean",
xErrorVar = "SE",
colorVar = "TRT"
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plData <- do.call(rbind,
lapply(plData, function(x)
data.frame(
y = round(x[["y"]], 0),
x = x[["x"]],
xError = x[["error_x"]]$array,
color = as.character(x[["error_x"]]$color),
group = x[["name"]],
stringsAsFactors = FALSE
)
)
)
colors <- with(plData, tapply(color, group, unique))
expect_type(colors, "character")
expect_length(colors, 2)
plData <- plData[, c("y", "x", "xError", "group")]
dataReference <- data[, c("AVISIT", "Mean", "SE", "TRT")]
dataReference$AVISIT <- as.numeric(as.factor(dataReference$AVISIT))
dataReference$AVISIT <- max(dataReference$AVISIT)-dataReference$AVISIT+1
expect_equivalent(
object = plData[do.call(order, plData), ],
expected = dataReference[do.call(order, dataReference), ]
)
plYAxis <- plotly_build(pl)$x$layout$yaxis
plYTickLab <- plYAxis$ticktext
plYTickLab <- plYTickLab[order(plYAxis$tickvals, decreasing = TRUE)]
expect_equal(object = unname(plYTickLab), c("Screening", "Baseline"))
})
test_that("A color palette is correctly set in the errorbar visualization", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2", "Baseline", "Week 2"),
Mean = c(25.6, 40, 12, 5),
SE = c(2, 3, 1, 2),
TRT = c("A", "A", "B", "B"),
stringsAsFactors = FALSE
)
colorPalette <- c(B = "blue", A = "red")
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
colorVar = "TRT", colorPalette = colorPalette
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plColorPalette <- do.call(c,
lapply(plData, function(x)
setNames(
as.character(x[["error_y"]]$color),
x[["name"]]
)
)
)
colorPaletteRGB <- col2rgb(colorPalette)
colorPaletteRGBA <- paste0(
"rgba(",
apply(colorPaletteRGB, 2, paste, collapse = ","),
",1)"
)
names(colorPaletteRGBA) <- names(colorPalette)
expect_mapequal(object = plColorPalette, expected = colorPaletteRGBA)
})
test_that("An interactive table is correctly included in the errorbar visualization", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2"),
Mean = c(25.6, 40),
SE = c(2, 3),
stringsAsFactors = FALSE
)
res <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
table = TRUE
)
expect_s3_class(res$table, "datatables")
dataTable <- res$table$x$data[, colnames(data)]
dataTable$AVISIT <- as.character(dataTable$AVISIT)
expect_equal(
object = dataTable,
expected = data[, colnames(data)]
)
})
test_that("Specified variables for the interactive table are correctly included in the errorbar visualization", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2"),
Mean = c(25.6, 40),
SE = c(2, 3),
TRT = "A",
stringsAsFactors = FALSE
)
tableVars <- c("TRT", "AVISIT", "Mean")
res <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
table = TRUE,
tableVars = tableVars
)
expect_s3_class(res$table, "datatables")
dataTable <- res$table$x$data[, tableVars]
dataTable[, c("TRT", "AVISIT")] <- lapply(dataTable[, c("TRT", "AVISIT")], as.character)
expect_identical(
object = dataTable,
expected = data[, tableVars]
)
})
test_that("Labels are correctly extracted for the hover", {
data <- data.frame(
AVISIT = "Baseline",
Mean = 25.6,
SE = 2,
stringsAsFactors = FALSE
)
labelVars <- c(
AVISIT = "Actual Visit",
"Mean" = "Observed Mean",
SE = "Standard Error"
)
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
labelVars = labelVars
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plData <- do.call(rbind,
lapply(plData, function(x)
data.frame(
x = x[["x"]],
y = x[["y"]],
yError = x[["error_y"]]$array,
hover = x[["hovertemplate"]],
stringsAsFactors = FALSE
)
)
)
expect_match(
object = subset(plData, x == "Baseline" & y == 25.6)$hover,
regexp = "Actual Visit: Baseline.+Observed Mean: 25.6.+Standard Error: 2"
)
})
test_that("Labels for the x-axis are correctly set from variables", {
dataPlot <- data.frame(
AVISIT = factor(
c("Week 2", "Screening"),
levels = c("Screening", "Baseline", "Week 2")
),
Mean = c(12, 15),
SE = c(1, 2),
n = c("N = 3", "N = 5"),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = dataPlot,
xVar = "AVISIT", xLabVars = c("AVISIT", "n"),
yVar = "Mean", yErrorVar = "SE"
)
plXAxis <- plotly_build(pl)$x$layout$xaxis
expect_equal(plXAxis$tickvals, c(1, 2))
plXTickLab <- plXAxis$ticktext
plXTickLab <- plXTickLab[order(plXAxis$tickvals, decreasing = FALSE)]
expect_match(object = plXTickLab[1], regexp = "Screening.+N = 5")
expect_match(object = plXTickLab[2], regexp = "Week 2.+N = 3")
})
test_that("Symbols correctly set based on a specified variable", {
data <- data.frame(
AVISIT = factor(
c("Baseline", "Screening", "Baseline", "Screening"),
levels = c("Screening", "Baseline")
),
Mean = c(25.6, 40, 12, 5),
SE = c(2, 3, 1, 2),
TRT = c("A", "A", "B", "B"),
stringsAsFactors = FALSE
)
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
shapeVar = "TRT"
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plData <- do.call(rbind,
lapply(plData, function(x)
data.frame(
x = x[["x"]],
y = x[["y"]],
yError = x[["error_y"]]$array,
shape = as.character(x[["marker"]]$symbol),
group = x[["name"]],
stringsAsFactors = FALSE
)
)
)
shapes <- with(plData, tapply(shape, group, unique))
expect_type(shapes, "character")
expect_length(shapes, 2)
plData <- plData[, c("x", "y", "yError", "group")]
dataReference <- data[, c("AVISIT", "Mean", "SE", "TRT")]
expect_equivalent(
object = plData[do.call(order, plData), ],
expected = dataReference[do.call(order, dataReference), ]
)
})
test_that("The points are correctly shaped with a specified palette", {
data <- data.frame(
AVISIT = c("Baseline", "Week 2", "Baseline", "Week 2"),
Mean = c(25.6, 40, 12, 5),
SE = c(2, 3, 1, 2),
TRT = c("A", "A", "B", "B"),
stringsAsFactors = FALSE
)
shapePalette <- c(B = "diamond", A = "cross")
pl <- errorbarClinData(
data = data,
xVar = "AVISIT",
yVar = "Mean",
yErrorVar = "SE",
shapeVar = "TRT", shapePalette = shapePalette
)
plData <- plotly_build(pl)$x$data
plData <- plData[sapply(plData, function(x) x$type == "scatter")]
plShapePalette <- do.call(c,
lapply(plData, function(x)
setNames(
as.character(x[["marker"]]$symbol),
x[["name"]]
)
)
)
expect_mapequal(object = plShapePalette, expected = shapePalette)
})
as_survey_twophase <- function(.data, ...) {
UseMethod("as_survey_twophase")
}
as_survey_twophase.data.frame <-
function(.data, id, strata = NULL, probs = NULL,
weights = NULL, fpc = NULL, subset,
method = c("full", "approx", "simple"), ...) {
id <- srvyr_select_vars_list(rlang::enquo(id), .data)
strata <- srvyr_select_vars_list(rlang::enquo(strata), .data)
probs <- srvyr_select_vars_list(rlang::enquo(probs), .data)
weights <- srvyr_select_vars_list(rlang::enquo(weights), .data)
fpc <- srvyr_select_vars_list(rlang::enquo(fpc), .data)
subset <- srvyr_select_vars(rlang::enquo(subset), .data)
out <- survey::twophase(
data = .data,
id = id,
strata = strata,
probs = probs,
weights = weights,
fpc = fpc,
subset = subset,
method = method
)
as_tbl_svy(out, list(ids = id, strata = strata, probs = probs,
weights = weights, fpc = fpc, subset = subset))
}
as_survey_twophase.twophase2 <- function(.data, ...) {
as_tbl_svy(.data)
}
as_survey_twophase_ <- function(.data, id, strata = NULL, probs = NULL,
weights = NULL, fpc = NULL, subset,
method = c("full", "approx", "simple")) {
as_survey_twophase(
.data,
id = !!n_compat_lazy(id),
strata = !!n_compat_lazy(strata),
probs = !!n_compat_lazy(probs),
weights = !!n_compat_lazy(weights),
fpc = !!n_compat_lazy(fpc),
subset = !!n_compat_lazy(subset),
method = method
)
}
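# Minimal usage sketch, adapted from the two-phase example in
# ?survey::twophase (the pbc data ship with the survival package):
#   data(pbc, package = "survival")
#   pbc$randomized <- with(pbc, !is.na(trt) & trt > 0)
#   pbc$id <- seq_len(nrow(pbc))
#   d2pbc <- as_survey_twophase(pbc, id = list(id, id), subset = randomized)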
p.oGFisher = function(p, DF, W, M, p.type="two", method="HYB", combine="cct", nsim=NULL, seed=NULL){
  # pass the caller's settings through instead of hard-coding the defaults
  out = stat.oGFisher(p, DF, W, M, p.type=p.type, method=method, nsim=nsim, seed=seed)
  if(combine=="cct"){
    # Cauchy combination test: map the CCT statistic to a p-value
    thr = out$cct
    pval = 0.5 - atan(thr)/pi
  }else{
    # minP combination: needs the joint null distribution of the statistics
    thr = out$minp
    nd = dim(DF)[1]
    COR_GFisher = getGFisherCOR(DD=DF, M=M, p.type=p.type)
    # pmvnorm() is provided by the mvtnorm package
    pval = 1 - pmvnorm(lower=rep(-Inf,nd), upper=qnorm(1-thr), mean=rep(0,nd), corr=COR_GFisher)[1]
  }
  return(list(pval=pval, pval_indi=out$PVAL))
}
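# Minimal usage sketch (inputs are illustrative assumptions): p is a vector
# of p-values; DF and W are matrices with one row per GFisher statistic,
# giving its degrees-of-freedom settings and weights; M is the correlation
# matrix of the underlying test statistics:
#   n <- 10
#   p.oGFisher(p = runif(n), DF = rbind(rep(1, n), rep(2, n)),
#              W = rbind(rep(1, n), rep(1, n)), M = diag(n), combine = "cct")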
`person.reports` <-
function(LC, n.c, ud, treat.extreme, max.iter, conv.crit, steps, as.LCA){
matcher <- match(ud$people,ud$x.x)
matcher.d <- match(ud$people,ud$degen.x.x)
for(c in 1:n.c){
namer <- c("theta", "SE.theta", "r", "infit", "in.Z", "outfit", "out.Z")
suppressWarnings(LC[[c]]$person.par <- data.frame(cbind(LC[[c]]$person.par$theta[matcher],
LC[[c]]$person.par$SE.theta[matcher], ud$r[matcher],
LC[[c]]$person.par$in.out[matcher,])) )
colnames(LC[[c]]$person.par) <- namer
if(length(ud$degen.r) > 0){
skiprest <- FALSE
ud$degen.r <- ifelse(ud$degen.r == 0, ud$degen.r + treat.extreme, ud$degen.r - treat.extreme)
degen.theta <- rep(0,length(ud$degen.r))
if(! as.LCA) for(its in 1:max.iter) {
odt <- degen.theta
degen.theta <- deg.theta(LC[[c]], ud$degen.r, ud$degen.x.i, degen.theta, steps)
if(any(is.na(degen.theta))) { warning("Extreme person estimates did not converge \n")
skiprest <- TRUE
break}
if(abs(max(odt - degen.theta)) < conv.crit) break
}
if(! skiprest){
LC[[c]]$person.par$theta[! is.na(matcher.d)] <- degen.theta[na.omit(matcher.d)]
Pxji <- array(apply(LC[[c]]$item.par$delta,2,P.xj, th=degen.theta),
dim=c(steps,length(degen.theta),LC[[c]]$i.stat$n.i))
LC[[c]]$person.par$SE.theta[! is.na(matcher.d)] <- (-1*d.v(Pxji, ud$degen.r,
matrix(! is.na(ud$degen.x.i), nrow=length(ud$degen.r)))$d2[na.omit(matcher.d)])^(-.5)
}
}
}
LC
}
all_continuous <- function(continuous2 = TRUE) {
if (continuous2 == TRUE) {
return(broom.helpers::all_continuous())
} else {
return(
.generic_selector("variable", "var_type",
.data$var_type %in% "continuous",
fun_name = "all_continuous"
)
)
}
}
all_continuous2 <- function() {
.generic_selector("variable", "var_type",
.data$var_type %in% "continuous2",
fun_name = "all_continuous"
)
}
all_categorical <- broom.helpers::all_categorical
all_dichotomous <- broom.helpers::all_dichotomous
all_tests <- function(tests = NULL) {
if (is.null(tests) || !is.character(tests) || any(!tests %in% df_add_p_tests$test_name)) {
paste(
"The `tests=` argument must be one or more of the following:",
paste(shQuote(df_add_p_tests$test_name), collapse = ", ")
) %>%
stop(call. = FALSE)
}
.generic_selector("variable", "test_name",
.data$test_name %in% .env$tests,
fun_name = "all_tests"
)
}
all_stat_cols <- function(stat_0 = TRUE) {
if (stat_0 == TRUE) {
return(
union(
dplyr::matches("^stat_\\d+$"),
dplyr::matches("^stat_\\d+_.*$")
)
)
}
if (stat_0 == FALSE) {
return(
union(
dplyr::matches("^stat_\\d*[1-9]\\d*$"),
dplyr::matches("^stat_\\d*[1-9]\\d*_.*$")
)
)
}
}
all_interaction <- broom.helpers::all_interaction
all_intercepts <- broom.helpers::all_intercepts
all_contrasts <- broom.helpers::all_contrasts
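# Minimal usage sketch for the selectors above, assuming a gtsummary table
# built from the package's `trial` example data:
#   tbl <- gtsummary::tbl_summary(gtsummary::trial, by = trt)
#   gtsummary::modify_header(tbl, all_stat_cols() ~ "**{level}**")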
setupV <- function(fit, f, xvar, nn, cond, type, trans, xtrans, alpha, jitter, by, yName, ...) {
if (length(xvar) > 1 & length(cond) > 1) stop("Cannot specify 'by' and multiple x variables simultaneously", call.=FALSE)
J <- max(length(xvar), length(cond))
Attempt <- try(max(attr(terms(as.formula(formula(fit))), "order")) > 1, silent=TRUE)
hasInteraction <- ifelse(inherits(Attempt, 'try-error'), FALSE, Attempt)
lev <- attr(cond, "lev")
xy <- vector("list", J)
for (j in 1:J) {
cond.j <- if (length(cond) > 1) cond[[j]] else cond[[1]]
name <- if (length(xvar) > 1) xvar[j] else xvar
xy[[j]] <- getXY(fit, f, name, nn, cond.j, type, trans, xtrans, alpha, jitter, ...)
}
if (!missing(by)) xy <- subsetV(xy, f, by, lev, type)
meta <- list(x=xvar, y=xy[[1]]$y$name, hasInteraction=hasInteraction, yName=yName, trans=trans, class=class(fit))
K <- xy[[1]]$y$n
if (K==1) {
if (!missing(by)) {
meta$by <- by
v <- list(fit=NULL, res=NULL, meta=meta)
for (j in 1:length(xy)) {
fit.j <- data.frame(xy[[j]]$x$DD, visregFit=xy[[j]]$y$fit, visregLwr=xy[[j]]$y$lwr, visregUpr=xy[[j]]$y$upr)
res.j <- data.frame(xy[[j]]$x$D, visregRes=xy[[j]]$y$r, visregPos=xy[[j]]$y$pos)
fit.j[, xvar] <- xy[[j]]$x$xx
res.j[, xvar] <- xy[[j]]$x$x
v$fit <- rbind(v$fit, fit.j)
v$res <- rbind(v$res, res.j)
}
class(v) <- "visreg"
} else {
v <- vector("list", J)
for (j in 1:J) {
meta.j <- meta
meta.j$x <- xvar[j]
v[[j]] <- list(fit=data.frame(xy[[j]]$x$DD, visregFit=xy[[j]]$y$fit, visregLwr=xy[[j]]$y$lwr, visregUpr=xy[[j]]$y$upr),
res=data.frame(xy[[j]]$x$D, visregRes=xy[[j]]$y$r, visregPos=xy[[j]]$y$pos),
meta=meta.j)
v[[j]]$fit[, xvar[j]] <- xy[[j]]$x$xx
v[[j]]$res[, xvar[j]] <- xy[[j]]$x$x
class(v[[j]]) <- "visreg"
}
if (J==1) {
v <- v[[1]]
} else {
class(v) <- "visregList"
}
}
} else {
if (!missing(by)) {
meta$by <- by
v <- vector("list", K)
for (k in 1:K) {
meta.k <- meta
meta.k$y <- meta$y[k]
meta.k$yName <- meta$yName[k]
v[[k]] <- list(fit=NULL, res=NULL, meta=meta.k)
for (j in 1:J) {
fit.jk <- data.frame(xy[[j]]$x$DD, visregFit=xy[[j]]$y$fit[,k], visregLwr=xy[[j]]$y$lwr[,k], visregUpr=xy[[j]]$y$upr[,k])
res.jk <- data.frame(xy[[j]]$x$D, visregRes=xy[[j]]$y$r[,k], visregPos=xy[[j]]$y$pos[,k])
fit.jk[, xvar] <- xy[[j]]$x$xx
res.jk[, xvar] <- xy[[j]]$x$x
v[[k]]$fit <- rbind(v[[k]]$fit, fit.jk)
v[[k]]$res <- rbind(v[[k]]$res, res.jk)
}
class(v[[k]]) <- "visreg"
}
class(v) <- "visregList"
} else {
v <- vector("list", J*K)
for (j in 1:J) {
for (k in 1:K) {
meta.jk <- meta
meta.jk$x <- meta$x[j]
meta.jk$y <- meta$y[k]
meta.jk$yName <- meta$yName[k]
l <- (j-1)*K + k
v[[l]] <- list(fit=data.frame(xy[[j]]$x$DD, visregFit=xy[[j]]$y$fit[,k], visregLwr=xy[[j]]$y$lwr[,k], visregUpr=xy[[j]]$y$upr[,k]),
res=data.frame(xy[[j]]$x$D, visregRes=xy[[j]]$y$r[,k], visregPos=xy[[j]]$y$pos[,k]),
meta=meta.jk)
v[[l]]$fit[, xvar[j]] <- xy[[j]]$x$xx
v[[l]]$res[, xvar[j]] <- xy[[j]]$x$x
class(v[[l]]) <- "visreg"
}
}
class(v) <- "visregList"
}
}
v
}
neldermead.isrkelley <- function(this=this){
istorestart <- FALSE
if (this$kelleystagnationflag){
status <- optimbase.get(this=this$optbase,key='status')
if (status =='kelleystagnation')
istorestart <- TRUE
}
varargout <- list(this=this,istorestart=istorestart)
return(varargout)
}
makeAirrClone <-
function(data, id="sequence_id", seq="sequence_alignment",
germ="germline_alignment_d_mask", v_call="v_call", j_call="j_call",
junc_len="junction_length", clone="clone_id", mask_char="N",
max_mask=0, pad_end=TRUE, text_fields=NULL, num_fields=NULL, seq_fields=NULL,
add_count=TRUE, verbose=FALSE, collapse=TRUE, chain="H", heavy=NULL,
cell="cell_id", locus="locus", traits=NULL, mod3=TRUE, randomize=TRUE,
use_regions=TRUE, dup_singles=FALSE){
check <- alakazam::checkColumns(data,
unique(c(id, seq, germ, v_call, j_call, junc_len, clone,
text_fields, num_fields, seq_fields, traits)))
if (check != TRUE) { stop(check) }
if(chain=="HL"){
check <- alakazam::checkColumns(data, c(cell,locus))
if (check != TRUE) { stop(check) }
if(is.null(heavy)){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"heavy chain loci ID must be specified if combining loci!"))
}
text_fields <- text_fields[text_fields != rlang::sym(cell)]
text_fields <- text_fields[text_fields != rlang::sym(locus)]
seq_fields <- seq_fields[seq_fields != rlang::sym(cell)]
seq_fields <- seq_fields[seq_fields != rlang::sym(locus)]
tmp_df <- data[, unique(c(id, seq, junc_len, text_fields, num_fields,
seq_fields, cell, locus, traits))]
tmp_df[[seq]] <- alakazam::maskSeqGaps(tmp_df[[seq]], mask_char=mask_char,
outer_only=FALSE)
hc <- dplyr::filter(tmp_df,!!rlang::sym(locus)==rlang::sym(heavy))
alt <- dplyr::filter(tmp_df,!!rlang::sym(locus)!=rlang::sym(heavy))
if(nrow(hc) == 0){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"heavy chain locus not found in dataset!"))
}
if(nrow(alt) == 0){
chain <- "H"
}else{
if(length(unique(dplyr::pull(alt,!!locus))) > 1){
stop(paste("clone",paste(unique(dplyr::pull(data,clone)),collapse=""),
"currently only one alternate loci per clone supported"))
}
}
}else{
tmp_df <- data[, unique(c(id, seq, text_fields, num_fields, seq_fields, traits))]
tmp_df[[seq]] <- alakazam::maskSeqGaps(tmp_df[[seq]], mask_char=mask_char,
outer_only=FALSE)
}
if(chain=="HL"){
hc[[seq]] <- alakazam::maskSeqEnds(hc[[seq]], mask_char=mask_char,
max_mask=max_mask, trim=FALSE)
alt[[seq]] <- alakazam::maskSeqEnds(alt[[seq]], mask_char=mask_char,
max_mask=max_mask, trim=FALSE)
if(pad_end) {
hc[[seq]] <- alakazam::padSeqEnds(hc[[seq]], pad_char=mask_char, mod3=mod3)
alt[[seq]] <- alakazam::padSeqEnds(alt[[seq]], pad_char=mask_char, mod3=mod3)
}
hc_length <- unique(nchar(dplyr::pull(hc,rlang::sym(seq))))
alt_length <- unique(nchar(dplyr::pull(alt,rlang::sym(seq))))
if(length(hc_length) > 1){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"Heavy chain sequences must be same length!"))
}
if(length(alt_length) > 1){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"Light chain sequences must be same length!"))
}
hc$lsequence <- ""
hc$hlsequence <- ""
for(cell_name in unique(dplyr::pull(hc,!!rlang::sym(cell)))){
if(!cell_name %in% dplyr::pull(alt,rlang::sym(cell))){
altseq <- paste(rep(mask_char,alt_length),collapse="")
}else{
altseq <- dplyr::pull(dplyr::filter(alt,
!!rlang::sym(cell) == cell_name),rlang::sym(seq))
}
hc[dplyr::pull(hc,!!rlang::sym(cell)) == cell_name,]$lsequence <- altseq
hc[dplyr::pull(hc,!!rlang::sym(cell)) == cell_name,]$hlsequence <-
paste0(hc[dplyr::pull(hc,!!rlang::sym(cell)) == cell_name,seq],altseq)
}
hcd <- dplyr::filter(data,!!rlang::sym(locus)==rlang::sym(heavy))
altd <- dplyr::filter(data,!!rlang::sym(locus)!=rlang::sym(heavy))
germline <- alakazam::maskSeqGaps(hcd[[germ]][1], mask_char=mask_char,
outer_only=FALSE)
lgermline <- alakazam::maskSeqGaps(altd[[germ]][1], mask_char=mask_char,
outer_only=FALSE)
if(pad_end){
germline <- alakazam::padSeqEnds(germline, pad_char=mask_char, mod3=mod3)
lgermline <- alakazam::padSeqEnds(lgermline, pad_char=mask_char, mod3=mod3)
}
hlgermline <- paste0(germline,lgermline)
tmp_df <- hc
loci <- unique(dplyr::pull(data,!!locus))
tmp_df[[locus]] <- NULL
tmp_df[[locus]] <- paste(loci,collapse=",")
chains <- c(rep(unique(dplyr::pull(hc,!!locus)),times=hc_length),
rep(unique(dplyr::pull(alt,!!locus)),times=alt_length))
numbers <- c(1:hc_length,1:alt_length)
if(use_regions){
hregions <- as.character(
shazam::setRegionBoundaries(unique(hc[[junc_len]]),
germline,
shazam::IMGT_VDJ_BY_REGIONS)@boundaries)
lregions <- as.character(
shazam::setRegionBoundaries(unique(alt[[junc_len]]),
lgermline,
shazam::IMGT_VDJ_BY_REGIONS)@boundaries)
regions <- c(hregions, lregions)
}else{
regions <- rep("N", times=nchar(hlgermline))
}
if(length(chains) != unique(nchar(tmp_df$hlsequence))){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"chains vector not equal to total sequence length!"))
}
if(length(chains) != nchar(hlgermline)){
stop(paste("clone",unique(dplyr::pull(data,clone)),
"chains vector not equal to germline sequence length!"))
}
new_seq <- "hlsequence"
}else{
tmp_df[[seq]] <- alakazam::maskSeqEnds(tmp_df[[seq]],
mask_char=mask_char, max_mask=max_mask, trim=FALSE)
if(pad_end){
tmp_df[[seq]] <- alakazam::padSeqEnds(tmp_df[[seq]],
pad_char=mask_char, mod3=mod3)
}
germline <- alakazam::maskSeqGaps(data[[germ]][1],
mask_char=mask_char, outer_only=FALSE)
if(pad_end){
germline <- alakazam::padSeqEnds(germline,
pad_char=mask_char, mod3=mod3)
}
check <- alakazam::checkColumns(data, c(locus))
if(check == TRUE){
loci <- unique(dplyr::pull(data,locus))
if(length(loci) > 1){
warning(paste("clone",unique(dplyr::pull(data,clone)),
"mutliple loci present but not dealt with!"))
loci <- paste(loci,collapse=",")
}
}else{
loci <- "N"
}
chains <- rep(loci,times=nchar(germline))
numbers <- 1:nchar(germline)
lgermline <- ""
hlgermline <- germline
tmp_df$lsequence <- ""
tmp_df$hlsequence <- tmp_df[[seq]]
new_seq <- seq
if(use_regions){
regions <- as.character(
shazam::setRegionBoundaries(unique(data[[junc_len]]),
germline,
shazam::IMGT_VDJ_BY_REGIONS)@boundaries)
}else{
regions <- rep("N", times=nchar(germline))
}
}
seq_len <- nchar(tmp_df[[seq]])
if(any(seq_len != seq_len[1])){
len_message <- paste0("All sequences are not the same length for data with first ",
id, " = ", tmp_df[[id]][1], ".")
if (!pad_end) {
len_message <- paste(len_message,
"Consider specifying pad_end=TRUE and verify the multiple alignment.")
} else {
len_message <- paste(len_message,
"Verify that all sequences are properly multiple-aligned.")
}
stop(len_message)
}
if(collapse){
if(is.null(traits)){
tmp_df <- alakazam::collapseDuplicates(tmp_df, id=id, seq=new_seq,
text_fields=text_fields,
num_fields=num_fields, seq_fields=seq_fields,
add_count=add_count, verbose=verbose)
}else{
tmp_df <- tmp_df %>%
dplyr::group_by_at(dplyr::vars(tidyselect::all_of(traits))) %>%
dplyr::do(alakazam::collapseDuplicates(!!rlang::sym("."), id=id, seq=new_seq,
text_fields=text_fields,
num_fields=num_fields, seq_fields=seq_fields,
add_count=add_count, verbose=verbose)) %>%
dplyr::ungroup()
}
}
if(randomize){
tmp_df <- tmp_df[sample(1:nrow(tmp_df),replace=FALSE),]
}
tmp_names <- names(tmp_df)
if ("sequence" %in% tmp_names & seq != "sequence") {
tmp_df <- tmp_df[, tmp_names != "sequence"]
tmp_names <- names(tmp_df)
}
names(tmp_df)[tmp_names == seq] <- "sequence"
names(tmp_df)[tmp_names == id] <- "sequence_id"
if(chain=="HL"){
phylo_seq <- "hlsequence"
}else if(chain=="L"){
phylo_seq <- "lsequence"
}else{
phylo_seq <- "sequence"
}
if(nrow(tmp_df) == 1 && dup_singles){
tmp_df2 <- tmp_df
tmp_df2[[id]] <- paste0(tmp_df[[id]],"_DUPLICATE")
tmp_df <- bind_rows(tmp_df, tmp_df2)
}
outclone <- new("airrClone",
data=as.data.frame(tmp_df),
clone=as.character(data[[clone]][1]),
germline=alakazam::maskSeqGaps(germline, mask_char=mask_char,
outer_only=FALSE),
lgermline=alakazam::maskSeqGaps(lgermline, mask_char=mask_char,
outer_only=FALSE),
hlgermline=alakazam::maskSeqGaps(hlgermline, mask_char=mask_char,
outer_only=FALSE),
v_gene=alakazam::getGene(data[[v_call]][1]),
j_gene=alakazam::getGene(data[[j_call]][1]),
junc_len=data[[junc_len]][1],
locus=chains,
region=regions,
numbers=numbers,
phylo_seq=phylo_seq)
outclone
}
cleanAlignment <- function(clone, seq="sequence"){
if(seq=="hlsequence"){
g <- strsplit(clone@hlgermline[1],split="")[[1]]
}else{
g <- strsplit(clone@germline[1],split="")[[1]]
}
sk <- strsplit(clone@data[[seq]],split="")
sites=seq(1,length(g)-3,by=3)
ns <- c()
for(i in sites){
l=lapply(sk,function(x) paste(x[i:(i+2)],collapse="")=="NNN")
ns <- c(ns,sum(unlist(l)),sum(unlist(l)),sum(unlist(l)))
}
informative <- ns != length(sk)
l=lapply(sk,function(x) x=paste(x[informative],collapse=""))
gm=paste(g[informative],collapse="")
if(.hasSlot(clone,"locus")){
clone@locus <- clone@locus[informative]
}
if(.hasSlot(clone,"region")){
clone@region <- clone@region[informative]
}
if(.hasSlot(clone,"numbers")){
clone@numbers <- clone@numbers[informative]
}
if(seq=="hlsequence"){
clone@hlgermline=gm
}else{
clone@germline=gm
}
clone@data[[seq]]=unlist(l)
return(clone)
}
formatClones <- function(data, seq="sequence_alignment", clone="clone_id",
subclone="subclone_id",
nproc=1, chain="H", heavy="IGH", cell="cell_id",
locus="locus", minseq=2, split_light=FALSE, majoronly=FALSE,
columns=NULL, ...) {
if(majoronly){
if(!subclone %in% names(data)){
stop("Need subclone designation if majoronly=TRUE")
}
data <- filter(data, !!rlang::sym(subclone) <= 1)
}
if(chain == "H"){
if(!is.null(heavy)){
if(locus %in% names(data)){
data <- filter(data, !!rlang::sym(locus) == rlang::sym(heavy))
}
}
}
if(chain == "HL"){
if(!subclone %in% names(data)){
stop("Need subclone designation for heavy+light chain clones")
}
if(!locus %in% names(data)){
stop("Need locus designation for heavy+light chain clones")
}
if(is.null(heavy)){
stop("Need heavy chain (heavy) designation for heavy+light chain clones")
}
lcells <- filter(data,!!rlang::sym(locus)!=rlang::sym(heavy))[[cell]]
hcells <- filter(data,!!rlang::sym(locus)==rlang::sym(heavy))[[cell]]
nohcells <- lcells[!lcells %in% hcells]
if(length(nohcells) > 0){
data <- filter(data,!(!!rlang::sym(cell) %in% nohcells))
warning(paste("Removed",length(nohcells),
"cells with no heavy chain information"))
}
}
if(split_light){
if(!subclone %in% names(data)){
stop("Need subclone designation for heavy+light chain clones")
}
if(sum(data[[subclone]] == 0) > 0){
warning("Assigning subclone 0 (missing light chain) to subclone 1")
data[data[[subclone]] == 0,][[subclone]] <- 1
}
data[[clone]] <- paste0(data[[clone]],"_",data[[subclone]])
}else if(!split_light && chain=="HL"){
data <- filter(data, !(!!rlang::sym(locus) != rlang::sym(heavy) &
!!rlang::sym(subclone) > 1))
}
if(!is.null(columns)){
if(sum(!columns %in% names(data)) != 0){
stop(paste("column",
paste(columns[!columns %in% names(data)]),
"not in data table!"))
}
}
if(sum(is.na(data[[seq]])) > 0){
warning(paste("Removing",sum(is.na(data[[seq]]))
,"with missing sequences"))
}
counts <- table(data[[clone]])
rmclones <- names(counts[counts < minseq])
data <- data[!data[[clone]] %in% rmclones,]
data <- data[!is.na(data[[seq]]),]
clones <- data %>%
dplyr::group_by(!!rlang::sym(clone)) %>%
dplyr::do(data=makeAirrClone(.data, seq=seq,
clone=clone, chain=chain, heavy=heavy, cell=cell,...))
if(chain == "HL"){
seq_name <- "hlsequence"
}else{
seq_name <- "sequence"
}
fclones <- processClones(clones, nproc=nproc, seq=seq_name, minseq=minseq)
if(clone != "clone_id"){
fclones$clone_id <- fclones[[clone]]
fclones <- dplyr::select(fclones, -!!clone)
}
colpaste <- function(x){
s <- sort(unique(x))
if(length(s) > 1){
paste(s,collapse=",")
}else{
s
}
}
if(!is.null(columns)){
d <- data %>%
dplyr::select(!!rlang::sym(clone),dplyr::all_of(columns)) %>%
dplyr::group_by(!!rlang::sym(clone)) %>%
dplyr::summarize(dplyr::across(dplyr::all_of(columns), dplyr::n_distinct)) %>%
dplyr::ungroup() %>%
dplyr::summarize(dplyr::across(dplyr::all_of(columns),max)) %>%
unlist()
multi <- names(d[d > 1])
if(length(multi) > 0){
warning(paste("columns",paste(multi,collapse=" "),
"contain multiple values per clone, flattening with comma"))
}
d <- data %>%
dplyr::select(!!rlang::sym(clone),columns) %>%
dplyr::group_by(!!rlang::sym(clone)) %>%
dplyr::summarize(dplyr::across(columns, colpaste))
m <- match(fclones[[clone]],d[[clone]])
fclones[,columns] <- d[m,columns]
}
fclones
}
maskCodons <- function(id, q, s, keep_alignment=FALSE, gap_opening=5,
gap_extension=1, keep_insertions=FALSE, mask=TRUE){
results <- list(
sequence_id =id,
sequence_masked="",
masking_note="",
insertions="",
subject_alignment="",
query_alignment="")
sg <- gsub("\\.\\.\\.","",s)
if(sg == q || !mask){
results$subject_alignment <- sg
results$query_alignment <- q
results$sequence_masked_v <- s
return(results)
}
gaps <- stringr::str_locate_all(s,"\\.\\.\\.")
sgf <- gsub("---", "", sg)
if(grepl("-",sgf)){
results$sequence_masked_v <- NA
results$masking_note <- "Frameshift in sequence"
return(results)
}
sg <- gsub("---", "XXX", sg)
n <- Biostrings::pairwiseAlignment(q, sg, type="global",
gapOpening=gap_opening, gapExtension=gap_extension)
qa <- as.character(n@pattern)
sa <- as.character(n@subject)
if(keep_alignment){
results$subject_alignment <- sa
results$query_alignment <- qa
}
if(keep_insertions){
insertions <- stringr::str_locate_all(sa,"\\-+")[[1]]
indels <- ""
if(nrow(insertions) > 0){
for(i in 1:nrow(insertions)){
ins <- substr(qa,insertions[i,1],insertions[i,2])
if(i == 1){
indels <- paste0(insertions[i,1],"-",ins)
}else{
indels <- paste0(indels,",",
paste0(insertions[i,1],"-",ins))
}
}
}
results$insertions <- indels
}
if(grepl("^\\.\\.",sg)){
sa <- paste0("..",sa)
if(keep_alignment){
results$subject_alignment <- sa
}
}else if(grepl("^\\.",sg)){
sa <- paste0(".",sa)
if(keep_alignment){
results$subject_alignment <- sa
}
}
if(nchar(sa) < nchar(sg) || nchar(qa) < nchar(q)){
results$sequence_masked_v <- NA
results$masking_note <- "Alignment error"
return(results)
}
sgf <- gsub("---", "", sa)
if(grepl("-",sgf)){
results$sequence_masked_v <- NA
results$masking_note <- "Frameshift after alignment"
return(results)
}
if(qa != sa && grepl("\\-",sa)){
sas <- strsplit(sa,split="")[[1]]
mask <- c()
nseq <- c()
for(j in 1:ceiling(length(sas)/3)){
index <- (j-1)*3 + 1
triple <- paste0(sas[index:(index+2)],collapse="")
triple <- gsub("NA","",triple)
m <- 0
if(grepl("\\-",triple)){
triple <- gsub("[A-Z]","N",triple)
triple <- gsub("\\-","",triple)
if(nchar(triple) != 0){
m <- 1
}
}
mask <- c(mask,m)
nseq <- c(nseq,triple)
}
maskseq <- paste0(nseq,collapse="")
if(nchar(maskseq) != nchar(sg)){
print(paste(maskseq,"\n",sg))
stop("Sequence masking failed")
}
sequence_alignment <- maskseq
if(nrow(gaps[[1]]) > 0){
for(j in 1:nrow(gaps[[1]])){
sequence_alignment <-
paste0(substr(sequence_alignment,1,gaps[[1]][j,1]-1),
"...",substr(sequence_alignment,gaps[[1]][j,1],
nchar(sequence_alignment)))
}
}
sequence_alignment <- gsub("X","-",sequence_alignment)
if(alakazam::seqDist(sequence_alignment,s) != 0){
print(paste(sequence_alignment,"\n",s))
stop("Adding gaps failed")
}
results$sequence_masked_v <- sequence_alignment
if(sum(mask == 1) > 0){
results$masking_note <-
paste(which(mask == 1),collapse=",")
}
return(results)
}else{
results$sequence_masked_v <- s
return(results)
}
}
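# Minimal usage sketch for maskCodons() with toy sequences (the IMGT-style
# "..." gaps in the subject are illustrative assumptions, not real data):
#   maskCodons(id = "seq1", q = "ATCGATCGATCG", s = "ATC...GATCGATCG")
# returns a list with sequence_masked_v, masking_note, and the alignments.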
maskSequences <- function(data, sequence_id = "sequence_id", sequence = "sequence",
sequence_alignment="sequence_alignment", v_sequence_start = "v_sequence_start",
v_sequence_end = "v_sequence_end", v_germline_start = "v_germline_start",
v_germline_end = "v_germline_end", junction_length="junction_length",
keep_alignment = FALSE, keep_insertions=FALSE,
mask_codons=TRUE, mask_cdr3=TRUE, nproc=1){
ids <- data[[sequence_id]]
qi <- substr(data[[sequence]],
data[[v_sequence_start]],data[[v_sequence_end]])
si <- substr(data[[sequence_alignment]],
data[[v_germline_start]],
data[[v_germline_end]])
ei <- substr(data[[sequence_alignment]],
data[[v_germline_end]]+1,
nchar(data[[sequence_alignment]]))
if(max(table(ids)) > 1){
stop("Sequence IDs are not unique")
}
results <- dplyr::bind_rows(
parallel::mclapply(1:length(qi),function(x){
mask <- maskCodons(ids[x], qi[x], si[x],
keep_alignment=keep_alignment,
keep_insertions=keep_insertions,
mask=mask_codons)
if(is.na(mask$sequence_masked_v)){
mask$sequence_masked <- NA
}else{
mask$sequence_masked <-
paste0(mask$sequence_masked_v,ei[x])
}
mask
}, mc.cores=nproc))
m <- match(data[[sequence_id]], results[[sequence_id]])
if(sum(results[m,]$sequence_id != data$sequence_id) > 0){
stop("Sequence ids don't match")
}
data <- bind_cols(data,
sequence_masked=results[m,]$sequence_masked,
masking_note=results[m,]$masking_note)
if(keep_alignment){
data <- bind_cols(data,
subject_alignment=results[m,]$subject_alignment,
query_alignment=results[m,]$query_alignment)
}
if(keep_insertions){
data <- bind_cols(data,
insertions=results[m,]$insertions)
}
if(mask_cdr3){
data$sequence_masked <- unlist(lapply(1:nrow(data),function(x){
if(is.na(data$sequence_masked[x])){
return(data$sequence_masked[x])
}
regions <- as.character(
shazam::setRegionBoundaries(data[[junction_length]][x],
data$sequence_masked[x],
shazam::IMGT_VDJ_BY_REGIONS)@boundaries)
if(!is.na(data$sequence_masked[x]) && sum(regions == "cdr3") > 0){
s <- strsplit(data$sequence_masked[x],split="")[[1]]
s[regions == "cdr3"] = "N"
s <- paste(s, collapse="")
}else{
s <- data$sequence_masked[x]
}
s
}))
}
include <- !is.na(data$sequence_masked)
if(sum(include) == 0){
warning("Masking failed for all sequences")
return(data)
}
diffs <- nchar(data$sequence_alignment[include]) -
nchar(data$sequence_masked[include])
if(sum(diffs) > 0){
print(data[diffs > 0,]$sequence_id)
stop("Error in masking above sequences (length)")
}
dists <- unlist(parallel::mclapply(1:nrow(data[include,]), function(x)
alakazam::seqDist(data$sequence_alignment[include][x],
data$sequence_masked[include][x]),mc.cores=nproc))
if(sum(dists) > 0){
print(data[dists > 0,]$sequence_id)
stop("Error in masking above sequences (mismatches)")
}
return(data)
}
getSubclones <- function(heavy, light, nproc=1, minseq=1,
id="sequence_id", seq="sequence_alignment",
clone="clone_id", cell_id="cell_id", v_call="v_call", j_call="j_call",
junc_len="junction_length", nolight="missing"){
subclone <- "subclone_id"
scount <- table(heavy[[clone]])
big <- names(scount)[scount >= minseq]
heavy <- filter(heavy,(!!rlang::sym(clone) %in% big))
heavy$vj_gene <- nolight
heavy$vj_alt_cell <- nolight
heavy$subclone_id <- 0
light$vj_gene <- nolight
light$vj_alt_cell <- nolight
light$subclone_id <- 0
light[[clone]] <- -1
paired <- parallel::mclapply(unique(heavy[[clone]]),function(cloneid){
hd <- filter(heavy,!!rlang::sym(clone) == cloneid)
ld <- filter(light,!!rlang::sym(cell_id) %in% hd[[!!cell_id]])
hd <- filter(hd,(!!rlang::sym(cell_id) %in% ld[[!!cell_id]]))
if(nrow(ld) == 0){
return(hd)
}
ltemp <- ld
ltemp$clone_id <- -1
ld <- dplyr::tibble()
lclone <- 1
while(nrow(ltemp) > 0){
lvs <- strsplit(ltemp[[v_call]],split=",")
ljs <- strsplit(ltemp[[j_call]],split=",")
combos <-
lapply(1:length(lvs),function(w)
unlist(lapply(lvs[[w]],function(x)
lapply(ljs[[w]],function(y)paste(x,y,sep=":")))))
cells <- unique(ltemp[[cell_id]])
cellcombos <- lapply(cells,function(x)
unique(unlist(combos[ltemp[[cell_id]] == x])))
lcounts <- table(unlist(lapply(cellcombos,function(x)x)))
max <- names(lcounts)[which.max(lcounts)]
cvs <- unlist(lapply(combos,function(x)max %in% x))
ltemp[cvs,][[subclone]] <- lclone
ltemp[cvs,]$vj_gene <- max
rmseqs <- c()
cell_counts <- table(ltemp[cvs,][[cell_id]])
mcells <- names(cell_counts)[cell_counts > 1]
for(cell in mcells){
ttemp <- filter(ltemp,cvs & !!rlang::sym(cell_id) == cell)
ttemp$str_counts <-
stringr::str_count(ttemp[[seq]],"[A|C|G|T]")
rmtemp <- ttemp[-which.max(ttemp$str_counts),]
rmseqs <- c(rmseqs,rmtemp[[id]])
}
include <- filter(ltemp,cvs & !(!!rlang::sym(id) %in% rmseqs))
leave <- filter(ltemp,!cvs | (!!rlang::sym(id) %in% rmseqs))
mcells <- unique(include[[cell_id]])
for(cell in mcells){
if(cell %in% leave[[cell_id]]){
include[include[[cell_id]] == cell,]$vj_alt_cell <-
paste(paste0(leave[leave[[cell_id]] == cell,][[v_call]],":",
leave[leave[[cell_id]] == cell,][[j_call]]),
collapse=",")
}
}
ld <- bind_rows(ld,include)
ltemp <- filter(ltemp,!(!!rlang::sym(cell_id) %in% ltemp[cvs,][[!!cell_id]]))
lclone <- lclone + 1
}
ld[[clone]] <- cloneid
for(cell in unique(hd[[cell_id]])){
if(cell %in% ld[[cell_id]]){
lclone <- ld[ld[[cell_id]] == cell,][[subclone]]
ld[ld[[cell_id]] == cell,][[subclone]] <- lclone
hd[hd[[cell_id]] == cell,][[subclone]] <- lclone
hd[hd[[cell_id]] == cell,]$vj_gene <- ld[ld[[cell_id]] == cell,]$vj_gene
hd[hd[[cell_id]] == cell,]$vj_alt_cell <- ld[ld[[cell_id]] == cell,]$vj_alt_cell
}
}
comb <- bind_rows(hd,ld)
comb$vj_clone <- paste0(comb[[clone]],"_",comb[[subclone]])
comb$vj_cell <- paste(comb$vj_gene,comb$vj_alt_cell,sep=",")
comb
},mc.cores=nproc)
paired <- bind_rows(paired)
return(paired)
}
processClones <- function(clones, nproc=1 ,minseq=2, seq){
if(!"tbl" %in% class(clones)){
print(paste("clones is of class",class(clones)))
stop("clones must be a tibble of airrClone objects!")
}else{
if(class(clones$data[[1]]) != "airrClone"){
print(paste("clones is list of class",class(clones$data[[1]])))
stop("clones$data must be a list of airrClone objects!")
}
}
threshold <- unlist(lapply(clones$data,function(x)
length(x@data[[seq]]) >= minseq))
clones <- clones[threshold,]
if(nrow(clones) == 0){
warning(paste("All clones have less than minseq =",minseq,"sequences"))
return(clones)
}
# sanitize sequence IDs: these characters break downstream phylogenetics tools
clones$data <- lapply(clones$data, function(x) {
x@data$sequence_id <- gsub("[ :;,=]", "_", x@data$sequence_id)
x
})
max <- max(unlist(lapply(clones$data,function(x)max(nchar(x@data$sequence_id)))))
if(max > 1000){
wc <- which.max(unlist(lapply(clones$data,function(x)
max(nchar(x@data$sequence_id)))))
stop(paste("Sequence ID of clone",clones$data[[wc]]@clone,"index",
wc,"too long - over 1000 characters!"))
}
or <- order(unlist(lapply(clones$data,function(x) nrow(x@data))),
decreasing=TRUE)
clones <- clones[or,]
clones$data <- parallel::mclapply(clones$data,
function(x)cleanAlignment(x,seq),mc.cores=nproc)
if(.hasSlot(clones$data[[1]],"locus")){
clones$locus <- unlist(lapply(clones$data,function(x)
paste(sort(unique(x@locus)),collapse=",")))
}
clones$seqs <- unlist(lapply(clones$data,function(x)nrow(x@data)))
clones <- dplyr::rowwise(clones)
clones <- dplyr::ungroup(clones)
clones
}
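# Minimal usage sketch for the pipeline above, assuming `db` is an
# AIRR-format data frame with clone_id, sequence_alignment, and
# germline_alignment_d_mask columns:
#   clones <- formatClones(db, chain = "H", minseq = 2)
#   clones$data[[1]]  # an airrClone object, cleaned by processClones()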
render_heatmap <- reactive({
req(length(DATA()) > 0)
withProgress({
target <- as.numeric(input$RT_Stats.Overview.Target)
data <- subset(DATA(), ID %in% input$RT_Stats.Overview.ID)
Plot.Stats.Significance_Heatmap(data, target, alpha = as.numeric(input$RT_Stats.Overview.Alpha),
bootstrap.size = input$RT_Stats.Overview.Samples)
},
message = "Creating plot")
})
output$RT_Stats.Overview.Heatmap <- renderPlotly(
render_heatmap()
)
create_stats_table <- reactive({
req(length(DATA()) > 0)
req(length(get_id(DATA())) > 1)
data <- subset(DATA(), ID %in% input$RT_Stats.Overview.ID)
target <- as.numeric(input$RT_Stats.Overview.Target)
df <- pairwise.test(data, target, bootstrap.size = input$RT_Stats.Overview.Samples)
df <- format(df, digits = 3)
df
})
output$RT_Stats.Overview.Pmatrix <- DT::renderDataTable({
create_stats_table()
}, options = list(dom = 'lrtip', pageLength = 15, scrollX = T, server = T))
output$RT_Stats.Overview.Graph <- renderPlot({
render_graph()
})
render_graph <- reactive({
req(length(DATA()) > 0)
withProgress({
target <- as.numeric(input$RT_Stats.Overview.Target)
data <- subset(DATA(), ID %in% input$RT_Stats.Overview.ID)
Plot.Stats.Significance_Graph(data, target, alpha = as.numeric(input$RT_Stats.Overview.Alpha),
bootstrap.size = input$RT_Stats.Overview.Samples)
},
message = "Creating plot")
})
output$RT_Stats.Overview.DownloadTable <- downloadHandler(
filename = function() {
eval(RT_Stats_table_name)
},
content = function(file) {
df <- create_stats_table()
save_table(df, file)
}
)
output$RT_Stats.Overview.DownloadHeatmap <- downloadHandler(
filename = function() {
eval(RT_Stats_heatmap_name)
},
content = function(file) {
save_plotly(render_heatmap(), file)
},
contentType = paste0('image/', input$RT_Stats.Overview.Format)
)
data_table_glicko2 <- reactive({
input$RT_Stats.Glicko.Create
isolate({
withProgress({
data <- RT_glicko_data()
nr_games <- as.numeric(input$RT_Stats.Glicko.Nrgames)
df <- glicko2_ranking(data, nr_games, target_dt = RT_stats_glicko_targets_obj, which = 'by_FV')$ratings
format(df, digits = 3)
}, message = "Creating Ranking, this might take a while")
})
})
output$RT_Stats.Glicko.Dataframe <- DT::renderDataTable({
req(length(DATA_RAW()) > 0)
data_table_glicko2()
}, options = list(dom = 'lrtip', pageLength = 15, scrollX = T, server = T))
render_glico2_plot <- reactive({
isolate({
data <- RT_glicko_data()
nr_games <- as.numeric(input$RT_Stats.Glicko.Nrgames)
})
Plot.Stats.Glicko2_Candlestick(data, nr_games, data_table_glicko2(), which = 'by_FV',
target_dt = RT_stats_glicko_targets_obj)
})
output$RT_Stats.Glicko.Candlestick <- renderPlotly({
render_glico2_plot()
})
output$RT_Stats.Glicko.DownloadTable <- downloadHandler(
filename = function() {
eval(RT_Glicko2_table_name)
},
content = function(file) {
df <- data_table_glicko2()
save_table(df, file)
}
)
output$RT_Stats.Glicko.Download <- downloadHandler(
filename = function() {
eval(RT_Glicko2_figure_name)
},
content = function(file) {
save_plotly(render_glico2_plot(), file)
},
contentType = paste0('image/', input$RT_Stats.Glicko.Format)
)
RT_glicko_data <- function() {
data <- subset(DATA_RAW(), ID %in% isolate(input$RT_Stats.Glicko.ID))
if (length(data) == 0) return(NULL)
data <- subset(data, DIM %in% input$RT_Stats.Glicko.Dim)
data <- subset(data, funcId %in% input$RT_Stats.Glicko.Funcid)
if (length(unique(get_id(data))) < 2) {
shinyjs::alert("This plot is only available when the dataset contains
multiple IDs for the selected functions and dimensions.")
return(NULL)
}
data
}
RT_stats_glicko_targets <- reactive({
data <- RT_glicko_data()
if (is.null(data)) return(NULL)
get_target_dt(data, "by_FV")
})
RT_stats_glicko_targets_obj <- NULL
proxy_RT_Stats.Glicko.Targets <- dataTableProxy('RT_Stats.Glicko.Targets')
output$RT_Stats.Glicko.Targets <- DT::renderDataTable({
req(length(DATA_RAW()) > 0)
RT_stats_glicko_targets_obj <<- RT_stats_glicko_targets()
RT_stats_glicko_targets_obj
}, editable = list(target = 'cell', disable = list(columns = c(0,1))), rownames = FALSE,
options = list(pageLength = 5, lengthMenu = c(5, 10, 25, -1), scrollX = T, server = T))
observeEvent(input$RT_Stats.Glicko.Targets_cell_edit, {
info <- input$RT_Stats.Glicko.Targets_cell_edit
i <- info$row
j <- info$col
v <- info$value
data <- RT_glicko_data()
if (is.null(data)) return(NULL)
RT_stats_glicko_targets_obj$target[[i]] <<- v
replaceData(proxy_RT_Stats.Glicko.Targets, RT_stats_glicko_targets_obj, resetPaging = FALSE, rownames = FALSE)
})
Hals.snow <-
function(j, Z, Hs, Ht, Hst.ls, b.lag, GP.mx) {
rho <- GP.mx[j, 1]
reg <- GP.mx[j, 2]
Z.hat <- H.als.b(Z=Z, Hs=Hs, Ht=Ht, Hst.ls=Hst.ls, rho=rho, reg=reg, b.lag=b.lag, Hs0=NULL, Ht0=NULL, Hst0.ls=NULL)$Z.hat
return(Z.hat)
}
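# Minimal usage sketch: Hals.snow() is shaped for a parallel apply over the
# rows of GP.mx (rho/regularization pairs), e.g. with snowfall (call names
# assumed from that package):
#   sfLapply(1:nrow(GP.mx), Hals.snow, Z = Z, Hs = Hs, Ht = Ht,
#            Hst.ls = Hst.ls, b.lag = 1, GP.mx = GP.mx)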
test_that("Check github_commits()", {
testthat::skip_on_cran()
local_edition(3)
df <- data.frame(x = "test", y = 1:10)
expect_output(Luminescence:::.as.latex.table.data.frame(df))
}) |
"print.psych.omega" <-
function(x,digits=2,all=FALSE,cut=NULL,sort=FALSE,...) {
xx <- x
if(!is.null(x$ci)) {
x <- x$om}
if(is.null(cut)) cut <- .2
cat( x$title,"\n")
cat("Call: ")
print(x$call)
cat("Alpha: ",round(x$alpha,digits),"\n")
cat("G.6: ",round(x$G6,digits),"\n")
cat("Omega Hierarchical: " ,round(x$omega_h,digits),"\n")
cat("Omega H asymptotic: " ,round(x$omega.lim,digits),"\n")
cat("Omega Total " ,round(x$omega.tot,digits),"\n")
cat("\nSchmid Leiman Factor loadings greater than ",cut, "\n")
loads <- x$schmid$sl
nfactor <- ncol(loads)-3
if(sort) {
ord <- sort(abs(loads[,1]),decreasing=TRUE,index.return=TRUE)
loads[,] <- loads[ord$ix,]
rownames(loads) <- rownames(loads)[ord$ix]
loads <- cbind(v=ord$ix,loads)
}
tn <- colnames(loads)
loads <- data.frame(loads)
colnames(loads) <- tn
if(sort) {loads[,1] <- as.integer(loads[,1])
load.2 <- loads[,2:(nfactor+1)]} else {load.2 <- loads[,1:nfactor] }
h2 <- round(loads[,"h2"],digits)
u2 <- round(loads[,"u2"],digits)
loads <-round(loads,digits)
fx <- format(loads,digits=digits)
nc <- nchar(fx[1,3], type = "c")
fx[abs(loads)< cut] <- paste(rep(" ", nc), collapse = "")
p2 <- loads[,"p2"]
mp2 <- mean(p2)
vp2 <- var(p2)
p2 <- round(p2,digits)
print(cbind(fx[,1:(nfactor+sort)],h2,u2,p2),quote=FALSE)
numfactors <- dim(x$schmid$sl)[2] -3
eigenvalues <- diag(t(x$schmid$sl[,1:numfactors]) %*% x$schmid$sl[,1:numfactors])
cat("\nWith eigenvalues of:\n")
ev.rnd <- round(eigenvalues,digits)
print(ev.rnd,digits=digits)
maxmin <- max(eigenvalues[2:numfactors])/min(eigenvalues[2:numfactors])
gmax <- eigenvalues[1]/max(eigenvalues[2:numfactors])
cat("\ngeneral/max " ,round(gmax,digits)," max/min = ",round(maxmin,digits))
cat("\nmean percent general = ",round(mp2,digits), " with sd = ", round(sqrt(vp2),digits), "and cv of ",round(sqrt(vp2)/mp2,digits),"\n")
if(!is.null(x$ECV)) cat("Explained Common Variance of the general factor = ", round(x$ECV,digits),"\n")
if(!is.null(x$schmid$dof)) {cat("\nThe degrees of freedom are",x$schmid$dof," and the fit is ",round(x$schmid$objective,digits),"\n")
if(!is.null(x$schmid$n.obs)&&!is.na(x$schmid$n.obs)) {cat("The number of observations was ",x$schmid$n.obs, " with Chi Square = ",round(x$schmid$STATISTIC,digits), " with prob < ", signif(x$schmid$PVAL,digits))}
}
if(!is.null(x$schmid$rms)) {cat("\nThe root mean square of the residuals is ", round(x$schmid$rms,digits),"\n") }
if(!is.null(x$schmid$crms)) {cat("The df corrected root mean square of the residuals is ", round(x$schmid$crms,digits)) }
if(!is.null(x$schmid$RMSEA)) {cat("\nRMSEA index = ",round(x$schmid$RMSEA[1],digits+1), " and the", (1- x$schmid$RMSEA[4])*100,"% confidence intervals are ",round(x$schmid$RMSEA[2:3],digits+1)) }
if(!is.null(x$schmid$BIC)) {cat("\nBIC = ",round(x$schmid$BIC,digits))}
cat("\n\nCompare this with the adequacy of just a general factor and no group factors")
if(!is.null(x$gstats$dof)) {cat("\nThe degrees of freedom for just the general factor are",x$gstats$dof," and the fit is ",round(x$gstats$objective,digits),"\n")
if(!is.null(x$gstats$n.obs)&&!is.na(x$gstats$n.obs)) {cat("The number of observations was ",x$gstats$n.obs, " with Chi Square = ",round(x$gstats$STATISTIC,digits), " with prob < ", signif(x$gstats$PVAL,digits))}
}
if(!is.null(x$gstats$rms)) {cat("\nThe root mean square of the residuals is ", round(x$gstats$rms,digits),"\n") }
if(!is.null(x$gstats$crms)) {cat("The df corrected root mean square of the residuals is ", round(x$gstats$crms,digits),"\n") }
if(!is.null(x$gstats$RMSEA)) {cat("\nRMSEA index = ",round(x$gstats$RMSEA[1],digits+1), " and the", (1- x$gstats$RMSEA[4])*100,"% confidence intervals are ",round(x$gstats$RMSEA[2:3],digits+1)) }
if(!is.null(x$gstats$BIC)) {cat("\nBIC = ",round(x$gstats$BIC,digits),"\n")}
stats.df <- t(data.frame(sqrt(x$stats$R2),x$stats$R2,2*x$stats$R2 -1))
cat("\nMeasures of factor score adequacy \n")
rownames(stats.df) <- c("Correlation of scores with factors ","Multiple R square of scores with factors ","Minimum correlation of factor score estimates")
print(round(stats.df,digits))
cat("\n Total, General and Subset omega for each subset\n")
colnames(x$omega.group) <- c("Omega total for total scores and subscales","Omega general for total scores and subscales ", "Omega group for total scores and subscales")
print(round(t(x$omega.group),digits))
if(!is.null(xx$ci)) {
cat("\n Estimates and bootstrapped confidence intervals\n")
li <- data.frame(lower=xx$ci$ci[,1],estimate=xx$ci$means,upper=xx$ci$ci[,2])
li[1,2] <- x$omega_h
li[2,2] <- x$alpha
li[3,2] <- x$omega.tot
li[4,2] <- x$G6
li[5,2] <- x$omega.lim
print(li,digits=digits)}
}
"print.psych.omegaSem" <-
function(x,digits=2,all=FALSE,cut=NULL,sort=FALSE,...) {
if(is.null(cut)) cut <- .2
cat( x$title,"\n")
if(!is.null(x$Call)) {
cat("Call: ")
print(x$Call)
print.psych.omega(x$omegaSem,digits=digits,all=all,cut=cut,sort=sort,...)
x <- x$omega.efa
}
loads <- x$cfa.loads
class(loads) <- NULL
nfactor <- ncol(loads)
cat("\n The following analyses were done using the ", x$sem," package \n")
if(nfactor > 1) { cat("\n Omega Hierarchical from a confirmatory model using sem = ", round(x$omega,digits)) } else {
cat("\n With only 1 factor specified in the sem model, we can only calculate omega Total.\n You should probably rerun the sem specifying either a bifactor or hierarchical model.\n") }
cat("\n Omega Total from a confirmatory model using sem = ", round(x$omega.tot,digits),"\n")
cat("With loadings of \n")
loads <- data.frame(loads)
if(nfactor > 1) {
tn <- c("g", paste0("F",1:(nfactor-1),"*"))
colnames(loads) <- tn }
load.2 <- as.matrix(loads)
h2 <- round(rowSums(load.2^2),digits)
loads <- round(loads,digits)
fx <- format(loads,digits=digits)
if(nfactor > 1 ) {
nc <- nchar(fx[1,3], type = "c")
fx[abs(loads)< cut] <- paste(rep(" ", nc), collapse = "")}
h2 <- round(rowSums(load.2^2),digits)
u2 <- 1 - h2
p2 <- loads[,1]^2/h2
mp2 <- mean(p2)
vp2 <- var(p2)
p2 <- round(p2,digits)
print(cbind(fx,h2,u2,p2),quote=FALSE)
loads <- as.matrix(load.2)
eigenvalues <- diag(t(loads) %*% loads)
cat("\nWith sum of squared loadings of:\n")
ev.rnd <- round(eigenvalues,digits)
print(ev.rnd,digits=digits)
maxmin <- max(eigenvalues[2:nfactor])/min(eigenvalues[2:nfactor])
gmax <- eigenvalues[1]/max(eigenvalues[2:nfactor])
ECV <- eigenvalues[1]/sum(eigenvalues)
if(!is.null(x$Fit)) {
cat("\nThe degrees of freedom of the confirmatory model are ",x$Fit[[1]]$df, " and the fit is ", x$Fit[[1]]$stat, " with p = ",x$Fit[[1]]$pvalue)
}
cat("\ngeneral/max " ,round(gmax,digits)," max/min = ",round(maxmin,digits))
cat("\nmean percent general = ",round(mp2,digits), " with sd = ", round(sqrt(vp2),digits), "and cv of ",round(sqrt(vp2)/mp2,digits),"\n")
cat("Explained Common Variance of the general factor = ", round(ECV,digits),"\n")
if(nfactor > 1) {
cat("\nMeasures of factor score adequacy \n")
fsa.df <- t(data.frame(sqrt(x$gR2),x$gR2,2*x$gR2 -1))
rownames(fsa.df) <- c("Correlation of scores with factors ","Multiple R square of scores with factors ","Minimum correlation of factor score estimates")
colnames(fsa.df) <- tn
print(round(fsa.df,digits))
cat("\n Total, General and Subset omega for each subset\n")
colnames(x$omega.group) <- c("Omega total for total scores and subscales","Omega general for total scores and subscales ", "Omega group for total scores and subscales")
rownames(x$omega.group) <- tn
print(round(t(x$omega.group),digits))
}
cat("\nTo get the standard sem fit statistics, ask for summary on the fitted object")
} |
knitr::opts_chunk$set(fig.width=6, fig.height=6)
set.seed(52523)
d <- data.frame(
wt = 100*rnorm(100),
stringsAsFactors = FALSE)
WVPlots::PlotDistCountNormal(d,'wt','example')
WVPlots::PlotDistDensityNormal(d,'wt','example')
set.seed(34903490)
x = rnorm(50)
y = 0.5*x^2 + 2*x + rnorm(length(x))
frm = data.frame(
x=x,
y=y,
yC=y>=as.numeric(quantile(y,probs=0.8)),
stringsAsFactors = FALSE)
frm$absY <- abs(frm$y)
frm$posY = frm$y > 0
WVPlots::ScatterHist(frm, "x", "y", smoothmethod="lm",
title="Example Linear Fit")
set.seed(34903490)
y = abs(rnorm(20)) + 0.1
x = abs(y + 0.5*rnorm(20))
frm = data.frame(
model=x,
value=y,
stringsAsFactors = FALSE)
frm$costs=1
frm$costs[1]=5
frm$rate = with(frm, value/costs)
frm$isValuable = (frm$value >= as.numeric(quantile(frm$value, probs=0.8)))
gainx = 0.10
labelfun = function(gx, gy) {
pctx = gx*100
pcty = gy*100
paste("The top ", pctx, "% most valuable points by the model\n",
"are ", pcty, "% of total actual value", sep='')
}
WVPlots::GainCurvePlotWithNotation(frm, "model", "value",
title="Example Gain Curve with annotation",
gainx=gainx,labelfun=labelfun)
set.seed(52523)
d = data.frame(
meas=rnorm(100),
stringsAsFactors = FALSE)
threshold = 1.5
WVPlots::ShadedDensity(d, "meas", threshold, tail="right",
title="Example shaded density plot, right tail")
set.seed(34903490)
frm = data.frame(
x=rnorm(50),
y=rnorm(50),
stringsAsFactors = FALSE)
frm$z <- frm$x+frm$y
WVPlots::ScatterHistN(frm, "x", "y", "z", title="Example Joint Distribution")
set.seed(34903490)
x = rnorm(50)
y = 0.5*x^2 + 2*x + rnorm(length(x))
frm = data.frame(
x = x,
yC = y>=as.numeric(quantile(y,probs=0.8)),
stringsAsFactors = FALSE)
WVPlots::ROCPlot(frm, "x", "yC", TRUE, title="Example ROC plot") |
context("Infix")
test_that("Infix operators for S3", {
Test <- function(.x) {
".+" <- function(e2) Test(getX() + e2)
".==" <- function(e2) getX() == e2
".>=" <- function(e2) getX() >= e2
".-" <- function(e2) {
if (missing(e2)) -.x else .x - e2
}
getX <- function() .x
retList(c("Test", "Infix"))
}
expect_equal(-Test(3), -3)
expect_equal(Test(3) - 2, 1)
expect_true(Test(2) + 2 == 4)
expect_true(Test(2) == 2)
expect_false(Test(3) == 2)
expect_true(Test(2) >= 1)
expect_true(Test(2) >= 2)
expect_false(Test(2) >= 3)
})
test_that("Infix selects next method.", {
Test <- function(.x) {
".+" <- function(e2) Test(getX() + e2)
".==" <- function(e2) getX() == e2
getX <- function() .x
retList(c("Test", "Infix"))
}
expect_true(Test(2) + 2 == 4)
expect_error(Test(2) - 2 == 4)
}) |
summary.pvol <- function(object, ...) {
print.pvol(object)
}
print.pvol <- function(x, digits = max(3L, getOption("digits") - 3L), ...) {
stopifnot(inherits(x, "pvol"))
cat(" Polar volume (class pvol)\n\n")
cat("
cat(" radar: ", x$radar, "\n")
cat(" source: ", x$attributes$what$source, "\n")
cat("nominal time: ", as.character(x$datetime), "\n\n")
}
is.pvol <- function(x) {
inherits(x, "pvol")
}
dim.pvol <- function(x) {
stopifnot(inherits(x, "pvol"))
c(length(x$scans))
} |
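# Usage sketch for the pvol S3 methods above: a minimal, hypothetical
# object carrying the fields print.pvol() expects prints without error.
pv <- structure(list(radar = "example", datetime = Sys.time(),
                     attributes = list(what = list(source = "WMO:00000")),
                     scans = vector("list", 3)),
                class = "pvol")
print(pv)
dim(pv)   # number of scans: 3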
source("incl/start.R")
message("*** BatchtoolsFuture() and garbage collection ...")
plan(batchtools_local)
for (how in c("resolve", "value")) {
f <- future({ 1 })
if (how == "value") {
v <- value(f)
print(v)
} else if (how == "resolve") {
resolve(f)
}
stopifnot(resolved(f))
reg <- f$config$reg
rm(list = "f")
gc()
stopifnot(!file_test("-d", reg$file.dir))
fail <- try(checkIds(reg, ids = 1L), silent = TRUE)
stopifnot(inherits(fail, "try-error"))
}
message("*** BatchtoolsFuture() and garbage collection ... DONE")
source("incl/end.R") |
expected <- eval(parse(text="1:2"));
test(id=0, code={
argv <- eval(parse(text="list(TRUE, FALSE, c(FALSE, FALSE))"));
.Internal(`order`(argv[[1]], argv[[2]], argv[[3]]));
}, o=expected); |
make.thresholds.character <-
function(item.params,design.matrix="normal",...) {
return(make.thresholds(CQmodel(show=item.params),...))
} |
get_time_bounds_run <- function(times, nts) {
time_bnds <- array(NA, dim = c(2, length(times)-(nts-1)))
for (j in 1:(length(times)-(nts-1))) {
time_bnds[1, j] <- times[j]
time_bnds[2, j] <- times[j+(nts-1)]
}
return(time_bnds)
} |
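# Usage sketch for get_time_bounds_run() above: with a 3-step running
# window (nts = 3), each column holds the first and last time stamp of
# one window (the time values here are hypothetical).
times <- 1:6
get_time_bounds_run(times, nts = 3)
#      [,1] [,2] [,3] [,4]
# [1,]    1    2    3    4
# [2,]    3    4    5    6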
build_hist_binning <- function(actual, predicted, bins=NULL){
decide_on_break <- function(predicted, breaks=15){
if (length(predicted)<=75){
breaks <- floor(length(predicted)/6)
}
if(all(predicted<=1) && all(predicted>=0)){
histogram <- hist(predicted, breaks=seq(0,1,(1/breaks)), plot=FALSE)
while(sum(histogram$counts>0 & histogram$counts<18) > 3 && !breaks <= 7){
breaks <- breaks-1
histogram <- hist(predicted, breaks=seq(0,1,(1/breaks)), plot=FALSE)
}}
else {
histogram <- hist(predicted, breaks=breaks, plot=FALSE)
while(sum(histogram$counts>0 & histogram$counts<18) > 3 && !breaks <= 7){
breaks <- breaks-1
histogram <- hist(predicted, breaks=breaks, plot=FALSE)
}}
return(suggested_break_number=breaks)
}
if (is.null(bins)){
n_bins <- decide_on_break(predicted)
}
else
n_bins <- bins
predicted_real <- data.frame()
predicted_real[1:length(predicted),1] <- predicted
predicted_real[,2] <- actual
predicted_real <- na.omit(predicted_real)
histogram <- hist(predicted_real[,1], breaks=seq(0,1,(1/n_bins)), plot=FALSE, include.lowest = T)
true_bin <- data.frame()
true_bin[c(1:length(histogram$count)),1] <- seq(1,length(histogram$count))
true_bin[,2] <- 0
true_bin[,3] <- 0
true_bin[,4] <- 0
colnames(predicted_real) <- c("ML score", "real score")
for(i in 1:nrow(predicted_real)){
for (j in 1:(length(histogram$breaks)-1)){
if (predicted_real[i,1]==histogram$breaks[1]){
true_bin[1,3] <- true_bin[1,3] + 1
true_bin[1,2] <- true_bin[1,2] + predicted_real[i,2]
break
}
if (histogram$breaks[j] < predicted_real[i,1] && predicted_real[i,1]<= histogram$breaks[j+1]){
true_bin[j,3] <- true_bin[j,3] + 1
true_bin[j,2] <- true_bin[j,2] + predicted_real[i,2]
break
}
}
}
true_bin[,4] <- true_bin[,2]/true_bin[,3]
true_bin[,4][is.na(true_bin[,4])] <- 0
p_values_binom <- unlist(apply(cbind(true_bin[,2], true_bin[,3]),1,binom_for_histogram))
true_bin[,5] <- p_values_binom
for (i in 1:nrow(true_bin)){
if(is.nan(true_bin[i,4])){
true_bin[i,6] <- "no value"
}
else if(true_bin[i,5]<0.001){
true_bin[i,6] <- "***"
}
else if(true_bin[i,5]<0.01){
true_bin[i,6] <- "**"
}
else if(true_bin[i,5]<0.05){
true_bin[i,6] <- "*"
}
else
true_bin[i,6] <- "ns"
}
colnames(true_bin) <- c("no bin", "true cases", "all", "prob_case", "p_value", "significance")
colnames(predicted_real) <- c("ML score", "real score")
min <- min(predicted)
max <- max(predicted)
calibration_points <- true_bin[,4]
calibration_points_sign <- true_bin[,5]<0.05
calibration_points_number <- length((true_bin[,4]))
calibration_points_number_sign <- length((subset(true_bin[,4], true_bin[,5]<0.05)))
calibration_range <- range(true_bin[,4])
if(sum(calibration_points_sign) != 0){
calibration_range_sign <- range(true_bin[,4][true_bin[,5]<0.05])
}
else{
calibration_range_sign <- 0
}
return(list(type="hist", histogram=histogram,probs_per_bin=true_bin[,4],
binnning_scheme=true_bin, min=min, max=max,
calibration_points=list(calibration_points=calibration_points,calibration_points_sign=calibration_points_sign),
calibration_range=list(calibration_range=calibration_range, calibration_range_sign=calibration_range_sign),
calibration_points_number=list(calibration_points_number=calibration_points_number, calibration_points_number_sign=calibration_points_number_sign)))
} |
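# Minimal usage sketch for build_hist_binning() above; it requires the
# helper binom_for_histogram() (defined elsewhere in the package), so the
# call is left commented out.
# set.seed(1)
# pred <- runif(200); act <- rbinom(200, 1, pred)
# cal <- build_hist_binning(act, pred)
# cal$binnning_scheme   # per-bin counts, event rates and significance codes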
prov.summarize <- function (save=FALSE, create.zip=FALSE) {
tool <- get.tool()
if (tool == "rdtLite") {
prov.json <- rdtLite::prov.json
} else {
prov.json <- rdt::prov.json
}
prov <- provParseR::prov.parse(prov.json(), isFile = FALSE)
summarize.prov (prov, save, create.zip)
}
prov.summarize.file <- function (prov.file, save=FALSE, create.zip=FALSE) {
if (!file.exists(prov.file)) {
cat("Provenance file not found.\n")
return()
}
prov <- provParseR::prov.parse(prov.file)
summarize.prov (prov, save, create.zip)
}
prov.summarize.run <- function(r.script, save=FALSE, create.zip=FALSE, ...) {
tool <- get.tool()
if (tool == "rdtLite") {
prov.run <- rdtLite::prov.run
prov.json <- rdtLite::prov.json
} else {
prov.run <- rdt::prov.run
prov.json <- rdt::prov.json
}
tryCatch (prov.run(r.script, ...), error = function(x) {print (x)})
prov <- provParseR::prov.parse(prov.json(), isFile=FALSE)
summarize.prov (prov, save, create.zip)
}
summarize.prov <- function (prov, save, create.zip) {
environment <- provParseR::get.environment(prov)
if (save) {
save.to.text.file(prov, environment)
}
else {
generate.summaries(prov, environment)
}
if (create.zip) {
save.to.zip.file (environment)
}
}
get.tool <- function () {
loaded <- loadedNamespaces()
if ("rdtLite" %in% loaded) {
return("rdtLite")
}
if ("rdt" %in% loaded) {
return("rdt")
}
installed <- utils::installed.packages ()
if ("rdtLite" %in% installed) {
return("rdtLite")
}
if ("rdt" %in% installed) {
return("rdt")
}
stop ("One of rdtLite or rdt must be installed.")
}
save.to.text.file <- function(prov, environment) {
prov.path <- environment[environment$label == "provDirectory", ]$value
prov.file <- paste(prov.path, "/prov-summary.txt", sep="")
sink(prov.file, split=TRUE)
generate.summaries(prov, environment)
sink()
cat(paste("Saving provenance summmary in", prov.file))
}
generate.summaries <- function(prov, environment) {
script.path <- environment[environment$label == "script", ]$value
script.file <- sub(".*/", "", script.path)
generate.environment.summary (environment, provParseR::get.tool.info(prov), script.file)
generate.library.summary (provParseR::get.libs(prov))
if (script.file != "") {
generate.script.summary (provParseR::get.scripts(prov))
}
generate.preexisting.summary(provParseR::get.preexisting(prov))
generate.file.summary ("INPUTS:", provParseR::get.input.files(prov), prov)
generate.file.summary ("OUTPUTS:", provParseR::get.output.files(prov), prov)
if (script.file != "") {
generate.stdout.summary (prov)
generate.error.summary (prov)
}
}
generate.environment.summary <- function (environment, tool.info, script.file) {
if (script.file != "") {
cat (paste ("PROVENANCE SUMMARY for", script.file, "\n\n"))
} else {
cat (paste ("PROVENANCE SUMMARY for Console Session\n\n"))
}
cat (paste ("ENVIRONMENT:\n"))
cat (paste ("Executed at", environment[environment$label == "provTimestamp", ]$value, "\n"))
cat (paste ("Total execution time is", environment[environment$label == "totalElapsedTime", ]$value, "seconds\n"))
if (script.file != "") {
cat (paste ("Script last modified at", environment[environment$label == "scriptTimeStamp", ]$value, "\n"))
}
cat (paste ("Executed with", environment[environment$label == "langVersion", ]$value, "\n"))
cat (paste ("Executed on", environment[environment$label == "architecture", ]$value,
"running", environment[environment$label == "operatingSystem", ]$value, "\n"))
cat (paste ("Provenance was collected with", tool.info$tool.name, tool.info$tool.version, "\n"))
cat (paste ("Provenance is stored in", environment[environment$label == "provDirectory", ]$value, "\n"))
cat (paste ("Hash algorithm is", environment[environment$label == "hashAlgorithm", ]$value, "\n" ))
cat ("\n")
}
generate.library.summary <- function (libs) {
cat ("LIBRARIES:\n")
cat (paste (libs$name, libs$version, collapse="\n"))
cat ("\n\n")
}
generate.script.summary <- function (scripts) {
cat (paste ("SOURCED SCRIPTS:\n"))
if (nrow(scripts) > 1) {
script.info <- dplyr::select(scripts[2:nrow(scripts), ], "script", "timestamp")
for (i in 1:nrow(script.info)) {
cat(script.info[i, "script"], "\n")
cat(" ", script.info[i, "timestamp"], "\n")
}
} else {
cat("None\n")
}
cat ("\n")
}
generate.preexisting.summary <- function(vars) {
cat (paste ("PRE-EXISTING:\n"))
if (is.null(vars) || nrow(vars) == 0) {
cat("None\n")
} else {
for (i in 1:nrow(vars)) {
cat(vars[i, "name"], "\n")
}
}
cat("\n")
}
generate.file.summary <- function (direction, files, prov) {
cat(direction, "\n")
if (nrow(files) == 0) {
cat ("None\n")
}
else {
file.info <- dplyr::select(files, "type", "name", "value", "location", "hash", "timestamp")
tool.info <- provParseR::get.tool.info(prov)
tool <- tool.info$tool.name
version <- tool.info$tool.version
if (tool == "rdtLite" && utils::compareVersion (version, "1.0.3") < 0) {
use.original.timestamp <- TRUE
}
else if (tool == "rdt" && utils::compareVersion (version, "3.0.3") < 0) {
use.original.timestamp <- TRUE
}
else {
use.original.timestamp <- FALSE
}
if (use.original.timestamp) {
file.info$filetime <- as.character (file.mtime(file.info$location))
}
else {
environment <- provParseR::get.environment(prov)
prov.dir <- environment[environment$label == "provDirectory", ]$value
file.info$filetime <- as.character (file.mtime(paste0 (prov.dir, "/", file.info$value)))
}
for (i in 1:nrow(file.info)) {
cat(file.info[i, "type"], ": ")
if (file.info[i, "type"] == "File") {
cat(file.info[i, "location"], "\n")
}
else {
cat(file.info[i, "name"], "\n")
}
if (is.na (file.info[i, "filetime"])) {
if (file.info[i, "timestamp"] != "") {
cat(" ", file.info[i, "timestamp"], "\n")
}
}
else {
cat(" ", file.info[i, "filetime"], "\n")
}
if (file.info[i, "hash"] != "") cat(" ", file.info[i, "hash"], "\n")
}
}
cat("\n")
}
generate.stdout.summary <- function (prov) {
stdout.nodes <- provParseR::get.stdout.nodes(prov)
generate.message.summary(prov, stdout.nodes, "CONSOLE")
}
generate.error.summary <- function (prov) {
error.nodes <- provParseR::get.error.nodes(prov)
generate.message.summary(prov, error.nodes, "ERRORS")
}
generate.message.summary <- function (prov, output.nodes, msg) {
cat (msg, ":\n", sep="")
if (nrow(output.nodes) == 0) {
cat ("None\n\n")
return()
}
proc.data.edges <- provParseR::get.proc.data(prov)
proc.nodes <- provParseR::get.proc.nodes(prov)
output.report <- merge (output.nodes, proc.data.edges, by.x="id", by.y="entity")
output.report <- merge (output.report, proc.nodes, by.x="activity", by.y="id")
scripts <- provParseR::get.scripts(prov)
scripts <- sub (".*/", "", scripts$script)
for (i in 1:nrow(output.nodes)) {
script.name <- scripts[output.report[i, "scriptNum"]]
if (!is.na (script.name)) {
cat ("In", scripts[output.report[i, "scriptNum"]])
if (is.na (output.report[i, "startLine"])) {
cat (" on line:\n")
cat (" ", output.report[i, "name"], "\n")
}
else if (output.report[i, "startLine"] == output.report[i, "endLine"] ||
is.na (output.report[i, "endLine"])){
cat (" on line ", output.report[i, "startLine"], ":\n")
}
else {
cat (" on lines ", output.report[i, "startLine"], " to ", output.report[i, "endLine"], ":\n")
}
}
cat (" ", output.report[i, "value"], "\n")
}
cat("\n")
}
save.to.zip.file <- function (environment) {
cur.dir <- getwd()
prov.path <- environment[environment$label == "provDirectory", ]$value
setwd(prov.path)
prov.dir <- sub (".*/", "", prov.path)
zipfile <- paste0 (prov.dir, "_",
environment[environment$label == "provTimestamp", ]$value, ".zip")
zippath <- paste0 (cur.dir, "/", zipfile)
if (file.exists (zippath)) {
warning (zippath, " already exists.")
}
else {
zip.program <- Sys.getenv("R_ZIPCMD", "zip")
if (.Platform$OS.type == "windows" && endsWith (zip.program, "7z.exe")) {
zip.result <- utils::zip (zippath, ".", flags="a", extras="-r -x!debug")
}
else {
zip.result <- utils::zip (zippath, ".", flags="-r", extras="-x debug/")
}
if (zip.result == 0) {
cat (paste ("Provenance saved in", zipfile))
}
else if (zip.result == 127) {
warning ("Unable to create a zip file. Please check that you have a zip program, such as 7-zip, on your path, and have the R_ZIPCMD environment variable set.")
}
else if (zip.result == 124) {
warning ("Unable to create a zip file. The zip program timed out.")
}
else {
warning ("Unable to create a zip file. The zip program ", zip.program, " returned error ", zip.result)
}
}
setwd(cur.dir)
} |
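# Usage sketch for the provenance summary functions above (file names are
# hypothetical; assumes rdtLite is installed):
# library(rdtLite)
# prov.run("analysis.R")              # collect provenance while running
# prov.summarize()                    # summarize the most recent run
# prov.summarize.file("prov.json")    # or summarize a saved provenance file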
na.skip <- function (x, FUN=NULL, ...)
{
nx <- na.omit(x)
fx <- FUN(nx, ... = ...)
if (is.vector(fx)) {
result <- .xts(fx, .index(x), .indexCLASS = indexClass(x))
}
else {
result <- merge(fx, .xts(, .index(x)))
}
return(result)
} |
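# Usage sketch for na.skip() above: apply FUN over the non-NA rows of an
# xts series, then re-align the result to the original index.
# library(xts)
# x <- xts(c(1, NA, 3), order.by = as.Date("2020-01-01") + 0:2)
# na.skip(x, FUN = function(z) z * 2)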
library(FDboost)
library(fda)
data("CanadianWeather", package = "fda")
dataM <- with(CanadianWeather,
list( temp = t(monthlyTemp),
l10precip = t(log10(monthlyPrecip)),
lat = coordinates[,"N.latitude"],
lon = coordinates[,"W.longitude"],
region = factor(region),
place = factor(place)
))
dataM$lon["Pr. George"] <- 122.75
dataM$lat["Pr. George"] <- 53.9
dataM$tempRaw <- dataM$temp
dataM$temp <- sweep(dataM$temp, 2, colMeans(dataM$temp))
dataM$month.t <- 1:12
dataM$month.s <- 1:12
par(mfrow=c(1,2))
with(dataM, {
matplot(t(l10precip), type = "l", lty = as.numeric(region),
col = as.numeric(region),
xlab = "", ylab = "",
ylim = c(-1.2, 1))
legend("bottom", col = 1:4, lty = 1:4, legend = levels(region),
cex = 1, bty = "n")
})
mtext("time [month]", 1, line = 2)
mtext("log(precipitation) [mm]", 2, line = 2)
with(dataM, {
matplot(t(tempRaw), type = "l", lty = as.numeric(region),
col = as.numeric(region),
xlab = "", ylab = "")
})
mtext("time [month]", 1, line = 2)
mtext("temperature [C]", 2, line = 2)
locations <- cbind(dataM$lon, dataM$lat)
rownames(locations) <- as.character(dataM$place)
library(fields)
dist <- rdist.earth(locations, miles = FALSE, R = 6371)
r.5 <- Matern.cor.to.range(500, nu = 0.5, cor.target = .2)
r1 <- Matern.cor.to.range(1500, nu = 1.0, cor.target = .2)
r10 <- Matern.cor.to.range(3000, nu = 10.0, cor.target = .2)
corr_nu.5 <- apply(dist, 1, Matern, nu = .5, range = r.5)
corr_nu1 <- apply(dist, 1, Matern, nu = 1, range = r1)
corr_nu10 <- apply(dist, 1, Matern, nu = 10, range = r10)
P_nu.5 <- solve(corr_nu.5)
P_nu1 <- solve(corr_nu1)
if(FALSE){
curve(Matern(x, nu = .5, range = r.5), 0, 5000, ylab = "Correlation(d)",
xlab = "d [km]", lty = 2)
curve(Matern(x, nu = 1, range = r1), 0, 5000, add = TRUE, lty = 3)
curve(Matern(x, nu = 10, range = r10), 0, 5000, add = TRUE, lty = 4)
legend("topright", inset = 0.2, lty = c(2, NA, 3, NA, 4),
legend = c(".5", "", "1", "", "10"),
title = expression(nu), cex = .8, bty = "n")
}
set.seed(210114)
mod3 <- FDboost(l10precip ~ bols(region, df = 2.5, contrasts.arg = "contr.dummy")
+ bsignal(temp, month.s, knots = 11, cyclic = TRUE,
df = 2.5, boundary.knots = c(0.5, 12.5), check.ident = FALSE)
+ bolsc(place, df = 2.5, K = P_nu.5, contrasts.arg = "contr.dummy"),
timeformula = ~ bbs(month.t, knots = 11, cyclic = TRUE,
df=3, boundary.knots = c(0.5, 12.5)),
offset="scalar", offset_control = o_control(k_min = 5),
data=dataM)
mod3 <- mod3[47]
par(mfrow=c(1,2))
predRegion <- predict(mod3, which = 1,
newdata = list(region = factor(c("Arctic", "Atlantic",
"Continental", "Pacific")),
month.t = seq(1, 12, l=20))) + mod3$offset
matplot(seq(1, 12, l = 20), t(predRegion), col = 1:4,
type = "l", lwd = 2, lty = 1:4,
main = "region", ylab = "", xlab = "")
mtext("t, time [month]", 1, line = 2)
legend("bottom", lty = 1:4, legend = levels(dataM$region), col = 1:4, bty = "n", lwd = 2)
plot(mod3, which = 2, pers = TRUE, main = "temperature", zlab = "",
xlab = "s, time [month]", ylab = "t, time [month]")
mod3 <- mod3[750]
par(mfrow=c(1,2))
predRegion <- predict(mod3, which=1,
newdata = list(region = factor(c("Arctic", "Atlantic",
"Continental", "Pacific")),
month.t=seq(1, 12, l = 20))) + mod3$offset
matplot(seq(1, 12, l=20), t(predRegion), col = 1:4,
type = "l", lwd = 2, lty = 1:4,
main = "region", ylab = "", xlab = "")
mtext("t, time [month]", 1, line = 2)
legend("bottom", lty = 1:4, legend = levels(dataM$region), col = 1:4, bty = "n", lwd = 2)
plot(mod3, which = 2, pers = TRUE, main = "temperature", zlab = "",
xlab = "s, time [month]", ylab = "t, time [month]")
ord <- c("Dawson", "Whitehorse", "Yellowknife", "Uranium City", "Churchill",
"Edmonton", "Pr. Albert", "The Pas", "Calgary", "Regina", "Winnipeg",
"Thunder Bay",
"Pr. George", "Pr. Rupert", "Kamloops", "Vancouver", "Victoria",
"Scheffervll", "Bagottville", "Arvida", "St. Johns", "Quebec",
"Fredericton", "Sydney", "Ottawa", "Montreal", "Sherbrooke", "Halifax",
"Yarmouth", "Toronto", "London", "Charlottvl",
"Inuvik", "Resolute", "Iqaluit" )
ind <- sapply(1:35, function(s){ which(dataM$place == ord[s]) })
smoothRes <- predict(mod3, which=3)
if( is.null(dim(smoothRes)) ) smoothRes <- matrix(0, ncol = 12, nrow = 35)
smoothRes <- (smoothRes )[ind, ]
regionOrd <- dataM$region[ind]
fit3 <- (predict(mod3))[ind, ]
response <- dataM$l10precip[ind, ]
par(mar = c(2.55, 2.05, 2.05, 1.05), oma=c(0, 0, 0, 0))
layout(rbind(matrix(1:36, 6, 6), rep(37, 6), rep(37, 6)))
for(i in 1:35) {
plot(1:12, smoothRes[i, ], col = as.numeric(regionOrd[i]), type = "l",
ylim = range(smoothRes, response-fit3),
main = paste(ord[i], " (", i, ")", sep = ""),
cex = 1.2, cex.axis = .8, ylab = "", xlab = "")
abline(h = 0, col = 8)
lines(1:12, smoothRes[i, ], col = as.numeric(regionOrd[i]))
points(1:12, response[i, ] - fit3[i, ], cex = 0.8)
}
plot(0, 0, col = "white", xaxt = "n", yaxt = "n", bty = "n")
if(require(maps) & require(mapdata)){
mapcanada <- map(database="world", regions="can", plot=FALSE)
plot(mapcanada, type = "l", xaxt = "n", yaxt = "n", ylab = "", xlab = "", bty = "n",
xlim = c(-141, -50), ylim=c(43, 74),
col = "grey", mar = c(0, 0, 0, 0))
for(i in 1:35) {
text(-dataM$lon[ind[i]], dataM$lat[ind[i]], col = as.numeric(regionOrd[i]),
labels = as.character(i), cex = 0.8)
}
} |
.separate_img = function(img,
levels = NULL,
drop_zero = TRUE){
if (is.null(levels)) {
levels = unique(c(img))
} else {
levels = unique(levels)
}
if (drop_zero) {
levels = setdiff(levels, 0)
}
if (length(levels) == 0) {
stop("No non-zero values in the levels this image!")
}
levels = sort(levels)
res = lapply(levels, function(x) {
img == x
})
names(res) = levels
return(res)
}
setGeneric("separate_img", function(img,
levels = NULL,
drop_zero = TRUE) standardGeneric("separate_img"))
setMethod("separate_img", "nifti", function(img, levels = NULL,
drop_zero = TRUE) {
res = .separate_img(img = img,
levels = levels,
drop_zero = drop_zero)
return(res)
})
setMethod("separate_img", "array", function(img, levels = NULL,
drop_zero = TRUE) {
res = .separate_img(img = img,
levels = levels,
drop_zero = drop_zero)
return(res)
})
setMethod("separate_img", "ANY", function(img, levels = NULL,
drop_zero = TRUE) {
if (inherits(img, "niftiImage")) {
res = .separate_img(img = img,
levels = levels,
drop_zero = drop_zero)
res = lapply(res, function(x) {
RNifti::updateNifti(x, template = img)
})
return(res)
} else {
stop("Not implemented for this type!")
}
return(img)
})
setMethod("separate_img", "factor", function(img,
levels = NULL,
drop_zero = TRUE) {
img = as.character(img)
img = separate_img(img,
levels = levels,
drop_zero = drop_zero)
return(img)
})
setMethod("separate_img", "character", function(img,
levels = NULL,
drop_zero = TRUE) {
img = check_nifti(img)
img = separate_img(img,
levels = levels,
drop_zero = drop_zero)
return(img)
})
setMethod("separate_img", "list", function(img, levels = NULL,
drop_zero = TRUE) {
img = lapply(img, separate_img,
levels = levels,
drop_zero = drop_zero
)
return(img)
}) |
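# Usage sketch for separate_img() above: split a label image into one
# binary mask per level (hypothetical 2 x 2 x 2 array with labels 0-2).
arr <- array(c(0, 1, 1, 2, 0, 2, 1, 0), dim = c(2, 2, 2))
masks <- .separate_img(arr)
names(masks)   # "1" "2" -- zero is dropped by default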
censRegLogLikPanel <- function( beta, yMat, xArr, left, right, nInd, nTime,
obsBelow, obsBetween, obsAbove, nGHQ = nGHQ, ghqPoints ) {
yMatHat <- matrix( matrix( xArr, ncol = dim( xArr )[3] ) %*%
beta[ 1:( length( beta ) - 2 ) ], nrow = nInd, ncol = nTime )
sigmaMu <- exp( beta[ length( beta ) - 1 ] )
sigmaNu <- exp( beta[ length( beta ) ] )
logLikIndMat <- matrix( NA, nrow = nInd, ncol = nGHQ )
grad1LogIndArr <- array( NA, c( nInd, length( beta ), nGHQ ) )
grad2IndArr <- array( NA, c( nInd, length( beta ), nGHQ ) )
for( h in 1:nGHQ ) {
likGhqInner <- matrix( NA, nrow = nInd, ncol = nTime )
likGhqInner[ obsBelow ] <-
( left - yMatHat[ obsBelow ] - sqrt( 2 ) * sigmaMu *
ghqPoints$zeros[ h ] ) / sigmaNu
likGhqInner[ obsAbove ] <-
( yMatHat[ obsAbove ] - right + sqrt( 2 ) * sigmaMu *
ghqPoints$zeros[ h ] ) / sigmaNu
likGhqInner[ obsBetween ] <-
( yMat[ obsBetween ] - yMatHat[ obsBetween ] -
sqrt( 2 ) * sigmaMu * ghqPoints$zeros[ h ] ) / sigmaNu
logLikGhq <- matrix( 0, nrow = nInd, ncol = nTime )
logLikGhq[ obsBelow | obsAbove ] <-
pnorm( likGhqInner[ obsBelow | obsAbove ], log.p = TRUE )
logLikGhq[ obsBetween ] <-
dnorm( likGhqInner[ obsBetween ], log = TRUE ) - log( sigmaNu )
logLikGhqSum <- apply( logLikGhq, 1, sum )
logLikIndMat[ , h ] <- log( ghqPoints$weights[ h ] ) + logLikGhqSum
gradPartGhqLog <- matrix( 0, nrow = nInd, ncol = nTime )
gradPartGhqSign <- gradPartGhqLog
grad2PartGhq <- gradPartGhqLog
gradPartGhqSign[ obsBelow ] <- -1
gradPartGhqLog[ obsBelow ] <-
dnorm( likGhqInner[ obsBelow ], log = TRUE ) - log( sigmaNu )
gradPartGhqSign[ obsAbove ] <- 1
gradPartGhqLog[ obsAbove ] <-
dnorm( likGhqInner[ obsAbove ], log = TRUE ) - log( sigmaNu )
gradPartGhqSign[ obsBetween ] <- sign( likGhqInner[ obsBetween ] )
gradPartGhqLog[ obsBetween ] <-
dnorm( likGhqInner[ obsBetween ], log = TRUE ) +
log( abs( likGhqInner[ obsBetween ] ) ) - 2 * log(sigmaNu)
for( i in 1:( length( beta ) - 2 ) ) {
grad1LogIndArr[ , i, h ] <- log( ghqPoints$weights[ h ] ) +
logLikGhqSum
grad2IndArr[ , i, h ] <-
rowSums( exp( gradPartGhqLog - logLikGhq ) * gradPartGhqSign *
xArr[ , , i ], na.rm = TRUE )
}
grad1LogIndArr[ , length( beta ) - 1, h ] <-
log( sigmaMu ) + log( ghqPoints$weights[ h ] ) + logLikGhqSum
grad2IndArr[ , length( beta ) - 1, h ] <-
rowSums( exp( gradPartGhqLog - logLikGhq ) * gradPartGhqSign *
sqrt( 2 ) * ghqPoints$zeros[ h ] )
grad2PartGhq[ obsBelow ] <-
exp( gradPartGhqLog[ obsBelow ] - logLikGhq[ obsBelow ] ) *
gradPartGhqSign[ obsBelow ] * likGhqInner[ obsBelow ]
grad2PartGhq[ obsAbove ] <-
- exp( gradPartGhqLog[ obsAbove ] - logLikGhq[ obsAbove ] ) *
gradPartGhqSign[ obsAbove ] * likGhqInner[ obsAbove ]
grad2PartGhq[ obsBetween ] <-
exp( gradPartGhqLog[ obsBetween ] - logLikGhq[ obsBetween ] ) *
gradPartGhqSign[ obsBetween ] * likGhqInner[ obsBetween ] - 1 / sigmaNu
grad1LogIndArr[ , length( beta ), h ] <-
log( sigmaNu ) + log( ghqPoints$weights[ h ] ) + logLikGhqSum
grad2IndArr[ , length( beta ), h ] <-
rowSums( grad2PartGhq )
}
logLikInd <- rep( NA, nInd )
gradInd <- matrix( NA, nrow = nInd, ncol = length( beta ) )
for( i in 1:nInd ) {
val <- logLikIndMat[ i, ]
logLikInd[ i ] <- log( sum( exp( val - max( val ) ) ) ) + max( val )
for( j in 1:length( beta ) ) {
gradInd[ i, j ] <- sum(
exp( grad1LogIndArr[ i, j, ] - logLikInd[ i ] ) *
grad2IndArr[ i, j, ] )
}
}
ll <- logLikInd - 0.5 * log( pi )
attr( ll, "gradient" ) <- gradInd
return( ll )
} |
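# Note: censRegLogLikPanel() above is the random-effects panel log-likelihood
# for censored regression: it integrates over the individual effect with
# nGHQ Gauss-Hermite nodes and returns per-individual log-likelihood
# contributions with a "gradient" attribute, the form expected by
# maxLik-style optimizers (a reading of the code, not package documentation).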
structure(list(url = "/api/datasets/5b6c9f/actions/", status_code = 202L,
headers = structure(list(date = "Thu, 25 Mar 2021 15:23:51 GMT",
`content-type` = "application/json;charset=utf-8", `content-length` = "179",
server = "nginx", `content-encoding` = "gzip", vary = "Cookie, Accept-Encoding",
allow = "GET, HEAD, OPTIONS, POST", `x-timing` = "",
`x-xss-protection` = "1; mode=block", `strict-transport-security` = "max-age=31536000; includeSubDomains",
`x-content-type-options` = "nosniff"), class = c("insensitive",
"list")), all_headers = list(list(status = 202L, version = "HTTP/2",
headers = structure(list(date = "Thu, 25 Mar 2021 15:23:51 GMT",
`content-type` = "application/json;charset=utf-8",
`content-length` = "179", server = "nginx", `content-encoding` = "gzip",
vary = "Cookie, Accept-Encoding", allow = "GET, HEAD, OPTIONS, POST",
`x-timing` = "", `x-xss-protection` = "1; mode=block",
`strict-transport-security` = "max-age=31536000; includeSubDomains",
`x-content-type-options` = "nosniff"), class = c("insensitive",
"list")))), cookies = structure(list(domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c("POSIXct",
"POSIXt")), name = logical(0), value = logical(0)), row.names = integer(0), class = "data.frame"),
content = charToRaw("{\"element\": \"shoji:view\", \"self\": \"/api/datasets/5b6c9f/actions/\", \"value\": \"/api/progress/\"}"),
date = structure(1616685831, class = c("POSIXct", "POSIXt"
), tzone = "GMT"), times = c(redirect = 0, namelookup = 2.9e-05,
connect = 3.1e-05, pretransfer = 8.8e-05, starttransfer = 9.2e-05,
total = 0.810913)), class = "response") |
SimShiny <- function(filename = NULL, dir = getwd(), design, ...){
if(!is.null(filename)) on.exit(sink())
UI_CONDITION <- function(condition, name){
cond <- unique(condition)
is_numeric <- is.numeric(cond)
cat(sprintf(' selectInput("%s", "Select %s:",\n', name, name))
cat(sprintf(' choices = c(%s)),\n\n',
if(is_numeric) paste0(cond, collapse = ',')
else paste0('"', cond, '"', collapse = ',')))
invisible(NULL)
}
if(!is.null(filename)){
cat(sprintf('Writing SimShiny components to file \"%s\" in \n directory \"%s\"',
filename, dir))
sink(filename)
}
nms <- names(design)
dots <- list(...)
inputs <- cbind(names(dots), '=', names(dots))
pick <- inputs[!(inputs[,1] %in% c('generate', 'analyse', 'summarise')), 1L]
if(length(pick)){
for(i in 1:length(pick))
inputs[which(pick[i] == inputs[,1]), 3] <- as.character(dots[pick[i]][[1L]])
}
inputs <- paste0(apply(inputs, 1, paste0, collapse=''), collapse=', \n ')
design_is_numeric <- sapply(design, is.numeric)
Functions <- dots[c('generate', 'analyse', 'summarise')]
for(i in seq_len(3L)){
if(!is.null(Functions[[i]])){
output <- capture.output(print(Functions[[i]]))
if(grepl('<bytecode: ', output[length(output)]))
output <- output[-length(output)]
output <- paste0(output, '\n', collapse='')
cat(sprintf('%s <- %s\n', names(Functions[i]), output))
}
}
cat('#-------------------------------------------------------------------\n\n')
cat('library(SimDesign)\n')
cat('library(shiny)\n\n')
cat(sprintf('design_is_numeric <- c(%s)\n\n',
paste0(design_is_numeric, collapse=',')))
cat('ui <- fluidPage(\n\n')
cat(' titlePanel("Simulation"),\n\n')
cat(' sidebarLayout(\n')
cat(' sidebarPanel(\n')
cat(' numericInput("reps", "Number of replications:", 0),\n\n')
for(i in seq_len(ncol(design)))
UI_CONDITION(design[,i], name=nms[i])
cat(' submitButton("Run Simulation")\n')
cat(' ),\n\n')
cat(' mainPanel(\n')
cat(' tableOutput("results")\n')
cat(' )\n')
cat(' )\n')
cat(')\n')
cat('\nserver <- function(input, output) {\n\n')
cat(' Design <- reactive({\n')
cat(sprintf(' df <- data.frame(%s, stringsAsFactors = FALSE)\n',
paste0('input$', nms, collapse=',\n ')))
cat(sprintf(' names(df) <- c(%s)\n', paste0('"', nms, '"', collapse=',\n ')))
cat(' for(i in seq_len(length(df))) if(design_is_numeric[i]) df[,i] <- as.numeric(df[,i])\n')
cat(' df\n')
cat(' })\n\n')
cat(' output$results <- renderTable({\n')
cat(' reps <- input$reps\n')
cat(' if(reps > 0){\n')
cat(' res <- runSimulation(design=Design(), replications=reps,\n')
cat(sprintf(' %s)\n', inputs))
cat(' res <- res[,(ncol(Design())+1):ncol(res)]\n')
cat(' res$REPLICATIONS <- NULL\n')
cat(' res$SEED <- NULL\n')
cat(' return(res)\n')
cat(' } else return(NULL)\n')
cat(' }, digits = 3)\n')
cat('}\n\n')
cat('shinyApp(ui=ui, server=server)\n\n')
invisible(NULL)
} |
stri_datetime_format <- function(
time, format = "uuuu-MM-dd HH:mm:ss", tz = NULL, locale = NULL
) {
.Call(C_stri_datetime_format, time, format, tz, locale)
}
stri_datetime_parse <- function(
str, format = "uuuu-MM-dd HH:mm:ss",
lenient = FALSE, tz = NULL, locale = NULL
) {
.Call(C_stri_datetime_parse, str, format, lenient, tz, locale)
}
stri_datetime_fstr <- function(x, ignore_special=TRUE)
{
x <- .Call(C_stri_datetime_fstr, x)
ignore_special <- (is.logical(ignore_special) && length(ignore_special) == 1L && !is.na(ignore_special) && ignore_special)
if (length(x) > 0 && !ignore_special) {
formats <- outer(
c("date", "time", "datetime", "date_relative", "datetime_relative"),
c("full", "long", "medium", "short"),
stri_paste,
sep="_"
)
which_p <- match(x, stringi::stri_sprintf("'%s'", formats))
x[which(!is.na(which_p))] <- formats[which_p[!is.na(which_p)]]
}
x
} |
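# Usage sketch for the stringi date/time wrappers above (documented stringi
# functions; the output depends on the current locale and time zone):
# stri_datetime_format(Sys.time(), "datetime_full")
# stri_datetime_parse("2024-03-01 10:00:00", tz = "UTC")
# stri_datetime_fstr("%Y-%m-%d")   # strptime-style -> ICU pattern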
NULL
makeTuneMultiCritResult = function(learner, ind, x, y, resampling, control, opt.path, measures, ...) {
or = makeOptResult(learner, control, x, y, resampling, NULL, opt.path, "TuneMultiCritResult", ...)
or$ind = ind
or$measures = measures
return(or)
}
makeTuneMultiCritResultFromOptPath = function(learner, par.set, measures, resampling, control, opt.path) {
j = getOptPathParetoFront(opt.path, index = TRUE)
els = lapply(j, getOptPathEl, op = opt.path)
xs = extractSubList(els, "x", simplify = FALSE)
xs = lapply(xs, trafoValue, par = par.set)
xs = lapply(xs, removeMissingValues)
ys = extractSubList(els, "y", simplify = "rows")
colnames(ys) = opt.path$y.names
makeTuneMultiCritResult(learner, j, xs, ys, resampling, control, opt.path, measures)
}
print.TuneMultiCritResult = function(x, ...) {
catf("Tune multicrit result:")
catf("Points on front: %i", length(x$x))
} |
context('ESF_p151')
s0=c(rep(4,8),rep(16,8),rep(64,8),rep(256,8))
n0.tmp=rep(c(2^2,2^4,2^8,2^12),8)
n0=s0*n0.tmp
e0.tmp=rep(c(rep(2^2,4),rep(2^10,4)),4)
e0=n0*e0.tmp |
timePlot <- function(mydata, pollutant = "nox", group = FALSE, stack = FALSE,
normalise = NULL, avg.time = "default", data.thresh = 0,
statistic = "mean", percentile = NA, date.pad = FALSE,
type = "default", cols = "brewer1", plot.type = "l",
key = TRUE, log = FALSE, windflow = NULL, smooth = FALSE, ci = TRUE,
y.relation = "same", ref.x = NULL, ref.y = NULL,
key.columns = 1, key.position = "bottom",
name.pol = pollutant, date.breaks = 7,
date.format = NULL, auto.text = TRUE, ...) {
variable <- year <- NULL
if (log) nlog <- 10 else nlog <- FALSE
vars <- c("date", pollutant)
if (length(cols) == 1 && cols == "greyscale") {
trellis.par.set(list(strip.background = list(col = "white")))
}
current.strip <- trellis.par.get("strip.background")
current.font <- trellis.par.get("fontsize")
on.exit(trellis.par.set(
fontsize = current.font
))
Args <- list(...)
Args$xlab <- if ("xlab" %in% names(Args)) {
quickText(Args$xlab, auto.text)
} else {
quickText("", auto.text)
}
Args$ylab <- if ("ylab" %in% names(Args)) {
quickText(Args$ylab, auto.text)
} else {
NULL
}
if ("main" %in% names(Args)) {
if (!is.list(Args$main)) Args$main <- quickText(Args$main, auto.text)
}
if ("fontsize" %in% names(Args)) {
trellis.par.set(fontsize = list(text = Args$fontsize))
}
xlim <- if ("xlim" %in% names(Args)) {
Args$xlim
} else {
NULL
}
if (!"pch" %in% names(Args)) {
Args$pch <- NA
}
if (!"lwd" %in% names(Args)) {
Args$lwd <- 1
}
if (!"lty" %in% names(Args)) {
Args$lty <- NULL
}
if (!"layout" %in% names(Args)) {
Args$layout <- NULL
}
strip <- if ("strip" %in% names(Args)) {
Args$strip
} else {
TRUE
}
len.all <- length(mydata$date)
len.unique <- length(unique(mydata$date))
if (type == "default" & "site" %in% names(mydata) & len.all != len.unique) {
if (length(unique(factor(mydata$site))) > 1) stop("More than one site has been detected: choose type = 'site' and pollutant(s)")
}
if (length(percentile) > 1 & length(pollutant) > 1) {
stop("Only one pollutant allowed when considering more than one percentile")
}
if (!missing(statistic) & missing(avg.time)) {
message("No averaging time applied, using avg.time ='month'")
avg.time <- "month"
}
if (!is.null(windflow)) {
vars <- unique(c(vars, "wd", "ws"))
}
mydata <- checkPrep(mydata, vars, type, remove.calm = FALSE)
theStrip <- strip
if (date.pad) mydata <- date.pad(mydata, type = type)
mydata <- cutData(mydata, type, ...)
if (avg.time != "default") {
if (length(percentile) > 1) {
mydata <- group_by(mydata, UQS(syms(type))) %>%
do(calcPercentile(
.,
pollutant = pollutant, avg.time = avg.time,
data.thresh = data.thresh,
percentile = percentile
))
pollutant <- paste("percentile.", percentile, sep = "")
if (missing(group)) group <- TRUE
} else {
mydata <- timeAverage(
mydata,
pollutant = pollutant,
type = type, statistic = statistic,
avg.time = avg.time,
data.thresh = data.thresh,
percentile = percentile
)
}
}
if (type == "default") mydata$default <- "default"
  mydata <- gather(mydata, key = variable, value = value, UQS(syms(pollutant)))
if (type != "default") group <- TRUE
npol <- length(unique(mydata$variable))
if (is.null(Args$layout) & !group & !stack) Args$layout <- c(1, npol)
divide.by.mean <- function(x) {
Mean <- mean(x$value, na.rm = TRUE)
x$value <- x$value / Mean
x
}
norm.by.date <- function(x, thedate) {
temp <- na.omit(x)
id <- which(abs(temp$date - thedate) == min(abs(temp$date - thedate)))
id <- temp$date[id]
x$value <- 100 * x$value / x$value[x$date == id]
x
}
if (!missing(normalise)) {
mydata <- mutate(mydata, variable = factor(variable, levels = unique(variable)))
if (is.null(Args$ylab)) {
Args$ylab <- "normalised level"
}
if (normalise == "mean") {
mydata <- group_by(mydata, variable) %>%
do(divide.by.mean(.))
} else {
thedate <- as.POSIXct(strptime(normalise, format = "%d/%m/%Y", tz = "GMT"))
mydata <- group_by(mydata, variable) %>%
do(norm.by.date(., thedate = thedate))
}
}
if (is.null(Args$ylab)) {
Args$ylab <- quickText(paste(pollutant, collapse = ", "), auto.text)
}
mylab <- sapply(seq_along(pollutant), function(x) quickText(pollutant[x], auto.text))
if (!missing(name.pol)) {
mylab <- sapply(seq_along(name.pol), function(x)
quickText(name.pol[x], auto.text))
}
myColors <- if (length(cols) == 1 && cols == "greyscale") {
openColours(cols, npol + 1)[-1]
} else {
openColours(cols, npol)
}
myform <- formula(paste("value ~ date |", type))
if (is.null(Args$strip)) {
strip <- TRUE
}
strip.left <- FALSE
dates <- dateBreaks(mydata$date, date.breaks)$major
if (is.null(date.format)) {
formats <- dateBreaks(mydata$date, date.breaks)$format
} else {
formats <- date.format
}
scales <- list(
x = list(at = dates, format = formats),
y = list(log = nlog, relation = y.relation, rot = 0)
)
if (!group) {
if (is.null(Args$strip)) {
strip <- FALSE
}
myform <- formula("value ~ date | variable")
if (npol == 1) {
strip.left <- FALSE
} else {
strip.left <- strip.custom(
par.strip.text = list(cex = 0.9), horizontal = FALSE,
factor.levels = mylab
)
}
scales <- list(
x = list(at = dates, format = formats),
y = list(
relation = y.relation,
rot = 0, log = nlog
)
)
if (is.null(Args$lty)) Args$lty <- 1
}
if (is.null(Args$lty)) {
Args$lty <- 1:length(pollutant)
}
if (type == "default") strip <- FALSE
if (stack) {
mydata$year <- as.character(year(mydata$date))
if (is.null(Args$layout)) {
Args$layout <- c(1, length(unique(mydata$year)))
}
strip <- FALSE
myform <- formula("value ~ date | year")
strip.left <- strip.custom(par.strip.text = list(cex = 0.9), horizontal = FALSE)
dates <- as.POSIXct(unique(paste(format(mydata$date, "%Y-%m"), "-01", sep = "")), "GMT")
scales <- list(x = list(format = "%d-%b", relation = "sliced"), y = list(log = nlog))
xlim <- lapply(split(mydata, mydata["year"]), function(x) range(x$date))
}
if (missing(key.columns)) key.columns <- npol
if (key) {
if (any(!is.na(Args$pch))) {
key <- list(
lines = list(
col = myColors[1:npol], lty = Args$lty,
lwd = Args$lwd
), points = list(
pch = Args$pch,
col = myColors[1:npol]
),
text = list(lab = mylab), space = key.position, columns = key.columns
)
} else {
key <- list(
lines = list(
col = myColors[1:npol], lty = Args$lty,
lwd = Args$lwd
),
text = list(lab = mylab), space = key.position, columns = key.columns
)
}
} else {
key <- NULL
}
if (theStrip) {
strip <- strip
strip.left <- strip.left
} else {
strip <- FALSE
strip.left <- FALSE
}
if (length(type) == 1 & type[1] == "wd" & is.null(Args$layout)) {
wds <- c("NW", "N", "NE", "W", "E", "SW", "S", "SE")
mydata$wd <- ordered(mydata$wd, levels = wds)
wd.ok <- sapply(wds, function(x) {
if (x %in% unique(mydata$wd)) FALSE else TRUE
})
skip <- c(wd.ok[1:4], TRUE, wd.ok[5:8])
mydata$wd <- factor(mydata$wd)
Args$layout <- c(3, 3)
if (!"skip" %in% names(Args)) {
Args$skip <- skip
}
}
if (!"skip" %in% names(Args)) {
Args$skip <- FALSE
}
gap <- difftime(max(mydata$date), min(mydata$date), units = "secs") / 80
if (is.null(xlim)) xlim <- range(mydata$date) + c(-1 * gap, gap)
mydata$variable <- factor(mydata$variable, levels = pollutant)
xyplot.args <- list(
x = myform, data = mydata, groups = mydata$variable,
as.table = TRUE,
par.strip.text = list(cex = 0.8),
scales = scales,
key = key,
xlim = xlim,
strip = strip,
strip.left = strip.left,
windflow = windflow,
yscale.components = yscale.components.log10ticks,
panel = panel.superpose,
panel.groups = function(x, y, col.line, col.symbol, col, col.se, type,
group.number, lty, lwd, pch, subscripts, windflow, ...) {
if (group.number == 1) {
panel.grid(-1, 0)
panel.abline(v = dates, col = "grey90")
}
if (!group & !stack) {
panel.abline(v = dates, col = "grey90")
panel.grid(-1, 0)
}
panel.xyplot(
x, y,
type = plot.type, lty = lty, lwd = lwd, pch = pch,
col.line = myColors[group.number],
col.symbol = myColors[group.number], ...
)
if (any(!is.na(Args$pch))) {
lpoints(
x, y,
type = "p", pch = Args$pch[group.number],
col.symbol = myColors[group.number], ...
)
}
if (!is.null(windflow)) {
list1 <- list(x, y, dat = mydata, subscripts)
list2 <- windflow
flow.args <- listUpdate(list1, list2)
do.call(panel.windflow, flow.args)
}
if (smooth) {
panel.gam(
x, y,
col = myColors[group.number],
col.se = myColors[group.number],
lty = 1, lwd = 1, se = ci, k = NULL, ...
)
}
if (!is.null(ref.x)) do.call(panel.abline, ref.x)
if (!is.null(ref.y)) do.call(panel.abline, ref.y)
}
)
xyplot.args <- listUpdate(xyplot.args, Args)
plt <- do.call(xyplot, xyplot.args)
plot(plt)
newdata <- mydata
output <- list(plot = plt, data = newdata, call = match.call())
class(output) <- "openair"
invisible(output)
}
panel.windflow <- function(x, y, dat, subscripts, scale = 0.2, ws = "ws", wd = "wd",
col = "black", lwd = 1, length = 0.1, angle = 20, ...) {
max.ws <- max(dat[[ws]], na.rm = TRUE)
delta.x <- scale * diff(current.panel.limits()$xlim)
delta.y <- scale * diff(current.panel.limits()$ylim)
delta.x.cm <- diff(current.panel.limits(unit = "cm")$xlim)
delta.y.cm <- diff(current.panel.limits(unit = "cm")$ylim)
if (delta.x.cm > delta.y.cm) {
delta.y <- delta.y * delta.x.cm / delta.y.cm
} else {
delta.x <- delta.x * delta.y.cm / delta.x.cm
}
x0 <- delta.x * dat[[ws]][subscripts] *
sin(2 * pi * dat[[wd]][subscripts] / 360) / max.ws
y0 <- delta.y * dat[[ws]][subscripts] *
cos(2 * pi * dat[[wd]][subscripts] / 360) / max.ws
panel.arrows(
x0 = x - x0 / 2,
y0 = y - y0 / 2,
x1 = x + x0 / 2,
y1 = y + y0 / 2, length = length, angle = angle, code = 1,
col = col, lwd = lwd
)
} |
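# Usage sketch for timePlot() above, using openair's bundled example data:
# library(openair)
# timePlot(mydata, pollutant = c("nox", "no2"), avg.time = "month")
# timePlot(mydata, pollutant = "nox", windflow = list(scale = 0.1),
#          key = FALSE)   # overlay wind vectors via panel.windflow()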
example8 <- function(t, y, parameters) {
list(c(y[2], -y[1] - y[2]))
} |
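# Usage sketch for example8() above: a planar linear system in the form
# expected by deSolve/phaseR, i.e. dy1/dt = y2, dy2/dt = -y1 - y2.
# library(deSolve)
# out <- ode(y = c(1, 0), times = seq(0, 10, by = 0.1), func = example8,
#            parms = NULL)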
qCompound <-
function(p,parent,compound,compoundDist,params,...) {
if (!exists(paste("p",parent,sep=""))) {
return(paste("The parent distribution",parent,"doesn't exist"))
}
if (!is.element(compound,compoundDist)) {
return(paste("The discrete distribution",compound,"doesn't exist"))
}
l <- p[p<0|p>1]
    if (length(l)>0) stop("Parameter p must be a probability in [0, 1]")
xval <- double(length(p))
Finv <- get(paste("q", parent, sep = ""), mode = "function")
phi <- get(paste("pgf", compound, sep = ""), mode = "function")
phiInv <- get(paste("pgfI", compound, sep = ""), mode = "function")
xval <- Finv(1-phiInv(1-p*(1-phi(0,params)),params),...)
return(xval)
} |
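# Usage sketch for qCompound() above (Compounding-package style API): the
# parent's quantile function ("qexp") and the compounding distribution's
# pgf helpers ("pgfgeometric", "pgfIgeometric") must exist on the search
# path; the names below are assumptions, not verified exports.
# qCompound(c(0.25, 0.5, 0.75), parent = "exp", compound = "geometric",
#           compoundDist = c("geometric", "poisson"), params = 0.5)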
runDataTypeTests <- function(db, redshift=FALSE) {
test_that("integer works", {
events <- data.frame(counter=c(1, 2))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$counter, events$counter)
})
test_that("bigint works", {
events <- data.frame(bigcounter=c(1, 2))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$bigcounter, events$bigcounter)
})
test_that("large bigint", {
dbxExecute(db, "INSERT INTO events (bigcounter) VALUES (9007199254740991)")
dbxSelect(db, "SELECT * FROM events ORDER BY id")
dbxExecute(db, "INSERT INTO events (bigcounter) VALUES (9007199254740992)")
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$bigcounter, c(9007199254740991, 9007199254740992))
})
test_that("small bigint", {
dbxExecute(db, "INSERT INTO events (bigcounter) VALUES (-9007199254740991)")
dbxSelect(db, "SELECT * FROM events ORDER BY id")
dbxExecute(db, "INSERT INTO events (bigcounter) VALUES (-9007199254740992)")
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$bigcounter, c(-9007199254740991, -9007199254740992))
})
test_that("float works", {
events <- data.frame(speed=c(1.2, 3.4))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$speed, events$speed, tolerance=0.000001)
})
test_that("decimal works", {
events <- data.frame(distance=c(1.2, 3.4))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$distance, events$distance)
})
test_that("boolean works", {
events <- data.frame(active=c(TRUE, FALSE))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isSQLite(db) || isRMariaDB(db)) {
res$active <- res$active != 0
} else if (isODBCPostgres(db)) {
res$active <- res$active != "0"
}
expect_equal(res$active, events$active)
})
test_that("json works", {
skip_if(isRMariaDB(db))
events <- data.frame(properties=c('{"hello": "world"}'), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$properties, events$properties)
})
test_that("jsonb works", {
skip_if(!isPostgres(db))
events <- data.frame(propertiesb=c('{"hello": "world"}'), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$propertiesb, events$propertiesb)
})
test_that("jsonlite with jsonb works", {
skip_if(!isPostgres(db))
events <- data.frame(propertiesb=c(jsonlite::toJSON(list(hello="world"))), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(jsonlite::fromJSON(res$propertiesb), jsonlite::fromJSON(events$propertiesb))
})
test_that("dates works", {
events <- data.frame(created_on=as.Date(c("2018-01-01", "2018-01-02")))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isSQLite(db)) {
res$created_on <- as.Date(res$created_on)
}
expect_equal(res$created_on, events$created_on)
expect_true(all(format(res$created_on, "%Z") == "UTC"))
})
test_that("datetimes works", {
t1 <- as.POSIXct("2018-01-01 12:30:55")
t2 <- as.POSIXct("2018-01-01 16:59:59")
events <- data.frame(updated_at=c(t1, t2))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isSQLite(db)) {
res$updated_at <- as.POSIXct(res$updated_at, tz="Etc/UTC")
attr(res$updated_at, "tzone") <- Sys.timezone()
}
expect_equal(res$updated_at, events$updated_at)
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE updated_at = '2018-01-01 20:30:55.000000'")
expect_equal(1, res$count)
})
test_that("datetimes with time zones works", {
t1 <- as.POSIXct("2018-01-01 12:30:55", tz="America/New_York")
t2 <- as.POSIXct("2018-01-01 16:59:59", tz="America/New_York")
events <- data.frame(updated_at=c(t1, t2))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isSQLite(db)) {
res$updated_at <- as.POSIXct(res$updated_at, tz="Etc/UTC")
attr(res$updated_at, "tzone") <- Sys.timezone()
}
attr(events$updated_at, "tzone") <- Sys.timezone()
expect_equal(res$updated_at, events$updated_at)
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE updated_at = '2018-01-01 17:30:55.000000'")
expect_equal(res$count, 1)
})
test_that("timestamp with time zone works", {
skip_if(isSQLite(db))
t1 <- as.POSIXct("2018-01-01 12:30:55", tz="America/New_York")
t2 <- as.POSIXct("2018-01-01 16:59:59", tz="America/New_York")
events <- data.frame(deleted_at=c(t1, t2))
dbxInsert(db, "events", events)
attr(events$deleted_at, "tzone") <- Sys.timezone()
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$deleted_at, events$deleted_at)
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE deleted_at = '2018-01-01 17:30:55'")
expect_equal(res$count, 1)
})
test_that("datetimes have precision", {
t1 <- as.POSIXct("2018-01-01 12:30:55.123456")
events <- data.frame(updated_at=c(t1))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isSQLite(db)) {
res$updated_at <- as.POSIXct(res$updated_at, tz="Etc/UTC")
attr(res$updated_at, "tzone") <- Sys.timezone()
}
expect_equal(res$updated_at, events$updated_at)
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE updated_at = '2018-01-01 20:30:55.123456'")
expect_equal(res$count, 1)
})
test_that("time zone is UTC", {
skip_if(isSQLite(db) || isSQLServer(db))
if (isPostgres(db)) {
expect_equal("UTC", dbxSelect(db, "SHOW timezone")$TimeZone)
} else {
expect_equal("+00:00", dbxSelect(db, "SELECT @@session.time_zone")$`@@session.time_zone`)
}
})
test_that("times work", {
events <- data.frame(open_time=c("12:30:55", "16:59:59"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$open_time, events$open_time)
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE open_time = '12:30:55'")
expect_equal(res$count, 1)
})
test_that("times with time zone work", {
skip_if(!isPostgres(db))
events <- data.frame(close_time=c("12:30:55", "16:59:59"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
if (isODBCPostgres(db)) {
expect_equal(res$close_time, paste0(events$close_time, "+00"))
} else {
expect_equal(res$close_time, events$close_time)
}
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE close_time = '12:30:55'")
expect_equal(res$count, 1)
})
test_that("hms with times work", {
events <- data.frame(open_time=c(hms::as_hms("12:30:55"), hms::as_hms("16:59:59")), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$open_time, as.character(events$open_time))
res <- dbxSelect(db, "SELECT COUNT(*) AS count FROM events WHERE open_time = '12:30:55'")
expect_equal(res$count, 1)
})
test_that("binary works", {
skip_if(redshift || isODBCPostgres(db) || isSQLServer(db))
images <- list(1:3, 4:6)
serialized_images <- lapply(images, function(x) { serialize(x, NULL) })
events <- data.frame(image=I(serialized_images))
dbxInsert(db, "events", events)
if (isRMySQL(db)) {
res <- dbxSelect(db, "SELECT hex(image) AS image FROM events ORDER BY id")
res$image <- lapply(res$image, hexToRaw)
} else {
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
}
expect_equal(lapply(res$image, unserialize), images)
})
test_that("blob with binary works", {
skip_if(redshift || isODBCPostgres(db) || isSQLServer(db))
images <- list(1:3, 4:6)
serialized_images <- lapply(images, function(x) { serialize(x, NULL) })
events <- data.frame(image=blob::as_blob(serialized_images))
dbxInsert(db, "events", events)
if (isRMySQL(db)) {
res <- dbxSelect(db, "SELECT hex(image) AS image FROM events ORDER BY id")
res$image <- lapply(res$image, hexToRaw)
} else {
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
}
expect_equal(blob::as_blob(res$image), events$image)
})
test_that("empty blob works", {
events <- data.frame(city=c("Boston"))
dbxInsert(db, "events", events)
sql <- "SELECT id, image FROM events WHERE image IS NULL"
res <- dbxSelect(db, sql)
expect_equal(nrow(res), 1)
if (isRMySQL(db)) {
expect_equal(res$image[[1]], as.character(NA))
} else {
expect_null(res$image[[1]])
dbxUpdate(db, "events", res, where_cols=c("id"))
res <- dbxSelect(db, sql)
expect_equal(nrow(res), 1)
}
})
test_that("ts uses observation values", {
events <- data.frame(counter=ts(1:3, start=c(2018, 1), end=c(2018, 3), frequency=12))
dbxInsert(db, "events", events)
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res$counter, as.integer(events$counter))
})
test_that("can update what what just selected and get same result", {
skip_if(isODBCPostgres(db) || isSQLServer(db))
df <- data.frame(
active=c(TRUE, FALSE),
created_on=as.Date(c("2018-01-01", "2018-02-01")),
updated_at=as.POSIXct(c("2018-01-01 12:30:55", "2018-01-01 16:59:59")),
open_time=c("09:30:55", "13:59:59"),
properties=c('{"hello": "world"}', '{"hello": "r"}')
)
dbxInsert(db, "events", df)
all <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
dbxUpdate(db, "events", all, where_cols=c("id"))
res <- dbxSelect(db, "SELECT * FROM events ORDER BY id")
expect_equal(res, all)
})
} |
test_that("brk_manual", {
expect_silent(brk_res(brk_manual(1:3, c(TRUE, TRUE, TRUE))))
expect_silent(brk_res(brk_manual(c(1, 2, 2, 3), c(TRUE, TRUE, FALSE, TRUE))))
expect_silent(brk_res(brk_manual(c(-Inf, 1, Inf), rep(TRUE, 3))))
expect_error(brk_res(brk_manual(c(1, 2, 2, 3), c(TRUE, FALSE, FALSE, TRUE))))
expect_error(brk_res(brk_manual(c(0, 3, 1), c(TRUE, TRUE, TRUE))))
})
test_that("categorize works", {
x <- seq(0.5, 3.5, 0.5)
breaks <- brk_res(brk_manual(1:3, c(TRUE, TRUE, TRUE)))
r <- categorize(x, breaks)
expect_equivalent(r, c(NA, 1, 1, 2, 2, NA, NA))
breaks <- brk_res(brk_manual(1:3, c(TRUE, TRUE, FALSE)))
r <- categorize(x, breaks)
expect_equivalent(r, c(NA, 1, 1, 2, 2, 2, NA))
breaks <- brk_res(brk_manual(c(1, 2, 2, 3), c(TRUE, TRUE, FALSE, TRUE)))
r <- categorize(x, breaks)
expect_equivalent(r, c(NA, 1, 1, 2, 3, NA, NA))
x <- c(Inf, 1, -Inf, NA, NaN)
breaks <- brk_res(brk_manual(1:3, c(TRUE, TRUE, TRUE)))
r <- categorize(x, breaks)
expect_equivalent(r, c(NA, 1, NA, NA, NA))
})
test_that("categorize_impl/categorize_non_numeric equivalence", {
replicate(100, {
n <- 10
x <- rnorm(n) * 10
breaks_pop <- c(-Inf, -5:5, Inf)
b <- sort(sample(breaks_pop, n, replace = FALSE))
left <- sample(c(TRUE, FALSE), n, replace = TRUE)
x[c(3,5,7)] <- sample(breaks_pop, 3)
b[3] <- b[2]
b[8] <- b[7]
left[2:3] <- left[7:8] <- c(TRUE, FALSE)
ci <- categorize_impl(x, b, left)
cnn <- categorize_non_numeric(x, b, left)
expect_equal(ci, cnn)
})
}) |
PW_C <- function(train, test, beta=8.0, ro=0.001, epsilon=0.001){
alg <- RKEEL::R6_PW_C$new()
alg$setParameters(train, test, beta, ro, epsilon)
return (alg)
}
R6_PW_C <- R6::R6Class("R6_PW_C",
inherit = ClassificationAlgorithm,
public = list(
beta = 8.0,
ro = 0.001,
epsilon = 0.001,
setParameters = function(train, test, beta=8.0, ro=0.001, epsilon=0.001){
super$setParameters(train, test)
self$beta <- beta
self$ro <- ro
self$epsilon <- epsilon
}
),
private = list(
jarName = "PW.jar",
algorithmName = "PW-C",
algorithmString = "Prototipe dependant classifier",
getParametersText = function(){
text <- ""
text <- paste0(text, "Beta = ", self$beta, "\n")
text <- paste0(text, "Ro = ", self$ro, "\n")
text <- paste0(text, "Epsilon = ", self$epsilon, "\n")
return(text)
}
)
) |
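# Usage sketch for PW_C() above (RKEEL-style workflow; the dataset names
# are hypothetical):
# library(RKEEL)
# train <- loadKeelDataset("iris_train")
# test  <- loadKeelDataset("iris_test")
# alg <- PW_C(train, test, beta = 8.0)
# alg$run()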
chisqFit <- function(resObserved, data, DstarM = FALSE, tt = NULL, formula = NULL) {
if (is.DstarM.fitObs(resObserved)) {
if (is.null(tt)) {
tt <- resObserved$tt
m <- resObserved$obs
} else {
m <- getPdfs(resObserved$resDecision, tt)
}
formula <- resObserved[["resDecision"]][["formula"]]
} else {
m <- resObserved
}
by <- unique(zapsmall(diff(tt)))
if (length(by) != 1) {
stop("Time grid tt must be equally spaced and length(unique(zapsmall(diff(tt)))) == 1 must be TRUE.",
call. = FALSE)
}
if (!is.data.frame(data)) {
stop(sprintf("Argument data should be dataframe. The supplied object has mode %s",
mode(data)))
} else {
if (!is.matrix(resObserved))
formula <- resObserved$resDecision$formula
data <- getData(formula, data)
rtime <- data[["rtime"]]
response <- data[["response"]]
condition <- data[["condition"]]
hasConditions <- data[["hasConditions"]]
data <- data[["data"]]
ncondition <- length(unique(data[[condition]]))
if (is.DstarM.fitObs(resObserved) && !is.null(resObserved$ncondition) &&
ncondition != resObserved$ncondition) {
stop(sprintf("Number of conditions in resObserved (%d) does not match number of conditions in the data (%d)",
as.integer(resObserved$ncondition), as.integer(ncondition)))
} else if (!is.DstarM.fitObs(resObserved) && 2 * ncondition != NCOL(resObserved)) {
stop(sprintf("Number of conditions in resObserved (%d) does not match number of conditions in the data (%d)",
as.integer(NCOL(resObserved)), as.integer(2 * ncondition)))
}
mm <- matrix(0, ncondition * 2, ncondition)
mm[1:dim(mm)[1L] + dim(mm)[1L] * rep(1:dim(mm)[2L] - 1, each = 2)] <- 1
rt <- split(data[[rtime]], list(data[[response]], data[[condition]]))
ncr <- lengths(rt)
g <- getGhat(rt = rt, tt = tt, ncondition = ncondition, mm = mm,
by = by)
}
if (DstarM) {
tmp <- 1:(NCOL(m) - 1)
ii <- rep(tmp, times = rev(tmp))
jj <- unlist(lapply(tmp, function(x, m) (x + 1):m, m = ncol(m)))
out <- numeric(length(ii))
for (l in 1:length(ii)) {
a <- customConvolveO(g[, ii[l]], by * rev(m[, jj[l]]))[seq_along(tt)]
b <- customConvolveO(g[, jj[l]], by * rev(m[, ii[l]]))[seq_along(tt)]
      out[l] <- chisq(tt = tt, a = a, b = b) * 100 * (ncr[ii[l]] +
        ncr[jj[l]])/sum(ncr)
}
} else {
out <- numeric(NCOL(m))
for (i in 1:NCOL(m)) {
out[i] <- chisq(tt = tt, a = m[, i], b = g[, i]) * 100 * ncr[i]/sum(ncr)
}
}
return(list(sum = sum(out), chisq = out))
}
R0types<-function(l11,l12,l21,l22,death1,death2){
L<-l11-l22-death1+death2
c<-sqrt(L*L+4*l12*l21)
f1<-(c+L)/(c+L+2*l12)
R0<-f1*(l11+l12)/death1+(1-f1)*(l22+l21)/death2
R0
}
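# Quick check of R0types(): the two-type basic reproduction number from
# hypothetical transmission rates (l11, l12, l21, l22) and type-specific
# death rates; all parameter values below are made up for illustration.
R0types(l11 = 2.0, l12 = 0.1, l21 = 0.2, l22 = 1.5, death1 = 1.0, death2 = 1.0)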
drive_update <- function(file,
media = NULL,
...,
verbose = deprecated()) {
warn_for_verbose(verbose)
if ((!is.null(media)) && (!file.exists(media))) {
drive_abort(c(
"No file exists at the local {.arg media} path:",
bulletize(gargle_map_cli(media, "{.path <<x>>}"), bullet = "x")
))
}
file <- as_dribble(file)
file <- confirm_single_file(file)
meta <- toCamel(list2(...))
if (is.null(media) && length(meta) == 0) {
drive_bullets(c(
"!" = "No updates specified."
))
return(invisible(file))
}
meta[["fields"]] <- meta[["fields"]] %||% "*"
if (is.null(media)) {
out <- drive_update_metadata(file, meta)
} else {
if (length(meta) == 0) {
out <- drive_update_media(file, media)
} else {
media <- enc2utf8(media)
out <- drive_update_multipart(file, media, meta)
}
}
drive_bullets(c("File updated:", bulletize(gargle_map_cli(out))))
invisible(out)
}
drive_update_media <- function(file, media) {
request <- request_generate(
endpoint = "drive.files.update.media",
params = list(
fileId = file$id,
uploadType = "media",
fields = "*"
)
)
request$body <- httr::upload_file(path = media)
response <- request_make(request)
as_dribble(list(gargle::response_process(response)))
}
drive_update_metadata <- function(file, meta) {
params <- meta %||% list()
params$fileId <- file$id
request <- request_generate(
endpoint = "drive.files.update",
params = params
)
response <- request_make(request)
as_dribble(list(gargle::response_process(response)))
}
drive_update_multipart <- function(file, media, meta) {
params <- meta %||% list()
params$fileId <- file$id
params$uploadType <- "multipart"
request <- request_generate(
endpoint = "drive.files.update.media",
params = params
)
meta_file <- withr::local_file(
tempfile("drive-update-meta", fileext = ".json")
)
write_utf8(jsonlite::toJSON(meta), meta_file)
request$body <- list(
metadata = httr::upload_file(
path = meta_file,
type = "application/json; charset=UTF-8"
),
media = httr::upload_file(path = media)
)
response <- request_make(request, encode = "multipart")
as_dribble(list(gargle::response_process(response)))
}
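# Usage sketch for drive_update() (requires an authenticated googledrive
# session; the file names below are hypothetical):
# drive_update("report.csv", media = "local/report.csv")
# drive_update("report.csv", name = "report-v2.csv")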
left_equal <- function(x1,x2){
x <- c(x1,x2)[which.min(c(nchar(x1),nchar(x2)))]
object <- c(x1,x2)[which.max(c(nchar(x1),nchar(x2)))]
tolower(x) == tolower(left(object,nchar(x)))
}
right_equal <- function(x1,x2){
x <- c(x1,x2)[which.min(c(nchar(x1),nchar(x2)))]
object <- c(x1,x2)[which.max(c(nchar(x1),nchar(x2)))]
tolower(x) == tolower(right(object,nchar(x)))
}
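# left_equal()/right_equal() compare the shorter string against the matching
# end of the longer one, case-insensitively. They assume left() and right()
# helpers defined elsewhere in the package; a minimal sketch of those helpers:
left <- function(x, n) substr(x, 1L, n)
right <- function(x, n) substr(x, nchar(x) - n + 1L, nchar(x))
left_equal("abc", "ABCdef")
right_equal("DEF", "abcdef")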
rm(list = ls())
do.calc <- FALSE
do.save <- FALSE
if (do.calc == FALSE) message("For performance reasons, this vignette builds from pre-calculated data.\n\n To run all calculations, set 'do.calc <- TRUE' in the vignette's first code chunk. \n Building the vignette will take a while.")
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
fig.align = "center",
dev = "png",
dpi=150, fig.height=7, fig.width=7,
dev.args = list(),
out.width = "90%"
)
op <- par(no.readonly = TRUE)
library(GUTS)
packageVersion("GUTS")
JA_data_file_name <- system.file("extdata", "Data_for_GUTS_software_ring_test_A_v05.xlsx", package = "GUTS", mustWork = TRUE)
par_A <- data.frame(symbols = c("hb", "ke", "kk", "mn", "beta"), JAsymbols = c("hb", "kd", "kk", "mw", "beta"), SD = c(0.01, 0.8, 0.6, 3, NA), IT = c(0.02, 0.8, NA, 5, 5.3))
par_A
library(xlsx)
read.xlsx(
file = paste0(JA_data_file_name),
sheetName = "Data A",
rowIndex = c(1:11),
colIndex = seq(which(LETTERS == "A"), which(LETTERS == "G")),
header = TRUE
)
data_A_SD <- read.xlsx(
file = paste0(JA_data_file_name),
sheetName = "Data A",
rowIndex = c(4:11),
colIndex = seq(which(LETTERS == "B"), which(LETTERS == "G")),
header = FALSE
)
con_A_SD <- as.numeric(data_A_SD[1,])
data_A_SD <- data_A_SD[-1,]
day_A_SD <-
as.numeric(
t(
read.xlsx(
file = paste0(JA_data_file_name),
sheetName = "Data A",
rowIndex = c(5:11),
colIndex = seq(which(LETTERS == "A")),
header = FALSE
)
)
)
names(data_A_SD) <- paste0("c", con_A_SD)
rownames(data_A_SD) <- paste0("d", day_A_SD)
GUTS_A_SD <- list(
C0 = guts_setup(
C = rep_len(con_A_SD[1], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c0, yt = day_A_SD,
model = "SD"
),
C2 = guts_setup(
C = rep_len(con_A_SD[2], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c2, yt = day_A_SD,
model = "SD"
),
C4 = guts_setup(
C = rep_len(con_A_SD[3], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c4, yt = day_A_SD,
model = "SD"
),
C6 = guts_setup(
C = rep_len(con_A_SD[4], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c6, yt = day_A_SD,
model = "SD"
),
C8 = guts_setup(
C = rep_len(con_A_SD[5], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c8, yt = day_A_SD,
model = "SD"
),
C16 = guts_setup(
C = rep_len(con_A_SD[6], length(day_A_SD)), Ct = day_A_SD,
y = data_A_SD$c16, yt = day_A_SD,
model = "SD"
)
)
library('adaptMCMC')
logposterior <- function( pars, guts_objects,
isOutOfBoundsFun = function(p) any( is.na(p), is.infinite(p) ) ) {
if ( isOutOfBoundsFun(pars) ) return(-Inf)
return(
sum(sapply( guts_objects, function(obj) guts_calc_loglikelihood(obj, pars) ))
)
}
is_out_of_bounds_fun_SD <- function(p) any( is.na(p), is.infinite(p), p < 0, p["kk"] > 30 )
load(system.file("extdata", "vignetteGUTS-ringTest-SD-MCMCresults.Rdata",
package = "GUTS", mustWork = TRUE)
)
if (all(is.finite(mcmc_result_SD$log.p))) {
par( mfrow = c(dim(mcmc_result_SD$samples)[2] + 1, 2) , mar = c(5,4,1,0.5))
plot(as.mcmc(cbind(mcmc_result_SD$samples, LL = mcmc_result_SD$log.p)), auto.layout = FALSE)
par(op)
} else {
par( mfrow = c(dim(mcmc_result_SD$samples)[2], 2) , mar = c(5,4,1,0.5))
plot(as.mcmc(mcmc_result_SD$samples), auto.layout = FALSE)
par(op)
}
eval_MCMC <- function(sampMCMC, expectedVal = NULL, plot = TRUE) {
bestFit <- sampMCMC$samples[which.max(sampMCMC$log.p),]
qu <- apply(sampMCMC$samples, 2, quantile, probs = c(0.025, 0.5, 0.975))
if (plot) {
if(is.null(expectedVal)) expectedVal <- rep(NA, dim(sampMCMC$samples)[2])
plot(seq(dim(sampMCMC$samples)[2]), expectedVal, pch = 20, col = "darkgrey", cex = 2, ylim = range(qu),
xaxt = "n", xlab = "Model parameter", ylab = "Parameter value")
arrows(x0 = seq(dim(sampMCMC$samples)[2]), y0 = qu[1,], y1 = qu[3,], angle = 90, length = 0.1, code = 3)
points(x = seq(dim(sampMCMC$samples)[2]), y = bestFit, pch = "-", cex = 4)
axis(side = 1, at = seq(dim(sampMCMC$samples)[2]), dimnames(sampMCMC$samples)[[2]])
}
res <- rbind(bestFit, qu)
rownames(res)[1] <- "best"
if (!all(is.na(expectedVal))) {
res <- rbind(res, expectedVal)
rownames(res)[dim(res)[1]] <- "expect"
}
return(res)
}
eval_MCMC(mcmc_result_SD, expectedVal = par_A$SD[-which(is.na(par_A$SD))])
data_A_IT <- read.xlsx(
file = paste0(JA_data_file_name),
sheetName = "Data A",
rowIndex = c(17:24),
colIndex = seq(which(LETTERS == "B"), which(LETTERS == "G")),
header = FALSE
)
con_A_IT <- as.numeric(data_A_IT[1,])
data_A_IT <- data_A_IT[-1,]
day_A_IT <-
as.numeric(
t(
read.xlsx(
file = paste0(JA_data_file_name),
sheetName = "Data A",
rowIndex = c(18:24),
colIndex = seq(which(LETTERS == "A")),
header = FALSE
)
)
)
names(data_A_IT) <- paste0("c", con_A_IT)
rownames(data_A_IT) <- paste0("d", day_A_IT)
GUTS_A_IT <- lapply(seq(length(con_A_IT)),
function(i, dat, days, con) guts_setup(
C = rep_len(con[i], length(days)), Ct = days,
y = dat[,i], yt = days,
model = "IT", dist = "loglogistic"
), dat = data_A_IT, days = day_A_IT, con = con_A_IT
)
names(GUTS_A_IT) <- paste0("c", con_A_IT)
is_out_of_bounds_fun_IT <- function(p) any( is.na(p), is.infinite(p), p < 0, p[4] <= 1, exp(8/p[4]) * p[3] > 1e200)
load(system.file("extdata", "vignetteGUTS-ringTest-IT-MCMCresults.Rdata",
package = "GUTS", mustWork = TRUE)
)
if (all(is.finite(mcmc_result_IT$log.p))) {
par( mfrow = c(dim(mcmc_result_IT$samples)[2] + 1, 2) , mar = c(5,4,1,0.5))
plot(as.mcmc(cbind(mcmc_result_IT$samples, LL = mcmc_result_IT$log.p)), auto.layout = FALSE)
par(op)
} else {
par( mfrow = c(dim(mcmc_result_IT$samples)[2], 2) , mar = c(5,4,1,0.5))
plot(as.mcmc(mcmc_result_IT$samples), auto.layout = FALSE)
par(op)
}
eval_MCMC(mcmc_result_IT, expectedVal = par_A$IT[-which(is.na(par_A$IT))])
conc <- seq(0, 16, by = 2)
guts_obj_forecast <- lapply(conc,
function(concentration) guts_setup(
C = rep(concentration, 7),
Ct = seq(0,12, by = 2),
y = c(100, rep(0,6)),
yt = seq(0,12, by = 2),
model = "IT", dist = "loglogistic", N = 1000
)
)
mcmc_forecasts_paras <- mcmc_result_IT$samples
mcmc_forecasts_paras[,1] <- 0
load(system.file("extdata", "vignetteGUTS-ringTest-forecast.Rdata",
package = "GUTS", mustWork = TRUE)
)
par(mfrow = c(2,3), mar = c(5,4,3, 0.5))
sapply(tail(names(forec), -2),
function(day)
plot(as.factor(forec$conc), forec[, day],
ylim = c(0,1),
xlab = "concentration (micromol/l)", ylab = "probability of survival",
main = day)
)
par(op)
load(system.file("extdata", "vignetteGUTS-ringTest-logLC50.Rdata",
package = "GUTS", mustWork = TRUE)
)
LC50 <- quantile(exp(logLC50s), c(0.025, 0.5, 0.975))
calc_regres_coef_b <- function(y)
{
time_count <- seq_along(y)
  ones <- rep(1, length(y))
content.matrix <- c(ones, time_count)
X <- matrix(content.matrix, byrow = FALSE, ncol = 2)
suppressWarnings({
fm <- stats::lsfit(x = X, y = y, intercept = FALSE)
data.b <- fm$coef[[2]]
})
return(data.b)
}
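# Quick check of calc_regres_coef_b(): for a perfectly linear series the
# returned slope equals the step size.
calc_regres_coef_b(c(2, 4, 6, 8, 10))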
source(system.file("extdata", "utils.R", package = "nhdplusTools"))
data_dir <- file.path(tempdir(check = TRUE), "nhdplusTools")
download_pkg_data("petapsco_flowlines.gpkg",
"https://usgs-r.github.io/nhdplusTools/data/petapsco_flowlines.gpkg",
data_dir)
sample_flines <- sf::read_sf(file.path(data_dir, "petapsco_flowlines.gpkg"))
.mergeFlow <- function(ct1=ct1, iSpec=iSpec, gageID=gageID, hydro.var=hydro.var
, flow.detrended=flow.detrended) {
tmp <- flow.detrended[[gageID]]
tmp <- merge(ct1[, c("date",iSpec$dep)],
tmp[ ,c("date",hydro.var)], by="date", all.x=TRUE)
tmp[,iSpec$dep] <- if (iSpec$isSurv) impute(tmp[,iSpec$dep]) else tmp[,iSpec$dep]
hydro.var.corr <- data.frame(hydro.var=hydro.var, spearman= NA_real_ ,
chosen="-", stringsAsFactors = FALSE)
for (i in 1:nrow(hydro.var.corr)) {
hydro.var.corr[i,"spearman"] <- cor(y = tmp[,iSpec$dep], x = tmp[,hydro.var[i]],
use = "pairwise.complete.obs", method = "spearman")
}
hydro.var.corr[which.max(abs(hydro.var.corr[,"spearman"])),"chosen"] <- 'max'
iSpec$hydro.var.corr <- hydro.var.corr
iSpec$hydroTermSel.var <- hydro.var.corr[which.max(abs(hydro.var.corr[,"spearman"])),"hydro.var"]
tmp <- flow.detrended[[gageID]]
tmp <- merge(ct1, tmp[ ,c("date",iSpec$hydroTermSel.var )], by="date", all.x=TRUE)
names(tmp)[names(tmp) == iSpec$hydroTermSel.var] <- 'flw_sal'
ct1.list <- list(ct1=tmp,iSpec=iSpec)
return(ct1.list)
}
cogi <- function(data, targets = c(70,180), weights = c(.5,.35,.15)){
id = w_f = ir = br = stddev = weight_features = feature = scale_range = weight = NULL
rm(list = c("id", "w_f", "ir", "br", "stddev", "weight_features", "feature", "scale_range", "weight"))
data = check_data_columns(data)
is_vector = attr(data, "is_vector")
targets = sort(as.double(targets))
weight_features = function(feature, scale_range, weight = 1, increasing = FALSE) {
if (increasing) {
out = pmin(as.numeric(feature>min(scale_range))*(feature-min(scale_range))/(max(scale_range)-min(scale_range)), 1)
return(out*weight)
} else {
out = pmin(as.numeric(feature<max(scale_range))*(feature-max(scale_range))/(min(scale_range)-max(scale_range)), 1)
return(out*weight)
}
}
ir = in_range_percent(data, list(targets))[,2]
br = below_percent(data, targets_below = targets[1])[,2]
stddev = sd_glu(data)$SD
weighted_features = weight_features(ir,c(0,100),weight = weights[1], increasing = TRUE)+weight_features(br,c(0,15),weight = weights[2])+weight_features(stddev,c(18,108),weight = weights[3])
out = dplyr::tibble(weighted_features)
out$id = sd_glu(data)$id
out = out[,c(2,1)]
colnames(out) = c("id", "COGI")
if (is_vector) {
out$id = NULL
}
return(out)
}
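# Usage sketch for cogi(), assuming the iglu package's helpers
# (check_data_columns, in_range_percent, below_percent, sd_glu) and its
# example data are available:
# cogi(iglu::example_data_5_subject)
# cogi(iglu::example_data_5_subject, targets = c(70, 150), weights = c(.3, .5, .2))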
test.gdi32 <- function() {
dataPath <- file.path(path.package(package="clusterCrit"),"unitTests","data","testsInternal_400_4.Rdata")
load(file=dataPath, envir=.GlobalEnv)
idx <- intCriteria(traj_400_4, part_400_4[[4]], c("GDI32"))
cat(paste("\nFound idx =",idx))
val <- 11.1643758011678
cat(paste("\nShould be =",val,"\n"))
checkEqualsNumeric(idx[[1]],val)
}
fitmm <- function(sequences, states, k = 1, init.estim = "mle") {
if (!(is.list(sequences) & all(sapply(sequences, class) %in% c("character", "numeric")))) {
stop("The parameter 'sequences' should be a list of vectors")
}
if (!all(unique(unlist(sequences)) %in% states)) {
stop("Some states in the list of observed sequences 'sequences' are not in the state space 'states'")
}
if (!((k > 0) & ((k %% 1) == 0))) {
stop("'k' must be a strictly positive integer")
}
processes <- processesMarkov(sequences = sequences, states = states, k = k)
s <- processes$s
Nij <- processes$Nij
Ni <- processes$Ni
Nstarti <- processes$Nstarti
ptrans <- Nij / tcrossprod(Ni, rep.int(1, s))
ptrans[which(is.na(ptrans))] <- 0
ptrans <- .normalizePtrans(ptrans)
if (is.vector(init.estim) & length(init.estim) == 1) {
if (init.estim == "mle") {
init <- Nstarti / sum(Nstarti)
} else if (init.estim == "stationary") {
if (k == 1) {
init <- .stationaryDistribution(ptrans = ptrans)
} else {
init <- .stationaryDistribution(ptrans = .blockMatrix(ptrans = ptrans))
}
} else if (init.estim == "freq") {
Nstart <- as.vector(count(seq = unlist(sequences), wordsize = k, alphabet = states))
init <- Nstart / sum(Nstart)
} else if (init.estim == "prod") {
Nstart <- as.vector(count(seq = unlist(sequences), wordsize = 1, alphabet = states))
prob <- Nstart / sum(Nstart)
init <- as.vector(.productProb(length = k, prob = prob))
} else if (init.estim == "unif") {
init <- rep.int(x = 1 / (s ^ k), times = s ^ k)
} else {
stop("'init.estim' must be equal to \"mle\", \"stationary\", \"freq\", \"prod\" or \"unif\".
'init.estim' can also be a vector of length s ^ k for custom initial distribution")
}
} else {
if (!(is.numeric(init.estim) & !anyNA(init.estim) & is.vector(init.estim) & length(init.estim) == s ^ k)) {
stop("'init.estim' is not a numeric vector of length s ^ k")
}
if (!(all(init.estim >= 0) & all(init.estim <= 1))) {
stop("Probabilities in 'init.estim' must be between [0, 1]")
}
    if (!((sum(init.estim) >= 1 - sqrt(.Machine$double.eps)) & (sum(init.estim) <= 1 + sqrt(.Machine$double.eps)))) {
stop("The sum of 'init.estim' is not equal to one")
}
init <- init.estim
}
init <- as.vector(init / sum(init))
mm <- mm(states = states, init = init, ptrans = ptrans, k = k)
if (any(mm$init == 0)) {
message("The probabilities of the initial state(s) \"",
paste0(names(which(mm$init == 0)), collapse = "\", \""),
"\" are 0.")
}
loglik <- .loglik(x = mm, processes = processes)
estimate <- mmfit(mm = mm, M = processes$M, loglik = loglik, sequences = sequences)
return(estimate)
}
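# Usage sketch for fitmm(), assuming the package's internal helpers
# (processesMarkov, mm, mmfit, ...) are available: fit a first-order Markov
# model to two short made-up sequences over the state space c("a", "b").
# seqs <- list(c("a", "b", "a", "a", "b"), c("b", "b", "a", "b", "a"))
# fit <- fitmm(sequences = seqs, states = c("a", "b"), k = 1)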
library(testthat)
library(splines)
test_that("Check regular glm", {
n <- 100
set.seed(123)
nl_ds <- data.frame(
x = sample(seq(
from = 0,
to = pi * 3,
length.out = n
),
size = n,
replace = TRUE
),
sex = factor(sample(c("Male", "Female"),
size = n,
replace = TRUE
))
)
nl_ds$y <-
sin(nl_ds$x) * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
l_ds <- nl_ds
l_ds$y <-
nl_ds$x * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
vals <- sapply(2:7, function(x) {
AIC(glm(sprintf(
"y ~ ns(x, %d) + sex",
x
), data = nl_ds))
})
expect_equivalent(
AIC(addNonlinearity(glm(y ~ x + sex, data = nl_ds),
min_fn = AIC,
flex_param = 2:7,
variable = "x", spline_fn = "ns",
workers = FALSE
)),
min(vals)
)
expect_equivalent(
addNonlinearity(glm(y ~ x + sex, data = l_ds),
min_fn = AIC,
flex_param = 2:7,
variable = "x", spline_fn = "ns",
workers = FALSE
),
glm(y ~ x + sex, data = l_ds)
)
})
test_that("Check regular lm", {
n <- 100
set.seed(123)
nl_ds <- data.frame(
x = sample(seq(
from = 0,
to = pi * 3,
length.out = n
),
size = n,
replace = TRUE
),
sex = factor(sample(c("Male", "Female"),
size = n,
replace = TRUE
))
)
nl_ds$y <-
sin(nl_ds$x) * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
l_ds <- nl_ds
l_ds$y <-
nl_ds$x * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
vals <- sapply(2:7, function(x) {
AIC(lm(sprintf(
"y ~ ns(x, %d) + sex",
x
), data = nl_ds))
})
expect_equivalent(
AIC(addNonlinearity(lm(y ~ x + sex, data = nl_ds),
min_fn = AIC,
flex_param = 2:7,
variable = "x", spline_fn = "ns",
workers = FALSE
)),
min(vals)
)
expect_equivalent(
addNonlinearity(lm(y ~ x + sex, data = l_ds),
min_fn = AIC,
flex_param = 2:7,
variable = "x", spline_fn = "ns",
workers = FALSE
),
lm(y ~ x + sex, data = l_ds)
)
})
test_that("That rms-functions work", {
n <- 100
set.seed(123)
nl_ds <- data.frame(
x = sample(seq(
from = 0,
to = pi * 3,
length.out = n
),
size = n,
replace = TRUE
),
sex = factor(sample(c("Male", "Female"),
size = n,
replace = TRUE
))
)
nl_ds$y <-
sin(nl_ds$x) * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
l_ds <- nl_ds
l_ds$y <-
nl_ds$x * 2 +
(nl_ds$sex == "Male") * 1 +
rnorm(n = n, mean = 0, sd = .5)
library(rms)
vals <- sapply(3:7, function(x) {
AIC(ols(as.formula(sprintf("y ~ rcs(x, %d) + sex", x)),
data = nl_ds
))
})
expect_equivalent(
AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
min_fn = AIC,
flex_param = 3:7,
variable = "x",
spline_fn = "rcs",
workers = FALSE
)),
min(vals)
)
expect_error(AIC(addNonlinearity(ols(y ~ x + sex, data = nl_ds),
min_fn = AIC,
flex_param = 3:7,
variable = "x",
spline_fn = "ns",
workers = FALSE
)))
expect_equivalent(
addNonlinearity(ols(y ~ x + sex, data = l_ds),
min_fn = AIC,
flex_param = 3:7,
variable = "x",
spline_fn = "rcs",
workers = FALSE
),
ols(y ~ x + sex, data = l_ds)
)
})
Sncf <- function(x, y, z, w = NULL, df = NULL, type = "boot", resamp = 1000,
npoints = 300, save = FALSE, filter = FALSE, fw = 0, max.it = 25,
xmax = FALSE, na.rm = FALSE, latlon = FALSE, circ = FALSE,
quiet = FALSE) {
NAO <- FALSE
if (any(!is.finite(unlist(z)))) {
if (na.rm) {
warning("Missing values exist; Pairwise deletion will be used")
NAO <- TRUE
} else {
stop("Missing values exist; use na.rm = TRUE for pairwise deletion")
}
}
if (is.null(w)) {
n <- dim(z)[1]
p <- dim(z)[2]
z <- as.matrix(z) + 0
moran <- cor2(t(z), circ = circ)
} else {
n <- dim(z)[1]
p <- dim(z)[2]
z <- as.matrix(z) + 0
w <- as.matrix(w) + 0
moran <- cor2(t(z), t(w), circ = circ)
}
if (is.null(df)) {
df <- sqrt(n)
}
if (latlon) {
xdist <- gcdist(x, y)
} else {
xdist <- sqrt(outer(x, x, "-")^2 + outer(y, y, "-")^2)
}
maxdist <- ifelse(!xmax, max(na.omit(xdist)), xmax)
if (is.null(w)) {
triang <- lower.tri(xdist)
} else {
triang <- is.finite(xdist)
}
u <- xdist[triang]
v <- moran[triang]
sel <- is.finite(v) & is.finite(u)
u <- u[sel]
v <- v[sel]
v <- v[u <= maxdist]
u <- u[u <= maxdist]
xpoints <- seq(0, maxdist, length = npoints)
out <- gather(u = u, v = v, w = w, moran = moran, df = df, xpoints = xpoints,
filter = filter, fw = fw)
real <- list(cbar = out$cbar, x.intercept = out$xint, e.intercept = out$eint,
y.intercept = out$yint, cbar.intercept = out$cint,
predicted = list(x = matrix(out$x, nrow = 1),
y = matrix(out$y, nrow = 1)))
boot <- list(NULL)
boot$boot.summary <- list(NULL)
if (resamp != 0) {
boot$boot.summary$x.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$y.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$e.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$cbar.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$cbar <- matrix(NA, nrow = resamp, ncol = 1)
predicted <- list(x = matrix(NA, nrow = 1, ncol = npoints),
y = matrix(NA, nrow = resamp, ncol = npoints))
type <- charmatch(type, c("boot", "perm"), nomatch = NA)
if (is.na(type))
stop("method should be \"boot\", or \"perm\"")
for (i in 1:resamp) {
whn <- pretty(c(1, resamp), n = 10)
if (!quiet & any(i == whn)) {
cat(i, " of ", resamp, "\r")
flush.console()
}
if (type == 1) {
trekkx <- sample(1:n, replace = TRUE)
trekky <- trekkx
}
if (type == 2) {
trekky <- sample(1:n, replace = FALSE)
trekkx <- 1:n
}
xdistb <- xdist[trekkx, trekkx]
if (is.null(w)) {
triang <- lower.tri(xdistb)
} else {
triang <- is.finite(xdistb)
}
xdistb <- xdistb[triang]
moranb <- moran[trekky, trekky][triang]
if (type == 1 & is.null(w)) {
moranb <- moranb[!(xdistb == 0)]
xdistb <- xdistb[!(xdistb == 0)]
}
u <- xdistb
v <- moranb
sel <- is.finite(v) & is.finite(u)
u <- u[sel]
v <- v[sel]
v <- v[u <= maxdist]
u <- u[u <= maxdist]
out <- gather(u = u, v = v, w = w, moran = moranb, df = df, xpoints = xpoints,
filter = filter, fw = fw)
boot$boot.summary$cbar[i, 1] <- out$cbar
boot$boot.summary$y.intercept[i, 1] <- out$yint
boot$boot.summary$x.intercept[i, 1] <- out$xint
boot$boot.summary$e.intercept[i, 1] <- out$eint
boot$boot.summary$cbar.intercept[i, 1] <- out$cint
predicted$x[1, ] <- out$x
predicted$y[i, ] <- out$y
}
if (save == TRUE) {
boot$boot <- list(predicted = predicted)
} else {
boot$boot <- NULL
}
ty <- apply(predicted$y, 2, quantile, probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5,
0.75, 0.9, 0.95, 0.975, 1),
na.rm = TRUE)
dimnames(ty) <- list(c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 1),
NULL)
tx <- predicted$x
boot$boot.summary$predicted <- list(x = tx,y = ty)
} else {
boot <- NULL
boot.summary <- NULL
}
res <- list(real = real, boot = boot, max.distance = maxdist,
call = deparse(match.call()))
class(res) <- "Sncf"
res
}
plot.Sncf <- function(x, ylim = c(-1, 1), add = FALSE, ...) {
args.default <- list(xlab = "Distance", ylab = "Correlation")
args.input <- list(...)
args <- c(args.default[!names(args.default) %in% names(args.input)], args.input)
cbar <- x$real$cbar
if (!add) {
do.call(plot, c(list(x = x$real$predicted$x, y = x$real$predicted$y,
ylim = ylim, type = "l"), args))
}
if (!is.null(x$boot$boot.summary)) {
polygon(c(x$boot$boot.summary$predicted$x, rev(x$boot$boot.summary$predicted$x)),
c(x$boot$boot.summary$predicted$y["0.025", ],
rev(x$boot$boot.summary$predicted$y["0.975", ])), col = gray(0.8),
lty = 0)
}
lines(x$real$predicted$x, x$real$predicted$y)
lines(c(0, max(x$real$predicted$x)), c(0, 0))
lines(c(0, max(x$real$predicted$x)), c(cbar, cbar))
}
print.Sncf <- function(x, ...) {
cat("This is an object of class Sncf produced by the call:\n\n", x$call,
"\n\n Use summary() or plot() for inspection (or print.default() to see all the gory details).", ...)
}
summary.Sncf <- function(object, ...) {
xy <- cbind(object$real$x.intercept, object$real$e.intercept,
object$real$y.intercept, object$real$cbar.intercept)
dimnames(xy) <- list(c("intercepts"), c("x", "e","y", "cbar"))
if (!is.null(object$boot$boot.summary)) {
yd <- apply(object$boot$boot.summary$y.intercept, 2, quantile,
probs = c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), na.rm = TRUE)
xd <- apply(object$boot$boot.summary$x.intercept, 2, quantile,
probs = c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), na.rm = TRUE)
ed <- apply(object$boot$boot.summary$e.intercept, 2, quantile,
probs = c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), na.rm = TRUE)
synchd <- quantile(object$boot$boot.summary$cbar[, 1],
probs = c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), na.rm = TRUE)
cbard <- quantile(object$boot$boot.summary$cbar.intercept[, 1],
probs = c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), na.rm = TRUE)
xyd <- cbind(xd, ed, yd, cbard)
dimnames(xyd) <- list(c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1), c("x", "e", "y", "cbar"))
}
if (is.null(object$boot$boot.summary)) {
synchd <- NULL
xyd <- NULL
}
res <- list(call = object$call, Regional.synch = object$real$cbar, Squantile = synchd,
estimates = xy, quantiles = xyd)
res
}
Sncf.srf <- function(x, y, z, w = NULL, avg = NULL, avg2 = NULL, corr = TRUE,
df = NULL, type = "boot", resamp = 0, npoints = 300,
save = FALSE, filter = FALSE, fw = 0, max.it = 25,
xmax = FALSE, jitter = FALSE, quiet = FALSE) {
p <- dim(z)[2]
n <- dim(z)[1]
if (is.null(df)) {
df <- sqrt(n)
}
if (is.null(avg)) {
avg <- mean(as.vector(z), na.rm = TRUE)
if (!is.null(w)) {
avg2 <- mean(as.vector(w), na.rm = TRUE)
}
}
sca <- 1
sca2 <- 1
if (corr == TRUE) {
sca <- sqrt(var(as.vector(z)))
if (!is.null(w)) {
sca2 <- sqrt(var(as.vector(w)))
}
}
xdist <- sqrt(outer(x, x, "-")^2 + outer(y, y, "-")^2)
if (jitter == TRUE) {
xdist <- apply(xdist, 2, jitter)
}
if (is.null(w)) {
moran <- crossprod((t(z) - avg)/(sca))/p
} else {
moran <- crossprod((t(z) - avg)/(sca), (t(w) - avg2)/(sca2))/p
}
maxdist <- ifelse(!xmax, max(xdist), xmax)
if (is.null(w)) {
triang <- lower.tri(xdist)
} else {
triang <- is.finite(xdist)
}
u <- xdist[triang]
v <- moran[triang]
v <- v[u <= maxdist]
u <- u[u <= maxdist]
xpoints <- seq(0, maxdist, length = npoints)
out <- gather(u = u, v = v, w = w, moran = moran, df = df, xpoints = xpoints,
filter = filter, fw = fw)
real <- list(cbar = out$cbar, x.intercept = out$xint, e.intercept = out$eint,
y.intercept = out$yint, cbar.intercept = out$cint,
predicted = list(x = matrix(out$x, nrow = 1),
y = matrix(out$y, nrow = 1)))
boot <- list(NULL)
boot$boot.summary <- list(NULL)
if (resamp != 0) {
boot$boot.summary$x.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$y.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$e.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$cbar.intercept <- matrix(NA, nrow = resamp, ncol = 1)
boot$boot.summary$cbar <- matrix(NA, nrow = resamp, ncol = 1)
predicted <- list(x = matrix(NA, nrow = 1, ncol = npoints),
y = matrix(NA, nrow = resamp, ncol = npoints))
type <- charmatch(type, c("boot", "perm"), nomatch = NA)
if (is.na(type))
stop("method should be \"boot\", or \"perm\"")
for (i in 1:resamp) {
whn <- pretty(c(1, resamp), n = 10)
if (!quiet & any(i == whn)) {
cat(i, " of ", resamp, "\r")
flush.console()
}
if (type == 1) {
trekkx <- sample(1:n, replace = TRUE)
trekky <- trekkx
}
if (type == 2) {
trekky <- sample(1:n, replace = FALSE)
trekkx <- 1:n
}
xdistb <- xdist[trekkx, trekkx]
if (is.null(w)) {
triang <- lower.tri(xdistb)
} else {
triang <- is.finite(xdistb)
}
xdistb <- xdistb[triang]
moranb <- moran[trekky, trekky][triang]
if (type == 1 & is.null(w)) {
moranb <- moranb[!(xdistb == 0)]
xdistb <- xdistb[!(xdistb == 0)]
}
u <- xdistb
v <- moranb
v <- v[u <= maxdist]
u <- u[u <= maxdist]
out <- gather(u = u, v = v, w = w, moran = moranb, df = df, xpoints = xpoints,
filter = filter, fw = fw)
boot$boot.summary$cbar[i, 1] <- out$cbar
boot$boot.summary$y.intercept[i, 1] <- out$yint
boot$boot.summary$x.intercept[i, 1] <- out$xint
boot$boot.summary$e.intercept[i, 1] <- out$eint
boot$boot.summary$cbar.intercept[i, 1] <- out$cint
predicted$x[1, ] <- out$x
predicted$y[i, ] <- out$y
}
if (save == TRUE) {
boot$boot <- list(predicted = predicted)
} else {
boot$boot <- NULL
}
ty <- apply(predicted$y, 2, quantile,
probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 1),
na.rm = TRUE)
dimnames(ty) <- list(c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 1), NULL)
tx <- predicted$x
boot$boot.summary$predicted <- list(x = tx, y = ty)
} else {
boot <- NULL
boot.summary <- NULL
}
res <- list(real = real, boot = boot, max.distance = maxdist,
call = deparse(match.call()))
if (corr) {
class(res) <- "Sncf"
} else {
class(res) <- "Sncf.cov"
}
res
}
plot.Sncf.cov <- function(x, ...) {
args.default <- list(xlab = "Distance", ylab = "Covariance")
args.input <- list(...)
args <- c(args.default[!names(args.default) %in% names(args.input)], args.input)
do.call(plot, c(list(x = x$real$predicted$x, y = x$real$predicted$y,
type = "l"), args))
if (!is.null(x$boot$boot.summary)) {
polygon(c(x$boot$boot.summary$predicted$x, rev(x$boot$boot.summary$predicted$x)),
c(x$boot$boot.summary$predicted$y["0.025", ],
rev(x$boot$boot.summary$predicted$y["0.975", ])), col = gray(0.8),
lty = 0)
}
lines(x$real$predicted$x, x$real$predicted$y)
lines(c(0, max(x$real$predicted$x)), c(0, 0))
}
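# Usage sketch for Sncf(), assuming the package's internal helpers
# (cor2, gcdist, gather) are available, e.g. with the ncf package loaded:
# a spline correlogram on simulated coordinates and a 25 x 10 data matrix,
# with a small bootstrap.
# set.seed(1)
# xc <- runif(25); yc <- runif(25)
# z <- matrix(rnorm(250), nrow = 25)
# fit <- Sncf(xc, yc, z, resamp = 50, quiet = TRUE)
# summary(fit); plot(fit)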
resolved <- function(x, ...) UseMethod("resolved")
resolved.default <- function(x, ...) TRUE
resolved.list <- function(x, ...) {
fs <- futures(x)
res <- rep(TRUE, times = length(fs))
for (ii in seq_along(fs)) {
f <- fs[[ii]]
if (inherits(f, "Future")) res[[ii]] <- resolved(f, ...)
}
dim <- dim(fs)
if (!is.null(dim)) {
dim(res) <- dim
dimnames(res) <- dimnames(fs)
}
names(res) <- names(fs)
res
}
resolved.environment <- function(x, ...) {
fs <- futures(x)
names <- names(fs)
fs <- as.list(fs)
names(fs) <- names
resolved(fs, ...)
}
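# Usage sketch for resolved(), assuming the future package supplies the
# Future class and the futures() helper used above:
# library(future)
# plan(multisession)
# f <- future(Sys.sleep(0.1))
# resolved(f)
# resolved(list(a = future(1), b = 2))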
Return1 <- ISNUMBER(2)
Return2 <- ISNUMBER("This is not a number 1")
expect_equal(Return1, TRUE)
expect_equal(Return2, FALSE)
f <- list.files(pattern = "*.pdf")
f <- f[-which(f == "spatial-mv.pdf")]
f <- f[-which(f == "cons-plans-n.pdf")]
for(i in seq_along(f)) {
system(paste0("pdftops -eps ", f[i]))
}
lsm_c_ai <- function(landscape) {
landscape <- landscape_as_list(landscape)
result <- lapply(X = landscape,
FUN = lsm_c_ai_calc)
layer <- rep(seq_len(length(result)),
vapply(result, nrow, FUN.VALUE = integer(1)))
result <- do.call(rbind, result)
tibble::add_column(result, layer, .before = TRUE)
}
lsm_c_ai_calc <- function(landscape) {
if (!inherits(x = landscape, what = "matrix")) {
landscape <- raster::as.matrix(landscape)
}
if (all(is.na(landscape))) {
return(tibble::tibble(level = "class",
class = as.integer(NA),
id = as.integer(NA),
metric = "ai",
value = as.double(NA)))
}
like_adjacencies <- rcpp_get_coocurrence_matrix_diag(landscape,
directions = as.matrix(4)) / 2
cells_class <- rcpp_get_composition_vector(landscape)
cells_class <- tibble::tibble(class = names(cells_class),
value = cells_class)
cells_class$n <- trunc(sqrt(cells_class$value))
cells_class$m <- cells_class$value - cells_class$n ^ 2
cells_class$max_adj <- ifelse(test = cells_class$m == 0,
yes = 2 * cells_class$n * (cells_class$n - 1),
no = ifelse(test = cells_class$m <= cells_class$n,
yes = 2 * cells_class$n * (cells_class$n - 1) + 2 * cells_class$m - 1,
no = ifelse(test = cells_class$m > cells_class$n,
yes = 2 * cells_class$n * (cells_class$n - 1) + 2 * cells_class$m - 2,
no = NA)))
if (anyNA(cells_class$max_adj)) {
warning("NAs introduced by lsm_c_ai", call. = FALSE)
}
max_adj <- cells_class$max_adj
ai <- (like_adjacencies / max_adj) * 100
ai[is.nan(ai)] <- NA
return(tibble::tibble(level = "class",
class = as.integer(names(like_adjacencies)),
id = as.integer(NA),
metric = "ai",
value = as.double(ai)))
}
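# Usage sketch, assuming the landscapemetrics package (which exports this
# metric and ships an example 'landscape' raster):
# library(landscapemetrics)
# lsm_c_ai(landscape)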
create_zip <- function(x, ...) {
UseMethod("create_zip", x)
}
create_zip.character <- function(x, ...) {
f <- file.exists(x)
if (any(!f)) {
        stop(ngettext(sum(!f), "One file does not exist: ", paste0(sum(!f), " files do not exist: ")), paste0(x[which(!f)], collapse = ", "))
} else {
tmp <- tempfile(fileext = ".zip")
stopifnot(!utils::zip(tmp, x))
return(tmp)
}
}
create_zip.data.frame <- function(x, ...) {
tmpdf <- tempfile(fileext = ".zip")
on.exit(file.remove(tmpdf), add = TRUE)
tmp <- tempfile(fileext = ".zip")
save(x, file = tmpdf)
stopifnot(!utils::zip(tmp, tmpdf))
return(tmp)
}
create_zip.list <- function(x, ...) {
    tmpdf <- vapply(seq_along(x), function(i) tempfile(fileext = ".RData"), character(1))
on.exit(file.remove(tmpdf), add = TRUE)
    mapply(function(x, f) save(x, file = f), x, tmpdf)
tmp <- tempfile(fileext = ".zip")
stopifnot(!utils::zip(tmp, tmpdf))
return(tmp)
}
add_file <- function(dataset, file, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
if (inherits(dataset, "dataset_atom")) {
u <- dataset$links[["edit-media"]]
} else if (inherits(dataset, "dataset_statement")) {
dataset <- prepend_doi(dataset$id)
u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit-media/study/", dataset)
} else if (is.character(dataset) && grepl("^http", dataset)) {
if (grepl("edit-media/study/", dataset)) {
u <- dataset
} else {
stop("'dataset' not recognized.")
}
} else {
dataset <- prepend_doi(dataset)
u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit/study/", dataset)
}
file <- create_zip(file)
h <- httr::add_headers("Content-Disposition" = paste0("filename=", file),
"Content-Type" = "application/zip",
"Packaging" = "http://purl.org/net/sword/package/SimpleZip")
r <- httr::POST(u, httr::authenticate(key, ""), h, body = httr::upload_file(file), ...)
httr::stop_for_status(r, task = httr::content(r)$message)
parse_atom(httr::content(r, as = "text", encoding = "UTF-8"))
}
delete_file <- function(id, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
if (grepl("^http", id)) {
u <- id
} else {
u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit-media/file/", id)
}
r <- httr::DELETE(u, httr::authenticate(key, ""), ...)
httr::stop_for_status(r, task = httr::content(r)$message)
cont <- httr::content(r, as = "text", encoding = "UTF-8")
if (cont == "") {
return(TRUE)
} else {
return(cont)
}
}
Traverse = function(node,
traversal = c("pre-order", "post-order", "in-order", "level", "ancestor"),
pruneFun = NULL,
filterFun = NULL) {
nodes <- list()
if(length(traversal) > 1L) {
traversal <- traversal[1L]
}
if(is.function(traversal) || traversal == "pre-order" || traversal == "post-order") {
if (length(pruneFun) == 0 || pruneFun(node)) {
if (is.function(traversal)) {
children <- traversal(node)
if (is(children, "Node")) children <- list(children)
if (is.null(children)) children <- list()
} else children <- node$children
for(child in children) {
nodes <- c(nodes, Traverse(child, traversal = traversal, pruneFun = pruneFun, filterFun = filterFun))
}
if(length(filterFun) == 0 || filterFun(node)) {
if(is.function(traversal) || traversal == "pre-order") nodes <- c(node, nodes)
else nodes <- c(nodes, node)
}
}
} else if(traversal == "in-order") {
if(!node$isBinary) stop("traversal in-order valid only for binary trees")
if(length(pruneFun) == 0 || pruneFun(node)) {
if(!node$isLeaf) {
n1 <- Traverse(node$children[[1]], traversal = traversal, pruneFun = pruneFun, filterFun = filterFun)
if(length(filterFun) == 0 || filterFun(node)) n2 <- node
else n2 <- list()
n3 <- Traverse(node$children[[2]], traversal = traversal, pruneFun = pruneFun, filterFun = filterFun)
nodes <- c(n1, n2, n3)
} else {
if(length(filterFun) == 0 || filterFun(node)) n2 <- node
else n2 <- list()
nodes <- c(nodes, n2)
}
}
} else if (traversal == "ancestor") {
if (!isRoot(node)) {
nodes <- Traverse(node$parent, traversal = traversal, pruneFun = pruneFun, filterFun = filterFun)
}
if(length(filterFun) == 0 || filterFun(node)) {
nodes <- c(node, nodes)
}
} else if (traversal == "level") {
nodes <- Traverse(node, filterFun = filterFun, pruneFun = pruneFun)
if (length(nodes) > 0) nodes <- nodes[order(Get(nodes, function(x) x$level))]
} else {
stop("traversal must be pre-order, post-order, in-order, ancestor, or level")
}
return (nodes)
}
Get = function(nodes,
attribute,
...,
format = FALSE,
inheritFromAncestors = FALSE,
simplify = c(TRUE, FALSE, "array", "regular")) {
if (length(nodes) == 0) return(NULL)
if (!is(nodes, "list")) stop("nodes must be a list of Node objects!")
simplify <- simplify[1]
nodes <- unname(nodes)
if (simplify == "regular") {
regular = TRUE
simplify = FALSE
} else regular = FALSE
res <- sapply(nodes,
function(x) GetAttribute(x,
attribute,
...,
format = format,
inheritFromAncestors = inheritFromAncestors),
simplify = simplify
)
if (is.character(attribute) && attribute == "name") {
names(res) <- res
} else {
if(is.null(dim(res))){
names(res) <- Get(nodes, "name")
} else {
if(is.null(dimnames(res)))
dimnames(res) <- list()
dimnames(res)[[length(dim(res))]] <- Get(nodes, "name")
}
}
if (regular) {
res <- do.call(cbind, res)
}
return (res)
}
Do <- function(nodes,
fun,
...) {
  if (length(nodes) == 0) return(invisible(nodes))
if (!is(nodes, "list")) stop("nodes must be a list of Node objects!")
for (node in nodes) fun(node, ...)
invisible (nodes)
}
Set <- function(nodes,
...) {
if (length(nodes) == 0) return(nodes)
if (!is(nodes, "list")) stop("nodes must be a list of Node objects!")
args <- list(...)
argsnames <- sapply(substitute(list(...))[-1], deparse)
gargsnames <- names(args)
if (is.null(gargsnames)) gargsnames <- vector(mode = "character", length = length(args))
gargsnames[nchar(gargsnames) == 0] <- argsnames[nchar(gargsnames) == 0]
names(args) <- gargsnames
appFun <- function(x, arg, name) {
x[[name]] <- arg
}
for(nme in names(args)) {
arg <- args[[nme]]
if (length(arg) == 0) arg <- vector("list", 1)
mapply(appFun, nodes, arg, nme)
}
invisible (nodes)
}
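# Usage sketch for Traverse()/Get()/Set(), assuming the data.tree Node class:
# library(data.tree)
# acme <- Node$new("Acme")
# acme$AddChild("IT"); acme$AddChild("Ops")
# nodes <- Traverse(acme, traversal = "pre-order")
# Get(nodes, "name")
# Set(nodes, visited = TRUE)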
spodt <- function(formula ,data,
weight=FALSE, graft=0,
level.max=5, min.parent=10, min.child=5, rtwo.min=0.001)
{
if (class(data)!="SpatialPointsDataFrame") stop("use a SpatialPointsDataFrame")
if (is.na((is.projected(data)))|(! is.projected(data)) ) warning("the coordinates are not projected. Please, provide projected coordinates or be sure to use euclidian coordinates!")
coord.x <- coordinates(data)[,1]
coord.y <- coordinates(data)[,2]
loc.data <- as.numeric(row.names(data@data))
data.temp <- data@data
Call <- match.call()
indx <- match(c("formula", "data"), names(Call), nomatch = 0L)
if (indx[1] == 0L) stop("a 'formula' with the cofactors is required\n for single spatial analysis (with no cofactor) the right hand side should be z~1")
dataset.prep <- model.frame(Call$formula, data=data.temp)
dataset <- cbind(loc.data, coord.x, coord.y, dataset.prep)
colnames(dataset)[1:4] <- c("loc", "x", "y", "z")
spodt.fct(dataset, weight=weight, graft=graft, level.max=level.max,
min.parent=min.parent, min.child=min.child, rtwo.min=rtwo.min)
}
posteriorPredictive <- function(fittedModel,
M=100,
numItems=NULL,
expected=FALSE,
nCPU=4){
mptInfo <- fittedModel$mptInfo
tree <- mptInfo$MPT$Tree
TreeNames <- unique(tree)
sel.cat <- lapply(TreeNames, function(tt) tree %in% tt)
S <- length(mptInfo$thetaUnique)
numTrees <- length(TreeNames)
chains <- length(fittedModel$runjags$mcmc)
sample <- nrow(fittedModel$runjags$mcmc[[1]])
max.samp <- min(sample, ceiling(M/chains))
sel.thetaFE <- grep("thetaFE", varnames(fittedModel$runjags$mcmc), fixed=TRUE)
n.thetaFE <- length(sel.thetaFE)
if(missing(numItems) || is.null(numItems)){
pred.new <- FALSE
N <- nrow(mptInfo$data)
treeLabels <- unique(mptInfo$MPT$Tree)
numItems <- matrix(NA, nrow(mptInfo$data), length(treeLabels))
colnames(numItems) <- treeLabels
for(tl in treeLabels){
numItems[,tl] <- rowSums(mptInfo$data[,mptInfo$MPT$Tree == tl,drop=FALSE])
}
}else{
pred.new <- TRUE
if(length(numItems) != length(unique(fittedModel$mptInfo$MPT$Tree))){
stop("Length of 'numItems' does not match number of MPT trees.")
}
if(is.null(names(numItems))){
warning("numItems are ordered alphabetically:\n ", paste(TreeNames, sep =", "))
names(numItems) <- TreeNames
}
numItems <- matrix(numItems, nrow=1, dimnames=list(NULL, names(numItems)))
N <- 1
}
par.ind <- par.thetaFE <- c()
for(m in 1:chains){
sel.samp <- sample(1:sample, max.samp)
if(!pred.new){
sel.var <- setdiff(grep("theta", varnames(fittedModel$runjags$mcmc), fixed=TRUE),
sel.thetaFE)
par.tmp <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, sel.var,drop=FALSE])
}else{
par.tmp <- matrix(NA, max.samp, S,
dimnames=list(NULL, paste0("theta[",1:S, ",1]")))
if(inherits(fittedModel, "betaMPT")){
if(S==1){
alpha <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, "alph",drop=FALSE])
beta <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, "bet",drop=FALSE])
}else{
alpha <- as.matrix(fittedModel$runjags$mcmc[[m]][
sel.samp, paste0("alph[",1:S,"]"),drop=FALSE])
beta <- as.matrix(fittedModel$runjags$mcmc[[m]][
sel.samp, paste0("bet[",1:S,"]"),drop=FALSE])
}
for(i in 1:S){
par.tmp[,i] <- rbeta(max.samp, alpha[,i], beta[,i])
}
}else if(inherits(fittedModel, "traitMPT")){
if(S==1){
mu <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, "mu",drop=FALSE])
sig <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, "sigma",drop=FALSE])
} else{
mu <- as.matrix(fittedModel$runjags$mcmc[[m]][
sel.samp, paste0("mu[",1:S,"]"),drop=FALSE])
sig <- as.matrix(fittedModel$runjags$mcmc[[m]][
sel.samp, paste0("sigma[",1:S,"]"),drop=FALSE])
}
sel.rho <- grep("rho", varnames(fittedModel$runjags$mcmc))
rho <- as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, sel.rho,drop=FALSE])
for(mm in 1:max.samp){
Sig <- matrix(rho[mm,],S,S) *( sig[mm,] %*% t( sig[mm,] ) )
par.tmp[mm,] <- pnorm(mvrnorm(1, mu[mm,], Sig))
}
}
}
par.ind <- rbind(par.ind, par.tmp)
if(n.thetaFE > 0){
par.thetaFE <- rbind(par.thetaFE,
as.matrix(fittedModel$runjags$mcmc[[m]][sel.samp, sel.thetaFE]))
}else{
par.thetaFE <- NULL
}
}
expectedFreq <- function(n, theta, thetaFE){
sapply(mptInfo$MPT$Equation,USE.NAMES = FALSE,
function(ff) {
eval(parse(text=ff),
envir = list(n=n, theta=theta, thetaFE=thetaFE) )
})
}
getPostPred <- function(tt){
theta <- matrix(tt[(n.thetaFE+1):length(tt)], S, N, byrow=FALSE)
if(n.thetaFE>0){
thetaFE <- tt[1:n.thetaFE]
}else{
thetaFE <- NULL
}
freq.exp <- t(sapply(1:N, expectedFreq,
theta = theta, thetaFE = thetaFE))*numItems[,tree]
if(!expected){
rmultinom_stable <- function(x){
if (sum(x) > 0)
rand <- rmultinom(1, size=round(sum(x)), prob=x/sum(x))
else
rand <- matrix(rep(0, length(x)))
rand
}
for(k in 1:length(TreeNames)){
freq.exp[,sel.cat[[k]]] <- t(apply(freq.exp[,sel.cat[[k]],drop=FALSE], 1,
rmultinom_stable))
}
}
colnames(freq.exp) <- mptInfo$MPT$Category
list(freq.exp)
}
var.names <- apply(expand.grid("theta[",1:S,",",1:N,"]"), 1, paste0, collapse="")
par.ind <- par.ind[,gsub(" ", "", var.names, fixed=TRUE)]
if(nCPU >1){
cl <- makeCluster(nCPU)
clusterExport(cl, c("S","N","mptInfo","numItems","expected","expectedFreq",
"tree","TreeNames","n.thetaFE","sel.cat"), envir=environment())
freq.list <- parApply(cl, cbind(par.thetaFE, par.ind), 1, getPostPred)
stopCluster(cl)
}else{
freq.list <- apply(cbind(par.thetaFE, par.ind), 1, getPostPred)
}
freq.list <- lapply(freq.list, function(xx) xx[[1]])
if(M == 1){
freq.list[[1]]
}else if(pred.new){
do.call("rbind", freq.list)[1:M,,drop=FALSE]
}else{
freq.list[1:min(M, length(freq.list))]
}
}
source("incl/start.R")
if (requireNamespace("doFuture", quietly = TRUE)) {
library("doFuture", character.only = TRUE)
registerDoFuture()
for (strategy in c("sequential", "multisession", "multicore")) {
future::plan(strategy)
print(future::plan())
message("* with_progress()")
with_progress({
p <- progressor(4)
y <- foreach(n = 3:6) %dopar% {
p()
slow_sum(1:n, stdout=TRUE, message=TRUE)
}
})
message("* global progression handler")
handlers(global = TRUE)
local({
p <- progressor(4)
y <- foreach(n = 3:6) %dopar% {
p()
slow_sum(1:n, stdout=TRUE, message=TRUE)
}
})
handlers(global = FALSE)
}
}
source("incl/end.R") |
FilterDISR = R6Class("FilterDISR",
inherit = Filter,
public = list(
initialize = function() {
param_set = ps(
threads = p_int(lower = 0L, default = 0L, tags = "threads")
)
param_set$values = list(threads = 1L)
super$initialize(
id = "disr",
task_type = c("classif", "regr"),
param_set = param_set,
feature_types = c("integer", "numeric", "factor", "ordered"),
packages = "praznik",
man = "mlr3filters::mlr_filters_disr"
)
}
),
private = list(
.calculate = function(task, nfeat) {
call_praznik(self, task, praznik::DISR, nfeat)
}
)
)
mlr_filters$add("disr", FilterDISR) |
e4ddp <- function(N, n, P1, P2, P3, P4, DEFF = 1, conf = 0.95, plot = FALSE)
{
Q1 <- 1 - P1
Q2 <- 1 - P2
Q3 <- 1 - P3
Q4 <- 1 - P4
S2 <- (P1 * Q1 + P2 * Q2 + P3 * Q3 + P4 * Q4) * DEFF
Z <- 1 - ((1 - conf) / 2)
f <- n/N
VAR <- (1/n) * (1 - f) * S2
CVE <- 100 * sqrt(VAR) / abs((P1 - P2) - (P3 - P4))
ME <- 100 * qnorm(Z) * sqrt(VAR)
if (plot == TRUE)
{
nseq <- seq(1, N, 10)
cveseq <- rep(NA, length(nseq))
meseq <- rep(NA, length(nseq))
for (k in 1:length(nseq))
{
fseq <- nseq[k]/N
varseq <- (1/nseq[k]) * (1 - fseq) * S2
cveseq[k] <- 100 * sqrt(varseq)/abs((P1 - P2) - (P3 - P4))
meseq[k] <- 100 * qnorm(Z) * sqrt(varseq)
}
par(mfrow = c(1, 2))
plot(nseq, cveseq, type = "l", lty = 1, pch = 1, col = 3, ylab = "Coefficient of variation", xlab = "Sample Size")
points(n, CVE, pch = 8, bg = "blue")
abline(h = CVE, lty = 3)
abline(v = n, lty = 3)
plot(nseq, meseq, type = "l", lty = 1, pch = 1, col = 3, ylab = "Margin of error", xlab = "Sample Size")
points(n, ME, pch = 8, bg = "blue")
abline(h = ME, lty = 3)
abline(v = n, lty = 3)
}
msg <- cat("With the parameters of this function: N =", N, "n = ", n, "P1 =", P1, "P2 =", P2, "DEFF = ", DEFF,
"conf =", conf, ". \n \n \nThe estimated coefficient of variation is ", CVE, ". \n \nThe margin of error is",
ME, ". \n \n")
result <- list(cve = CVE, Margin_of_error = ME)
result
}
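# Usage sketch for e4ddp(): precision of an estimated difference of
# differences (P1 - P2) - (P3 - P4) for a hypothetical sample of n = 400
# drawn from N = 100000 with a design effect of 1.5.
e4ddp(N = 100000, n = 400, P1 = 0.5, P2 = 0.5, P3 = 0.5, P4 = 0.3, DEFF = 1.5)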
rdacca.hp <- function (dv,iv,method=c("RDA","dbRDA","CCA"),type=c("adjR2","R2"),scale=FALSE,add = FALSE, sqrt.dist = FALSE,n.perm=1000,var.part = FALSE)
{
if(is.data.frame(iv))
{
if(sum(is.na(dv))>=1|sum(is.na(iv))>=1)
{stop("NA/NaN/Inf is not allowed in this analysis")}
if(nrow(iv)<=ncol(iv))
{stop("sample size (row) is less than the number of predictors")}
else
{method <- method[1]
type <- type[1]
if(inherits(dv, "dist"))
{method <- "dbRDA"}
if(method=="dbRDA"||method=="dbrda"||method=="DBRDA")
{
if(!inherits(dv, "dist"))
return("response variables should be a 'dist' matrix for dbRDA")
}
if(method=="RDA"||method=="rda")
{dv<-scale(dv,scale=scale)}
iv <- data.frame(iv)
ivname <- colnames(iv)
iv.name <- ivname
nvar <- dim(iv)[2]
if (nvar < 2)
stop("Analysis not conducted. Insufficient number of predictors.")
totalN <- 2^nvar - 1
binarymx <- matrix(0, nvar, totalN)
for (i in 1:totalN) {
binarymx <- creatbin(i, binarymx)
}
commonM <- matrix(nrow = totalN, ncol = 3)
for (i in 1:totalN) {
tmp.design.ct <- iv[as.logical(binarymx[, i])]
if(method=="RDA"||method=="rda")
{
gfa <- vegan::RsquareAdj(vegan::rda(dv~.,tmp.design.ct))
}
if(method=="CCA"||method=="cca")
{gfa <- vegan::RsquareAdj(vegan::cca(dv~.,tmp.design.ct), permutations = n.perm)
}
if(method=="dbRDA"||method=="dbrda"||method=="DBRDA")
{
gfa <- vegan::RsquareAdj(vegan::capscale(dv~.,tmp.design.ct,add=add,sqrt.dist = sqrt.dist))
}
if(type=="R2")commonM[i, 2] <- gfa$r.squared
if(type=="adjR2")commonM[i, 2] <- gfa$adj.r.squared
}
commonlist <- vector("list", totalN)
seqID <- vector()
for (i in 1:nvar) {
seqID[i] = 2^(i-1)
}
for (i in 1:totalN) {
bit <- binarymx[1, i]
if (bit == 1)
ivname <- c(0, -seqID[1])
else ivname <- seqID[1]
for (j in 2:nvar) {
bit <- binarymx[j, i]
if (bit == 1) {
alist <- ivname
blist <- genList(ivname, -seqID[j])
ivname <- c(alist, blist)
}
else ivname <- genList(ivname, seqID[j])
}
ivname <- ivname * -1
commonlist[[i]] <- ivname
}
for (i in 1:totalN) {
r2list <- unlist(commonlist[i])
numlist <- length(r2list)
ccsum <- 0
for (j in 1:numlist) {
indexs <- r2list[[j]]
indexu <- abs(indexs)
if (indexu != 0) {
ccvalue <- commonM[indexu, 2]
if (indexs < 0)
ccvalue <- ccvalue * -1
ccsum <- ccsum + ccvalue
}
}
commonM[i, 3] <- ccsum
}
orderList <- vector("list", totalN)
index <- 0
for (i in 1:nvar) {
for (j in 1:totalN) {
nbits <- sum(binarymx[, j])
if (nbits == i) {
index <- index + 1
commonM[index, 1] <- j
}
}
}
outputcommonM <- matrix(nrow = totalN + 1, ncol = 2)
totalRSquare <- sum(commonM[, 3])
for (i in 1:totalN) {
outputcommonM[i, 1] <- round(commonM[commonM[i,
1], 3], digits = 4)
outputcommonM[i, 2] <- round((commonM[commonM[i,
1], 3]/totalRSquare) * 100, digits = 2)
}
outputcommonM[totalN + 1, 1] <- round(totalRSquare,
digits = 4)
outputcommonM[totalN + 1, 2] <- round(100, digits = 4)
rowNames <- NULL
for (i in 1:totalN) {
ii <- commonM[i, 1]
nbits <- sum(binarymx[, ii])
cbits <- 0
if (nbits == 1)
rowName <- "Unique to "
else rowName <- "Common to "
for (j in 1:nvar) {
if (binarymx[j, ii] == 1) {
if (nbits == 1)
rowName <- paste(rowName, iv.name[j], sep = "")
else {
cbits <- cbits + 1
if (cbits == nbits) {
rowName <- paste(rowName, "and ", sep = "")
rowName <- paste(rowName, iv.name[j], sep = "")
}
else {
rowName <- paste(rowName, iv.name[j], sep = "")
rowName <- paste(rowName, ", ", sep = "")
}
}
}
}
rowNames <- c(rowNames, rowName)
}
rowNames <- c(rowNames, "Total")
rowNames <- format.default(rowNames, justify = "left")
colNames <- format.default(c("Fractions", " % Total"),
justify = "right")
dimnames(outputcommonM) <- list(rowNames, colNames)
VariableImportance <- matrix(nrow = nvar, ncol = 4)
for (i in 1:nvar) {
VariableImportance[i, 3] <- round(sum(binarymx[i, ] * (commonM[,3]/apply(binarymx,2,sum))), digits = 4)
}
VariableImportance[,1] <- outputcommonM[1:nvar,1]
VariableImportance[,2] <- VariableImportance[,3]-VariableImportance[,1]
total=round(sum(VariableImportance[,3]),digits = 3)
VariableImportance[, 4] <- round(100*VariableImportance[, 3]/total,2)
dimnames(VariableImportance) <- list(iv.name, c("Unique","Average.share","Individual","I.perc(%)"))
if(var.part)
{outputList <- list(Method_Type=c(method,type),Total_explained_variation=total,Var.part = outputcommonM, Hier.part = VariableImportance)}
else
{outputList<-list(Method_Type=c(method,type),Total_explained_variation=total,Hier.part= VariableImportance)}
class(outputList) <- "rdaccahp"
outputList
}
}
else
{nvar <- length(iv)
if(sum(unlist(lapply(iv,is.data.frame)))<nvar)
stop("data.frame is required for each group explanatory table")
if(sum(is.na(dv))>=1|sum(is.na(unlist(iv)))>=1)
{stop("NA/NaN/Inf is not allowed in this analysis")}
else
{method <- method[1]
type <- type[1]
if(inherits(dv, "dist"))
{method <- "dbRDA"}
if(method=="dbRDA"||method=="dbrda"||method=="DBRDA"){
if(!inherits(dv, "dist"))
return("dv should be a 'dist' matrix for dbRDA")
}
if(method=="RDA"||method=="rda")
{dv<-scale(dv,scale=scale)}
ilist <- names(iv)
if(is.null(ilist))
{names(iv) <- paste("X",1:nvar,sep="")}
else
{whichnoname <- which(ilist=="")
names(iv)[whichnoname] <- paste("X",whichnoname,sep="")}
ilist <- names(iv)
ivlist <- ilist
iv.name <- ilist
if (nvar < 2)
stop("Analysis not conducted. Insufficient number of predictor groups.")
ivID <- matrix(nrow = nvar, ncol = 1)
for (i in 0:(nvar - 1)) {
ivID[i + 1] <- 2^i
}
totalN <- 2^nvar - 1
binarymx <- matrix(0, nvar, totalN)
for (i in 1:totalN) {
binarymx <- creatbin(i, binarymx)
}
commonM <- matrix(nrow = totalN, ncol = 3)
for (i in 1:totalN) {
ivls <- iv[as.logical(binarymx[, i])]
N <- length(ivls)
if(N==1)
{
tmp.design.ct <- ivls[[1]]
if(method=="RDA"||method=="rda")
{
gfa <- vegan::RsquareAdj(vegan::rda(dv~.,tmp.design.ct))
}
if(method=="CCA"||method=="cca")
{gfa <- vegan::RsquareAdj(vegan::cca(dv~.,tmp.design.ct), permutations = n.perm)
}
if(method=="dbRDA"||method=="dbrda"||method=="DBRDA")
{
gfa <- vegan::RsquareAdj(vegan::dbrda(dv~.,tmp.design.ct,add=add,sqrt.dist = sqrt.dist))
}
if(type=="R2")commonM[i, 2] <- gfa$r.squared
if(type=="adjR2")commonM[i, 2] <- gfa$adj.r.squared
}
if(N>1)
{tmp.design.ct <- ivls[[1]]
for(k in 2:N)
{tmp.design.ct <- cbind(tmp.design.ct,ivls[[k]])}
if(method=="RDA"||method=="rda")
{
gfa <- vegan::RsquareAdj(vegan::rda(dv~.,tmp.design.ct))
}
if(method=="CCA"||method=="cca")
{gfa <- vegan::RsquareAdj(vegan::cca(dv~.,tmp.design.ct), permutations = n.perm)
}
if(method=="dbRDA"||method=="dbrda"||method=="DBRDA")
{
gfa <- vegan::RsquareAdj(vegan::capscale(dv~.,tmp.design.ct,add=add,sqrt.dist = sqrt.dist))
}
if(type=="R2")commonM[i, 2] <- gfa$r.squared
if(type=="adjR2")commonM[i, 2] <- gfa$adj.r.squared
}
}
commonalityList <- vector("list", totalN)
for (i in 1:totalN) {
bit <- binarymx[1, i]
if (bit == 1)
ilist <- c(0, -ivID[1])
else ilist <- ivID[1]
for (j in 2:nvar) {
bit <- binarymx[j, i]
if (bit == 1) {
alist <- ilist
blist <- genList(ilist, -ivID[j])
ilist <- c(alist, blist)
}
else ilist <- genList(ilist, ivID[j])
}
ilist <- ilist * -1
commonalityList[[i]] <- ilist
}
for (i in 1:totalN) {
r2list <- unlist(commonalityList[i])
numlist <- length(r2list)
ccsum = 0
for (j in 1:numlist) {
indexs <- r2list[[j]]
indexu <- abs(indexs)
if (indexu != 0) {
ccvalue <- commonM[indexu, 2]
if (indexs < 0)
ccvalue <- ccvalue * -1
ccsum <- ccsum + ccvalue
}
}
commonM[i, 3] <- ccsum
}
orderList <- vector("list", totalN)
index <- 0
for (i in 1:nvar) {
for (j in 1:totalN) {
nbits <- sum(binarymx[, j])
if (nbits == i) {
index <- index + 1
commonM[index, 1] <- j
}
}
}
outputcommonM <- matrix(nrow = totalN + 1, ncol = 2)
totalRSquare <- sum(commonM[, 3])
for (i in 1:totalN) {
outputcommonM[i, 1] <- round(commonM[commonM[i,
1], 3], digits = 4)
outputcommonM[i, 2] <- round((commonM[commonM[i,
1], 3]/totalRSquare) * 100, digits = 2)
}
outputcommonM[totalN + 1, 1] <- round(totalRSquare,
digits = 4)
outputcommonM[totalN + 1, 2] <- round(100, digits = 4)
rowNames = NULL
for (i in 1:totalN) {
ii <- commonM[i, 1]
nbits <- sum(binarymx[, ii])
cbits <- 0
if (nbits == 1)
rowName <- "Unique to "
else rowName = "Common to "
for (j in 1:nvar) {
if (binarymx[j, ii] == 1) {
if (nbits == 1)
rowName <- paste(rowName, ivlist[j], sep = "")
else {
cbits = cbits + 1
if (cbits == nbits) {
rowName <- paste(rowName, "and ", sep = "")
rowName <- paste(rowName, ivlist[j], sep = "")
}
else {
rowName <- paste(rowName, ivlist[j], sep = "")
rowName <- paste(rowName, ", ", sep = "")
}
}
}
}
rowNames <- c(rowNames, rowName)
}
rowNames <- c(rowNames, "Total")
rowNames <- format.default(rowNames, justify = "left")
colNames <- format.default(c("Fractions", " % Total"),
justify = "right")
dimnames(outputcommonM) <- list(rowNames, colNames)
VariableImportance <- matrix(nrow = nvar, ncol = 4)
for (i in 1:nvar) {
VariableImportance[i, 3] <- round(sum(binarymx[i, ] * (commonM[,3]/apply(binarymx,2,sum))), digits = 4)
}
VariableImportance[,1] <- outputcommonM[1:nvar,1]
VariableImportance[,2] <- VariableImportance[,3]-VariableImportance[,1]
total=round(sum(VariableImportance[,3]),digits = 3)
VariableImportance[, 4] <- round(100*VariableImportance[, 3]/total,2)
dimnames(VariableImportance) <- list(iv.name, c("Unique","Average.share","Individual","I.perc(%)"))
if(var.part)
{outputList <- list(Method_Type=c(method,type),Total_explained_variation=total,Var.part = outputcommonM, Hier.part = VariableImportance)}
else
{outputList<-list(Method_Type=c(method,type),Total_explained_variation=total,Hier.part= VariableImportance)}
class(outputList) <- "rdaccahp"
outputList
}
}
}
IWLSiteration <- function(x, y, inib, iniscale, maxiter, tol, b, c1, c2) {
n <- nrow(x)
p <- ncol(x)
res <- y - x %*% inib
if (iniscale == 0) {
scale <- median(abs(res))/.6745
} else {
scale <- iniscale
}
oldbeta <- inib
betadiff <- 2*tol
iter <- 0
fwOpt <- function(x, cc) {
tmp <- (-1.944 / cc^2 + 1.728 * x^2 / cc^4 - 0.312 * x^4 / cc^6 + 0.016 * x^6 / cc^8) / 3.25
tmp[abs(x) < 2*cc] <- 1 / (3.25*cc^2)
tmp[abs(x) > 3*cc] <- 0
tmp
}
psixOpt <- function(x, cc) {
tmp <- x^2 / (3.25*cc^2)
tmp2 <- (-1.944 * x^2 / cc^2 + 1.728 * x^4 / cc^4 - 0.312 * x^6 / cc^6 + 0.016 * x^8 / cc^8) / 3.25
tmp[abs(x) > 2*cc] <- tmp2[abs(x) > 2*cc]
tmp[abs(x) > 3*cc] <- 0
tmp
}
rhoOpt <- function(x, cc) {
tmp <- x^2 / 2 / (3.25*cc^2)
tmp2 <- (1.792 - 0.972 * x^2 / cc^2 + 0.432 * x^4 / cc^4 - 0.052 * x^6 / cc^6 + 0.002 * x^8 / cc^8) / 3.25
tmp[abs(x) > 2*cc] <- tmp2[abs(x) > 2*cc]
tmp[abs(x) > 3*cc] <- 1
tmp
}
WtellerOpt <- function(x, cc) {
tmp <- (3.584 - 0.864 * x^4 / cc^4 + 0.208 * x^6 / cc^6 - 0.012 * x^8 / cc^8) / 3.25
tmp[abs(x) < 2*cc] <- 0
tmp[abs(x) > 3*cc] <- 2
tmp
}
  newbeta <- oldbeta  # ensure newbeta exists even if the loop never runs
  while ((betadiff > tol) && (iter < maxiter)) {
scale <- sqrt( scale^2 * mean( rhoOpt(res/scale,c1) ) / b )
scaledres <- res/scale
Wn.teller <- sum(WtellerOpt(scaledres,c2))
Wn.noemer <- sum(psixOpt(scaledres,c1))
Wn <- Wn.teller / Wn.noemer
weights <- (Wn * fwOpt(scaledres,c1) + fwOpt(scaledres,c2))
    weights <- pmax(weights, 0)
sqweights <- sqrt(weights)
xw <- x * as.vector(sqweights)
yw <- y * sqweights
newbeta <- qr.coef(qr(xw),yw)
if (any(!is.finite(newbeta))) {
newbeta <- inib
scale <- iniscale
break
}
betadiff <- sqrt(sum((oldbeta - newbeta)^2))
res <- y - x %*% newbeta
oldbeta <- newbeta
iter <- iter + 1
}
return( list( betarw = newbeta, scalerw = scale ) )
} |
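## Usage sketch for IWLSiteration() (the tuning constants below are
## illustrative assumptions; in practice b, c1 and c2 come from the
## efficiency/breakdown calibration of the optimal rho function):
# set.seed(1)
# n <- 100; X <- cbind(1, rnorm(n))
# y <- X %*% c(1, 2) + rnorm(n); y[1:5] <- y[1:5] + 10  # a few outliers
# fit0 <- qr.coef(qr(X), y)                             # LS starting value
# IWLSiteration(X, y, inib = fit0, iniscale = 0, maxiter = 50,
#               tol = 1e-6, b = 0.5, c1 = 0.405, c2 = 1.06)$betarw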
library(dbglm)
library(DBI)
library(MonetDBLite)
library(dplyr)
library(dbplyr)
library(purrr)
library(tidyr)
library(rlang)
library(tibble)
library(vctrs)
library(tidypredict)
ms <- MonetDBLite::src_monetdblite("~/VEHICLE")
monetdb.read.csv(ms$con, "Fleet30Nov2017.csv",tablename="vehicles",quote="",nrow.check=10000,best.effort=TRUE,lower.case.names=TRUE)
vehicles<-tbl(ms,"vehicles")
cars <- filter(vehicles, vehicle_type == "PASSENGER CAR/VAN") %>%
mutate(isred=ifelse(basic_colour=="RED",1,0)) %>%
filter(number_of_seats >1 & number_of_seats < 7) %>% filter(number_of_axles==2) %>%
compute()
system.time({
model<-dbglm(isred~power_rating+number_of_seats+gross_vehicle_mass,tbl=cars)
})
library(dbglm)
library(RSQLite)
library(dplyr)
library(dbplyr)
vehicles<-readr::read_csv("Fleet30Nov2017.csv")
names(vehicles)<-tolower(names(vehicles))
vehicles$power_rating<-as.numeric(as.character(vehicles$power_rating))
vehicles$number_of_seats<-as.numeric(as.character(vehicles$number_of_seats))
vehicles$number_of_axles<-as.numeric(as.character(vehicles$number_of_axles))
sqlite<-dbDriver("SQLite")
con<-dbConnect(sqlite,"nzcars.db")
RSQLite::initExtension(con)
dbWriteTable(con,"vehicles",vehicles)
rm(vehicles)
dbDisconnect(con)
library(dbglm)
library(RSQLite)
library(dplyr)
library(dbplyr)
sqlite<-dbDriver("SQLite")
con<-dbConnect(sqlite,"nzcars.db")
RSQLite::initExtension(con)
sqlitevehicles<-tbl(con,"vehicles")
cars <- filter(sqlitevehicles, vehicle_type == "PASSENGER CAR/VAN") %>%
mutate(isred=ifelse(basic_colour=="RED",1,0)) %>%
filter(number_of_seats >1 & number_of_seats < 7) %>% filter(number_of_axles==2) %>%
compute()
system.time({
sqlitemodel<-dbglm(isred~power_rating+number_of_seats+gross_vehicle_mass,tbl=cars)
})
sqrt(diag(sqlitemodel$hatV)*2917)
library(duckdb)
con_duck<- dbConnect(duckdb::duckdb(), "duck")
vehicles<- read.csv("Fleet30Nov2017.csv")
names(vehicles)<-tolower(names(vehicles))
vehicles$power_rating<-as.numeric(as.character(vehicles$power_rating))
vehicles$number_of_seats<-as.numeric(as.character(vehicles$number_of_seats))
vehicles$number_of_axles<-as.numeric(as.character(vehicles$number_of_axles))
dbWriteTable(con_duck, "cars", vehicles, overwrite = TRUE)
cars<- tbl(con_duck, "cars")
cars1 <- filter(cars, vehicle_type == "PASSENGER CAR/VAN") %>%
mutate(isred=ifelse(basic_colour=="RED",1,0)) %>%
filter(number_of_seats >1 & number_of_seats < 7) %>% filter(number_of_axles==2) %>%
compute()
model<-dbglm(isred~power_rating+number_of_seats+gross_vehicle_mass,tbl=cars1) |
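## The dbglm fits above are plain lists (e.g. $hatV was used earlier for
## standard errors); str() is a safe way to inspect them without assuming
## further component names:
# str(model)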
library(animation)
library(Cairo)
saveHTML({
ani.options(interval = 0.05, nmax = 1)
CairoPNG(filename = ani.options('img.fmt'))
par(pty = 's', mar = rep(1, 4))
vi.lilac.chaser()
dev.off()
}, img.name='chaser_cairo', htmlfile='chaser.html',
use.dev=FALSE, ani.type='png',
description = 'Using the high-quality CairoPNG() device')
saveHTML({
ani.options(interval = 0.05, nmax = 1)
par(pty = 's', mar = rep(1, 4))
vi.lilac.chaser()
}, img.name='chaser_png', htmlfile='chaser.html',
use.dev=TRUE, ani.dev='png', ani.type='png',
description = c("Using R's png() device; can you see the difference?",
'Note the borders of the dots.')) |
library(dplyr)
summarise(mtcars, mean(disp))
summarise(group_by(mtcars, cyl), mean(disp))
summarise(group_by(mtcars, cyl), m = mean(disp), sd = sd(disp)) |
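## The same grouped summary written with across() (requires dplyr >= 1.0):
summarise(group_by(mtcars, cyl), across(disp, list(m = mean, sd = sd)))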
# Cumulative skewness: skewness of x[1:k] for each k; skewness() is assumed
# to come from a package such as e1071 or moments.
cumskew <-
function(x) sapply(seq_along(x), function(k, z) skewness(z[1:k]), z = x) |
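## Example run (assumes skewness() from the e1071 package):
# library(e1071)
# set.seed(1)
# cumskew(rnorm(20))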
library(stringi)
function(a=1){
list(msg=paste0("Welcome to the root URL! a = ", a))
}
function(req, res){
if (!stri_startswith_fixed(req$QUERY_STRING, "?user=")){
res$status <- 401
return(list(err="Not authorized"))
}
user <- substr(req$QUERY_STRING, 7, nchar(req$QUERY_STRING))
req$username <- user
forward()
}
function(req, res){
list(name=req$username)
}
function(req, res){
stop("I throw an error!")
}
function(req){
req$testVal <- 1
req$testVal
}
function(req){
req$testVal
}
function(){
'<html>
<head>
</head>
<body>
<h3>Expected answer (good): `{}`<h3>
<h3>Shared TCP Connections answer (bad): `[1]`.</h3>
<br/>
<h3>Answer:</h3>
<span id="answer"></span>
<script src="https://code.jquery.com/jquery-2.1.4.min.js"></script>
<script>
$.get("/get").done(function(){
$.get("/set").done(function(){
$.get("/get").done(function(a){
window.document.getElementById("answer").append(JSON.stringify(a, null, " "))
});
});
});
</script>
</body>
</html>'
} |
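## The handlers above follow the plumber pattern: filters call forward(),
## endpoints return serializable objects or raw HTML. In a plumber.R file
## each function would carry a roxygen-style annotation (e.g. #* @filter,
## #* @get /user) and the API would be started roughly like this:
# library(plumber)
# pr("plumber.R") %>% pr_run(port = 8000)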
`PlotPolysOnStaticMap` <-structure(function
(
MyMap,
polys,
col,
border = NULL,
lwd = .25,
verbose = 0,
add=TRUE,
textInPolys = NULL,
...
){
stopifnot(class(polys)[1] == "SpatialPolygons" | class(polys)[1] == "PolySet" | class(polys)[1] == "data.frame" | class(polys)[1] == "matrix")
if (class(polys)[1] == "SpatialPolygons")
polys = SpatialToPBS(polys)$xy
Rcoords <- LatLon2XY.centered(MyMap,lat= polys[,"Y"],lon= polys[,"X"]);
polys.XY <- as.data.frame(polys);
polys.XY[,"X"] <- Rcoords$newX;
polys.XY[,"Y"] <- Rcoords$newY;
if ( !( "PID" %in% colnames(polys.XY)) )
polys.XY[,"PID"] <- 1;
if ( !( "SID" %in% colnames(polys.XY)) )
polys.XY[,"SID"] <- 1;
polys.XY[,"PIDSID"] <- apply(polys.XY[,c("PID","SID")],1,paste,collapse=":")
if (!add) tmp <- PlotOnStaticMap(MyMap, verbose=0, ...)
if (verbose>1) browser()
if (!is.null(textInPolys)) Centers = PBSmapping::calcCentroid(polys.XY)
  if (requireNamespace("PBSmapping", quietly = TRUE) && all(c("PID","X","Y","POS") %in% colnames(polys.XY)) ) {
attr(polys.XY, "projection") <- NULL;
usr <- par('usr')
PBSmapping::addPolys(polys.XY,col=col, border = border, lwd = lwd, xlim =usr[1:2], ylim = usr[3:4], ...)
if (!is.null(textInPolys)) {
text(Centers[,"X"],Centers[,"Y"],textInPolys,cex=0.75, col = "blue")
}
} else {
if (!missing(col)) {
polys.XY[,"col"] <- col;
PIDtable <- as.numeric(table(polys.XY[,"PID"]));
SIDtable <- as.numeric(table(polys.XY[,"PIDSID"]))
if (length(SIDtable)==length(col)) polys.XY[,"col"] <- rep(col, SIDtable);
if (length(PIDtable)==length(col)) polys.XY[,"col"] <- rep(col, PIDtable);
}
if ( !( "col" %in% colnames(polys.XY)) )
polys.XY[,"col"] <- rgb(.1,.1,.1,.05);
pids = unique(polys.XY[,"PIDSID"])
for (i in pids){
jj = polys.XY[,"PIDSID"] == i;
xx= polys.XY[jj,];
if ( ( "POS" %in% colnames(xx)) ) xx <- xx[order(xx[,"POS"]),]
polygon( xx[, c("X","Y")], col=xx[,"col"]);
if (!is.null(textInPolys)) {
text(Centers[i,"X"],Centers[i,"Y"],textInPolys[i], col = "blue")
}
}
}
}, ex = function(){
if (FALSE) {
shpFile <- paste(system.file(package = "RgoogleMaps"), "/shapes/bg11_d00.shp", sep = "")
shp=importShapefile(shpFile,projection="LL");
bb <- qbbox(lat = shp[,"Y"], lon = shp[,"X"]);
MyMap <- GetMap.bbox(bb$lonR, bb$latR, destfile = "DC.png");
PlotPolysOnStaticMap(MyMap, shp, lwd=.5, col = rgb(0.25,0.25,0.25,0.025), add = F);
mapOSM <- GetMap.bbox(bb$lonR, bb$latR, destfile = "DC.png", type="osm");
PlotPolysOnStaticMap(mapOSM, shp, lwd=.5, col = rgb(0.75,0.25,0.25,0.15), add = F);
shpFile <- system.file("shapes/sids.shp", package="maptools");
shp=importShapefile(shpFile,projection="LL");
bb <- qbbox(lat = shp[,"Y"], lon = shp[,"X"]);
MyMap <- GetMap.bbox(bb$lonR, bb$latR, destfile = "SIDS.png");
sid <- 100*attr(shp, "PolyData")$SID74/(attr(shp, "PolyData")$BIR74+500)
b <- as.integer(cut(sid, quantile(sid, seq(0,1,length=8)) ));
b[is.na(b)] <- 1;
opal <- col2rgb(grey.colors(7), alpha=TRUE)/255; opal["alpha",] <- 0.2;
shp[,"col"] <- rgb(0.1,0.1,0.1,0.2);
for (i in 1:length(b))
shp[shp[,"PID"] == i,"col"] <- rgb(opal[1,b[i]],opal[2,b[i]],opal[3,b[i]],opal[4,b[i]]);
PlotPolysOnStaticMap(MyMap, shp, lwd=.5, col = shp[,"col"], add = F);
library(maptools);
qk <- SpatialPointsDataFrame(as.data.frame(shp[, c("X","Y")]), as.data.frame(shp[, c("X","Y")]))
sp::proj4string(qk) <- CRS("+proj=longlat");
tf <- "NC.counties";
SGqk <- GE_SpatialGrid(qk)
png(file=paste(tf, ".png", sep=""), width=SGqk$width, height=SGqk$height,
bg="transparent")
par(mar=c(0,0,0,0), xaxs="i", yaxs="i");par(mai = rep(0,4))
PBSmapping::plotPolys(shp, plt=NULL)
dev.off()
maptools::kmlOverlay(SGqk, paste(tf, ".kml", sep=""), paste(tf, ".png", sep=""));
MyMap <- GetMap.bbox(bb$lonR, bb$latR, destfile = "SIDS.png", size = c(640, 320), zoom = 7);
PlotPolysOnStaticMap(MyMap, shp, lwd=.5, col = shp[,"col"], add = F);
}
}) |
uifit <- function(x.closedp)
{
  if(!inherits(x.closedp, "closedp.t")) stop("'x.closedp' must be an object produced with 'closedp' or 'closedp.t'")
t <- x.closedp$t
ifirstcap <- NULL
for (i in 1:t) { ifirstcap <- c(ifirstcap,rep(i,2^(t-i))) }
lmn<-rownames(x.closedp$results)
nm<-length(lmn)
tableau <- matrix(nrow=t+5,ncol=nm+1)
dimnames(tableau) <- list(paste("u",1:(t+5),sep = ""),c("observed",lmn))
stat <- matrix(nrow=nm,ncol=1)
  dimnames(stat) <- list(lmn,c("Chi-square value"))
desc<- descriptive(x.closedp$X,x.closedp$dfreq)
tableau[,1]<-c(desc$base.freq[,2],rep(NA,5))
for (j in 1:nm)
{
glmo <- x.closedp$glm[[j]]
N <- x.closedp$parameters[[j]][1,1]
if (lmn[j]=="M0")
{
p <- exp(glmo$coef[2])/(1+exp(glmo$coef[2]))
tableau[,j+1] <- N*p*(1-p)^(0:(t+4))
} else
if (lmn[j]=="Mb")
{
p <- 1-exp(glmo$coef[2])/(1+exp(glmo$coef[3]))
tableau[,j+1] <- N*p*(1-p)^(0:(t+4))
} else
if (lmn[j]=="Mh Poisson2")
{
EprobaP_general <- function(x,beta,tau,a,t,k){
(exp(beta)*(1+exp(beta)*a^x)^(t-k))*(a*tau)^x/(factorial(x)*sum(na.rm=TRUE,choose(t,0:t)*exp(beta*(0:t)+tau*a^(0:t))))
}
value_Eproba <- rep(0,t+5)
for (i in 1:(t+5))
{
EprobaP <- function(x){ EprobaP_general(x,glmo$coef[2],glmo$coef[3],2,t,i)}
value_Eproba[i] <- sum(na.rm=TRUE,EprobaP(0:100))
}
tableau[,j+1] <- N*value_Eproba
} else
if (lmn[j]=="Mh Darroch")
{
if(glmo$coef[3]>0)
{
EprobaD_general <- function(x,beta,tau,t,k){
exp(-(x^2)/(2*tau))*((1+exp(beta+x))^(t-k))*exp(beta+x)/
(sqrt(2*pi*tau)*sum(na.rm=TRUE,choose(t,0:t)*exp(beta*(0:t)+tau*((0:t)^2)/2)))
}
value_Eproba <- rep(0,t+5)
for (i in 1:(t+5))
{
EprobaD <- function(x){EprobaD_general(x,glmo$coef[2],glmo$coef[3],t,i)}
value_Eproba[i] <- integrate(EprobaD,-100,100)$value
}
tableau[,j+1] <- N*value_Eproba
} else {
for ( i in 1:t ) { tableau[i,j+1] <- sum(na.rm=TRUE,glmo$fitted.values[ifirstcap==i]) }
}
} else {
for ( i in 1:t ) { tableau[i,j+1] <- sum(na.rm=TRUE,glmo$fitted.values[ifirstcap==i]) }
}
stat[j,1] <- sum(na.rm=TRUE,((tableau[1:t,1]-tableau[1:t,j+1])^2)/tableau[1:t,j+1])
}
Mean <- colSums((1:t)*tableau[1:t,])/colSums(tableau[1:t,])
Variance <- colSums(((1:t)^2)*tableau[1:t,])/colSums(tableau[1:t,]) - Mean^2
firstcapt <- cbind(Mean,Variance)
list(predicted=tableau,fit.stat=stat,day.first.capt=firstcapt)
} |
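## Hedged usage sketch: uifit() expects a fit from Rcapture's closedp.t(),
## e.g. on the package's bundled 'hare' capture-recapture data:
# library(Rcapture)
# data(hare)
# uifit(closedp.t(hare))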
library(frbs)
data(frbsData)
data.train <- frbsData$MackeyGlass1000.dt[1: 500, ]
data.fit <- data.train[, 1 : 4]
data.tst <- frbsData$MackeyGlass1000.dt[501 : 1000, 1 : 4]
real.val <- matrix(frbsData$MackeyGlass1000.dt[501 : 1000, 5], ncol = 1)
range.data <- matrix(c(0.43462, 1.3105, 0.43462, 1.3105, 0.43462, 1.3105, 0.43462, 1.3105, 0.43462, 1.3105), nrow=2)
method.type <- "GFS.MEMETIC"
control.MA = list(maxEvals = 1000, effort = 0.8, alpha = 0.3, popsize = 20, istep = 100, ls = "cmaes")
control <- list(num.labels = 4, type.snorm = "MAX", type.implication.func = "ZADEH", control.MA = control.MA, name = "MackeyGlass")
object <- frbs.learn(data.train, range.data, method.type, control)
res.fit <- predict(object, data.fit)
res.test <- predict(object, data.tst)
y.pred <- res.test
y.real <- real.val
bench <- cbind(y.pred, y.real)
colnames(bench) <- c("pred. val.", "real. val.")
print("Comparison GFS.memetic Vs Real Value on Mackey Glass Data Set")
print(bench)
residuals <- (y.real - y.pred)
MSE <- mean(residuals^2)
RMSE <- sqrt(mean(residuals^2))
SMAPE <- mean(abs(residuals)/((abs(y.real) + abs(y.pred))/2))*100
err <- c(MSE, RMSE, SMAPE)
names(err) <- c("MSE", "RMSE", "SMAPE")
print("GFS.memetic: Error Measurement: ")
print(err)
op <- par(mfrow = c(2, 1))
x1 <- seq(from = 1, to = nrow(res.fit))
result.fit <- cbind(data.train[, 5], res.fit)
plot(x1, result.fit[, 1], col="red", main = "Mackey Glass: Fitting phase (the training data(red) Vs Sim. result(blue))", type = "l", ylab = "MG")
lines(x1, result.fit[, 2], col="blue")
result.test <- cbind(real.val, res.test)
x2 <- seq(from = 1, to = nrow(result.test))
plot(x2, result.test[, 1], col="red", main = "Mackey Glass: Predicting phase (the Real Data(red) Vs Sim. result(blue))", type = "l", ylab = "MG")
lines(x2, result.test[, 2], col="blue", type = "l")
par(op) |
ShuffleMC <-
function(MClist, Weights = rep(1, length(MClist)), CheckArguments = TRUE)
{
if (CheckArguments)
CheckentropartArguments()
if (is.null(names(MClist)))
names(MClist) <- paste("MC", seq_along(MClist), sep="")
  Gabundances <- Reduce(function(...) mergeandlabel(...), lapply(MClist, function(x) x$Nsi))
Shuffled <- sample(seq_len(ncol(Gabundances)))
NumCommunities <- unlist(lapply(MClist, function(x) length(x$Ni)))
MCnames <- rep(names(MClist), NumCommunities)
names(Gabundances) <- paste(MCnames, unlist(lapply(MClist, function(x) names(x$Ni))), sep=".")[Shuffled]
FirstC <- 1
ShuffledMCList <- list()
for (i in seq_along(MClist)) {
LastC <- FirstC + length((MClist[[i]])$Wi) - 1
ShuffledMCList[[i]] <- MetaCommunity(Gabundances[, Shuffled[FirstC:LastC]], (MClist[[i]])$Wi)
names(ShuffledMCList)[i] <- paste("MC", i, sep="")
FirstC <- LastC + 1
}
return(ShuffledMCList)
} |
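## Hedged usage sketch with entropart's bundled meta-community object:
# library(entropart)
# data(Paracou618)
# shuffled <- ShuffleMC(list(MC1 = Paracou618.MC, MC2 = Paracou618.MC))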
get.hess <- function(data, knots.x, knots.y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=0.5){
x <- data$x
y <- data$y-data$SB
sigma <- data$sigma
lambda <- data$lambda
Phi <- basisMatrix(x=x, knots.x=knots.x)
bkg <- Phi %*% t(t(knots.y))
D <- DMatrix(knots.x=knots.x)$matrix
cDc <- as.vector(t(knots.y) %*% D %*% t(t(knots.y)))
E <- length(knots.y)
bkg.pp <- D %*% t(t(knots.y))
hess.prior <- E/2 * (2*(D / cDc) - (4 * bkg.pp %*% t(bkg.pp)) / (cDc ^ 2) )
deviation <- y - bkg
deviation.norm <- deviation/sigma
grad.f <- as.vector(-deviation/sigma^2)
hess.f <- as.vector(-1/sigma^2)
funct.f <- (log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * deviation.norm ^ 2)
f <- list(funct=funct.f, grad=grad.f, hess=hess.f)
rho <- sigma/lambda
z <- (y - bkg) / lambda
qq <- z / rho - rho
funct.h <- log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2)
gamma.q <- exp(-0.5 * qq ^ 2 - pnorm(q=qq, log.p=TRUE)) / (rho * sqrt(2 * pi))
grad.h <- as.vector(-(1 - gamma.q) / lambda)
hess.h <- as.vector(-gamma.q * (gamma.q + qq / rho) / (lambda ^ 2))
h <- list(funct=funct.h, grad=grad.h, hess=hess.h)
f.fract <- as.vector(1 / (1 + exp(h$funct - f$funct)))
h.fract <- 1 - f.fract
grad.g <- hess.g <- NA
grad.g <- (f.fract * f$grad + (1 - f.fract) * h$grad)
f.contr <- f.fract*f$hess + f.fract*f$grad*f$grad
h.contr <- h.fract*h$hess + h.fract*h$grad*h$grad
hess.gg <- -(f.contr + h.contr - (grad.g)^2)
Phi.prime <- hess.gg*Phi
hess.g <- t(Phi.prime) %*% Phi
hess.gr <- 0
if(!is.na(Gr[1])){
if(is.na(Gr$type1))
hess.gr.r1 <- 0
else if(Gr$type1=="gaussianNoise")
hess.gr.r1 <- logLikelihoodGrGauss(y=data$y-data$SB, knots.y=knots.y, alpha=1, Phi=Phi, bkg.r=Gr$bkg.r,
sigma.r=Gr$sigma.r, matrix.FT=Gr$matrix.FT1, Hessian=TRUE)$hess
else if(Gr$type1=="correlatedNoise")
hess.gr.r1 <- logLikelihoodGrCorr(knots.y=knots.y, Phi=Phi, bkg.r=Gr$bkg.r,
KG.inv=Gr$KG.inv, matrix.FT=Gr$matrix.FT1, Hessian=TRUE)$hess
if(is.na(Gr$type2))
hess.gr.r2 <- 0
else if(Gr$type2=="secondDeriv")
hess.gr.r2 <- logPriorBkgRSmooth(bkg.r=Gr$matrix.FT2 %*% bkg, D=Gr$D, Hessian=TRUE, Phi=Phi,
matrix.FT=Gr$matrix.FT2, knots.y=knots.y)$hess
else if(Gr$type2=="gaussianProcess")
hess.gr.r2 <- logPriorBkgRGP(bkg.r=Gr$matrix.FT2 %*% bkg, covMatrix=Gr$covMatrix, Hessian=TRUE)$hess
hess.gr <- hess.gr.r1 + hess.gr.r2
}
hess <- hess.prior + hess.gr + hess.g
hess.inv <- solve(hess)
Phi <- basisMatrix(x=data$x, knots.x=knots.x)
H <- Phi%*%hess.inv%*%t(Phi)
H <- H + diag((data$sigma)^2, length(data$x))
cov.diag <- diag(H)
cov.diag[which(cov.diag<0)]<-0
stdev <- sqrt(cov.diag)
MFT <- sineFT.matrix(Q=data$x, r=r)
cov.r <- MFT %*% H %*% t(MFT)
cov.diag.r <- diag(cov.r)
cov.diag.r[which(cov.diag.r<0)]<-0
stdev.r <- sqrt(cov.diag.r)
return(list(stdev=stdev, stdev.r=stdev.r, hess=hess, cov.matrix=hess.inv, cov.matrix.r=cov.r, hess.gg=hess.gg))
}
get.hess.numerically <- function(data, knots.x, knots.y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=0.5, h=1e-4){
n <- length(knots.y)
incr <- function(cc, h, i=0, j=0){
if(i!=0)
cc[i] <- cc[i]+h
if(j!=0)
cc[j] <- cc[j]+h
return(cc)
}
hess <- matrix(0, nrow=n, ncol=n)
for(i in 1:n){
cat("knot.i = ", i, " of ", n,"\n")
for(j in 1:n){
a1 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i,j=j), Gr=Gr, p.bkg=p.bkg)
a2 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i, j=0), Gr=Gr, p.bkg=p.bkg)
a3 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=0, j=j), Gr=Gr, p.bkg=p.bkg)
a4 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=knots.y, Gr=Gr, p.bkg=p.bkg)
hess[i,j] <- (a1-a2-a3+a4)/h^2
}
}
hess.inv <- solve(hess)
Phi <- basisMatrix(x=data$x, knots.x=knots.x)
H <- Phi%*%hess.inv%*%t(Phi)
H <- H + diag((data$sigma)^2, length(data$x))
cov.diag <- diag(H)
cov.diag[which(cov.diag<0)]<-0
stdev <- sqrt(cov.diag)
MFT <- sineFT.matrix(Q=data$x, r=r)
cov.r <- MFT %*% H %*% t(MFT)
cov.diag.r <- diag(cov.r)
cov.diag.r[which(cov.diag.r<0)]<-0
stdev.r <- sqrt(cov.diag.r)
return(list(stdev=stdev, stdev.r=stdev.r, hess=hess, cov.matrix=hess.inv, cov.matrix.r=cov.r))
}
grad.descent <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5, eps=1e-3, N=10000){
x.p <- knots.y
x.pp <- x.p
  lambda <- c(abs(0.0001/get.deriv(data=data, knots.x=knots.x, knots.y=x.p, Gr=Gr, p.bkg=p.bkg)))
N <- round(N, digits=-2)
if(N==0) N <- 100
cat("iterations will stop once convergence is reached \n")
cat("indicating % of itermax... \n")
  converged <- FALSE
  for(j in 1:(N/100)){
cat("...",(j-1)/N*100*100, "% done \n")
for(i in 1:100){
grad.f <- as.vector(get.deriv(data=data, knots.x=knots.x, knots.y=x.p, Gr=Gr, p.bkg=p.bkg))
x.f <- x.p - lambda*grad.f
x.p <- x.f
}
    if(max(abs(x.pp-x.f))<eps){
      cat("\n convergence reached! \n")
      converged <- TRUE
      break
    }
    x.pp <- x.f
  }
  if(!converged){
    cat("convergence not reached! \n")
    return(knots.y)
  }
  else
    return(x.f)
}
get.deriv.numerically <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5, h=1e-6){
n <- length(knots.y)
incr <- function(cc, h, i=0){
cc[i] <- cc[i]+h
cc
}
der <- 0
for(i in 1:n){
a1 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i), Gr=Gr, p.bkg=p.bkg)
a2 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=knots.y, Gr=Gr, p.bkg=p.bkg)
der[i] <- (a1-a2)/h
}
return(der)
}
get.deriv <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5){
x <- data$x
y <- data$y-data$SB
sigma <- data$sigma
lambda <- data$lambda
Phi <- basisMatrix(x=x, knots.x=knots.x)
bkg <- Phi %*% t(t(knots.y))
D <- DMatrix(knots.x=knots.x)$matrix
cDc <- as.vector(t(knots.y) %*% D %*% t(t(knots.y)))
E <- length(knots.y)
deviation <- y - bkg
norm.dev <- deviation/sigma
grad.f <- as.vector(deviation/sigma^2)*Phi
funct.f <- (log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * norm.dev ^ 2)
f <- list(funct=funct.f, grad=grad.f)
rho <- sigma/lambda
z <- (y - bkg) / lambda
qq <- z / rho - rho
funct.h <- (log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2))
gamma.q <- exp(-0.5 * qq ^ 2 - pnorm(q=qq, log.p=TRUE)) / (rho * sqrt(2 * pi))
grad.h <- as.vector((1 - gamma.q) / lambda) * Phi
h <- list(funct=funct.h, grad=grad.h)
f.fract <- as.vector(1 / (1 + exp(h$funct - f$funct)))
grad.g <- hess.g <- NA
  grad.g <- f.fract * f$grad + (1 - f.fract) * h$grad
grad <- -colSums(grad.g) + (t(knots.y) %*% D) * E / cDc
if(!is.na(Gr[1])){
matrix.FT <- Gr$matrix.FT1
sigma.r <- Gr$sigma.r
Mprime <- matrix.FT%*%Phi / (sqrt(2)*sigma.r)
b.prime <- Gr$bkg.r / (sqrt(2)*sigma.r)
grad.gr <- 2*t(Mprime) %*% Mprime %*%knots.y- 2*t(Mprime)%*%b.prime
grad <- as.vector(grad) + as.vector(grad.gr)
}
return(grad)
}
golden.search = function(data, lower.bound=-.01, upper.bound=.01, tolerance=1e-6, knots.x, knots.y, Gr, p.bkg, grad.f){
f <- function(lambda){
logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=(knots.y - lambda*grad.f), Gr=Gr, p.bkg=p.bkg)
}
golden.ratio = 2/(sqrt(5) + 1)
x1 = upper.bound - golden.ratio*(upper.bound - lower.bound)
x2 = lower.bound + golden.ratio*(upper.bound - lower.bound)
f1 = f(x1)
f2 = f(x2)
iteration = 0
while(abs(upper.bound - lower.bound) > tolerance){
iteration = iteration + 1
if (f2 > f1){
upper.bound = x2
x2 = x1
f2 = f1
x1 = upper.bound - golden.ratio*(upper.bound - lower.bound)
f1 = f(x1)
}
else{
lower.bound = x1
x1 = x2
f1 = f2
x2 = lower.bound + golden.ratio*(upper.bound - lower.bound)
f2 = f(x2)
}
}
estimated.minimizer = (lower.bound + upper.bound)/2
estimated.minimizer
}
regularized.cholesky <- function(Matr, eps.max=1e-2, eps.min=1e-20, numTries=17) {
baseVal <- min(diag(Matr))
U <- try(chol(Matr), silent=T)
epsilon <- eps.min
I <- diag(nrow(Matr))
  while (inherits(U, "try-error") && epsilon <= eps.max) {
U <- try(chol(Matr + baseVal * epsilon * I), silent=T)
epsilon <- epsilon * 10
}
  if (epsilon >= eps.max) stop("Cholesky decomposition failed even after ridge regularization up to 'eps.max'\n")
return (U)
}
row.outer.product <- function(Phi) {
rows <- nrow(Phi)
cols <- ncol(Phi)
ppt <- array(apply(Phi, 1, function(x) x %*% t(x)), dim=c(cols, cols, rows))
return (ppt)
}
covMatrixSE <- function(x, sig=0.05, l=0.1){
N <- length(x)
covX <- matrix(nrow=N, ncol=N)
if(length(l)==1){
covX <- outer(X=x, Y=x, FUN=function(x, y) {
sig^2*exp( -0.5*((x - y) / l)^2 )
})
}
else{
for(i in 1:N)
for(j in 1:N)
covX[i,j] <- sig^2*sqrt( 2*l[i]*l[j]/(l[i]^2+l[j]^2) )*
exp( -(x[i]-x[j])^2/(l[i]^2+l[j]^2) )
}
factor <- 1
while (det(covX)==0) {
covX <- covX*1e1
factor <- factor*1e1
}
list(cov=covX, factor=factor)
}
covMatrix.DI <- function(covMatrix){
U <- regularized.cholesky(covMatrix)
covMatrix.inv <- chol2inv(U)
covMatrix.det <- det(U)
list(inv=covMatrix.inv, det=covMatrix.det)
} |
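## Quick sketch: a squared-exponential covariance on a small grid, then its
## regularized inverse and determinant via the helpers defined above:
x.grid <- seq(0, 1, length.out = 5)
K <- covMatrixSE(x.grid, sig = 0.05, l = 0.1)
str(covMatrix.DI(K$cov))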
# Note: this local optimize() shadows stats::optimize(), hence the explicit
# stats:: prefix in the body.
optimize <- function(population, Ly, harvest, biomass) {
stats::optimize(yield_pi, c(0, 1),
population = population,
Ly = Ly, harvest = harvest, biomass = biomass,
maximum = TRUE
)$maximum
}
ypr_optimize <- function(population,
Ly = 0, harvest = TRUE, biomass = FALSE) {
chk_population(population)
chk_number(Ly)
chk_gte(Ly)
chk_flag(biomass)
chk_flag(harvest)
yield <- optimize(
population = population, Ly = Ly,
harvest = harvest, biomass = biomass
)
sanitize(yield)
}
ypr_optimise <- ypr_optimize |
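## Hedged usage sketch (assumes the ypr package's default population object):
# library(ypr)
# ypr_optimize(ypr_population())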
full_join.disk.frame <- function(x, y, by=NULL, copy=FALSE, ..., outdir = tempfile("tmp_disk_frame_full_join"), overwrite = TRUE, merge_by_chunk_id = NULL, .progress = FALSE) {
stopifnot("disk.frame" %in% class(x))
overwrite_check(outdir, overwrite)
if("data.frame" %in% class(y)) {
ncx = nchunks(x)
dy = shard(y, shardby = by, nchunks = ncx, overwrite = FALSE)
dx = rechunk(x, shardby = by, outdir=tempfile(fileext = ".jdf"), overwrite = FALSE)
return(full_join.disk.frame(dx, dy, by, copy=copy, outdir=outdir, merge_by_chunk_id = TRUE))
} else if("disk.frame" %in% class(y)) {
if(is.null(merge_by_chunk_id)) {
stop("both x and y are disk.frames. You need to specify merge_by_chunk_id = TRUE or FALSE explicitly")
}
if(is.null(by)) {
by <- intersect(names(x), names(y))
}
ncx = nchunks(x)
ncy = nchunks(y)
if (merge_by_chunk_id == FALSE) {
warning("merge_by_chunk_id = FALSE. This will take significantly longer and the preparations needed are performed eagerly which may lead to poor performance. Consider making y a data.frame or set merge_by_chunk_id = TRUE for better performance.")
x = rechunk(x, by, nchunks = max(ncy,ncx), outdir=tempfile(fileext = ".jdf"), overwrite = FALSE)
y = rechunk(y, by, nchunks = max(ncy,ncx), outdir=tempfile(fileext = ".jdf"), overwrite = FALSE)
return(full_join.disk.frame(x, y, by, copy = copy, outdir = outdir, merge_by_chunk_id = TRUE, overwrite = overwrite, .progress = .progress))
} else if ((identical(shardkey(x)$shardkey, "") & identical(shardkey(y)$shardkey, "")) | identical(shardkey(x), shardkey(y))) {
res = cmap2(x, y, ~{
if(is.null(.y)) {
return(.x)
} else if (is.null(.x)) {
return(.y)
}
full_join(.x, .y, by = by, copy = copy)
}, outdir = outdir, overwrite = overwrite, .progress = .progress)
return(res)
} else {
stop("merge_by_chunk_id is TRUE but shardkey(x) does NOT equal to shardkey(y). You may want to perform a hard_group_by() on both x and/or y or set merge_by_chunk_id = FALSE")
}
}
} |
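## Hedged usage sketch (assumes a configured disk.frame session):
# library(disk.frame)
# setup_disk.frame()
# a <- as.disk.frame(data.frame(id = 1:10, x = runif(10)))
# b <- data.frame(id = 6:15, y = letters[1:10])
# full_join(a, b, by = "id")  # y is a data.frame, so it is sharded to match a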
library(DHARMa)  # provides createData(), simulateResiduals() and the test*() helpers
testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0)
fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput, quantreg = TRUE)
testUniformity(simulationOutput)
testCategorical(simulationOutput, testData$group)
testDispersion(simulationOutput)
testOutliers(simulationOutput, type = "binomial")
testZeroInflation(simulationOutput)
countOnes <- function(x) sum(x == 1)
testGeneric(simulationOutput, summary = countOnes)
testGeneric(simulationOutput, summary = countOnes, alternative = "less")
means <- function(x) mean(x)
testGeneric(simulationOutput, summary = means)
spread <- function(x) sd(x)
testGeneric(simulationOutput, summary = spread) |
if (interactive()) pkgload::load_all(".")
if (get_boolean_envvar("IS_RUNIT") && get_run_r_tests()) {
test_r <- function() {
wd <- setwd(tempdir())
on.exit(setwd(wd))
path <- tempfile()
tempdir <- tempfile()
dir.create(tempdir)
packager::create(path, fakemake = FALSE)
on.exit(unlink(path, recursive = TRUE), add = TRUE)
description_path <- file.path(path, "DESCRIPTION")
lines <- readLines(description_path)
expectation <- fritools:::get_desc_value("Package", lines = lines)
result <- basename(path)
RUnit::checkIdentical(expectation, result)
RUnit::checkException(fritools:::get_current_tarball(tempdir))
tarball <- fritools:::r_cmd_build(path = path,
output_directory = tempdir)
package_tarball <- list.files(tempdir, pattern = "file.*\\.tar\\.gz",
full.names = TRUE)
RUnit::checkTrue(file.exists(tarball))
RUnit::checkIdentical(tarball, package_tarball)
touch(file.path(tempdir, "foo.tar.gz"))
expectation <- file.path(tempdir, "bar.tar.gz")
touch(expectation); touch(expectation)
RUnit::checkIdentical(fritools:::get_current_tarball(tempdir),
expectation)
package_tarball <- list.files(tempdir, pattern = "file.*\\.tar\\.gz",
full.names = TRUE)
fake_tarball <- sub("_.*\\.tar.gz", "_99.0.0.tar.gz", package_tarball)
file.copy(package_tarball, fake_tarball)
result <- fritools:::get_current_tarball(tempdir)
RUnit::checkIdentical(result, fake_tarball)
result <- fritools:::get_current_tarball(tempdir,
package_dir = tempdir)
RUnit::checkIdentical(result, fake_tarball)
result <- fritools:::get_current_tarball(tempdir, package_dir = path)
RUnit::checkIdentical(result, package_tarball)
RUnit::checkIdentical(fritools:::r_cmd_check(path = package_tarball),
0L)
RUnit::checkIdentical(r_cmd_install(path = package_tarball), 0L)
RUnit::checkIdentical(r_cmd_install(path = path), 0L)
RUnit::checkIdentical(r_cmd_install(path = path, try_tarball = FALSE),
0L)
}
if (interactive()) {
test_r()
}
} |
sot <- function(Sigma,A,ncores=1,...)
{
if (is.list(Sigma))
sot_list(Sigma,A,dim(Sigma[[1]])[1],dim(A[[1]])[3],ncores=ncores,...)
else sot_single(Sigma,A,dim(Sigma)[1],dim(A)[3],...)
}
sot_single <- function(Sigma,A,N=dim(Sigma)[1],H=dim(A)[3],perm=1:N)
{
res <- numeric(N*N)
dim(res) <- c(N,N)
scaling_factor <- 1/sqrt(.Call(C_fev,Sigma,A,N,H))
res[] <- 100*.sot_FAST(.Call(C_scaleSigma,Sigma,scaling_factor,N),.Call(C_scaleA,A,scaling_factor,N,H),N,H,perm)
dimnames(res) <- dimnames(Sigma)
res
}
sot_list <- function(Sigma,A,N=dim(Sigma[[1]])[1],H=dim(A[[1]])[3],perm=1:N,ncores=1)
{
len <- length(Sigma)
res <- vector("list",len)
  if ( (ncores!=1) && (!requireNamespace("parallel", quietly = TRUE)) )
  {
    warning("Parallelization not possible because package 'parallel' is not installed. Using single core version instead.")
    ncores <- 1
  }
if (ncores==1)
{
for (i in 1:len)
res[[i]] <- sot_single(Sigma[[i]],A[[i]],N,H,perm)
}
else
{
    if (ncores==0)
    {
      ncores <- parallel::detectCores()
      cat("Number of cores used:",ncores,"\n")
    }
    # requireNamespace() does not attach 'parallel', so qualify its functions
    splitted <- parallel::splitIndices(len,ncores)
    cl <- parallel::makeCluster(ncores)
    parallel::clusterEvalQ(cl, library(fastSOM))
    parallel::clusterExport(cl,c("Sigma","A","N","H","perm"),envir=environment())
    tmp <- parallel::clusterApply(cl,1:ncores,function(ind) sot_list(Sigma[splitted[[ind]]],A[splitted[[ind]]],N,H,perm,1))
    parallel::stopCluster(cl)
for (i in 1:ncores)
{
res[splitted[[i]]] <- tmp[[i]]
}
}
names(res) <- names(Sigma)
res
} |
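## Shape sketch for sot(): Sigma is N x N, A is N x N x H. The C kernels
## (C_fev, C_scaleSigma, ...) ship with fastSOM, so this only runs with the
## package loaded:
# N <- 2; H <- 3
# Sigma <- diag(N)
# A <- array(0, dim = c(N, N, H))
# for (h in 1:H) A[, , h] <- 0.5^h * diag(N)
# sot(Sigma, A)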
"dummyVars" <-
function(formula, ...){
UseMethod("dummyVars")
}
dummyVars.default <- function (formula, data, sep = ".", levelsOnly = FALSE, fullRank = FALSE, ...)
{
formula <- as.formula(formula)
if(!is.data.frame(data)) data <- as.data.frame(data, stringsAsFactors = FALSE)
vars <- all.vars(formula)
if(any(vars == "."))
{
vars <- vars[vars != "."]
vars <- unique(c(vars, colnames(data)))
}
isFac <- unlist(lapply(data[,vars,drop = FALSE], is.factor))
if(sum(isFac) > 0)
{
facVars <- vars[isFac]
lvls <- lapply(data[,facVars,drop = FALSE], levels)
if(levelsOnly)
{
tabs <- table(unlist(lvls))
if(any(tabs > 1))
{
stop(paste("You requested `levelsOnly = TRUE` but",
"the following levels are not unique",
"across predictors:",
paste(names(tabs)[tabs > 1], collapse = ", ")))
}
}
} else {
facVars <- NULL
lvls <- NULL
}
trms <- attr(model.frame(formula, data), "terms")
out <- list(call = match.call(),
form = formula,
vars = vars,
facVars = facVars,
lvls = lvls,
sep = sep,
terms = trms,
levelsOnly = levelsOnly,
fullRank = fullRank)
class(out) <- "dummyVars"
out
}
print.dummyVars <- function(x, ...)
{
cat("Dummy Variable Object\n\n")
cat("Formula: ")
print(x$form)
cat(length(x$vars), " variables, ", length(x$facVars), " factors\n", sep = "")
  if(!is.null(x$sep) && !x$levelsOnly) cat("Variables and levels will be separated by '",
      x$sep, "'\n", sep = "")
if(x$levelsOnly) cat("Factor variable names will be removed\n")
if(x$fullRank) cat("A full rank encoding is used") else cat("A less than full rank encoding is used")
cat("\n")
invisible(x)
}
predict.dummyVars <- function(object, newdata, na.action = na.pass, ...)
{
if(is.null(newdata)) stop("newdata must be supplied")
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata, stringsAsFactors = FALSE)
if(!all(object$vars %in% names(newdata))) stop(
paste("Variable(s)",
paste("'", object$vars[!object$vars %in% names(newdata)],
"'", sep = "",
collapse = ", "),
"are not in newdata"))
Terms <- object$terms
Terms <- delete.response(Terms)
if(!object$fullRank)
{
oldContr <- options("contrasts")$contrasts
newContr <- oldContr
newContr["unordered"] <- "contr.ltfr"
options(contrasts = newContr)
on.exit(options(contrasts = oldContr))
}
m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$lvls)
x <- model.matrix(Terms, m)
cnames <- colnames(x)
if(object$levelsOnly) {
for(i in object$facVars) {
for(j in object$lvls[[i]]) {
from_text <- paste0(i, j)
cnames[which(cnames == from_text)] <- j
}
}
}
  if(!is.null(object$sep) && !object$levelsOnly) {
for(i in object$facVars[order(-nchar(object$facVars))]) {
for(j in object$lvls[[i]]) {
from_text <- paste0(i, j)
to_text <- paste(i, j, sep = object$sep)
pos = which(cnames == from_text)
if (length(pos) > 1) {
if (which(object$lvls[[i]] == j) > 1) {
cnames[pos][cnames[pos-1] == paste(i, object$lvls[[i]][which(object$lvls[[i]] == j)-1], sep = object$sep)] <- to_text
} else {
cnames[pos][cnames[pos-1] == paste(object$facVars[order(-nchar(object$facVars))][which(object$facVars[order(-nchar(object$facVars))] == i) - 1], utils::tail(object$lvls[[object$facVars[order(-nchar(object$facVars))][which(object$facVars[order(-nchar(object$facVars))] == i) - 1]]],n=1), sep = object$sep)] <- to_text
}
} else {
cnames[pos] <- to_text
}
}
}
}
colnames(x) <- cnames
x[, colnames(x) != "(Intercept)", drop = FALSE]
}
contr.ltfr <- function (n, contrasts = TRUE, sparse = FALSE)
{
if (is.numeric(n) && length(n) == 1L) {
if (n > 1L)
levels <- as.character(seq_len(n))
else stop("not enough degrees of freedom to define contrasts")
}
else {
levels <- as.character(n)
n <- length(n)
}
contr <- .RDiag(levels, sparse = sparse)
if (contrasts) {
if (n < 2L) stop(gettextf("contrasts not defined for %d degrees of freedom", n - 1L), domain = NA)
}
contr
}
contr.dummy <- function(n, ...)
{
if (is.numeric(n) && length(n) == 1L) {
if (n > 1L)
levels <- as.character(seq_len(n))
else stop("not enough degrees of freedom to define contrasts")
}
else {
levels <- as.character(n)
n <- length(n)
}
out <- diag(n)
rownames(out) <- levels
colnames(out) <- levels
out
}
class2ind <- function(x, drop2nd = FALSE) {
if(!is.factor(x)) stop("'x' should be a factor")
y <- model.matrix(~ x - 1)
colnames(y) <- gsub("^x", "", colnames(y))
attributes(y)$assign <- NULL
attributes(y)$contrasts <- NULL
if(length(levels(x)) == 2 & drop2nd) {
y <- y[,1]
}
y
} |
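## class2ind sketch: one-hot encode a factor (runs with the definition above):
class2ind(factor(c("a", "b", "a", "c")))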
set.seed(9)
rm(list = ls())
suppressMessages(library("knitr"))
suppressMessages(library("ChemoSpec"))
suppressMessages(library("mclust"))
suppressMessages(library("RColorBrewer"))
suppressMessages(library("ggplot2"))
suppressMessages(library("patchwork"))
CSdesc <- packageDescription("ChemoSpec")
CSUdesc <- packageDescription("ChemoSpecUtils")
tmp <- knitr::write_bib(c("knitr", "mclust", "baseline", "hyperSpec", "ggplot2", "plotly"), file = "manuals.bib", prefix = "R_")
knitr::opts_hooks$set(sq.fig = function(options) {
if (isFALSE(options$sq.fig)) {
if ((!is.null(options$fig.width)) & (!is.null(options$fig.height))) return(options)
if ((is.null(options$fig.width)) & (is.null(options$fig.height))) {
options$fig.width = 6
options$fig.height = 3.5
}
}
if (isTRUE(options$sq.fig)) {
options$fig.width = 5
options$fig.height = 5
}
options
})
knitr::opts_chunk$set(fig.align = "center", sq.fig = FALSE,
fig.width = NULL, fig.height = NULL, out.width = "80%")
knitr::include_graphics("MetabPreProcess.png")
data(SrE.IR)
sumSpectra(SrE.IR)
myt <- expression(bolditalic(Serenoa)~bolditalic(repens)~bold(Extract~IR~Spectra))
p <- plotSpectra(SrE.IR, which = c(1, 2, 14, 16), yrange = c(0, 1.6),
offset = 0.4, lab.pos = 2200)
p <- p + ggtitle(myt)
p
p <- plotSpectra(SrE.IR, which = c(1, 2, 14, 16), yrange = c(0, 0.6),
offset = 0.1, lab.pos = 1775)
p <- p + ggtitle(myt) + coord_cartesian(xlim = c(1650, 1800))
p
SrE.IR$names
grep("OO", SrE.IR$names)
SrE2.IR <- baselineSpectra(SrE.IR, int = FALSE, method = "modpolyfit", retC = TRUE)
tmp <- binSpectra(SrE.IR, bin.ratio = 4)
sumSpectra(tmp)
noTD <- removeSample(SrE2.IR, rem.sam = c("TD_adSrE"))
sumSpectra(noTD)
grep("TD_adSrE", noTD$names)
SrE <- grep("SrE", SrE2.IR$names)
SrE2.IR$names[SrE]
SrE
p <- surveySpectra(SrE2.IR, method = "iqr", by.gr = FALSE)
p <- p + ggtitle(myt)
p
p <- surveySpectra2(SrE2.IR, method = "iqr")
p <- p + ggtitle(myt)
p
p <- surveySpectra(SrE2.IR, method = "iqr", by.gr = FALSE)
p <- p + ggtitle("Detail of Carbonyl Region") + coord_cartesian(xlim = c(1650, 1800))
p
p <- surveySpectra(SrE2.IR, method = "iqr", by.gr = TRUE)
p <- p + ggtitle("Detail of Carbonyl Region") + coord_cartesian(xlim = c(1650, 1800))
p
p <- surveySpectra(SrE2.IR, method = "iqr", by.gr = FALSE)
p <- p + ggtitle("An Uninteresting Region") +
coord_cartesian(xlim = c(1800, 2500), ylim = c(0.0, 0.03))
p
SrE3.IR <- removeFreq(SrE2.IR, rem.freq = SrE2.IR$freq > 1800 & SrE2.IR$freq < 2500)
sumSpectra(SrE3.IR)
check4Gaps(SrE3.IR$freq, SrE3.IR$data[1,])
HCA <- hcaSpectra(SrE3.IR, main = myt)
c_res <- c_pcaSpectra(SrE3.IR, choice = "noscale")
p <- plotScores(SrE3.IR, c_res, pcs = c(1,2), ellipse = "rob", tol = 0.01)
p <- p + plot_annotation(myt)
p
r_res <- r_pcaSpectra(SrE3.IR, choice = "noscale")
p <- plotScores(SrE3.IR, r_res, pcs = c(1,2), ellipse = "rob", tol = 0.01)
p
p <- diagnostics <- pcaDiag(SrE3.IR, c_res, pcs = 2, plot = "OD")
p
p <- diagnostics <- pcaDiag(SrE3.IR, c_res, pcs = 2, plot = "SD")
p
p <- plotScree(c_res) + ggtitle(myt)
p
p <- plotScree(c_res, style = "trad") + ggtitle(myt)
p
out <- cv_pcaSpectra(SrE3.IR, pcs = 5)
p <- plotLoadings(SrE3.IR, c_res, loads = c(1, 2), ref = 1)
p <- p & ggtitle(myt)
p
p <- plot2Loadings(SrE3.IR, c_res, loads = c(1, 2), tol = 0.001)
p <- p + ggtitle(myt)
p
p <- sPlotSpectra(SrE3.IR, c_res, pc = 1, tol = 0.001)
p <- p + ggtitle(myt)
p
p <- sPlotSpectra(SrE3.IR, c_res, pc = 1, tol = 0.001)
p <- p + coord_cartesian(xlim = c(-0.04, -0.01), ylim = c(-1.05, -0.9))
p <- p + ggtitle("Detail of s-Plot")
p
knitr::include_graphics("aovPCA2.png")
knitr::include_graphics("aovPCA1.png")
model <- mclustSpectra(SrE3.IR, c_res, plot = "BIC", main = myt)
model <- mclustSpectra(SrE3.IR, c_res, plot = "proj", main = myt)
model <- mclustSpectra(SrE3.IR, c_res, plot = "errors", main = myt, truth = SrE3.IR$groups)
data(Col7)
data(Col12)
data(Sym12)
data(Col8)
data(Sym8)
auto <- RColorBrewer::brewer.pal(8, "Set1")
sp <- 0.75
tsp <- 0.15
h <- 0.25
y <- 0.0
plot(1:12, rep(0.0, 12),
type = "n", yaxt = "n", xaxt = "n", bty = "n",
xlab = "", ylab = "", ylim = c(0, 3.5)
)
text(6.5, y + h + tsp * 4 + sp * 3.5,
labels = "Automatic Color & Symbol Options", cex = 1.25, font = 2
)
for (i in 1:12) {
rect(i - 0.5, y, i + 0.5, y + h, border = NA, col = Col12[i])
}
points(1:12, rep(y + h + tsp, 12), pch = Sym12)
text(0.6, y + h + tsp * 2, adj = 0,
labels = "gr.cols = 'Col12' 12 mostly paired distinct colors/symbols"
)
for (i in 1:8) {
rect(i - 0.5, y + sp, i + 0.5, y + sp + h, border = NA, col = Col8[i])
}
points(1:8, rep(y + h + tsp + sp, 8), pch = Sym8)
text(0.6, y + h + tsp * 2 + sp, adj = 0,
labels = "gr.cols = 'Col8' 8 distinct colors/symbols"
)
for (i in 1:8) {
rect(i - 0.5, y + sp * 2, i + 0.5, y + sp * 2 + h, border = NA, col = auto[i])
}
points(1:8, rep(y + h + tsp + sp * 2, 8), pch = Sym8)
text(0.6, y + h + tsp * 2 + sp * 2, adj = 0,
labels = "gr.cols = 'auto' 8 distinct colors/symbols"
)
for (i in 1:7) {
rect(i - 0.5, y + sp * 3, i + 0.5, y + sp * 3 + h, border = NA, col = Col7[i])
}
points(1:7, rep(y + h + tsp + sp * 3, 7), pch = Sym8[1:7])
text(0.6, y + h + tsp * 2 + sp * 3, adj = 0,
labels = "gr.cols = 'Col7' 7 colorblind-friendly colors"
) |
span(
h4(Mod6Step1_txt$title),
p(HTML(Mod6Step1_txt$subgoal)),
p(HTML(Mod6Step1_txt$intro)),
p(HTML(Mod6Step1_txt$exercise)),
p(paste0("$$",NOT$trait.1,"_{",NOT$time, NOT$ind,"}=
",EQ3$mean0,"+
",NOT$devI,"_",NOT$ind,"+
",NOT$mean," ",NOT$env,"_{",NOT$time, NOT$ind,"}+
",NOT$error,"_{",NOT$time, NOT$ind,"}$$")),
displayRCode(Mod6Step1_txt$RCode1),
p(HTML(Mod6Step1_txt$para1)),
p(paste0("$$",NOT$trait.1,"_{",NOT$time, NOT$ind,"}=
",EQ3$mean0,"+
",NOT$devI,"_",NOT$ind,"+
(",NOT$mean,"+", NOT$devS,"_", NOT$ind,")", NOT$env,"_{",NOT$time, NOT$ind,"}+
",NOT$error,"_{",NOT$time, NOT$ind,"}$$")),
displayRCode(Mod6Step1_txt$RCode2),
p(HTML(Mod6Step1_txt$para2)),
getSliderInput("Mod6Step1_NI", Modules_VAR$NI),
getSliderInput("Mod6Step1_NR", Modules_VAR$NR),
fluidRow(
column(8,getSliderInput("Mod6Step1_Vi", Modules_VAR$Vi)),
column(4,textOutput("Mod6Step1_Vi_proportion", inline = TRUE))
),
fluidRow(
column(8,getSliderInput("Mod6Step1_Ve", Modules_VAR$Ve)),
column(4,textOutput("Mod6Step1_Ve_proportion", inline = TRUE))
),
div(info_msg(Mod6Step1_txt$note)),
fluidRow(
column(8,getSliderInput("Mod6Step1_Vbx", Modules_VAR$Vbx)),
column(4,textOutput("Mod6Step1_Vbx_proportion", inline = TRUE))
),
fluidRow(
column(8,getSliderInput("Mod6Step1_Vs", Modules_VAR$Vsx)),
column(4,textOutput("Mod6Step1_Vs_proportion", inline = TRUE))
),
conditionalPanel(
condition = "0",
uiOutput("Mod6Step1_hidden")
),
p(HTML(Mod6Step1_txt$para3)),
actionButton("Mod6Step1_Run", label = Modules_VAR$Run$label, icon= Modules_VAR$Run$icon, class="runButton"),
runningIndicator(),
sim_msg(),
p(HTML(Mod6Step1_txt$results)),
uiOutput("Mod6Step1_summary_table"),
p(HTML(Mod6Step1_txt$para4)),
p(plotOutput("Mod6Step1_plot", width = Modules_VAR$Plot$width)),
p(HTML(Mod6Step1_txt$point)),
p(HTML(module1_txt$statModTitle)),
p(paste0("$$",NOT$trait.1,"_{",NOT$time, NOT$ind,"}=
",EQ3$mean0,"+
",NOT$devI,"_",NOT$ind,"+
(",NOT$mean,"+", NOT$devS,"_", NOT$ind,")", NOT$env,"_{",NOT$time, NOT$ind,"}+
",NOT$error,"_{",NOT$time, NOT$ind,"}$$")),
p(paste0("$$V_",NOT$total,"=
V_",NOT$devI,"+
V_{",NOT$mean,"}+
V_{",NOT$devS,"}+
V_",NOT$residualUpper,"$$")),
p("where"),
p(paste0("$$V_{",NOT$mean,"}=",NOT$mean,"^2Var(",NOT$env,")=",NOT$mean,"^2$$")),
p(paste0("$$V_{",NOT$devS,"}=Var(",NOT$devS,")Var(",NOT$env,")+E(",NOT$env,")^2Var(",NOT$devS,")=Var(",NOT$devS,")$$")),
p(paste0("Note that $Var(",NOT$env,")$ is the true variance in $",NOT$env,"$, and $E(",NOT$env,")$ is the true mean of $",NOT$env,"$.
Also, in SQuID each environmental variable $(",NOT$env,")$ is standardized (i.e., $Var(",NOT$env,")=1$ and $E(",NOT$env,")=0$)$")),
displayRCode(Mod6Step1_txt$RCode),
div(class="line"),
actionLink("Mod6Step1GotoStep2",
label = "Next Step (2) >>",
class= "linkToModuleSteps")
) |
BIAR.phi.kalman<-function (x, y1, y2, t, yerr1, yerr2, zero.mean = "TRUE")
{
sigmay <- var(cbind(y1,y2))
if (zero.mean == "FALSE")
{
y1 = y1 - mean(y1)
y2 = y2 - mean(y2)
}
n = length(y1)
Sighat = sigmay %*% matrix(c(1, 0, 0, 1), 2, 2)
xhat = matrix(0, nrow = 2, ncol = n)
delta <- diff(t)
Q = Sighat
phi.R = x[1]
phi.I = x[2]
F = matrix(0, nrow = 2, ncol = 2)
G = diag(2)
phi = complex(1, real = phi.R, imaginary = phi.I)
sum.Lambda = 0
sum.error = 0
    phi = ifelse(is.na(phi), 1.1, phi)
y=rbind(y1,y2)
if (Mod(phi) < 1) {
Phi = Mod(phi)
psi <- acos(phi.R/Phi)
if (phi.I < 0 & Mod(phi) < 1)
psi = -acos(phi.R/Phi)
for (i in 1:(n - 1)) {
R= matrix(c(yerr1[i + 1]^2,0,0,yerr2[i + 1]^2),2,2)
Lambda = G %*% Sighat %*% t(G) + R
if (det(Lambda) <= 0 | length(which(is.na(Lambda)))>0) {
sum.Lambda <- n * 1e+10
break
}
phi2.R <- (Phi^delta[i]) * cos(delta[i] * psi)
phi2.I <- (Phi^delta[i]) * sin(delta[i] * psi)
F[1, 1] = phi2.R
F[1, 2] = -phi2.I
F[2, 1] = phi2.I
F[2, 2] = phi2.R
phi2 <- 1 - Mod(phi^delta[i])^2
Qt <- phi2 * Q
sum.Lambda = sum.Lambda + log(det(Lambda))
Theta=F%*%Sighat%*%t(G)
sum.error = sum.error + t(y[,i] - G %*% xhat[, i]) %*% solve(Lambda) %*% (y[,i] - G %*% xhat[, i])
xhat[,i+1]=F%*%xhat[,i]+Theta%*%solve(Lambda)%*%(y[,i]-G%*%xhat[,i])
if(sum(R)==0)
Sighat=Qt + F%*%(Sighat-t(Sighat))%*%t(F)
else
Sighat=F%*%Sighat%*%t(F)+ Qt - Theta%*%solve(Lambda)%*%t(Theta)
}
yhat = G %*% xhat
        out <- ifelse(is.na(sum.Lambda), 1e+10, (sum.Lambda +
            sum.error)/2)
}
else out = 1e+10
return(out)
} |
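## Hedged sketch: the function returns (half) the negative log-likelihood of
## the bivariate AR state-space model at phi = c(phi.R, phi.I), so it is
## meant to be minimized by an optimizer; yerr = 0 means no measurement error:
# set.seed(1); n <- 200; t <- 1:n
# y1 <- as.numeric(arima.sim(list(ar = 0.8), n))
# y2 <- as.numeric(arima.sim(list(ar = 0.8), n))
# BIAR.phi.kalman(c(0.8, 0.1), y1, y2, t, yerr1 = rep(0, n), yerr2 = rep(0, n))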
print.peperr <- function(x, ...){
cat("ESTIMATION OF PREDICTION ERROR","\n", "Data split in ",
length(x$indices$sample.index), " samples","\n", sep="")
if (is.list(x$selected.complexity)){
      if (!isTRUE(all.equal(x$complexity, x$selected.complexity))){
cat("Selected complexity in full data set: ", "\n")
print(unlist(x$selected.complexity))
cat("\n", "Selected sample complexity: ", "\n")
for (i in 1:(length(x$sample.complexity)/2)){
print(x$sample.complexity[c((i*2)-1, i*2)])
}
} else {
cat("Passed complexity: ", "\n")
print(x$complexity)
}
} else {
      if (!isTRUE(all.equal(x$complexity, x$selected.complexity))){
cat("Selected complexity in full data set: ", x$selected.complexity, "\n")
cat("Selected sample complexity: ", x$sample.complexity, "\n")
} else {
cat("Passed complexity: ", x$sample.complexity, "\n")
}
}
} |
library(Ostats)
context("Ostats_multivariate")
iris_traits <- as.matrix(iris[,1:4])
result1 <- Ostats_multivariate(traits = iris_traits, plots = factor(rep(c('a','b'),times=75)), sp = iris$Species, random_seed = 111, run_null_model = FALSE, hypervolume_args = list(method = 'box'), hypervolume_set_args = list(num.points.max = 1000))
result1 <- as.numeric(result1$overlaps_norm)
expected1 <- c(0.755, 0.783)
test_that (
"Ostats_multivariate returns expected output",
{
expect_equal(result1, expected1, tolerance = 0.01)
}
)
result2 <- Ostats_multivariate(traits = iris_traits[,1,drop=FALSE], plots = factor(rep(c('a','b'),times=75)), sp = iris$Species, random_seed = 111, run_null_model = FALSE)
expected2 <- Ostats(traits = iris_traits[,1,drop=FALSE], plots = factor(rep(c('a','b'),times=75)), sp = iris$Species, random_seed = 111, run_null_model = FALSE)
test_that (
"Ostats_multivariate returns the same output as Ostats if one trait is selected",
{
expect_equivalent(result2$overlaps_norm, expected2$overlaps_norm, tolerance = 0.01)
}
)
result3 <- Ostats_multivariate(traits = iris_traits, plots = factor(rep(c('a','b'),times=75)), sp = iris$Species, random_seed = 111, run_null_model = FALSE,
hypervolume_args = list(method = 'box'), hypervolume_set_args = list(distance.factor = 2, num.points.max = 1000))
result3 <- as.numeric(result3$overlaps_norm)
expected3 <- c(0.93, 0.95)
test_that (
"Ostats_multivariate correctly handles arguments to hypervolume functions",
{
expect_equal(result3, expected3, tolerance = 0.01)
}
) |
NULL
.knitEnv = new.env()
.knitEnv$meta = list()
`$.knitr_strict_list` = function(x, name) x[[name]]
as.strict_list = function(x) {
if (!is.list(x)) stop("'x' is not a list")
class(x) = 'knitr_strict_list'
x
} |
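## Behavior sketch: `$` on a knitr_strict_list does not partial-match names:
sl <- as.strict_list(list(alpha = 1))
sl$alpha  # 1
sl$alp    # NULL; a plain list's `$` would partial-match to 'alpha'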
context("add_theme and rshiny-blue testing")
test_that("Function fails for wrong inputs", {
expect_error(mtcars %>%
tableHTML() %>%
add_theme('abc'),
'should be one of ')
expect_error(mtcars %>%
tableHTML() %>%
add_theme('rshiny-blue'),
NA)
expect_true(grepl('"background-color:
mtcars %>%
tableHTML() %>%
add_theme('rshiny-blue')))
expect_true(grepl('style="vertical-align:top;background-color:white;"',
tableHTML(mtcars,
rownames = TRUE,
widths = c(110, 140, rep(50, 11)),
row_groups = list(c(10, 10, 12), c('Group 1', 'Group 2', 'Group 3')),
second_headers = list(c(2, 5, 6), c('', 'col2', 'col3'))) %>%
add_theme('rshiny-blue')))
expect_true(grepl('style="font-size:25px;',
tableHTML(mtcars,
rownames = TRUE,
widths = c(110, 140, rep(50, 11)),
row_groups = list(c(10, 10, 12), c('Group 1', 'Group 2', 'Group 3')),
second_headers = list(c(2, 5, 6), c('', 'col2', 'col3'))) %>%
add_theme('rshiny-blue')))
}) |
library(CREAM)
context("WindowSizeRecog")
test_that("Specify window size for each order of COREs", {
InputData <- read.table("A549_Chr21.bed", sep="\t")
colnames(InputData) <- c("chr", "start", "end")
MinLength <- 1000
if(nrow(InputData) < MinLength){
stop(paste( "Number of functional regions is less than ", MinLength, ".", sep = "", collapse = ""))
}
peakNumMin <- 2
WScutoff <- 1.5
WindowSize <- WindowSizeRecog(InputData, peakNumMin, WScutoff)
expect_equal_to_reference(WindowSize, "windowSize.rds")
}) |
options(width=49)
options(prompt=" ")
options(continue=" ")
library(mactivate)
set.seed(777)
d <- 11
N <- 3000
X <- matrix(rnorm(N*d, 1, 1), N, d)
colnames(X) <- paste0("x", I(1:d))
b <- rep_len( c(-1, 1), d )
ystar <-
X %*% b +
1/3 * X[ , 1] * X[ , 2] * X[ , 3] -
1/3 * X[ , 3] * X[ , 4] * X[ , 5] * X[ , 6] +
1/2 * X[ , 8] * X[ , 9] -
2 * X[ , 1] * X[ , 2] * X[ , 7] * X[ , 11]
xtrue_formula <- eval(parse(text="y ~ . + x1:x2:x3 + x3:x4:x5:x6 + x8:x9 + x1:x2:x7:x11"))
xnoint_formula <- eval(parse(text="y ~ ."))
errs <- rnorm(N, 0, 3)
y <- ystar + errs
Xall <- X
yall <- y
Nall <- N
dfx <- data.frame("y"=yall, Xall)
xlm <- lm(y ~ . , data=dfx)
yhat <- predict(xlm, newdata=dfx)
sqrt( mean( (yall - yhat)^2 ) )
xlm <- lm(xtrue_formula , data=dfx)
yhat <- predict(xlm, newdata=dfx)
sqrt( mean( (yall - yhat)^2 ) )
xcmact_hybrid <-
f_control_mactivate(
param_sensitivity = 10^10,
w0_seed = 0.1,
w_col_search = "one",
bool_headStart = FALSE,
max_internal_iter = 500,
ss_stop = 10^(-8),
escape_rate = 1.01,
Wadj = 1/1,
tol = 10^(-8)
)
m_tot <- 5
Uall <- Xall
xxnow <- Sys.time()
xxls_out <-
f_fit_hybrid_01(
X = Xall,
y = yall,
m_tot = m_tot,
U = Uall,
m_start = 1,
mact_control = xcmact_hybrid,
verbosity = 5
)
cat( difftime(Sys.time(), xxnow, units="mins"), "\n" )
class(xxls_out)
yhatall <- predict(object=xxls_out, X0=Xall, U0=Uall, mcols=m_tot)
sqrt( mean( (yall - yhatall)^2 ) ) |
DBR <- function(raw.pvalues, pCDFlist, alpha = 0.05, lambda = NULL, ret.crit.consts = FALSE){
if(is.null(alpha) || is.na(alpha) || !is.numeric(alpha) || alpha < 0 || alpha > 1)
stop("'alpha' must be a probability between 0 and 1!")
if (is.null(lambda)){
lambda <- alpha
}else{
if(is.na(lambda) || !is.numeric(lambda) || lambda < 0 || lambda > 1)
stop("'lambda' must be a probability between 0 and 1!")
}
m <- length(raw.pvalues)
if(m != length(pCDFlist)) stop("The lengths of 'raw.pvalues' and 'pCDFlist' must be equal!")
pvec <- match.pvals(pCDFlist, raw.pvalues)
o <- order(pvec)
sorted.pvals <- pvec[o]
pv.list.all <- sort(unique(as.numeric(unlist(pCDFlist))))
if(ret.crit.consts){
y <- kernel_DBR_crit(pCDFlist, pv.list.all, sorted.pvals, lambda, alpha)
crit.constants <- y$crit.consts
idx <- which(sorted.pvals <= crit.constants)
}
else{
y <- kernel_DBR_fast(pCDFlist, sorted.pvals, lambda)
idx <- which(y <= alpha)
}
m.rej <- length(idx)
if(m.rej){
idx <- which(pvec <= sorted.pvals[m.rej])
pvec.rej <- raw.pvalues[idx]
}else{
idx <- integer(0)
pvec.rej <- numeric(0)
}
output <- list(Rejected = pvec.rej, Indices = idx, Alpha = m.rej * alpha / m, Num.rejected = m.rej, Lambda = lambda)
if(ret.crit.consts){
output$Critical.values = crit.constants
pv.adj <- rev(cummin(rev(pmin(y$pval.transf, 1))))
}
else{
pv.adj <- rev(cummin(rev(pmin(y, 1))))
}
ro <- order(o)
output$Adjusted = pv.adj[ro]
output$Method <- paste("Discrete Blanchard-Roquain procedure (lambda = ", lambda, ")", sep = "")
output$Signif.level <- alpha
output$Tuning <- lambda
output$Data <- list()
output$Data$raw.pvalues <- raw.pvalues
output$Data$pCDFlist <- pCDFlist
output$Data$data.name <- paste(deparse(substitute(raw.pvalues)), "and", deparse(substitute(pCDFlist)))
class(output) <- "DiscreteFDR"
  return(output)
} |
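## Hedged usage sketch: DBR() needs the discrete p-value supports (pCDFlist);
## match.pvals() and the kernel_DBR_* routines are DiscreteFDR internals, so
## this only runs with the package loaded:
# raw <- c(0.01, 0.2, 0.04, 0.5, 0.03)
# supports <- rep(list(seq(0, 1, by = 0.01)), length(raw))
# DBR(raw, supports, alpha = 0.05, ret.crit.consts = TRUE)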
item_replace_files <- function(sb_id, files, ..., all=FALSE, session=current_session()){
if(length(files) > 50){
warning('Trying to attach a large number of files to a SB item. SB imposes file limits which may cause this to fail')
}
if(all){
item_rm_files(sb_id, ..., session=session)
}else{
item_rm_files(sb_id, files, ..., session=session)
}
item_append_files(sb_id, files = files, ..., session=session)
} |
"NO2_2013" |
# Fractional differencing (1 - L)^d applied to x via FFT-based circular
# convolution; b holds the binomial-expansion coefficients of (1 - L)^d.
fdiff <- function(x, d){
  iT <- length(x)
  np2 <- nextn(2*iT - 1, 2)          # pad length for the FFT
  k <- 1:(iT-1)
  b <- c(1, cumprod((k - d - 1)/k))  # expansion coefficients
  dx <- fft(fft(c(b, rep(0, np2-iT)))*
            fft(c(x, rep(0, np2-iT))), inverse=TRUE)/np2
  return(Re(dx[1:iT]))
} |
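## Sanity-check sketch: with d = 0 the coefficient vector b collapses to
## c(1, 0, ..., 0), so fdiff() returns x up to FFT round-off:
set.seed(1)
x.chk <- rnorm(8)
all.equal(fdiff(x.chk, 0), x.chk)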
datagrid <- function(data = list(),
...,
sortable = TRUE,
pagination = NULL,
filters = FALSE,
colnames = NULL,
colwidths = "fit",
align = "auto",
theme = c("clean", "striped", "default"),
draggable = FALSE,
data_as_input = FALSE,
contextmenu = FALSE,
width = NULL,
height = NULL,
elementId = NULL) {
data <- as.data.frame(data)
theme <- match.arg(theme)
filters_type <- simple_filters(data)
if (!is.vector(colnames)) {
colnames <- names(data)
} else if (!identical(length(colnames), ncol(data))) {
warning(
"datagrid: if provided, 'colnames' must be a vector of same length as number of cols in data.",
call. = FALSE
)
colnames <- names(data)
}
options <- list(
columns = lapply(
X = seq_along(names(data)),
FUN = function(i) {
nm <- names(data)[i]
dropNulls(list(
header = colnames[i],
name = nm,
sortable = isTRUE(sortable),
filter = if (isTRUE(filters)) filters_type[[nm]]
))
}
),
bodyHeight = "fitToParent",
draggable = draggable
)
options <- modifyList(x = options, val = list(...), keep.null = FALSE)
if (!isTRUE(contextmenu))
options <- c(options, list(contextMenu = NULL))
if (!is.null(pagination)) {
options$pageOptions <- list(
perPage = pagination,
useClient = TRUE
)
options$bodyHeight <- "auto"
}
if (is.null(options$rowHeight))
options$rowHeight <- "auto"
x <- dropNulls(list(
data_df = data,
nrow = nrow(data),
ncol = ncol(data),
data = data,
colnames = names(data),
options = options,
theme = theme,
themeOptions = getOption(
x = "datagrid.theme",
default = list(
cell = list(
normal = list(
showHorizontalBorder = TRUE
)
)
)
),
language = getOption("datagrid.language", default = "en"),
languageOptions = getOption("datagrid.language.options", default = list()),
filters = filters,
rowAttributes = list(),
updateEditOnClick = NULL,
validationInput = FALSE,
dataAsInput = data_as_input,
dragInput = isTRUE(draggable)
))
widget <- createWidget(
name = "datagrid",
x = x,
width = width,
height = height,
package = "toastui",
elementId = elementId,
preRenderHook = function(widget) {
widget$x$data_df <- NULL
widget$x$data <- unname(widget$x$data)
widget
},
sizingPolicy = sizingPolicy(
defaultWidth = "100%",
defaultHeight = "auto",
viewer.defaultHeight = "100%",
viewer.defaultWidth = "auto",
viewer.fill = TRUE,
viewer.suppress = FALSE,
viewer.padding = 0,
knitr.figure = FALSE,
knitr.defaultWidth = "100%",
knitr.defaultHeight = `if`(identical(options$bodyHeight, "auto"), "auto", "600px"),
browser.fill = TRUE,
browser.external = TRUE
)
)
if (!is.null(align)) {
align <- match.arg(align, choices = c("auto", "left", "right", "center"))
if (identical(align, "auto")) {
widget <- grid_columns(
grid = widget,
align = get_align(data)
)
} else {
widget <- grid_columns(
grid = widget,
align = align
)
}
}
if (identical(colwidths, "guess")) {
widget <- grid_columns(
grid = widget,
minWidth = nchar_cols(
data = data,
add_header = isTRUE(sortable) * 10 + isTRUE(filters) * 10
),
whiteSpace = "normal",
renderer = list(
styles = list(
wordBreak = "normal"
)
)
)
} else if (identical(colwidths, "fit")) {
widget <- grid_columns(
grid = widget,
columns = names(data),
width = NULL,
whiteSpace = "normal",
renderer = list(
styles = list(
wordBreak = "normal"
)
)
)
} else {
widget <- grid_columns(
grid = widget,
columns = names(data),
width = colwidths
)
}
return(widget)
}
datagrid_html <- function(id, style, class, ...) {
tags$div(
id = id,
class = class,
style = style,
style = "margin-bottom: 15px;",
tags$div(
id = paste0(id, "-container"), class = class, style = style, ...
)
)
} |
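## Minimal usage sketch (datagrid() relies on toastui internals such as
## dropNulls() and simple_filters(), so run it with the package loaded):
# library(toastui)
# datagrid(iris, pagination = 10, filters = TRUE, theme = "striped")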
make_exercise_transform_html <- function(converter = c("ttm", "tth", "pandoc", "tex2image"), base64 = TRUE, ...)
{
options <- strsplit(converter, "-", fixed = TRUE)[[1L]]
converter <- match.arg(options[1L], c("ttm", "tth", "pandoc", "tex2image"))
options <- options[-1L]
options <- if(length(options) > 0L) {
paste0("--", options, collapse = " ")
} else {
"--mathml"
}
if(converter %in% c("tth", "ttm")) {
stopifnot(requireNamespace("tth"))
} else if(converter == "pandoc") {
stopifnot(requireNamespace("rmarkdown"))
}
if(is.null(base64)) base64 <- c("bmp", "gif", "jpeg", "jpg", "png", "svg")
base64 <- if(isTRUE(base64)) {
base64 <- .fileURI_mime_types[, "ext"]
} else {
if(is.logical(base64)) NA_character_ else tolower(base64)
}
if(b64 <- !all(is.na(base64))) stopifnot(requireNamespace("base64enc"))
if(converter == "pandoc") {
make_exercise_transform_pandoc(to = "html", base64 = base64, options = options, ...)
} else if(converter == "tex2image") {
function(x)
{
bsname <- if(is.null(x$metainfo$file)) basename(tempfile()) else x$metainfo$file
sdir <- attr(x$supplements, "dir")
images <- list(); inames <- NULL
for(i in c("question", "questionlist", "solution", "solutionlist")) {
if(!is.null(x[[i]])) {
if(grepl("list", i)) {
images <- c(images, as.list(x[[i]]))
inames <- c(inames, paste(i, 1:length(x[[i]]), sep = "_"))
} else {
images <- c(images, list(x[[i]]))
inames <- c(inames, i)
}
}
}
names(images) <- inames
dir <- tex2image(images, width = 6, idir = sdir, name = bsname, ...)
inames <- file_path_sans_ext(basename(dir))
if(b64) {
for(i in seq_along(dir))
          dir[i] <- sprintf('<img src="%s" />', base64enc::dataURI(file = dir[i],
            mime = paste('image', file_ext(dir[i]), sep = '/')))
for(sf in dir(sdir)) {
if(length(grep(file_ext(sf), base64, ignore.case = TRUE))) {
file.remove(file.path(sdir, sf))
x$supplements <- x$supplements[!grepl(sf, x$supplements)]
attr(x$supplements, "dir") <- sdir
}
}
}
for(i in c("question", "questionlist", "solution", "solutionlist")) {
if(!is.null(x[[i]])) {
if(grepl("list", i)) {
j <- grep(i, inames)
} else {
j <- grep(i, inames)
j <- j[!grepl("list", inames[j])]
}
x[[i]] <- if(b64) {
dir[j]
} else {
paste("<img src=\"", file.path(sdir, basename(dir[j])), "\" />", sep = "")
}
names(x[[i]]) <- inames[j]
}
}
if(!b64) {
for(i in dir) {
fp <- file.path(sdir, basename(i))
file.copy(i, fp)
if(!(fp %in% x$supplements))
x$supplements <- c(x$supplements, fp)
}
attr(x$supplements, "dir") <- sdir
}
x
}
} else {
apply_ttx_on_list <- function(object, converter = "ttm",
sep = "\\007\\007\\007\\007\\007", ...)
{
empty <- sapply(object, identical, "")
object <- lapply(object, c, sep)
rval <- switch(converter,
"tth" = tth::tth(unlist(object), ...),
"ttm" = tth::ttm(unlist(object), ...)
)
img <- attr(rval, "images")
ix <- grepl(sep, rval, fixed = TRUE)
rval <- split(rval, c(0, head(cumsum(ix), -1L)))
names(rval) <- rep(names(object), length.out = length(rval))
cleansep <- function(x) {
n <- length(x)
if(n < 1L) return(x)
if(x[n] == sep) return(x[-n])
return(c(x[-n], gsub(sep, "", x[n], fixed = TRUE)))
}
rval <- lapply(rval, cleansep)
if(any(empty)) rval[empty] <- rep.int(list(""), sum(empty))
attr(rval, "images") <- img
rval
}
function(x)
{
      owd <- getwd()
      on.exit(setwd(owd), add = TRUE)   # restore the working directory even on error
      setwd(sdir <- attr(x$supplements, "dir"))
what <- c(
"question" = list(x$question),
"questionlist" = as.list(x$questionlist),
"solution" = list(x$solution),
"solutionlist" = as.list(x$solutionlist)
)
      ## convert all chunks in a single tth/ttm call
      trex <- apply_ttx_on_list(what, converter, ...)
namtrex <- names(trex)
if(b64 && length(sfiles <- dir(sdir))) {
for(sf in sfiles) {
if(any(grepl(sf, unlist(trex), fixed = TRUE)) && file_ext(sf) %in% base64) {
          ## substitution table: protect alt="..." labels with a placeholder,
          ## rewrite file references to data URIs, then restore the labels
          sfx <- rbind(
c(sprintf('alt="%s"', sf), 'alt="\\007\\007_exams_supplement_\\007\\007"'),
c(sprintf('href="%s"', sf), sprintf('href="%s" download="\\007\\007_exams_supplement_\\007\\007"', sf)),
c(sprintf('="%s"', sf), sprintf('="%s"', fileURI(file = sf))),
c('\\007\\007_exams_supplement_\\007\\007', sf)
)
for(i in seq_along(trex)) {
if(length(j <- grep(sf, trex[[i]], fixed = TRUE))) {
for(k in 1L:nrow(sfx)) trex[[i]][j] <- gsub(sfx[k, 1L], sfx[k, 2L], trex[[i]][j], fixed = TRUE)
}
}
file.remove(file.path(sdir, sf))
x$supplements <- x$supplements[!grepl(sf, x$supplements)]
}
}
attr(x$supplements, "dir") <- sdir
}
x$question <- trex$question
x$questionlist <- sapply(trex[grep("questionlist", namtrex)], paste, collapse = "\n")
x$solution <- trex$solution
x$solutionlist <- sapply(trex[grep("solutionlist", namtrex)], paste, collapse = "\n")
for(j in c("question", "questionlist", "solution", "solutionlist")) {
if(length(x[[j]]) < 1L) x[[j]] <- NULL
}
setwd(owd)
x
}
}
}
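## A hedged usage sketch for make_exercise_transform_html() above, assuming the
## `exams` package is installed and ships the demo exercise "tstat.Rnw": the
## returned transformer plugs into the xexams() driver list, so every exercise
## is converted from LaTeX to HTML with images embedded as base64 data URIs.
if (requireNamespace("exams", quietly = TRUE)) {
  htmltrafo <- make_exercise_transform_html(converter = "ttm", base64 = TRUE)
  ex <- exams::xexams("tstat.Rnw",
    driver = list(sweave = NULL, read = NULL, transform = htmltrafo, write = NULL))
}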
as.datatable_widget = function(data, ...){
    stopif(!requireNamespace("DT", quietly = TRUE) || !requireNamespace("htmltools", quietly = TRUE),
        "The 'DT' and 'htmltools' packages are required for 'as.datatable_widget'. Please install them with 'install.packages(c(\"DT\", \"htmltools\"))'."
    )
UseMethod("as.datatable_widget")
}
as.datatable_widget.default = function(data, ...){
DT::datatable(data, ...)
}
as.datatable_widget.etable = function(data,
...,
repeat_row_labels = FALSE,
show_row_numbers = FALSE,
digits = get_expss_digits()
){
data = round_dataframe(data, digits = digits)
if(NCOL(data)>0){
first_lab = colnames(data)[1]
row_labels = data[[1]]
data[[1]] = NULL
header = t(split_labels(colnames(data), split = "|", fixed = TRUE, remove_repeated = FALSE))
row_labels = split_labels(row_labels, split = "|", fixed = TRUE, remove_repeated = !repeat_row_labels)
if(length(row_labels)){
row_labels = sheet(row_labels)
} else {
row_labels = sheet(matrix("", nrow = nrow(data), ncol = 1))
}
if(show_row_numbers) {
row_labels = sheet(seq_len(nrow(row_labels)), row_labels)
}
colnames(row_labels) = rep("", ncol(row_labels))
if(nrow(header)>0){
empty_corner = matrix("", nrow = nrow(header) , ncol = ncol(row_labels))
} else {
empty_corner = matrix("", nrow = 1, ncol = ncol(row_labels))
}
if(is.na(first_lab) || first_lab=="row_labels") first_lab = ""
empty_corner[1, 1] = first_lab
header = matrix_header_to_html(empty_corner, header)
data = cbind(row_labels, data)
} else {
if(show_row_numbers) {
row_labels = sheet(seq_len(nrow(data)))
} else {
row_labels = as.sheet(matrix(NA, nrow = nrow(data), ncol = 0))
}
data = cbind(row_labels, data)
        header = '<table class="display"><thead><tr><th> </th></tr></thead></table>'
empty_corner = NULL
}
args = list(...)
args[["class"]] = if_null(args[["class"]], 'stripe hover cell-border row-border order-column compact')
args[["filter"]] = if_null(args[["filter"]], "none")
args[["options"]] = if_null(args[["options"]],
list(paging = FALSE,
searching = FALSE,
sorting = FALSE,
ordering = FALSE,
bFilter = FALSE,
bInfo = FALSE,
columnDefs = list(
list(
className = 'dt-head-left',
targets = 0:(ncol(data)-1)
)
)
)
)
args[["rownames"]] = if_null(args[["rownames"]], FALSE)
args[["container"]] = header
args[["data"]] = data
res = do.call(DT::datatable, args)
if(NCOL(data)>0) {
DT::formatStyle(res, seq_len(NCOL(data))[-seq_len(NCOL(empty_corner))], textAlign = 'right')
} else {
res
}
}
matrix_header_to_html = function(corner, m_cols){
thead = NULL
tr = NULL
th = NULL
row_rle = list()
if(NCOL(m_cols)>0){
        m_cols[is.na(m_cols)] = ""
        ## columns whose header is entirely empty get a single space so they
        ## still render as a visible <th>
        strange = colSums(m_cols != "") == 0
        m_cols[1, strange] = " "
        ## run-length encode each header row: a new run starts wherever any of
        ## the rows down to the current one changes between adjacent columns
        for(i in seq_len(nrow(m_cols))){
            y = colSums((m_cols[1:i,-1L, drop = FALSE] != m_cols[1:i, -ncol(m_cols), drop = FALSE]))>0
            changes = c(which(y | is.na(y)), ncol(m_cols))
            row_rle[[i]] = list(lengths = diff(c(0L, changes)), values = m_cols[i, changes])
        }
for (each_row in seq_along(row_rle)){
curr_col = 1
names(row_rle[[each_row]]) = c("colspan","values")
row_rle[[each_row]][["rowspan"]] = rep(1, length(row_rle[[each_row]]$values))
curr_row = row_rle[[each_row]]
for(each_item in seq_along(curr_row$values)){
for(each in m_cols[-(1:each_row), curr_col]){
if(each == "") {
curr_row$rowspan[each_item] = curr_row$rowspan[each_item] + 1
} else {
break
}
}
curr_col = curr_col + curr_row$colspan[each_item]
}
empty = curr_row$values %in% ""
curr_row$values = curr_row$values[!empty]
curr_row$colspan = curr_row$colspan[!empty]
curr_row$rowspan = curr_row$rowspan[!empty]
row_rle[[each_row]] = curr_row
}
row_rle[[1]]$values = c(corner[1,1],row_rle[[1]]$values)
row_rle[[1]]$colspan = c(ncol(corner),row_rle[[1]]$colspan)
row_rle[[1]]$rowspan = c(nrow(corner),row_rle[[1]]$rowspan)
} else {
row_rle[[1]] = list(values = corner[1,1],
colspan = ncol(corner),
rowspan = nrow(corner)
)
}
htmltools::withTags(table(
class = 'display',
thead(
lapply(row_rle, function(row){
tr(lapply(seq_along(row$values),function(item){
th(
htmltools::tags$style(type = "text/css",
htmltools::HTML("th { text-align: center; } ")
),
                               htmltools::tags$style(type = "text/css",
                                   htmltools::HTML("th { border: 1px solid #ddd; }")  # border colour (#ddd) is an assumed value
                               ),
rowspan = row$rowspan[item],
colspan = row$colspan[item],
row$values[item]
)
}))
})
)
))
}
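## A small sketch of matrix_header_to_html() above (assumption: htmltools is
## installed; the labels are illustrative). Adjacent repeated labels in the
## top header row are merged into one cell with colspan = 2:
if (requireNamespace("htmltools", quietly = TRUE)) {
  corner <- matrix("", nrow = 2, ncol = 1)
  hdr <- rbind(c("Group A", "Group A", "Group B"),
               c("x", "y", "z"))
  matrix_header_to_html(corner, hdr)
}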
as.datatable_widget.with_caption = function(data,
...,
repeat_row_labels = FALSE,
show_row_numbers = FALSE,
digits = get_expss_digits()){
caption = get_caption(data)
data = set_caption(data, NULL)
as.datatable_widget(
data,
...,
repeat_row_labels = repeat_row_labels,
show_row_numbers = show_row_numbers,
digits = digits,
caption = caption
)
}
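## A hedged usage sketch for as.datatable_widget() above, assuming the `expss`
## package (which supplies cro(), sheet(), split_labels() and the other helper
## functions used in the methods) and `DT` are installed, and that this runs
## where those helpers are visible (e.g. inside the expss namespace):
if (requireNamespace("expss", quietly = TRUE) && requireNamespace("DT", quietly = TRUE)) {
  tbl <- expss::cro(mtcars$cyl, mtcars$vs)  # a small cross-tabulation (etable)
  as.datatable_widget(tbl, show_row_numbers = TRUE)
}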
knitr::opts_chunk$set(
collapse = TRUE,
  comment = "#>"
)
library(eatGADS)
df <- data.frame(ID = 1:4, sex = c(0, 0, 1, 1),
forename = c("Tim", "Bill", "Ann", "Chris"), stringsAsFactors = FALSE)
varLabels <- data.frame(varName = c("ID", "sex", "forename"),
varLabel = c("Person Identifier", "Sex as self reported",
"first name as reported by teacher"),
stringsAsFactors = FALSE)
valLabels <- data.frame(varName = rep("sex", 3),
value = c(0, 1, -99),
valLabel = c("male", "female", "missing - omission"),
missings = c("valid", "valid", "miss"), stringsAsFactors = FALSE)
df
varLabels
valLabels
gads <- import_raw(df = df, varLabels = varLabels, valLabels = valLabels)
gads
extractMeta(gads, vars = c("sex"))
extractMeta(gads)
dat1 <- extractData(gads, convertMiss = TRUE, convertLabels = "numeric")
dat1
dat2 <- extractData(gads, convertMiss = TRUE, convertLabels = "character")
dat2
gads2 <- changeVarLabels(gads, varName = c("ID"), varLabel = c("Test taker ID"))
extractMeta(gads2, vars = "ID")
gads3 <- changeVarNames(gads, oldNames = c("ID"), newNames = c("idstud"))
extractMeta(gads3, vars = "idstud")
extractData(gads3)
gads4 <- recodeGADS(gads, varName = "sex", oldValues = c(0, 1, -99), newValues = c(1, 2, 99))
extractMeta(gads4, vars = "sex")
extractData(gads4, convertLabels = "numeric")
varChanges <- getChangeMeta(gads, level = "variable")
varChanges[varChanges$varName == "ID", "varLabel_new"] <- "Test taker ID"
gads5 <- applyChangeMeta(varChanges, gads)
extractMeta(gads5, vars = "ID")
haven_dat <- export_tibble(gads)
haven_dat
lapply(haven_dat, attributes)
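## A hedged sketch of the analogous value-level workflow (assumptions: eatGADS
## getChangeMeta()/applyChangeMeta() accept level = "value" as the counterpart
## of level = "variable" used above, and the change table has a valLabel_new
## column; the new label text is illustrative):
valChanges <- getChangeMeta(gads, level = "value")
valChanges[valChanges$varName == "sex" & valChanges$value == -99,
           "valLabel_new"] <- "missing - not administered"
gads6 <- applyChangeMeta(valChanges, gads)
extractMeta(gads6, vars = "sex")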