update_beta <- function(tms, a, rho, omega) {
  # Per-band offset update for a sinusoidal model: beta[b] is the
  # inverse-variance weighted mean of the residuals
  # m - a[b] * sin(omega * t + rho[b]).
  # Each element of tms is a matrix with columns: time, measurement, error.
  B <- length(tms)
  beta <- double(B)
  for (b in 1:B) {
    band <- tms[[b]]
    m <- band[, 2]
    s <- sin(omega * band[, 1] + rho[b])
    beta[b] <- sum((m - a[b] * s) / band[, 3]^2) / sum(1 / band[, 3]^2)
  }
  beta
} |
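# Usage sketch for update_beta() on hypothetical data: two bands of
# noisy sinusoidal magnitudes (columns time, magnitude, error); the
# recovered offsets should be close to the simulated 6 and 7.
set.seed(1)
omega <- 2 * pi / 10
tms <- lapply(1:2, function(b) {
  t <- sort(runif(50, 0, 100))
  cbind(t, 5 + b + 0.8 * sin(omega * t + 0.3) + rnorm(50, sd = 0.1),
        rep(0.1, 50))
})
update_beta(tms, a = c(0.8, 0.8), rho = c(0.3, 0.3), omega = omega) |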
# Fits a Ramberg-Schmeiser (RS) generalized lambda distribution via
# quasi-random search (fun.fit.gl.v2a) followed by local refinement
# (fun.fit.gl.v2b); GLDEX-style interface.
`fun.RPRS.hs` <-
  function(data, default = "Y", rs.init = c(-1.5, 1.5), no.c.rs = 50,
           leap = 3, FUN = "runif.sobol", no = 10000)
{
  if (default == "Y") {
    no.c.rs <- fun.nclass.e(data)
  }
  RPRS <- fun.fit.gl.v2a(a = rs.init[1], b = rs.init[2], data = data,
                         fun = fun.auto.perc.rs, no = no,
                         nclass = no.c.rs, leap = leap, FUN = FUN)$unique.optim.result
  RPRS <- fun.fit.gl.v2b(RPRS[1], RPRS[2], RPRS[3], RPRS[4],
                         data, fun.auto.perc.rs, nclass = no.c.rs)
  return(RPRS)
} |
model_parameters.data.frame <- function(model,
centrality = "median",
dispersion = FALSE,
ci = .95,
ci_method = "hdi",
test = c("pd", "rope"),
rope_range = "default",
rope_ci = 0.95,
keep = NULL,
drop = NULL,
parameters = keep,
verbose = TRUE,
...) {
params <- .extract_parameters_bayesian(
model,
centrality = centrality,
dispersion = dispersion,
ci = ci,
ci_method = ci_method,
test = test,
rope_range = rope_range,
rope_ci = rope_ci,
bf_prior = NULL,
diagnostic = NULL,
priors = FALSE,
keep_parameters = keep,
drop_parameters = drop,
verbose = verbose,
...
)
attr(params, "ci") <- ci
attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500)
class(params) <- c("parameters_model", "see_parameters_model", class(params))
params
}
standard_error.factor <- function(model, force = FALSE, verbose = TRUE, ...) {
if (force) {
standard_error(as.numeric(model), ...)
} else {
if (verbose) {
warning("Can't compute standard error of non-numeric variables.", call. = FALSE)
}
return(NA)
}
}
standard_error.character <- standard_error.factor
standard_error.numeric <- function(model, ...) {
sqrt(stats::var(model, na.rm = TRUE) / length(stats::na.omit(model)))
}
standard_error.data.frame <- function(model, verbose = TRUE, ...) {
unlist(sapply(model, standard_error, verbose = verbose))
}
standard_error.list <- function(model, verbose = TRUE, ...) {
if ("gam" %in% names(model)) {
model <- model$gam
class(model) <- c("gam", "lm", "glm")
standard_error(model)
} else {
if (isTRUE(verbose)) {
insight::print_color("\nCould not extract standard errors from model object.\n", "red")
}
}
}
standard_error.table <- function(model, ...) {
if (length(dim(model)) == 1) {
total.n <- as.vector(sum(model))
rel.frq <- as.vector(model) / total.n
out <- .data_frame(
Value = names(model),
Proportion = rel.frq,
SE = suppressWarnings(sqrt(rel.frq * (1 - rel.frq) / total.n))
)
} else {
out <- NA
}
out
}
standard_error.xtabs <- standard_error.table
standard_error.effectsize_std_params <- function(model, verbose = TRUE, ...) {
se <- attr(model, "standard_error")
if (is.null(se)) {
if (isTRUE(verbose)) {
insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red")
}
return(NULL)
}
if (is.data.frame(se) && "SE" %in% colnames(se)) {
se <- se$SE
}
out <- .data_frame(
Parameter = model$Parameter,
SE = as.vector(se)
)
.remove_backticks_from_parameter_names(out)
}
p_value.numeric <- function(model, null = 0, ...) {
x <- stats::na.omit(model)
xM <- mean(x)
if (is.null(null) || all(is.na(null))) {
x0 <- x - xM
} else {
x0 <- null
}
k <- sum(x > x0)
N <- length(x)
(k + 1) / (N + 1)
}
p_value.data.frame <- function(model, ...) {
data <- model[sapply(model, is.numeric)]
.data_frame(
Parameter = names(data),
p = sapply(data, p_value)
)
}
p_value.list <- function(model, method = NULL, verbose = TRUE, ...) {
if ("gam" %in% names(model)) {
model <- model$gam
class(model) <- c("gam", "lm", "glm")
p_value(model, method = method)
} else {
if (isTRUE(verbose)) {
warning("Could not extract p-values from model object.", call. = FALSE)
}
}
} |
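# Minimal sketch exercising the S3 methods above; assumes the
# standard_error() and p_value() generics and internal helpers
# (parameters-package style) are loaded.
df <- data.frame(x = rnorm(100), y = rnorm(100, mean = 1))
standard_error(df)  # per-column standard error of the mean
p_value(df)         # empirical (k + 1) / (N + 1) p-values per column |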
testIndLogistic = function(target, dataset, xIndex, csIndex, wei = NULL, univariateModels = NULL, hash = FALSE, stat_hash = NULL,
pvalue_hash = NULL) {
csIndex[ which( is.na(csIndex) ) ] = 0
if (hash) {
csIndex2 = csIndex[which(csIndex!=0)]
csIndex2 = sort(csIndex2)
xcs = c(xIndex,csIndex2)
key = paste(as.character(xcs) , collapse=" ");
if ( !is.null(stat_hash[key]) ) {
stat = stat_hash[key];
pvalue = pvalue_hash[key];
results <- list(pvalue = pvalue, stat = stat, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
}
pvalue = log(1);
stat = 0;
if ( !is.na( match(xIndex, csIndex) ) ) {
if ( hash) {
stat_hash[key] <- 0;
pvalue_hash[key] <- log(1);
}
results <- list(pvalue = log(1), stat = 0, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
if ( any(xIndex < 0) || any(csIndex < 0) ) {
message("error in testIndLogistic: invalid xIndex or csIndex")
results <- list(pvalue = pvalue, stat = stat, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
x = dataset[ , xIndex];
cs = dataset[ , csIndex];
if (length(cs) == 0 || any( is.na(cs) ) ) cs = NULL;
if ( length(cs) != 0 ) {
if ( is.null(dim(cs)[2]) ) {
if ( identical(x, cs) ) {
if (hash) {
stat_hash[key] <- 0;
pvalue_hash[key] <- log(1);
}
results <- list(pvalue = log(1), stat = 0, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
} else {
for ( col in 1:dim(cs)[2] ) {
if ( identical( x, cs[, col] ) ) {
if (hash) {
stat_hash[key] <- 0;
pvalue_hash[key] <- log(1);
}
results <- list(pvalue = log(1), stat = 0, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
}
}
}
if (length(cs) == 0) {
if ( !is.null(univariateModels) ) {
pvalue = univariateModels$pvalue[[xIndex]];
stat = univariateModels$stat[[xIndex]];
results <- list(pvalue = pvalue, stat = stat, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
fit2 = glm(target ~ x, binomial, weights = wei, model = FALSE)
stat = fit2$null.deviance - fit2$deviance
dof = length( fit2$coefficients ) - 1
} else {
fit1 <- glm(target ~., data = data.frame( cs ), binomial, weights = wei, model = FALSE)
fit2 <- glm(target ~., data = data.frame( dataset[, c(csIndex, xIndex)] ), binomial, weights = wei, model = FALSE)
stat <- fit1$deviance - fit2$deviance
dof <- length( fit2$coefficients ) - length( fit1$coefficients )
}
pvalue = pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE);
if (hash) {
stat_hash[key] <- stat;
pvalue_hash[key] <- pvalue;
}
if ( is.na(pvalue) | is.na(stat) ) {
pvalue = log(1);
stat = 0;
} else {
if (hash) {
stat_hash[key] <- stat;
pvalue_hash[key] <- pvalue;
}
}
results <- list(pvalue = pvalue, stat = stat, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
} |
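# Hypothetical call: log-scale p-value of the likelihood-ratio test of
# column 1 given column 2, using the nested logistic fits above.
set.seed(2)
d <- matrix(rnorm(300), ncol = 3)
y <- rbinom(100, 1, plogis(d[, 1]))
testIndLogistic(target = y, dataset = d, xIndex = 1, csIndex = 2) |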
[
{
"title": "Random points on some hemisphere",
"href": "http://freakonometrics.hypotheses.org/11018"
},
{
"title": "Really useful R package: sas7bdat",
"href": "https://feedproxy.google.com/~r/SASandR/~3/fwFPoUJ20U4/really-useful-r-package-sas7bdat.html"
},
{
"title": "My New Book and Other Matters",
"href": "https://matloff.wordpress.com/2015/05/22/my-new-book-and-other-matters/"
},
{
"title": "Animating the Metropolis algorithm",
"href": "https://web.archive.org/web/http://mbjoseph.github.io/blog/2013/09/08/metropolis/"
},
{
"title": "2012-6 Working with the gridSVG Coordinate System",
"href": "http://stattech.wordpress.fos.auckland.ac.nz/2012-6-working-with-the-gridsvg-coordinate-system/"
},
{
"title": "For descriptive statistics, values below LLOQ set to …",
"href": "http://wiekvoet.blogspot.com/2013/02/for-descriptive-statistics-values-below.html"
},
{
"title": "JMBayes R package (webinar)",
"href": "http://r4stats.com/2014/04/08/jmbayes-r-package-webinar/"
},
{
"title": "Two New Soils-Related KMZ Demos",
"href": "https://casoilresource.lawr.ucdavis.edu/"
},
{
"title": "Presentations and video from useR! 2010 available",
"href": "http://blog.revolutionanalytics.com/2010/08/presentations-and-video-from-user-2010-available.html"
},
{
"title": "Scales and transformations in ggplot2 0.9.0",
"href": "https://heuristically.wordpress.com/2012/03/14/scales-and-transformations-in-ggplot2-0-9-0/"
},
{
"title": "Update on Snowdoop, a MapReduce Alternative",
"href": "https://matloff.wordpress.com/2015/05/29/update-on-snowdoop-a-mapreduce-alternative/"
},
{
"title": "Using R — Standalone Scripts & Error Messages",
"href": "http://mazamascience.com/WorkingWithData/?p=888"
},
{
"title": "The Lady Loves Statistics",
"href": "http://www.mango-solutions.com/wp/2015/11/the-lady-loves-statistics/"
},
{
"title": "Plugging hierarchical data from R into d3",
"href": "http://blog.rolffredheim.com/2013/11/d3-without-javascript.html"
},
{
"title": "Making Static & Interactive Maps With ggvis (+ using ggvis maps w/shiny)",
"href": "http://rud.is/b/2014/12/29/making-static-interactive-maps-with-ggvis-using-ggvis-maps-wshiny/"
},
{
"title": "twitteR now supports database persistence",
"href": "http://geoffjentry.blogspot.com/2014/02/twitter-now-supports-database.html"
},
{
"title": "FRAMA Part IV: Continuing the Long/Short Filter Search",
"href": "https://quantstrattrader.wordpress.com/2014/07/09/frama-part-iv-continuing-the-longshort-filter-search/"
},
{
"title": "Yet another reason to avoid loops in R",
"href": "https://theaverageinvestor.wordpress.com/2011/07/12/yet-another-reason-to-avoid-loops-in-r/"
},
{
"title": "Welcome Rumpel!",
"href": "https://binfalse.de/2010/10/18/welcome-rumpel/"
},
{
"title": "O’Reilly R ebooks half price – today only",
"href": "https://4dpiecharts.com/2013/09/19/oreilly-r-ebooks-half-price-today-only/"
},
{
"title": "V8 version 0.5: typed arrays and sql.js",
"href": "https://www.opencpu.org/posts/v8-release-0-5/"
},
{
"title": "Earthquakes",
"href": "https://web.archive.org/web/http://isomorphism.es//post/13558632608"
},
{
"title": "MLB Baseball Pitching Matchups ~ grabbing pitcher and/or batter codes by specify game date using R XML",
"href": "https://probabilitynotes.wordpress.com/2010/06/01/mlb-baseball-pitching-matchups-grabbing-pitcher-andor-batter-codes-by-specify-game-date-using-r-xml/"
},
{
"title": "Effect of sample size on the accuracy of Cohen’s d estimates (95 % CI)",
"href": "http://rpsychologist.com/effect-of-sample-size-on-the-accuracy-of-cohens-d-estimates-95-ci"
},
{
"title": "Call for Help: Front-End Javascript/.NET Web App Developer",
"href": "http://fantasyfootballanalytics.net/2015/10/call-for-help-front-end-javascript-net-web-app-developer.html"
},
{
"title": "Off to Banff!!",
"href": "https://xianblog.wordpress.com/2010/09/10/off-to-banff/"
},
{
"title": "13 videos for learning R",
"href": "http://blog.revolutionanalytics.com/2010/05/13-videos-for-learning-r.html"
},
{
"title": "Updates to the Deducer family of packages",
"href": "http://blog.fellstat.com/?p=160"
},
{
"title": "infuser: a template engine for R",
"href": "http://fishyoperations.com/2015/05/17/infuser-a-template-engine-for-r.html"
},
{
"title": "Factors are not first-class citizens in R",
"href": "http://www.win-vector.com/blog/2014/09/factors-are-not-first-class-citizens-in-r/"
},
{
"title": "Lightning strike trend prediction with GBM in R",
"href": "https://blog.snap.uaf.edu/2015/08/28/lightning-strike-trend-prediction-with-gbm-in-r/"
},
{
"title": "Revolution Newsletter: February 2012",
"href": "http://blog.revolutionanalytics.com/2012/02/revolution-newsletter-february-2012.html"
},
{
"title": "Project Tycho, Correlation between states",
"href": "http://wiekvoet.blogspot.com/2014/04/project-tycho-correlation-between-states.html"
},
{
"title": "Choosing an SQL Engine for Analytics",
"href": "http://www.cerebralmastication.com/2009/03/chosing-an-sql-engine-for-analytics/"
},
{
"title": "How I made some Pokémon Business Cards",
"href": "http://www.sumsar.net/blog/2016/09/how-i-made-some-pokemon-business-cards/"
},
{
"title": "Introducing shinyjs: perform common JavaScript operations in Shiny apps using plain R code",
"href": "http://deanattali.com/2015/04/23/shinyjs-r-package/"
},
{
"title": "Google Summer of Code Student Project Wins Statistical Software Award",
"href": "https://opensource.googleblog.com/2011/04/google-summer-of-code-student-project.html"
},
{
"title": "Economic geography of the eastern USA\r\ncirca 1999, median incomes…",
"href": "http://isomorphism.es/post/30211830674/economic-geography"
},
{
"title": "Le Monde puzzle [
"href": "https://xianblog.wordpress.com/2013/05/01/le-monde-puzzle-818/"
},
{
"title": "how to provide a variance calculation on your public-use survey data file without disclosing sampling clusters or violating respondent confidentiality",
"href": "http://www.asdfree.com/2014/09/how-to-provide-variance-calculation-on.html"
},
{
"title": "Monte Carlo simulation of a 2-factor interest rates model with ESGtoolkit",
"href": "https://thierrymoudiki.wordpress.com/2014/10/12/monte-carlo-simulation-of-a-2-factor-interest-rates-model-with-esgtoolkit/"
},
{
"title": "Rolling means (and other functions) with zoo",
"href": "http://is-r.tumblr.com/post/37024322796/rolling-means-and-other-functions-with-zoo"
},
{
"title": "Posted Question for R Users",
"href": "http://econometricsense.blogspot.com/2011/01/posted-question-for-r-users.html"
},
{
"title": "Playing cards, with R",
"href": "http://freakonometrics.hypotheses.org/6491"
},
{
"title": "Multiple Factor Model – Building Fundamental Factors",
"href": "https://systematicinvestor.wordpress.com/2012/02/04/multiple-factor-model-building-fundamental-factors/"
},
{
"title": "Feature selection: Using the caret package",
"href": "http://www.cybaea.net/journal/2010/11/16/Feature-selection-Using-the-caret-package/?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+CybaeaOnR+%28CYBAEA+on+R%29"
},
{
"title": "The Bernoulli factory",
"href": "https://xianblog.wordpress.com/2010/04/23/the-bernoulli-factory/"
},
{
"title": "Measuring time series characteristics",
"href": "http://robjhyndman.com/hyndsight/tscharacteristics/"
},
{
"title": "\"R\": PLS Regression (Gasoline) – 005",
"href": "http://nir-quimiometria.blogspot.com/2012/02/r-pls-regression-gasoline-005.html"
},
{
"title": "Tips & Tricks 4: Reading In Data Files",
"href": "http://ww1.geomorph.net/2014/07/tips-tricks-4-reading-in-data-files.html"
}
] |
fun_load_mutation_gz <- function(mutation_file) {
Ref <- NULL
Alt <- NULL
Neighborhood_sequence <- NULL
df_mutation <- utils::read.csv(mutation_file,
stringsAsFactors = FALSE,
header = TRUE,
check.names = FALSE,
sep = "\t")
if (dim(df_mutation)[1] > 1) {
if (!"SimpleRepeat_TRF" %in% colnames(df_mutation)) {
df_mutation$SimpleRepeat_TRF <- "NA"
}
if (!"Transition" %in% colnames(df_mutation)) {
df_mutation$Transition <- "NA"
}
df_mutation$Pos <- as.integer(df_mutation$Pos)
df_mutation <- df_mutation %>%
mutate(Ref = toupper(Ref)) %>%
mutate(Alt = toupper(Alt)) %>%
mutate(Neighborhood_sequence = toupper(Neighborhood_sequence))
} else {
if (!"SimpleRepeat_TRF" %in% colnames(df_mutation)) {
df_mutation$SimpleRepeat_TRF <- character(0)
}
if (!"Transition" %in% colnames(df_mutation)) {
df_mutation$Transition <- character(0)
}
}
return(df_mutation)
}
NULL |
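# Hypothetical usage (file name is illustrative; needs dplyr attached):
# read a tab-separated mutation table; Ref/Alt/Neighborhood_sequence
# come back upper-cased.
# df_mut <- fun_load_mutation_gz("mutations.tsv.gz")
# head(df_mut[, c("Pos", "Ref", "Alt", "Neighborhood_sequence")]) |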
# Test setup: assumes testthat, ncdf4 and cmsafops are attached and that
# tempfile_helper() is defined in the package's testthat helper files.
library(testthat)
library(ncdf4)
library(cmsafops)
data_dir <- file.path("..", "testdata")
tempfile_nc <- function() {
  tempfile_helper("gridboxmax_")
}
file_out <- tempfile_nc()
gridboxmax("SIS", 3, 6, file.path(data_dir, "ex_gridboxx.nc"), file_out)
file <- nc_open(file_out)
test_that("data is correct", {
actual <- ncvar_get(file)
expected_data <- c(336,339,342,345,348,350,333,
340,343,346,349,350,334,337,
344,347,350,332,335,338,341,
348,350,333,336,339,342,345,
350,334,337,340,343,346,349,
335,338,341,344,347,350,336,
343,346,349,350,350,337,340,
305,308,311,314,317,320,323,
347,350,336,339,342,345,348,
350,337,340,343,346,349,350,
338,341,344,347,350,332,335,
342,345,348,350,333,336,339)
expected <- array(expected_data, dim = c(4, 7, 3))
expect_equivalent(actual, expected)
})
test_that("variable attributes are correct", {
actual <- ncatt_get(file, "SIS", "units")$value
expect_equal(actual, "W m-2")
actual <- ncatt_get(file, "SIS", "_FillValue")$value
expect_equal(actual, -999)
actual <- ncatt_get(file, "SIS", "standard_name")$value
expect_equal(actual, "SIS_standard")
actual <- ncatt_get(file, "SIS", "long_name")$value
expect_equal(actual, "Surface Incoming Shortwave Radiation")
actual <- ncatt_get(file, "SIS", "missing_value")$value
expect_equal(actual, 0)
})
test_that("attributes are correct", {
actual <- ncatt_get(file, "lon", "units")$value
expect_equal(actual, "degrees_east")
actual <- ncatt_get(file, "lon", "long_name")$value
expect_equal(actual, "longitude")
actual <- ncatt_get(file, "lon", "standard_name")$value
expect_equal(actual, "longitude")
actual <- ncatt_get(file, "lon", "axis")$value
expect_equal(actual, "X")
actual <- ncatt_get(file, "lat", "units")$value
expect_equal(actual, "degrees_north")
actual <- ncatt_get(file, "lat", "long_name")$value
expect_equal(actual, "latitude")
actual <- ncatt_get(file, "lat", "standard_name")$value
expect_equal(actual, "latitude")
actual <- ncatt_get(file, "lat", "axis")$value
expect_equal(actual, "Y")
actual <- ncatt_get(file, "time", "units")$value
expect_equal(actual, "hours since 1983-01-01 00:00:00")
actual <- ncatt_get(file, "time", "long_name")$value
expect_equal(actual, "time")
actual <- ncatt_get(file, "time", "standard_name")$value
expect_equal(actual, "time")
actual <- ncatt_get(file, "time", "calendar")$value
expect_equal(actual, "standard")
actual <- ncatt_get(file, "SIS", "standard_name")$value
expect_equal(actual, "SIS_standard")
actual <- ncatt_get(file, "SIS", "long_name")$value
expect_equal(actual, "Surface Incoming Shortwave Radiation")
actual <- ncatt_get(file, "SIS", "units")$value
expect_equal(actual, "W m-2")
actual <- ncatt_get(file, "SIS", "_FillValue")$value
expect_equal(actual, -999)
actual <- ncatt_get(file, "SIS", "cmsaf_info")$value
expect_equal(actual, "cmsafops::gridboxmax for variable SIS")
global_attr <- ncatt_get(file, 0)
expect_equal(length(global_attr), 1)
actual <- names(global_attr[1])
expect_equal(actual, "Info")
actual <- global_attr[[1]]
expect_equal(actual, "Created with the CM SAF R Toolbox.")
})
test_that("coordinates are correct", {
actual <- ncvar_get(file, "lon")
expect_identical(actual, array(c(5.5, 7.0, 8.5, 10.0, 11.5, 13.0, 14.5)))
actual <- ncvar_get(file, "lat")
expect_identical(actual, array(c(46.25, 49.25, 52.25, 54.5)))
actual <- ncvar_get(file, "time")
expect_equal(actual, array(c(150456, 151200, 151920)))
})
nc_close(file) |
# Density function for the slash distribution; like pslash()/rslash()
# below, a thin wrapper around a compiled cpp_* backend (only the first
# element of the log / lower.tail / log.p flags is used).
dslash <- function(x, mu = 0, sigma = 1, log = FALSE) {
  cpp_dslash(x, mu, sigma, log[1L])
}
pslash <- function(q, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) {
cpp_pslash(q, mu, sigma, lower.tail[1L], log.p[1L])
}
rslash <- function(n, mu = 0, sigma = 1) {
if (length(n) > 1) n <- length(n)
cpp_rslash(n, mu, sigma)
} |
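# The standard slash distribution is Z / U with Z ~ N(0, 1) and
# U ~ Uniform(0, 1); a pure-R reference sampler for sanity-checking
# rslash() once the compiled cpp_rslash backend is available.
rslash_ref <- function(n, mu = 0, sigma = 1) {
  mu + sigma * rnorm(n) / runif(n)
}
# qqplot(rslash_ref(1e4), rslash(1e4))  # points should fall on y = x |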
fromto_to_mat = function(fromto, nodename)
{
if(dim(fromto)[1] == 0)
{
stop("It has not any arc");
}
num_of_nodes = length(nodename)
arcs_mat = matrix(0, num_of_nodes, num_of_nodes)
arcs_order_mat = cbind(nodename, c(1:length(nodename)))
temp_arcs = cbind(match(fromto[,1], arcs_order_mat), match(fromto[,2], arcs_order_mat))
if (length(temp_arcs) > 0)
{
for (i in 1:dim(temp_arcs)[1])
{
from = as.numeric(temp_arcs[i,1])
to = as.numeric(temp_arcs[i,2])
arcs_mat[from, to] = arcs_mat[from, to] + 1
}
}
dimnames(arcs_mat)[[1]] = nodename
dimnames(arcs_mat)[[2]] = nodename
return(arcs_mat);
} |
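# Usage sketch: arcs a -> b and b -> c among three named nodes give an
# adjacency matrix with ones at ["a", "b"] and ["b", "c"].
fromto <- cbind(c("a", "b"), c("b", "c"))
fromto_to_mat(fromto, nodename = c("a", "b", "c")) |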
l_getBinData <- function(widget) {
stopifnot({inherits(widget, "l_hist")})
l_throwErrorIfNotLoonWidget(widget)
tcl_obj_varname <- function(widget, varname = NULL) {
x <- tcl("info", "object", "namespace", widget)
if (!is.null(varname)) {
x <- paste(x, varname, sep="::")
}
x
}
dict_get <- function(d, keys) {
.Tcl(paste0("dict get $", d, " ", paste(keys, collapse = " ")))
}
dict_with <- function(d, expr) {
as.character(.Tcl(paste("dict with", paste(d, collapse = " "), paste("{", expr, "}"))))
}
tclbins <- tcl_obj_varname(widget, "bins")
binNames <- sort(as.numeric(dict_with(tclbins, "dict keys $bin")))
setNames(
lapply(binNames,
function(binid) {
keys_count <- dict_with(c(tclbins, "bin", binid), "dict keys $count")
keys_points <- dict_with(c(tclbins, "bin", binid), "dict keys $points")
list(
count = sapply(keys_count, function(x) {
as.numeric(dict_get(tclbins, c("bin", binid, "count", x)))
}, USE.NAMES = TRUE, simplify = FALSE),
points = sapply(keys_points, function(x) {
as.numeric(dict_get(tclbins, c("bin", binid, "points", x))) + 1
}, USE.NAMES = TRUE, simplify = FALSE),
x0 = as.numeric(dict_get(tclbins, c("bin", binid, "x0"))),
x1 = as.numeric(dict_get(tclbins, c("bin", binid, "x1")))
)
}),
paste("bin", binNames + 1)
)
}
l_getBinIds <- function(widget) {
stopifnot({
inherits(widget, "l_hist")
})
x <- widget['x']
len_x <- length(x)
if(len_x == 0) return(numeric(0))
bin <- l_getBinData(widget)
lapply(bin,
function(b) {
b$points$all
})
}
l_breaks <- function(widget) {
stopifnot({
inherits(widget, "l_hist")
})
x <- widget['x']
len_x <- length(x)
if(len_x == 0) return(numeric(0))
bin <- l_getBinData(widget)
lapply(bin,
function(b) {
c(b$x0, b$x1)
})
} |
import <- function(file = file.choose(), file.type = "csv", sep = ",", header.at = 1,
data.at = 2, date = "date", date.format = "%d/%m/%Y %H:%M",
time = NULL, time.format = NULL, tzone = "GMT", na.strings = c("", "NA"),
quote = "\"", ws = NULL, wd = NULL,
correct.time = NULL, ...) {
if (header.at > 0) {
Names <- read.table(
file,
nrows = 1, skip = (header.at - 1), sep = sep,
colClasses = "character", na.strings = ""
)
if (any(is.na(Names))) {
id <- which(is.na(Names))
Names[id] <- colnames(Names)[id]
}
}
thedata <- read.table(
file,
skip = (data.at - 1), sep = sep, na.strings = na.strings,
quote = quote, ...
)
names(thedata) <- Names
if (!date %in% Names) stop(paste("Can't find variable", date))
names(thedata)[which(Names == date)] <- "date"
if (!is.null(ws)) {
if (!ws %in% Names) stop(paste("Can't find variable", ws))
names(thedata)[which(Names == ws)] <- "ws"
}
if (!is.null(wd)) {
if (!wd %in% Names) stop(paste("Can't find variable", wd))
names(thedata)[which(Names == wd)] <- "wd"
}
if (is.null(time)) {
exam.date <- do.call("paste", list(head(thedata$date), collapse = ", "))
thedata$date <- as.POSIXct(
strptime(thedata$date, format = date.format, tz = tzone),
tz = tzone
)
if (all(is.na(thedata$date))) stop(paste("Date conversion problems, check that date.format is correct.\n First few dates look like this:", exam.date))
} else {
if (time.format == "%H") {
  # Re-base an hour column coded 1..24 to 0..23.
  if (min(thedata[, time]) == 1 && max(thedata[, time]) == 24) {
    thedata[, time] <- thedata[, time] - 1
  }
}
thedata$date <- as.POSIXct(strptime(
paste(thedata$date, thedata[, time]),
format = paste(date.format, time.format),
tz = tzone
), tz = tzone)
if (all(is.na(thedata$date))) stop("Date conversion problems, check that date.format and/or time.format is correct")
}
if (!is.null(correct.time)) thedata$date <- thedata$date + correct.time
attr(thedata$date, "tzone") <- tzone
ids <- which(is.na(thedata$date))
if (length(ids) > 0) {
thedata <- thedata[-ids, ]
warning(paste(
"Missing dates detected, removing",
length(ids), "lines"
), call. = FALSE)
}
print(unlist(sapply(thedata, class)))
thedata
} |
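# Hypothetical call (openair-style import): header on row 1, data from
# row 2, dates formatted like "01/01/2020 00:00".
# met <- import("met.csv", date = "date",
#               date.format = "%d/%m/%Y %H:%M", ws = "ws", wd = "wd") |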
NULL
row_number <- function(x) {
if (missing(x)){
seq_len(n())
} else {
rank(x, ties.method = "first", na.last = "keep")
}
}
ntile <- function(x = row_number(), n) {
if (!missing(x)) {
x <- row_number(x)
}
len <- length(x) - sum(is.na(x))
n <- as.integer(floor(n))
if (len == 0L) {
rep(NA_integer_, length(x))
} else {
n_larger <- as.integer(len %% n)
n_smaller <- as.integer(n - n_larger)
size <- len / n
larger_size <- as.integer(ceiling(size))
smaller_size <- as.integer(floor(size))
larger_threshold <- larger_size * n_larger
bins <- if_else(
x <= larger_threshold,
(x + (larger_size - 1L)) / larger_size,
(x + (- larger_threshold + smaller_size - 1L)) / smaller_size + n_larger
)
as.integer(floor(bins))
}
}
min_rank <- function(x) rank(x, ties.method = "min", na.last = "keep")
dense_rank <- function(x) {
match(x, sort(unique(x)))
}
percent_rank <- function(x) {
(min_rank(x) - 1) / (sum(!is.na(x)) - 1)
}
cume_dist <- function(x) {
rank(x, ties.method = "max", na.last = "keep") / sum(!is.na(x))
} |
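# Quick checks for the ranking helpers (row_number() with no argument
# and ntile() additionally need dplyr's n() and if_else()):
x <- c(5, 1, 3, 1, NA)
min_rank(x)    # 4 1 3 1 NA
dense_rank(x)  # 3 1 2 1 NA
cume_dist(x)   # 1.00 0.50 0.75 0.50 NA |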
demo.coast <- "-JM16c -R30W/15E/54N/70N -Di -G100/200/100 -B10f5g5/4f2g2 -K" |
require(spatstat.utils)
x0 <- (0:4)/4
y0 <- runif(5)
x1 <- x0 + runif(5)
y1 <- y0 + runif(5)
xc <- runif(4)
yc <- runif(4)
rc <- runif(4)
ansX <- xysegXcircle(xc, yc, rc[1:3], x0, y0, x1, y1)
ansM <- xysegMcircle(xc, yc, matrix(rc, 4, 2), x0, y0, x1, y1)
ansP <- xysegPcircle(xc, yc, rc, x0, y0, x1, y1) |
ssize.twoSampVaryDelta <- function (deltaMean, deltaSE, sigma, fdr = 0.05,
power = 0.8, pi0 = 0.95, maxN = 35,
side = "two-sided", cex.title = 1.15,
cex.legend = 1){
N <- maxN
a <- fdr
p <- pi0
if (side == "two-sided") {
TSVaryDelta <- function(c) {
r <- a * (1 - p)/((1 - a) * p)
dif <- abs((2 * pt(q = -c, df = 2 * n - 2)/
(1 - pt(q = c/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n))
+ pt(q = -c/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n)))
- r))
return(dif)
}
}
if (side == "upper") {
TSVaryDelta <- function(c) {
r <- a * (1 - p)/((1 - a) * p)
dif <- abs((2 * pt(q = -c, df = 2 * n - 2)/
(1 - pt(q = c/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n)))
- r))
return(dif)
}
}
if (side == "lower") {
TSVaryDelta <- function(c) {
r <- a * (1 - p)/((1 - a) * p)
dif <- abs((2 * pt(q = -c, df = 2 * n - 2)/
pt(q = -c/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n)) - r))
return(dif)
}
}
pwr2 <- NULL
crit <- NULL
ssize <- matrix(0, nrow = length(pi0), ncol = 3)
colnames(ssize) <- c("pi0", "ssize", "power")
up.start <- 50
for (i in 1:length(pi0)) {
p <- pi0[i]
up <- up.start
for (n in 3:N) {
ci <- optimize(f = TSVaryDelta, interval = c(0, up))$min
up <- ci
if (abs(ci - up.start) >= 1) {
if (side == "two-sided") {
pwr.new <- (1 - pt(q = ci/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n))
+ pt(q = -ci/sqrt(deltaSE^2 / (sigma^2 * 2/n) +1),
df = 2 * n - 2,
ncp = deltaMean/sqrt(deltaSE^2 + sigma^2 * 2/n)))
}
}
if (abs(ci - up.start) < 1) {
pwr.new <- 0
ci <- NA
}
crit <- c(crit, ci)
pwr2 <- c(pwr2, pwr.new)
if (pwr2[(i - 1) * (N - 2) + n - 2] >= power & ssize[i, 1] == 0) {
ssize[i, ] <- c(p, n, pwr2[(i - 1) * (N - 2) + n - 2])
}
}
}
ssize[, 1] <- pi0
if (sum(ssize == 0) > 0) {
warning("Desired power not achieved for at least one pi0")
}
ssize[ssize == 0] <- NA
pwrMatrix <- matrix(c(3:N, pwr2), ncol = length(pi0) + 1,
byrow = FALSE)
for (i in 1:length(pi0)) {
if (i == 1) {
plot(3:N, pwrMatrix[, i + 1], col = i, xlim = c(0, N), ylim = c(0, 1),
xlab = "", ylab = "", pch = 16)
lines(3:N, pwrMatrix[, i + 1], col = i, lty = i)
}
if (i != 1) {
points(3:N, pwrMatrix[, i + 1], col = i, pch = 16)
lines(3:N, pwrMatrix[, i + 1], col = i, lty = i)
}
}
abline(h = power, lty = 2, lwd = 2)
abline(v = 0:N, h = 0.1 * (0:10), col = "gray", lty = 3)
title(xlab = "Sample size (n)", ylab = "Power")
mtext(bquote(paste("Average power vs. sample size with fdr=", .(fdr), ",")),
cex = cex.title, padj = -1.85)
mtext(bquote(paste(Delta[g], "~N(", .(round(deltaMean, 4)), ",",
.(round(deltaSE, 4)), ") and ", sigma[g], " = ",
.(round(sigma, 4)))), cex = cex.title, padj = -0.1)
legend(x = N, y = 0, xjust = 1, yjust = 0, col = 1:i, pch = c(16, 16, 16),
lty = 1:length(pi0), legend = as.character(pi0),
bg = "white", title = expression(pi[0]), cex = cex.legend)
pwrMatrix <- round(pwrMatrix, 7)
colnames(pwrMatrix) <- c("n", as.character(pi0))
critMatrix <- matrix(c(3:N, crit), ncol = length(pi0) + 1, byrow = FALSE)
colnames(critMatrix) <- c("n", as.character(pi0))
ret <- NULL
ret$ssize <- ssize
ret$power <- pwrMatrix
ret$crit.vals <- critMatrix
return(ret)
} |
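# Hypothetical call: per-group sample size for gene-wise two-sample
# comparisons with Delta_g ~ N(1, 0.5^2) and sigma_g = 1; draws the
# power-vs-n plot as a side effect.
# res <- ssize.twoSampVaryDelta(deltaMean = 1, deltaSE = 0.5, sigma = 1,
#                               fdr = 0.05, power = 0.8, pi0 = 0.95)
# res$ssize |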
# Variance-ratio influence diagnostics for random-effects meta-analysis
# via parametric bootstrap. Requires metafor::rma(); REML() is an
# external helper assumed to return $V0 (tau^2) and $V1 (squared SE).
VRATIO <- function(y, v, B=2000, alpha=0.05){
reml1 <- rma(yi=y,vi=v)
mu0 <- as.numeric(reml1$beta)
v0 <- v + reml1$tau2
V0 <- reml1$tau2
V1 <- reml1$se^2
n <- length(y)
VR <- TR <- numeric(n)
for(i in 1:n){
y_i <- y[setdiff(1:n,i)]
v_i <- v[setdiff(1:n,i)]
reml_i <- rma(yi=y_i,vi=v_i)
VR[i] <- reml_i$se^2 / V1
TR[i] <- reml_i$tau2 / V0
}
VR.b <- TR.b <- matrix(numeric(n*B),B)
for(b in 1:B){
y.b <- rnorm(n, mean=mu0, sd=sqrt(v0))
reml.b <- REML(y.b,v)
V0.b <- reml.b$V0
V1.b <- reml.b$V1
for(i in 1:n){
y_i <- y.b[setdiff(1:n,i)]
v_i <- v[setdiff(1:n,i)]
reml_i <- REML(y_i,v_i)
VR.b[b,i] <- reml_i$V1 / V1.b
TR.b[b,i] <- reml_i$V0 / V0.b
}
if (b %% 100 == 0) print(paste0("The ", b, "th bootstrap is completed."))
}
Q1 <- Q2 <- numeric(n)
for(i in 1:n){
X1 <- VR.b[,i]
X2 <- TR.b[,i]
X2[is.nan(X2)] <- 1
X2[X2==Inf] <- 10^20
Q1[i] <- as.numeric(quantile(X1,alpha))
Q2[i] <- as.numeric(quantile(X2,alpha))
}
id <- 1:n
R1 <- data.frame(id,VR,Q1)
R1 <- R1[order(VR),]
R2 <- data.frame(id,TR,Q2)
R2 <- R2[order(TR),]
return(list(VRATIO=R1,TAU2RATIO=R2))
} |
calculate_db <-
  function(tb, tset_low, tset_up){
    # Deviation of body temperature tb from the set-point range
    # (tset_low, tset_up): zero inside the range, distance to the nearer
    # bound outside it (values exactly on a bound fall through to NA).
    db <- dplyr::case_when(
      tb > tset_low & tb < tset_up ~ 0,
      tb < tset_low ~ tset_low - tb,
      tb > tset_up ~ tb - tset_up)
    db_stats <- as.list(psych::describe(db))
    db_stats
  } |
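# Usage sketch (needs dplyr and psych): temperatures below, inside and
# above the set-point range give deviations db = c(2, 0, 2).
str(calculate_db(tb = c(28, 31, 35), tset_low = 30, tset_up = 33)) |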
block_section_continuous <- function( ){
block_section(prop_section(type = "continuous"))
}
block_section_landscape <- function( w = 21 / 2.54, h = 29.7 / 2.54, break_page = "oddPage" ){
block_section(prop_section(
page_size = page_size(width = w, height = h, orient = "landscape"),
type = break_page))
}
block_section_portrait <- function( w = 21 / 2.54, h = 29.7 / 2.54, break_page = "oddPage"){
block_section(prop_section(
page_size = page_size(width = w, height = h, orient = "portrait"),
type = break_page))
}
block_section_columns <- function(widths = c(2.5,2.5), space = .25, sep = FALSE){
block_section(prop_section(
section_columns = section_columns(widths = widths, space = space, sep = sep),
type = "continuous"))
}
to_wml.block_pour_docx <- function(x, add_ns = FALSE, base_document = NULL, ...) {
paste0("<w:altChunk r:id=\"", x$file, "\"/>")
} |
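# Usage sketch with officer (block_section/prop_section come from
# officer's API): close the current content as a landscape A4 section.
# library(officer)
# doc <- read_docx()
# doc <- body_add_par(doc, "A very wide table would go here.")
# doc <- body_end_block_section(doc, block_section_landscape())
# print(doc, target = "landscape.docx") |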
table_initializeExisting <- function(
locationTbl = NULL,
stateDataset = "NaturalEarthAdm1",
countryCodes = NULL,
distanceThreshold = NULL,
measure = c("geodesic", "haversine", "vincenty", "cheap"),
verbose = TRUE
) {
validateMazamaSpatialUtils()
MazamaLocationUtils::validateLocationTbl(locationTbl, locationOnly = TRUE)
MazamaCoreUtils::stopIfNull(distanceThreshold)
measure <- match.arg(measure)
if ( !exists(stateDataset) ) {
stop(paste0(
"You must load \"stateDataset\" with: \n",
" loadSpatialData(\"", stateDataset, "\")\n"
))
}
if ( !is.numeric(distanceThreshold) )
stop("Parameter 'distanceThreshold' must be a numeric value.")
locationTbl <- table_addCoreMetadata(locationTbl)
if ( anyNA(locationTbl$locationID) ) {
locationTbl$locationID <- location_createID(
longitude = locationTbl$longitude,
latitude = locationTbl$latitude
)
}
tbl_1 <- dplyr::filter(locationTbl, !is.na(.data$countryCode))
tbl_2 <- dplyr::filter(locationTbl, is.na(.data$countryCode))
if ( nrow(tbl_2) > 0 ) {
if ( verbose )
message(sprintf("Creating countryCodes for %d locations ...", nrow(tbl_2)))
tbl_2$countryCode <- MazamaSpatialUtils::getCountryCode(
lon = tbl_2$longitude,
lat = tbl_2$latitude,
dataset = "EEZCountries",
countryCodes = countryCodes,
useBuffering = FALSE
)
locationTbl <- dplyr::bind_rows(tbl_1, tbl_2)
}
tbl_1 <- dplyr::filter(locationTbl, !is.na(.data$stateCode))
tbl_2 <- dplyr::filter(locationTbl, is.na(.data$stateCode))
if ( nrow(tbl_2) > 0 ) {
if ( verbose )
message(sprintf("Creating stateCodes for %d locations ...", nrow(tbl_2)))
tbl_2$stateCode <- MazamaSpatialUtils::getStateCode(
lon = tbl_2$longitude,
lat = tbl_2$latitude,
dataset = stateDataset,
countryCodes = countryCodes,
useBuffering = TRUE
)
locationTbl <- dplyr::bind_rows(tbl_1, tbl_2)
}
tbl_1 <- dplyr::filter(locationTbl, !is.na(.data$locationName))
tbl_2 <- dplyr::filter(locationTbl, is.na(.data$locationName))
if ( nrow(tbl_2) > 0 ) {
if ( verbose )
message(sprintf("Creating locationNames for %d locations ...", nrow(tbl_2)))
tbl_2$locationName <- paste0(
tolower(tbl_2$countryCode), ".",
tolower(tbl_2$stateCode), "_",
stringr::str_sub(tbl_2$locationID, 1, 6)
)
locationTbl <- dplyr::bind_rows(tbl_1, tbl_2)
}
tbl_1 <- dplyr::filter(locationTbl, !is.na(.data$timezone))
tbl_2 <- dplyr::filter(locationTbl, is.na(.data$timezone))
if ( nrow(tbl_2) > 0 ) {
if ( verbose )
message(sprintf("Creating timezones for %d locations ...", nrow(tbl_2)))
tbl_2$timezone <- MazamaSpatialUtils::getTimezone(
lon = tbl_2$longitude,
lat = tbl_2$latitude,
dataset = "OSMTimezones",
useBuffering = TRUE
)
locationTbl <- dplyr::bind_rows(tbl_1, tbl_2)
}
overlappingTbl <- table_findAdjacentDistances(locationTbl, distanceThreshold, measure)
if ( nrow(overlappingTbl) > 0 ) {
overlappingCount <- nrow(overlappingTbl)
firstLine <- sprintf(
"%d locations have neighbors that are < %d m apart\n",
round(overlappingCount),
distanceThreshold
)
overlappingLines <- vector("character", length = overlappingCount)
for ( i in seq_len(nrow(overlappingTbl)) ) {
overlappingLines[i] <- sprintf(
"Distance: %6.1f -- rows %s %s",
round(overlappingTbl[i, 3], 1),
overlappingTbl[i, 1],
overlappingTbl[i, 2]
)
}
    instructions <- sprintf("
    The presence of locations closer than twice the specified distanceThreshold invalidates
    the uniqueness of a 'known locations' table and should be rectified. There are several
    basic options:
      1) Reduce the distanceThreshold to less than half the minimum distance.
      2) Manually remove one location from each pair.
      3) Manually merge nearby locations to share the same longitude, latitude and
         locationID.
    Please review the returned locationTbl for the identified rows with:
      locationTbl %%>%%
        table_findAdjacentLocations(distanceThreshold = %d, measure = \"%s\") %%>%%
        table_leaflet()
    ", round(distanceThreshold), measure)
lines <- c(firstLine, overlappingLines, instructions)
warning(paste(lines, collapse = "\n"))
}
return(locationTbl)
} |
detect_phrases <-
function(
text,
language = "en",
...
) {
if (length(text) > 1L) {
bod <- list(TextList = text, LanguageCode = language)
out <- comprehendHTTP(action = "BatchDetectKeyPhrases", body = bod, ...)
x <- bind_and_index(out$ResultList$Index, out$ResultList$KeyPhrases)
return(structure(x, ErrorList = out$ErrorList))
} else {
bod <- list(Text = text, LanguageCode = language)
out <- comprehendHTTP(action = "DetectKeyPhrases", body = bod, ...)
return(cbind(Index = 0, out$KeyPhrases))
}
} |
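# Hypothetical calls (aws.comprehend-style; require AWS credentials):
# detect_phrases("R is a language for statistical computing.")
# detect_phrases(c("First document.", "Second document."), language = "en") |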
DeaMultiplierModel <-
function (x = x,
y = y,
rts = "crs",
orientation = "input",
weightRestriction) {
rts <-
.checkoption(rts, "rts", options.rts.l)
orientation <-
.checkoption(orientation, "orientation", options.orientation.l)
x <- .checkData(x, "x")
y <- .checkData(y, "y")
.checkDataGood(x, y)
if (!missing(weightRestriction)) {
.checkWeightRestriction(x, y, weightRestriction)
}
if (nrow(x) != nrow(y))
stop("Number of DMU's in inputs != number of DMU's in outputs", call. = FALSE)
if (orientation == "input" && rts == "crs") {
results <- .Mult_lP(x, y, "crs", weightRestriction)
return(results)
} else if (orientation == "input" && rts == "vrs") {
results <- .Mult_lP(x, y, "vrs", weightRestriction)
return(results)
} else if (orientation == "output" && rts == "crs") {
results <- .Mult_OP(x, y, "crs", weightRestriction)
return(results)
} else if (orientation == "output" && rts == "vrs") {
results <- .Mult_OP(x, y, "vrs", weightRestriction)
return(results)
}
}
CrossEfficiency <-
function (x = x,
y = y,
rts = "crs",
orientation = "input",
weightRestriction) {
result <- DeaMultiplierModel(x, y, rts, orientation, weightRestriction)
    dmu <- rownames(result$InputValues)
CrossEvaluation_Matrix <-
matrix(nrow = length(dmu), ncol = length(dmu))
rownames(CrossEvaluation_Matrix) <- dmu
colnames(CrossEvaluation_Matrix) <- dmu
ce_ave <- matrix(nrow = 1, ncol = length(dmu))
colnames(ce_ave) <- dmu
rownames(ce_ave) <- "Average"
ceva_max <- matrix(nrow = 1, ncol = length(dmu))
colnames(ceva_max) <- dmu
rownames(ceva_max) <- "Max"
ceva_min <- matrix(nrow = 1, ncol = length(dmu))
colnames(ceva_min) <- dmu
rownames(ceva_min) <- "Min"
if (result$Orientation == 'Input') {
for (i in 1:length(dmu)) {
for (j in 1:length(dmu)) {
cr_ip <- result$InputValues[i, ] * result$vx[j, ]
cr_op <- result$OutputValues[i, ] * result$uy[j, ]
CrossEvaluation_Matrix[j, i] <- sum(cr_op) / sum(cr_ip)
}
}
} else if (result$Orientation == 'Output') {
for (i in 1:length(dmu)) {
for (j in 1:length(dmu)) {
cr_ip <- result$InputValues[i, ] * result$vx[j, ]
cr_op <- result$OutputValues[i, ] * result$uy[j, ]
CrossEvaluation_Matrix[j, i] <- 1 / (sum(cr_ip) / sum(cr_op))
}
}
}
for (i in 1:length(dmu)) {
ce_ave[, i] <- mean(CrossEvaluation_Matrix[, i])
}
for (i in 1:length(dmu)) {
ceva_max[, i] <- max(CrossEvaluation_Matrix[, i])
}
for (i in 1:length(dmu)) {
ceva_min[, i] <- min(CrossEvaluation_Matrix[, i])
}
return(
list(
"ceva_matrix" = CrossEvaluation_Matrix,
"ce_ave" = ce_ave,
"ceva_max" = ceva_max,
"ceva_min" = ceva_min,
"vx" = result$vx,
"uy" = result$uy,
"Model_Status" = result$Model_Status
)
)
}
Mal_Ben <-
  function(x = x,
           y = y,
           rts = "crs",
           orientation = "input",
           phase = "mal",
           weightRestriction,
           include = TRUE) {
    rts <-
      .checkoption(rts, "rts", options.rts.l)
    orientation <-
      .checkoption(orientation, "orientation", options.orientation.l)
    phase <-
      .checkoption(phase, "phase", options.phase.l)
    result <- DeaMultiplierModel(x, y, rts, orientation, weightRestriction)
    # Every rts/orientation/phase combination assembles the same result
    # list; only the Phase 2 solver differs by orientation, so dispatch on
    # orientation and pass the normalized rts and phase straight through.
    phase2_solver <-
      if (orientation == "input") .Mal_Ben_Input else .Mal_Ben_Output
    results <-
      phase2_solver(x, y, rts, result$Efficiency, phase, weightRestriction,
                    include)
    return(
      list(
        "rts" = result$rts,
        "Orientation" = result$Orientation,
        "InputValues" = result$InputValues,
        "OutputValues" = result$OutputValues,
        "Phase1_Efficiency" = result$Efficiency,
        "Phase1_Lambda" = result$Lambda,
        "Phase1_vx" = result$vx,
        "Phase1_uy" = result$uy,
        "Phase1_Free_Weights" = result$Free_Weights,
        "Phase1_Model_Status" = result$Model_Status,
        "Phase2_Efficiency" = results$Phase2_Efficiency,
        "Phase2_Lambda" = results$Phase2_Lambda,
        "Phase2_vx" = results$Phase2_vx,
        "Phase2_uy" = results$Phase2_uy,
        "Phase2_Free_weights" = results$Phase2_Free_weights,
        "Phase2_Model_Status" = results$Phase2_Model_Status,
        "ceva_matrix" = results$ceva_matrix,
        "ce_ave" = results$ce_ave,
        "ceva_max" = results$ceva_max,
        "ceva_min" = results$ceva_min
      )
    )
  } |
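# Usage sketch (assumes the internal .Mult_lP/.Mult_OP solvers and the
# options.*.l tables from the surrounding package are available):
# x <- matrix(c(4, 7, 8, 4, 2, 10), ncol = 1)  # one input per DMU
# y <- matrix(rep(1, 6), ncol = 1)             # unit output
# CrossEfficiency(x, y, rts = "crs", orientation = "input")$ce_ave |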
test_that("errors return a github_error object", {
skip_if_offline("github.com")
skip_on_cran()
skip_if_no_token()
e <- tryCatch(gh("/missing", .token = tt()), error = identity)
expect_s3_class(e, "github_error")
expect_s3_class(e, "http_error_404")
})
test_that("can catch a given status directly", {
skip_if_offline("github.com")
skip_on_cran()
skip_if_no_token()
e <- tryCatch(
gh("/missing", .token = tt()),
"http_error_404" = identity
)
expect_s3_class(e, "github_error")
expect_s3_class(e, "http_error_404")
}) |
library(highcharter)
data(worldgeojson, package = "highcharter")
data("GNI2014", package = "treemap")
head(GNI2014)
GNI2014$value <- GNI2014$population
highchart(type = "map") %>%
hc_add_series(mapData = worldgeojson, data = list_parse(GNI2014),
joinBy = "iso3") %>%
hc_colorAxis() %>%
hc_tooltip(useHTML = TRUE, headerFormat = "",
pointFormat = "This is {point.name}, with {point.population} people and a GNI of {point.GNI}") |
.parse_icd9cm_cc <- function(save_pkg_data = FALSE) {
assert_flag(save_pkg_data)
hcc_icd9_dir <- file.path(.get_raw_data_path(), "icd_hcc_rawdata", "icd9")
icd9_map_cc <- lapply(
list.files(hcc_icd9_dir, full.names = TRUE),
FUN = read.fwf, widths = c(7, 4), header = FALSE, stringsAsFactors = FALSE
)
years <- list()
years$icd9 <- as.numeric(
substr(list.files(hcc_icd9_dir), 0, 4)
)
icd9_map_cc <- mapply(
cbind, icd9_map_cc,
"year" = years$icd9, SIMPLIFY = FALSE
)
rm(years)
icd9_map_cc <- do.call(rbind, icd9_map_cc)
colnames(icd9_map_cc) <- c("icd_code", "cc", "year")
icd9_map_cc$cc <- as.numeric(icd9_map_cc$cc)
icd9_map_cc$icd_code <- trimws(icd9_map_cc$icd_code)
extracodes <- list()
extracodes$e1 <- c("40403", "40413", "40493")
extracodes$e1 <- expand.grid(
extracodes$e1, 80, 2007:2012,
stringsAsFactors = FALSE
)
extracodes$e2 <- c("40401", "40403", "40411", "40413", "40491", "40493")
extracodes$e2 <- expand.grid(
extracodes$e2, 85, 2013,
stringsAsFactors = FALSE
)
extracodes$e3 <- c("40403", "40413", "40493")
extracodes$e3 <- expand.grid(
extracodes$e3, 85, 2014:2015,
stringsAsFactors = FALSE
)
extracodes$e4 <- c("3572", "36202")
extracodes$e4 <- expand.grid(
extracodes$e4, 18, 2013,
stringsAsFactors = FALSE
)
extracodes$e5 <- "36202"
extracodes$e5 <- expand.grid(
extracodes$e5, 18, 2014:2015,
stringsAsFactors = FALSE
)
extracodes <- do.call(rbind, extracodes)
colnames(extracodes) <- c("icd_code", "cc", "year")
icd9_map_cc <- rbind(icd9_map_cc, extracodes)
rm(extracodes)
if (save_pkg_data) {
.save_in_data_dir(icd9_map_cc)
}
invisible(icd9_map_cc)
}
.icd10_hcc_fix_tabs <- function() {
.fix <- function(x) {
p1 <- sub("[[:space:]]+.*", "", x)
p2 <- sub("[[:alnum:]]*[[:space:]]+", "", x)
p2 <- sub("[[:space:]]+", "", p2)
p2 <- sub("[[:alpha:]]$", "", trimws(p2))
sprintf("%-8s%-4s", p1, p2)
}
hcc_icd10_dir <- file.path(.get_raw_data_path(), "icd_hcc_rawdata", "icd10")
for (f in list.files(hcc_icd10_dir, full.names = TRUE)) {
tabbed <- readLines(f)
writeLines(.fix(tabbed), f)
}
}
.parse_icd10cm_cc <- function(save_pkg_data = FALSE) {
assert_flag(save_pkg_data)
hcc_icd10_dir <- file.path(.get_raw_data_path(), "icd_hcc_rawdata", "icd10")
icd10_map_cc <- lapply(
list.files(hcc_icd10_dir,
full.names = TRUE
),
FUN = read.fwf,
widths = c(7, 4),
header = FALSE,
stringsAsFactors = FALSE
)
years <- list()
years$icd10 <- as.numeric(
substr(list.files(hcc_icd10_dir), 0, 4)
)
icd10_map_cc <- mapply(
cbind,
icd10_map_cc,
"year" = years$icd10,
SIMPLIFY = FALSE
)
rm(years)
icd10_map_cc <- do.call(rbind, icd10_map_cc)
colnames(icd10_map_cc) <- c("icd_code", "cc", "year")
cc_as_num <- as.integer(trimws(icd10_map_cc$cc))
if (any(is.na(cc_as_num))) {
stop("Some condition codes are not integers")
}
icd10_map_cc$cc <- cc_as_num
icd10_map_cc$icd_code <- trimws(icd10_map_cc$icd_code)
if (save_pkg_data) {
.save_in_data_dir(icd10_map_cc)
}
invisible(icd10_map_cc)
}
.parse_cc_hierarchy <- function(save_pkg_data = FALSE) {
assert_flag(save_pkg_data)
hierarchy_path <- file.path(
.get_raw_data_path(),
"icd_hcc_rawdata", "hierarchy"
)
stopifnot(dir.exists(hierarchy_path))
hierarchy_files <- list.files(hierarchy_path)
hierarchy_file_paths <- list.files(hierarchy_path, full.names = TRUE)
icd_map_cc_hcc <- lapply(hierarchy_file_paths, FUN = readLines)
years <- substr(hierarchy_files, 0, 4)
icd_map_cc_hcc <- mapply(cbind, icd_map_cc_hcc,
"year" = years,
SIMPLIFY = FALSE
)
icd_map_cc_hcc <- lapply(icd_map_cc_hcc, as.data.frame,
stringsAsFactors = FALSE
)
icd_map_cc_hcc <- do.call(rbind, icd_map_cc_hcc)
icd_map_cc_hcc$year <- as.numeric(icd_map_cc_hcc$year)
icd_map_cc_hcc <- icd_map_cc_hcc[grepl("if hcc|%SET0", icd_map_cc_hcc[[1]]), ]
colnames(icd_map_cc_hcc)[1] <- "condition"
icd_map_cc_hcc[["ifcc"]] <- as.numeric(
str_extract(icd_map_cc_hcc$condition,
"(?<=hcc)([0-9]*)|(?<=CC\\=)([0-9]*)",
perl = TRUE
)
)
todrop <- str_extract(
string = icd_map_cc_hcc$condition,
pattern =
"(?<=i\\=)([[:print:]]*)(?=;hcc)|(?<=STR\\()([[:print:]]*)(?= \\)\\);)",
perl = TRUE,
fun = `[`
)
longest <- max(vapply(strsplit(todrop, " ,"), length, integer(1))) + 1
todrop <- strsplit(todrop, ",")
todrop <- do.call(rbind, lapply(todrop, function(x) {
length(x) <- longest
x
}))
todrop <- suppressWarnings(
as.data.frame(apply(todrop, 2, as.numeric), stringsAsFactors = FALSE)
)
icd_map_cc_hcc <- cbind(icd_map_cc_hcc[, c("year", "ifcc")], todrop)
if (save_pkg_data) {
.save_in_data_dir(icd_map_cc_hcc)
}
invisible(icd_map_cc_hcc)
} |
CHull <-
function(data, bound = "lower", PercentageFit = .01){
UseMethod("CHull")
} |
bcbcsf_fitpred <- function (
X_tr, y_tr, nos_fsel = ncol (X_tr),
X_ts = NULL, standardize = FALSE, rankf = FALSE,
burn = NULL, thin = 1, offset_sdxj = 0.5,
no_rmc = 1000, no_imc = 5, no_mhwmux = 10,
fit_bcbcsf_filepre = ".fitbcbcsf_",
w0_mu = 0.05, alpha0_mu = 0.5, alpha1_mu = 3,
w0_x = 1.00, alpha0_x = 0.5, alpha1_x = 10,
w0_nu = 0.05, alpha0_nu = 0.5, prior_psi = NULL,
stepadj_mhwmux = 1, diag_mhwmux = FALSE,
bcor = 1, cut_qf = exp (-10), cut_dpoi = exp (-10), nos_sim = 1000,
monitor = TRUE)
{
if (!is.matrix (X_tr)) stop ("X_tr must be a matrix")
n <- nrow (X_tr)
p <- ncol (X_tr)
nos_g <- as.vector (table (y_tr))
G <- length (nos_g)
if (any(nos_g < 2)) stop ("fewer than 2 cases in some group of your data")
if (is.null (prior_psi)) prior_psi <- rep (1, G)
if (length (prior_psi) != G) stop ("length of prior_psi is wrong")
post_y <- nos_g + prior_psi
freqy <- post_y / sum (post_y)
info_sel <- list (vars = 1:p)
if (any (c(rankf, nos_fsel < p)))
info_sel <- rank_F (X_tr, y_tr)
p_fselmax <- max (nos_fsel)
fselmax <- info_sel $ vars [1 : p_fselmax]
X_tr <- X_tr[, fselmax, drop = FALSE]
mle_ori_fselmax <- trpr_mle (
X_tr = X_tr, y_tr = y_tr, rankf = FALSE)$list_fit_mle[[1]]
nuj_ori_fselmax <- rep (0, p_fselmax)
wxj_ori_fselmax <- rep (1, p_fselmax)
if (standardize)
{
nuj_ori_fselmax <- mle_ori_fselmax $ nuj
wxj_ori_fselmax <- mle_ori_fselmax $ wxj
X_tr <- sweep (X_tr, 2, nuj_ori_fselmax, "-")
X_tr <- sweep (X_tr, 2, sqrt (wxj_ori_fselmax), "/")
}
tgsum_X_fselmax <- t (rowsum (X_tr,y_tr))
sum_X2_fselmax <- colSums (X_tr^2)
nnfsel <- length (nos_fsel)
fitfiles <- rep ("", nnfsel)
array_probs_pred <- NULL
if (!is.null (X_ts))
{
n_ts <- nrow (X_ts)
array_probs_pred <- array (0, dim = c(n_ts, G, nnfsel) )
}
if (monitor)
cat (" Be Patient ... BCBCSF is fitting... \n")
finished <- 0
total <- sum (nos_fsel) * no_rmc
if (monitor) pb <- txtProgressBar(min = 0, max = total, style = 3)
for (i in seq (1, nnfsel) )
{
k <- nos_fsel [i]
if (!is.null (fit_bcbcsf_filepre))
fitfiles[i] <-
paste (fit_bcbcsf_filepre,
"alpha1_mu_",alpha1_mu, "_n_",n, "_p_", p,
"_nfsel_", k, "_biascor_", bcor, ".RData", sep = "")
if (k >= 1)
{
fsel <- info_sel $ vars [1:k]
nuj_std <- nuj_ori_fselmax [1:k]
wxj_std <- wxj_ori_fselmax [1:k]
cut_F <- info_sel $ fstats [k]
nos_omit <- p - k
tgsum_X <- tgsum_X_fselmax [1:k,,drop = FALSE]
sum_X2 <- sum_X2_fselmax [1:k]
if (bcor == 1 & k < p)
{
qflmd <- gen_qflmd (y_tr, cut_F, alpha1_mu, alpha1_x,
cut_qf, nos_sim)
}
alpha_wmuj <- (alpha1_mu + G) / 2
alpha_wxj <- (alpha1_x + n) / 2
alpha_wnu <- (alpha0_nu + k) / 2
alpha_wmu <- alpha1_mu * k / 2 - alpha0_mu / 2
alpha_wx <- alpha1_x * k / 2 - alpha0_x / 2
lambda0_wmu <- alpha0_mu * w0_mu / 2
lambda0_wx <- alpha0_x * w0_x / 2
stepsizes_mhwmux <- stepadj_mhwmux /
sqrt ( 10 * c(max(alpha0_mu,alpha_wmu), max(alpha0_x, alpha_wx)) )
muj <- mle_ori_fselmax$muj[1:k,,drop = FALSE] - nuj_std
wxj <- mle_ori_fselmax$wxj[1:k] / wxj_std
wmuj <- mle_ori_fselmax$wmuj[1:k] * 0.01
nuj <- rowMeans (muj)
wx <- 1/mean (1/wxj)
logwx <- log (wx)
wmu <- w0_mu
logwmu <- log (wmu)
wnu <- 1
MUJ <- array (0, dim = c(k, G, no_rmc))
WXJ <- array (0, dim = c(k, no_rmc))
WMUJ <- array (0, dim = c(k, no_rmc))
NUJ <- array (0, dim = c(k, no_rmc))
WX <- array (0, dim = no_rmc)
WMU <- array (0, dim = no_rmc)
WNU <- array (0, dim = no_rmc)
j_save <- 1
i_save <- no_imc * j_save
for (i_mc in 1 : (no_rmc * no_imc))
{
vars_muj <- 1 / (1/wmuj + outer(1/wxj,nos_g) )
means_muj <- (nuj / wmuj + tgsum_X / wxj) * vars_muj
muj <- means_muj + replicate (G, rnorm (k)) * sqrt (vars_muj)
lambda_wxj <- (alpha1_x * wx + sum_X2 -
2 * rowSums (tgsum_X * muj) +
rowSums (sweep (muj^2, 2, nos_g, "*")) )/2
wxj <- rinvgam (k, alpha_wxj, lambda_wxj)
lambda_wmuj <- (alpha1_mu * wmu + rowSums ((muj - nuj)^2) ) / 2
wmuj <- rinvgam (k, alpha_wmuj, lambda_wmuj)
sum_muj <- rowSums (muj)
vars_nuj <- 1 / (1 / wnu + G / wmuj)
means_nuj <- sum_muj / wmuj * vars_nuj
nuj <- means_nuj + rnorm (k) * sqrt (vars_nuj)
lambda_wnu <- (alpha0_nu * w0_nu + sum (nuj^2) ) / 2
wnu <- rinvgam (1, alpha_wnu, lambda_wnu)
lambda_wmu <- alpha1_mu * sum(1/wmuj) / 2
lambda_wx <- alpha1_x * sum(1/wxj) / 2
logpost_logwmux <- function (lw)
{
w <- exp (lw)
b4cor <-
alpha_wmu * lw[1] - lambda_wmu * w[1] -
lambda0_wmu / w[1] + alpha_wx * lw[2] -
lambda_wx * w[2] - lambda0_wx / w[2]
if (bcor == 1 & nos_omit > 0)
b4cor + nos_omit *
log (comp_adjfactor (w[1], w[2], qflmd, cut_dpoi))
else b4cor
}
log_wmu_wx <- met_gauss (
iters = no_mhwmux, log_f = logpost_logwmux,
ini_value = c(logwmu, logwx), stepsize = stepsizes_mhwmux,
diag_mh = diag_mhwmux )
logwmu <- log_wmu_wx [1]
logwx <- log_wmu_wx [2]
wmu <- exp (logwmu)
wx <- exp (logwx)
if (i_mc == i_save)
{
MUJ [,,j_save] <- muj
WXJ [,j_save] <- wxj
NUJ [,j_save] <- nuj
WMUJ [,j_save] <- wmuj
WX [j_save] <- wx
WMU [j_save] <- wmu
WNU [j_save] <- wnu
j_save <- j_save + 1
i_save <- j_save * no_imc
finished <- finished + nos_fsel [i]
if (monitor)
{
setTxtProgressBar(pb, finished)
}
}
}
fit_bcbcsf <- list (
fsel = fsel, nuj_std = nuj_std, wxj_std = wxj_std,
MUJ = MUJ, WXJ = WXJ, NUJ = NUJ, WMUJ = WMUJ, WX = WX,
WMU = WMU, WNU = WNU, freqy = freqy,
no_imc = no_imc, no_rmc = no_rmc, bias_corrected = bcor )
}
else fit_bcbcsf <- list (fsel = NULL, freqy = freqy)
if (fitfiles[i] != "") save (fit_bcbcsf, file = fitfiles[i])
if (!is.null (X_ts))
{
array_probs_pred[,,i] <-
mcmc_pred (X_ts = X_ts, fit_bcbcsf = fit_bcbcsf,
burn = burn, thin = thin, offset_sdxj = offset_sdxj)
}
}
if (monitor) close (pb)
if (!is.null (array_probs_pred))
{
dims <- dim (array_probs_pred)
dimnames (array_probs_pred) <- list(paste("Case", 1:dims[1], sep=""),
paste("Class", 1:dims[2], sep=""),
paste("fsel", 1:dims[3], sep=""))
}
list (fit_bcbcsf = fit_bcbcsf,
fitfiles = fitfiles,
array_probs_pred = array_probs_pred,
nos_fsel = nos_fsel )
}
bcbcsf_pred <- function (
X_ts, out_fit, burn = NULL, thin = 1, offset_sdxj = 0.5)
{
if (is.vector (X_ts)) X_ts <- matrix (X_ts, nrow = 1)
fitfiles <- out_fit$fitfiles
nos_fsel <- out_fit$nos_fsel
array_probs_pred <- NULL
for (i in 1:length (nos_fsel))
{
fit_bcbcsf <- reload_fit_bcbcsf (fitfiles[i])
probs_pred <- mcmc_pred (X_ts = X_ts, fit_bcbcsf = fit_bcbcsf,
burn = burn, thin = thin, offset_sdxj = offset_sdxj)
array_probs_pred <- abind (array_probs_pred, probs_pred, along = 3)
}
dims <- dim (array_probs_pred)
dimnames (array_probs_pred) <- list(paste("Case", 1:dims[1], sep=""),
paste("Class", 1:dims[2], sep=""),
paste("fsel", 1:dims[3], sep=""))
list (fitfiles = fitfiles,
array_probs_pred = array_probs_pred,
nos_fsel = nos_fsel)
}
mcmc_pred <- function (
X_ts, fit_bcbcsf = NULL, fit_bcbcsf_file = NULL,
burn = NULL, thin = 1, offset_sdxj = 0.5)
{
n <- nrow (X_ts)
if (is.null (fit_bcbcsf))
{
fit_bcbcsf <- reload_fit_bcbcsf (fit_bcbcsf_file)
}
if (is.null (fit_bcbcsf$fsel))
{
t (replicate (n, fit_bcbcsf$freqy) )
}
else
{
if (is.null (burn)) burn <- floor (fit_bcbcsf$no_rmc * 0.2)
mu_dim <- dim (fit_bcbcsf$MUJ)
k <- mu_dim [1]
G <- mu_dim [2]
no_rmc <- mu_dim [3]
X_ts <- X_ts[, fit_bcbcsf$fsel, drop = FALSE]
X_ts <- sweep (X_ts,2, fit_bcbcsf$nuj_std, "-")
X_ts <- sweep (X_ts,2, sqrt(fit_bcbcsf$wxj_std), "/")
ix_pred <- burn + thin * seq(0, (no_rmc - burn) %/% thin )
SDXJ <- sqrt(fit_bcbcsf$WXJ[,ix_pred, drop = FALSE])
if (offset_sdxj > 1E-5)
{
offset <- quantile (SDXJ, offset_sdxj)
}
else offset <- 0
SDXJ <- SDXJ + offset
.C ( "pred_ht", n, k, G, length(ix_pred), X_ts,
fit_bcbcsf$MUJ[,,ix_pred], SDXJ, log(fit_bcbcsf$freqy),
probs_pred = matrix(0,n,G), PACKAGE = "BCBCSF"
) $ probs_pred
}
}
mlepred <- function (X_ts, fit_mle)
{
n <- nrow (X_ts)
if (is.null (fit_mle$fsel) )
{
t(replicate (n, fit_mle$freqy))
}
else
{
k <- nrow (fit_mle$muj)
G <- ncol (fit_mle$muj)
.C ("pred_ht",
n, k, G, as.integer(1), X_ts[, fit_mle$fsel], fit_mle$muj,
sqrt (fit_mle$wxj),log (fit_mle$freqy),
probs_pred = matrix (0, n, G), PACKAGE = "BCBCSF"
) $ probs_pred
}
}
bcbcsf_sumfit <- function (
fit_bcbcsf = NULL, fit_bcbcsf_afile = NULL, burn = NULL, thin = 1)
{
if (is.null(fit_bcbcsf)) fit_bcbcsf <- reload_fit_bcbcsf (fit_bcbcsf_afile)
if (is.null (burn)) burn <- floor (fit_bcbcsf$no_rmc * 0.2)
nuj <- apply (fit_bcbcsf $ NUJ[, - (1:burn), drop = FALSE], 1, median )
wmuj <- apply (fit_bcbcsf $ WMUJ[, - (1:burn), drop = FALSE], 1, median )
wx <- median (fit_bcbcsf $ WX[- (1:burn)] )
wmu <- median (fit_bcbcsf $ WMU[- (1:burn)] )
wnu <- median (fit_bcbcsf $ WNU[- (1:burn)] )
muj <- apply (fit_bcbcsf $ MUJ[,, - (1:burn), drop = FALSE], c(1,2), median)
wxj <- apply (fit_bcbcsf $ WXJ[, - (1:burn), drop = FALSE], 1, median)
cmuj <- muj - apply (muj,1, mean)
scmuj <- cmuj/sqrt(wxj)
signalj <- apply (scmuj, 1, sd)
freqy <- fit_bcbcsf$freqy
fsel <- fit_bcbcsf$fsel
list (
nuj_std = fit_bcbcsf$nuj_std,
wxj_std = fit_bcbcsf$wxj_std,
nuj = nuj,
wx = wx,
wmu = wmu,
wnu = wnu,
wmuj = wmuj,
cmuj = cmuj,
muj = muj,
wxj = wxj,
scmuj = scmuj,
signalj = signalj,
freqy = freqy,
fsel = fsel
)
}
reload_fit_bcbcsf <- function (fit_bcbcsf_afile)
{
local ({
fit_bcbcsf <- get(load (fit_bcbcsf_afile))
return (fit_bcbcsf)
})
}
bcbcsf_plotsumfit <- function (sum_fit)
{
G <- ncol (sum_fit$scmuj)
par (mfrow = c(G+1,1), mar = c(4,4,3,0.5))
ylim <- range (sum_fit$scmuj)
for (g in 1:G)
{
plot (sum_fit$scmuj[,g], type = "h", ylim = ylim,
ylab = "Normalized Mean",
xlab = "Gene Rank by F-statistic",
main = sprintf ("Normalized Means (Signals) of Class %d", g))
}
plot (sum_fit$signalj, type = "h", ylim = c(0, max(sum_fit$signalj)),
ylab = "Average Signal Level",
xlab = "Gene Rank by F-statistic",
main = "Overall Signal Levels of Top Genes")
}
log_sum_exp <- function(lx)
{ mlx <- max(lx)
log(sum(exp(lx - mlx))) + mlx
}
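# Added example (not in the original source): log_sum_exp computes
# log(sum(exp(lx))) without the underflow a naive evaluation suffers.
if (FALSE) {
  lx <- c(-1000, -1001)
  log(sum(exp(lx)))  # -Inf, because exp() underflows to 0
  log_sum_exp(lx)    # about -999.69, computed stably
}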
rinvgam <- function (n, alpha, lambda)
{
1 / rgamma (n, alpha, 1) * lambda
}
richisq <- function (n, alpha, w = 1)
{
1 / rgamma (n, alpha / 2) * alpha * w / 2
}
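# Added sanity checks (illustrative, not in the original source): rinvgam has
# mean lambda/(alpha - 1); richisq draws scaled inverse chi-squares with mean
# alpha * w / (alpha - 2).
if (FALSE) {
  mean(rinvgam(1e5, alpha = 5, lambda = 2))  # should be near 2/4 = 0.5
  mean(richisq(1e5, alpha = 6, w = 2))       # should be near 12/4 = 3
}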
met_gauss <- function
( iters = 100, log_f, ini_value, stepsize = 0.5, diag_mh = FALSE, ...)
{
state <- ini_value
no_var <- length (state)
mchain <- matrix (state, no_var , iters)
nos_rej <- 0
logf <- log_f (state,...)
if (!is.finite (logf)) stop ("Initial value has 0 probability")
for (i in 1:iters)
{
new_state <- rnorm (no_var, state, stepsize)
new_logf <- log_f (new_state,...)
if (log (runif(1)) < new_logf - logf)
{
state <- new_state
logf <- new_logf
}
else nos_rej <- nos_rej + 1
mchain[,i] <- state
}
if (diag_mh)
{
cat ("Markov chain is saved in 'mchain' with columns for iterations\n")
cat ("Rejection rate = ", nos_rej / iters, "\n")
browser ()
}
state
}
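# Usage sketch (added example): met_gauss is a plain random-walk Metropolis
# sampler that returns the final state; the standard-normal target below is an
# illustrative assumption.
if (FALSE) {
  draws <- replicate(1000,
    met_gauss(iters = 50, log_f = function(x) -x^2 / 2,
              ini_value = 0, stepsize = 1))
  c(mean(draws), sd(draws))  # should be near 0 and 1
}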
trpr_mle <- function (X_tr, y_tr, X_ts = NULL,
nos_fsel = ncol (X_tr), rankf = FALSE)
{
n <- nrow (X_tr)
p <- ncol (X_tr)
nos_g <- as.vector (tapply (rep(1,n),INDEX = y_tr, sum))
G <- length (nos_g)
if (any(nos_g < 2)) stop ("Fewer than 2 cases in some group")
freqy = nos_g / sum (nos_g)
if (any (c(rankf, nos_fsel < p)) )
info_sel <- rank_F (X_tr, y_tr)
else
{ info_sel <- list (vars = 1:p)
}
nnfsel <- length (nos_fsel)
list_fit_mle <- rep (list (""), nnfsel)
array_probs_pred <- NULL
if (!is.null (X_ts))
array_probs_pred <- array (0, dim = c (nrow (X_ts), G, nnfsel) )
for (i in 1:nnfsel)
{
k <- nos_fsel [i]
if (k == 0)
{
fsel <- NULL
fit_mle <- list (freqy = freqy)
}
else
{
fsel <- info_sel $ vars [1:k]
X_tr_sel <- X_tr [, fsel, drop = FALSE]
gsum_X <- rowsum (X_tr_sel,y_tr)
sum_X2 <- colSums (X_tr_sel^2)
sum_gsum2 <- colSums (gsum_X^2 / nos_g )
pvars <- (sum_X2 - sum_gsum2) / (n-G)
muj <- t(gsum_X / nos_g)
wxj <- pvars
wx <- 1/mean (1/wxj)
nuj <- rowMeans (muj)
wnu <- mean (nuj^2)
cmuj <- muj - nuj
wmuj <- rowMeans (cmuj^2)
wmu <- 1/mean (1/wmuj)
scmuj <- cmuj/sqrt (wxj)
fit_mle <- list (
muj = muj, wxj = wxj, wmuj = wmuj, nuj = nuj, cmuj = cmuj,
wx = wx, wmu = wmu, wnu = wnu, scmuj = scmuj,
freqy = freqy,
fsel = fsel )
}
list_fit_mle [[i]] <- fit_mle
if (!is.null (X_ts))
{
array_probs_pred [,,i] <- mlepred (X_ts = X_ts, fit_mle = fit_mle)
}
}
list (
array_probs_pred = array_probs_pred,
nos_fsel = nos_fsel, list_fit_mle = list_fit_mle)
}
rank_F <- function(X, y)
{
comp_fstats <- function (X,y)
{
n <- length (y)
nos_g <- as.vector (tapply (rep (1,n), INDEX = y, sum))
G <- length (nos_g)
gsum_X <- rowsum (X,y)
sum_X2 <- colSums (X^2)
sum_gsum2 <- colSums (gsum_X^2 / nos_g )
pvars <- (sum_X2 - sum_gsum2) / (n-G)
sum_X <- colSums (X)
gvars <- (sum_gsum2 - sum_X^2 / n) / (G-1)
gvars / pvars
}
fstats <- comp_fstats (X, y)
vars <- order (fstats, decreasing = TRUE)
list (vars=vars, fstats = fstats [vars])
}
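# Cross-check sketch (added example): comp_fstats inside rank_F computes the
# usual one-way ANOVA F statistic, so the top-ranked value should match anova().
if (FALSE) {
  X <- matrix(rnorm(60), nrow = 20)
  y <- rep(1:2, each = 10)
  rf <- rank_F(X, y)
  rf$fstats[1]
  anova(lm(X[, rf$vars[1]] ~ factor(y)))$`F value`[1]  # same value
}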
gen_qflmd <- function (y_tr, cut_F, alpha1_mu = 1, alpha1_x = 10,
cut_qf = exp (-10), nos_sim = 1000)
{
n <- length (y_tr)
nos_g <- tapply (rep(1,n), INDEX = y_tr, sum)
G <- length(nos_g)
qf <- c()
l <- 1
while (TRUE)
{
qf [l] <- pf(cut_F*(G-1)/(G-1 + 2*(l-1)), G-1 + 2*(l-1), n-G)
if( qf[l] <= cut_qf ) break
l <- l + 1
}
gen_adev <- function ()
{
mu <- rnorm (G)
mu.bar <- sum(mu * nos_g) / n
sum (mu^2 * nos_g) - n * mu.bar^2
}
devs <- replicate (nos_sim, gen_adev() )
plmd <- devs/2 * richisq (nos_sim,alpha1_mu) / richisq (nos_sim,alpha1_x)
list (qf = qf, plmd = plmd)
}
comp_adjfactor <- function(w_mu, w_x, qflmd, cut_dpoi = exp (-10) )
{
lmd <- qflmd$plmd * w_mu / w_x
qf <- qflmd$qf
.C("comp_adjfactor", PACKAGE = "BCBCSF",
cut_dpoi, length(qf),length(lmd), qf, lmd, adjf = 0.0 )$adjf
} |
suppressPackageStartupMessages(library("argparse"))
parser = ArgumentParser()
parser$add_argument("--infercnv_obj", help="infercnv_obj file", required=TRUE, nargs=1)
args = parser$parse_args()
library(infercnv)
library(tidyverse)
library(futile.logger)
infercnv_obj_file = args$infercnv_obj
infercnv_obj = readRDS(infercnv_obj_file)
if (! infercnv:::has_reference_cells(infercnv_obj)) {
stop("Error, cannot tune parameters without reference 'normal' cells defined")
}
expr_vals <- [email protected]
mu = mean(expr_vals)
sigma = sd(expr_vals)
nrounds = 1000
sds = c()
ngenes = nrow(expr_vals)
normal_samples = infercnv_obj@reference_grouped_cell_indices
num_normal_samples = length(normal_samples)
mean_vals_df = NULL;
z_p_val = 0.05
num_cells_to_empirical_sd = list()
ncells_partitions = seq (1,100,5)
for (ncells in ncells_partitions) {
means = c()
message(sprintf("num cells: %g", ncells))
cells_counted = 0;
for(i in 1:nrounds) {
rand.gene = sample(1:ngenes, 1) # draw a single random gene
rand.sample = sample(1:num_normal_samples, 1) # draw a single normal reference group
vals = sample(expr_vals[rand.gene, normal_samples[[rand.sample]] ], size=ncells, replace=T)
m_val = mean(vals)
means = c(means, m_val)
cells_counted = cells_counted + length(vals)
}
my.sd = sd(means)
sds = c(sds, my.sd)
num_cells_to_empirical_sd[[ ncells ]] = my.sd
df = data.frame(num_cells=ncells, vals=means)
if(is.null(mean_vals_df)) {
mean_vals_df = df
} else {
mean_vals_df = rbind(mean_vals_df, df)
}
}
num_cells = ncells_partitions
write.table(data.frame(num_cells=num_cells, sds=sds), file='num_cells_vs_sds.table.dat', quote=F, sep="\t")
fit = lm(log(sds) ~ log(num_cells))
my.spline = smooth.spline(log(num_cells), log(sds))
message("plotting log(sd) vs. log(num_cells)")
plot(log(num_cells), log(sds), main='log(sd) vs. log(num_cells)')
plot(num_cells, sds, main='sd vs. num_cells')
my.spline2 = smooth.spline(num_cells, sds)
mean_delta = qnorm(p=1-z_p_val, sd=sigma, mean=0)
normal_sd_trend = list(mu=mu,
sigma=sigma,
fit=fit,
spline=my.spline,
mean_delta=mean_delta)
for (ncells in ncells_partitions) {
message(sprintf("plotting ncells distribution: %g", ncells))
data.want = mean_vals_df %>% filter(num_cells == ncells)
p = data.want %>% ggplot(aes(vals, fill=num_cells)) +
geom_density(alpha=0.3)
sigma <- exp(predict(normal_sd_trend$fit,
newdata=data.frame(num_cells=ncells))[[1]])
message("ncells:", ncells, " sigma: ", sigma)
p = p +
stat_function(fun=dnorm, color='black', args=list('mean'=1,'sd'=sigma)) +
ggtitle(sprintf("num_cells: %g, sd: %g", ncells, sigma))
p = p +
stat_function(fun=dnorm, color='magenta', args=list('mean'=1,'sd'=num_cells_to_empirical_sd[[ ncells]] ))
pval=0.01
left_mean = 1 - 2 * (1-qnorm(p=pval, mean=1, sd=sigma))
message("left_mean: ", left_mean)
p = p +
stat_function(fun=dnorm, color='blue', args=list('mean'=left_mean,'sd'=sigma))
right_mean = 1 + 2 * (qnorm(p=1-pval, mean=1, sd=sigma)-1)
message("right_mean: ", right_mean)
p = p +
stat_function(fun=dnorm, color='blue', args=list('mean'=right_mean,'sd'=sigma))
if (FALSE) {
spline.sd = exp(predict(my.spline, x=log(ncells))$y)
p = p +
stat_function(fun=dnorm, color='green', args=list('mean'=1,'sd'=spline.sd))
spline2.sd = predict(my.spline2, x=ncells)$y
message(spline2.sd)
p = p +
stat_function(fun=dnorm, color='orange', args=list('mean'=1,'sd'=spline2.sd))
}
plot(p)
} |
map.output <- function(table, variable) {
if (any(table$lat < 0) | any(table$lon > 0)) {
world <- data.table::data.table(ggplot2::map_data("world"))
} else {
world <- data.table::data.table(ggplot2::map_data("usa"))
}
map <- ggplot2::ggplot() +
ggplot2::geom_polygon(
data = world,
ggplot2::aes(x = long, y = lat, group = group),
fill = "white",
color = "darkgrey") +
ggplot2::geom_point(
data = table,
ggplot2::aes(x = lon, y = lat, color = table[, variable]),
size = 5) +
ggplot2::scale_color_gradientn(
colours = c("red", "orange", "yellow", "green", "blue", "violet")) +
ggplot2::theme_bw() +
ggplot2::xlim(range(pretty(table$lon))) +
ggplot2::ylim(range(pretty(table$lat)))
return(map)
} |
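# Usage sketch (added example; the site table below is hypothetical):
if (FALSE) {
  sites <- data.frame(lat = c(35.9, 40.1, 44.5),
                      lon = c(-79.0, -88.2, -110.0),
                      yield = c(2.1, 3.4, 2.8))
  map.output(sites, "yield")  # all-US coordinates, so the "usa" map is used
}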
mspSubset <- function(data,
labelVector = rep(1, nrow(data)),
subsetMatrix = matrix(TRUE,nrow = length(unique(labelVector)), ncol = ncol(data))){
if(!is.xts(data)){
stop("Object 'data' is not an xts object. Please transform your data to an
extendible time series.")
}
classes <- unique(labelVector)
names(classes) <- LETTERS[1:length(classes)]
if(is.vector(labelVector)){
labelVector <- matrix(labelVector, ncol = 1)
}
classData <- cbind(labelVector, data)
SubsetData <- lapply(1:length(classes), function(i){
dataset <- classData[classData[,1] == classes[i],]
dataset <- dataset[,-1]
dataset <- dataset[,subsetMatrix[i,]]
})
names(SubsetData) <- paste0("Class", seq_along(classes), "Data")
SubsetData
} |
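# Usage sketch (added example, hypothetical data): two classes observed on an
# xts matrix of three variables, keeping a different variable subset per class.
if (FALSE) {
  library(xts)
  dat <- xts(matrix(rnorm(30), ncol = 3),
             order.by = as.POSIXct("2020-01-01") + 1:10)
  labs <- rep(1:2, each = 5)
  subs <- matrix(c(TRUE, TRUE, FALSE,
                   TRUE, FALSE, TRUE), nrow = 2, byrow = TRUE)
  mspSubset(dat, labelVector = labs, subsetMatrix = subs)
}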
findPostCov <-
function (formula, alpha = 1, data) {
checkArgsFindPostCov(formula, alpha, data)
dimens <- array (0, c(dim(data)[2]-1))
for (j in 1:(dim(data)[2]-1)) {
v <- as.factor(data[,j])
dimens[j] <- length(levels(v))
data[,j] <- as.factor(v)
}
colnames(data)[dim(data)[2]] <- "freq"
rownames(dimens) <- colnames(data)[1:(dim(data)[2] - 1)]
tools <- getTools(alpha, data)
graph <- findGraph (formula, tools)
if(!is.chordal(graph)$chordal) {
stop("graph must be decomposable")
}
cliques <- maximal.cliques (graph)
nCliques <- length(cliques)
separators <- minimal.st.separators(graph)
nSeparators <- length(separators)
cliqueFormulas <- list()
for (i in 1:nCliques) {
cliqueFormulas[[i]] <- paste("freq", tools$varNames[cliques[[i]][1]], sep = " ~ ")
if (length(cliques[[i]]) > 1)
for (j in 2:length(cliques[[i]]))
cliqueFormulas[[i]] <- paste (cliqueFormulas[[i]], tools$varNames[cliques[[i]][j]], sep = "+")
cliqueFormulas[[i]] <- as.formula(cliqueFormulas[[i]])
}
separatorFormulas <- list()
for (i in 1:nSeparators) {
if (length(separators[[i]]) > 0) {
separatorFormulas[[i]] <- paste("freq", tools$varNames[separators[[i]][1]], sep = " ~ ")
if (length(separators[[i]]) > 1)
for (j in 2:length(separators[[i]]))
separatorFormulas[[i]] <- paste (separatorFormulas[[i]], tools$varNames[separators[[i]][j]], sep = "+")
separatorFormulas[[i]] <- as.formula(separatorFormulas[[i]])
}
else
separatorFormulas[[i]] <- NA
}
cliqueMargins <- list()
for (i in 1:nCliques) {
cliqueMargins[[i]] <- xtabs (cliqueFormulas[[i]], data)
cliqueMargins[[i]] <- as.data.frame.table(cliqueMargins[[i]], responseName = "freq")
}
separatorMargins <- list()
for (i in 1:nSeparators) {
if (length(separators[[i]]) > 0) {
separatorMargins[[i]] <- xtabs (separatorFormulas[[i]], data)
separatorMargins[[i]] <- as.data.frame.table(separatorMargins[[i]], responseName = "freq")
}
else
separatorMargins[[i]] <- NA
}
X <- model.matrix (object = formula, data = data)
X <- standardizeColumnNames(X)
cliqueFormulas <- list()
for (i in 1:nCliques) {
cliqueFormulas[[i]] <- paste("freq", tools$varNames[cliques[[i]][1]], sep = " ~ ")
if (length(cliques[[i]]) > 1)
for (j in 2:length(cliques[[i]]))
cliqueFormulas[[i]] <- paste (cliqueFormulas[[i]], tools$varNames[cliques[[i]][j]], sep = "*")
cliqueFormulas[[i]] <- as.formula(cliqueFormulas[[i]])
}
separatorFormulas <- list()
for (i in 1:nSeparators) {
if (length(separators[[i]]) > 0) {
separatorFormulas[[i]] <- paste("freq", tools$varNames[separators[[i]][1]], sep = " ~ ")
if (length(separators[[i]]) > 1)
for (j in 2:length(separators[[i]]))
separatorFormulas[[i]] <- paste (separatorFormulas[[i]], tools$varNames[separators[[i]][j]], sep = "*")
separatorFormulas[[i]] <- as.formula(separatorFormulas[[i]])
}
else
separatorFormulas[[i]] <- NA
}
cliqueModelMatrices <- list()
for (i in 1:nCliques) {
cliqueModelMatrices[[i]] <- model.matrix (object = cliqueFormulas[[i]], data = cliqueMargins[[i]])
cliqueModelMatrices[[i]] <- standardizeColumnNames(cliqueModelMatrices[[i]])
cliqueModelMatrices[[i]] <- solve(cliqueModelMatrices[[i]])
}
separatorModelMatrices <- list()
for (i in 1:nSeparators) {
if (length(separators[[i]]) > 0) {
separatorModelMatrices[[i]] <- model.matrix (object = separatorFormulas[[i]], data = separatorMargins[[i]])
separatorModelMatrices[[i]] <- standardizeColumnNames(separatorModelMatrices[[i]])
separatorModelMatrices[[i]] <- solve(separatorModelMatrices[[i]])
}
else
separatorModelMatrices[[i]] <- NA
}
covMatrix <- array (0, c(dim(X)[2], dim(X)[2]))
rownames(covMatrix) <- colnames(covMatrix) <- colnames(X)
m <- multiplicity (graph, cliques, separators, tools)
for (i in 1:nCliques) {
for (j in 1:dim(cliqueModelMatrices[[i]])[2]) {
v <- array (0, c(dim(X)[2]))
rownames(v) <- colnames(X)
v[names(cliqueModelMatrices[[i]][,j])] <- cliqueModelMatrices[[i]][,j]
covMatrix <- covMatrix + trigamma(cliqueMargins[[i]]$freq[j] + alpha / length(cliqueMargins[[i]]$freq)) * v %*% t(v)
}
}
for (i in 1:nSeparators) {
if (length(separators[[i]]) > 0) {
for (j in 1:dim(separatorModelMatrices[[i]])[2]) {
v <- array (0, c(dim(X)[2]))
rownames(v) <- colnames(X)
v[names(separatorModelMatrices[[i]][,j])] <- separatorModelMatrices[[i]][,j]
covMatrix <- covMatrix - m[i] * trigamma(separatorMargins[[i]]$freq[j] + alpha / length(separatorMargins[[i]]$freq)) * v %*% t(v)
}
}
else {
v <- array (0, c(dim(X)[2]))
v[1] <- 1
covMatrix <- covMatrix - m[i] * trigamma(sum(data$freq) + alpha) * v %*% t(v)
}
}
return(covMatrix)
} |
copas <- function(x,
level.ma = x$level.ma,
gamma0.range = NULL,
gamma1.range = NULL,
ngrid = 20,
nlevels = 10,
levels = NULL,
slope = NULL,
left = NULL,
rho.bound = 0.9999,
sign.rsb = 0.1,
backtransf = x$backtransf,
title = x$title, complab = x$complab, outclab = x$outclab,
silent = TRUE,
warn = options()$warn) {
chkclass(x, "meta")
if (!is.numeric(rho.bound) || rho.bound <= 0 || rho.bound > 1)
stop("no valid value for 'rho.bound'")
if (!is.null(slope)) {
if (!is.numeric(slope))
stop("Argument 'slope' must be numeric")
if (length(slope) > 1) {
warning(paste("Argument 'slope' must be of length 1;",
"first element of vector is used"))
slope <- slope[1]
}
}
chklevel(level.ma)
if (!is.null(gamma0.range))
chknumeric(gamma0.range, length = 2)
if (!is.null(gamma1.range))
chknumeric(gamma1.range, length = 2)
chknumeric(ngrid, length = 1)
chknumeric(nlevels, length = 1)
if (!is.null(levels))
chknumeric(levels)
if (!is.null(slope))
chknumeric(slope, length = 1)
if (!is.null(left))
chklogical(left)
chknumeric(rho.bound, max = 1, length = 1)
chklevel(sign.rsb)
chklogical(backtransf)
chklogical(silent)
chklogical(warn)
oldopt <- options(warn = warn)
on.exit(options(oldopt))
TE <- x$TE
seTE <- x$seTE
studlab <- x$studlab
sel <- !is.na(TE) & !is.na(seTE)
if (length(TE) != sum(sel))
warning(paste(length(TE) - sum(sel),
"observation(s) dropped due to missing values"))
TE <- TE[sel]
seTE <- seTE[sel]
studlab <- studlab[sel]
TE.random <- x$TE.random
seTE.random <- x$seTE.random
if (x$level.ma != level.ma) {
if (x$hakn)
ci.random <- ci(TE.random, seTE.random, level = level.ma, df = x$df.hakn)
else
ci.random <- ci(TE.random, seTE.random, level = level.ma)
lower.random <- ci.random$lower
upper.random <- ci.random$upper
statistic.random <- ci.random$statistic
pval.random <- ci.random$p
}
else {
lower.random <- x$lower.random
upper.random <- x$upper.random
statistic.random <- x$statistic.random
pval.random <- x$pval.random
}
tau <- x$tau
seTE.min <- min(seTE)
seTE.max <- max(seTE)
if (!silent) {
cat("\n\n")
cat("====================================\n")
cat("========== COPAS ANALYSIS ==========\n")
cat("====================================\n\n")
cat("\n")
cat("1) Summary statistics and test for heterogeneity\n")
cat("================================================\n")
print(x)
}
if (is.null(gamma1.range)) {
gamma1.range <- c(0, 1.29 / (1 / seTE.min - 1 / seTE.max))
}
if (is.null(gamma0.range))
gamma0.range <- c(-0.25 - gamma1.range[2] / seTE.max, 2)
gamma0 <- seq(gamma0.range[1], gamma0.range[2], length = ngrid)
gamma1 <- seq(gamma1.range[1], gamma1.range[2], length = ngrid)
gamma0.min <- min(gamma0)
gamma0.max <- max(gamma0)
gamma1.min <- min(gamma1)
gamma1.max <- max(gamma1)
if (!silent) {
cat("\n")
cat("2) Ranges of gamma0, gamma1 used for calculating contour plot\n")
cat("=============================================================\n")
cat("gamma0 ranges from ",
round(gamma0.range[1], 2),
" to ",
round(gamma0.range[2],2),
"\n")
cat("gamma1 ranges from ",
round(gamma1.range[1], 2),
" to ",
round(gamma1.range[2],2),
"\n")
publprob.seTE.max <- range(pnorm(gamma0 + gamma1 / seTE.max))
publprob.seTE.min <- range(pnorm(gamma0 + gamma1 / seTE.min))
cat(paste("Range of probability publishing trial with largest SE: (",
round(publprob.seTE.max[1], 3),", ",
round(publprob.seTE.max[2], 3),")",sep = "") ,"\n")
cat(paste("Range of probability publishing trial with smallest SE: (",
round(publprob.seTE.min[1], 3),", ",
round(publprob.seTE.min[2], 3),")",sep = "") ,"\n\n")
}
if (!silent) {
cat("\n")
cat("3) Starting calculations for contour plot\n")
cat("=========================================\n")
}
if (is.null(left))
left <- as.logical(sign(metabias(x, meth = "linreg", k.min = 3)$estimate[1]) == 1)
if (left)
rho0 <- rho.bound / 2
else
rho0 <- -rho.bound / 2
TE.contour <- matrix(NA, nrow = ngrid, ncol = ngrid)
for (i in seq(along = gamma0)) {
for (j in seq(along = gamma1)) {
try(junk0 <- optim(c(TE.random, rho0, tau),
fn = copas.loglik.without.beta,
gr = copas.gradient.without.beta,
lower = c(-Inf, -rho.bound, 0),
upper = c( Inf, rho.bound, Inf),
gamma = c(gamma0[i], gamma1[j]),
TE = TE, seTE = seTE,
method = "L-BFGS-B"),
silent = TRUE)
TE.contour[i, j] <- junk0$par[1]
}
if (!silent) {
cat(paste(round(100 * i * j / (ngrid * ngrid), 0), "%, ",sep = "" ))
}
}
if (!silent) {
cat("Done\n\n")
}
if (!silent) {
cat("\n")
cat("4) Calculating approximate orthogonal line\n")
cat("==========================================\n")
}
gamma0.rescale <- (gamma0 - gamma0.min) / (gamma0.max - gamma0.min)
gamma1.rescale <- (gamma1 - gamma1.min) / (gamma1.max - gamma1.min)
if (is.null(levels))
junk <- contourLines(x = gamma0.rescale,
y = gamma1.rescale,
z = TE.contour,
nlevels = nlevels)
else
junk <- contourLines(x = gamma0.rescale,
y = gamma1.rescale,
z = TE.contour,
levels = levels)
levels <- as.numeric(unlist(junk)[names(unlist(junk)) == "level"])
nlevels <- length(levels)
nobs <- rep(NA, nlevels)
adj.r.squareds <- rep(NA, nlevels)
slopes <- rep(NA, nlevels)
se.slopes <- rep(NA, nlevels)
intercepts <- rep(NA, nlevels)
for(l in 1:(nlevels)) {
lm.op <- lm(junk[[l]]$y ~ junk[[l]]$x)
nobs[l] <- length(junk[[l]]$x)
adj.r.squareds[l] <- summary(lm.op)$adj.r.squared
slopes[l] <- lm.op$coef[2]
se.slopes[l] <- sqrt(diag(vcov(lm.op))[2])
intercepts[l] <- lm.op$coef[1]
}
adj.r.squareds[is.nan(adj.r.squareds)] <- -100
sel <- adj.r.squareds > 0
if (is.null(slope)) {
if (all(!sel)) {
warning("No contour line with corresponding adjusted r.square ",
"larger than zero")
slope <- NA
}
else
slope <- -1 / metagen(slopes[sel], 1 / sqrt(nobs)[sel],
method.tau.ci = "")$TE.fixed
}
x.slope <- ((1 - slope - intercepts ) / (slopes - slope))
y.slope <- 1 + slope * (x.slope - 1)
if (!silent) {
cat("Done\n\n")
}
if (!silent) {
cat("\n")
cat("5) Calculating TE and seTE, as come down slope\n")
cat("==============================================\n")
}
gamma0.slope <- x.slope * (gamma0.max - gamma0.min) + gamma0.min
gamma1.slope <- y.slope * (gamma1.max - gamma1.min) + gamma1.min
sel0 <- (gamma0.slope >= min(gamma0.range) &
gamma0.slope <= max(gamma0.range))
sel1 <- (gamma1.slope >= min(gamma1.range) &
gamma1.slope <= max(gamma1.range))
x.slope <- x.slope[sel0&sel1]
y.slope <- y.slope[sel0&sel1]
gamma0.slope <- gamma0.slope[sel0&sel1]
gamma1.slope <- gamma1.slope[sel0&sel1]
ord <- rev(order(pnorm(gamma0.slope + gamma1.slope / seTE.max)))
x.slope <- x.slope[ord]
y.slope <- y.slope[ord]
gamma0.slope <- gamma0.slope[ord]
gamma1.slope <- gamma1.slope[ord]
gamma0.slope <- c(10, gamma0.slope)
gamma1.slope <- c( 0, gamma1.slope)
sel2 <- !is.na(x.slope) & !is.na(y.slope)
sel3 <- !is.na(gamma0.slope) & !is.na(gamma1.slope)
x.slope <- x.slope[sel2]
y.slope <- y.slope[sel2]
gamma0.slope <- gamma0.slope[sel3]
gamma1.slope <- gamma1.slope[sel3]
n.gamma0.slope <- length(gamma0.slope)
publprob <- pnorm(gamma0.slope + gamma1.slope / seTE.max)
TE.slope <- rep(NA, n.gamma0.slope)
seTE.slope <- rep(NA, n.gamma0.slope)
rho.slope <- rep(NA, n.gamma0.slope)
tau.slope <- rep(NA, n.gamma0.slope)
beta.slope <- rep(NA, n.gamma0.slope)
loglik1 <- rep(NA, n.gamma0.slope)
conv1 <- rep(NA, n.gamma0.slope)
message1 <- rep("", n.gamma0.slope)
for (i in seq(along = gamma1.slope)) {
try(junk1 <- optim(c(TE.random, rho0, tau),
fn = copas.loglik.without.beta,
gr = copas.gradient.without.beta,
lower = c(-Inf, -rho.bound, 0),
upper = c( Inf, rho.bound, Inf),
gamma = c(gamma0.slope[i], gamma1.slope[i]),
TE = TE, seTE = seTE,
method = "L-BFGS-B"),
silent = TRUE)
TE.slope[i] <- junk1$par[1]
rho.slope[i] <- junk1$par[2]
tau.slope[i] <- junk1$par[3]
loglik1[i] <- junk1$value
conv1[i] <- junk1$convergence
message1[i] <- junk1$message
try(junk2 <- optim(c(TE.random, rho0, tau),
fn = copas.loglik.without.beta,
gr = copas.gradient.without.beta,
lower = c(-Inf, -rho.bound, 0),
upper = c( Inf, rho.bound, Inf),
gamma = c(gamma0.slope[i], gamma1.slope[i]),
TE = TE, seTE = seTE,
method = "L-BFGS-B", hessian = TRUE),
silent = TRUE)
try(seTE.slope[i] <-
sqrt(solve(junk2$hessian + 0.00000001)[1, 1]),
silent = TRUE)
if ((i > 1 & is.na(seTE.slope[i])) ||
(i > 1 & seTE.slope[i] == 0))
seTE.slope[i] <- seTE.slope[i - 1]
if (is.na(seTE.slope[i]) || seTE.slope[i] == 0)
try(seTE.slope[i] <- sqrt(1 / junk2$hessian[1, 1]),
silent = TRUE)
}
if (!silent) {
cat("Done\n\n")
}
if (!silent) {
cat("\n")
cat("6) Calculating goodness of fit, as come down orthogonal line\n")
cat("============================================================\n")
}
if (left)
rho.lim <- c(0, rho.bound)
else
rho.lim <- c(-rho.bound, 0)
TE.slope.bc <- rep(NA, n.gamma0.slope)
rho.slope.bc <- rep(NA, n.gamma0.slope)
tau.slope.bc <- rep(NA, n.gamma0.slope)
beta.slope.bc <- rep(NA, n.gamma0.slope)
loglik2 <- rep(NA, n.gamma0.slope)
conv2 <- rep(NA, n.gamma0.slope)
message2 <- rep("", n.gamma0.slope)
for (i in seq(along = gamma1.slope)) {
try(junk3 <- optim(c(TE.random, rho0, tau, 0),
fn = copas.loglik.with.beta,
lower = c(-Inf, rho.lim[1], 0 , -Inf),
upper = c( Inf, rho.lim[2], Inf, Inf),
gamma = c(gamma0.slope[i], gamma1.slope[i]),
TE = TE, seTE = seTE,
method = "L-BFGS-B"),
silent = TRUE)
TE.slope.bc[i] <- try(junk3$par[1])
rho.slope.bc[i] <- try(junk3$par[2])
tau.slope.bc[i] <- try(junk3$par[3])
beta.slope.bc[i] <- try(junk3$par[4])
loglik2[i] <- try(junk3$value)
conv2[i] <- try(junk3$convergence)
message2[i] <- try(junk3$message)
}
pval.rsb <- 1 - pchisq(2 * (loglik1 - loglik2), df = 1)
if (!silent) {
cat("Done\n\n")
}
N.unpubl <- rep(NA, length(publprob))
for (i in seq(along = N.unpubl)) {
p.si <- pnorm(gamma0.slope[i] + gamma1.slope[i] / seTE)
N.unpubl[i] <- sum((1 - p.si) / p.si)
}
ord <- rev(order(publprob))
pom <- publprob[ord]
TE.slope <- TE.slope[ord]
seTE.slope <- seTE.slope[ord]
ci.slope <- ci(TE.slope, seTE.slope, level.ma)
rho.slope <- rho.slope[ord]
tau.slope <- tau.slope[ord]
pval.rsb <- pval.rsb[ord]
N.unpubl <- N.unpubl[ord]
tres <- data.frame(seq = seq(along = pval.rsb),
cumsum = cumsum(pval.rsb <= sign.rsb),
diff = seq(along = pval.rsb) - cumsum(pval.rsb <= sign.rsb))
pval.rsb.sign.all <- all(tres$diff == 0)
pval.rsb.sign <- any(tres$diff == 0)
if (pval.rsb.sign.all) {
TE.adj <- NA
seTE.adj <- NA
pval.rsb.adj <- NA
N.unpubl.adj <- NA
tau.adj <- NA
}
else {
if (pval.rsb.sign) {
sel.adj <- tres$seq[tres$diff > 0][1]
TE.adj <- TE.slope[sel.adj]
seTE.adj <- seTE.slope[sel.adj]
pval.rsb.adj <- pval.rsb[sel.adj]
N.unpubl.adj <- N.unpubl[sel.adj]
tau.adj <- tau.slope[sel.adj]
}
else {
TE.adj <- TE.slope[1]
seTE.adj <- seTE.slope[1]
pval.rsb.adj <- pval.rsb[1]
N.unpubl.adj <- N.unpubl[1]
tau.adj <- tau.slope[1]
}
}
ci.adjust <- ci(TE.adj, seTE.adj, level.ma)
res <- list(TE = TE,
seTE = seTE,
studlab = x$studlab,
TE.random = TE.random,
seTE.random = seTE.random,
lower.random = lower.random,
upper.random = upper.random,
statistic.random = statistic.random,
pval.random = pval.random,
TE.adjust = TE.adj,
seTE.adjust = seTE.adj,
lower.adjust = ci.adjust$lower,
upper.adjust = ci.adjust$upper,
statistic.adjust = ci.adjust$statistic,
pval.adjust = ci.adjust$p,
tau.adjust = tau.adj,
pval.rsb.adj = pval.rsb.adj,
N.unpubl.adj = N.unpubl.adj,
level.ma = level.ma,
left = left,
rho.bound = rho.bound,
gamma0.range = gamma0.range,
gamma1.range = gamma1.range,
slope = slope,
regr = list(
levels = levels,
nobs = nobs,
adj.r.squareds = adj.r.squareds,
slopes = slopes,
se.slopes = se.slopes,
intercepts = intercepts),
ngrid = ngrid,
nlevels = nlevels,
gamma0 = gamma0,
gamma1 = gamma1,
TE.contour = TE.contour,
x.slope = x.slope,
y.slope = y.slope,
TE.slope = TE.slope,
seTE.slope = seTE.slope,
lower.slope = ci.slope$lower,
upper.slope = ci.slope$upper,
statistic.slope = ci.slope$statistic,
pval.slope = ci.slope$p,
rho.slope = rho.slope,
tau.slope = tau.slope,
loglik1 = loglik1,
conv1 = conv1,
message1 = message1,
loglik2 = loglik2,
conv2 = conv2,
message2 = message2,
publprob = pom,
pval.rsb = pval.rsb,
sign.rsb = sign.rsb,
N.unpubl = N.unpubl,
sm = x$sm,
title = title,
complab = complab,
outclab = outclab,
call = match.call(),
x = x)
res$version <- utils::packageDescription("metasens")$Version
if (!is.null(x$title))
res$title <- x$title
if (!is.null(x$complab))
res$complab <- x$complab
if (!is.null(x$outclab))
res$outclab <- x$outclab
res$backtransf <- backtransf
class(res) <- c("copas")
res
} |
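# Usage sketch (added example): copas() expects a meta-analysis object from the
# 'meta' package; Fleiss1993bin ships with 'meta'.
if (FALSE) {
  library(meta)
  data(Fleiss1993bin)
  m <- metabin(d.asp, n.asp, d.plac, n.plac, data = Fleiss1993bin, sm = "OR")
  copas(m, ngrid = 20, silent = FALSE)
}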
"regul.adj" <-
function(x, xmin = min(x), frequency = NULL,
         deltat = (max(x, na.rm = TRUE) - min(x, na.rm = TRUE)) / (length(x) - 1),
         tol = deltat, tol.type = "both", nclass = 50, col = c(4, 5, 2),
         xlab = paste("Time distance"), ylab = paste("Frequency"),
         main = "Number of matching observations", plotit = TRUE, ...) {
xmin <- xmin            # force evaluation of default arguments that depend on
frequency <- frequency  # x, before x is sorted and its NAs are dropped below
deltat <- deltat
x <- sort(x)
x <- x[!is.na(x)]
n <- length(x)
if (is.null(frequency)) {
if (is.null(deltat)) {
stop("You must define at least one of frequency or deltat")
} else {
frequency <- 1/deltat
}
} else {
deltat <- 1/frequency
}
if (is.null(tol) || tol == 0) tol2 <- 0 else {
tol2 <- abs(tol)
if (tol2 > deltat) tol2 <- deltat else {
tol2 <- deltat/round(deltat/tol2)
}
}
if (max(x) < xmin) {
stop("xmin is higher than the maximum value in x!")
} else {
nx <- floor((max(x) - xmin)/deltat) + 1
}
xout <- 0:(nx-1) * deltat + xmin
pos <- match.tol(xout, x, tol.type=tol.type, tol=tol2)
match.dist <- xout - x[pos]
match.dist[is.na(match.dist)] <- Inf
match.dist[(xout < min(x) | xout > max(x)) & match.dist == Inf] <- -Inf
match <- sum(is.finite(match.dist))
exact.match <- sum(match.dist == 0)
params <- c(xmin, nx, deltat, tol2)
names(params) <- c("xmin", "n", "deltat", "tol")
if (plotit == TRUE) {
if (tol2 == 0) HT <- 1.001 else HT <- 101*tol2/100
Data <- abs(match.dist)
Data[is.infinite(Data)] <- HT
Data[Data == 0] <- -0.00001
res <- hist(Data, nclass=nclass, plot=FALSE)
classes <- res$breaks[2:length(res$breaks)]
ncl <- length(classes)
classes[ncl] <- Inf
counts <- res$counts
names(counts) <- classes
cols <- NULL
cols[1] <- col[2]
if (sum(Data == -0.00001) > 0) cols[1] <- col[1]
if (ncl > 2) cols[2:(ncl-1)] <- col[2]
cols[ncl] <- col[3]
hist(Data, nclass=nclass, col=cols, xlab=xlab, ylab=ylab, main=main)
counts <- counts[counts != 0]
lc <- length(counts)
counts2 <- NULL
for (i in 1:lc) {
counts2[i] <- sum(counts[1:i])
}
names(counts2) <- names(counts)
res <- list(params=params, match=match, exact.match=exact.match, match.counts=counts2)
} else {
res <- list(params=params, match=match, exact.match=exact.match)
}
res
} |
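# Usage sketch (added example, hypothetical sampling times; match.tol() is
# provided by the pastecs package):
if (FALSE) {
  library(pastecs)
  times <- c(1, 1.9, 3.2, 4.1, 4.8, 6.1)
  regul.adj(times, xmin = 1, deltat = 1, tol = 0.3, plotit = FALSE)
}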
csbet.aSE <- function(model){
if (!inherits(model, "lme")) {
stop("a compound symmetry model (mCS) of nlme class is needed")
}
if (model$call[4] == "(~1 | vrTi/vrGr)()"){
t0 <- as.numeric(VarCorr(model)[4,1])
t1 <- as.numeric(VarCorr(model)[2,1])
sig2 <- as.numeric(VarCorr(model)[5,1])
Ny <- (max(model$data$vrTi)-min(model$data$vrTi))-1
a_betw_vrGr <- t1/sqrt((t0+t1+sig2)*(t0+t1+sig2))
SE_betw_vrGr <- sqrt(1.96*a_betw_vrGr^2*(1-a_betw_vrGr)^2/Ny)
a_betw_Grp <- round(a_betw_vrGr, 3)
SE_betw_Grp <- round(SE_betw_vrGr, 3)
}
else {stop("a compound compound symmetry structure model (mCS) of nlme class is needed")}
res <- cbind(a_betw_Grp, SE_betw_Grp)
return(res)
}
|
print.baggr <- function(x, exponent=FALSE, digits = 2, group, fixed = TRUE, ...) {
ppd <- attr(x, "ppd")
if(ppd) {
cat("Model type: Prior predictive draws for",
crayon::bold(model_names[x$model]), "\n")
} else {
cat("Model type:", crayon::bold(model_names[x$model]), "\n")
cat("Pooling of effects:", crayon::bold(x$pooling), "\n")
}
cat("\n")
if(length(x$effects) == 1)
cat(crayon::bold(paste0("Aggregate treatment effect (on ", x$effects, "):\n")))
else
cat(crayon::bold(paste0("Aggregate treatment effect:\n")))
if(x$pooling == "none") {
cat("No treatment effect estimated as pooling = 'none'.\n\n")
} else {
if(exponent)
te <- treatment_effect(x, transform = base::exp)
else
te <- treatment_effect(x)
if(exponent)
cat("Exponent of hypermean (exp(tau))")
else
cat("Hypermean (tau)")
if(length(x$effects) == 1){
tau <- format(mint(te[[1]]), digits = digits, trim = TRUE)
sigma_tau <- format(mint(te[[2]]), digits = digits, trim = TRUE)
cat(" = ", tau[2], "with 95% interval", tau[1], "to", tau[3], "\n")
if(x$pooling == "partial" && !exponent){
cat("Hyper-SD (sigma_tau) =", sigma_tau[2], "with 95% interval",
sigma_tau[1], "to", sigma_tau[3], "\n")
tot_pool <- format(heterogeneity(x)[,,1], digits = digits, trim = TRUE)
if(!ppd)
cat("Total pooling (1 - I^2) =", tot_pool[2], "with 95% interval",
tot_pool[1], "to", tot_pool[3], "\n")
}
} else {
tau <- mint(te[[1]])
sigma_tau <- mint(te[[2]])
if(x$model == "quantiles"){
rownames(tau) <- paste0(100*x$quantiles, "% quantile")
if(!exponent)
rownames(sigma_tau) <- paste0(100*x$quantiles, "% quantile")
} else if(x$model == "sslab") {
rownames(tau) <- x$effects
if(!exponent)
rownames(sigma_tau) <- x$effects
}
print(tau, digits = digits)
if(!exponent && x$pooling == "partial"){
cat(crayon::bold("\nSD of treatement effects:"))
print(sigma_tau, digits = digits)
if(x$model == "mutau") {
print("\nCorrelation of treatment effect and baseline:")
print(mutau_cor(x, summary = TRUE), digits = digits)
}
}
}
}
if(x$pooling == "full")
cat("(SD(tau) undefined.)\n")
cat("\n")
if(ppd)
return(invisible(x))
group_warning_flag <- FALSE
if(missing(group)){
group_warning_flag <- TRUE
group <- x$n_groups <= 20
}
if(group){
if(x$pooling != "full") {
pooling_tab <- pooling(x, summary = TRUE)
if(exponent)
study_eff_tab <- group_effects(x, summary = TRUE, transform=exp)
else
study_eff_tab <- group_effects(x, summary = TRUE)
for(i in 1:dim(study_eff_tab)[3]){
cat(paste0("Treatment effects on ", x$effects[i]))
if(exponent){
cat(" (converted to exp scale):\n")
tab <- cbind(study_eff_tab[,c("mean", "lci", "uci"),i],
pooling = pooling_tab[2,,i])
} else{
cat(":\n")
tab <- cbind(study_eff_tab[,c("mean", "sd"),i],
pooling = pooling_tab[2,,i])
}
print(tab, digits = digits)
}
cat("\n")
}
} else if(group_warning_flag) {
cat("Group effects omitted, as number of groups is > 20.",
"\nUse print.baggr() with group = TRUE to print them.\n")
}
if(fixed && length(x$covariates) > 0) {
if(exponent)
fixed_eff_tab <- fixed_effects(x, summary = TRUE, transform=exp)
else
fixed_eff_tab <- fixed_effects(x, summary = TRUE)
for(i in 1:dim(fixed_eff_tab)[3]){
cat(paste0("Covariate (fixed) effects on ", x$effects[i]))
if(exponent){
cat(" (converted to exp scale):\n")
tab <- cbind(fixed_eff_tab[,c("mean", "lci", "uci"),i])
} else{
cat(":\n")
tab <- cbind(fixed_eff_tab[,c("mean", "sd"),i])
}
print(tab, digits = digits)
cat("\n")
}
}
if(!is.null(x[["mean_lpd"]]))
cat("Cross-validation result: -2 * mean log predictive density =",
crayon::bold(format(x$mean_lpd)), "\n")
invisible(x)
} |
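# Usage sketch (added example): 'schools' ships with the baggr package and the
# print method above is dispatched automatically; the arguments shown are
# illustrative.
if (FALSE) {
  library(baggr)
  bg <- baggr(schools)
  print(bg, digits = 3, group = TRUE)
}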
knitr::opts_chunk$set(
collapse = TRUE,
comment = "
)
library(lrd)
data("sentence_data")
head(sentence_data)
sentence_data$Sentence <- tolower(sentence_data$Sentence)
sentence_data$Response <- tolower(sentence_data$Response)
sentence_output <-
prop_correct_sentence(data = sentence_data,
responses = "Response",
key = "Sentence",
key.trial = "Trial.ID",
id = "Sub.ID",
id.trial = "Trial.ID",
cutoff = 1,
flag = TRUE,
group.by = "Condition",
token.split = " ")
str(sentence_output)
sentence_output$DF_Scored
sentence_output$DF_Participant
sentence_output$DF_Group
library(testthat)
context("tabulate")
test_that("tabulate.ff works",{
x <- c(1,1,2,3,3)
xf <- ff(x)
expect_identical(tabulate(x), tabulate.ff(xf))
expect_identical(tabulate(x,2), tabulate.ff(xf,2))
}) |
dequeueJobs <- function(package, directory, exclude=NULL, date=format(Sys.Date())) {
runSanityChecks()
db <- getQueueFile(package=package, path=directory, date=date)
q <- ensure_queue("jobs", db = db)
con <- getDatabaseConnection(db)
createTable(con)
meta <- dbGetQuery(con, "select * from metadata")
on.exit(dbDisconnect(con))
pid <- Sys.getpid()
hostname <- Sys.info()[["nodename"]]
wd <- cwd <- getwd()
debug <- verbose <- FALSE
env <- character()
if (!is.null(cfg <- getConfig())) {
if ("setup" %in% names(cfg)) source(cfg$setup)
if ("workdir" %in% names(cfg)) {
wd <- cfg$workdir
if (!dir.exists(wd)) {
dir.create(wd)
}
}
if ("libdir" %in% names(cfg)) {
Sys.setenv("R_LIBS_USER"=cfg$libdir)
if (!dir.exists(cfg$libdir)) {
dir.create(cfg$libdir)
}
env <- paste0("R_LIBS=\"", cfg$libdir, "\"")
}
if ("verbose" %in% names(cfg)) verbose <- cfg$verbose == "true"
if ("debug" %in% names(cfg)) debug <- cfg$debug == "true"
}
good <- bad <- skipped <- 0
exclset <- if (!is.null(exclude)) getExclusionSet(exclude) else character()
if (verbose) print(exclset)
cat("
while (!is.null(msg <- try_consume(q))) {
starttime <- Sys.time()
if (debug) print(msg)
cat(msg$message, "started at", format(starttime), "")
tok <- strsplit(msg$message, "_")[[1]]
pkgfile <- paste0(msg$message, ".tar.gz")
if (tok[1] %in% exclset) {
rc <- 2
} else {
setwd(wd)
if (file.exists(pkgfile)) {
if (verbose) cat("Seeing file", pkgfile, "\n")
} else {
dl <- download.packages(tok[1], ".", quiet=TRUE)
pkgfile <- basename(dl[,2])
if (verbose) cat("Downloaded ", pkgfile, "\n")
}
cmd <- "R"
args <- c("CMD", "check", "--no-manual", "--no-vignettes", pkgfile)
if (.pkgenv[["xvfb"]] != "") {
splits <- strsplit(.pkgenv[["xvfb"]], " ")[[1]]
args <- c(splits[-1], cmd, args)
cmd <- splits[1]
}
logfile <- paste0(pkgfile, ".log")
if (debug) {
print(cmd)
print(args)
}
rc <- system2(cmd, args=args, env=env, stdout=logfile, stderr=logfile)
if (debug) print(rc)
setwd(cwd)
}
endtime <- Sys.time()
if (rc == 0) {
good <- good + 1
cat(green("success"))
ack(msg)
} else if (rc == 2) {
skipped <- skipped + 1
cat(blue("skipped"))
ack(msg)
} else {
bad <- bad + 1
cat(red("failure"))
ack(msg)
}
cat("", "at", format(endtime),
paste0("(",green(good), "/", blue(skipped), "/", red(bad), ")"),
"\n")
row <- data.frame(package=tok[1],
version=tok[2],
result=rc,
starttime=format(starttime),
endtime=format(endtime),
runtime=as.numeric(difftime(endtime, starttime, units="secs")),
runner=pid,
host=hostname)
if (debug) print(row)
insertRow(con, row)
}
requeue_failed_messages(q)
lst <- list_messages(q)
if (verbose) print(lst)
lst
}
dequeueDepends <- function(package, directory) {
db <- getQueueFile(package=package, path=directory)
q <- ensure_queue("depends", db = db)
if (!is.null(cfg <- getConfig())) {
if ("setup" %in% names(cfg)) source(cfg$setup)
if ("libdir" %in% names(cfg)) {
.libPaths(cfg$libdir)
Sys.setenv("R_LIBS_USER"=cfg$libdir)
if (!dir.exists(cfg$libdir)) {
dir.create(cfg$libdir)
}
}
}
while (!is.null(msg <- try_consume(q))) {
pkg <- msg$message
try(install.packages(pkg))
ack(msg)
}
requeue_failed_messages(q)
lst <- list_messages(q)
lst
}
globalVariables(c(".pkgenv")) |
jack.se=function(x, theta, ...){
call <- match.call()
n <- length(x)
u <- rep(0, n)
for(i in 1:n) {u[i] <- theta(x[ - i], ...)}
jack.se <- sqrt(((n - 1)/n) * sum((u - mean(u))^2))
return(jack.se)} |
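# Added example: for theta = mean, the jackknife standard error reduces exactly
# to the usual standard error of the mean, sd(x)/sqrt(n).
if (FALSE) {
  x <- rnorm(50)
  jack.se(x, theta = mean)
  sd(x) / sqrt(length(x))  # identical value
}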
library(RobStatTM)
library(robustarima)
set.seed(600)
n.innov = 300
n = 200
phi=c(4/3, -5/6,1/6 )
n.start = n.innov - n
innov = rnorm(n.innov)
cont=lmrobdet.control(bb = 0.5, efficiency = 0.85, family = "bisquare")
ar3= arima.sim(model = list(ar = phi), n, innov = innov, n.start = n.start)
ar3lm=lm(ar3[4:200]~ ar3[3:199]+ar3[2:198]+ar3[1:197])
ar3mm=lmrobdetMM(ar3[4:200]~ ar3[3:199]+ar3[2:198]+ar3[1:197] )
ar3tau=arima.rob(ar3~1, p=3)
ar3tau$regcoef=ar3tau$regcoef*(1-sum(ar3tau$model$ar))
summary(ar3lm)
summary(ar3mm)
ar3tau
ao=rep(0,n)
tt=seq(20,200,20)
ao[tt]=4
ar3c5 =ar3 +ao
ar3c5lm=lm(ar3c5[4:200]~ ar3c5[3:199]+ar3c5[2:198]+ar3c5[1:197])
ar3c5mm=lmrobdetMM(ar3c5[4:200]~ ar3c5[3:199]+ar3c5[2:198]+ar3c5[1:197], control=cont )
ar3c5tau=arima.rob(ar3c5~1, p=3)
ar3c5tau$regcoef=ar3c5tau$regcoef*(1-sum(ar3c5tau$model$ar))
summary(ar3c5lm)
summary(ar3c5mm)
ar3c5tau
ao=rep(0,n)
tt=seq(10,200,10)
ao[tt]=4
ar3c10=ar3 +ao
ar3c10lm=lm(ar3c10[4:200]~ ar3c10[3:199]+ar3c10[2:198]+ar3c10[1:197])
ar3c10mm=lmrobdetMM(ar3c10[4:200]~ ar3c10[3:199]+ar3c10[2:198]+ar3c10[1:197],control=cont)
ar3c10tau=arima.rob(ar3c10~1, p=3)
ar3c10tau$regcoef=ar3c10tau$regcoef*(1-sum(ar3c10tau$model$ar))
summary(ar3c10lm)
summary(ar3c10mm)
ar3c10tau
|
library(PELVIS)
test_that("finalSE gives correct answers", {
x <- c(0.96, 0.5, 0.04)
expect_equal(finalSE(x, 0.95), c("M", "I", "F"))
}) |
lsmi_union <- function(net, n.seeds, n.wave, seeds = NULL){
n.seeds <- n.seeds[n.seeds <= net$n]
if (is.null(seeds)) {
max_seeds <- max(n.seeds)
seeds <- sample(1:net$n, max_seeds, replace = FALSE)
} else {
seeds <- unique(seeds)
if (length(seeds) > net$n) {
seeds <- seeds[1:net$n]
}
max_seeds <- length(seeds)
n.seeds <- c(n.seeds[n.seeds < max_seeds], max_seeds)
}
seeds <- sort(seeds)
n.wave <- round(max(n.wave))
n.seeds <- sort(unique(n.seeds), decreasing = TRUE)
lsmi_big <- lsmi(net, n.wave = n.wave, seeds = seeds)
sequence_seeds <- list(seeds)
if (length(n.seeds) > 1){
for (i in 2:length(n.seeds)) {
sequence_seeds[[i]] <- sort(sample(sequence_seeds[[i - 1]], n.seeds[i], replace = FALSE))
}
}
list(lsmi_big = lsmi_big, sequence_seeds = sequence_seeds)
} |
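# Usage sketch (added example): 'artificial_networks' ships with the snowboot
# package; each element is a network list carrying an element $n.
if (FALSE) {
  library(snowboot)
  net <- artificial_networks[[1]]
  lsmi_union(net, n.seeds = c(10, 20), n.wave = 2)
}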
setClass(Class = "MxFitFunctionML",
contains = "MxBaseFitFunction",
representation = representation(
fellner = "logical",
verbose = "integer",
profileOut="MxOptionalChar",
rowwiseParallel="logical",
jointConditionOn="character",
components="MxCharOrNumber"),
)
setMethod("initialize", "MxFitFunctionML",
function(.Object, ...) {
.Object <- callNextMethod()
.Object@vector <- ..1
.Object@rowDiagnostics <- ..2
.Object@fellner <- ..3
.Object@verbose <- ..4
.Object@profileOut <- ..5
.Object@rowwiseParallel <- ..6
.Object@jointConditionOn <- ..7
.Object
})
setMethod("genericFitConvertEntities", "MxFitFunctionML",
function(.Object, flatModel, namespace, labelsData) {
name <- .Object@name
modelname <- imxReverseIdentifier(flatModel, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
expectation <- flatModel@expectations[[expectName]]
dataname <- expectation@data
if (flatModel@datasets[[dataname]]@type != 'raw') {
if (.Object@vector) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'vector' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
if (.Object@rowDiagnostics) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'rowDiagnostics' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
}
return(flatModel)
})
setMethod("genericFitFunConvert", "MxFitFunctionML",
function(.Object, flatModel, model, labelsData, dependencies) {
.Object <- callNextMethod()
name <- .Object@name
modelname <- imxReverseIdentifier(model, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
if (expectName %in% names(flatModel@expectations)) {
expectIndex <- imxLocateIndex(flatModel, expectName, name)
ex <- flatModel@expectations[[1L + expectIndex]]
if (is(ex, "MxExpectationHiddenMarkov") || is(ex, "MxExpectationMixture")) {
.Object@components <-
sapply(paste(ex@components, "fitfunction", sep="."),
function(ff) imxLocateIndex(flatModel, ff, name),
USE.NAMES = FALSE)
}
} else {
expectIndex <- as.integer(NA)
}
.Object@expectation <- expectIndex
return(.Object)
})
setMethod("genericFitInitialMatrix", "MxFitFunctionML",
function(.Object, flatModel) {
flatFitFunction <- flatModel@fitfunctions[[.Object@name]]
if (flatFitFunction@vector == FALSE) {
return(matrix(as.double(NA), 1, 1))
} else {
modelname <- imxReverseIdentifier(flatModel, flatFitFunction@name)[[1]]
expectationName <- paste(modelname, "expectation", sep = ".")
expectation <- flatModel@expectations[[expectationName]]
if (is.null(expectation)) {
msg <- paste("The ML fit function has vector = TRUE",
"and a missing expectation in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
if (is.na(expectation@data)) {
msg <- paste("The ML fit function has vector = TRUE",
"and an expectation function with no data in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
mxDataObject <- flatModel@datasets[[expectation@data]]
if (mxDataObject@type != 'raw') {
msg <- paste("The dataset associated with the ML expectation function",
"in model", omxQuotes(modelname), "is not raw data.")
stop(msg, call.=FALSE)
}
rows <- nrow(mxDataObject@observed)
return(matrix(as.double(NA), rows, 1))
}
})
setMethod("generateReferenceModels", "MxFitFunctionML",
function(.Object, model, distribution, equateThresholds) {
modelName <- model@name
datasource <- model$data
if (is.null(datasource)) {
stop(paste("Model", omxQuotes(modelName), "does not contain any data"))
}
expectation <- model@expectation
if (is(expectation, "MxExpectationBA81")) {
return(generateIFAReferenceModels(model, distribution))
}
if(is(expectation, "MxExpectationGREML")){
stop("Reference models for GREML expectation are not implemented")
}
datatype <- datasource@type
obsdata <- datasource@observed
datanobs <- datasource@numObs
wasRun <- [email protected]
if(wasRun) {
if (is.null(model@[email protected])) stop("Not clear which data were used to fit model")
selVars <- model@[email protected]
if(nrow(obsdata) == ncol(obsdata)){
if(!single.na(model@[email protected])) { obsdata <- obsdata[selVars, selVars] }
} else { obsdata <- obsdata[,selVars, drop=FALSE] }
} else {
message(paste("The model", omxQuotes(modelName), "has not been run. So reference models",
"of all the variables in the data will be made. For reference models",
"of only the variables used in the model, provide the model after it has been run."))
}
generateNormalReferenceModels(modelName, obsdata, datatype, any(!is.na(datasource@means)),
datanobs, datasource@means, distribution=distribution,
equateThresholds)
})
mxFitFunctionML <- function(vector = FALSE, rowDiagnostics=FALSE, ..., fellner=as.logical(NA),
verbose=0L, profileOut=c(), rowwiseParallel=as.logical(NA),
jointConditionOn=c('auto', 'ordinal', 'continuous')) {
prohibitDotdotdot(list(...))
if (length(vector) > 1 || typeof(vector) != "logical") {
stop("'vector' argument is not a logical value")
}
if (length(rowDiagnostics) > 1 || typeof(rowDiagnostics) != "logical") {
stop("'rowDiagnostics' argument is not a logical value")
}
if (length(fellner) > 1) {
stop("'fellner' argument must be one thing")
}
if (!is.na(fellner) && fellner && (vector || rowDiagnostics)) {
stop("'fellner' cannot be combined with 'vector' or 'rowDiagnostics'")
}
jointConditionOn <- match.arg(jointConditionOn)
return(new("MxFitFunctionML", vector, rowDiagnostics, fellner,
as.integer(verbose), as.character(profileOut), rowwiseParallel,
jointConditionOn))
}
displayMxFitFunctionML <- function(fitfunction) {
cat("MxFitFunctionML", omxQuotes(fitfunction@name), '\n')
cat("$vector :", fitfunction@vector, '\n')
cat("$rowDiagnostics :", fitfunction@rowDiagnostics, '\n')
cat("$fellner :", fitfunction@fellner, '\n')
cat("$verbose :", fitfunction@verbose, '\n')
cat("$rowwiseParallel :", fitfunction@rowwiseParallel, '\n')
cat("$jointConditionOn :", fitfunction@jointConditionOn, '\n')
cat("$result :", head(fitfunction@result),
ifelse(length(fitfunction@result)>6, "...", ""), '\n')
invisible(fitfunction)
}
setMethod("print", "MxFitFunctionML", function(x, ...) {
displayMxFitFunctionML(x)
})
setMethod("show", "MxFitFunctionML", function(object) {
displayMxFitFunctionML(object)
}) |
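# Usage sketch (added example, illustrative one-variable RAM model):
if (FALSE) {
  library(OpenMx)
  m <- mxModel("ex", type = "RAM", manifestVars = "x",
               mxPath(from = "x", arrows = 2, values = 1),
               mxPath(from = "one", to = "x"),
               mxData(data.frame(x = rnorm(100)), type = "raw"),
               mxFitFunctionML())
  summary(mxRun(m))
}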
ss.power.reg1 <- function(t.observed, N, p, alpha.prior=.05, alpha.planned=.05, assurance=.80, power=.80, step=.001)
{
if(alpha.prior > 1 | alpha.prior <= 0) stop("There is a problem with 'alpha' of the prior study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")
if(alpha.prior == 1) {alpha.prior <- .999 }
if(alpha.planned >= 1 | alpha.planned <= 0) stop("There is a problem with 'alpha' of the planned study (i.e., the Type I error rate), please specify as a value between 0 and 1 (the default is .05).")
if(assurance >= 1)
{
assurance <- assurance/100
}
if(assurance<0 | assurance>1)
{
stop("There is a problem with 'assurance' (i.e., the proportion of times statistical power is at or above the desired value), please specify as a value between 0 and 1 (the default is .80).")
}
if(assurance <.5)
{
warning( "THe assurance you have entered is < .5, which implies you will have under a 50% chance at achieving your desired level of power" )
}
if(power >= 1) power <- power/100
if(power<0 | power>1) stop("There is a problem with 'power' (i.e., desired statistical power), please specify as a value between 0 and 1 (the default is .80).")
if(missing(N)) stop("You need to specify a sample size (i.e., the number of pairs) used in the original study.")
if(N <= 1) stop("Your total sample size is too small")
if(p < 1) stop("Your number of predictors is too small")
if(N-p-1 < 1) stop("The combination of your sample size and number of predictors leads to 0 or negative degrees of freedom")
df.numerator <- 1
df.denominator <- N-p-1
NCP <- seq(from=0, to=100, by=step)
value.critical <- qf(1-alpha.prior, df1=df.numerator, df2=df.denominator)
if(t.observed^2 <= value.critical) stop("Your observed t statistic is nonsignificant based on your specified alpha of the prior study. Please increase 'alpha.prior' so 't.observed' exceeds the critical value")
power.values <- 1 - pf(value.critical, df1=df.numerator, df2=df.denominator, ncp = NCP)
area.above.F <- 1 - pf(t.observed^2, df1=df.numerator, df2=df.denominator, ncp = NCP)
area.between <- power.values - area.above.F
TM <- area.between/power.values
TM.Percentile <- min(NCP[which(abs(TM-assurance)==min(abs(TM-assurance)))])
if(TM.Percentile==0) stop("The corrected noncentrality parameter is zero. Please either choose a lower value of assurance and/or a higher value of alpha for the prior study (e.g. accounting for less publication bias)")
if (TM.Percentile > 0)
{
Nrep <- 2+p+1
denom.df <- Nrep-p-1
diff <- -1
while (diff < 0 )
{
critical.F <- qf(1-alpha.planned, df1 = df.numerator, df2 = denom.df)
powers <- 1 - pf(critical.F, df1 = df.numerator, df2 = denom.df, ncp = (Nrep/N)*TM.Percentile)
diff <- powers - power
Nrep <- Nrep + 1
denom.df = Nrep - p - 1
}
repN <- Nrep - 1
}
return(list(repN, TM.Percentile))
} |
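# Usage sketch (added example, illustrative numbers): plan a replication sample
# size for a single regression coefficient observed at t = 3.2 with N = 100
# cases and p = 5 predictors.
if (FALSE) {
  ss.power.reg1(t.observed = 3.2, N = 100, p = 5)
}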
ManageVariables <- function(cols, vars, query, changelog, parent=NULL) {
SaveChanges <- function(type) {
SaveNb()
if (!identical(cols, old.cols)) {
rtn <<- list(cols=cols, vars=vars, query=query, changelog=changelog)
old.cols <<- cols
}
if (type == "ok") tclvalue(tt.done.var) <- 1
}
SetVarId <- function(idx=NULL) {
if (is.null(idx)) idx <- as.integer(tkcurselection(f1.lst)) + 1
if (length(idx) == 0) return()
nam <- tclvalue(name.var)
cols[[idx]]$name <<- nam
if (nam == "") nam <- "Unknown"
new.id <- nam
old.id <- cols[[idx]]$id
old.ids <- vapply(cols, function(i) i$id, "")
i <- 1L
hold.new.id <- new.id
while (new.id %in% old.ids[-idx]) {
new.id <- paste0(hold.new.id, " (", i, ")")
i <- i + 1L
}
cols[[idx]]$id <<- new.id
tclvalue(list.var) <- tcl("lreplace", tclvalue(list.var), idx - 1, idx - 1, new.id)
if (!is.null(old.id)) {
old.fun <- cols[[idx]]$fun
str.1 <- paste0("\"", old.id, "\"")
str.2 <- paste0("\"", new.id, "\"")
funs <- sapply(cols, function(i) gsub(str.1, str.2, i$fun, fixed=TRUE))
sapply(seq_along(cols), function(i) cols[[i]]$fun <<- funs[[i]])
new.fun <- cols[[idx]]$fun
if (!identical(old.fun, new.fun)) {
tkconfigure(f2.txt.4.2, state="normal")
tcl(f2.txt.4.2, "delete", "1.0", "end")
tkinsert(f2.txt.4.2, "end", new.fun)
tkconfigure(f2.txt.4.2, state="disabled")
}
if (!is.null(query))
query <<- gsub(str.1, str.2, query, fixed=TRUE)
if (!is.null(changelog))
changelog[changelog[, "variable"] %in% old.id, "variable"] <<- new.id
}
}
SaveNb <- function() {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
old.fmt <- cols[[idx]]$format
new.fmt <- as.character(tclvalue(fmt.var))
cols[[idx]]$format <<- new.fmt
old.fun <- cols[[idx]]$fun
new.fun <- as.character(tclvalue(tkget(f2.txt.4.2, "1.0", "end-1c")))
cols[[idx]]$fun <<- new.fun
if (!identical(old.fun, new.fun))
cols[[idx]]$summary <<- summary(EvalFunction(new.fun, cols))
SetVarId(idx)
}
UpdateNb <- function() {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
saved.name <- cols[[idx]]$name
if (is.null(saved.name)) saved.name <- ""
tclvalue(name.var) <- saved.name
tkconfigure(f2.ent.3.2, state="normal")
saved.class <- cols[[idx]]$class
tclvalue(class.var) <- paste(saved.class)
tkconfigure(f2.ent.3.2, state="readonly")
saved.fmt <- cols[[idx]]$format
tkconfigure(f2.ent.2.2, state="normal")
tclvalue(fmt.var) <- saved.fmt
tkconfigure(f2.ent.2.2, state="readonly")
tkconfigure(f2.txt.4.2, state="normal")
tcl(f2.txt.4.2, "delete", "1.0", "end")
tkinsert(f2.txt.4.2, "end", cols[[idx]]$fun)
tkconfigure(f2.txt.4.2, state="disabled")
s <- "disabled"
if (is.na(cols[[idx]]$index)) s <- "normal"
tkconfigure(f2.but.4.3, state=s)
tkconfigure(f3.txt, state="normal")
tcl(f3.txt, "delete", "1.0", "end")
if (!is.null(cols[[idx]]$summary)) {
txt <- paste(c("", utils::capture.output(cols[[idx]]$summary)), collapse="\n")
tkinsert(f3.txt, "end", txt)
}
tkconfigure(f3.txt, state="disabled")
}
ChangeTab <- function() {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
SaveNb()
UpdateNb()
tabid <- tclvalue(tcl(nb, "select"))
if (tabid == f2$ID) {
tkfocus(f2)
} else if (tabid == f3$ID) {
tkfocus(f3)
}
}
DeleteVar <- function() {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
var.str <- paste0("\"", cols[[idx]]$id, "\"")
if (!is.null(query) && grepl(var.str, query, fixed=TRUE))
query <<- NULL
if (!is.null(changelog) && any(grepl(var.str, changelog, fixed=TRUE)))
changelog <<- NULL
funs.with.var <- grep(var.str, sapply(cols, function(i) i$fun), fixed=TRUE)
dependent.vars <- funs.with.var[!funs.with.var %in% idx]
if (length(dependent.vars) > 0) {
ids <- vapply(cols, function(i) i$id, "")[dependent.vars]
msg <- paste0("Variables dependent on variable \"", cols[[idx]]$id,
"\" include:\n\n ", paste(ids, collapse=", "),
"\n\nThese variables must first be removed before this ",
"operation can be completed.")
tkmessageBox(icon="error", message=msg, title="Deletion Prevented", type="ok", parent=tt)
return()
}
if (!is.na(cols[[idx]]$index)) {
msg <- paste0("Variable \"", cols[[idx]]$id, "\" corresponds with imported data.\n\n",
"Are you sure you want to remove it?")
ans <- tkmessageBox(icon="question", message=msg, title="Question", type="yesno", parent=tt)
if (as.character(ans) == "no") return()
}
tclvalue(list.var) <- tcl("lreplace", tclvalue(list.var), idx - 1L, idx - 1L)
cols <<- cols[-idx]
vars <<- vars[!vars %in% idx]
if (length(cols) == 0) cols <<- NULL
if (length(vars) == 0) vars <<- NULL
for (i in seq_along(vars)) {
if (vars[[i]] > idx) vars[[i]][1] <<- vars[[i]] - 1
}
tkselection.clear(f1.lst, 0, "end")
n <- length(cols)
if (n > 0) {
if (idx > n)
tkselection.set(f1.lst, idx - 2)
else
tkselection.set(f1.lst, idx - 1)
UpdateNb()
} else {
tclvalue(name.var) <- ""
tclvalue(class.var) <- ""
tclvalue(fmt.var) <- ""
tkconfigure(f2.txt.4.2, state="normal")
tcl(f2.txt.4.2, "delete", "1.0", "end")
tkconfigure(f2.txt.4.2, state="disabled")
tkconfigure(f3.txt, state="normal")
tcl(f3.txt, "delete", "1.0", "end")
tkconfigure(f3.txt, state="disabled")
}
}
SaveNewVar <- function() {
SaveNb()
new.name <- "New Variable"
idx <- length(cols) + 1L
cols[[idx]] <- list(id="", class="")
m <- if (length(cols) > 1) length(EvalFunction(cols[[1]]$fun, cols)) else NULL
f <- EditFunction(cols, index=idx, value.length=m, win.title="New Variable", parent=tt)
if (is.null(f$fun) || f$fun == "") return()
cols[[idx]] <<- list(id="", name="New Variable", format="", class=f$class,
index=NA, fun=f$fun, sample=f$sample, summary=f$summary)
tcl("lappend", list.var, new.name)
tkselection.clear(f1.lst, 0, "end")
tkselection.set(f1.lst, idx - 1L, idx - 1L)
tkyview(f1.lst, idx - 1L)
UpdateNb()
SetVarId(idx)
}
CallEditFunction <- function() {
SaveNb()
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
m <- if (length(cols) > 1) length(EvalFunction(cols[[1]]$fun, cols)) else NULL
f <- EditFunction(cols, index=idx, value.length=m, parent=tt)
if (is.null(f$fun)) return()
if (f$fun == "") {
msg <- paste0("Nothing has been defined for this function; therefore,\n",
"the variable '", cols[[idx]]$name, "' will be removed.")
ans <- tkmessageBox(icon="question", message=msg, title="Warning", type="okcancel", parent=tt)
if (as.character(ans) == "ok") DeleteVar()
return()
}
if (!identical(f$class, cols[[idx]]$class)) cols[[idx]]$format <<- ""
cols[[idx]]$fun <<- f$fun
cols[[idx]]$class <<- f$class
cols[[idx]]$summary <<- f$summary
cols[[idx]]$sample <<- f$sample
UpdateNb()
}
CallFormat <- function() {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
sample.value <- cols[[idx]]$sample
old.fmt <- as.character(tclvalue(fmt.var))
if (inherits(sample.value, c("POSIXt", "Date"))) {
new.fmt <- FormatDateTime(sample=sample.value, fmt=old.fmt, parent=tt)
} else {
if (is.null(sample.value)) sample.value <- NA
new.fmt <- Format(sample=sample.value, fmt=old.fmt, parent=tt)
}
if (is.null(new.fmt)) new.fmt <- ""
tclvalue(fmt.var) <- new.fmt
}
Arrange <- function(type) {
idx <- as.integer(tkcurselection(f1.lst)) + 1L
if (length(idx) == 0) return()
n <- length(cols)
idxs <- seq_len(n)
if (type == "back") {
if (idx == 1) return()
new.idxs <- c(idx, idxs[-idx])
new.idx <- 1
} else if (type == "front") {
if (idx == n) return()
new.idxs <- c(idxs[-idx], idx)
new.idx <- n
} else if (type == "backward") {
if (idx == 1) return()
new.idxs <- seq_len(n)
new.idxs[c(idx - 1L, idx)] <- c(idx, idx - 1L)
new.idx <- idx - 1L
} else if (type == "forward") {
if (idx == n) return()
new.idxs <- seq_len(n)
new.idxs[c(idx, idx + 1L)] <- c(idx + 1L, idx)
new.idx <- idx + 1L
}
cols <<- cols[new.idxs]
for (i in seq_along(vars)) {
vars[[i]][1] <<- idxs[new.idxs %in% vars[[i]][1]]
}
ids <- vapply(cols, function(i) i$id, "")
for (i in seq_len(n))
tclvalue(list.var) <- tcl("lreplace", tclvalue(list.var), i - 1, i - 1, ids[i])
tkselection.clear(f1.lst, 0, "end")
tkselection.set(f1.lst, new.idx - 1L)
tkyview(f1.lst, new.idx - 1L)
}
rtn <- NULL
old.cols <- cols
ids <- vapply(cols, function(i) i$id, "")
w <- 300
h <- 50
list.var <- tclVar()
for (i in seq_along(ids)) tcl("lappend", list.var, ids[i])
name.var <- tclVar()
fmt.var <- tclVar()
class.var <- tclVar()
tt.done.var <- tclVar(0)
tclServiceMode(FALSE)
tt <- tktoplevel()
if (!is.null(parent)) {
tkwm.transient(tt, parent)
geo <- unlist(strsplit(as.character(tkwm.geometry(parent)), "\\+"))
geo <- as.integer(geo[2:3]) + 25
tkwm.geometry(tt, sprintf("+%s+%s", geo[1], geo[2]))
}
tktitle(tt) <- "Variable Manager"
top.menu <- tkmenu(tt, tearoff=0)
menu.edit <- tkmenu(tt, tearoff=0, relief="flat")
tkadd(top.menu, "cascade", label="Edit", menu=menu.edit, underline=0)
tkadd(menu.edit, "command", label="New\u2026", accelerator="Ctrl+N",
command=SaveNewVar)
tkadd(menu.edit, "command", label="Delete", command=DeleteVar)
menu.arrange <- tkmenu(tt, tearoff=0)
tkadd(top.menu, "cascade", label="Arrange", menu=menu.arrange, underline=0)
tkadd(menu.arrange, "command", label="Send to top", accelerator="Ctrl+Shift+[",
command=function() Arrange("back"))
tkadd(menu.arrange, "command", label="Send upward", accelerator="Ctrl+[",
command=function() Arrange("backward"))
tkadd(menu.arrange, "command", label="Bring downward", accelerator="Ctrl+]",
command=function() Arrange("forward"))
tkadd(menu.arrange, "command", label="Bring to bottom", accelerator="Ctrl+Shift+]",
command=function() Arrange("front"))
tkconfigure(tt, menu=top.menu)
f0 <- ttkframe(tt, relief="flat")
f0.but.1 <- ttkbutton(f0, width=2, image=GetBitmapImage("top"),
command=function() Arrange("back"))
f0.but.2 <- ttkbutton(f0, width=2, image=GetBitmapImage("up"),
command=function() Arrange("backward"))
f0.but.3 <- ttkbutton(f0, width=2, image=GetBitmapImage("down"),
command=function() Arrange("forward"))
f0.but.4 <- ttkbutton(f0, width=2, image=GetBitmapImage("bottom"),
command=function() Arrange("front"))
f0.but.5 <- ttkbutton(f0, width=2, image=GetBitmapImage("plus"),
command=SaveNewVar)
f0.but.6 <- ttkbutton(f0, width=2, image=GetBitmapImage("delete"),
command=DeleteVar)
f0.but.8 <- ttkbutton(f0, width=12, text="OK",
command=function() SaveChanges("ok"))
f0.but.9 <- ttkbutton(f0, width=12, text="Cancel",
command=function() tclvalue(tt.done.var) <- 1)
f0.but.10 <- ttkbutton(f0, width=12, text="Apply",
command=function() SaveChanges("apply"))
f0.but.11 <- ttkbutton(f0, width=12, text="Help",
command=function() {
print(utils::help("ManageVariables", package="RSurvey", verbose=FALSE))
})
f0.grp.12 <- ttksizegrip(f0)
tkgrid(f0.but.1, f0.but.2, f0.but.3, f0.but.4, f0.but.5, f0.but.6, "x",
f0.but.8, f0.but.9, f0.but.10, f0.but.11, f0.grp.12)
tkgrid.columnconfigure(f0, 6, weight=1)
tkgrid.configure(f0.but.1, f0.but.2, f0.but.3, f0.but.4, f0.but.5, f0.but.6,
sticky="n", padx=c(0, 2), pady=c(0, 0))
tkgrid.configure(f0.but.1, padx=c(10, 2))
tkgrid.configure(f0.but.6, padx=c(26, 0))
tkgrid.configure(f0.but.8, f0.but.9, f0.but.10, f0.but.11, padx=c(0, 4), pady=c(15, 10))
tkgrid.configure(f0.but.11, columnspan=2, padx=c(0, 10))
tkgrid.configure(f0.grp.12, sticky="se")
tkraise(f0.but.11, f0.grp.12)
tkpack(f0, fill="x", side="bottom", anchor="e")
pw <- ttkpanedwindow(tt, orient="horizontal")
f1 <- tkframe(pw, relief="flat")
f1.lst <- tklistbox(f1, selectmode="browse", activestyle="none",
relief="flat", borderwidth=5, width=25,
exportselection=FALSE, listvariable=list.var, highlightthickness=0)
f1.ysc <- ttkscrollbar(f1, orient="vertical")
tkconfigure(f1.lst, background="white", yscrollcommand=paste(.Tk.ID(f1.ysc), "set"))
tkconfigure(f1.ysc, command=paste(.Tk.ID(f1.lst), "yview"))
tkpack(f1.lst, side="left", fill="both", expand=TRUE, pady=c(2, 2))
tkpack(f1.ysc, side="right", fill="y", anchor="w", padx=c(0, 2), pady=c(2, 2))
tkselection.set(f1.lst, 0)
tkadd(pw, f1, weight=0)
nb <- ttknotebook(pw)
f2 <- ttkframe(nb, relief="flat", padding=10, borderwidth=2)
tkadd(nb, f2, text=" Variable ")
f2.lab.1.1 <- ttklabel(f2, text="Name")
f2.lab.2.1 <- ttklabel(f2, text="Format")
f2.lab.3.1 <- ttklabel(f2, text="Class")
f2.lab.4.1 <- ttklabel(f2, text="Function")
f2.ent.1.2 <- ttkentry(f2, textvariable=name.var)
f2.ent.2.2 <- ttkentry(f2, textvariable=fmt.var)
f2.ent.3.2 <- ttkentry(f2, textvariable=class.var)
f2.txt.4.2 <- tktext(f2, padx=2, pady=2, width=45, height=6, undo=1, wrap="none",
relief="flat", foreground="black", background="
borderwidth=1, font="TkFixedFont", state="disabled")
f2.but.2.3 <- ttkbutton(f2, text="Edit", width=5, command=CallFormat)
f2.but.4.3 <- ttkbutton(f2, text="Edit", width=5, command=CallEditFunction)
tkgrid(f2.lab.1.1, f2.ent.1.2, "x")
tkgrid(f2.lab.2.1, f2.ent.2.2, f2.but.2.3)
tkgrid(f2.lab.3.1, f2.ent.3.2, "x")
tkgrid(f2.lab.4.1, f2.txt.4.2, f2.but.4.3)
tkgrid.configure(f2.lab.1.1, f2.lab.2.1, f2.lab.3.1, sticky="w")
tkgrid.configure(f2.lab.4.1, sticky="ne")
tkgrid.configure(f2.ent.1.2, f2.ent.2.2, f2.ent.3.2,
sticky="we", padx=2, pady=2)
tkgrid.configure(f2.txt.4.2, padx=3, pady=3, sticky="nswe")
tkgrid.configure(f2.but.2.3, sticky="w")
tkgrid.configure(f2.lab.4.1, pady=c(4, 0))
tkgrid.configure(f2.but.4.3, sticky="nw", pady=c(1, 0))
tkgrid.columnconfigure(f2, 1, weight=1, minsize=25)
tkgrid.rowconfigure(f2, 3, weight=1, minsize=25)
f3 <- ttkframe(nb, relief="flat", padding=0, borderwidth=0)
tkadd(nb, f3, text=" Summary ")
f3.ysc <- ttkscrollbar(f3, orient="vertical")
f3.txt <- tktext(f3, bg="white", padx=2, pady=2, width=60, height=8,
undo=1, wrap="none", foreground="black", relief="flat",
font="TkFixedFont", yscrollcommand=function(...) tkset(f3.ysc, ...))
tkconfigure(f3.ysc, command=paste(.Tk.ID(f3.txt), "yview"))
tkgrid(f3.txt, f3.ysc)
tkgrid.configure(f3.txt, sticky="news")
tkgrid.configure(f3.ysc, sticky="ns")
tkgrid.columnconfigure(f3, 0, weight=1, minsize=25)
tkgrid.rowconfigure(f3, 0, weight=1, minsize=25)
tkadd(pw, nb, weight=1)
tkpack(pw, fill="both", expand="yes", padx=10, pady=c(10, 2))
UpdateNb()
tclServiceMode(TRUE)
tkbind(tt, "<Control-KeyPress-n>", SaveNewVar)
tkbind(tt, "<Control-KeyPress-bracketright>", function() Arrange("forward"))
tkbind(tt, "<Control-Shift-KeyPress-braceright>", function() Arrange("front"))
tkbind(tt, "<Control-KeyPress-bracketleft>", function() Arrange("backward"))
tkbind(tt, "<Control-Shift-KeyPress-braceleft>", function() Arrange("back"))
tkbind(tt, "<Destroy>", function() tclvalue(tt.done.var) <- 1)
tkbind(nb, "<<NotebookTabChanged>>", ChangeTab)
tkbind(f1.lst, "<ButtonPress-1>", SaveNb)
tkbind(f1.lst, "<Up>", SaveNb)
tkbind(f1.lst, "<Down>", SaveNb)
tkbind(f1.lst, "<<ListboxSelect>>", UpdateNb)
tkbind(f2.ent.1.2, "<Return>", function() SetVarId())
tkfocus(tt)
tkgrab(tt)
tkwait.variable(tt.done.var)
tclServiceMode(FALSE)
tkgrab.release(tt)
tkdestroy(tt)
tclServiceMode(TRUE)
return(rtn)
} |
lweights_gaussian <-
function(data,a = ncol(data),mu = numeric(p),au = 1,T = diag(ncol(data),ncol(data)),nbcores=1){
p <- ncol(data)
n <- nrow(data)
datamean <- apply(data,2,mean)
R <- T + (n-1)*cov(data) + (au*n/(au+n))*matrix((mu - datamean),p,1)%*%matrix((mu - datamean),1,p)
uptri <- upper.tri(matrix(0,p,p))
if (requireNamespace("parallel",quietly = TRUE) &&
(nbcores > 1)){
lW <- matrix(parallel::mcmapply(function(y, i, j) if (y){
0.5*(a-p+2)*ldet(T[c(i,j),c(i,j)]) - 0.5*(a-p+n+2)*ldet(R[c(i,j),c(i,j)])
} else 0,
uptri,
row(uptri),
col(uptri),
mc.cores = nbcores),
nrow = nrow(uptri))
} else {
lW <- matrix(mapply(function(y, i, j) if (y){
0.5*(a-p+2)*ldet(T[c(i,j),c(i,j)]) - 0.5*(a-p+n+2)*ldet(R[c(i,j),c(i,j)])
} else 0,
uptri,
row(uptri),
col(uptri)),
nrow = nrow(uptri))
}
diaglW <- sapply(1:p,function(i) 0.5*(a-p+1)*log(abs(T[i,i])) -0.5*(a-p+n+1)*log(abs(R[i,i])))
lW <- lW + t(lW)
lW <- sapply(1:p,function(x) lW[,x] - diaglW[x])
lW <- t(sapply(1:p,function(x) lW[x,] - diaglW[x]))
diag(lW) <- 0
return(lW)
} |
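# Minimal usage sketch for lweights_gaussian() above. The log-determinant
# helper ldet() is internal to the original package and not shown here, so
# an illustrative stand-in is defined; the toy data are likewise made up.
ldet <- function(m) as.numeric(determinant(as.matrix(m), logarithm = TRUE)$modulus)
set.seed(1)
toy <- matrix(rnorm(200), ncol = 4)
lweights_gaussian(toy, nbcores = 1)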
library(mclogit)
options(error=recover)
mclogitP <- function(eta,s){
expeta <- exp(eta)
sum.expeta <- rowsum(expeta,s)
expeta/sum.expeta[s]
}
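# Quick illustrative check: within each choice set the probabilities
# returned by mclogitP() should sum to one.
s.chk <- gl(2, 5)
tapply(mclogitP(rnorm(10), s.chk), s.chk, sum)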
N <- 10000
n <- 100
test.data <- data.frame(
x = rnorm(N),
f = gl(4,N/4),
set = gl(N/5,5,N),
altern0 = gl(5,1,N),
nat = gl(15,N/15,N),
occ = gl(10,1,N)
)
test.data <- within(test.data,{
altern <- as.integer(interaction(altern0,nat))
altern.occ <- as.integer(interaction(altern,occ))
b1 <- rnorm(n=length(altern))
b2 <- rnorm(n=length(altern.occ))
ff <- 1+.2*(as.numeric(f)-1)
eta <- x*ff + b1[altern] + b2[altern.occ]
p <- mclogitP(eta,set)
n <- unlist(tapply(p,set,function(p)rmultinom(n=1,size=n,prob=p)))
rm(b1,b2)
})
test.mc0 <- mclogit(cbind(n,set)~x:f,data=test.data
)
test.mc <- mclogit(cbind(n,set)~x:f,data=test.data,
random=~1|altern/occ,
maxit=100
)
coef(test.mc)
vcov(test.mc)
print(test.mc)
summary(test.mc) |
ab_scenario = function(
od,
zones,
zones_d = NULL,
origin_buildings = NULL,
destination_buildings = NULL,
pop_var = 3,
time_fun = ab_time_normal,
output = "sf",
modes = c("Walk", "Bike", "Transit", "Drive"),
...
) {
if(methods::is(od, class2 = "sf")) {
od = sf::st_drop_geometry(od)
}
if(!any(modes %in% names(od))) {
message("Column names, at least on of: ", paste0(modes, collapse = ", "))
message("Column names in od object: ", paste0(names(od), collapse = ", "))
stop("Column names in od data do not match modes. Try renaming od columns")
}
modes_in_od = modes[modes %in% names(od)]
od = od[c(names(od)[1:2], modes_in_od)]
od_long = tidyr::pivot_longer(od, cols = modes_in_od, names_to = "mode")
repeat_indices = rep(seq(nrow(od_long)), od_long$value)
od_longer = od_long[repeat_indices, 1:3]
if(!is.null(origin_buildings)) {
suppressMessages({
origin_buildings = sf::st_centroid(origin_buildings)
})
}
if(!is.null(destination_buildings)) {
suppressMessages({
destination_buildings = sf::st_centroid(destination_buildings)
})
}
res = od::od_jitter(
od = od_longer,
z = zones,
zd = zones_d,
subpoints_o = origin_buildings,
subpoints_d = destination_buildings
)
if(output == "sf") {
return(res)
} else if(output == "json_list") {
return(ab_json(res, time_fun = time_fun, ...))
} else {
ab_save(ab_json(res, time_fun = time_fun, ...), f = output)
}
}
ab_json = function(
desire_lines,
mode_column = NULL,
time_fun = ab_time_normal,
scenario_name = "test",
default_purpose = "Work",
...
) {
if(scenario_name == "test") {
message("Default scenario name of 'test' used.")
}
if(is.null(mode_column)) {
mode_column = "mode"
}
n = nrow(desire_lines)
if(is.null(desire_lines$departure)) {
desire_lines$departure = time_fun(n = n, ...)
}
if(max(desire_lines$departure) > 7 * 24 * 60 * 60) {
stop(
"Values greater than 604800 found in the input for departure timesT Try:\n",
"desire_lines$departure = desire_lines$departure / 10000 \n",
"if the original input was in 10,000th of a second (used internally by A/B Street)"
)
}
desire_lines$departure = round(desire_lines$departure * 10000)
start_points = lwgeom::st_startpoint(desire_lines) %>% sf::st_coordinates()
end_points = lwgeom::st_endpoint(desire_lines) %>% sf::st_coordinates()
colnames(start_points) = c("ox", "oy")
colnames(end_points) = c("dx", "dy")
ddf = cbind(
sf::st_drop_geometry(desire_lines),
start_points,
end_points
)
if(is.null(desire_lines$person)) {
ddf$person = seq(nrow(desire_lines))
}
if(is.null(desire_lines$purpose)) {
ddf$purpose = default_purpose
}
people = lapply(unique(ddf$person), function(p) {
ddfp = ddf[ddf$person == p, ]
list(
trips = lapply(seq(nrow(ddfp)), function(i) {
list(
departure = ddfp$departure[i],
origin = list(Position = list(
longitude = ddfp$ox[i],
latitude = ddfp$oy[i]
)),
destination = list(Position = list(
longitude = ddfp$dx[i],
latitude = ddfp$dy[i]
)),
mode = ddfp$mode[i],
purpose = ddfp$purpose[i]
)
} )
)
} )
if(is.null(scenario_name)) {
scenario_name = gsub(pattern = "mode_", replacement = "", x = mode_column)
}
json_r = list(scenario_name = scenario_name, people = people)
json_r
}
ab_sf = function(
json
) {
if(is.character(json)) {
json = jsonlite::read_json(json, simplifyVector = TRUE)
}
trip_data = dplyr::bind_rows(json$people$trips, .id = "person")
linestrings = od::odc_to_sfc(cbind(
trip_data$origin$Position$longitude,
trip_data$origin$Position$latitude,
trip_data$destination$Position$longitude,
trip_data$destination$Position$latitude
))
sf_data = subset(trip_data, select = -c(origin, destination))
sf_linestring = sf::st_sf(
sf_data,
geometry = linestrings,
crs = 4326
)
sf_linestring$departure = sf_linestring$departure / 10000
sf_linestring
}
ab_save = function(x, f) {
jsonlite::write_json(x, f, pretty = TRUE, auto_unbox = TRUE)
}
ab_time_normal = function(hr = 8.5, sd = 0.5, n = 1) {
round(stats::rnorm(n = n, mean = hr * 60^2, sd = sd * 60^2))
}
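# Illustrative draw: three departure times in seconds after midnight,
# centred on 08:30 with a half-hour standard deviation.
ab_time_normal(hr = 8.5, sd = 0.5, n = 3)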
match_scenario_mode = function(cnames, scenario = "base", mode = "Walk") {
cnames_match_scenario = grepl(pattern = scenario, x = cnames, ignore.case = TRUE)
cnames_match_mode = grepl(pattern = mode, x = cnames, ignore.case = TRUE)
cname_matching = cnames[cnames_match_scenario & cnames_match_mode]
cname_matching
}
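# Illustrative call with made-up column names: returns "base_Walk", the
# only name matching both the default scenario and mode (case-insensitive).
match_scenario_mode(c("base_Walk", "base_Drive", "godutch_Walk"))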
globalVariables(names = c("origin", "destination")) |
context("slpMBMF")
load('../data/test_slpMBMF.RData')
st <- list(alpha = .3, lambda = .5, w = 0.5, beta = 2, p = .2,
           tprob = rbind(c(.7, .3), c(.3, .7)),
           q1.mf = c(.5, .5), q1.mb = c(.5, .5), q2 = c(.5, .5))
out <- slpMBMF(st,tr1)
test_that("slpMBMF reproduces an arbitrarily chosen short simulation.", {
expect_equal(out$out[,1], cor.out)
}) |
has.path = function(from, to, nodes, amat, exclude.direct = FALSE,
underlying.graph = FALSE, debug = FALSE) {
.Call(call_has_pdag_path,
from = which(nodes == from),
to = which(nodes == to),
amat = amat,
nrows = nrow(amat),
nodes = nodes,
underlying = underlying.graph,
exclude.direct = exclude.direct,
debug = debug)
}
mb2arcs = function(mb, nodes) {
empty.mb = sapply(mb, function(x) {(length(x$nbr) == 0) || is.null(x$nbr) || identical(x$nbr, "")})
result = do.call(rbind, lapply(nodes[!empty.mb],
function(x) { cbind(from = x, to = mb[[x]][['nbr']]) }))
if (is.null(result))
matrix(character(0), ncol = 2, dimnames = list(c(), c("from", "to")))
else
result
}
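# Illustrative call with a toy blanket list: nodes with an empty 'nbr'
# element are skipped and the rest become a two-column from/to arc matrix.
mb2arcs(list(A = list(nbr = "B"), B = list(nbr = character(0))),
        nodes = c("A", "B"))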
root.leaf.nodes = function(x, leaf = FALSE) {
.Call(call_root_nodes,
bn = x,
check = leaf)
}
mb.fitted = function(x, node) {
.Call(call_fitted_mb,
bn = x,
target = node)
}
dag2ug.backend = function(x, moral = FALSE, debug = FALSE) {
nodes = names(x$nodes)
arcs = .Call(call_dag2ug,
bn = x,
moral = moral,
debug = debug)
x$arcs = arcs
x$nodes = cache.structure(nodes = nodes, arcs = arcs)
x$learning$undirected = TRUE
return(x)
}
pdag2dag.backend = function(arcs, ordering) {
arcs = .Call(call_pdag2dag,
arcs = arcs,
nodes = ordering)
if (!is.acyclic(arcs = arcs, nodes = ordering))
stop("this complete orientation of the graph is not acyclic.")
return(arcs)
}
mutilated.backend.bn = function(x, evidence) {
if (identical(evidence, TRUE))
return(x)
fixed = names(evidence)
nodes = names(x$nodes)
x$arcs = x$arcs[x$arcs[, "to"] %!in% fixed, ]
amat = arcs2amat(x$arcs, nodes)
for (node in fixed)
x$nodes[[node]] = cache.partial.structure(nodes, target = node,
amat = amat, debug = FALSE)
return(x)
}
mutilated.backend.fitted = function(x, evidence) {
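# Mutilate a fitted network for hard evidence: each evidence node becomes
# a point mass (zero-variance Gaussian or degenerate probability table)
# and its incoming arcs are removed by clearing the parent/child links.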
if (identical(evidence, TRUE))
return(x)
fixed = names(evidence)
nodes = names(x)
for (node in fixed) {
cur = x[[node]]
fix = evidence[[node]]
if (is(cur, "bn.fit.gnode")) {
cur$coefficients = c("(Intercept)" = as.numeric(fix))
cur$sd = 0
if (!is.null(cur$fitted.values))
cur$residuals = rep(fix, length(cur$fitted.values))
if (!is.null(cur$residuals))
cur$residuals = rep(0, length(cur$residuals))
}
else if (is(cur, c("bn.fit.dnode", "bn.fit.onode"))) {
levels = dimnames(cur$prob)[[1]]
values = (levels %in% fix) + 0
cur$prob = as.table(structure(values / sum(values), names = levels))
}
parents = cur$parents
cur$parents = character(0)
for (p in parents) {
temp = x[[p]]
temp$children = temp$children[temp$children != node]
x[p] = list(temp)
}
x[node] = list(cur)
}
return(x)
}
perturb.backend = function(network, iter, nodes, amat, whitelist,
maxp = Inf, blacklist, debug = FALSE) {
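# Randomly perturb the network by adding, dropping, or reversing arcs (up
# to 'iter' effective changes), skipping moves that would create a cycle
# or violate the whitelist, blacklist, or parent-count limit.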
new = network
updates = character(0)
for (i in seq_len(3 * iter)) {
nparents = colSums(amat)
to.be.added = arcs.to.be.added(amat, nodes, blacklist = blacklist,
nparents = nparents, maxp = maxp)
to.be.dropped = arcs.to.be.dropped(new$arcs, whitelist)
to.be.reversed = arcs.to.be.reversed(new$arcs, blacklist, nparents, maxp)
if (iter == 0) break
op = sample(c("set", "drop", "reverse"), 1, replace = TRUE)
if ((op == "set") && (nrow(to.be.added) > 0)) {
a = sample(seq_len(nrow(to.be.added)), 1, replace = TRUE)
arc = to.be.added[a, ]
if (!has.path(arc[2], arc[1], nodes, amat)) {
new$arcs = set.arc.direction(from = arc[1], to = arc[2],
arcs = new$arcs, debug = debug)
updates = c(updates, arc[2])
}
}
else if ((op == "drop") && (nrow(to.be.dropped) > 0)) {
a = sample(seq_len(nrow(to.be.dropped)), 1, replace = TRUE)
arc = to.be.dropped[a, ]
new$arcs = drop.arc.backend(arcs = new$arcs, dropped = arc,
debug = debug)
updates = c(updates, arc[2])
}
else if ((op == "reverse") && (nrow(to.be.reversed) > 0)) {
a = sample(seq_len(nrow(to.be.reversed)), 1, replace = TRUE)
arc = to.be.reversed[a, ]
if (!has.path(arc[1], arc[2], nodes, amat, exclude.direct = TRUE)) {
new$arcs = reverse.arc.backend(from = arc[1], to = arc[2],
arcs = new$arcs, debug = debug)
updates = c(updates, arc)
}
}
amat = arcs2amat(arcs = new$arcs, nodes = nodes)
if (!identical(new$arcs, network$arcs))
iter = iter - 1
}
updates = unique(updates)
new$updates = array(rep(0, length(updates)), dimnames = list(updates))
return(new)
}
structural.hamming.distance = function(learned, true, wlbl = FALSE, debug = FALSE) {
.Call(call_shd,
learned = cpdag.backend(learned, wlbl = wlbl),
golden = cpdag.backend(true, wlbl = wlbl),
debug = debug)
}
hamming.distance = function(learned, true, debug = FALSE) {
.Call(call_shd,
learned = dag2ug.backend(learned),
golden = dag2ug.backend(true),
debug = debug)
}
colliders.backend = function(x, return.arcs = FALSE, including.shielded = TRUE,
including.unshielded = TRUE, debug = FALSE) {
nodes = names(x$nodes)
coll = .Call(call_colliders,
arcs = x$arcs,
nodes = nodes,
return.arcs = return.arcs,
shielded = including.shielded,
unshielded = including.unshielded,
debug = debug)
if (return.arcs) {
coll = arcs.rbind(coll[, c("X", "Z")], coll[, c("Y", "Z")])
coll = unique.arcs(coll, nodes = nodes)
}
return(coll)
}
equal.backend.bn = function(target, current) {
.Call(call_all_equal_bn,
target = target,
current = current)
}
equal.backend.fit = function(target, current, tolerance) {
same.structure = all.equal(bn.net(target), bn.net(current))
if (!isTRUE(same.structure))
return(same.structure)
for (node in nodes(target)) {
tnode = target[[node]]
cnode = current[[node]]
target.type = class(tnode)
current.type = class(cnode)
if (target.type != current.type)
return(paste("Different distributions for node", node))
if (target.type %in% c("bn.fit.dnode", "bn.fit.onode")) {
tprob = tnode$prob
cprob = cnode$prob
tprob = check.dnode.vs.dnode(tprob, cnode)
if (!isTRUE(all.equal(tprob, cprob, tolerance = tolerance)))
return(paste("Different probabilities for node", node))
}
else if (target.type %in% c("bn.fit.gnode", "bn.fit.cgnode")) {
tparams = list(coef = tnode$coefficients, sd = tnode$sd,
dlevels = tnode$dlevels)
if (target.type == "bn.fit.gnode")
tparams = check.gnode.vs.gnode(tparams, cnode)
if (target.type == "bn.fit.cgnode")
tparams = check.cgnode.vs.cgnode(tparams, cnode)
if (!isTRUE(all.equal(tparams$coef, cnode$coefficients, tolerance = tolerance)))
return(paste("Different regression coefficients for node", node))
if (!isTRUE(all.equal(tparams$sd, cnode$sd, tolerance = tolerance)))
return(paste("Different standard errors for node", node))
}
}
return(TRUE)
}
tiers.backend = function(nodes, debug = FALSE) {
.Call(call_tiers,
nodes = nodes,
debug = debug)
}
subgraph.backend = function(x, nodes, preserve.learning = FALSE) {
spanned.arcs = function(x) all(!is.na(match(x, nodes)))
res = empty.graph.backend(nodes)
spanning = apply(x$arcs, 1, spanned.arcs)
res$arcs = x$arcs[spanning, , drop = FALSE]
res$nodes = cache.structure(nodes, arcs = res$arcs)
if (preserve.learning) {
res$learning = x$learning
for (el in c("whitelist", "blacklist", "illegal")) {
if (is.null(x$learning[[el]]))
next
spanning = apply(x$learning[[el]], 1, spanned.arcs)
res$learning[[el]] = x$learning[[el]][spanning, , drop = FALSE]
}
if (!is.null(x$learning$args$prior) && (x$learning$args$prior == "cs")) {
spanning = apply(x$learning$args$beta[, c("from", "to")], 1, spanned.arcs)
res$learning$args$beta = res$learning$args$beta[spanning, , drop = FALSE]
}
}
return(res)
}
dseparation = function(bn, x, y, z) {
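# Test whether x and y are d-separated by z: restrict to the ancestral
# (upper) closure of {x, y, z}, moralize it, delete z, and check whether
# x and y are still connected.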
if ((x %in% z) || (y %in% z))
return(TRUE)
upper.closure = topological.ordering(bn, start = c(x, y, z), reverse = TRUE)
ucgraph = subgraph.backend(bn, upper.closure)
mgraph = dag2ug.backend(ucgraph, moral = TRUE)
upper.closure = upper.closure[upper.closure %!in% z]
mgraph = subgraph.backend(mgraph, upper.closure)
connected = has.path(x, y, nodes = upper.closure,
amat = arcs2amat(mgraph$arcs, upper.closure))
return(!connected)
}
compare.backend = function(target.arcs, current.arcs, nodes, arcs = FALSE) {
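# Compare two arc sets, counting (or listing, when arcs = TRUE) true
# positives, false positives, and false negatives; undirected arcs appear
# twice in the arc matrix, hence the /2 in the counts.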
which.dir = which.directed(target.arcs, nodes)
target.dir = target.arcs[which.dir, , drop = FALSE]
target.und = target.arcs[!which.dir, , drop = FALSE]
which.dir = which.directed(current.arcs, nodes)
current.dir = current.arcs[which.dir, , drop = FALSE]
current.und = current.arcs[!which.dir, , drop = FALSE]
which.tp.dir = which.listed(target.dir, current.dir)
which.tp.und = which.listed(target.und, current.und)
which.fn.dir = !which.listed(target.dir, current.dir)
which.fn.und = !which.listed(target.und, current.und)
which.fp.dir = !which.listed(current.dir, target.dir)
which.fp.und = !which.listed(current.und, target.und)
if (arcs) {
tp = arcs.rbind(target.dir[which.tp.dir, , drop = FALSE],
target.und[which.tp.und, , drop = FALSE])
fn = arcs.rbind(target.dir[which.fn.dir, , drop = FALSE],
target.und[which.fn.und, , drop = FALSE])
fp = arcs.rbind(current.dir[which.fp.dir, , drop = FALSE],
current.und[which.fp.und, , drop = FALSE])
}
else {
tp = length(which(which.tp.dir)) + length(which(which.tp.und))/2
fn = length(which(which.fn.dir)) + length(which(which.fn.und))/2
fp = length(which(which.fp.dir)) + length(which(which.fp.und))/2
}
return(list(tp = tp, fp = fp, fn = fn))
} |
"oaks" |
northcumbria <-
structure(c(346618.656524861, 340011.691269045, 338487.006979241,
332896.497916628, 332896.497916628, 333658.84006153, 330609.471481922,
328576.559095517, 326543.646709113, 325273.076467609, 315362.628583885,
305452.180700161, 306722.750941665, 301894.584023953, 297574.645202842,
295541.732816438, 294017.048526634, 293762.934478333, 306976.864989965,
306722.750941665, 312567.374052579, 320190.795501597, 319682.567404996,
321207.2516948, 320190.795501597, 321461.3657431, 324510.734322708,
327814.216950616, 328068.330998916, 325273.076467609, 325527.19051591,
328068.330998916, 334421.182206432, 339249.349124143, 340774.033413947,
342298.717703751, 347380.998669763, 365423.096099106, 373808.859693027,
375587.658031131, 381178.167093745, 380669.938997143, 383973.421625051,
388547.474494462, 389818.044735965, 389563.930687665, 386514.562108057,
386514.562108057, 379907.596852241, 381432.281142045, 381432.281142045,
377874.684465837, 380669.938997143, 379653.482803941, 373046.517548125,
369743.034920217, 366185.438244008, 363390.183712702, 362881.9556161,
364914.868002505, 364152.525857603, 362119.613471198, 363898.411809303,
363898.411809303, 369234.806823616, 368472.464678714, 365423.096099106,
361611.385374597, 357291.446553487, 357291.446553487, 356274.990360284,
583056.7468651, 575941.553512683, 573908.641126278, 574162.755174579,
573400.413029677, 571621.614691573, 566031.105628959, 565776.991580659,
563744.079194254, 564760.535387456, 563998.193242554, 549005.464392818,
541636.1569921, 537062.104122689, 529692.796721971, 516732.98025864,
515208.295968836, 512921.269534131, 492846.259718382, 488780.434945572,
475312.39038564, 477599.416820345, 481157.013496554, 482173.469689756,
493100.373766683, 495133.286153088, 501740.251408904, 502248.479505505,
502756.707602106, 505043.734036812, 507838.988568119, 508347.21666472,
511650.699292628, 508093.102616419, 507838.988568119, 510380.129051125,
509109.558809622, 497674.426636094, 497420.312587793, 495895.62829799,
498436.768780996, 504281.39189191, 506060.190230014, 506314.304278315,
507584.874519818, 515716.524065438, 515462.410017137, 518257.664548444,
526389.314094063, 529184.56862537, 530709.252915174, 533250.39339818,
542144.385088701, 545193.753668309, 550784.262730922, 549005.464392818,
546210.209861511, 551292.490827524, 554341.859407131, 559170.026324843,
563489.965145953, 563744.079194254, 566285.21967726, 569334.588256867,
572129.842788174, 577466.237802487, 576958.009705886, 581786.176623597,
586106.115444708, 587376.685686211, 588901.369976014), .Dim = c(71L,
2L), .Dimnames = list(NULL, c("x", "y"))) |
translit_all <- function(df) {
df[] <- lapply(df, function(x) {
y <- stringi::stri_trans_general(x, "Latin")
stringi::stri_trans_general(y, "latin-ascii")
})
return(df)
}
translate_str <- function(y, df) {
for (i in seq(1, nrow(df))) {
y <- stringr::str_replace_all(y, df$from[i], df$to[i])
}
y
}
map_owners <- function(y) {
y <- stringr::str_replace_all(y, " ", "")
df <- data.frame(
from = c(
"DEMOSIAEPICHEIRISEELEKTRISMOU", "ETHNIKEMETEOROLOGIKEYPERESIA",
"ETHNIKOASTEROSKOPEIOATHENAS", "ETHNIKOIDRYMAAGROTIKESEREUNAS",
"MOUSEIOPHYSIKESISTORIASKRETES", "NOMARCHIAKEAUTODIOIKESE",
"POLYTECHNEIOKRETES", "YPOURGEIOAGROTIKESANAPTYXESKAITROPHIMON",
"YPOURGEIOPERIBALLONTOS,ENERGEIASKAIKLIMATIKESALLAGES"
),
to = c(
"public_power_corp", "natio_meteo_service", "natio_observ_athens",
"natio_argic_resear", "crete_natural_museum", "greek_prefectures",
"crete_eng_faculty", "min_agricult", "min_envir_energy"
),
stringsAsFactors = FALSE
)
translate_str(y, df)
}
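# Illustrative call (whitespace is stripped before matching); this returns
# "natio_meteo_service".
map_owners("ETHNIKE METEOROLOGIKE YPERESIA")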
map_variables <- function(y) {
y <- stringr::str_to_lower(y)
df <- data.frame(
from = c(
" -", "-", " ", "[()]", "agnosto", "anemos", "dieuthynse",
"parelthon", "tachyteta", "mese", "brochoptose", "diarkeia",
"exatmise",
"exatmisodiapnoe", "thermokrasia", "edaphous", "bathos",
"elachiste", "megiste", "piese", "semeiake", "chioni",
"ypsometro", "stathme", "plemmyra", "paroche", "broche",
"katastase", "ektimemene", "athroistiko", "stereo", "ygrasia",
"ygro", "apolyte", "schetike", "asbestio", "wetu",
"chionobrochometro", "xero", "ydrometrese", "thalasses",
"semeio_drosou", "oratoteta", "steria", "thalassa", "barometro",
"tase_ydratmon", "psychrometro", "isodynamo_ypsos", "agogimoteta",
"aktinobolia", "anthraka", "dioxeidio", "ypoloipo", "argilio",
"argilos", "arseniko", "pyritiou", "aera", "nephokalypse", "nephose",
"axiosemeiota", "nephe", "kairos", "diafora", "atmosfairiki", "stathera",
"parousa", "parelthousa", "kalymeno", "el.", "meg.", "skleroteta",
"eliophaneia", "eisroe_se_tamieuteres"
),
to = c(
"", "_", "_", "", "unknown", "wind", "direction", "past", "speed",
"average", "precipitation", "duration", "evaporation",
"evapotranspiration", "temperature", "ground", "depth",
"min", "max", "pressure", "point", "snow", "elevation",
"level", "flood", "flow", "precipitation", "condition", "estimation",
"cumulative", "sediment", "humidity", "wet", "absolute", "relative",
"calcium", "precipitation", "snow_rain_gauge", "dry", "flow_gauge", "sea",
"dew_point", "visibility", "land", "sea", "barometer",
"vapour_pressure", "psychrometer", "water_equivalent",
"conductance", "radiation", "carbon", "dioxide", "residual",
"aluminum", "clay", "arsenic", "silicon", "air", "cloud_cover", "clouds",
"remarkably", "clouds", "weather", "difference", "atmospheric",
"constant", "present", "past", "cover", "min", "max", "hardness",
"sunshine", "inflow_reservoir"
),
stringsAsFactors = FALSE
)
translate_str(y, df)
}
map_ts <- function(y) {
y <- stringr::str_to_lower(y)
df <- data.frame(
from = c("lepte", "emeresia", "meniaia", "etesia", "oriaia"),
to = c("_minutes", "daily", "monthly", "annual", "hourly"),
stringsAsFactors = FALSE
)
translate_str(y, df)
}
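# Illustrative call: Greek time-step labels map to English frequencies,
# e.g. "Emeresia" becomes "daily".
map_ts("Emeresia")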
map_wd <- function(y) {
y <- stringr::str_replace_all(y, " ", "")
df <- data.frame(
from = c(
"DYTIKEPELOPONNESOS", "BOREIAPELOPONNESOS", "ANATOLIKEPELOPONNES",
"DYTIKESTEREAELLADA", "EPEIROS", "ATTIKE", "ANATOLIKESTEREAELL",
"THESSALIA", "DYTIKEMAKEDONIA", "KENTRIKEMAKEDONIA", "ANATOLIKEMAKEDONIA",
"THRAKE", "KRETE", "NESOIAIGAIOU"
),
to = c(
"GR01", "GR02", "GR03", "GR04", "GR05", "GR06", "GR07", "GR08",
"GR09", "GR10", "GR11", "GR12", "GR13", "GR14"
),
stringsAsFactors = FALSE
)
translate_str(y, df)
} |
SimPhase3=function(Dose,Phase12,PE,PT,Hypermeans,Hypervars,betaA,ProbC,betaC,Family,alpha,Nmax,Opt,Accrue,Time12,Twait,NLookSwitch,NLook,Sup,Fut){
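# Simulates the phase III portion of the design: survival times are first
# generated for the phase I/II patients, a randomized comparison of the
# optimal dose against control accrues, the dose may be re-optimized after
# NLookSwitch deaths, and group-sequential looks (NLook, with Sup and Fut
# boundaries on the Wald statistic from a Cox model) drive the final
# superiority/futility decision for both the adaptive and fixed-dose
# ("REG") designs.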
B=2e3
ProbE=PE
ProbT=PT
probE=PE
probT=PT
YE=Phase12[,2]
YT=Phase12[,3]
Nmax1=Nmax
NET=nrow(Phase12)
TIMES=rep(NA,NET)
Doses=Dose[Phase12[,1]]
if(Family=="Gamma"){
for(h in 1:NET){
TIMES[h]=rgamma(1,alpha,1/exp(betaA[1]*Doses[h]+betaA[2]*YE[h]+betaA[3]*YT[h]+betaA[4]*Doses[h]^2+betaA[5]))
}
}
if(Family=="Weibull"){
for(h in 1:NET){
TIMES[h]=rweibull(1,alpha,exp(betaA[1]*Doses[h]+betaA[2]*YE[h]+betaA[3]*YT[h]+betaA[4]*Doses[h]^2+betaA[5]))  # rweibull, consistent with the other Weibull branches
}
}
if(Family=="Lognormal"){
for(h in 1:NET){
TIMES[h]=rlnorm(1,betaA[1]*Doses[h]+betaA[2]*YE[h]+betaA[3]*YT[h]+betaA[4]*Doses[h]^2+betaA[5],alpha)
}
}
if(Family=="Exponential"){
for(h in 1:NET){
TIMES[h]=rexp(1,1/exp(betaA[1]*Doses[h]+betaA[2]*YE[h]+betaA[3]*YT[h]+betaA[4]*Doses[h]^2+betaA[5]))
}
}
ACC = cumsum(rexp(Nmax*2,Accrue))
Trt = rep(NA,Nmax*2)
TimeCont=rep(NA,Nmax)
YECont=TimeCont
YTCont=TimeCont
if(Family=="Gamma"){
for(b in 1:Nmax){
YE=rbinom(1,1,ProbC[1])
YT=rbinom(1,1,ProbC[2])
TimeCont[b]=rgamma(1,alpha,1/exp(YE*betaC[1]+YT*betaC[2]+betaC[3]))
}
}
if(Family=="Exponential"){
for(b in 1:Nmax){
YE=rbinom(1,1,ProbC[1])
YT=rbinom(1,1,ProbC[2])
TimeCont[b]=rgamma(1,1,1/exp(YE*betaC[1]+YT*betaC[2]+betaC[3]))
}
}
if(Family=="Weibull"){
for(b in 1:Nmax){
YE=rbinom(1,1,ProbC[1])
YT=rbinom(1,1,ProbC[2])
TimeCont[b]=rweibull(1,alpha,exp(YE*betaC[1]+YT*betaC[2]+betaC[3]))
}
}
if(Family=="Lognormal"){
for(b in 1:Nmax){
YE=rbinom(1,1,ProbC[1])
YT=rbinom(1,1,ProbC[2])
TimeCont[b]=rlnorm(1,YE*betaC[1]+YT*betaC[2]+betaC[3],alpha)
}
}
INDSTOP=0
trial.time=0
trial.time1=0
Times=rep(NA,Nmax*2)
TimesREG=Times
YE=Times
YT=Times
Doses=Times
Treat=Times
XOLD = Phase12[,1]
YEOLD=Phase12[,2]
YTOLD=Phase12[,3]
ACCOLD =Phase12[,4]
TIMEOLD=TIMES
OptDose=Opt
trial.time=ACC[1]
Times=rep(NA,Nmax1*2)
YE=Times
YT=Times
Doses=Times
Treat=Times
Best=NA
NDeath=0
i=1
Nmax=Nmax1
trial.time1=trial.time
Time2=Time12
while(NDeath<NLookSwitch){
trial.time = trial.time +(ACC[i+1]-ACC[i])
Trt[i]=rbinom(1,1,.5)
if(Trt[i]==0){
Times[i]=TimeCont[i]
Treat[i]=0
Doses[i]=0
}else{
YE[i] = rbinom(1,1,probE[OptDose])
YT[i] = rbinom(1,1,probT[OptDose])
if(Family=="Gamma"){
Times[i]= rgamma(1,alpha,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Exponential"){
Times[i]= rgamma(1,1,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Weibull"){
Times[i]= rweibull(1,alpha,exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Lognormal"){
Times[i]= rlnorm(1,(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]),alpha)
}
Doses[i]=OptDose
}
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
i=i+1
}
TimesREG = Times
YEREG=YE
YTREG=YT
DosesREG=Doses
iREG=i
trial.reg = trial.time
TrtREG=Trt
Y = Times[Trt>0]
ACC1=ACC[Trt>0]
ACC1=ACC1[!is.na(Y)]
Y=Y[!is.na(Y)]
Y = pmin(Y,trial.time-ACC1)
I = (Y+ACC1)<trial.time
YE1 = subset(YE,!(is.na(YE)))
YT1 = subset(YT, !(is.na(YT)))
YE1=c(YE1,YEOLD)
YT1=c(YT1,YTOLD)
Doses1= subset(Doses, !(is.na(Doses)))
Doses1=Doses1[Doses1>0]
ACCOLD1=max(ACCOLD)-ACCOLD - Time2 + Twait
TIMEOLD1 = pmin(TIMEOLD,ACCOLD1+trial.time)
IOLD1=(TIMEOLD==TIMEOLD1)
Y=c(Y,TIMEOLD1)
I=c(I,IOLD1)
Doses2=c(Doses1,XOLD)
OptDose3=OptDose
OptDose= Reoptimize1(Y,I,YE1,YT1, Doses2, Dose, Hypermeans, Hypervars, B )
OptDose1=OptDose
NumLooks = length(NLook)
DECISION=NA
DECISIONREG=NA
if(OptDose==OptDose3){
NDeath=0
i=iREG
INDSTOP=0
for(k in 1:NumLooks){
if(INDSTOP==0){
NDeath=0
while(NDeath<NLook[k]){
if(i==Nmax){
INDSTOP=1
break
}
trial.time = trial.time +(ACC[i+1]-ACC[i])
Trt[i]=rbinom(1,1,.5)
if(Trt[i]==0){
Times[i]=TimeCont[i]
Treat[i]=0
Doses[i]=0
}else{
YE[i] = rbinom(1,1,probE[OptDose])
YT[i] = rbinom(1,1,probT[OptDose])
if(Family=="Gamma"){
Times[i]= rgamma(1,alpha,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Exponential"){
Times[i]= rgamma(1,1,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Weibull"){
Times[i]= rweibull(1,alpha,exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Lognormal"){
Times[i]= rlnorm(1,(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]),alpha)
}
Doses[i]=OptDose
}
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
i=i+1
}
if(INDSTOP==1){
trial.time = quantile((ACC+Times), NLook[k]/Nmax,na.rm=TRUE)
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
}
}else{
trial.time = quantile((ACC+Times), NLook[k]/Nmax,na.rm=TRUE)
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
}
Y = Times[!is.na(Times)]
Trt1 = Trt[!is.na(Times)]
ACC1=ACC[!is.na(Times)]
Y = pmin(Y,trial.time-ACC1)
I = (Y+ACC1)<trial.time
LR= coxph(Surv(Y,I)~Trt1)
if(sqrt(LR$wald.test)<Fut[k]){
DECISION=0
Npats=i
TRIALTIMES=trial.time
LOOK=k
LOOKREG=k
DECISIONREG=0
NpatsREG=i
TRIALTIMESREG=trial.time
break
}else{
if(sqrt(LR$wald.test)>Sup[k]){
TRIALTIMESREG=trial.time
NpatsREG=i
TRIALTIMES=trial.time
Npats=i
if(LR$coefficients[1]<0){
DECISIONREG=1
DECISION=1
}else{
DECISION=-1
DECISIONREG=-1
}
break
}
}
}
if(is.na(DECISION)){
DECISION=0
Npats=i
TRIALTIMES=trial.time
DECISIONREG=0
NpatsREG=i
TRIALTIMESREG=trial.time
}
}else{
NDeath=0
i=iREG
INDSTOP=0
DECISION=NA
DECISIONREG=NA
for(k in 1:NumLooks){
OptDose=OptDose3
if(INDSTOP==0){
while(NDeath<NLook[k]){
if(i==Nmax){
INDSTOP=1
break
}
trial.reg = trial.reg +(ACC[i+1]-ACC[i])
TrtREG[i]=rbinom(1,1,.5)
if(TrtREG[i]==0){
TimesREG[i]=TimeCont[i]
Treat[i]=0
Doses[i]=0
}else{
YE[i] = rbinom(1,1,probE[OptDose])
YT[i] = rbinom(1,1,probT[OptDose])
if(Family=="Gamma"){
TimesREG[i]= rgamma(1,alpha,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Exponential"){
TimesREG[i]= rgamma(1,1,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Weibull"){
TimesREG[i]= rweibull(1,alpha,exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Lognormal"){
TimesREG[i]= rlnorm(1,(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]),alpha)
}
}
NDeath = sum((ACC+TimesREG)<trial.reg,na.rm=TRUE)
i=i+1
}
if(INDSTOP==1){
trial.reg = quantile(ACC+TimesREG,NLook[k]/Nmax,na.rm=TRUE)  # trial.reg, not trial.time: this branch tracks the fixed-dose arm's clock
NDeath = sum((ACC+TimesREG)<trial.reg,na.rm=TRUE)
}
}else{
trial.reg = quantile((ACC+TimesREG),NLook[k]/Nmax,na.rm=TRUE)
NDeath = sum((ACC+TimesREG)<trial.reg,na.rm=TRUE)
}
Y = TimesREG[1:(i-1)]
Trt1 = TrtREG[1:(i-1)]
ACC1=ACC[1:(i-1)]
Y = pmin(Y,trial.reg-ACC1)
I = (Y+ACC1)<trial.reg
LR= coxph(Surv(Y,I)~Trt1)
if(sqrt(LR$wald.test)<Fut[k]){
DECISIONREG=0
NpatsREG=i
TRIALTIMESREG=trial.reg
break
}else{
if(sqrt(LR$wald.test)>Sup[k]){
TRIALTIMESREG=trial.reg
NpatsREG=i
if(LR$coefficients[1]<0){
DECISIONREG=1
}else{
DECISIONREG=-1
}
break
}
}
}
if(is.na(DECISIONREG)){
DECISIONREG=0
NpatsREG=i
TRIALTIMESREG=trial.reg
}
i=iREG
OptDose=OptDose1
INDSTOP=0
for(k in 1:NumLooks){
NDeath=0
if(INDSTOP==0){
while(NDeath<NLook[k]){
if(i==Nmax){
INDSTOP=1
break
}
trial.time = trial.time +(ACC[i+1]-ACC[i])
Trt[i]=rbinom(1,1,.5)
if(Trt[i]==0){
Times[i]=TimeCont[i]
Treat[i]=0
Doses[i]=0
}else{
YE[i] = rbinom(1,1,probE[OptDose])
YT[i] = rbinom(1,1,probT[OptDose])
if(Family=="Gamma"){
Times[i]= rgamma(1,alpha,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Exponential"){
Times[i]= rgamma(1,1,1/exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Weibull"){
Times[i]= rweibull(1,alpha,exp(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]))
}
if(Family=="Lognormal"){
Times[i]= rlnorm(1,(betaA[1]*Dose[OptDose]+betaA[4]*Dose[OptDose]^2+YE[i]*betaA[2]+YT[i]*betaA[3]+betaA[5]),alpha)
}
Doses[i]=OptDose
}
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
i=i+1
}
if(INDSTOP==1){
trial.time = quantile((ACC+Times),NLook[k]/Nmax,na.rm=TRUE)
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
}
}else{
trial.time = quantile((ACC+Times),NLook[k]/Nmax,na.rm=TRUE)
NDeath = sum((ACC+Times)<trial.time,na.rm=TRUE)
}
Y = Times[1:(i-1)]
Trt1 = Trt[1:(i-1)]
Doses1 = Doses[1:(i-1)]
ACC1=ACC[1:(i-1)]
Y = pmin(Y,trial.time-ACC1)
I = (Y+ACC1)<trial.time
Trt2=subset(Trt1, Trt1==0 | Doses1==OptDose)
Y=subset(Y, Trt1==0 | Doses1==OptDose)
I=subset(I, Trt1==0 | Doses1==OptDose)
LR= coxph(Surv(Y,I)~Trt2)
if(sqrt(LR$wald.test)<Fut[k]){
DECISION=0
Npats=i
TRIALTIMES=trial.time
break
}else{
if(sqrt(LR$wald.test)>Sup[k]){
TRIALTIMES=trial.time
Npats=i
TRIALTIMESSET=trial.time
NpatsSET=i
if(LR$coefficients[1]<0){
DECISION=1
}else{
DECISION=-1
}
break
}
}
}
if(is.na(DECISION)){
DECISION=0
Npats=i
TRIALTIMES=trial.time
}
}
Phase123 = rep(0,4)
Phase123[1]=OptDose1
Phase123[2]=DECISION
Phase123[3]=Npats
Phase123[4]=TRIALTIMES
G = rep(0,4)
G[1]=OptDose3
G[2]=DECISIONREG
G[3]=NpatsREG
G[4]=TRIALTIMESREG
Z=as.list(c(0,0))
Z[[1]]=Phase123
Z[[2]]=G
return(Z)
} |
use_crs <- function(crs = NULL) {
if (!is.null(crs)) {
options(crs.in.use = crs)
return(crs)
}
crs <- getOption("crs.in.use")
if (is.null(crs)) warning("No crs.in.use")
crs
} |
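# Illustrative round trip: store a CRS code in the session option, then
# retrieve it from a later call with no argument.
use_crs(4326)
use_crs()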
InputsForMLEdensity <- function(ModelType, Yields, PdynamicsFactors, FactorLabels, mat, Economies, DataFrequency,
JLLinputs = NULL, GVARinputs = NULL){
if (length(Economies) == 1 &
( ModelType == "GVAR jointQ" || ModelType == "VAR jointQ" || ModelType == "JLL original"
|| ModelType == "JLL NoDomUnit" || ModelType == "JLL jointSigma" )){
stop("The models 'GVAR jointQ', 'VAR jointQ', 'JLL original', 'JLL NoDomUnit', and 'JLL jointSigma'
require the estimation of several countries")
}
if (ModelType == 'JPS' || ModelType == 'JPS jointP' || ModelType == "GVAR sepQ"){
i <- get("i", globalenv())
Economies <- Economies[i]
}
N <- length(FactorLabels$Spanned)
G <- length(FactorLabels$Global)
C <- length(Economies)
J <- nrow(Yields)/C
if (C==1){
IdxCountry <- which(grepl(Economies, rownames(Yields)))
J <- length(IdxCountry)
}
if (DataFrequency == "Daily All Days"){ dt <- 1/365}
if (DataFrequency == "Daily Business Days"){ dt <- 1/252}
if (DataFrequency == "Weekly"){ dt <- 1/52}
if (DataFrequency == "Monthly"){ dt <- 1/12}
if (DataFrequency == "Quartely"){ dt <- 1/4}
if (DataFrequency == "Annually"){ dt <- 1}
idxJ0 <- 0
idxN0 <- 0
if (C == 1){idxJ0 <- IdxCountry[1]-1 }
for (i in 1:C){
idxJ1 <- idxJ0 + J
idxN1 <- idxN0 + N
YCS <- Yields[(idxJ0+1):idxJ1,]
WpcaCS <- 100*pca_weights_one_country(YCS, Economy= Economies[i])[1:N,]
WeCS <- t(null(WpcaCS))
WpcaFullCS <- rbind(WpcaCS, WeCS)
PPCS <- Spanned_Factors(YCS, Economies = Economies[i], N)
K1XQCS <- Reg_K1Q(YCS, mat, PPCS, dt, type="Jordan")
if (i==1){
K1XQ <- K1XQCS
Wpca <- WpcaCS
WpcaFull <- WpcaFullCS
We <- WeCS
PP <- PPCS
Y <- YCS
}else{
K1XQ <- magic::adiag(K1XQ,K1XQCS)
Wpca <- magic::adiag(Wpca,WpcaCS)
We <- magic::adiag(We,WeCS)
WpcaFull <- magic::adiag(WpcaFull, WpcaFullCS)
PP <- rbind(PP, PPCS)
Y <- rbind(Y, YCS)
}
idxJ0<- idxJ1
idxN0 <- idxN1
}
if (ModelType == 'JPS'){
idxCountryFactors <- which(grepl(Economies, rownames(PdynamicsFactors)))
idxFactors <- c(seqi(1,G), idxCountryFactors)
ZZ <- PdynamicsFactors[idxFactors,]
}else{
ZZ <- PdynamicsFactors
}
colnames(Y) <- colnames(ZZ)
K <- nrow(ZZ)
Gy.0 <- diag(K)
if (ModelType == 'JPS' || ModelType == 'JPS jointP' || ModelType == 'VAR jointQ'){
VARpara <- VAR(ZZ, VARtype= 'unconstrained', Bcon = NULL)
K0Z <- VARpara$K0Z
K1Z <- VARpara$K1Z
SSZ <- VARpara$SSZ
}
if (ModelType == 'GVAR sepQ'|| ModelType == 'GVAR jointQ'){
GVARpara <- GVAR(GVARinputs, N)
K0Z <- GVARpara$F0
K1Z <- GVARpara$F1
SSZ <- GVARpara$Sigma_y
}
if (ModelType == "JLL original" || ModelType == "JLL NoDomUnit" || ModelType == "JLL jointSigma"){
JLLinputs$WishSigmas <- 1
JLLPara <- JLL(ZZ, N, JLLinputs)
K0Z <-JLLPara$k0
K1Z <-JLLPara$k1
JLLinputs$WishSigmas <- 0
if (ModelType == "JLL original" || ModelType == "JLL NoDomUnit"){ SSZ <- JLLPara$Sigmas$VarCov_NonOrtho }
if (ModelType == "JLL jointSigma"){ SSZ <- JLLPara$Sigmas$VarCov_Ortho
}
}
Output <- list(Wpca, We, WpcaFull, Y, PP, Gy.0, K1XQ, ZZ, SSZ, K0Z, K1Z, JLLinputs, GVARinputs)
names(Output) <- c("Wpca","We", "WpcaFull", "Y","PP", "Gy.0","K1XQ", "ZZ", "SSZ", "K0Z", "K1Z", "JLLinputs", "GVARinputs")
return(Output)
}
InputsForMLEdensity_BS <- function(ModelType, Y_artificial, Z_artificial, FactorLabels, mat,
Economies, DataFrequency, JLLinputs = NULL, GVARinputs= NULL){
if (ModelType == 'JPS' || ModelType == 'JPS jointP' || ModelType == "GVAR sepQ"){
i <- get("i", globalenv())
Economies <- Economies[i]
}
J <- length(mat)
C <- length(Economies)
N <- length(FactorLabels$Spanned)
G <- length(FactorLabels$Global)
M <- length(FactorLabels$Domestic) - N
if (DataFrequency == "Daily All Days"){ dt <- 1/365}
if (DataFrequency == "Daily Business Days"){ dt <- 1/252}
if (DataFrequency == "Weekly"){ dt <- 1/52}
if (DataFrequency == "Monthly"){ dt <- 1/12}
if (DataFrequency == "Quartely"){ dt <- 1/4}
if (DataFrequency == "Annually"){ dt <- 1}
ZZ_artificial <- t(Z_artificial)
YY_artificial <- t(Y_artificial)
idxJ0 <- 0
idxN0 <- 0
for ( i in 1:C){
idxJ1 <- idxJ0 + J
idxN1 <- idxN0 + N
P_CS <- ZZ_artificial[(c(FactorLabels$Tables[[Economies[i]]] [(M+1):(M+N)])),]
Y_CS <- YY_artificial[(idxJ0+1):idxJ1,]
K1XQ__CS <- Reg_K1Q(Y_CS, mat, P_CS, dt, type="Jordan")
Wpca_CS <- mrdivide(P_CS,Y_CS)
if (all(Wpca_CS[1,] < 0)){ Wpca_CS[1,] <- Wpca_CS[1,]*(-1) }
if (Wpca_CS[2,1] > Wpca_CS[2,J]){ Wpca_CS[2,] <- Wpca_CS[2,]*(-1) }
if (Wpca_CS[3,1] > Wpca_CS[3,M] & Wpca_CS[3,J] > Wpca_CS[3,M]){ Wpca_CS[3,] <- Wpca_CS[3,]*(-1) }
We_CS <- t(null(Wpca_CS))
WpcaFull_CS <- rbind(Wpca_CS, We_CS)
if (i == 1 ){
P_artificial <- P_CS
Y_artificial <- Y_CS
Wpca_artificial <- Wpca_CS
We_artificial <- We_CS
WpcaFull_artificial <- WpcaFull_CS
K1XQ_artificial <- K1XQ__CS
}else{
P_artificial <-rbind(P_artificial, P_CS)
Y_artificial <- rbind(Y_artificial, Y_CS)
Wpca_artificial <- magic::adiag(Wpca_artificial, Wpca_CS)
We_artificial <- magic::adiag(We_artificial, We_CS)
WpcaFull_artificial <- magic::adiag(WpcaFull_artificial, WpcaFull_CS)
K1XQ_artificial <- magic::adiag(K1XQ_artificial, K1XQ__CS)
}
idxJ0<- idxJ1
idxN0 <- idxN1
}
K <- nrow(ZZ_artificial)
Gy.0 <- diag(K)
if (ModelType == 'JPS' || ModelType == 'JPS jointP' || ModelType == 'VAR jointQ'){
VARpara <- VAR(ZZ_artificial, VARtype= 'unconstrained', Bcon = NULL)
K0Z_artificial <- VARpara$K0Z
K1Z_artificial <- VARpara$K1Z
SSZ_artificial <- VARpara$SSZ
}
if (ModelType == 'GVAR sepQ'|| ModelType == 'GVAR jointQ'){
GVARpara <- GVAR(GVARinputs, N)
K0Z_artificial <- GVARpara$F0
K1Z_artificial <- GVARpara$F1
SSZ_artificial <- GVARpara$Sigma_y
}
if (ModelType == "JLL original" || ModelType == "JLL NoDomUnit" || ModelType == "JLL jointSigma"){
JLLinputs$WishSigmas <- 1
JLLPara <- JLL(ZZ_artificial, N, JLLinputs)
K0Z_artificial <-JLLPara$k0
K1Z_artificial <-JLLPara$k1
JLLinputs$WishSigmas <- 0
if (ModelType == "JLL original" || ModelType == "JLL NoDomUnit"){ SSZ_artificial <- JLLPara$Sigmas$VarCov_NonOrtho }
if (ModelType == "JLL jointSigma"){ SSZ_artificial <- JLLPara$Sigmas$VarCov_Ortho
}
}
ListOutputs <- list(Wpca_artificial, We_artificial, WpcaFull_artificial, Y_artificial, P_artificial, ZZ_artificial,
Gy.0, SSZ_artificial, K1XQ_artificial, K0Z_artificial, K1Z_artificial, JLLinputs, GVARinputs)
names(ListOutputs) <- c("Wpca","We", "WpcaFull", "Y", "PP", "ZZ", "Gy.0", "SSZ", "K1XQ", "K0Z", "K1Z",
"JLLinputs", "GVARinputs")
return(ListOutputs)
} |
summary.CV_Data <- function(object,...){
cat("\nContaining the data required in cross-validation. \n");
cat("Number of bins:", object$stat$g,"\n");
cat("Ratio p:", object$p, "\n");
} |
setGeneric("node", function(obj, node=list(), what=NULL, cond.attr=NULL, cond.value=NULL, element=NULL){standardGeneric("node")})
setClassUnion("XiMpLe.XML", members=c("XiMpLe.node", "XiMpLe.doc"))
setMethod("node",
signature(obj="XiMpLe.XML"),
function(obj, node=list(), what=NULL, cond.attr=NULL, cond.value=NULL, element=NULL){
if(is.XiMpLe.node(obj)){
got.this <- identical(slot(obj, "name"), node[[1]])
if(!isTRUE(got.this)){
stop(simpleError(paste0("Can't find node ", node[[1]], " in ", sQuote(deparse(substitute(obj))), "!")))
} else {
node[[1]] <- NULL
}
} else {}
result.node.path <- "obj"
for (this.node in node){
for (this.path in result.node.path){
this.node.part <- eval(parse(text=this.path))
got.this <- lapply(slot(this.node.part, "children"), function(this.child){slot(this.child, "name")}) %in% this.node
if(!any(got.this)){
stop(simpleError(paste0("Can't find node ", sQuote(this.node), " in ", sQuote(deparse(substitute(obj))), "!")))
} else {
result.node.path <- unique(paste0(result.node.path, paste0("@children[[",which(got.this),"]]")))
}
}
}
if(!is.null(cond.attr)){
filter <- names(cond.attr)
filtered.paths <- c()
for (this.path in result.node.path){
this.node.part <- eval(parse(text=this.path))
this.attrs <- slot(this.node.part, "attributes")
attr.found <- filter %in% names(this.attrs)
if(all(attr.found)){
found.this <- sapply(
filter,
function(this.attr){
results <- unlist(cond.attr[this.attr]) == unlist(this.attrs[this.attr])
return(results)
},
USE.NAMES=FALSE
)
if(all(found.this)){
filtered.paths <- unique(c(filtered.paths, this.path))
} else {}
} else {}
}
result.node.path <- filtered.paths
} else {}
result.cond <- sapply(
result.node.path,
function(this.path){
eval(parse(text=this.path))
},
USE.NAMES=FALSE
)
if(!is.null(cond.value)){
stopifnot(length(cond.value) == 1)
filtered.paths <- c()
for (this.path in result.node.path){
this.node.part <- eval(parse(text=this.path))
this.value <- slot(this.node.part, "value")
if(identical(this.value, cond.value)){
filtered.paths <- unique(c(filtered.paths, this.path))
} else {}
}
result.node.path <- filtered.paths
} else {}
if(!is.null(what)){
stopifnot(length(what) == 1)
if(!what %in% c(slotNames(new("XiMpLe.node")), "@path", "obj@path")){
stop(simpleError(paste0("Invalid slot for class XiMpLe.node:", paste(sQuote(what), collapse=", "), "!")))
} else {}
if(identical(what, "@path")){
result.node.path <- gsub("^obj", paste(deparse(substitute(obj))), result.node.path)
return(result.node.path)
} else if(identical(what, "obj@path")){
return(result.node.path)
} else {}
result <- unlist(lapply(result.node.path, function(this.path){
this.node <- eval(parse(text=this.path))
results <- slot(this.node, what)
if(identical(what, "value")){
for (this.child in slot(this.node, "children")){
if(identical(slot(this.child, "name"), "") & isTRUE(nchar(slot(this.child, "value")) > 0))
results <- paste(slot(this.child, "value"), results, sep=" ")
}
} else {}
if(!is.null(element)){
results <- results[element]
} else {}
return(results)
}))
if(identical(what, "attributes")){
result <- as.list(result)
} else {}
} else {
result <- unlist(lapply(result.node.path, function(this.path){
return(eval(parse(text=this.path)))
}))
}
if(length(result) == 1){
if(is.XiMpLe.node(result[[1]]) | !is.null(element)){
result <- result[[1]]
} else {}
} else {}
return(result)
}
)
setGeneric("node<-", function(obj, node=list(), what=NULL, cond.attr=NULL, cond.value=NULL, element=NULL, value){standardGeneric("node<-")})
setMethod("node<-",
signature(obj="XiMpLe.XML"),
function(obj, node=list(), what=NULL, cond.attr=NULL, cond.value=NULL, element=NULL, value){
obj.paths <- node(obj, node=node, what="obj@path", cond.attr=cond.attr, cond.value=cond.value)
if(is.null(obj.paths)){
stop(simpleError("Node not found."))
} else {}
for (this.node in obj.paths){
if(!is.null(what)){
if(identical(what, "value")){
eval(parse(text=paste0(this.node, "@value <- character()")))
all.node.children <- slot(eval(parse(text=this.node)), "children")
child.is.value <- unlist(sapply(
all.node.children,
function(this.child){
if(
all(
identical(slot(this.child, "name"), ""),
isTRUE(nchar(slot(this.child, "value")) > 0)
)
){
return(TRUE)
} else {
return(FALSE)
}
},
USE.NAMES=FALSE
))
if(length(all.node.children) != length(child.is.value)){
warning("a child node contained text values and other nodes, we probably messed up the markup!")
} else {}
remove.nodes <- paste0(this.node, "@children[child.is.value] <- NULL")
eval(parse(text=remove.nodes))
pseudo.node <- paste0(this.node, "@children <- append(", this.node, "@children, ",
"new(\"XiMpLe.node\", name=\"\", value=\"", value, "\"), after=0)")
eval(parse(text=pseudo.node))
return(obj)
} else {
this.node <- paste0(this.node, "@", what)
}
if(!is.null(element)){
this.node <- paste0(this.node, "[[\"",element,"\"]]")
} else {}
} else {}
eval(parse(text=paste0(this.node, " <- ", deparse(value))))
}
return(obj)
}
) |
fs.dosage<-function (dos, pop, matching = FALSE)
{
if (!matching) {
Mij<-matching(dos)
Mii <- (diag(Mij)) * 2 - 1
diag(Mij) <- NA
}
else {
Mij <- dos
Mii <- diag(Mij) * 2 - 1
diag(Mij) <- NA
}
pop <- factor(pop)
x <- levels(pop)
npop <- length(x)
wil <- lapply(x, function(z) which(pop == z))
Fi <- lapply(wil, function(x) Mii[x])
Fsts <- unlist(lapply(wil, function(x) mean(Mij[x, x], na.rm = TRUE)))
Mb <- 0
mMij <- matrix(numeric(npop^2), ncol = npop)
for (i in 2:npop) {
p1 <- wil[[i]]
for (j in 1:(i - 1)) {
p2 <- wil[[j]]
mMij[i, j] <- mMij[j, i] <- mean(Mij[p1, p2], na.rm = TRUE)
Mb <- Mb + mMij[i, j]
}
}
diag(mMij) <- Fsts
Fst2x2<-matrix(NA,ncol=npop,nrow=npop)
for (i in 2:npop)
for (j in 1:(i-1)) Fst2x2[i,j]<-Fst2x2[j,i]<-((mMij[i,i]+mMij[j,j])/2-mMij[i,j])/(1-mMij[i,j])
Mb <- Mb * 2/(npop * (npop - 1))
resM <- (mMij - Mb)/(1 - Mb)
rownames(resM) <- colnames(resM) <- levels(pop)
rownames(Fst2x2)<-colnames(Fst2x2)<-levels(pop)
for (i in 1:npop) Fi[[i]] <- (Fi[[i]] - Fsts[[i]])/(1 - Fsts[[i]])
names(Fi) <- as.character(x)
Fis <- unlist(lapply(Fi, mean, na.rm = TRUE))
Fis <- c(Fis, mean(Fis, na.rm = TRUE))
Fsts <- c((Fsts - Mb)/(1 - Mb), mean((Fsts - Mb)/(1 - Mb),
na.rm = TRUE))
res <- rbind(Fis, Fsts)
colnames(res) <- c(levels(pop), "All")
rownames(res) <- c("Fis", "Fst")
all.res <- (list(Fi = Fi, FsM = resM, Fst2x2 = Fst2x2, Fs = res))
class(all.res) <- "fs.dosage"
all.res
}
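# Illustrative sketch: a precomputed matching matrix is passed with
# matching = TRUE so the example does not depend on the internal
# matching() helper; the values are toy numbers, not real genotypes.
set.seed(2)
toyM <- crossprod(matrix(runif(24, 0.4, 0.6), 4, 6)) / 4
fs.dosage(toyM, pop = rep(1:2, each = 3), matching = TRUE)$Fs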
plot.fs.dosage<-function(x,...){
graphics::par(mfrow=c(2,2))
graphics::boxplot(x$Fi,xlab="pop",ylab=expression(F[i]),main="Ind. Inb. coeff.",...)
np<-dim(x$FsM)[1]
graphics::image(1:np,1:np,x$FsM,main=expression(F[ST]^{XY}),xlab="Pop. X",ylab="Pop. Y")
graphics::image(1:np,1:np,x$Fst2x2,main=expression(F[ST]^{pairwise}),xlab="Pop. X",ylab="Pop. Y")
graphics::barplot(x$Fs[2,1:np],xlab="pop",ylab="Fst",main="",...);graphics::abline(h=x$Fs[2,np+1])
graphics::par(mfrow=c(1,1))
}
print.fs.dosage<-function(x,digits=4,...){
print(round(x$Fs,digits=digits,...))
invisible(x)
}
fst.dosage<-function(dos, pop, matching = FALSE){
fs.dosage(dos,pop,matching)$Fs[2,]
}
fis.dosage<-function(dos, pop, matching = FALSE){
fs.dosage(dos,pop,matching)$Fs[1,]
}
pairwise.fst.dosage<-function(dos, pop, matching = FALSE){
fs.dosage(dos,pop,matching)$Fst2x2
} |
ISOpureS2.model_optimize.opt_kappa <- function(tumordata, model, NUM_ITERATIONS_RMINIMIZE, iter, NUM_GRID_SEARCH_ITERATIONS) {
kappa <- (model$kappa);
init_xx <- t(log(kappa - model$MIN_KAPPA));
returnval <- ISOpure.model_optimize.cg_code.rminimize(init_xx, ISOpureS2.model_optimize.kappa.kappa_loglikelihood, ISOpureS2.model_optimize.kappa.kappa_deriv_loglikelihood, NUM_ITERATIONS_RMINIMIZE, model);
xx <- as.numeric(returnval[[1]]);
model$kappa <- t(as.matrix(exp(xx) + model$MIN_KAPPA));
return(model);
} |
supervised.pca <- function(target, dataset, indices, center = TRUE, scale = TRUE, colours = NULL, graph = TRUE) {
mod.all <- prcomp(dataset, center = center, scale = scale)
mod.sel <- prcomp(dataset[, indices], center = center, scale = scale)
rat <- sum(mod.sel$sdev^2) / sum(mod.all$sdev^2)
if ( graph ) {
if ( is.null(colours) ) target <- as.numeric( as.factor(target) )
plot( mod.all$x[, 1:2], col = target, main = "Scores using all variables" )
dev.new()
plot( mod.sel$x[, 1:2], col = target, main = "Scores using the selected variables" )
}
  list(mod.all = mod.all, mod.sel = mod.sel, var.percent = rat)
} |
select_data_type <- function(temp_meta, simplified=NULL){
if(isTRUE(simplified)){
temp_meta <- temp_meta[ grepl(pattern="simplified", temp_meta$download_path), ]
}
else if(isFALSE(simplified)){
temp_meta <- temp_meta[ !(grepl(pattern="simplified", temp_meta$download_path)), ]
  } else { stop("Argument 'simplified' needs to be either TRUE or FALSE") }
return(temp_meta)
}
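# Illustrative filter on a toy metadata frame (not the real geobr
# metadata): keeps only rows whose download_path marks simplified borders.
meta.toy <- data.frame(year = c(2010, 2010),
                       download_path = c("uf_simplified.gpkg", "uf.gpkg"))
select_data_type(meta.toy, simplified = TRUE)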
select_year_input <- function(temp_meta, y=year){
  if (is.null(y)){ stop(paste0("Invalid value for argument 'year'. It must be one of the following: ",
                               paste(unique(temp_meta$year), collapse = " "))) }
  else if (y %in% temp_meta$year){ message(paste0("Using year ", y))
    temp_meta <- temp_meta[temp_meta[,2] == y,]
    return(temp_meta) }
  else { stop(paste0("Invalid value for argument 'year'. It must be one of the following: ",
              paste(unique(temp_meta$year), collapse = " ")))
  }
}
select_metadata <- function(geography, year=NULL, simplified=NULL){
metadata <- download_metadata()
if (is.null(metadata)) { return(invisible(NULL)) }
temp_meta <- subset(metadata, geo == geography)
temp_meta <- select_year_input(temp_meta, y=year)
temp_meta <- select_data_type(temp_meta, simplified=simplified)
return(temp_meta)
}
download_gpkg <- function(file_url, progress_bar = showProgress){
if( !(progress_bar %in% c(T, F)) ){ stop("Value to argument 'showProgress' has to be either TRUE or FALSE") }
if (length(file_url)==1 & progress_bar == TRUE) {
temps <- paste0(tempdir(),"/", unlist(lapply(strsplit(file_url,"/"),tail,n=1L)))
if (!file.exists(temps) | file.info(temps)$size == 0) {
check_con <- check_connection(file_url[1])
if(is.null(check_con) | isFALSE(check_con)){ return(invisible(NULL)) }
try( httr::GET(url=file_url,
httr::progress(),
httr::write_disk(temps, overwrite = T),
config = httr::config(ssl_verifypeer = FALSE)
), silent = T)
}
temp_sf <- load_gpkg(file_url, temps)
return(temp_sf)
}
else if (length(file_url)==1 & progress_bar == FALSE) {
temps <- paste0(tempdir(),"/", unlist(lapply(strsplit(file_url,"/"),tail,n=1L)))
if (!file.exists(temps) | file.info(temps)$size == 0) {
check_con <- check_connection(file_url[1])
if(is.null(check_con) | isFALSE(check_con)){ return(invisible(NULL)) }
try( httr::GET(url=file_url,
httr::write_disk(temps, overwrite = T),
config = httr::config(ssl_verifypeer = FALSE)
), silent = T)
}
temp_sf <- load_gpkg(file_url, temps)
return(temp_sf)
}
else if(length(file_url) > 1 & progress_bar == TRUE) {
total <- length(file_url)
pb <- utils::txtProgressBar(min = 0, max = total, style = 3)
check_con <- check_connection(file_url[1])
if(is.null(check_con) | isFALSE(check_con)){ return(invisible(NULL)) }
lapply(X=file_url, function(x){
temps <- paste0(tempdir(),"/", unlist(lapply(strsplit(x,"/"),tail,n=1L)))
if (!file.exists(temps) | file.info(temps)$size == 0) {
i <- match(c(x),file_url)
try( httr::GET(url=x,
httr::write_disk(temps, overwrite = T),
config = httr::config(ssl_verifypeer = FALSE)
), silent = T)
utils::setTxtProgressBar(pb, i)
}
})
close(pb)
temp_sf <- load_gpkg(file_url)
return(temp_sf)
}
else if(length(file_url) > 1 & progress_bar == FALSE) {
check_con <- check_connection(file_url[1])
if(is.null(check_con) | isFALSE(check_con)){ return(invisible(NULL)) }
lapply(X=file_url, function(x){
temps <- paste0(tempdir(),"/", unlist(lapply(strsplit(x,"/"),tail,n=1L)))
if (!file.exists(temps) | file.info(temps)$size == 0) {
i <- match(c(x),file_url)
httr::GET(url=x,
httr::write_disk(temps, overwrite = T),
config = httr::config(ssl_verifypeer = FALSE)
)
}
})
temp_sf <- load_gpkg(file_url)
return(temp_sf)
}
}
load_gpkg <- function(file_url, temps=NULL){
if(length(file_url)==1){
temp_sf <- sf::st_read(temps, quiet=T)
return(temp_sf)
}
else if(length(file_url) > 1){
files <- unlist(lapply(strsplit(file_url,"/"), tail, n = 1L))
files <- paste0(tempdir(),"/",files)
files <- lapply(X=files, FUN= sf::st_read, quiet=T)
temp_sf <- sf::st_as_sf(data.table::rbindlist(files, fill = TRUE))
return(temp_sf)
}
}
check_connection <- function(file_url = 'https://www.ipea.gov.br/geobr/metadata/metadata_gpkg.csv'){
if (!curl::has_internet()) { message("\nNo internet connection.")
return(FALSE)
}
msg <- "Problem connecting to data server. Please try geobr again in a few minutes."
x <- try(silent = TRUE,
httr::GET(file_url,
config = httr::config(ssl_verifypeer = FALSE)))
  if (inherits(x, "try-error")) {
    message( msg )
    return(FALSE)
  }
  else if ( identical(httr::status_code(x), 200L)) {
    return(TRUE)
  }
  else {
    message(msg)
    return(FALSE)
  }
} |
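## Hedged usage sketch of the download helpers above. The GPKG URL is
## hypothetical and only illustrates the expected input; download_gpkg()
## caches the file in tempdir() and reads it back with sf::st_read().
if (FALSE) {
  url <- "https://www.ipea.gov.br/geobr/data_gpkg/example_layer.gpkg"  # hypothetical URL
  if (isTRUE(check_connection(url))) {
    layer <- download_gpkg(url, progress_bar = TRUE)
  }
}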
ScaleMat <-
function(samRectangle, parRectangle){
xrange_sam <- range(samRectangle[, 1])
yrange_sam <- range(samRectangle[, 2])
xrange_par <- range(parRectangle[, 1])
yrange_par <- range(parRectangle[, 2])
xlength_sam <- xrange_sam[2] - xrange_sam[1]
ylength_sam <- yrange_sam[2] - yrange_sam[1]
xlength_par <- xrange_par[2] - xrange_par[1]
ylength_par <- yrange_par[2] - yrange_par[1]
x_scalefactor <- xlength_par / xlength_sam
y_scalefactor <- ylength_par / ylength_sam
scale_mat <- matrix(c(x_scalefactor, 0, 0, y_scalefactor), ncol = 2)
return(scale_mat)
} |
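## Example (minimal sketch): ScaleMat() only looks at the x/y ranges of the
## two point matrices, so any two-column matrices spanning the rectangles
## work. Mapping the unit square onto a 4 x 2 rectangle gives diag(c(4, 2)).
if (FALSE) {
  sam <- cbind(c(0, 1, 1, 0), c(0, 0, 1, 1))  # corners of the unit square
  par <- cbind(c(0, 4, 4, 0), c(0, 0, 2, 2))  # corners of the target rectangle
  ScaleMat(sam, par)
}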
options( echo=FALSE)
library( spam, warn.conflict=FALSE)
Rprof <- function(memory.profiling=TRUE, interval=0.1)
return()
summaryRprof <- function(memory="both")
return(list(by.total=rbind(1:4)))
i <- c( 2,4,4,5,5)
j <- c( 1,1,2,1,3)
A <- spam(0,5,5)
A[cbind(i,j)] <- rep(.5, length(i))
A <- t( A)+A+diag.spam(5)
U <- chol( A)
pivot <- U@pivot
B <- A[pivot,pivot]
R <- chol( B)
U@pivot
U@snmember
U@supernodes
U@entries
U@colindices
U@colpointers
U@rowpointers
if (F){
display( A)
display( as.spam( chol(as.matrix( A))))
display( B)
display( as.spam(R))
abline( h=-U@supernodes+.5,col=3,lty=2)
}
theta1 <- .1
theta2 <- .01
n <- dim( UScounties.storder)[1]
USmat <- diag.spam(n) + theta1 * UScounties.storder + theta2 * UScounties.ndorder
U <- chol( USmat,memory=list(nnzR=146735))
if (F) {
display( as.spam(U))
text(400,-2200,"MMD\nz=146735\nw=30182\ns=1262",adj=0)
}
U <- chol( USmat, pivot="RCM",memory=list(nnzR=256198,nnzcolindices=140960))
if (F) {
display( as.spam(U))
text(400,-2200,"RCM\nz=256198\nw=140960\ns=1706",adj=0)
}
U <- chol( USmat, pivot=FALSE,memory=list(nnzR=689615,nnzcolindices=96463))
if (F) {
display( as.spam(U))
text(400,-2200,"no permutation\nz=689615\nw=96463\ns=711",adj=0)
}
N <- 10
stsel <- 1
rPsx <- 1
rPsy <- 3
rPint <- .0001
theta1 <- .1
theta2 <- .05
xseq <- ceiling(4 + exp(seq(0,to=4,by=1))/2)
xseql <- length(xseq)
table <- array(NA,c(xseql,4))
for (ix in 1:xseql) {
egdx <- expand.grid(1:xseq[ix],1:xseq[ix])
Cspam <- nearest.dist( egdx, delta=1., upper=NULL)
Dspam <- nearest.dist( egdx, delta=1.5,upper=NULL)
mat <- diag.spam(xseq[ix]^2) + theta1 * Cspam + theta2 * Dspam
Rprof( memory.profiling=TRUE, interval = rPint)
table[ix,1] <- system.time( { ch1 <- chol(mat);
for (i in 1:N) ch1 <- chol(mat)}
)[stsel]
Rprof( NULL)
table[ix,2] <- summaryRprof( memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[ix,3] <- system.time( { ch1 <- chol(mat);
for (i in 1:N) ch2 <- update(ch1,mat) }
)[stsel]
Rprof( NULL)
table[ix,4] <- summaryRprof( memory="both")$by.total[rPsx,rPsy]
}
if (F) {
table <- pmax(table, 0.0001)
par(mfcol=c(1,2))
plot(xseq, table[,1], type='l', log='xy', ylim=range(table[,c(1,3)]),
xlab="L (log scale)", ylab="seconds (log scale)")
lines(xseq, table[,3], lty=2)
plot(xseq, table[,2], type='l', log='xy', ylim=range(table[,c(2,4)]+0.01),
xlab="L (log scale)", ylab="Mbytes (log scale)")
lines(xseq, table[,4], lty=2)
}
x <- 20
maxnn <- 3
egdx <- expand.grid( 1:(maxnn+1), 1:(maxnn+1))
dval <- sort(unique(nearest.dist( egdx, delta=maxnn)@entries))
dvall <- length( dval)
egdx <- expand.grid( 1:x, 1:x)
table <- array(NA, c(dvall,5))
for (id in 1:dvall) {
mat <- nearest.dist( egdx, delta=dval[id],upper=NULL)
mat@entries <- exp(-2*mat@entries)
  table[id,5] <- length(mat)  # number of nonzeros of the current matrix
Rprof( memory.profiling=TRUE, interval = rPint)
table[id,1] <- system.time( { ch1 <- chol(mat);
for (i in 1:N) ch1 <- chol(mat)}
)[stsel]
Rprof( NULL)
table[id,2] <- summaryRprof( memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[id,3] <- system.time( { ch1 <- chol(mat);
for (i in 1:N) ch2 <- update(ch1,mat) }
)[stsel]
Rprof( NULL)
table[id,4] <- summaryRprof( memory="both")$by.total[rPsx,rPsy]
}
if (F) {
table <- pmax(table, 0.0001)
par(mfcol=c(1,2))
plot( dval, table[,1], type='l', log='xy',ylim=range(table[,c(1,3)]),
xlab="distance (log scale)", ylab="seconds (log scale)")
lines( dval, table[,3],lty=2)
plot( dval, table[,2], type='l', log='xy',ylim=range(table[,c(2,4)]),
xlab="distance (log scale)", ylab="Mbytes (log scale)")
lines( dval, table[,4],lty=2)
}
table <- array(NA,c(9,4))
x <- 10
egdx <- expand.grid(1:x,1:x)
gridmat <- diag.spam(x^2) + .2 * nearest.dist( egdx, delta=1.,upper=NULL) +
.1 * nearest.dist( egdx, delta=1.5,upper=NULL)
Rprof( memory.profiling=TRUE, interval = rPint)
table[1,1] <- system.time( for (i in 1:N) ch1 <- chol(gridmat) )[stsel]
Rprof( NULL)
table[1,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[1,3] <- system.time( for (i in 1:N) ch2 <- chol(USmat) )[stsel]
Rprof( NULL)
table[1,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[2,1] <- system.time( for (i in 1:N) ch1 <- chol.spam(gridmat))[stsel]
Rprof( NULL)
table[2,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[2,3] <- system.time( for (i in 1:N) ch2 <- chol.spam(USmat) )[stsel]
Rprof( NULL)
table[2,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
options(spam.safemode=c(FALSE, FALSE, FALSE))
Rprof( memory.profiling=TRUE, interval = rPint)
table[3,1] <- system.time( for (i in 1:N) ch1 <- chol( gridmat) )[stsel]
Rprof( NULL)
table[3,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[3,3] <- system.time( for (i in 1:N) ch2 <- chol( USmat) )[stsel]
Rprof( NULL)
table[3,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
options(spam.safemode=c(TRUE, TRUE, TRUE))
options(spam.cholsymmetrycheck=FALSE)
Rprof( memory.profiling=TRUE, interval = rPint)
table[4,1] <- system.time( for (i in 1:N) ch1 <- chol( gridmat) )[stsel]
Rprof( NULL)
table[4,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[4,3] <- system.time( for (i in 1:N) ch2 <- chol( USmat) )[stsel]
Rprof( NULL)
table[4,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
options(spam.cholsymmetrycheck=TRUE)
memory1 = summary(ch1)[1:2]
memory2 = summary(ch2)[1:2]
Rprof( memory.profiling=TRUE, interval = rPint)
table[5,1] <- system.time( for (i in 1:N) ch1 <- chol( gridmat,memory=memory1) )[stsel]
Rprof( NULL)
table[5,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[5,3] <- system.time( for (i in 1:N) ch2 <- chol( USmat,memory=memory2) )[stsel]
Rprof( NULL)
table[5,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
options(spam.cholsymmetrycheck=FALSE, safemode=c(FALSE,FALSE,FALSE))
Rprof( memory.profiling=TRUE, interval = rPint)
table[6,1] <- system.time( for (i in 1:N) ch1 <- chol.spam(gridmat,memory=memory1) )[stsel]
Rprof( NULL)
table[6,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[6,3] <- system.time( for (i in 1:N) ch2 <- chol.spam(USmat,memory=memory2) )[stsel]
Rprof( NULL)
table[6,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
pivot1 <- ch1@pivot
pivot2 <- ch2@pivot
Rprof( memory.profiling=TRUE, interval = rPint)
table[7,1] <- system.time( for (i in 1:N) ch1 <- chol.spam(gridmat,pivot=pivot1,
memory=memory1) )[stsel]
Rprof( NULL)
table[7,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[7,3] <- system.time( for (i in 1:N) ch1 <- chol.spam(USmat,pivot=pivot2,
memory=memory2) )[stsel]
Rprof( NULL)
table[7,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
options(spam.cholpivotcheck=FALSE)
Rprof( memory.profiling=TRUE, interval = rPint)
table[8,1] <- system.time( for (i in 1:N) ch1 <- chol.spam(gridmat,pivot=pivot1,
memory=memory1) )[stsel]
Rprof( NULL)
table[8,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[8,3] <- system.time( for (i in 1:N) ch2 <- chol.spam(USmat,pivot=pivot2,
memory=memory2) )[stsel]
Rprof( NULL)
table[8,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[9,1] <- system.time( for (i in 1:N) ch1 <- update(ch1,gridmat) )[stsel]
Rprof( NULL)
table[9,2] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
Rprof( memory.profiling=TRUE, interval = rPint)
table[9,3] <- system.time( for (i in 1:N) ch2 <- update(ch2,USmat) )[stsel]
Rprof( NULL)
table[9,4] <- summaryRprof(memory="both")$by.total[rPsx,rPsy]
colnames(table) <- c("grid_time","grid_mem","US_time","US_mem")
rownames(table) <- c("Generic chol","chol.spam","safemode",
"symmetrycheck","memory","all","reusing pivot","best cast","update only")
normed.table <- t( round( t(table[-1,])/table[1,],3))
if (F) {
print( t( round( t(table[-1,])/table[1,],3)))
}
In <- diag.spam(nrow(UScounties.storder))
struct <- chol(In + .2 * UScounties.storder + .1 * UScounties.ndorder)
len.1 <- 10
len.2 <- 5
theta.1 <- seq(-.225, to=.515, len=len.1)
theta.2 <- seq(-.09, to=.235, len=len.2)
grid <- array(NA, c(len.1, len.2))
options(spam.cholupdatesingular='null')
for (i in 1:len.1)
for(j in 1:len.2)
grid[i,j] <- !is.null(update(struct, In + theta.1[i]*UScounties.storder
+ theta.2[j]* UScounties.ndorder))
options( echo=TRUE) |
NULL
detect.fixations <- function(samples, lambda=15, smooth.coordinates=T, smooth.saccades=T) {
samples <- samples[c("x", "y", "trial", "time")]
if (smooth.coordinates) {
x <- samples$x[c(1,nrow(samples))]
y <- samples$y[c(1,nrow(samples))]
kernel <- rep(1/3, 3)
samples$x <- stats::filter(samples$x, kernel)
samples$y <- stats::filter(samples$y, kernel)
samples$x[c(1,nrow(samples))] <- x
samples$y[c(1,nrow(samples))] <- y
}
samples <- detect.saccades(samples, lambda, smooth.saccades)
if (all(!samples$saccade))
stop("No saccades were detected. Something went wrong.")
fixations <- aggregate.fixations(samples)
fixations
}
aggregate.fixations <- function(samples) {
saccade.events <- sign(c(0, diff(samples$saccade)))
trial.numeric <- as.integer(factor(samples$trial))
trial.events <- sign(c(0, diff(trial.numeric)))
samples$fixation.id <- cumsum(saccade.events==-1|trial.events==1)
samples <- samples[!samples$saccade,,drop=F]
fixations <- with(samples, data.frame(
trial = tapply(trial, fixation.id, function(x) x[1]),
start = tapply(time, fixation.id, min),
end = tapply(time, fixation.id, max),
x = tapply(x, fixation.id, mean),
y = tapply(y, fixation.id, mean),
sd.x = tapply(x, fixation.id, sd),
sd.y = tapply(y, fixation.id, sd),
peak.vx = tapply(vx, fixation.id, function(x) x[which.max(abs(x))]),
peak.vy = tapply(vy, fixation.id, function(x) x[which.max(abs(x))]),
stringsAsFactors=F))
fixations$dur <- fixations$end - fixations$start
fixations
}
detect.saccades <- function(samples, lambda, smooth.saccades) {
vx <- stats::filter(samples$x, -1:1/2)
vy <- stats::filter(samples$y, -1:1/2)
vx[1] <- vx[2]
vy[1] <- vy[2]
vx[length(vx)] <- vx[length(vx)-1]
vy[length(vy)] <- vy[length(vy)-1]
msdx <- sqrt(median(vx**2, na.rm=T) - median(vx, na.rm=T)**2)
msdy <- sqrt(median(vy**2, na.rm=T) - median(vy, na.rm=T)**2)
radiusx <- msdx * lambda
radiusy <- msdy * lambda
sacc <- ((vx/radiusx)**2 + (vy/radiusy)**2) > 1
if (smooth.saccades) {
sacc <- stats::filter(sacc, rep(1/3, 3))
sacc <- as.logical(round(sacc))
}
samples$saccade <- ifelse(is.na(sacc), F, sacc)
samples$vx <- vx
samples$vy <- vy
samples
} |
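## Self-contained sketch of the expected input for detect.fixations(): a data
## frame with columns x, y, trial and time. Two stable gaze positions joined
## by a fast jump should come out as two fixations (values are synthetic).
if (FALSE) {
  set.seed(1)
  n <- 200
  samples <- data.frame(
    x     = c(rnorm(n, 100, 0.5), rnorm(n, 300, 0.5)),
    y     = rnorm(2 * n, 100, 0.5),
    trial = 1,
    time  = seq_len(2 * n))
  detect.fixations(samples)
}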
vizTerracePosition <- function(x, s = NULL, annotations = TRUE, annotation.cex = 0.75, cols = c("#2B83BA", "#D7191C")) { # 'cols' default was truncated in the source; this hex pair is a placeholder
if(!inherits(x, 'data.frame')) {
stop('x must be a data.frame', call. = FALSE)
}
if(nrow(x) < 1) {
stop('x must contain at least 1 row of data', call. = FALSE)
}
if(!requireNamespace('dendextend', quietly=TRUE) | !requireNamespace('latticeExtra', quietly=TRUE))
stop('please install the `dendextend` and `latticeExtra` packages', call.=FALSE)
terrace_position <- NULL
row.names(x) <- x$series
n.records <- x$n
n.series <- nrow(x)
H <- x$shannon_entropy
x$n <- NULL
x$shannon_entropy <- NULL
x.long <- melt(x, id.vars = 'series')
names(x.long)[2] <- 'terrace_position'
tps <- list(superpose.polygon=list(col=cols, lwd=2, lend=2))
if(n.series > 1) {
hyd.order <- order(rowSums(sweep(x[, -1], 2, STATS=c(-1, 1), FUN = '*')), decreasing = TRUE)
x.d <- as.hclust(diana(daisy(x[, -1])))
x.d.hydro <- dendextend::rotate(x.d, order = x$series[hyd.order])
x.long$series <- factor(x.long$series, levels=x.long$series[x.d.hydro$order])
leg <- list(
right = list(
fun = latticeExtra::dendrogramGrob,
args = list(x = as.dendrogram(x.d.hydro), side="right", size=10)
)
)
} else {
x.long$series <- factor(x.long$series)
leg <- list()
x.d.hydro <- list(order = 1L)
}
suppressWarnings(trellis.par.set(tps))
sk <- simpleKey(space='top', columns=2, text=levels(x.long$terrace_position), rectangles = TRUE, points=FALSE, between.columns=2, between=1, cex=0.75)
pp <- barchart(series ~ value, groups=terrace_position, data=x.long, horiz=TRUE, stack=TRUE,
xlab='Proportion',
scales=list(cex=1),
key = sk,
legend = leg,
panel = function(...) {
panel.barchart(...)
if(annotations) {
x.pos.N <- unit(0.03, 'npc')
x.pos.H <- unit(0.97, 'npc')
y.pos <- unit((1:nrow(x)) - 0.25, 'native')
y.pos.annotation <- unit(nrow(x) + 0.25, 'native')
grid.text(
as.character(n.records[x.d.hydro$order]),
x = x.pos.N,
y = y.pos,
gp = gpar(cex = annotation.cex, font = 1)
)
grid.text(
as.character(round(H[x.d.hydro$order], 2)),
x = x.pos.H,
y = y.pos,
gp = gpar(cex = annotation.cex, font = 3)
)
grid.text(
c('N', 'H'),
x = c(x.pos.N, x.pos.H),
y = y.pos.annotation,
gp = gpar(cex = annotation.cex, font = c(2, 4))
)
}
},
yscale.components=function(..., s.to.bold=s) {
temp <- yscale.components.default(...)
if(!is.null(s.to.bold)) {
temp$left$labels$labels <-
sapply( temp$left$labels$labels,
function(x) {
if(grepl(s.to.bold, x, ignore.case = TRUE)) {
as.expression(bquote( bold(.(x))))
} else {
as.expression(bquote(.(x)))
}
}
)
}
return(temp)
})
pp <- update(pp, par.settings = tps)
return(list(fig = pp, order = x.d.hydro$order, clust = x.d.hydro))
} |
"kullCOP" <-
function(cop1=NULL, cop2=NULL, para1=NULL, para2=NULL, alpha=0.05,
del=0, n=1E5, verbose=TRUE, sobol=FALSE, ...) {
if(del > 0) {
lo <- del; hi <- 1 - del
} else {
lo <- 0; hi <- 1
}
UV <- NULL
if(sobol) {
if(! exists(".Random.seed")) tmp <- runif(1)
seed <- sample(.Random.seed, 1)
UV <- randtoolbox::sobol(n = n, dim = 2, seed=seed, scrambling=3, ...)
} else {
UV <- matrix(data=runif(2*n), ncol=2)
}
if(verbose) message("kullCOP: Computing 'f' density values---",
appendLF=FALSE)
f <- densityCOP(UV[,1],UV[,2], cop=cop1, para=para1)
if(verbose) message("done")
if(verbose) message("kullCOP: Computing 'g' density values---",
appendLF=FALSE)
g <- densityCOP(UV[,1],UV[,2], cop=cop2, para=para2)
if(verbose) message("done")
h <- g*(log(g/f))
h <- h[is.finite(h)]
h <- h[! is.nan(h)]
KLdivergence.fg <- mean(h)
KLdivergence.fg.sd <- sd(h)/sqrt(n)
h <- f*(log(f/g))
h <- h[is.finite(h)]
h <- h[! is.nan(h)]
KLdivergence.gf <- mean(h)
KLdivergence.gf.sd <- sd(h)/sqrt(n)
JEFF.divergence <- KLdivergence.fg + KLdivergence.gf
names(JEFF.divergence) <- "Jeffrey's Divergence"
h <- g*log(g/f)^2
h <- h[is.finite(h)]
h <- h[! is.nan(h)]
KLvar.fg <- mean(h)
KLvar.fg.sd <- sd(h)/sqrt(n)
h <- f*log(f/g)^2
h <- h[is.finite(h)]
h <- h[! is.nan(h)]
KLvar.gf <- mean(h)
KLvar.gf.sd <- sd(h)/sqrt(n)
sigmaKL.fg <- sqrt(KLvar.fg - KLdivergence.fg^2)
sigmaKL.gf <- sqrt(KLvar.gf - KLdivergence.gf^2)
tmp <- c(sigmaKL.fg/KLdivergence.fg, sigmaKL.gf/KLdivergence.gf)
tmp <- max(tmp[! is.nan(tmp)])
KL.sample.size <- (qnorm(1-alpha) * tmp)^2
diverge <- c(KLdivergence.fg, sigmaKL.fg,
KLdivergence.gf, sigmaKL.gf)
divergesd <- c(KLdivergence.fg.sd, KLvar.fg.sd,
KLdivergence.gf.sd, KLvar.gf.sd)
names(diverge) <- c("KL-diverge.fg", "sigmaKL-diverge.fg",
"KL-diverge.gf", "sigmaKL-diverge.gf")
names(divergesd) <- c("StdDev_KL-diverge.fg", "StdDev_KL-variance.fg",
"StdDev_KL-diverge.gf", "StdDev_KL-variance.gf")
SS <- as.integer(KL.sample.size)
names(SS) <- "Kullback-Leibler (integer) sample size"
zz <- list(MonteCarlo.sim.size = n,
divergences = diverge,
stdev.divergences = divergesd,
Jeffrey.divergence = JEFF.divergence,
KL.sample.size = SS)
return(zz)
}
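## Hedged usage sketch (assumes the copBasic package, which supplies
## densityCOP() and the PSP and PLACKETTcop copulas used here): Monte Carlo
## Kullback-Leibler divergences and the resulting KL sample size.
if (FALSE) {
  library(copBasic)
  kullCOP(cop1 = PSP, cop2 = PLACKETTcop, para2 = 4.5, n = 1e4)
}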
"kullCOPint" <-
function(cop1=NULL, cop2=NULL, para1=NULL, para2=NULL, alpha=0.05,
del=.Machine$double.eps^0.25, verbose=TRUE, ...) {
if(del > 0) {
lo <- del; hi <- 1 - del
} else {
lo <- 0; hi <- 1
}
if(verbose) message(" CPU on Kullback-Leibler, double integrations for divergence ",
"(f relative to g)")
KLfg <- NULL
try(KLfg <- integrate(function(u) {
sapply(u, function(u) {
integrate(function(v) {
f <- densityCOP(u,v, cop=cop1, para=para1, ...)
g <- densityCOP(u,v, cop=cop2, para=para2, ...)
return(g*(log(g/f)))
}, lo, hi)$value
})
}, lo, hi))
if(is.null(KLfg)) {
warning("could not numerically integrate, Kullback-Leibler divergence, ",
"(f relative to g)")
KLfg <- NA
KLdivergence.fg <- NA
} else {
KLdivergence.fg <- KLfg$value
}
if(verbose) message(" CPU on Kullback-Leibler, double integrations for divergence ",
"(g relative to f)")
KLgf <- NULL
try(KLgf <- integrate(function(u) {
sapply(u, function(u) {
integrate(function(v) {
f <- densityCOP(u,v, cop=cop1, para=para1, ...)
g <- densityCOP(u,v, cop=cop2, para=para2, ...)
return(f*log(f/g))
}, lo, hi)$value
})
}, lo, hi))
if(is.null(KLgf)) {
warning("could not numerically integrate, Kullback-Leibler divergence ",
"(g relative to f)")
KLgf <- NA
KLdivergence.gf <- NA
} else {
KLdivergence.gf <- KLgf$value
}
JEFF.divergence <- KLdivergence.fg + KLdivergence.gf
names(JEFF.divergence) <- "Jeffrey's Divergence"
if(verbose) message(" CPU on Kullback-Leibler, double integrations for variance ",
"(f relative to g)")
KLvar.fg <- NULL
try(KLvar.fg <- integrate(function(u) {
sapply(u, function(u) {
integrate(function(v) {
f <- densityCOP(u,v, cop=cop1, para=para1, ...)
g <- densityCOP(u,v, cop=cop2, para=para2, ...)
return(g*log(g/f)^2)
}, lo, hi)$value
})
}, lo, hi))
if(is.null(KLvar.fg)) {
warning("could not numerically integrate, Kullback-Leibler variance ",
"(f relative to g)")
KLvar.fg <- NA
sigmaKL.fg <- NA
} else {
sigmaKL.fg <- sqrt(KLvar.fg$value - KLdivergence.fg^2)
}
if(verbose) message(" CPU on Kullback-Leibler, double integrations for variance ",
"(g relative to f)")
KLvar.gf <- NULL
try(KLvar.gf <- integrate(function(u) {
sapply(u, function(u) {
integrate(function(v) {
f <- densityCOP(u,v, cop=cop1, para=para1, ...)
g <- densityCOP(u,v, cop=cop2, para=para2, ...)
return(f*log(f/g)^2)
}, lo, hi)$value
})
}, lo, hi))
if(is.null(KLvar.gf)) {
warning("could not numerically integrate, Kullback-Leibler variance ",
"(g relative to f)")
KLvar.gf <- NA
sigmaKL.gf <- NA
} else {
sigmaKL.gf <- sqrt(KLvar.gf$value - KLdivergence.gf^2)
}
tmp <- max(c(sigmaKL.fg/KLdivergence.fg, sigmaKL.gf/KLdivergence.gf))
KL.sample.size <- (qnorm(1-alpha) * tmp)^2
yy <- list(KLintegrate.fg = KLfg,
KLintegrate.gf = KLgf,
KLvarintegrate.fg = KLvar.fg,
KLvarintegrate.gf = KLvar.gf)
diverge <- c(KLdivergence.fg, sigmaKL.fg,
KLdivergence.gf, sigmaKL.gf)
names(diverge) <- c("KL-diverge.fg", "sigmaKL-diverge.fg",
"KL-diverge.gf", "sigmaKL-diverge.gf")
SS <- as.integer(KL.sample.size)
names(SS) <- "Kullback-Leibler (integer) sample size"
zz <- list(divergences = diverge,
Jeffrey.divergence=JEFF.divergence,
KL.sample.size=SS,
integrations = yy)
return(zz)
} |
NULL
dump.format <- function(namedlist=list(), checkvalid=TRUE, convertfactors=TRUE){
data <- namedlist
if(identical(data, list()))
return('')
	if(length(data)==2 && is.null(names(data)) && is.character(data[[1]]) && length(data[[1]])==1){
names <- data
data <- list(data[[2]])
names(data) <- names[[1]]
}
if(inherits(data, 'data.frame'))
data <- as.list(data)
if(!inherits(data,"list") || length(data)==0) stop("Data must be provided as a named list or data frame", call.=FALSE)
if(any(names(data)=="") || is.null(names(data))) stop("Data must be provided as a named list or data frame", call.=FALSE)
if(length(unique(names(data)))!=length(data)) stop('All elements in the data list must have unique names', call.=FALSE)
if(convertfactors){
for(c in which(sapply(data,inherits,what='factor')))
data[[c]] <- as.numeric(data[[c]])
}
if(checkvalid){
valid <- checkvalidforjags(data)
if(!valid$valid) stop(paste("The following problem was identified in the data provided: ", valid$probstring, sep=""))
}
variable = names(data)
value <- data
if(any(variable==".RNG.name")){
n <- which(variable==".RNG.name")
split <- strsplit(value[[n]], split="")[[1]]
		if(split[1]!="\"" && split[length(split)]!="\""){
split <- c("\"", split, "\"")
value[[n]] <- paste(split, collapse="")
}
}
output.string <- ""
for(i in 1:length(variable)){
if(length(value[[i]])==1 && length(dim(value[[i]]))==0){
value.string <- as.character(value[[i]])
}else{
dims <- dim(value[[i]])
if(length(dims) > 1){
value.string <- "structure(c("
}else{
value.string <- "c("
}
value.string <- paste(value.string, paste(value[[i]], collapse=", "), ")", sep="")
if(length(dims) > 1){
value.string <- paste(value.string, ", .Dim = c(", paste(dims, collapse=", "), "))", sep="")
}
}
output.string <- paste(output.string, "\"", variable[[i]], "\" <- ", value.string, "\n", sep="")
}
return(output.string)
}
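## Example: dump.format() renders a named list in the R dump syntax used for
## JAGS data and inits. A scalar, a vector and a matrix exercise the three
## branches above (checkvalid = FALSE avoids the package-internal validator).
if (FALSE) {
  cat(dump.format(list(N = 3, y = c(1.1, 2.2, 3.3),
                       X = matrix(1:6, nrow = 2)),
                  checkvalid = FALSE))
  # "N" <- 3
  # "y" <- c(1.1, 2.2, 3.3)
  # "X" <- structure(c(1, 2, 3, 4, 5, 6), .Dim = c(2, 3))
}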
list.format <- function(data=character(), checkvalid=TRUE){
if(! inherits(data, c("character","runjagsdata","runjagsinits")) || length(data)==0) stop("Data must be provided as a character string in the R dump format")
if(all(data=='' | data=='\n'))
return(list())
out <- vector('list', length=length(data))
for(i in 1:length(data)){
if(data[i]==""){
out[[i]] <- list()
}else{
str <- data[i]
			str <- gsub('#[^\n]*', '', str) # strip '#' comments; pattern reconstructed (truncated in the source)
str <- gsub('^ *\n', '', str)
str <- gsub('\n *\n$', '\n', str)
str <- gsub("<-", "=", str)
str <- gsub("`", "", str)
str <- gsub("= \n", "=", str)
str <- gsub("^\n", "", str)
str <- gsub("\n\n", "\n", str)
str <- gsub("\n\n", "\n", str)
str <- gsub("\n\n", "\n", str)
str <- gsub(",\n.Dim", ", .Dim", str, fixed=TRUE)
str <- gsub("\n", ",", str)
			if(str != '' && grepl(",$", str)) str <- sub(",$", "", str) # drop a trailing comma
out[[i]] <- eval(parse(text=paste('list(', str, ')')))
}
}
if(length(data)>1){
names(out) <- paste('Chain.', 1:length(data), sep='')
}else{
out <- out[[1]]
}
if(checkvalid){
valid <- checkvalidforjags(out)
if(!valid$valid)
stop(paste("The following problem was identified in the data provided: ", valid$probstring, sep=""))
}
return(out)
} |
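## Round-trip sketch: list.format() parses a dump-format string back into a
## named list, so it inverts dump.format() for simple structures.
if (FALSE) {
  txt <- dump.format(list(N = 3, y = c(1, 2, 3)), checkvalid = FALSE)
  list.format(txt, checkvalid = FALSE)  # list(N = 3, y = c(1, 2, 3))
}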
mean_frames <- function(arr3d, na_rm = FALSE) {
checkmate::assert_numeric(arr3d)
checkmate::assert_array(arr3d, d = 3)
checkmate::assert_flag(na_rm)
if (na_rm) {
mean_frames_na_omit_(arr3d)
} else {
mean_frames_(arr3d)
}
}
sum_frames <- function(arr3d, na_rm = FALSE) {
checkmate::assert_numeric(arr3d)
checkmate::assert_array(arr3d, d = 3)
checkmate::assert_flag(na_rm)
if (na_rm) {
sum_frames_na_omit_(arr3d)
} else {
sum_frames_(arr3d)
}
}
sum_frames_na_omit_ <- function(arr3d) {
checkmate::assert_array(arr3d, d = 3)
checkmate::assert_numeric(arr3d)
if (isTRUE(checkmate::check_integer(arr3d))) {
int_sum_frames_na_omit(arr3d)
} else {
dbl_sum_frames_na_omit(arr3d)
}
}
mean_frames_na_omit_ <- function(arr3d) {
checkmate::assert_array(arr3d, d = 3)
checkmate::assert_numeric(arr3d)
if (isTRUE(checkmate::check_integer(arr3d))) {
int_mean_frames_na_omit(arr3d)
} else {
dbl_mean_frames_na_omit(arr3d)
}
} |
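## Example: the third dimension indexes frames, so a 2 x 2 x 3 array reduces
## to a 2 x 2 matrix (relies on the compiled *_frames_ helpers of the
## surrounding package).
if (FALSE) {
  arr <- array(seq_len(12), dim = c(2, 2, 3))
  sum_frames(arr)   # pixelwise sum across the 3 frames
  mean_frames(arr)  # sum_frames(arr) / 3
}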
NULL
setClass('lcMethodCustom', contains = 'lcMethod')
lcMethodCustom = function(
response,
fun,
center = meanNA,
time = getOption('latrend.time'),
id = getOption('latrend.id'),
name = 'custom'
) {
mc = match.call.all()
mc$Class = 'lcMethodCustom'
do.call(new, as.list(mc))
}
setValidity('lcMethodCustom', function(object) {
assert_that(has_lcMethod_args(object, formalArgs(lcMethodCustom)))
if (isArgDefined(object, 'fun')) {
assert_that(is.function(object$fun))
}
if (isArgDefined(object, 'center')) {
assert_that(is.function(object$center))
}
})
setMethod('getArgumentDefaults', signature('lcMethodCustom'), function(object) {
c(
formals(lcMethodCustom),
callNextMethod()
)
})
setMethod('getName', signature('lcMethodCustom'), function(object) {
if (isArgDefined(object, 'name') && !is.null(object$name)) {
return(object$name)
}
if (isArgDefined(object, 'fun')) {
fun = object[['fun', eval = FALSE]]
if (is.name(fun)) {
return(paste('custom function ', fun))
}
}
return('custom function')
})
setMethod('getShortName', signature('lcMethodCustom'), function(object) 'custom')
setMethod('prepareData', signature('lcMethodCustom'), function(method, data, verbose) {
assert_that(has_name(data, responseVariable(method)))
callNextMethod()
})
setMethod('fit', signature('lcMethodCustom'), function(method, data, envir, verbose) {
args = as.list(method)
args$data = data
model = do.call(method$fun, args)
model@method = method
assert_that(is.lcModel(model))
return(model)
}) |
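## Hedged usage sketch (assumes the latrend package; the toy clustering
## function below just splits trajectories on their mean response and wraps
## the result with lcModelPartition(), purely for illustration).
if (FALSE) {
  library(latrend)
  clusfun <- function(data, response, id, time, ...) {
    clus <- factor(tapply(data[[response]], data[[id]], mean) > 0,
                   labels = c("Low", "High"))
    lcModelPartition(data = data, response = response,
                     trajectoryAssignments = clus)
  }
  method <- lcMethodCustom(response = "Y", fun = clusfun)
  # model <- latrend(method, data = longdata)  # 'longdata' is hypothetical
}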
jmodelMult <- function (fitLME, fitCOX, data, model = 1, rho = 0, timeVarY = NULL, timeVarT = NULL,
control = list(), ...)
{
  cat("Running jmodelMult(); this may take some time to finish.\n")
call <- match.call()
CheckInputs(fitLME, fitCOX, rho)
cntrlLst <- GenerateControlList(control, 1)
ID1 <- as.vector(unclass(fitLME$groups[[1]]))
uniqueID <- !duplicated(ID1)
tempID <- which(uniqueID)
tempID <- c(tempID, length(ID1) + 1)
ni <- diff(tempID)
ID <- rep(1:sum(uniqueID), times = ni)
nLong <- length(ni)
if (ncol(fitCOX$y) != 3)
    stop("\n must fit a time-dependent Cox model in coxph().")
start <- as.vector(fitCOX$y[, 1])
stop <- as.vector(fitCOX$y[, 2])
event <- as.vector(fitCOX$y[, 3])
Time <- stop[cumsum(ni)]
  d <- event[cumsum(ni)]
  nSurv <- length(Time)
  if (sum(d) < 5)
    warning("\n at least 5 events are required.")
if (nLong != nSurv)
stop("\n sample sizes in the longitudinal and event processes differ.")
Z <- as.matrix(fitCOX$x)
ncz <- ncol(Z)
phi.names <- colnames(Z)
formSurv <- formula(fitCOX)
TermsSurv <- fitCOX$terms
mfSurv <- model.frame(TermsSurv, data)[cumsum(ni), ]
if (!is.null(timeVarT)) {
if (!all(timeVarT %in% names(mfSurv)))
      stop("\n'timeVarT' does not correspond to columns in the design matrix of 'fitCOX'.")
mfSurv[timeVarT] <- Time
}
if (ncz > 0) {
Ztime <- as.matrix(model.matrix(formSurv, mfSurv))
if(attr(TermsSurv, 'intercept')) Ztime <- as.matrix(Ztime[, - 1])
} else Ztime <- matrix(, ncol = 0, nrow = nSurv)
TermsLongX <- fitLME$terms
mydata <- fitLME$data[all.vars(TermsLongX)]
formLongX <- formula(fitLME)
mfLongX <- model.frame(TermsLongX, data = mydata)
B <- as.matrix(model.matrix(formLongX, mfLongX))
Y <- as.vector(model.response(mfLongX, "numeric"))
alpha.name <- rownames(attr(TermsLongX, "factors"))[attr(TermsLongX, "response")]
data.id <- mydata[uniqueID, ]
if (!is.null(timeVarY)) {
if (!all(timeVarY %in% names(mydata)))
stop("\n'timeVarY' does not correspond to columns in the fixed-effect design matrix of 'fitLME'.")
data.id[timeVarY] <- Time
}
mfLongX.id <- model.frame(TermsLongX, data = data.id)
Btime <- as.matrix(model.matrix(formLongX, mfLongX.id))
U <- sort(unique(Time[d == 1]))
tempU <- lapply(Time, function(t) U[t >= U])
times <- unlist(tempU)
nk <- sapply(tempU, length)
M <- sum(nk)
Index <- rep(1:nLong, nk)
Index0 <- match(Time, U)
Index1 <- unlist(lapply(nk[nk != 0], seq, from = 1))
Index2 <- colSums(d * outer(Time, U, "=="))
data.id2 <- data.id[Index, ]
if (!is.null(timeVarY)) {
data.id2[timeVarY] <- times
}
mfLongX2 <- model.frame(TermsLongX, data = data.id2)
Btime2 <- as.matrix(model.matrix(formLongX, mfLongX2))
mfSurv2 <- mfSurv[Index, ]
if (!is.null(timeVarT)) {
mfSurv2[timeVarT] <- times
}
if (ncz > 0) {
Ztime2 <- as.matrix(model.matrix(formSurv, mfSurv2))
if(attr(TermsSurv, 'intercept')) Ztime2 <- as.matrix(Ztime2[, - 1])
} else Ztime2 <- matrix(, ncol = 0, nrow = M)
n <- nLong
N <- length(Y)
nu <- length(U)
ncb <- ncol(B)
GHQ <- gauss.quad(cntrlLst$nknot, kind = "hermite")
b <- GHQ$nodes
wGQ <- GHQ$weights
Y.st <- split(Y, ID)
B.st <- lapply(split(B, ID), function(x) matrix(x, ncol = ncb))
Ztime22 <- if (ncz > 1) t(apply(Ztime2, 1, function(x) tcrossprod(x))) else Ztime2 ^ 2
Btime22 <- if (ncb > 1) t(apply(Btime2, 1, function(x) tcrossprod(x))) else Btime2 ^ 2
B2 <- if (ncb > 1) t(apply(B, 1, function(x) tcrossprod(x))) else B ^ 2
tempResp <- strsplit(toString(formLongX), ", ")[[1]][c(2, 1)]
tempResp <- paste(tempResp, collapse = "")
tempForm <- strsplit(toString(splitFormula(formLongX)[[1]]), ", ")[[1]]
tempForm <- tempForm[-1]
tempForm[length(tempForm) + 1] <- "data = fitLME$data"
tempForm <- paste(tempForm, collapse = ",")
tempForm <- paste("lm(", tempResp, tempForm, ")", sep = "")
fitLM <- eval(parse(text = tempForm))
gamma <- as.vector(fitLM$coefficients)
gamma.names <- names(fitLM$coefficients)
gamma.names <- gsub("bs\\(.*\\)", "bs", gamma.names)
surv.init <- InitValMultGeneric(gamma = gamma, B.st = B.st, n = n, Y.st = Y.st, ni = ni, model = model, ID = ID, Index = Index, start = start, stop = stop, B = B, Btime = Btime,
Btime2 = Btime2, event = event, Z = Z, ncz = ncz, Ztime2 = Ztime2, Index2 = Index2, Index1 = Index1, rho = rho, nk = nk, d = d, Ztime22 = Ztime22,
Ztime = Ztime, tol.P = cntrlLst$tol.P, iter = cntrlLst$max.iter)
phi <- surv.init$phi
alpha <- surv.init$alpha
lamb <- surv.init$lamb
Ysigma <- surv.init$Ysigma
Bsigma <- surv.init$Bsigma
theta.old <- list(gamma = gamma, phi = phi, alpha = alpha, Ysigma = Ysigma, Bsigma = Bsigma,
lamb = lamb, lgLik = 0)
err.P <- err.L <- step <- 1
while (step <= cntrlLst$max.iter) {
if (err.P < cntrlLst$tol.P | err.L < cntrlLst$tol.L) break
theta.new <- EMiterMultGeneric(theta.old, B.st, n, Y.st, b, model, Btime, Btime2, Index, Index0, Ztime, Ztime2, nknot = cntrlLst$nknot, nk, Index1, rho, d, wGQ, ID, ncb, B, Y, N,
ncz, Ztime22, Index2, B2, Btime22)
new.P <- c(theta.new$gamma, theta.new$phi, theta.new$alpha, theta.new$Ysigma, theta.new$Bsigma)
old.P <- c(theta.old$gamma, theta.old$phi, theta.old$alpha, theta.old$Ysigma, theta.old$Bsigma)
err.P <- max(abs(new.P - old.P) / (abs(old.P) + .Machine$double.eps * 2))
new.L <- theta.new$lgLik
old.L <- theta.old$lgLik
err.L <- abs(new.L - old.L) / (abs(old.L) + .Machine$double.eps * 2)
step <- step + 1
theta.old <- theta.new
}
converge <- as.numeric(err.P < cntrlLst$tol.P | err.L < cntrlLst$tol.L)
if (cntrlLst$SE.method == 'PFDS') {
time.SE <- system.time(Vcov <- PFDSMult(model, theta.new, delta = cntrlLst$delta, ncz = ncz, ncb = ncb, B.st = B.st, n =n, Y.st = Y.st, b = b, Btime = Btime, Btime2 = Btime2,
Index = Index, Ztime = Ztime, Ztime2 = Ztime2, Index0 = Index0, nk = nk, Index1 = Index1, rho = rho, d = d, wGQ = wGQ, Index2 = Index2,
alpha.name = alpha.name, phi.names = phi.names,N = N, Y = Y, B = B, ID = ID, nknot = cntrlLst$nknot, iter = cntrlLst$max.iter,
tol = min(cntrlLst$tol.P, cntrlLst$delta) / 100))[3]
if (any(is.na(suppressWarnings(sqrt(diag(Vcov))))))
warning("NA's present in StdErr estimation due to numerical error!\n")
} else if (cntrlLst$SE.method == 'PRES') {
if (CheckDeltaMult(theta.new, cntrlLst$delta)) {
time.SE <- system.time(Vcov <- PRESMult(model, theta.new, delta = cntrlLst$delta, ncz = ncz, ncb = ncb, B.st = B.st, n = n, Y.st = Y.st, b =b, Btime = Btime, Btime2 = Btime2,
Index = Index, Ztime = Ztime, Ztime2 = Ztime2, Index0 = Index0 , nk = nk, Index1 = Index1, rho = rho, d = d, wGQ = wGQ, Index2 =Index2,
alpha.name =alpha.name, phi.names = phi.names,N = N, Y = Y, B = B, ID = ID, nknot = cntrlLst$nknot, iter = cntrlLst$max.iter,
tol = min(cntrlLst$tol.P, cntrlLst$delta) / 100 ))[3]
if (any(is.na(suppressWarnings(sqrt(diag(Vcov))))))
warning("NA's present in StdErr estimation due to numerical error!\n")
} else {
Vcov <- time.SE <- NA
      warning("\n 'delta' is too large; use a smaller 'delta'!")
}
} else if (cntrlLst$SE.method == 'PLFD') {
time.SE <- system.time(Vcov <- PLFDMult( model = model, theta.new, delta = cntrlLst$delta, B.st = B.st, n = n, Y.st = Y.st, b = b, Btime = Btime, Btime2 = Btime2, Index = Index,
Index0 = Index0, Ztime = Ztime, Ztime2 = Ztime2, nk = nk, Index1 = Index1, rho = rho, d = d, wGQ = wGQ, ncz = ncz, ncb = ncb,
Index2 = Index2, alpha.name = alpha.name, phi.names = phi.names, nknot = cntrlLst$nknot, iter = cntrlLst$max.iter,
tol = min(cntrlLst$tol.P, cntrlLst$delta) / 100))[3]
if (any(is.na(suppressWarnings(sqrt(diag(Vcov))))))
warning("NA's present in StdErr estimation due to numerical error!\n")
} else {
Vcov <- time.SE <- NA
warning("\n Standard error estimation method should be either 'PFDS', 'PRES' or 'PLFD'.")
}
theta.new$lamb <- data.frame("time" = U, "bashaz" = theta.new$lamb)
names(theta.new$gamma) <- gamma.names
names(theta.new$phi) <- phi.names
names(theta.new$alpha) <- if (model == 1) alpha.name else "alpha"
names(theta.new$Ysigma) <- "sigma.e"
names(theta.new$Bsigma) <- "sigma.b"
theta.new$est.bi <- matrix(theta.new$est.bi, ncol = 1)
colnames(theta.new$est.bi) <- "bi"
rownames(theta.new$est.bi) <- (fitLME$groups[[1]])[uniqueID]
result <- list()
result$coefficients <- theta.new
result$logLik <- theta.new$lgLik
result$call <- call
result$numIter <- step
result$Vcov <- Vcov
result$est.bi <- theta.new$est.bi
result$coefficients$est.bi <- NULL
result$convergence <- if (converge == 1) "success" else "failure"
result$control <- cntrlLst
result$time.SE <- time.SE
result$N <- N
result$n <- n
result$d <- d
result$rho <- rho
result$dataMat <- list(Y = Y, B = B, ID = ID, IDName = fitLME$groups[[1]])
class(result) <- unlist(strsplit(deparse(sys.call()), split = '\\('))[1]
return(result)
} |
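## Hedged usage sketch (JSM-style workflow; assumes nlme, splines and
## survival, and a counting-process dataset 'long.data' with the columns
## used below -- all names here are hypothetical):
if (FALSE) {
  library(nlme); library(splines); library(survival)
  fitLME <- lme(sqrt(CD4) ~ bs(obstime, df = 3), random = ~ 1 | ID,
                data = long.data)
  fitCOX <- coxph(Surv(start, stop, event) ~ drug, data = long.data, x = TRUE)
  fit <- jmodelMult(fitLME, fitCOX, data = long.data, timeVarY = "obstime")
}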
library(testthat)
library(plumber)
test_that("custom OpenAPI Specification update function works", {
pr <- Plumber$new()
pr$handle("GET", "/:path/here", function(){})
pr$handle("POST", "/:path/there", function(){})
pr$setApiSpec(function(spec) {
spec$info$title <- Sys.time()
spec
})
pr$setDocs(docs = "wribbit")
pr$run(port = 1234)
pr$setDocs(docs = TRUE)
pr$run(port = 1234)
pr$setDocs(docs = "swagger")
pr$run(port = 1234)
})
test_that("host doesn't change for messages, but does for RStudio IDE", {
pr <- plumb_api("plumber", "01-append")
pr$run(
"0.0.0.0", port = 1234
)
pr$run(
"::", port = 1234
)
}) |
summary.triSht<-function(object, ...)
{
if(!inherits(object,"triSht"))
stop("object must be of class \"triSht\"")
ans<-list(n=object$n,
na=object$narcs,
nb=object$nchull,
nt=object$nt,
call=object$call)
class(ans)<-"summary.triSht"
ans
} |
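## Example (assumes the interp package, whose tri.mesh() returns the "triSht"
## objects with the n/narcs/nchull/nt components summarized above):
if (FALSE) {
  library(interp)
  tr <- tri.mesh(runif(20), runif(20))
  summary(tr)
}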
wcs_sql <- hBayesDM_model(
task_name = "wcs",
model_name = "sql",
model_type = "",
data_columns = c("subjID", "choice", "outcome"),
parameters = list(
"r" = c(0, 0.1, 1),
"p" = c(0, 0.1, 1),
"d" = c(0, 1, 5)
),
regressors = NULL,
postpreds = c("y_pred"),
preprocess_func = wcs_preprocess_func) |
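## Hedged usage sketch (hBayesDM convention; sampler settings illustrative):
if (FALSE) {
  output <- wcs_sql(data = "example", niter = 2000, nwarmup = 1000,
                    nchain = 4, ncore = 4)
  plot(output)
}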
context("CUSUMLM")
set.seed(410)
T <- 100
d <- 0
x <- fracdiff::fracdiff.sim(n=T, d=d)$series
expect_error(CUSUMLM(c(x,NA), d=d, delta=0.65))
expect_error(CUSUMLM(x, d=d, delta=0.65,tau=2))
expect_error(CUSUMLM(x, d=d, delta=1))
expect_warning(CUSUMLM(x, d=d, delta=0.65,tau=0.1))
x=stats::ts(x)
expect_error(CUSUMLM(x, d=d, delta=0.65))
T = 100
d_grid = c(0.1,0.2)
for(b in 1:length(d_grid))
{
d = d_grid[b]
q = 0
for(i in 1:15)
{
x = fracdiff::fracdiff.sim(n=T, d=d)$series
mod = CUSUMLM(x, d=d, delta=0.65)
q = q+sum(mod[4]>mod[3])
}
expect_lt(q,11)
}
T = 100
d_grid = c(0.1,0.2)
for(b in 1:length(d_grid))
{
d = d_grid[b]
q = 0
for(i in 1:15)
{
x = fracdiff::fracdiff.sim(n=T, d=d)$series
changep = c(rep(0,T/2), rep(1,T/2))
x = x+changep
mod = CUSUMLM(x, d=d, delta=0.65)
q = q+sum(mod[4]>mod[1])
}
expect_gt(q,2)
} |
data = SCPME::data_gen(p = 10, r = 5, n = 1000)
S = (nrow(data$X) - 1)/nrow(data$X)*cov(data$X)
expect_error(shrink(data$X, data$Y, trace = "none"), NA)
expect_error(shrink(data$X, crit.cv = "loglik", trace = "none"), NA)
expect_error(shrink(data$X, data$Y, lam = 0.1, trace = "none"), NA)
expect_warning(shrink(data$X, data$Y, lam = 0.1, trace = "none"), NA)
expect_error(shrink(data$X, crit.cv = "loglik", lam = 0.1, trace = "none"), NA)
expect_warning(shrink(data$X, crit.cv = "loglik", lam = 0.1, trace = "none"), NA)
expect_error(shrink(S = S, crit.cv = "loglik", lam = 0.1, trace = "none"), NA)
expect_warning(shrink(S = S, crit.cv = "loglik", lam = 0.1, trace = "none"), NA)
expect_error(shrink(data$X, data$Y, adjmaxit = 2, trace = "none"), NA)
expect_error(shrink(data$X, data$Y, path = TRUE, trace = "none"), NA)
expect_error(shrink(S = S, crit.cv = "loglik", path = TRUE, trace = "none"), NA)
expect_error(shrink(data$X, data$Y, B = cov(data$X, data$Y), lam.max = max(abs(crossprod(data$X, data$Y))), trace = "none"), NA) |
test_that("lt.sexrelation fails for wrong input", {
expect_snapshot_error(lt.sexrelation(1))
})
test_that("lt.sexrelation produces the right output", {
nitra_prep <- prep.life.table(nitra, group="sex", agebeg = "age_start", ageend = "age_end")
nt <- life.table(nitra_prep)
expect_snapshot_value(
lt.sexrelation(nt$f, nt$m),
style = c("json2")
)
})
test_that("lt.sexrelation fails for wrong second input element", {
nitra_prep <- prep.life.table(nitra, group="sex", agebeg = "age_start", ageend = "age_end")
nt <- life.table(nitra_prep)
er <- 2
expect_snapshot_error(
lt.sexrelation(nt$f, er)
)
}) |
barplot.rtconnect <-
function(height, type="daily", main=NULL, xlab=NULL, ylab=NULL,
plab=FALSE, ...) {
rtc <- height
names.arg <- NULL
legend.text <- NULL
col <- NULL
if (type == "daily") {
daily <- function(date) {
sum(subset(rtc, date=date)$Units)
}
start <- min(rtc$Begin.Date)
end <- max(rtc$End.Date)
days <- as.Date(start:end, origin="1970-01-01")
units <- sapply(days, daily)
names.arg <- days
    # NOTE: the label strings in this function were truncated in the source
    # (likely at a leading '#'); the "#Units ..." strings below are
    # reconstructed placeholders.
    t.main <- sprintf("#Units by Day (%s - %s)",
      as.character(start), as.character(end))
    t.xlab <- "Day"
    t.ylab <- "#Units"
} else if (type == "daily.version") {
daily.version <- function(date) {
version <- function(version) {
sum(subset(subset(rtc, version=version), date=date)$Units)
}
sapply(versions, version)
}
start <- min(rtc$Begin.Date)
end <- max(rtc$End.Date)
dates <- as.Date(start:end, origin="1970-01-01")
versions <- sort(unique(rtc$Version))
units <- sapply(dates, daily.version)
    names.arg <- dates
    t.main <- sprintf("#Units by Date/Version (%s - %s)",
      as.character(start), as.character(end))
    t.xlab <- "Date"
    t.ylab <- "#Units"
legend.text <- versions
col <- rainbow(nrow(units))
} else if (type == "weekly") {
weekly <- function(week) {
sum(subset(rtc, date.gte=week.to.date(week),
date.lte=week.to.date(week)+6)$Units)
}
start <- min(rtc$Begin.Date)
end <- max(rtc$End.Date)
days <- as.Date(start:end, origin="1970-01-01")
weeks <- unique(sapply(days, date.to.week))
units <- sapply(weeks, weekly)
names.arg <- weeks
    t.main <- sprintf("#Units by Week (%s - %s)",
      as.character(weeks[1]), as.character(weeks[length(weeks)]))
    t.xlab <- "Week"
    t.ylab <- "#Units"
} else if (type == "weekly.version") {
weekly.version <- function(week) {
version <- function(version) {
sum(subset(rtc, version=version,
date.gte=week.to.date(week),
date.lte=week.to.date(week)+6)$Units)
}
sapply(versions, version)
}
start <- min(rtc$Begin.Date)
end <- max(rtc$End.Date)
days <- as.Date(start:end, origin="1970-01-01")
weeks <- unique(sapply(days, date.to.week))
versions <- sort(unique(rtc$Version))
units <- sapply(weeks, weekly.version)
names.arg <- weeks
    t.main <- sprintf("#Units by Week/Version (%s - %s)",
      as.character(start), as.character(end))
    t.xlab <- "Week"
    t.ylab <- "#Units"
legend.text <- versions
col <- rainbow(nrow(units))
} else if (type == "version") {
version <- function(version) {
sum(subset(rtc, version=version)$Units)
}
versions <- sort(unique(rtc$Version))
units <- sapply(versions, version)
names.arg <- versions
    t.main <- "#Units by Version"
    t.xlab <- "Version"
    t.ylab <- "#Units"
} else if (type == "country") {
country <- function(country) {
sum(subset(rtc, country.code=country)$Units)
}
countries <- sort(unique(rtc$Country.Code))
units <- sapply(countries, country)
names.arg <- countries
    t.main <- "#Units by Country"
    t.xlab <- "Country"
    t.ylab <- "#Units"
}
if (missing(main)) main <- t.main
if (missing(xlab)) xlab <- t.xlab
if (missing(ylab)) ylab <- t.ylab
bp <- barplot(
units,
names.arg=names.arg,
ylim=c(0, 1.1 * max(units)),
main=main,
xlab=xlab,
ylab=ylab,
legend.text=legend.text,
col=col
)
if (plab) {
text(bp, units, units, adj=c(.5,-.5))
}
} |
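## Hedged usage sketch: 'rtc' stands for an rtconnect report object providing
## the Units/Begin.Date/Version/Country.Code fields and the subset() helpers
## used above; the loader call below is hypothetical.
if (FALSE) {
  rtc <- load.rtconnect("~/reports")  # hypothetical loader
  barplot(rtc, type = "daily")
  barplot(rtc, type = "version", plab = TRUE)
}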
NULL
fhirBulkClient <- R6Class("fhirBulkClient",
public = list(
initialize = function(endpoint, tokenURL = NULL, token = NULL)
execInitializeBulk(self, endpoint, tokenURL, token),
patientExport = function(criteria = NULL)
execPatientExport(self, criteria),
groupExport = function(groupId, criteria = NULL)
execGroupExport(self, groupId, criteria),
wholeSystemExport = function(criteria = NULL)
execWholeSystemExport(self, criteria),
getBulkStatus = function()
execGetBulkStatus(self),
downloadBulk = function(requestNumber, returnType = "parsed" ,deleteFromQueue = TRUE)
execDownloadBulk(self, requestNumber, returnType, deleteFromQueue),
deleteBulkRequest = function(requestNumber)
execDeleteBulkRequest(self, requestNumber),
print = function()
execPrint(self),
retrieveToken = function(jwt, scopes, tokenURL = NULL)
execRetrieveToken(self, jwt, scopes, tokenURL),
setToken = function(token)
execSetToken(self, token),
endpoint = NULL,
token = NULL,
tokenURL = NULL,
queue = data.frame(requestUrl = character(),
statusUrl = character(),
progress=character(),
stringsAsFactors=FALSE)
)
)
execInitializeBulk <- function(self, endpoint, tokenURL, token){
if(substr(endpoint, nchar(endpoint), nchar(endpoint)) != "/"){
endpoint <- paste(endpoint, "/", sep="")
}
self$endpoint <- endpoint
if(!is.null(tokenURL)){
self$tokenURL <- tokenURL
}
if(!is.null(token)){
execSetToken(self, token)
}
}
execPatientExport <- function(self, criteria){
url <- toExportUrl(self, "Patient", NULL, criteria)
addToQueue(self, url)
}
execGroupExport <- function(self, groupId, criteria){
url <- toExportUrl(self, NULL, groupId, criteria)
addToQueue(self, url)
}
execWholeSystemExport <- function(self, criteria){
url <- toExportUrl(self, NULL, NULL, criteria)
addToQueue(self, url)
}
execGetBulkStatus <- function(self){
if(nrow(self$queue) == 0){
stop("There are no downloads in the queue", call. = FALSE)
}
headers <- requestHeaders(self, "ndjson")
self$queue["progress"] <- lapply(self$queue["statusUrl"], function(x){
response <- getRequest(self, x, headers)
if(status_code(response) == 200){
"100%"
}
else{
headers(response)$`x-progress`
}
})
self$queue
}
execDownloadBulk <- function(self, requestNumber, returnType, deleteFromQueue){
execGetBulkStatus(self)
if(is.na(self$queue[requestNumber,]$progress)){
    stop(paste("There is no download in the queue at place", requestNumber), call. = FALSE)
}
else if(self$queue[requestNumber,]$progress != "100%"){
stop(paste("Progress is not yet 100%, it is at", self$queue[requestNumber,]$progress), call. = FALSE)
}
else{
bulk <- getBulk(self, self$queue[requestNumber,]$statusUrl, returnType)
if(deleteFromQueue){
self$queue <- self$queue[-requestNumber,]
}
bulk
}
}
execRetrieveToken <- function(self, jwt, scopes, tokenURL){
token <- postJWT(self, jwt, scopes, tokenURL)
self$setToken(token$access_token)
token
}
execDeleteBulkRequest <- function(self, requestNumber){
response <- deleteRequest(self, self$queue[requestNumber,]$statusUrl)
self$queue <- self$queue[-requestNumber,]
}
addToQueue <- function(self, url){
headers <- requestHeaders(self, "ndjson")
response <- getRequest(self, url, headers)
response_headers <- headers(response)
progress_response <- getRequest(self, response_headers$`content-location`, headers)
progress_headers <- headers(progress_response)
self$queue[nrow(self$queue) + 1,] = list(url, response_headers$`content-location`, progress_headers$`x-progress`)
} |
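## Hedged usage sketch of the bulk client defined above (the endpoint is a
## public test server and may not support bulk export; purely illustrative):
if (FALSE) {
  client <- fhirBulkClient$new("http://hapi.fhir.org/baseR4")
  client$patientExport("_type=Patient,Observation")
  client$getBulkStatus()        # poll until progress reaches "100%"
  bundle <- client$downloadBulk(1)
}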
.loocvPhylo <- function(par, cvmethod, targM, corrStr, penalty, error, nobs){
if(corrStr$REML) n <- nobs-ncol(corrStr$X) else n <- nobs
p = corrStr$p
alpha = corrStr$bounds$trTun(par)
if(!is.null(error)) corrStr$mserr = corrStr$bounds$trSE(par)
mod_par = .corrStr(corrStr$bounds$trPar(par), corrStr);
XtX <- pseudoinverse(mod_par$X)
B <- XtX%*%mod_par$Y
residuals <- mod_par$Y - mod_par$X%*%B
Ccov <- mod_par$det
Sk <- crossprod(residuals)/n
switch(cvmethod,
"H&L"={
target <- .targetM(Sk, targM, penalty="RidgeArch")
beta <- (1 - alpha)/(n - 1)
G <- n*beta*Sk + alpha * target
Gi <- try(chol(G), silent=TRUE)
if(inherits(Gi, 'try-error')) return(1e6)
llik <- sapply(1:(nobs-1), function(x){
rk <- sum(backsolve(Gi, residuals[x,], transpose = TRUE)^2)
(n/(nobs-1))*log(1 - beta*rk) + (rk/(1 - beta*rk))
})
ll <- 0.5 * (n*p*log(2*pi) + p*Ccov +
n*sum(2*log(diag(Gi))) + sum(llik))
},
"Mahalanobis"={
target <- .targetM(Sk, targM, penalty="RidgeArch")
beta <- (1 - alpha)/(n - 1)
G <- n*beta*Sk + alpha * target
Gi <- try(chol(G), silent=TRUE)
if(inherits(Gi, 'try-error')) return(1e6)
r0 <- sum(backsolve(Gi, t(residuals), transpose = TRUE)^2)/n
ll <- 0.5 * (n*p*log(2*pi) + p*Ccov + n*sum(2*log(diag(Gi))) +
n*log(1 - beta*r0) + n*(r0/(1 - beta*r0)))
},
"LOOCV"={
target <- .targetM(Sk, targM, penalty)
h <- diag(mod_par$X%*%XtX)
    nloo <- corrStr$nloo[!(h + 1e-8 >= 1)] # drop observations with leverage ~ 1
const <- n/length(nloo)
llik <- sapply(nloo, function(x){
Bx <- B - tcrossprod(XtX[,x], residuals[x,])/(1-h[x])
residuals2 <- mod_par$Y - mod_par$X%*%Bx
Skpartial <- crossprod(residuals2[-x,])/(n-1)
.regularizedLik(Skpartial, residuals[x,], alpha, targM, target, penalty, const)
})
ll <- 0.5 * (n*p*log(2*pi) + p*Ccov + sum(llik))
},
"LL"={
if(corrStr$model=="BMM"){
scaling <- mean(diag(Sk))
Sk <- Sk*(1/scaling)
}
Gi <- try(chol(Sk), silent=TRUE)
if(inherits(Gi, 'try-error')) return(1e6)
detValue <- sum(2*log(diag(Gi)))
quadprod <- sum(backsolve(Gi, t(residuals), transpose = TRUE)^2)
ll <- 0.5 * (n*p*log(2*pi) + p*Ccov + n*detValue + quadprod)
},
stop("You must specify \"LOOCV\", \"H&L\" or \"Mahalanobis\" method for computing the LOOCV score and \"LL\" for the log-likelihood")
)
if (!is.finite(ll)) return(1e6)
return(ll)
}
.mvGLS <- function(corrstruct){
B <- pseudoinverse(corrstruct$X)%*%corrstruct$Y
residuals <- corrstruct$Y - corrstruct$X%*%B
return(list(residuals=residuals, B=B))
}
.scaleStruct <- function(structure){
if(inherits(structure, "phylo")){
structure$edge.length <- structure$edge.length/max(node.depth.edgelength(structure))
}
return(structure)
}
.targetM <- function(S, targM, penalty="RidgeArch", I = NULL, ...){
p <- dim(S)[1]
args <- list(...)
if(is.null(args[["userMatrix"]])) userMatrix <- NULL else userMatrix <- args$userMatrix
if(is.null(I)) I = diag(p)
target = NULL
if(penalty=="RidgeArch"){
switch(targM,
"Variance" = {target <- diag(diag(S))},
"unitVariance" = {target <- I*mean(diag(S))},
"null" = {
warning("The \"null\" target cannot be used with the \"RidgeArch\" method. The \"unitVariance\" target is used instead.")
target <- I*mean(diag(S))
},
"user" = { target <- userMatrix}
)
}else if(penalty=="RidgeAlt"){
switch(targM,
"Variance" = {target <- diag(1/diag(S))},
"unitVariance" = {target <- I*(1/mean(diag(S)))},
"null" = {target <- matrix(0,p,p)},
"user" = { target <- solve(userMatrix)}
)
}
return(target)
}
.corrStr <- function(par, timeObject){
if(timeObject$model%in%c("EB", "BM", "lambda", "OU", "OUvcv", "BMM", "OUM", "OU1")){
struct = .transformTree(timeObject$structure, par, model=timeObject$model, mserr=timeObject$mserr, Y=timeObject$Y, X=timeObject$X, REML=timeObject$REML, precalc=timeObject$precalc)
}else{
stop("Currently works for phylogenetic models \"BM\", \"EB\", \"OU\", \"BMM\", \"OUM\" and \"lambda\" only...")
}
return(struct)
}
.regularizedLik <- function(S, residuals, lambda, targM, target, penalty, const=1){
switch(penalty,
"RidgeArch"={
G <- (1-lambda)*S + lambda*target
Gi <- try(chol(G), silent=TRUE)
if(inherits(Gi, 'try-error')) return(1e6)
rk <- sum(backsolve(Gi, residuals, transpose = TRUE)^2)
llik <- const*sum(2*log(diag(Gi))) + rk
},
"RidgeAlt"={
quad <- .makePenaltyQuad(S, lambda, target, targM)
Gi <- quad$P
detG <- sum(log(quad$ev))
Swk <- tcrossprod(residuals)
rk <- sum(Swk*Gi)
llik <- const*detG + rk
},
"LASSO"={
LASSO <- glassoFast(S, lambda, maxIt=500)
G <- LASSO$w;
Gi <- LASSO$wi;
Swk <- tcrossprod(residuals);
rk <- sum(Swk*Gi);
llik <- const*as.numeric(determinant(G)$modulus) + rk
})
return(llik)
}
.makePenaltyQuad <- function(S,lambda,target,targM){
switch(targM,
"Variance"={
D <- (S - lambda * target)
D2 <- D %*% D
sqrtM <- .sqM(D2/4 + lambda * diag(nrow(S)))
Alt <- D/2 + sqrtM
AltInv <- (1/lambda)*(Alt - D)
evalues <- eigen(Alt, symmetric=TRUE, only.values = TRUE)$values
},
"unitVariance"={
eig <- eigen(S, symmetric = TRUE)
Q <- eig$vectors
d <- eig$values - lambda*target[1]
evalues <- sqrt(lambda + d^2/4) + d/2
D1 <- evalues
D2 <- 1/evalues
Alt <- Q %*% (D1 * t(Q))
AltInv <- Q %*% (D2 * t(Q))
},
"null"={
eig <- eigen(S, symmetric = TRUE)
Q <- eig$vectors
d <- eig$values
evalues <- sqrt(lambda + d^2/4) + d/2
D1 <- evalues
D2 <- 1/evalues
Alt <- Q %*% (D1 * t(Q))
AltInv <- Q %*% (D2 * t(Q))
}
)
pen <- list(S=Alt, P=AltInv, ev=evalues)
return(pen)
}
.sqM <- function(x){
if(!all(is.finite(x))) return(Inf)
eig <- eigen(x, symmetric = TRUE)
sqrtM <- eig$vectors %*% (sqrt(eig$values) * t(eig$vectors))
return(sqrtM)
}
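## Quick check of .sqM(): for symmetric positive-definite A, the returned M
## satisfies M %*% M = A up to numerical error.
if (FALSE) {
  set.seed(1)
  A <- crossprod(matrix(rnorm(25), 5))
  M <- .sqM(A)
  all.equal(M %*% M, A)  # TRUE
}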
.sqM1 <- function(x){
if(inherits(x, "phylo")) x <- vcv.phylo(x)
if(!all(is.finite(x))) return(Inf)
eig <- eigen(x, symmetric = TRUE)
tol = max(dim(x))*max(eig$values)*.Machine$double.eps
Positive = eig$values > tol
if(sum(Positive)<length(eig$values)) warning("The phylogenetic covariance matrix was singular. Check the results carefully and consider using 'eigSqm=FALSE' option and 'error=TRUE'")
sqrtM <- eig$vectors[,Positive,drop=FALSE] %*% ((1/sqrt(eig$values[Positive])) * t(eig$vectors[,Positive,drop=FALSE]))
return(sqrtM)
}
.penalizedCov <- function(S, penalty, targM="null", tuning=0){
p = ncol(S)
Target <- .targetM(S, targM, penalty)
switch(penalty,
"RidgeAlt"={
pen <- .makePenaltyQuad(S,tuning,Target,targM)
Pi <- pen$S
P <- pen$P
},
"RidgeArch"={
Pi <- (1-tuning)*S + tuning*Target
eig <- eigen(Pi)
V <- eig$vectors
d <- eig$values
P <- V%*%((1/d) * t(V))
},
"LASSO"={
LASSO <- glassoFast(S,tuning)
Pi <- LASSO$w
P <- LASSO$wi
},
"LL"={
Pi <- S
eig <- eigen(Pi)
V <- eig$vectors
d <- eig$values
P <- V%*%((1/d) * t(V))
})
estimate <- list(Pinv=Pi, P=P, S=S)
return(estimate)
}
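## Example: linear (RidgeArch) shrinkage of a sample covariance toward the
## unit-variance target, using the helpers defined above:
## Pinv = (1 - tuning) * S + tuning * mean(diag(S)) * I.
if (FALSE) {
  set.seed(1)
  X <- matrix(rnorm(20 * 5), 20, 5)
  est <- .penalizedCov(cov(X), penalty = "RidgeArch",
                       targM = "unitVariance", tuning = 0.3)
  est$Pinv
}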
.transformTree <- function(phy, param, model=c("EB", "BM", "lambda", "OU", "BMM", "OUM"), mserr=NULL, Y=NULL, X=NULL, REML=TRUE, precalc=NULL){
n <- Ntip(phy)
parent <- phy$edge[,1]
descendent <- phy$edge[,2]
extern <- (descendent <= n)
N <- 2*n-2
diagWeight <- NULL
const <- 0
flag <- FALSE
switch(model,
"OU"={
D = numeric(n)
if(!is.ultrametric(phy)){
dis = node.depth.edgelength(phy)
D = max(dis[1:n]) - dis[1:n]
D = D - mean(D)
phy$edge.length[extern] <- phy$edge.length[extern] + D[descendent[extern]]
flag <- TRUE
}
times <- branching.times(phy)
Tmax <- max(times)
distRoot <- exp(-2*param*times)*(1 - exp(-2*param*(Tmax-times)))
d1 = distRoot[parent-n]
d2 = numeric(N)
d2[extern] = exp(-2*param*D[descendent[extern]]) * (1-exp(-2*param*(Tmax-D[descendent[extern]])))
d2[!extern] = distRoot[descendent[!extern]-n]
diagWeight = exp(param*D)
phy$edge.length = (d2 - d1)/(2*param)
names(diagWeight) = phy$tip.label
w <- 1/diagWeight
Y <- matrix(w*Y, nrow=n)
X <- matrix(w*X, nrow=n)
if(!is.null(mserr)) mserr = mserr*exp(-2*param*D[descendent[extern]])
},
"OUM"={
W <- .Call(mvmorph_weights, nterm=as.integer(n), epochs=precalc$epochs, lambda=param, S=1, S1=1, beta=precalc$listReg, root=as.integer(precalc$root_std))
D = numeric(n)
if(!is.ultrametric(phy)){
dis = node.depth.edgelength(phy)
D = max(dis[1:n]) - dis[1:n]
D = D - mean(D)
phy$edge.length[extern] <- phy$edge.length[extern] + D[descendent[extern]]
flag <- TRUE
}
times <- branching.times(phy)
Tmax <- max(times)
if(precalc$randomRoot){
distRoot <- exp(-2*param*times)
d1 = distRoot[parent-n]
d2 = numeric(N)
d2[extern] = exp(-2*param*D[descendent[extern]])
d2[!extern] = distRoot[descendent[!extern]-n]
}else{
distRoot <- exp(-2*param*times)*(1 - exp(-2*param*(Tmax-times)))
d1 = distRoot[parent-n]
d2 = numeric(N)
d2[extern] = exp(-2*param*D[descendent[extern]]) * (1-exp(-2*param*(Tmax-D[descendent[extern]])))
d2[!extern] = distRoot[descendent[!extern]-n]
}
diagWeight = exp(param*D)
phy$edge.length = (d2 - d1)/(2*param)
names(diagWeight) = phy$tip.label
w <- 1/diagWeight
Y <- matrix(w*Y, nrow=n)
X <- matrix(w*W, nrow=n)
if(REML) const <- determinant(crossprod(W))$modulus
if(!is.null(mserr)) mserr = mserr*exp(-2*param*D[descendent[extern]])
},
"OU1"={
W <- .Call(mvmorph_weights, nterm=as.integer(n), epochs=precalc$epochs, lambda=param, S=1, S1=1, beta=precalc$listReg, root=as.integer(precalc$root_std))
D = numeric(n)
if(!is.ultrametric(phy)){
dis = node.depth.edgelength(phy)
D = max(dis[1:n]) - dis[1:n]
D = D - mean(D)
phy$edge.length[extern] <- phy$edge.length[extern] + D[descendent[extern]]
flag <- TRUE
}
times <- branching.times(phy)
Tmax <- max(times)
if(precalc$randomRoot){
distRoot <- exp(-2*param*times)
d1 = distRoot[parent-n]
d2 = numeric(N)
d2[extern] = exp(-2*param*D[descendent[extern]])
d2[!extern] = distRoot[descendent[!extern]-n]
}else{
distRoot <- exp(-2*param*times)*(1 - exp(-2*param*(Tmax-times)))
d1 = distRoot[parent-n]
d2 = numeric(N)
d2[extern] = exp(-2*param*D[descendent[extern]]) * (1-exp(-2*param*(Tmax-D[descendent[extern]])))
d2[!extern] = distRoot[descendent[!extern]-n]
}
diagWeight = exp(param*D)
phy$edge.length = (d2 - d1)/(2*param)
names(diagWeight) = phy$tip.label
w <- 1/diagWeight
Y <- matrix(w*Y, nrow=n)
X <- matrix(w*W, nrow=n)
if(REML) const <- determinant(crossprod(W))$modulus
if(!is.null(mserr)) mserr = mserr*exp(-2*param*D[descendent[extern]])
},
"EB"={
if (param!=0){
distFromRoot <- node.depth.edgelength(phy)
phy$edge.length = (exp(param*distFromRoot[descendent])-exp(param*distFromRoot[parent]))/param
}
},
"lambda"={
if(param!=1) {
root2tipDist <- node.depth.edgelength(phy)[1:n]
phy$edge.length <- phy$edge.length * param
phy$edge.length[extern] <- phy$edge.length[extern] + (root2tipDist * (1-param))
}
},
"OUvcv"={
V<-.Call("mvmorph_covar_ou_fixed", A=vcv.phylo(phy), alpha=param, sigma=1, PACKAGE="mvMORPH")
C<-list(sqrtM=t(chol(solve(V))), det=determinant(V)$modulus)
},
"OUTS"={
stop("Not yet implemented. The time-series models are coming soon, please be patient")
},
"RWTS"={
stop("Not yet implemented. The time-series models are coming soon, please be patient")
},
"BMM"={
phy$edge.length <- phy$mapped.edge %*% param
})
if(is.numeric(mserr)) phy$edge.length[extern] = phy$edge.length[extern] + mserr
if(model!="OUvcv") C <- pruning(phy, trans=FALSE)
X <- crossprod(C$sqrtM, X)
Y <- crossprod(C$sqrtM, Y)
deterM <- C$det
if(flag) deterM <- deterM + 2*sum(log(diagWeight))
if(REML) deterM <- deterM + determinant(crossprod(X))$modulus - const
return(list(phy=phy, diagWeight=diagWeight, X=X, Y=Y, det=deterM, const=const))
}
.vec <- function(x) as.numeric(x)
.setBounds <- function(penalty, model, lower, upper, tol=1e-10, mserr=NULL, penalized=TRUE, corrModel=NULL, k=NULL){
if(is.null(upper)){
switch(model,
"EB"={up <- 0},
"OU"={up <- 30/max(node.depth.edgelength(corrModel$structure))},
"OU1"={up <- 30/max(node.depth.edgelength(corrModel$structure))},
"OUM"={up <- 30/max(node.depth.edgelength(corrModel$structure))},
"lambda"={up <- 1},
"BM"={up <- Inf},
"BMM"={up <- rep(Inf,k)},
up <- Inf)
}else{
up <- upper
}
if(is.null(lower)){
switch(model,
"EB"={low <- -10},
"OU"={low <- 1e-10},
"OU1"={low <- 1e-10},
"OUM"={low <- 1e-10},
"lambda"={low <- 1e-8},
"BM"={low <- -Inf},
"BMM"={low <- rep(-Inf,k)},
low <- -Inf)
}else{
low <- lower
}
if(is.null(tol)){
if(penalty=="RidgeArch"){
tol = 1e-8
}else{
tol = 0
}
}
if(penalized){
if(penalty%in%c("RidgeAlt","LASSO")){
upperBound <- c(log(10e6),up)
lowerBound <- c(log(tol),low)
}else if(penalty=="RidgeArch"){
upperBound <- c(1,up)
lowerBound <- c(tol,low)
}
if(model=="BMM"){
id1 <- 1; id2 <- 2:(k+1); id3 <- k+2
}else if(model=="BM"){
id1 <- id2 <- 1; id3 <- 2
}else{
id1 <- 1; id2 <- 2; id3 <- 3
}
}else{
upperBound <- up
lowerBound <- low
if(model=="BMM"){
id1 <- 1; id2 <- 1:k; id3 <- k+1
}else if(model=="BM"){
id1 <- id2 <- id3 <- 1
}else{
id1 <- 1; id2 <- 1; id3 <- 2
}
}
if(!is.null(mserr)){
lowerBound = c(lowerBound,0)
upperBound = c(upperBound,Inf)
}
switch(penalty,
"RidgeArch"={ transformTun <- function(x) (x[id1])},
"RidgeAlt" ={ transformTun <- function(x) exp(x[id1])},
"LASSO" ={ transformTun <- function(x) exp(x[id1])},
transformTun <- function(x) (x[id1])
)
switch(model,
"OU"={ transformPar <- function(x) (x[id2])},
"BM" ={ transformPar <- function(x) (x[id2])},
"EB" ={ transformPar <- function(x) (x[id2])},
"lambda" ={ transformPar <- function(x) (x[id2])},
"BMM"={transformPar <- function(x) (x[id2]*x[id2])},
transformPar <- function(x) (x[id2])
)
transformSE <- function(x) x[id3]*x[id3]
bounds <- list(upper=upperBound, lower=lowerBound, trTun=transformTun, trPar= transformPar, trSE=transformSE)
return(bounds)
}
.startGuess <- function(corrModel, cvmethod, mserr=NULL, target, penalty, echo=TRUE, penalized=TRUE,...){
if(echo==TRUE) message("Initialization via grid search. Please wait...")
if(penalized){
if(penalty=="RidgeArch"){
range_val <- c(1e-6, 0.01, 0.05, 0.1, 0.15, 0.2, 0.3, 0.5, 0.7, 0.9)
}else if(penalty=="RidgeAlt"){
range_val <- log(c(1e-12, 1e-9, 1e-6, 0.01, 0.1, 1, 10, 100, 1000, 10000))
}else{
range_val <- log(c(1e-6, 0.01, 0.1, 1, 10, 100, 1000))
}
}else{
range_val <- NULL
}
list_param <- list()
list_param[[1]] <- range_val
list_param[[2]] <- 1
switch(corrModel$model,
"OU"={
mod_val <- log(2)/(max(node.depth.edgelength(corrModel$structure))/c(0.1,0.5,1.5,3,8))
list_param[[2]] <- mod_val
index_err <- 3
},
"OU1"={
mod_val <- log(2)/(max(node.depth.edgelength(corrModel$structure))/c(0.1,0.5,1.5,3,8))
list_param[[2]] <- mod_val
index_err <- 3
},
"OUM"={
mod_val <- log(2)/(max(node.depth.edgelength(corrModel$structure))/c(0.1,0.5,1.5,3,8))
list_param[[2]] <- mod_val
index_err <- 3
},
"OUvcv"={
mod_val <- log(2)/(max(node.depth.edgelength(corrModel$structure))/c(0.1,0.5,1.5,3,8))
list_param[[2]] <- mod_val
index_err <- 3
},
"lambda"={
mod_val <- c(0.2,0.5,0.8)
list_param[[2]] <- mod_val
index_err <- 3
},
"EB"={
mod_val <- -log(2)/(max(node.depth.edgelength(corrModel$structure))/c(0.1,0.5,1.5,3,8))
list_param[[2]] <- mod_val
index_err <- 3
},
"BMM"={
start_values <- function(tree, data){
tip_values <- 1:Ntip(tree)
index_tips <- tree$edge[,2]%in%tip_values
maps <- sapply(tree$maps[index_tips], function(x) names(x[length(x)]))
k = ncol(tree$mapped.edge)
if(length(unique(maps))<k) {
mod_val <- mean(diag(mvLL(tree, data, method="pic")$sigma))
guesses <- as.list(rep(sqrt(mod_val), k))
}else{
guesses <- lapply(colnames(tree$mapped.edge), function(map_names) {
dat_red <- which(maps==map_names)
tree_red=drop.tip(tree, tree$tip.label[!tree$tip.label%in%tree$tip.label[dat_red]] )
sqrt(mean(diag(mvLL(tree_red, data[tree_red$tip.label,], method="pic")$sigma)))
})
}
return(guesses)
}
mod_val <- start_values(corrModel$structure, corrModel$Y)
list_param <- c(list(range_val), mod_val)
index_err <- length(list_param) + 1
},
index_err <- 3
)
if(!is.null(corrModel$mserr)){
if(corrModel$model=="BMM") list_param[[index_err]] <- sqrt(c(0.001,0.01,0.1,1,10)*mean(unlist(mod_val)^2)) else list_param[[index_err]] <- c(0.001,0.01,0.1,1,10)
list_param[sapply(list_param, is.null)] <- NULL
brute_force <- expand.grid(list_param)
}else{
list_param[sapply(list_param, is.null)] <- NULL
brute_force <- expand.grid(list_param)
}
start <- brute_force[which.min(apply(brute_force, 1, .loocvPhylo,
cvmethod=cvmethod,
targM=target,
corrStr=corrModel,
penalty=penalty,
error=mserr,
nobs=corrModel$nobs )),]
if(echo==TRUE & penalized==TRUE) cat("Best starting for the tuning: ",as.numeric(corrModel$bounds$trTun(start[1])))
return(start)
}
.check_par_results <- function(corrModel, par, penalized=TRUE){
if(penalized) indice = 2 else indice = 1
switch(corrModel$model,
"OU"={
if(par==corrModel$bounds$upper[indice]) warning("Parameter search reached the upper bound. You should consider increasing the \"upper\" argument value.")
},
"OU1"={
if(par==corrModel$bounds$upper[indice]) warning("Parameter search reached the upper bound. You should consider increasing the \"upper\" argument value.")
},
"OUM"={
if(par==corrModel$bounds$upper[indice]) warning("Parameter search reached the upper bound. You should consider increasing the \"upper\" argument value.")
}
)
} |
decode_jwt <- function(token, ...)
{
UseMethod("decode_jwt")
}
decode_jwt.AzureToken <- function(token, type=c("access", "id"), ...)
{
type <- paste0(match.arg(type), "_token")
if(is.null(token$credentials[[type]]))
stop(type, " not found", call.=FALSE)
decode_jwt(token$credentials[[type]])
}
decode_jwt.Token <- function(token, type=c("access", "id"), ...)
{
type <- paste0(match.arg(type), "_token")
if(is.null(token$credentials[[type]]))
stop(type, " not found", call.=FALSE)
decode_jwt(token$credentials[[type]])
}
decode_jwt.character <- function(token, ...)
{
token <- as.list(strsplit(token, "\\.")[[1]])
token[1:2] <- lapply(token[1:2], function(x)
jsonlite::fromJSON(rawToChar(jose::base64url_decode(x))))
names(token)[1:2] <- c("header", "payload")
if(length(token) > 2)
names(token)[3] <- "signature"
token
}
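# Illustrative round trip (a sketch, not part of the original file): create a
# signed token with the 'jose' package -- which decode_jwt.character() already
# relies on -- and inspect its decoded parts. The claim content is invented.
if (requireNamespace("jose", quietly = TRUE)) {
tok <- jose::jwt_encode_hmac(jose::jwt_claim(user = "demo"), secret = charToRaw("demo-secret"))
parts <- decode_jwt(tok)
str(parts$header) # algorithm/type fields
str(parts$payload) # includes the invented 'user' claim
}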
extract_jwt <- function(token, ...)
{
UseMethod("extract_jwt")
}
extract_jwt.AzureToken <- function(token, type=c("access", "id"), ...)
{
type <- match.arg(type)
token$credentials[[paste0(type, "_token")]]
}
extract_jwt.Token <- function(token, type=c("access", "id"), ...)
{
type <- match.arg(type)
token$credentials[[paste0(type, "_token")]]
}
extract_jwt.character <- function(token, ...)
{
token
} |
bootstrap.contingency.test <- function(rds.data,
row.var,
col.var,
number.of.bootstrap.samples = 1000,
weight.type=c("HCG", "RDS-II","Arithmetic Mean"),
table.only=FALSE,
verbose = TRUE, ...) {
if(missing(weight.type)){
weight.type <- "HCG"
}
weight.type <- match.arg(weight.type, c("RDS-II","Arithmetic Mean","HCG"))
network.size <- attr(rds.data, "network.size.variable")
remvalues <- rds.data[[network.size]] == 0 | is.na(rds.data[[network.size]])
if (any(remvalues)) {
warning(
paste(
sum(remvalues),
"of",
nrow(rds.data),
"network sizes were missing or zero. The estimator will presume these are",
max(rds.data[[network.size]], na.rm = TRUE)
),
call. = FALSE
)
rds.data[[network.size]][remvalues] <- max(rds.data[[network.size]], na.rm = TRUE)
}
N <- nrow(rds.data) * 100000
wave <- get.wave(rds.data)
if (!has.recruitment.time(rds.data)) {
time <- rep(1, nrow(rds.data))
} else{
time <- get.recruitment.time(rds.data)
}
rds <- rds.data[order(time, wave),]
if (!has.recruitment.time(rds)) {
time <- rep(1, nrow(rds.data))
} else{
time <- get.recruitment.time(rds)
}
row.fact <- as.factor(rds[[row.var]])
row.nms <- levels(row.fact)
row <- as.numeric(row.fact)
nr <- max(row, na.rm=TRUE)
col.fact <- as.factor(rds[[col.var]])
col.nms <- levels(col.fact)
col <- as.numeric(col.fact)
nc <- max(col, na.rm=TRUE)
eg <- expand.grid(1:nc, 1:nr)
v <-
factor(paste(row, col, sep = "_"), levels = paste(eg$Var2, eg$Var1, sep =
"_"))
out.miss <- is.na(row) | is.na(col)
varname <- .make.unique(names(rds), "variable")
rds[[varname]] <- v
if(length(row.nms)<2){
stop(paste(row.var,"contains less than 2 unique values"))
}
if(length(col.nms)<2){
stop(paste(col.var,"contains less than 2 unique values"))
}
rid <- get.rid(rds)
id <- get.id(rds)
degree <- get.net.size(rds)
hcg.row <-
hcg.estimate(id, rid, time, degree, row, N, small.fraction = TRUE)
hcg.col <-
hcg.estimate(id, rid, time, degree, col, N, small.fraction = TRUE)
theta <- matrix(0, nrow = nr * nc, ncol = nr * nc)
yhat <- rep(0, nr * nc)
for (i in 1:nr) {
for (j in 1:nr) {
for (k in 1:nc) {
for (l in 1:nc) {
theta[(i - 1) * nc + k, (j - 1) * nc + l] <-
hcg.row$theta[i, j] * hcg.col$theta[k, l]
yhat[(i - 1) * nc + k] <-
hcg.row$yhat[i] * hcg.col$yhat[k]
}
}
}
}
names(yhat) <-
colnames(theta) <-
row.names(theta) <- paste(eg$Var2, eg$Var1, sep = "_")
weights <- rep(0, nrow(rds))
for (i in 1:nr) {
for (k in 1:nc) {
d <- degree[row == i & col == k & !out.miss]
weights[row == i & col == k & !out.miss] <- yhat[(i - 1) * nc + k] * (1 / d) / sum(1 / d)
}
}
hcg.est <- list(theta = theta,
yhat = yhat,
weights = weights)
chi.squared.func <- function(x, with.names=FALSE) {
vspl <- strsplit(as.character(x[[varname]]), "_", fixed = TRUE)
v1 <- as.numeric(sapply(vspl, function(y)
y[1]))
v2 <- as.numeric(sapply(vspl, function(y)
y[2]))
x[[varname]] <- factor(x[[varname]])
wts <- compute.weights(
x,
outcome.variable = varname,
weight.type = weight.type,
N = N,
small.fraction = TRUE
)
wts <- wts * nrow(x) / sum(wts)
tab <- tapply(wts, list(v1, v2), sum, simplify = TRUE)
if(with.names){
rownames(tab) <- row.nms[as.numeric(rownames(tab))]
colnames(tab) <- col.nms[as.numeric(colnames(tab))]
}
tab[is.na(tab)] <- 0
tab <- tab[rowSums(tab) > 0, colSums(tab) > 0, drop=FALSE]
if(is.null(dim(tab)) || ncol(tab) < 2 || nrow(tab) < 2)
result <- structure(NA, .Names = "X-squared")
else
result <- suppressWarnings(chisq.test(tab, correct = FALSE)$statistic)
attr(result,"table") <- tab
result
}
stat <- chi.squared.func(rds, with.names=TRUE)
tbl <- attr(stat,"table")
attr(stat,"table") <- NULL
if(table.only){
return(tbl)
}
boot.stats <-
unlist(
HCG.boostrap(
rds,
varname,
number.of.bootstrap.samples,
N = N,
fun = chi.squared.func,
hcg.est = hcg.est,
small.fraction = TRUE,
verbose = verbose
)
)
pvalue <- mean(boot.stats > stat, na.rm=TRUE)
result <- list(
table = tbl,
p.value = pvalue,
statistic = stat,
method = paste("RDS Bootstrap Test of", row.var, "versus", col.var),
row.var = row.var,
col.var = col.var,
boot.stats = boot.stats
)
class(result) <- c("rds.contin.bootstrap", "htest")
result
}
print.rds.contin.bootstrap <- function(x, show.table=FALSE, ...){
if(show.table){
cat("Weighted table:", x$row.var, " by ", x$col.var,"\n")
print(x$table, ...)
}
class(x) <- "htest"
print(x)
}
.make.unique <- function(names, name) {
i <- 0
if (!(name %in% names))
return(name)
while (TRUE) {
i <- i + 1
newname <- paste0(name, i)
if (!(newname %in% names))
return(newname)
}
} |
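# Minimal illustration (assumed usage, not from the original source): pick a
# temporary column name that does not collide with existing names.
# .make.unique(c("id", "variable"), "variable") # -> "variable1"
# .make.unique(c("id"), "variable") # -> "variable"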
SEA <- function(){
shinyApp(ui = ui, server = server)
}
getSamPerTable <-
function(timSer, sampPer) {
timSerNoNas <- timSer[which(is.na(timSer$value) == FALSE), ]
numNoNaTime <- as.numeric(timSerNoNas$time)
numAllTime0 <- as.numeric(timSer$time) - numNoNaTime[1]
numNoNaTime0 <- numNoNaTime - numNoNaTime[1]
difnumNoNaTime0 <- diff(numNoNaTime0)
getGroupsInNoNas <- function(difnumNoNaTime0) {
result <- NULL
for (i in sampPer) {
rawIndices <- which(difnumNoNaTime0 == i)
grouped <- groupIndices(rawIndices)
result <- rbind(result, cbind(rep(i, nrow(grouped)), grouped))
rm(rawIndices, grouped)
}
result[, 3] <- result[, 3] + 1
result[, 4] <- result[, 4] + 1
result <- result[order(result[, 2]), ]
result
}
groupNoNas <- getGroupsInNoNas(difnumNoNaTime0)
getGroupsInAll <- function(groupNoNas, numNoNaTime0, numAllTime0) {
pcol <- which(compareVecVec(numNoNaTime0[groupNoNas[, 2]], numAllTime0),
arr.ind = TRUE)[, 1]
scol <- which(compareVecVec(numNoNaTime0[groupNoNas[, 3]], numAllTime0),
arr.ind = TRUE)[, 1]
groupInAll <- cbind(groupNoNas[, 1], pcol, scol, groupNoNas[, 4])
rownames(groupInAll) <- NULL
colnames(groupInAll) <- c("samp.per", "ini", "fin", "N")
groupInAll
}
groupInAll <- getGroupsInAll(groupNoNas, numNoNaTime0, numAllTime0)
addTrueGaps <- function(groupInAll, numAllTime0, timSer) {
fmf <- diff(numAllTime0[1:2])
naa <- nrow(groupInAll)
getIniGap <- function(groupInAll, fmf) {
firstIni <- groupInAll[1, 2]
if (firstIni != 1) {
add0 <- data.frame(samp.per = fmf, ini = 1,
fin = firstIni - 1, N = firstIni - 1)
rownames(add0) <- NULL
} else {
add0 <- NULL
}
add0
}
add0 <- getIniGap(groupInAll, fmf)
getFinGap <- function(groupInAll, fmf, timSer) {
lastFin <- groupInAll[nrow(groupInAll), 3]
if (lastFin != nrow(timSer)) {
addF <- data.frame(samp.per = fmf, ini = lastFin + 1,
fin = nrow(timSer),
N = nrow(timSer) - lastFin)
rownames(addF) <- NULL
} else {
addF <- NULL
}
addF
}
addF <- getFinGap(groupInAll, fmf, timSer)
getInBetweenGaps <- function(groupInAll, fmf) {
jumpsInd <- groupInAll[-1, 2] - groupInAll[-naa, 3]
jumpsSampPer <- groupInAll[-naa, 1]
rest <- jumpsInd%%jumpsSampPer
quotient <- jumpsInd/jumpsSampPer
addNonMultiples <- function(rest, groupInAll, fmf) {
nmI <- which(rest != 0)
if (length(nmI) > 0) {
leviia <- groupInAll[(nmI + 1), 2] - 1 - groupInAll[nmI, 3]
addNonMult <- data.frame(samp.per = fmf,
ini = groupInAll[nmI, 3] + 1,
fin = groupInAll[(nmI + 1), 2] - 1,
N = leviia)
rownames(addNonMult) <- NULL
} else {
addNonMult <- NULL
}
addNonMult
}
ana2 <- addNonMultiples(rest, groupInAll, fmf)
addMultiples <- function(rest, groupInAll, fmf, quotient, jumpsInd) {
mI <- which(rest == 0 & jumpsInd != 0)
if (length(mI) > 0) {
nsfrpq <- groupInAll[(mI + 1), 2] - 2 * quotient[mI] -
groupInAll[mI, 3] + 1
addMult <- data.frame(samp.per = groupInAll[mI, 1],
ini = groupInAll[mI, 3] + quotient[mI],
fin = groupInAll[(mI + 1), 2] - quotient[mI],
N = nsfrpq)
rownames(addMult) <- NULL
} else {
addMult <- NULL
}
addMult
}
ana3 <- addMultiples(rest, groupInAll, fmf, quotient, jumpsInd)
anaInBetween <- rbind(ana2, ana3)
anaInBetween
}
addIB <- getInBetweenGaps(groupInAll, fmf)
allToAdd <- rbind(add0, addIB, addF)
addGapRows <- function(allToAdd, groupInAll) {
groupInAll <- data.frame(groupInAll,
type = rep("data", nrow(groupInAll)))
if (length(allToAdd) > 0) {
allToAdd <- data.frame(allToAdd, type = rep("NAs", nrow(allToAdd)))
allRowsPresent <- rbind(groupInAll, allToAdd)
} else {
allRowsPresent <- groupInAll
}
rownames(allRowsPresent) <- NULL
allRowsPresent <- allRowsPresent[order(allRowsPresent$ini), ]
allRowsPresent
}
allRowsPresent <- addGapRows(allToAdd, groupInAll)
allRowsPresent
}
allRowsPresent <- addTrueGaps(groupInAll, numAllTime0, timSer)
indices2Dates <- function(allRowsPresent, timSer) {
withDates <- data.frame(sp.min = allRowsPresent$samp.per/60,
ini = timSer$time[allRowsPresent$ini],
fin = timSer$time[allRowsPresent$fin],
N = allRowsPresent$N,
type = allRowsPresent$type)
}
withDates <- indices2Dates(allRowsPresent, timSer)
row.names(withDates) <- NULL
withDates
} |
context("ml clustering - kmeans")
skip_databricks_connect()
test_that("ml_kmeans() param setting", {
test_requires_latest_spark()
sc <- testthat_spark_connection()
test_args <- list(
k = 3,
max_iter = 30,
init_steps = 4,
init_mode = "random",
seed = 234,
features_col = "wfaaefa",
prediction_col = "awiefjaw"
)
test_param_setting(sc, ml_kmeans, test_args)
})
test_that("'ml_kmeans' and 'kmeans' produce similar fits", {
sc <- testthat_spark_connection()
test_requires_version("2.0.0", "ml_kmeans() requires Spark 2.0.0+")
iris_tbl <- testthat_tbl("iris")
set.seed(123)
iris <- iris %>%
rename(
Sepal_Length = Sepal.Length,
Petal_Length = Petal.Length
)
R <- iris %>%
select(Sepal_Length, Petal_Length) %>%
kmeans(centers = 3)
S <- iris_tbl %>%
select(Sepal_Length, Petal_Length) %>%
ml_kmeans(~., k = 3L)
lhs <- as.matrix(R$centers)
rhs <- as.matrix(S$centers)
lhs <- lhs[order(lhs[, 1]), ]
rhs <- rhs[order(rhs[, 1]), ]
expect_equivalent(lhs, rhs)
})
test_that("'ml_kmeans' supports 'features' argument for backwards compat (
sc <- testthat_spark_connection()
iris_tbl <- testthat_tbl("iris")
set.seed(123)
iris <- iris %>%
rename(
Sepal_Length = Sepal.Length,
Petal_Length = Petal.Length
)
R <- iris %>%
select(Sepal_Length, Petal_Length) %>%
kmeans(centers = 3)
S <- iris_tbl %>%
select(Sepal_Length, Petal_Length) %>%
ml_kmeans(k = 3L, features = c("Sepal_Length", "Petal_Length"))
lhs <- as.matrix(R$centers)
rhs <- as.matrix(S$centers)
lhs <- lhs[order(lhs[, 1]), ]
rhs <- rhs[order(rhs[, 1]), ]
expect_equivalent(lhs, rhs)
})
test_that("ml_kmeans() works properly", {
sc <- testthat_spark_connection()
iris_tbl <- testthat_tbl("iris")
iris_kmeans <- ml_kmeans(iris_tbl, ~ . - Species, k = 5, seed = 11)
rs <- ml_predict(iris_kmeans, iris_tbl) %>%
dplyr::distinct(prediction) %>%
dplyr::arrange(prediction) %>%
dplyr::collect()
expect_equal(rs$prediction, 0:4)
})
test_that("ml_compute_cost() for kmeans", {
test_requires_version("2.0.0", "ml_compute_cost() requires Spark 2.0+")
sc <- testthat_spark_connection()
iris_tbl <- testthat_tbl("iris")
iris_kmeans <- ml_kmeans(iris_tbl, ~ . - Species, k = 5, seed = 11)
version <- spark_version(sc)
if (version >= "3.0.0") {
expect_error(ml_compute_cost(iris_kmeans, iris_tbl))
} else {
expect_equal(
ml_compute_cost(iris_kmeans, iris_tbl),
46.7123,
tolerance = 0.01, scale = 1
)
expect_equal(
iris_tbl %>%
ft_r_formula(~ . - Species) %>%
ml_compute_cost(iris_kmeans$model, .),
46.7123,
tolerance = 0.01, scale = 1
)
}
})
test_that("ml_compute_silhouette_measure() for kmeans", {
test_requires_version("3.0.0", "ml_compute_silhouette_measure() requires Spark 2.0+")
sc <- testthat_spark_connection()
iris_tbl <- testthat_tbl("iris")
iris_kmeans <- ml_kmeans(iris_tbl, ~ . - Species, k = 5, seed = 11)
version <- spark_version(sc)
expect_equal(
ml_compute_silhouette_measure(iris_kmeans, iris_tbl),
0.613,
tolerance = 0.01, scale = 1
)
expect_equal(
iris_tbl %>%
ft_r_formula(~ . - Species) %>%
ml_compute_silhouette_measure(iris_kmeans$model, .),
0.613,
tolerance = 0.01, scale = 1
)
}) |
paik <- function(formula, counts, resp.lvl = 2, data, circle.mult = .4, xlab = NULL, ylab = NULL, leg.title = NULL, leg.loc = NULL, show.mname = FALSE,...){
vars <- as.character(attr(terms(formula), "variables")[-1])
cond.var = vars[3]
rv <- data[,names(data)==vars[1]]
cv <- data[,names(data)==cond.var]
ov <- vars[vars!=vars[1]&vars!=cond.var]
or <- data[,names(data)==ov]
new.formula <- formula(counts ~ rv + ov + cv)
cl <- levels(data[,names(data) == cond.var])
ol <- levels(data[,names(data) == ov])
rl <- levels(data[,names(data) == vars[1]])
x <- xtabs(counts ~ rv + or + cv, data = data)
xm <- xtabs(counts ~ rv + or, data = data)
m.prop <- apply(xm, 2, function(x)x[resp.lvl]/sum(x))
r.prop <- matrix(nrow=length(cl), ncol=length(ol), dimnames=list(paste(cond.var,cl,sep="."),paste(ov,ol,sep=".")))
r.sum <- matrix(nrow=length(cl), ncol=length(ol), dimnames=list(paste(cond.var,cl,sep="."),paste(ov,ol,sep=".")))
for(i in 1:length(cl)){
tab <- x[,,cl = cl[i]]
r.prop[i,] <- apply(tab, 2, function(x)x[resp.lvl]/sum(x))
r.sum[i,] <- apply(tab, 2, sum)
}
b<-barplot(1:(length(ol)+2), plot = FALSE)
pts <- b[-c(1,(length(ol)+2))]
y.loc <- stack(as.data.frame(r.prop))[,1]
x.loc <- rep(pts,each=length(cl))
temp.ylab <- bquote(paste("Proportion ", .(vars[1]), " = ", .(rl[resp.lvl])))
p <- plot(x.loc, y.loc, xlim = c(1.5,(length(ol)+1.5)), type = "n", xaxt = "n", xlab = ifelse(is.null(xlab), ov, xlab), ylab = ifelse(is.null(ylab), eval(temp.ylab), ylab))
grid(p)
axis(1, at = pts , labels = ol)
tprop <- stack(as.data.frame(t(r.prop)))[,1]
tx <- rep(pts,length(cl))
tpropm <- matrix(ncol = length(pts), nrow = length(cl), data = tprop, byrow = TRUE)
txm <- matrix(ncol = length(pts), nrow = length(cl), data = tx, byrow = TRUE)
col <- gray(seq(1:length(cl))/length(cl))
circle.col <- rep(col, length(cl))
radii <- r.sum/sum(r.sum)
radii <- stack(as.data.frame(radii))[,1]*circle.mult
for(i in 1: length(radii))draw.circle(x.loc[i], y.loc[i], radii[i], col = circle.col[i])
if(length(pts)==2){
for(i in 1:length(cl)){
segments(txm[i,][1], tpropm[i,][1], txm[i,][2], tpropm[i,][2])
}
segments(pts[1], m.prop[1], pts[2], m.prop[2], lty = 2, lwd = 2)
}
if(length(pts)==3){
for(i in 1:length(cl)){
segments(txm[i,][1], tpropm[i,][1], txm[i,][2], tpropm[i,][2]);segments(txm[i,][2], tpropm[i,][2], txm[i,][3], tpropm[i,][3])
}
segments(pts[1], m.prop[1], pts[2], m.prop[2], lty = 2, lwd = 2); segments(pts[2], m.prop[2], pts[3], m.prop[3], lty = 2, lwd = 2)
}
if(length(pts)==4){
for(i in 1:length(cl)){
segments(txm[i,][1], tpropm[i,][1], txm[i,][2], tpropm[i,][2]);segments(txm[i,][2], tpropm[i,][2], txm[i,][3], tpropm[i,][3]);segments(txm[i,][3], tpropm[i,][3], txm[i,][4], tpropm[i,][4])
}
segments(pts[1], m.prop[1], pts[2], m.prop[2], lty = 2, lwd = 2); segments(pts[2], m.prop[2], pts[3], m.prop[3], lty = 2, lwd = 2); segments(pts[3], m.prop[3], pts[4], m.prop[4], lty = 2, lwd = 2)
}
if(length(pts)==5)stop("Number of rows in table must be less than 5")
points(x.loc, y.loc, pch = 19, cex=.6)
legend(ifelse(is.null(leg.loc),"topright",leg.loc),pch=rep(21,length(cl)),pt.bg = col, bg = "white", pt.cex = 1.5, title = ifelse(is.null(leg.title), cond.var, leg.title), legend = cl)
degree <- diff(m.prop)/diff(pts)*360
if(show.mname==TRUE)text(mean(pts),mean(m.prop) + 0.03*max(y.loc),srt=degree, "Marginal prop.")
res <- invisible(list(marginal.prop = r.prop, group.prop = r.sum/sum(r.sum)))
invisible(res)
} |
NMixPlugCondDensMarg <- function(x, ...)
{
UseMethod("NMixPlugCondDensMarg")
} |
get_preview <- function(srvy, obs = 6, pos = 1){
if(srvy %in% ces_codes){
if(srvy == "ces2019_web"){
hldr <- tempfile(fileext = ".dta")
if(!file.exists(hldr)){
cesfile <- "https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/DUS88V/RZFNOV"
utils::download.file(cesfile, hldr, quiet = F, mode = "wb")
survey_read <- haven::read_dta(hldr, encoding = "latin1")
assign("ces2019_web_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2019_phone"){
hldr <- tempfile(fileext = ".tab")
if(!file.exists(hldr)){
cesfile <- "https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/8RHLG1/DW4GZZ"
utils::download.file(cesfile, hldr, quiet = F, mode = "wb")
survey_read <- readr::read_tsv(hldr, show_col_types = F)
assign("ces2019_phone_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2015_web"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2015_web")
if(!file.exists(hldr)){
cesfile <- "https://ces-eec.sites.olt.ubc.ca/files/2018/07/CES15_CPSPES_Web_SSI-Full-Stata-14.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES15_CPS+PES_Web_SSI Full Stata 14.dta")
survey_read <- haven::read_dta(datafile)
assign("ces2015_web_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2015_phone"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2015_phone")
if(!file.exists(hldr)){
cesfile <- "https://ces-eec.sites.olt.ubc.ca/files/2018/08/CES2015-phone-Stata.zip"
hldr <- file.path(system.file("extdata", package = "cesR"), "ces2015_phone.zip")
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES2015_CPS-PES-MBS_complete-v2.dta")
survey_read <- haven::read_dta(datafile, encoding = "latin1")
assign("ces2015_phone_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2015_combo"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2015_combo")
if(!file.exists(hldr)){
cesfile <- "https://ces-eec.sites.olt.ubc.ca/files/2017/04/CES2015_Combined_Stata14.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES2015_Combined_Stata14.dta")
survey_read <- haven::read_dta(datafile)
assign("ces2015_combo_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2011"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2011")
if(!file.exists(hldr)){
cesfile <- "https://ces-eec.sites.olt.ubc.ca/files/2014/07/CES2011-final-1.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CPS&PES&MBS&WEB_2011_final.dta")
survey_read <- haven::read_dta(datafile)
assign("ces2011_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2008"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2008")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-2008.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES2015_Combined_Stata14.dta")
survey_read <- haven::read_sav(hldr)
assign("ces2008_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2004"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2004")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-2004.zip"
hldr <- file.path(system.file("extdata", package = "cesR"), "ces2004.zip")
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-2004_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces2004_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces0411"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces0411")
if(!file.exists(hldr)){
cesfile <- "https://ces-eec.sites.olt.ubc.ca/files/2014/07/CES_04060811_final_without-geo-data.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES_04060811_final_without-geo-data.dta")
survey_read <- haven::read_dta(datafile, encoding = "latin1")
assign("ces0411_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces0406"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces0406")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-2004-2006.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-2004-2006_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces0406_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces2000"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces2000")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-2000.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-2000_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces2000_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1997"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1997")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1997.zip"
hldr <- file.path(system.file("extdata", package = "cesR"), "ces1997.zip")
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1997_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1997_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1993"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1993")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1993.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1993_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1993_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1988"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1988")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1988.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1988_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1988_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1984"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1984")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1984.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1984_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1984_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1974"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1974")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1974.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1974_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1974_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces7480"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces7480")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1974-1980.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1974-1980_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces7480_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces72_jnjl"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces72jnjl")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1972-jun-july.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1972-jun-july_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces72_jnjl_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces72_sep"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces72sep")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1972-sept.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1972-sept_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces72_sep_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces72_nov"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces72nov")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1972-nov.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1972-nov_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces72_nov_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1968"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1968")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1968.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1968_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1968_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
else if(srvy == "ces1965"){
hldr <- tempfile(fileext = ".zip")
fldr <- paste0(tempdir(), "\\ces1965")
if(!file.exists(hldr)){
cesfile <- "https://raw.github.com/hodgettsp/ces_data/master/extdata/CES-E-1965.zip"
utils::download.file(cesfile, hldr, quiet = F)
utils::unzip(hldr, exdir = fldr)
datafile <- file.path(fldr, "CES-E-1965_F1.sav")
survey_read <- haven::read_sav(datafile)
assign("ces1965_preview", utils::head(labelled::to_factor(survey_read), obs), envir = as.environment(pos))
unlink(hldr, recursive = T)
unlink(fldr, recursive = T)
rm(survey_read)
}
}
}
else{
stop("Incorrect CES dataset code provided. Use function get_cescodes() for a printout of useable code calls")
}
}
ces_codes <- (c("ces2019_web", "ces2019_phone", "ces2015_web", "ces2015_phone", "ces2015_combo",
"ces2011", "ces2008", "ces2004", "ces0411", "ces0406", "ces2000", "ces1997", "ces1993",
"ces1988", "ces1984", "ces1974", "ces7480", "ces72_jnjl", "ces72_sep", "ces72_nov",
"ces1968", "ces1965")) |
"fillCols" <- function(data) {
nc <- ncol(data)
getConst <- function(vec) {
vals <- unique(vec)
return(length(vals[!is.na(vals)])==1)
}
constCol <- apply(data, 2, getConst)
data[,constCol] <- data[1,constCol]
return(data)
} |
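# Illustrative sketch (invented data): columns that carry exactly one distinct
# non-NA value are back-filled from their first row; other columns are untouched.
# df <- data.frame(site = c("A", NA, NA), value = c(1, 2, 3))
# fillCols(df) # 'site' becomes "A" in every row; 'value' is unchanged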
test_that("quantifier: all", {
q <- quantifier('all', alg='lukas')
expect_equal(q(0, 1), 0)
expect_equal(q(0.5, 1), 0.5)
expect_equal(q(1, 1), 1)
expect_equal(q(c(0, 0, 0), 1), 0)
expect_equal(q(c(0, 0.1, 0), 1), 0)
expect_equal(q(c(1, 0.9, 1), 1), 0.9)
expect_equal(q(c(0.5, 1, 0.8), 1), 0.5)
expect_equal(q(c(0.1, 0.3, 1), c(0, 0.9, 0.5)), 0.3)
expect_equal(q(c(0.1, 0.3, 1), c(0.01, 0.9, 0.5)), 0.1)
})
test_that("quantifier: some", {
q <- quantifier('some', alg='lukas')
expect_equal(q(0, 1), 0)
expect_equal(q(0.5, 1), 0.5)
expect_equal(q(1, 1), 1)
expect_equal(q(c(0, 0, 0), 1), 0)
expect_equal(q(c(0, 0.1, 0), 1), 0.1)
expect_equal(q(c(0.5, 0.3, 0.8), 1), 0.8)
expect_equal(q(c(0.5, 1, 0.8), 1), 1)
expect_equal(q(c(1, 0.3, 0.2), c(0, 0.9, 0.5)), 0.3)
expect_equal(q(c(1, 0.3, 0.2), c(0.01, 0.9, 0.5)), 1)
})
test_that("quantifier: at.least", {
q <- quantifier('at.least', n=2, alg='lukas')
expect_equal(q(0, 1), 0)
expect_equal(q(0.5, 1), 0)
expect_equal(q(1, 1), 0)
expect_equal(q(c(0, 0, 0), 1), 0)
expect_equal(q(c(0, 0.1, 0), 1), 0)
expect_equal(q(c(0.5, 0.1, 0), 1), 0.1)
expect_equal(q(c(0.5, 0.3, 0.8), 1), 0.5)
expect_equal(q(c(0.5, 1, 0.8), 1), 0.8)
expect_equal(q(c(1, 0.3, 0.2), c(0, 0.9, 0.5)), 0)
expect_equal(q(c(1, 0.3, 0.2, 0.9), c(0, 0.9, 0.5, 1)), 0.2)
expect_equal(q(c(1, 0.3, 0.2, 0.9), c(0.11, 0.9, 0.5, 1)), 0.3)
})
test_that("bochvar quantifier", {
for (quant in c('all', 'almost.all', 'most', 'many', 'some')) {
for (alg in c('lukas', 'goedel', 'goguen')) {
q <- quantifier(quant, alg=alg)
expect_equal(q(NA_real_), NA_real_)
expect_equal(q(NA_real_, 1), NA_real_)
expect_equal(q(NA_real_, 0.3), NA_real_)
expect_equal(q(NA_real_, 0), NA_real_)
expect_equal(q(c(0.1, NA, 1), 1), NA_real_)
expect_equal(q(c(0.1, NA, 1), c(0.01, 0.9, 0.5)), NA_real_)
expect_error(q(1, NA))
}
}
}) |
set_survey_vars <- function(
.svy, x, name = "__SRVYR_TEMP_VAR__", add = FALSE
) {
out <- .svy
if (length(x) != nrow(.svy)) {
cur_group_rows <- group_rows(cur_svy_full())[[cur_group_id()]]
if (length(x) == length(cur_group_rows)) {
x_stretched <- rep(FALSE, nrow(.svy))
x_stretched[cur_group_rows] <- x
x <- x_stretched
}
}
if (inherits(.svy, "twophase2")) {
if (!add) {
out$phase1$sample$variables <- select(out$phase1$sample$variables, dplyr::one_of(group_vars(out)))
}
out$phase1$sample$variables[[name]] <- x
} else {
if (!add) {
out$variables <- select(out$variables, dplyr::one_of(group_vars(out)))
}
out$variables[[name]] <- x
}
out
}
get_var_est <- function(
stat, vartype, grps = "", level = 0.95, df = Inf, pre_calc_ci = FALSE, deff = FALSE
) {
out_width <- 1
out <- lapply(vartype, function(vvv) {
if (vvv == "se") {
se <- survey::SE(stat)
if (!inherits(se, "data.frame")) {
se <- data.frame(matrix(se, ncol = out_width))
}
names(se) <- "_se"
se
} else if (vvv == "ci" && !pre_calc_ci) {
if (length(level) == 1) {
ci <- data.frame(matrix(
stats::confint(stat, level = level, df = df),
ncol = 2 * out_width
))
names(ci) <- c("_low", "_upp")
} else {
lci <- lapply(level, function(x) {as.data.frame(stats::confint(stat,level = x, df = df))})
ci <- dplyr::bind_cols(lci)
names(ci) <- paste0(c("_low", "_upp"), rep(level, each = 2) * 100)
}
ci
} else if (vvv == "ci" && pre_calc_ci) {
if (inherits(stat, "data.frame")) {
ci <- data.frame(stat[c("ci_l", "ci_u")])
names(ci) <- c("_low", "_upp")
} else {
ci <- data.frame(matrix(stats::confint(stat), ncol = 2 * out_width))
names(ci) <- c("_low", "_upp")
}
ci
} else if (vvv == "var") {
var <- data.frame(matrix(survey::SE(stat) ^ 2, ncol = out_width))
names(var) <- "_var"
var
} else if (vvv == "cv") {
cv <- data.frame((matrix(survey::cv(stat), ncol = out_width)))
names(cv) <- "_cv"
cv
} else {
stop(paste0("Unexpected vartype ", vvv))
}
})
coef <- data.frame(matrix(coef(stat), ncol = out_width))
names(coef) <- "coef"
out <- c(list(coef), out)
if (!identical(grps, "")) {
out <- c(list(as.data.frame(stat[grps], stringsAsFactors = FALSE)), out)
}
if (!isFALSE(deff)) {
deff <- data.frame(matrix(survey::deff(stat), ncol = out_width))
names(deff) <- "_deff"
out <- c(out, list(deff))
}
as_srvyr_result_df(dplyr::bind_cols(out))
}
get_var_est_quantile <- function(stat, vartype, q, grps = "", level = 0.95, df = Inf) {
qnames <- paste0("_q", gsub("\\.", "", formatC(q * 100, width = 2, flag = "0")))
out_width <- length(qnames)
out <- lapply(vartype, function(vvv) {
if (vvv == "se") {
se <- survey::SE(stat)
if (!inherits(se, "data.frame")) {
se <- data.frame(matrix(se, ncol = out_width))
}
names(se) <- paste0(qnames, "_se")
se
} else if (vvv == "ci") {
if (inherits(stat, "data.frame")) {
ci_cols <- grep("^ci_[lu]", names(stat))
ci <- data.frame(stat[, ci_cols])
} else {
ci <- data.frame(matrix(stats::confint(stat), ncol = 2 * out_width))
}
names(ci) <- paste0(qnames, rep(c("_low", "_upp"), each = out_width))
ci
} else if (vvv == "var") {
var <- data.frame(matrix(survey::SE(stat) ^ 2, ncol = out_width))
names(var) <- paste0(qnames, "_var")
var
} else if (vvv == "cv") {
cv <- data.frame((matrix(survey::cv(stat), ncol = out_width)))
names(cv) <- paste0(qnames, "_cv")
cv
} else {
stop(paste0("Unexpected vartype ", vvv))
}
})
coef <- data.frame(matrix(coef(stat), ncol = out_width))
names(coef) <- qnames
out <- lapply(out, as.data.frame)
out <- c(list(coef), out)
if (!identical(grps, "")) {
out <- c(list(as.data.frame(stat[grps])), out)
}
as_srvyr_result_df(dplyr::bind_cols(out))
}
stop_for_factor <- function(x) {
if (is.factor(x)) {
stop(paste0(
"Factor not allowed in survey functions, should be used as a grouping variable."
), call. = FALSE)
} else if (is.character(x)) {
stop(paste0(
"Character vectors not allowed in survey functions, should be used as a grouping variable."
), call. = FALSE)
}
}
as_srvyr_result_df <- function(x) {
class(x) <- c("srvyr_result_df", class(x))
x
}
is_srvyr_result_df <- function(x) {
inherits(x, "srvyr_result_df")
}
Math.srvyr_result_df <- function(x, ...) {
out <- NextMethod("Math", x)
class(out) <- c("srvyr_result_df", class(out))
out
}
Ops.srvyr_result_df <- function(e1, e2) {
out <- NextMethod()
class(out) <- c("srvyr_result_df", class(out))
out
} |
`aGETXprofile` <-
function(jx, jy, jz, LAB="A", myloc=NULL, PLOT=FALSE, asp=1)
{
if(missing(myloc)) { myloc=NULL }
if(missing(LAB)) { LAB="A" }
if(missing(asp)) { asp=NULL }
if(is.null(myloc))
{
myloc = locator(2, type='o')
}
dx = sign(diff(myloc$x))*mean(diff(jx))
dy = sign(diff(myloc$y))*mean(diff(jy))
Lrunvent = lm ( myloc$y ~ myloc$x )
newx = seq(from=myloc$x[1], to=myloc$x[2], by=dx)
JY = Lrunvent$coefficients[1]+Lrunvent$coefficients[2]*newx
newy = seq(from=myloc$y[1], to=myloc$y[2], by=dy)
JX = (newy-Lrunvent$coefficients[1])/Lrunvent$coefficients[2]
allx = c(newx, JX)
ally = c(JY, newy)
ox = order(allx)
allx = allx[ox]
ally = ally[ox]
boxx = findInterval(allx, jx)
boxy = findInterval(ally, jy, all.inside = FALSE)
flag = boxy>0 & boxy<length(jy)
pts = cbind(boxx[flag], boxy[flag])
LX = allx[flag]
LY = ally[flag]
RX = sqrt((LX-LX[1])^2+ (LY-LY[1])^2)
LZ = jz[pts]
pnt1 = sqrt((myloc$x[1]-LX[1])^2+ (myloc$y[1]-LY[1])^2)
pnt2 = sqrt((myloc$x[2]-LX[1])^2+ (myloc$y[2]-LY[1])^2)
px1 = findInterval(pnt1, RX)
px2 = findInterval(pnt2, RX)
if(PLOT==TRUE)
{
cdev = dev.cur()
dev.new()
plot(RX, LZ, type='l', xlab="m", ylab="m", ylim=range(jz, na.rm=TRUE), asp=asp)
points( c(pnt1, pnt2), c(LZ[px1], LZ[px2] ), pch=c(6,8), col=c(2,4) )
text(c(pnt1, pnt2),c(LZ[px1], LZ[px2] ) , labels=c(LAB, paste(sep="", LAB, "'")), col=c(2,4), pos=3 )
dev.set(cdev)
}
invisible(list(RX=RX, RZ=LZ, LOC=myloc, LAB=LAB ))
} |
translogHessian <- function( xNames, data, coef, yName = NULL,
dataLogged = FALSE, bordered = FALSE ) {
checkNames( c( xNames ), names( data ) )
nExog <- length( xNames )
nCoef <- 1 + nExog + nExog * ( nExog + 1 ) / 2
if( nCoef != length( coef ) ) {
stop( "a translog function with ", nExog, " exogenous variables",
" must have exactly ", nCoef, " coefficients" )
}
result <- list()
alpha <- coef[ 2:( nExog + 1 ) ]
beta <- vecli2m( coef[ ( nExog + 2 ):nCoef ] )
newXNames <- paste( "x.", c( 1:nExog ), sep = "" )
dNames <- paste( "d.", c( 1:nExog ), sep = "" )
if( dataLogged ) {
logData <- data.frame( no = c( 1:nrow( data ) ) )
for( i in seq( along = xNames ) ) {
logData[[ newXNames[ i ] ]] <- data[[ xNames[ i ] ]]
}
} else {
logData <- data.frame( no = c( 1:nrow( data ) ) )
for( i in seq( along = xNames ) ) {
logData[[ newXNames[ i ] ]] <- log( data[[ xNames[ i ] ]] )
}
}
if( is.null( yName ) ){
logData$yHat <- translogCalc( newXNames, logData, coef,
dataLogged = TRUE )
} else {
if( dataLogged ) {
logData$yHat <- data[[ yName ]]
} else {
logData$yHat <- log( data[[ yName ]] )
}
}
deriv <- translogDeriv( newXNames, logData, coef, yName = "yHat",
dataLogged = TRUE )$deriv
names( deriv ) <- dNames
logData <- cbind( logData, deriv )
hessian <- function( values ) {
result <- matrix( 0, nExog + bordered, nExog + bordered )
for( i in 1:nExog ) {
if( bordered ) {
result[ 1, i + 1 ] <- values[[ dNames[ i ] ]]
result[ i + 1, 1 ] <- values[[ dNames[ i ] ]]
}
for( j in i:nExog ) {
result[ i + bordered, j + bordered ] <-
values[[ dNames[ i ] ]] * values[[ dNames[ j ] ]] /
exp( values[[ "yHat" ]] ) -
ifelse( i == j, 1, 0 ) * values[[ dNames[ i ] ]] /
exp( values[[ newXNames[ i ] ]] ) +
beta[ i, j ] *
exp( values[[ "yHat" ]] ) /
( exp( values[[ newXNames[ i ] ]] ) *
exp( values[[ newXNames[ j ] ]] ) )
}
}
result[ lower.tri( result ) ] <- t( result )[ lower.tri( result ) ]
result <- list( result )
return( result )
}
result <- apply( logData, 1, hessian )
result <- lapply( result, "[[", 1 )
return( result )
} |
internet_browser <- hijack(r_sample_factor,
name = "Browser",
x = c("Chrome", "IE", "Firefox", "Safari", "Opera", "Android"),
prob = c(0.5027, 0.175, 0.1689, 0.0994, 0.017, 0.0132)
) |
test_that("fit_sa works", {
expect_error(fit_sa(Z_potts, mrfi(), family = "onepar", init = c(1,2), gamma_seq = 1:0))
expect_is(fit_sa(Z_potts, mrfi(), family = "onepar", gamma_seq = 1:0), "mrfout")
expect_true(is_valid_array(fit_sa(Z_potts, mrfi(), family = "dif", init = rep(-1, 4*2), gamma_seq = seq(1,0,-0.1))$theta, "dif"))
expect_is(fit_sa(Z_potts, mrfi(), family = "onepar", gamma_seq = seq(1,0,-0.1), refresh_each = 2, refresh_cycles = 5), "mrfout")
})
test_that("fit_sa works with subregions", {
Z <- Z_potts
Z <- ifelse( col(Z) >= (row(Z)- 75)^2/150, Z, NA )
expect_error(fit_sa(Z, mrfi(), family = "onepar", init = c(1,2), gamma_seq = 1:0))
expect_is(fit_sa(Z, mrfi(), family = "onepar", gamma_seq = 1:0), "mrfout")
expect_true(is_valid_array(fit_sa(Z, mrfi(), family = "dif", init = rep(-1, 4*2), gamma_seq = seq(1,0,-0.1))$theta, "dif"))
expect_is(fit_sa(Z, mrfi(), family = "onepar", gamma_seq = seq(1,0,-0.1), refresh_each = 2, refresh_cycles = 5), "mrfout")
}) |
context("update_json")
skip_if(solr_missing(conn))
skip_on_ci()
test_that("update_json works", {
skip_on_cran()
file <- system.file("examples", "books2.json", package = "solrium")
if (!conn$collection_exists("books")) conn$collection_create("books")
aa <- conn$update_json(files = file, name = "books")
expect_is(aa, "list")
expect_named(aa, c("responseHeader"))
expect_true(conn$collection_exists("books"))
})
test_that("update_json works with old format", {
skip_on_cran()
file <- system.file("examples", "books2.json", package = "solrium")
if (!conn$collection_exists("books")) conn$collection_create("books")
aa <- update_json(conn, files = file, name = "books")
expect_is(aa, "list")
expect_named(aa, c("responseHeader"))
expect_true(conn$collection_exists("books"))
})
test_that("update_json fails well", {
skip_on_cran()
expect_error(update_json(), "argument \"conn\" is missing")
expect_error(update_json(5), "conn must be a SolrClient object")
}) |
get_package_version <- function(x, lib_loc = NULL) {
version <- suppressWarnings(utils::packageDescription(x, lib.loc = lib_loc,
fields = "Version"))
if (is.na(version)) stop(packageNotFoundError(x, lib_loc, sys.call()))
return(version)
} |
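# Example usage (sketch): returns the version string of an installed package;
# a missing package signals packageNotFoundError(), a helper assumed to be
# defined elsewhere in this package.
# get_package_version("utils") # e.g. "4.3.1" on R 4.3.1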
plot_EIC<-function(fullms,peakID=333,ms=1,CE=0){
if(ms==1){
if (is.numeric(peakID) == FALSE) {
for (i in fullms$ms$peakID){
peak <- fullms$RawData1[which(fullms$RawData1[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction (everything after '#' was stripped as a comment); "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$ms$name[fullms$ms$peakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
npeak<-as.data.frame(npeak)
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$ms$name[fullms$ms$peakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
tiff(filename=paste(i,"Scan.pdf",sep=""), res = 800, width = 6, height = 4, units = 'in')
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$ms$name[fullms$ms$peakID==i]))
dev.off()
}}else {
i=peakID
peak <- fullms$RawData1[which(fullms$RawData1[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction; "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$ms$name[fullms$ms$peakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$ms$name[fullms$ms$peakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
tiff(filename=paste(i,"Scan.tiff",sep=""), res = 800, width = 6, height = 4, units = 'in')
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$ms$name[fullms$ms$peakID==i]))
dev.off()
}}else{
if(CE==0){
if (is.numeric(peakID) == FALSE) {
for (i in fullms$annotation$PeakID){
peak <- fullms$RawData1[which(fullms$RawData1[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction; "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
npeak<-as.data.frame(npeak)
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
pdf(file=paste(i,"Scan.pdf",sep=""))
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
dev.off()
}}
else {
i=peakID
peak <- fullms$RawData1[which(fullms$RawData1[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction; "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
npeak<-as.data.frame(npeak)
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
pdf(file=paste(i,"Scan.pdf",sep=""))
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
dev.off()
}
}else{
if (is.numeric(peakID) == FALSE) {
for (i in fullms$annotation$PeakID){
peak <- fullms$RawData2[which(fullms$RawData2[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction; "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
npeak<-as.data.frame(npeak)
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
pdf(file=paste(i,"Scan.pdf",sep=""))
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
dev.off()
}}
else {
i=peakID
peak <- fullms$RawData2[which(fullms$RawData2[,7] == i), ]
peak<-as.data.frame(peak)
npeak<-peak[order(peak$RT),]
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(npeak$RT/60 ,y=npeak$intensity))+
geom_area(data=npeak,aes(x=npeak$RT/60,y=npeak$intensity),fill="cyan3") # fill colour lost in extraction; "cyan3" is a stand-in
p3b<-p3+theme_light()+theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3b+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Raw:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Raw.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
plot
npeak<-as.data.frame(npeak)
npeakRT<-npeak$RT/60
p3 <- ggplot(data =npeak,aes(x=npeakRT,y=npeak$intensity))+ geom_point()+
geom_smooth(method = "loess",se=F)+theme_light()+
geom_ribbon(aes(ymin=0,ymax=predict(loess(npeak$intensity~npeakRT))),fill="cyan3")+ # fill colour lost in extraction; "cyan3" is a stand-in
theme(axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),axis.title.x=element_text(size=16),axis.title.y = element_text(size=16,margin=margin(t=0,r=20,b=60,l=0)))
plot<-p3+ scale_x_continuous(name="Retention time (min)")+ scale_y_continuous(name="Intensity",labels= scales::scientific)+
ggtitle(paste("Smooth:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
ggsave(plot,filename=paste(i,"Smooth.tiff",sep=""), dpi = 800, width = 6, height = 4, units = 'in')
diff<-diff(npeak$`m/z`)*100
pdf(file=paste(i,"Scan.pdf",sep=""))
plot(diff,type="h",xlab="Scan",ylab="m/z difference",cex.lab=1.5,lwd=5,col="cyan3",panel.first=grid(lty=1))
abline(h=mean(diff),col="
abline(h=0,col="black")
title(main=paste("QC m/z scan:",fullms$annotation$Metabolite[fullms$annotation$PeakID==i]))
dev.off()
}
}}} |
print.mvdapca <- function (x, ...)
{
nobj <- nrow(x$scores)
nvars <- ncol(x$Xdata)
cat("Principal Component Analysis\n")
cat("\nFit Summary: \nNumber of objects =", nobj, "\nNumber of Variables =",
nvars)
cat("\nPercent Variation Explained:\n")
print(round(x$Percents.Explained, 3))
cat("\nCross-Validation Results:\n")
print(data.frame(PRESS = round(x$CV, 3)))
cat("\nEigenvalues:\n")
if(length(x$D) == 1) {
print(round(x$D, 3))
} else {
print(round(diag(x$D), 3))
}
} |
dpearson0 <- function(x,mean,sd,params,log=FALSE) {
if (!missing(params)) { mean <- params[[1]]; sd <- params[[2]] }
dnorm(x,mean=mean,sd=sd,log=log)
}
ppearson0 <- function(q,mean,sd,params,lower.tail=TRUE,log.p=FALSE) {
if (!missing(params)) { mean <- params[[1]]; sd <- params[[2]] }
pnorm(q,mean=mean,sd=sd,lower.tail=lower.tail,log.p=log.p)
}
qpearson0 <- function(p,mean,sd,params,lower.tail=TRUE,log.p=FALSE) {
if (!missing(params)) { mean <- params[[1]]; sd <- params[[2]] }
qnorm(p,mean=mean,sd=sd,lower.tail=lower.tail,log.p=log.p)
}
rpearson0 <- function(n,mean,sd,params) {
if (!missing(params)) { mean <- params[[1]]; sd <- params[[2]] }
rnorm(n,mean=mean,sd=sd)
} |
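# Quick sanity sketch (added for illustration): the type-0 Pearson wrappers are
# thin aliases for the normal distribution, so they match the *norm functions.
# dpearson0(0, mean = 0, sd = 1) == dnorm(0) # TRUE
# qpearson0(0.975, params = list(0, 1)) # approx. 1.959964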
lake.number <- function(bthA,bthD,uStar,St,metaT,metaB,averageHypoDense){
g <- 9.81
dz <- 0.1
Ao <- bthA[1]
Zo <- bthD[1]
if (Ao==0){stop('Surface area cannot be zero, check *.bth file')}
layerD <- seq(Zo,max(bthD),dz)
layerA <- stats::approx(bthD,bthA,layerD)$y
Zv = layerD*layerA*dz
Zcv = sum(Zv)/sum(layerA)/dz
St_uC = St*Ao/g
Ln = g*St_uC*(metaT+metaB)/(2*averageHypoDense*uStar^2*Ao^(3/2)*Zcv)
return(Ln)
} |
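# Hypothetical call (all inputs invented for illustration): bathymetric areas
# 'bthA' (m^2) and depths 'bthD' (m) start at the surface, as the code assumes.
# lake.number(bthA = c(1e6, 8e5, 2e5, 1e3), bthD = c(0, 5, 10, 15),
#             uStar = 0.01, St = 500, metaT = 4, metaB = 8,
#             averageHypoDense = 999.5)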