library(testthat)
library(parsnip)
library(rlang)
context("multinom regression")
source("helpers.R")
source(test_path("helper-objects.R"))
hpc <- hpc_data[1:150, c(2:5, 8)]
test_that('primary arguments', {
basic <- multinom_reg()
expect_error(
basic_glmnet <- translate(basic %>% set_engine("glmnet")),
"For the glmnet engine, `penalty` must be a single"
)
mixture <- multinom_reg(penalty = 0.1, mixture = 0.128)
mixture_glmnet <- translate(mixture %>% set_engine("glmnet"))
expect_equal(mixture_glmnet$method$fit$args,
list(
x = expr(missing_arg()),
y = expr(missing_arg()),
weights = expr(missing_arg()),
alpha = new_empty_quosure(0.128),
family = "multinomial"
)
)
penalty <- multinom_reg(penalty = 1)
penalty_glmnet <- translate(penalty %>% set_engine("glmnet"))
expect_equal(penalty_glmnet$method$fit$args,
list(
x = expr(missing_arg()),
y = expr(missing_arg()),
weights = expr(missing_arg()),
family = "multinomial"
)
)
mixture_v <- multinom_reg(penalty = 0.01, mixture = tune())
mixture_v_glmnet <- translate(mixture_v %>% set_engine("glmnet"))
expect_equal(mixture_v_glmnet$method$fit$args,
list(
x = expr(missing_arg()),
y = expr(missing_arg()),
weights = expr(missing_arg()),
alpha = new_empty_quosure(tune()),
family = "multinomial"
)
)
})
test_that('engine arguments', {
glmnet_nlam <- multinom_reg(penalty = 0.01)
expect_equal(
translate(glmnet_nlam %>% set_engine("glmnet", nlambda = 10))$method$fit$args,
list(
x = expr(missing_arg()),
y = expr(missing_arg()),
weights = expr(missing_arg()),
nlambda = new_empty_quosure(10),
family = "multinomial"
)
)
with_path <-
multinom_reg(penalty = 1) %>%
set_engine("glmnet", path_values = 4:2) %>%
translate()
expect_equal(
names(with_path$method$fit$args),
c("x", "y", "weights", "lambda", "family")
)
expect_equal(
rlang::eval_tidy(with_path$method$fit$args$lambda),
4:2
)
})
test_that('updating', {
expr1 <- multinom_reg() %>% set_engine("glmnet", intercept = TRUE)
expr1_exp <- multinom_reg(mixture = 0) %>% set_engine("glmnet", intercept = TRUE)
expr2 <- multinom_reg(mixture = tune()) %>% set_engine("glmnet", nlambda = tune())
expr2_exp <- multinom_reg(mixture = tune()) %>% set_engine("glmnet", nlambda = 10)
expr3 <- multinom_reg(mixture = 0, penalty = tune()) %>% set_engine("glmnet")
expr3_exp <- multinom_reg(mixture = 1) %>% set_engine("glmnet")
expr4 <- multinom_reg(mixture = 0) %>% set_engine("glmnet", nlambda = 10)
expr4_exp <- multinom_reg(mixture = 0) %>% set_engine("glmnet", nlambda = 10, pmax = 2)
expr5 <- multinom_reg(mixture = 1) %>% set_engine("glmnet", nlambda = 10)
expr5_exp <- multinom_reg(mixture = 1) %>% set_engine("glmnet", nlambda = 10, pmax = 2)
expect_equal(update(expr1, mixture = 0), expr1_exp)
expect_equal(update(expr2, nlambda = 10), expr2_exp)
expect_equal(update(expr3, mixture = 1, fresh = TRUE) %>% set_engine("glmnet"), expr3_exp)
expect_equal(update(expr5) %>% set_engine("glmnet", nlambda = 10, pmax = 2), expr5_exp)
param_tibb <- tibble::tibble(mixture = 1/3, penalty = 1)
param_list <- as.list(param_tibb)
expr4_updated <- update(expr4, param_tibb)
expect_equal(expr4_updated$args$mixture, 1/3)
expect_equal(expr4_updated$args$penalty, 1)
expect_equal(expr4_updated$eng_args$nlambda, rlang::quo(10))
expr4_updated_lst <- update(expr4, param_list)
expect_equal(expr4_updated_lst$args$mixture, 1/3)
expect_equal(expr4_updated_lst$args$penalty, 1)
expect_equal(expr4_updated_lst$eng_args$nlambda, rlang::quo(10))
})
test_that('bad input', {
expect_error(multinom_reg(mode = "regression"))
expect_error(translate(multinom_reg(penalty = 0.1) %>% set_engine("wat?")))
expect_error(multinom_reg(penalty = 0.1) %>% set_engine())
expect_warning(translate(multinom_reg(penalty = 0.1) %>% set_engine("glmnet", x = hpc[,1:3], y = hpc$class)))
})
|
context("excel")
test_that("excel_clasic_pal works", {
pal <- excel_pal()
n <- 5L
values <- pal(n)
expect_is(values, "character")
expect_eqNe(length(values), n)
})
test_that("excel_clasic_pal with line = TRUE works", {
pal <- excel_pal(line = TRUE)
n <- 5L
values <- pal(n)
expect_is(values, "character")
expect_eqNe(length(values), n)
})
test_that("calc_shape_pal raises warning for large n", {
expect_warning(excel_pal()(8))
})
test_that("excel_new_pal works", {
pal <- excel_new_pal()
n <- 5L
vals <- pal(n)
expect_is(vals, "character")
expect_eqNe(length(vals), n)
})
test_that("excel_new_pal raises error for bad n", {
expect_warning(excel_new_pal()(7))
})
test_that("theme_excel works", {
expect_is(theme_excel(), "theme")
})
test_that("excel_new_pal raises error with bad theme name", {
expect_error(excel_new_pal("adfaasdfa"), regexp = "`theme` must be one of")
})
test_that("scale_fill_excel works", {
expect_is(scale_fill_excel(), "ScaleDiscrete")
})
test_that("scale_colour_excel works", {
expect_is(scale_colour_excel(), "ScaleDiscrete")
})
test_that("scale_colour_excel works", {
expect_is(scale_fill_excel_new(), "ScaleDiscrete")
})
test_that("scale_fill_excel works", {
expect_is(scale_colour_excel_new(), "ScaleDiscrete")
})
test_that("theme_excel with horizontal = FALSE works", {
thm <- theme_excel(horizontal = FALSE)
expect_eqNe(thm$panel.grid.major.y, element_blank())
})
|
qc.stats <- function (x = NULL,
which.data = "raw.data",
mito.genes = NULL,
s.phase.genes = s.phase,
g2m.phase.genes = g2m.phase) {
if ("iCellR" != class(x)[1]) {
stop("x should be an object of class iCellR")
}
if (which.data == "raw.data") {
DATA <- [email protected]
}
if (which.data == "main.data") {
DATA <- [email protected]
}
if (which.data == "imputed.data") {
DATA <- [email protected]
}
UMIs <- colSums(DATA)
nGenes <- sapply(DATA, function(col) sum(col != 0, na.rm = TRUE)) # number of detected (nonzero) genes per cell
if (is.null(mito.genes)) {
mito.genes <- grep(pattern = "^mt\\-", x = rownames(DATA), value = TRUE, ignore.case = TRUE)
if ( length(mito.genes) == 0 ) {
mito.genes <- grep(pattern = "^mt\\.", x = rownames(DATA), value = TRUE, ignore.case = TRUE)
}
}
mito <- subset(DATA, rownames(DATA) %in% mito.genes)
if (dim(mito)[1] == 0) {
mito.percent <- rep(0, dim(mito)[2])
} else {
mito.percent <- colSums(mito) / UMIs
}
data <- row.names(DATA)
s.phase.genes <- paste("^",s.phase.genes,"$", sep="")
s.phase.genes <- paste(s.phase.genes,collapse="|")
s.phase.genes <- grep(s.phase.genes, x = data, value = T, ignore.case = TRUE)
s.phase.genes1 <- subset(DATA,rownames(DATA) %in% s.phase.genes)
if(dim(s.phase.genes1)[1] != 0){
s.phase.genes <- colSums(s.phase.genes1)
s.phase.genes <- (s.phase.genes/UMIs)
}
if(dim(s.phase.genes1)[1] == 0){
s.phase.genes <- rep(0,dim(s.phase.genes1)[2])
}
g2m.phase.genes <- paste("^",g2m.phase.genes,"$", sep="")
g2m.phase.genes <- paste(g2m.phase.genes,collapse="|")
g2m.phase.genes <- grep(g2m.phase.genes, x = data, value = T, ignore.case = TRUE)
g2m.phase.genes1 <- subset(DATA,rownames(DATA) %in% g2m.phase.genes)
if(dim(g2m.phase.genes1)[1] != 0){
g2m.phase.genes <- colSums(g2m.phase.genes1)
g2m.phase.genes <- (g2m.phase.genes/UMIs)
}
}
if(dim(g2m.phase.genes1)[1] == 0){
g2m.phase.genes <- rep(0,dim(g2m.phase.genes1)[2])
}
QC <- list(colnames(DATA),
as.numeric(nGenes),
as.numeric(UMIs),
as.numeric(mito.percent),
as.numeric(s.phase.genes),
as.numeric(g2m.phase.genes))
names(QC) <- c("CellIds",
"nGenes",
"UMIs",
"mito.percent",
"S.phase.probability",
"g2m.phase.probability")
STATS <- as.data.frame(QC)
attributes(x)$stats <- STATS
return(x)
}
|
plot_xy_NumGroup <- function(data, xcol, ycol, NumGroup, symsize = 2.5, symthick = 1, s_alpha = 1, TextXAngle = 0, fontsize = 20){
  # Tidy-eval embracing ({{ }}) is only needed for aes() mappings;
  # plain numeric arguments can be passed through directly.
  ggplot2::ggplot(data, aes(x = {{ xcol }},
                            y = {{ ycol }}))+
    geom_point(size = symsize,
               alpha = s_alpha,
               aes(fill = {{ NumGroup }}),
               shape = 21,
               stroke = symthick)+
    theme_classic(base_size = fontsize)+
    theme(strip.background = element_blank())+
    guides(x = guide_axis(angle = TextXAngle))+
    scale_fill_grafify_c()
}
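# Usage sketch (assumes the grafify package is attached, since the body relies on
# grafify's scale_fill_grafify_c() and on ggplot2 being imported):
# plot_xy_NumGroup(mtcars, xcol = wt, ycol = mpg, NumGroup = disp)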
|
.datatable.aware <- TRUE
met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE,
lat, lon, start_date, end_date, ...) {
start_date <- lubridate::parse_date_time(start_date, tz = "UTC",
orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd"))
end_date <- lubridate::parse_date_time(end_date, tz = "UTC",
orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd"))
dir.create(file.path(outfolder), recursive = TRUE, showWarnings = FALSE)
years_wanted <- lubridate::year(start_date):lubridate::year(end_date)
res <- list()
for (year in years_wanted) {
yrstart = max(lubridate::date(start_date), lubridate::ymd(paste0(year, "-01-01")))
yrend = min(lubridate::date(end_date), lubridate::ymd(paste0(year, "-12-31")))
ncfile <- file.path(in.path, paste(in.prefix, year, "nc", sep = "."))
csvfile <- file.path(outfolder, paste(in.prefix, year, "csv", sep = "."))
if (file.exists(csvfile) && as.logical(overwrite) != TRUE){
PEcAn.logger::logger.warn(paste("Output file", csvfile, "already exists! Moving to next year."))
next
}
met.nc <- ncdf4::nc_open(ncfile)
on.exit(close_nc_if_open(met.nc), add = FALSE)
dt <- mean(diff(udunits2::ud.convert(
met.nc$dim$time$vals,
met.nc$dim$time$units,
"hours since 1700-01-01 00:00:00")))
if (dt < 1) {
ncdf4::nc_close(met.nc)
upscale_result <- PEcAn.data.atmosphere::upscale_met(
outfolder = outfolder, input_met = ncfile,
site.id = in.prefix, resolution = 1/24,
overwrite = overwrite)
met.nc <- ncdf4::nc_open(upscale_result$file)
}
tmp.met <- PEcAn.data.atmosphere::load.cfmet(
met.nc, lat = lat, lon = lon,
start.date = yrstart, end.date = yrend)
ncdf4::nc_close(met.nc)
if (dt > 1) {
tmp.met <- PEcAn.data.atmosphere::cfmet.downscale.time(cfmet = tmp.met, output.dt = 1)
}
met <- cf2biocro(tmp.met)
utils::write.csv(met, file = csvfile, row.names = FALSE)
res[[as.character(year)]] <- data.frame(
file = csvfile,
host = PEcAn.remote::fqdn(),
mimetype = "text/csv",
formatname = "biocromet",
startdate = yrstart,
enddate = yrend,
dbfile.name = in.prefix,
stringsAsFactors = FALSE)
}
result <- do.call("rbind", res)
return(result)
}
cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE) {
if ((!is.null(longitude)) & zulu2solarnoon) {
solarnoon_offset <- udunits2::ud.convert(longitude/360, "day", "minute")
met[, `:=`(solardate = met$date + lubridate::minutes(solarnoon_offset))]
}
if (!"relative_humidity" %in% colnames(met)) {
if (all(c("air_temperature", "air_pressure", "specific_humidity") %in% colnames(met))) {
rh <- PEcAn.data.atmosphere::qair2rh(
qair = met$specific_humidity,
temp = udunits2::ud.convert(met$air_temperature, "Kelvin", "Celsius"),
press = udunits2::ud.convert(met$air_pressure, "Pa", "hPa"))
met[, `:=`(relative_humidity = rh)]
} else {
PEcAn.logger::logger.error("neither relative_humidity nor [air_temperature, air_pressure, and specific_humidity]",
"are in met data")
}
}
if (!"ppfd" %in% colnames(met)) {
if ("surface_downwelling_photosynthetic_photon_flux_in_air" %in% colnames(met)) {
ppfd <- udunits2::ud.convert(met$surface_downwelling_photosynthetic_photon_flux_in_air, "mol", "umol")
} else if ("surface_downwelling_shortwave_flux_in_air" %in% colnames(met)) {
par <- PEcAn.data.atmosphere::sw2par(met$surface_downwelling_shortwave_flux_in_air)
ppfd <- PEcAn.data.atmosphere::par2ppfd(par)
} else {
PEcAn.logger::logger.error("Need either ppfd or surface_downwelling_shortwave_flux_in_air in met dataset")
}
}
if (!"wind_speed" %in% colnames(met)) {
if (all(c("northward_wind", "eastward_wind") %in% colnames(met))) {
wind_speed <- sqrt(met$northward_wind^2 + met$eastward_wind^2)
} else {
PEcAn.logger::logger.error("neither wind_speed nor both eastward_wind and northward_wind are present in met data")
}
}
if (max(met$relative_humidity) > 1) {
met[, `:=`(relative_humidity = met$relative_humidity/100)]
}
newmet <- met[, list(year = lubridate::year(met$date),
doy = lubridate::yday(met$date),
hour = round(lubridate::hour(met$date) + lubridate::minute(met$date) / 60, 0),
solar = ppfd,
Temp = udunits2::ud.convert(met$air_temperature, "Kelvin", "Celsius"),
RH = met$relative_humidity,
windspeed = wind_speed,
precip = udunits2::ud.convert(met$precipitation_flux, "s-1", "h-1"))]
newmet <- newmet[newmet$hour <= 23,]
return(as.data.frame(newmet))
}
|
summary.bayesQR <- function(object, burnin=0, credint=c(.025,.975), quantile=NULL, ...){
pandterm <- function(message) {
stop(message, call. = FALSE)
}
if (is.null(quantile)){
out <- lapply(object, FUN="summary.bayesQR.single", burnin=burnin, credint=credint)
} else {
if (!all(quantile %in% sapply(object, "[[", "quantile"))){
pandterm("One or more specified quantiles were not estimated")
} else {
object <- object[(sapply(object, "[[", "quantile") %in% quantile)]
out <- lapply(object, FUN="summary.bayesQR.single", burnin=burnin, credint=credint)
}
}
class(out) <- "bayesQR.summary"
return(out)
}
|
Kmmhat <-
function(X, r = NULL, ReferenceType = "", CheckArguments = TRUE) {
if (CheckArguments)
CheckdbmssArguments()
KmmBymarkcorrint <- function (X, r) {
X.marked <- X
X.marked$marks <- X$marks$PointWeight/mean(X$marks$PointWeight)
Kmm <- spatstat.core::Kmark(X.marked, correction="best")
attr(Kmm, "ylab") <- attr(Kmm, "yexp") <- quote(K[mm](r))
attr(Kmm, "fname") <- "K[mm]"
return (Kmm)
}
if (ReferenceType == "") {
return (KmmBymarkcorrint(X, r))
} else {
X.reduced <- X[X$marks$PointType == ReferenceType]
return (KmmBymarkcorrint(X.reduced, r))
}
}
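# Usage sketch (dbmss conventions assumed): X is a weighted, marked planar point
# pattern with $marks$PointWeight and $marks$PointType columns, as used above.
# Kmmhat(X)                          # mark-weighted K for all points
# Kmmhat(X, ReferenceType = "Case")  # computed on points of one type only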
|
"Dbf4myc"
|
StatsCovariate <- function(y = NULL,
y.names = NULL,
x = NULL,
x.names = NULL,
DF,
params = NULL,
job.group = NULL,
initial.list = list(),
jags.model,
...
) {
y <- TrimSplit(y)
x <- TrimSplit(x)
y.names <- if (!is.null(y.names)) TrimSplit(y.names) else CapWords(y)
x.names <- if (!is.null(x.names)) TrimSplit(x.names) else CapWords(x)
job.names <- c(y.names,x.names)
if ( length(y.names) != length(y) ) {
warning("y.names and y have unequal length. Using variable names.")
y.names <- CapWords(y)
}
if ( length(x.names) != length(x) ) {
warning("x.names and x have unequal length. Using variable names.")
x.names <- CapWords(x)
}
y.data <- DF[, c(y,x)]
q <- ncol(y.data)
if (length(x)) {
m1 <- as.matrix(do.call(rbind,lapply(y, function (z) {
m <- expand.grid(z,x)
matrix(unlist(lapply(unlist(m), function (m) {
which(m == colnames(y.data))
} )), ncol = 2 )
})))
} else {
m1 <- t(combn(1:q, 2))
}
m2 <- matrix(1:length(m1), length(m1) / 2, 2, byrow = TRUE)
y.data <- lapply(1:nrow(m1), function (i) {
m <- cbind( y.data[ , m1[i,1] ] , y.data[ , m1[i,2] ] )
m[stats::complete.cases(m) , ]
} )
n <- unlist(lapply(y.data,nrow))
n.max <- max(n)
y.data <- do.call(cbind,lapply(y.data, function(x) {
rbind(x, matrix(NA, nrow=n.max-nrow(x), ncol=2) )
}))
if (length(x)) {
n.data <- data.frame(do.call(rbind,lapply(y.names, function (z) {
m <- expand.grid(z,x.names)
data.frame(m,n, stringsAsFactors = FALSE)
})))
} else {
n.data <- data.frame(t(combn(job.names, 2)),n, stringsAsFactors = FALSE)
}
params <- if(length(params)) TrimSplit(params) else c("cor")
data.list <- list(
n = n,
q = q,
y = y.data,
m1 = m1,
m2 = m2
)
if (is.null(job.group)) job.group <- list ( c("cov","cor") , c("Alpha") )
if ("Alpha" %in% params) {
alpha <- "Alpha <- q / (q - 1) * (1 - sum(diag[]) / (sum(cov)))"
jags.model <- gsub("\\
job.names <- list(list(job.names) , list("Tau-equivalent reliability"))
alpha.n <- c( rep("Tau-equivalent reliability" , ncol(n.data)-1) ,
mean(n.data[ , ncol(n.data)]) )
n.data <- rbind(n.data , alpha.n)
}
n.data$n <- as.numeric(n.data$n)
name.list <- list(
job.group = job.group,
job.names = job.names
)
return (list(
data.list = data.list,
name.list = name.list,
params = params,
jags.model = jags.model,
n.data = n.data
))
}
|
usagehistory <- function (x)
return(x@usagehistory)
"usagehistory<-" <- function(x, value)
{
xx <- x
if (is.null(value))
{
xx@usagehistory <- character()
} else {
xx@usagehistory <- if (is.null(xx@usagehistory)) value else c(xx@usagehistory, value)
}
x <- xx
}
|
has_duplicates <- function(x, .xname = get_name_in_parent(x))
{
if(!anyDuplicated(x))
{
return(false(gettext("%s has no duplicates."), .xname))
}
TRUE
}
has_no_duplicates <- function(x, .xname = get_name_in_parent(x))
{
if(anyDuplicated(x))
{
dupe_indices <- which(duplicated(x))
return(
false(
ngettext(
length(dupe_indices),
"%s has a duplicate at position %s.",
"%s has duplicates at positions %s."
),
.xname,
toString(dupe_indices, width = 100)
)
)
)
}
TRUE
}
|
rvmf <- function(n, mu, k) {
Rfast::rvmf(n, mu, k)
}
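# Minimal usage sketch (Rfast is the assumed dependency of this wrapper):
# draw 5 unit vectors from a von Mises-Fisher distribution with mean
# direction (0, 0, 1) and concentration k = 10.
if (requireNamespace("Rfast", quietly = TRUE)) {
  vmf_draws <- rvmf(5, mu = c(0, 0, 1), k = 10)  # one unit vector per row
}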
|
circleCub.Gauss <- function (center, r, mean, sd)
{
stopifnot(isScalar(sd), length(center) == 2, length(mean) == 2)
pchisq((r/sd)^2, df=2, ncp=sum(((center-mean)/sd)^2))
}
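# Worked example: circleCub.Gauss() integrates an isotropic bivariate Gaussian
# over a disc by reducing it to a noncentral chi-square probability. With the
# Gaussian centred at the origin (sd = 1) and a disc of radius 2 around (1, 0),
# the call is equivalent to pchisq(4, df = 2, ncp = 1). Note that isScalar() is
# an internal helper assumed to exist in the surrounding package.
# circleCub.Gauss(center = c(1, 0), r = 2, mean = c(0, 0), sd = 1)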
|
speciesTree <- function(x, FUN = min)
{
test.ultra <- which(!unlist(lapply(x, is.ultrametric)))
if (length(test.ultra))
stop(paste("the following trees were not ultrametric:\n",
paste(test.ultra, collapse = " ")))
Ntree <- length(x)
D <- lapply(x, cophenetic.phylo)
nms <- rownames(D[[1]])
n <- length(nms)
M <- matrix(0, n*(n - 1)/2, Ntree)
for (i in 1:Ntree) M[, i] <- as.dist(D[[i]][nms, nms])
Y <- apply(M, 1, FUN)
attributes(Y) <- list(Size = n, Labels = nms, Diag = FALSE,
Upper = FALSE, class = "dist")
as.phylo(hclust(Y, "single"))
}
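# Usage sketch (relies on ape's cophenetic.phylo(), as.phylo() and stats::hclust(),
# which the function above calls): given a list of ultrametric trees sharing the
# same tip labels,
# speciesTree(list.of.trees, FUN = min)
# applies FUN to each pairwise tip distance across trees and single-linkage
# clusters the result into the species tree.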
|
met2cf.nc <- function(in.path, in.prefix, outfolder, ...) {
script <- paste0("inst/scripts/CF.", in.prefix, "sh")
cmdArgs <- paste(c(in.path, in.prefix, outfolder), collapse = " ")
fcn <- system.file(script, package = "PEcAn.data.atmosphere")
system(paste(fcn, cmdArgs))
}
|
write_ed2in <- function(ed2in, filename, custom_header = character(), barebones = FALSE) {
UseMethod("write_ed2in", ed2in)
}
write_ed2in.ed2in <- function(ed2in, filename, custom_header = character(), barebones = FALSE) {
tags_values_vec <- tags2char(ed2in)
if (isTRUE(barebones)) {
write_ed2in.default(ed2in, filename, custom_header, barebones)
return(NULL)
}
nvalues <- length(tags_values_vec)
ncomments <- length(attr(ed2in, "comment_values"))
file_body <- character(nvalues + ncomments)
file_body[attr(ed2in, "comment_linenos")] <- attr(ed2in, "comment_values")
file_body[attr(ed2in, "value_linenos")] <- tags_values_vec
header <- c(
"!=======================================",
"!=======================================",
"! ED2 namelist file",
"! Generated by `PEcAn.ED2::write_ed2in.ed2in`",
"! Additional user comments below: ",
paste0("! ", custom_header),
"!---------------------------------------"
)
output_lines <- c(header, file_body)
writeLines(output_lines, filename)
}
write_ed2in.default <- function(ed2in, filename, custom_header = character(), barebones = FALSE) {
tags_values_vec <- tags2char(ed2in)
header <- c(
"!=======================================",
"!=======================================",
"! ED2 namelist file",
"! Generated by `PEcAn.ED2::write_ed2in.default`",
"! Additional user comments below: ",
paste0("! ", custom_header),
"!---------------------------------------"
)
output_lines <- c(header, "$ED_NL", tags_values_vec, "$END")
writeLines(output_lines, filename)
}
tags2char <- function(ed2in) {
char_values <- vapply(ed2in, is.character, logical(1))
na_values <- vapply(ed2in, function(x) all(is.na(x)), logical(1))
quoted_vals <- ed2in
quoted_vals[char_values] <- lapply(quoted_vals[char_values], shQuote)
quoted_vals[na_values] <- lapply(quoted_vals[na_values], function(x) "")
values_vec <- vapply(quoted_vals, paste, character(1), collapse = ",")
tags_values_vec <- sprintf(" NL%%%s = %s", names(values_vec), values_vec)
tags_values_vec
}
|
texmexMethod <-
function(method){
method <- casefold(method)
if (method %in% c("o", "opt", "optim", "optimize", "optimise")){
method <- "o"
}
else if (method %in% c("s", "sim", "simulate")){
method <- "s"
}
else if (method %in% c("b", "bs", "boot", "bootstrap")){
method <- "b"
}
else {
stop("method should be either 'optimize', 'simulate' or 'bootstrap'")
}
method
}
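# Example: user-friendly aliases collapse to a single-letter code.
# texmexMethod("bootstrap")  # "b"
# texmexMethod("optimise")   # "o"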
texmexPrior <-
function(prior, penalty, method, pp){
prior <- casefold(prior)
penalty <- casefold(penalty)
if (length(penalty) == 0 & is.null(pp) & method != 's'){
prior <- "none"
}
else if (length(penalty) > 0){
if (penalty != prior){
prior <- penalty
}
}
if (method == 's' & !is.element(prior, c('gaussian', 'cauchy'))){
stop('Only gaussian or cauchy prior can be used when simulating from posterior.')
}
prior
}
texmexTrace <-
function(trace, method){
if (method == "o"){
if (!is.null(trace)){
otrace <- trace
}
else {
otrace <- 0
}
}
else{
otrace <- 0
if (is.null(trace)){
trace <- 10000
}
}
c(otrace, trace)
}
texmexPrepareData <-
function(y, data, params){
D <- vector('list', length=length(params))
if (!is.null(data)){
y <- formula(paste(y, "~ 1"))
y <- model.response(model.frame(y, data=data))
for (i in 1:length(params)){
D[[i]] <- model.matrix(params[[i]], data)
}
}
else {
for (i in 1:length(params)){
if (length(as.character(params[[i]])) == 2 & as.character(params[[i]])[2] == "1"){
D[[i]] <- matrix(ncol = 1, rep(1, length(y)))
}
else {
D[[i]] <- model.matrix(params[[i]])
}
}
}
names(D) <- names(params)
n <- length(y)
if (length(na.omit(y)) < n){
stop("missing values in response: missing values are not supported")
}
for (i in 1:length(D)){
if (nrow(na.omit(D[[i]])) < n){
stop("missing values in predictors: missing values are not supported")
}
}
list(y=y, D=D)
}
texmexThresholdData <- function(threshold, data){
for (i in 1:length(data$D)){
data$D[[i]] <- data$D[[i]][data$y > threshold, , drop=FALSE]
}
data$y <- data$y[data$y > threshold]
if (length(data$y) == 0){
stop("No observations above the threshold.")
}
data
}
texmexPriorParameters <-
function(prior, priorParameters, data){
nc <- sum(sapply(data$D, ncol))
if (prior %in% c("quadratic", "gaussian")) {
if (is.null(priorParameters)) {
priorParameters <- list(rep(0, nc), diag(rep(10^4, nc)))
}
if (length(priorParameters) != 2 | !is.list(priorParameters)) {
stop("For Gaussian prior or quadratic penalty, priorParameters should be a list of length 2, the second element of which should be a symmetric (covariance) matrix")
}
}
else if (prior %in% c("lasso", "l1", "laplace")) {
if (is.null(priorParameters)) {
priorParameters <- list(rep(0, nc), diag(rep(10^(-4), nc)))
}
if (length(priorParameters) != 2 | !is.list(priorParameters)) {
stop("For Laplace prior or L1 or Lasso penalty, priorParameters should be a list of length 2, the second element of which should be a diagonal (precision) matrix")
}
if (!is.matrix(priorParameters[[2]])) {
priorParameters[[2]] <- diag(rep(priorParameters[[2]], nc))
}
if (!all(priorParameters[[2]] == diag(diag(priorParameters[[2]])))) {
warning("some off-diagonal elements of the covariance are non-zero. Only the diagonal is used in penalization")
}
}
if (!is.null(priorParameters)) {
if (length(priorParameters[[1]]) != nc) {
stop("wrong number of parameters in prior (doesn't match parameter formulae)")
}
else if (length(diag(priorParameters[[2]])) != nc) {
stop("wrong dimension of prior covariance (doesn't match parameter formulae)")
}
}
priorParameters
}
findFormulae <-
function(call,...){
wh <- sapply(call, function(x){ try(class(eval(x)), silent=TRUE) })
wh <- names(wh)[wh == 'formula']
if (length(wh) > 0){
res <- as.list(call[wh])
for(i in 1:length(res)) res[[i]] <- eval(res[[i]])
}
else { res <- NULL }
res
}
texmexParameters <- function(call, fam, ...){
mp <- lapply(names(fam$param), function(x) ~1)
names(mp) <- names(fam$param)
p <- findFormulae(call, ...)
mp[names(p)] <- p
mp
}
texmexGetParam <- function(data, co){
if (is.vector(co)){ co <- matrix(co, nrow=1) }
mend <- cumsum(unlist(lapply(data, ncol)))
mstart <- c(1, mend+1)[-(length(mend) + 1)]
param <- lapply(1:length(mend), function(i, m, start, end){
m[,start[i]:end[i],drop=FALSE]
},
m=co, start=mstart, end=mend)
param
}
texmexPst <- function(msg="",Family){
paste(msg,", Family = ", Family$name)
}
texmexGetXlevels <- function(fo, data){
getVars <- function(fo) { all.vars(update(fo, 0~.)) }
allVars <- unique(unlist(lapply(fo, getVars)))
data <- data[, allVars, drop=FALSE]
classes <- sapply(data, class)
wh <- classes %in% c("factor", "ordered", "character")
data <- data[, wh, drop=FALSE]
classes <- classes[wh]
data[classes == "character"] <- lapply(data[classes == "character"], as.factor)
xlevels <- lapply(data, levels)
res <- lapply(fo, getVars)
lapply(res, function(X, wh) wh[names(wh) %in% X], wh=xlevels)
}
texmexStandardForm <- function(object){
if (sum(sapply(object$data$D, ncol)) > length(object$family$param)){
object$data$y <- resid(object)
object$threshold <- 0
object$coefficients <- object$family$param
}
object
}
|
wals.feature <-
function(features,
na.rm = TRUE) {
features_set <-
c(
"1A",
"2A",
"3A",
"4A",
"5A",
"6A",
"7A",
"8A",
"9A",
"10A",
"10B",
"11A",
"12A",
"13A",
"14A",
"15A",
"16A",
"17A",
"18A",
"19A",
"20A",
"21A",
"21B",
"22A",
"23A",
"24A",
"25A",
"25B",
"26A",
"27A",
"28A",
"29A",
"30A",
"31A",
"32A",
"33A",
"34A",
"35A",
"36A",
"37A",
"38A",
"39A",
"39B",
"40A",
"41A",
"42A",
"43A",
"44A",
"45A",
"46A",
"47A",
"48A",
"49A",
"50A",
"51A",
"52A",
"53A",
"54A",
"55A",
"56A",
"57A",
"58A",
"58B",
"59A",
"60A",
"61A",
"62A",
"63A",
"64A",
"65A",
"66A",
"67A",
"68A",
"69A",
"70A",
"71A",
"72A",
"73A",
"74A",
"75A",
"76A",
"77A",
"78A",
"79A",
"79B",
"80A",
"81A",
"81B",
"82A",
"83A",
"84A",
"85A",
"86A",
"87A",
"88A",
"89A",
"90A",
"90B",
"90C",
"90D",
"90E",
"90F",
"90G",
"91A",
"92A",
"93A",
"94A",
"95A",
"96A",
"97A",
"98A",
"99A",
"100A",
"101A",
"102A",
"103A",
"104A",
"105A",
"106A",
"107A",
"108A",
"108B",
"109A",
"109B",
"110A",
"111A",
"112A",
"113A",
"114A",
"115A",
"116A",
"117A",
"118A",
"119A",
"120A",
"121A",
"122A",
"123A",
"124A",
"125A",
"126A",
"127A",
"128A",
"129A",
"130A",
"130B",
"131A",
"132A",
"133A",
"134A",
"135A",
"136A",
"136B",
"137A",
"137B",
"138A",
"139A",
"140A",
"141A",
"142A",
"143A",
"143B",
"143C",
"143D",
"143E",
"143F",
"143G",
"144A",
"144B",
"144C",
"144D",
"144E",
"144F",
"144G",
"144H",
"144I",
"144J",
"144K",
"144L",
"144M",
"144N",
"144O",
"144P",
"144Q",
"144R",
"144S",
"144T",
"144U",
"144V",
"144W",
"144X",
"144Y"
)
message(paste0("Don't forget to cite a source (modify in case of using individual chapters):
Dryer, Matthew S. & Haspelmath, Martin (eds.) 2013. The World Atlas of Language Structures Online. Leipzig: Max Planck Institute for Evolutionary Anthropology.
(Available online at https://wals.info/, Accessed on ",
Sys.Date(),
".)
@book{wals,
address = {Leipzig},
editor = {Matthew S. Dryer and Martin Haspelmath},
publisher = {Max Planck Institute for Evolutionary Anthropology},
title = {WALS Online},
url = {https://wals.info/},
year = {2013}
}"))
if (sum(!toupper(features) %in% features_set) < 1){
links <-
paste0("https://wals.info/feature/", toupper(features), ".tab")
datalist <- lapply(links, function(x) {
utils::read.csv(x,
sep = "\t",
skip = 7,
stringsAsFactors = FALSE)
})
final_df <- Reduce(function(x, y) {
merge(
x,
y,
all = TRUE,
by = c(
"wals.code",
"name",
"latitude",
"longitude",
"genus",
"family"
)
)
}, datalist)
final_df <-
final_df[, grep("description|wals.code|name|latitude|longitude", colnames(final_df))]
colnames(final_df)[grep("description", colnames(final_df))] <-
features
final_df <-
merge(final_df, lingtypology::wals, by = "wals.code")
final_df$language <-
lingtypology::lang.gltc(final_df$glottocode)
na_rm <- is.na(final_df$language)
# if/else rather than ifelse(): ifelse() evaluates both branch expressions,
# which would run both assignments regardless of na.rm
if (na.rm) {
final_df <- final_df[!na_rm, ]
} else {
final_df[is.na(final_df$language), "language"] <-
final_df[is.na(final_df$language), "name"]
}
final_df <- final_df[,-2]
} else {
not_features <- features[which(!features %in% features_set)]
stop(paste(
"There are no features",
paste0("'", not_features, "'", collapse = ", "),
"in the WALS database."
))
}
return(final_df)
}
|
DataAltiExtrapolation_Valery <- function(DatesR,
Precip, PrecipScale = TRUE,
TempMean, TempMin = NULL, TempMax = NULL,
ZInputs, HypsoData, NLayers,
verbose = TRUE) {
GradP_Valery2010 <- 0.00041
HypsoData <- as.double(HypsoData)
ZInputs <- as.double(ZInputs)
ZLayers <- as.double(rep(NA, NLayers))
if (!identical(HypsoData, as.double(rep(NA, 101)))) {
nmoy <- 100 %/% NLayers
nreste <- 100 %% NLayers
ncont <- 0
for (iLayer in 1:NLayers) {
if (nreste > 0) {
nn <- nmoy + 1
nreste <- nreste - 1
} else {
nn <- nmoy
}
if (nn == 1) {
ZLayers[iLayer] <- HypsoData[ncont + 1]
}
if (nn == 2) {
ZLayers[iLayer] <- 0.5 * (HypsoData[ncont + 1] + HypsoData[ncont + 2])
}
if (nn > 2) {
ZLayers[iLayer] <- HypsoData[ncont + nn / 2]
}
ncont <- ncont + nn
}
}
if (identical(ZInputs, HypsoData[51]) & NLayers == 1) {
LayerPrecip <- list(as.double(Precip))
} else {
GradP <- GradP_Valery2010
TabGradP <- rep(GradP, length(Precip))
Zthreshold <- 4000
LayerPrecip_mat <- sapply(1:NLayers, function(iLayer) {
if (ZLayers[iLayer] <= Zthreshold) {
prcp <- as.double(Precip * exp(TabGradP * (ZLayers[iLayer] - ZInputs)))
} else {
if (ZInputs <= Zthreshold) {
prcp <- as.double(Precip * exp(TabGradP * (Zthreshold - ZInputs)))
} else {
prcp <- as.double(Precip)
}
}
return(prcp)
})
if (PrecipScale) {
LayerPrecip_mat <- LayerPrecip_mat / rowMeans(LayerPrecip_mat) * Precip
LayerPrecip_mat[is.nan(LayerPrecip_mat)] <- 0
}
LayerPrecip <- as.list(as.data.frame(LayerPrecip_mat))
}
LayerTempMean <- list()
LayerTempMin <- list()
LayerTempMax <- list()
if (identical(ZInputs, HypsoData[51]) & NLayers == 1) {
LayerTempMean[[1]] <- as.double(TempMean)
if (!is.null(TempMin) & !is.null(TempMax)) {
LayerTempMin[[1]] <- as.double(TempMin)
LayerTempMax[[1]] <- as.double(TempMax)
}
} else {
GradT <- .GradT_Valery2010
iday <- match(format(DatesR, format = "%d%m"),
sprintf("%02i%02i", GradT[, "day"], GradT[, "month"]))
TabGradT <- GradT[iday, c("grad_Tmean", "grad_Tmin", "grad_Tmax")]
for (iLayer in 1:NLayers) {
LayerTempMean[[iLayer]] <- as.double(TempMean + (ZInputs - ZLayers[iLayer]) * abs(TabGradT[, "grad_Tmean"]) / 100)
if (!is.null(TempMin) & !is.null(TempMax)) {
LayerTempMin[[iLayer]] <- as.double(TempMin + (ZInputs - ZLayers[iLayer]) * abs(TabGradT[, "grad_Tmin"]) / 100)
LayerTempMax[[iLayer]] <- as.double(TempMax + (ZInputs - ZLayers[iLayer]) * abs(TabGradT[, "grad_Tmax"]) / 100)
}
}
}
LayerFracSolidPrecip <- list()
Zthreshold <- 1500
Option <- "USACE"
if (!is.na(ZInputs)) {
if (ZInputs < Zthreshold & !is.null(TempMin) & !is.null(TempMax)) {
Option <- "Hydrotel"
}
}
for (iLayer in 1:NLayers) {
if (Option == "Hydrotel") {
TempMin <- LayerTempMin[[iLayer]]
TempMax <- LayerTempMax[[iLayer]]
SolidFraction <- 1 - TempMax / (TempMax - TempMin)
SolidFraction[TempMin >= 0] <- 0
SolidFraction[TempMax <= 0] <- 1
}
if (Option == "USACE") {
USACE_Tmin <- -1.0
USACE_Tmax <- 3.0
TempMean <- LayerTempMean[[iLayer]]
SolidFraction <- 1 - (TempMean - USACE_Tmin) / (USACE_Tmax - USACE_Tmin)
SolidFraction[TempMean > USACE_Tmax] <- 0
SolidFraction[TempMean < USACE_Tmin] <- 1
}
LayerFracSolidPrecip[[iLayer]] <- as.double(SolidFraction)
}
namesLayer <- sprintf("L%i", seq_along(LayerPrecip))
names(LayerPrecip) <- namesLayer
names(LayerTempMean) <- namesLayer
if (!is.null(TempMin) & !is.null(TempMax)) {
names(LayerTempMin) <- namesLayer
names(LayerTempMax) <- namesLayer
}
names(LayerFracSolidPrecip) <- namesLayer
return(list(LayerPrecip = LayerPrecip,
LayerTempMean = LayerTempMean,
LayerTempMin = LayerTempMin,
LayerTempMax = LayerTempMax,
LayerFracSolidPrecip = LayerFracSolidPrecip,
ZLayers = ZLayers))
}
|
sp.classification <-
function (tree.sp, species.spruce, species.pine, species.harw)
{
if (is.null(species.spruce)) stop("species.spruce is NULL")
if (is.null(species.pine)) stop("species.pine is NULL")
if (is.null(species.harw)) stop("species.harw is NULL")
sp.clas <- factor(rep("other", length(tree.sp)),
levels = c("spruce",
"pine", "birch", "other"))
sp.clas[tree.sp %in% species.spruce] <- "spruce"
sp.clas[tree.sp %in% species.pine] <- "pine"
sp.clas[tree.sp %in% species.harw] <- "birch"
return(sp.clas)
}
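# Example with hypothetical species codes (1 = spruce, 10 = pine, 30 = birch):
# sp.classification(c(1, 1, 10, 30, 40),
#                   species.spruce = 1, species.pine = 10, species.harw = 30)
# returns the factor: spruce spruce pine birch other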
|
est.mfa <- function (init_para, Y, itmax, tol, conv_measure, eta , ...)
{
p <- ncol(Y)
n <- nrow(Y)
fit <- init_para
loglikeNtau <- try(do.call("logL_tau.mfa", c(list(Y = Y),
fit)), silent = TRUE)
if ((any(class(loglikeNtau) %in% "try-error")) || (any(class(loglikeNtau) %in%
"character"))) {
FIT <- paste("in computing the log-likelihood before EM-steps")
class(FIT) <- "error"
return(FIT)
}
fit <- append(fit, loglikeNtau)
for (niter in 1:itmax) {
FIT <- do.call("MStep.amfa", c(list(Y = Y, eta = eta), fit))
if (any(class(FIT) %in% "error")) {
FIT <- paste("in ", niter, "iteration of the M-step",
FIT)
class(FIT) <- "error"
return(FIT)
}
loglikeNtau <- try(do.call("logL_tau.mfa", c(list(Y = Y),
FIT)), silent = TRUE)
if ((any(class(loglikeNtau) %in% "try-error")) || (any(class(loglikeNtau) %in%
"character"))) {
FIT <- paste("in computing the log-likelihood after the ",
niter, "th the M-step", FIT$logL, sep = "")
class(FIT) <- "error"
return(FIT)
}
FIT <- append(FIT, loglikeNtau)
if ((any(class(FIT$logL) == "NULL")) || (any(class(FIT$logL) ==
"character"))) {
FIT <- paste("in computing the log-likelihood after the ",
niter, "th the M-step", FIT$logL, sep = "")
class(FIT) <- "error"
return(FIT)
}
else {
if ((FIT$logL == -Inf) | is.na(FIT$logL)) {
FIT <- paste("the log-likelihood computed after the ",
niter, "th iteration of the M-step is not finite",
sep = "")
class(FIT) <- "error"
return(FIT)
}
}
if ((conv_measure == "diff") && (abs(FIT$logL - fit$logL) <
tol))
break
if ((conv_measure == "ratio") && (abs((FIT$logL - fit$logL)/FIT$logL) <
tol))
break
fit <- FIT
}
class(FIT) <- "mfa"
return(FIT)
}
|
library(testthat)
library(rly)
context("Bad literal specification")
Lexer <- R6::R6Class("Lexer",
public = list(
tokens = c('NUMBER'),
literals = c("+","-","**"),
t_NUMBER = function(re='\\d+', t) {
return(t)
},
t_error = function(t) {}
)
)
test_that("literal not single", {
expect_output(expect_error(rly::lex(Lexer), "Can't build lexer"),
"ERROR .* Invalid literal. Must be a single character")
})
|
gvcmcatfit <-
function (
x,
y,
weights,
family,
control,
acoefs,
lambda,
phis,
weight,
which.a,
start = NULL,
offset = rep(0, nobs),
...
)
{
x <- as.matrix(x)
xnames <- dimnames(x)[[2L]]
ynames <- if (is.matrix(y))
rownames(y) else names(y)
conv <- FALSE
nobs <- nrow(x)
nlik <- if (control$scaled.lik) nobs else 1
nvars <- ncol(x)
EMPTY <- nvars == 0
if (is.null(weights))
weights <- rep.int(1, nobs)
if (is.null(offset))
offset <- rep.int(0, nobs)
if (length(offset)==1)
offset <- rep.int(offset, nobs)
variance <- family$variance
linkinv <- family$linkinv
if (!is.function(variance) || !is.function(linkinv))
stop("'family' argument seems not to be a valid family object",
call. = FALSE)
dev.resids <- family$dev.resids
aic <- family$aic
mu.eta <- family$mu.eta
unless.null <- function(x, if.null) if (is.null(x)) if.null else x
valideta <- unless.null(family$valideta, function(eta) TRUE)
validmu <- unless.null(family$validmu, function(mu) TRUE)
coefold <- if (nvars >= nobs || control$start.ml==TRUE) control$oml else rep(1, nvars)
if (any(is.na(coefold))) coefold[which(is.na(coefold))] <- 0
mustart <- etastart <- NULL
if (is.null(start)) {
suppressWarnings(eval(family$initialize))
etastart <- family$linkfun(mustart)
start <- coefold
} else {
start <- if (length(start) != nvars)
{stop(gettextf("length of 'start' should equal %d and correspond to initial coefs for %s",
nvars, paste(deparse(xnames), collapse = ", ")),
domain = NA)}
else {
isnogood <- is.na(start)
if (any(isnogood)) { start[isnogood] <- 0 }
start }
eval(family$initialize)
coefold <- start
etastart <- offset + as.vector(if (NCOL(x) == 1) {x * start} else {x %*% start})
mustart <- linkinv(etastart)
}
if (EMPTY) {
eta <- rep.int(0, nobs) + offset
if (!valideta(eta))
stop("invalid linear predictor values in empty model",
call. = FALSE)
mu <- linkinv(eta)
if (!validmu(mu))
stop("invalid fitted means in empty model", call. = FALSE)
dev <- sum(dev.resids(y, mu, weights))
w <- ((weights * mu.eta(eta)^2)/variance(mu))^0.5
residuals <- (y - mu)/mu.eta(eta)
good <- rep(TRUE, length(residuals))
boundary <- conv <- TRUE
coef <- numeric()
iter <- 0L
}
if (!EMPTY) {
eta <- etastart
mu <- mustart
if (!(validmu(mu) && valideta(eta)))
stop("cannot find valid starting values: please specify some",
call. = FALSE)
devold <- sum(dev.resids(y, mu, weights))
boundary <- conv <- FALSE
L1 <- function(xi, control=control) sqrt((xi^2 + control$c) )^(-1)
L0 <- if (control$L0.log==TRUE) {
function(xi, control=control){
p <- 1 + exp(-control$gama*abs(xi))
2*control$gama*(sqrt(xi^2 + control$c))^(-1)*p^(-1)*(1 - 1/p)
}
} else {
function(xi, control=control){2*(xi+control$c)^(-2)}
}
L2 <- function(xi, control=control){2}
# elastic net: mixture of L1 and L2 penalties (the original mixed L1 with itself)
elastic <- function(xi, control=control) {control$elastic * L1(xi, control=control) +
(1-control$elastic) * L2(xi, control=control)}
SCAD <- function(xi, control=control){
a <- 3.7
lambda <- control$lambda
zweiteklammer <- a*lambda-abs(xi)
(as.numeric(abs(xi)<=lambda) + zweiteklammer*as.numeric(zweiteklammer>0) *
as.numeric(abs(xi)>lambda) / (a-1) / lambda) /sqrt(abs(xi)^2 + control$c)
}
sp <- function(xi, control=control) { 2 }
alle.L <- c("L1", "L0", "L2", "elastic", "SCAD", "sp", "L0.normal", "L0.root", "L0.ridge")
all.L <- alle.L[which(alle.L %in% which.a)]
vec.L <- function(x, which.a, all.L=all.L, control=control){
output <- rep(0,length(x))
for (i in all.L){
good <- which(which.a==i)
if (!is.null(good)) output[good] <- eval(parse(text=i))(x[good], control)
}
return(output)
}
grouped <- function(beta, Aj, control=control) {
dfj <- sum(Aj==1)
AjtAj <- Aj%*%t(Aj)
rep(sqrt(dfj) / sqrt(t(beta)%*%AjtAj%*%beta + control$c), ncol(Aj) )
}
vec.special <- function(beta, acoefs, which.a) {
output <- rep(0,ncol(acoefs))
info <- rle(which.a)
i <- "grouped"
for (j in grep(i, info[[2]])) {
good <- which(which.a==info[[2]][j])
adpt <- sqrt(sum((t(acoefs)%*%control$oml)[good]^2)) * control$adapted.weights + abs(control$adapted.weights - 1)
output[good] <- eval(parse(text=i))(beta, acoefs[,good], control)/adpt
}
return(output)
}
A <- function(beta, acoefs=acoefs, x=x, which.a=which.a, all.L=all.L, lambda=lambda, phis=phis, weight=weight, control=control) {
nbs <- t(acoefs)%*%beta
fd <- as.integer(drop(nbs)!=0)
appro <- fd * lambda * phis * weight * (vec.L(nbs, which.a, all.L, control) + vec.special(beta, acoefs, which.a))
crossprod(t(acoefs)*sqrt(appro))
}
method<-"lqa"
if(method=="lqa"){
for (i in 1L:control$maxi) {
good <- weights > 0
varmu <- variance(mu)[good]
if (any(is.na(varmu)))
stop("NAs in V(mu)")
if (any(varmu == 0))
stop("0s in V(mu)")
mu.eta.val <- mu.eta(eta)
if (any(is.na(mu.eta.val[good])))
stop("NAs in d(mu)/d(eta)")
good <- (weights > 0) & (mu.eta.val != 0)
if (all(!good)) {
conv <- FALSE
warning("no observations informative at iteration ", i)
break
}
z <- (eta - offset)[good] + (y - mu)[good]/mu.eta.val[good]
w <- sqrt((weights[good] * mu.eta.val[good]^2)/variance(mu)[good])
A.lambda <- nlik * A(start, acoefs=acoefs, x=x, which.a=which.a, all.L=all.L,
lambda=lambda, phis=phis, weight=weight, control)
x.star <- w*x
y.schlange <- as.vector(w*z)
p.imat.new <- crossprod(x.star) + A.lambda
chol.pimat.new <- chol(p.imat.new)
inv.pimat.new <- chol2inv(chol.pimat.new)
start.new <- control$g * drop(inv.pimat.new %*% t(x.star) %*%
y.schlange) + (1 - control$g) * start
start <- start.new
if (any(!is.finite(start))) {
conv <- FALSE
warning(gettextf("non-finite coefficients at iteration %d", i), domain = NA)
break
}
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
dev <- sum(dev.resids(y, mu, weights))
boundary <- FALSE
if (!is.finite(dev)) {
if (any(!is.finite(coefold)))
stop("no valid set of coefficients has been found: please supply starting values",
call. = FALSE)
warning("step size truncated due to divergence",
call. = FALSE)
ii <- 1
while (!is.finite(dev)) {
if (ii > control$maxi)
stop("inner loop 1; cannot correct step size",
call. = FALSE)
ii <- ii + 1
start <- (start + coefold)/2
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
dev <- sum(dev.resids(y, mu, weights))
}
boundary <- TRUE
}
if (!(valideta(eta) && validmu(mu))) {
if (any(!is.finite(coefold)))
stop("no valid set of coefficients has been found: please supply starting values",
call. = FALSE)
warning("step size truncated: out of bounds",
call. = FALSE)
ii <- 1
while (!(valideta(eta) && validmu(mu))) {
if (ii > control$maxi)
stop("inner loop 2; cannot correct step size",
call. = FALSE)
ii <- ii + 1
start <- (start + coefold)/2
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
}
boundary <- TRUE
dev <- sum(dev.resids(y, mu, weights))
}
if (sum(abs(start - coefold))/sum(abs(coefold)) <= control$epsilon) {
conv <- TRUE
coef <- start
break
} else {
devold <- dev
coef <- coefold <- start
}
}
}
start <- coef <- round(start, control$accuracy)
eta <- drop(x %*% start)
mu <- linkinv(eta <- eta + offset)
dev <- sum(dev.resids(y, mu, weights))
if (!conv)
warning("Convergence warning for lambda = ", lambda[1], "\n", call. = FALSE)
if (boundary)
warning("Algorithm stopped at boundary value",
call. = FALSE)
if (family$family == "binomial") {
if (any(mu > 1 - control$epsilon) || any(mu < control$epsilon))
warning("Fitted probabilities numerically 0 or 1 occurred",
call. = FALSE)
}
if (family$family == "poisson") {
if (any(mu < control$epsilon))
warning("Fitted rates numerically 0 occurred",
call. = FALSE)
}
residuals <- (y - mu)/mu.eta(eta)
names(coef) <- xnames
H.i.1 <- Matrix(x.star)
suppressWarnings(try(H.i.2 <- H.i.1 %*% inv.pimat.new))
rank <- if (exists("H.i.2")) sum(H.i.2 * H.i.1) else NA
}
names(residuals) <- ynames
names(mu) <- ynames
names(eta) <- ynames
wt <- rep.int(0, nobs)
wt[good] <- w^2
names(wt) <- ynames
names(weights) <- ynames
names(y) <- ynames
wtdmu <- sum(weights * y)/sum(weights)
nulldev <- sum(dev.resids(y, wtdmu, weights))
n.ok <- nobs - sum(weights == 0)
nulldf <- n.ok - 1
resdf <- n.ok - rank
aic.model <- aic(y, nobs, mu, weights, dev) + 2 * rank
Rmat <- QR <- NULL
list(coefficients = coef, residuals = residuals, fitted.values = mu,
effects = if (!EMPTY) effects, R = if (!EMPTY) Rmat,
rank = round(rank, 2),
qr = if (!EMPTY) QR,
family = family,
linear.predictors = eta, deviance = dev, aic = aic.model,
null.deviance = nulldev, iter = i, weights = wt, prior.weights = weights,
df.residual = resdf, df.null = nulldf, y = y, converged = conv,
boundary = boundary)
}
|
create_project_rstudio <- function(path, template, dump, merge)
{
.stopifproject(c("Cannot create a new project inside an existing one",
"Please change to another directory and re-run create.project()"),
path = normalizePath(dirname(path)))
.stopifproject(c("Cannot create a new project inside an existing one",
"Please change to another directory and re-run create.project()"),
path = dirname(normalizePath(dirname(path))))
dir.create(path, recursive = TRUE, showWarnings = FALSE)
ProjectTemplate::create.project(
project.name = path, template = template,
dump = dump, merge.strategy = ifelse(merge, "allow.non.conflict", "require.empty"),
rstudio.project = TRUE
)
}
|
mare <- function(o, p){
# median absolute relative error: median(|observed - predicted| / observed)
stats::median(abs(o - p) / o)
}
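# Example: mare(o = c(10, 20, 30), p = c(12, 18, 33))
# relative errors are 0.2, 0.1, 0.1, so the median absolute relative error is 0.1.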
|
jxCalendarTime <- function(tc0, tc1) {
jx <- rep(0, times=length(tc1))
for (k in 1:length(tc1)) {
idx <- which(tc0 <= tc1[k])
if (length(idx) == 0) {
jx[k] <- 0
} else {
jx[k] <- idx[length(idx)]
}
}
return(jx)
}
jyCalendarTime <- function(jx) {
jx0 <- c(0, jx[1:(length(jx)-1)])
jx <- which(jx > jx0)
return(jx)
}
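# Example: for each element of tc1, jxCalendarTime() returns the index of the
# latest tc0 value not exceeding it (0 if none); jyCalendarTime() then gives the
# positions at which that index increases.
# jxCalendarTime(tc0 = c(1, 5, 9), tc1 = c(0, 2, 6, 10))  # -> 0 1 2 3
# jyCalendarTime(c(0, 1, 2, 3))                           # -> 2 3 4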
|
Replicator <- function(time, state, parameters) {
a <- parameters
states <- sqrt(length(a))
A <- matrix(a, states, byrow = TRUE)
A <- t(A)
dX <- c()
for(i in 1:states) {
dX[i] <- sum(state * A[i, ])
}
avgFitness <- sum(dX * state)
for(i in 1:states) {
dX[i] <- state[i] * (dX[i] - avgFitness)
}
return(list(dX))
}
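# Minimal sketch of integrating the replicator dynamics. deSolve is an assumed
# dependency here: Replicator's (time, state, parameters) signature and its
# list(dX) return value match what deSolve::ode() expects. The payoff matrix
# below is rock-paper-scissors, supplied row-wise as a length-9 vector.
if (requireNamespace("deSolve", quietly = TRUE)) {
  rps <- c( 0, -1,  1,
            1,  0, -1,
           -1,  1,  0)
  traj <- deSolve::ode(y = c(0.5, 0.3, 0.2), times = seq(0, 20, by = 0.1),
                       func = Replicator, parms = rps)
}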
|
split_tree_at_height = function(tree,
height = 0,
by_edge_count = FALSE){
Ntips = length(tree$tip.label);
Nnodes = tree$Nnode;
Nedges = nrow(tree$edge);
clade_labels = c(tree$tip.label, tree$node.label)
results = split_tree_at_height_CPP( Ntips,
Nnodes,
Nedges,
tree_edge = as.vector(t(tree$edge)) - 1,
edge_length = (if(by_edge_count || is.null(tree$edge.length)) numeric() else tree$edge.length),
root_edge = (if(is.null(tree$root.edge)) 0 else tree$root.edge),
split_height = height)
if(!results$success){
return(list(success=FALSE, error=results$error));
}else if(results$Nsubtrees==0){
return(list(Nsubtrees = 0,
subtrees = list(),
clade2subtree = rep(0,Ntips+Nnodes)));
}else{
Nsubtrees = results$Nsubtrees
NStips = results$NStips
NSnodes = results$NSnodes
NSclades = NStips+NSnodes
NSedges = results$NSedges
new2old_clade = results$new2old_clade + 1;
new2old_edge = results$new2old_edge + 1;
clade2subtree = results$clade2subtree + 1;
subtrees = vector(mode="list", Nsubtrees)
clade_offset = 0;
edge_offset = 0;
for(i in 1:Nsubtrees){
subtree = list( Nnode = NSnodes[i],
tip.label = (if(NStips[i]==0) vector("character") else clade_labels[new2old_clade[(1+clade_offset):(clade_offset+NStips[i])]]),
node.label = (if(is.null(tree$node.label) || (NSnodes[i]==0)) NULL else clade_labels[new2old_clade[(clade_offset+NStips[i]+1):(clade_offset+NSclades[i])]]),
edge = (if(NSedges[i]==0) matrix(nrow=0,ncol=2) else matrix(results$subtree_edges[(1+2*edge_offset):(2*edge_offset+2*NSedges[i])],ncol=2,byrow=TRUE) + 1),
edge.length = (if(is.null(tree$edge.length)) NULL else (if(NSedges[i]==0) vector("double") else tree$edge.length[new2old_edge[(1+edge_offset):(edge_offset+NSedges[i])]])),
root = results$new_roots[i]+1,
root.edge = results$root_edges[i])
class(subtree) = "phylo";
attr(subtree,"order") = NULL
subtrees[[i]] = list( tree = subtree,
new2old_clade = (if(NSclades[i]==0) vector("integer") else new2old_clade[(1+clade_offset):(clade_offset+NSclades[i])]),
new2old_edge = (if(NSedges[i]==0) vector("integer") else new2old_edge[(1+edge_offset):(edge_offset+NSedges[i])]));
clade_offset = clade_offset + NSclades[i];
edge_offset = edge_offset + NSedges[i];
}
return(list(Nsubtrees = Nsubtrees,
subtrees = subtrees,
clade2subtree = clade2subtree));
}
}
|
source_url <- function(url, ..., sha1 = NULL) {
stopifnot(is.character(url), length(url) == 1)
rlang::check_installed("digest")
temp_file <- file_temp()
on.exit(file_delete(temp_file))
request <- httr::GET(url)
httr::stop_for_status(request)
writeBin(httr::content(request, type = "raw"), temp_file)
file_sha1 <- digest::digest(file = temp_file, algo = "sha1")
if (is.null(sha1)) {
cli::cli_alert_info("SHA-1 hash of file is {file_sha1}")
} else {
if (nchar(sha1) < 6) {
stop("Supplied SHA-1 hash is too short (must be at least 6 characters)")
}
file_sha1 <- substr(file_sha1, 1, nchar(sha1))
if (!identical(file_sha1, sha1)) {
stop("SHA-1 hash of downloaded file (", file_sha1,
")\n does not match expected value (", sha1, ")",
call. = FALSE
)
}
}
check_dots_used(action = getOption("devtools.ellipsis_action", rlang::warn))
source(temp_file, ...)
}
source_gist <- function(id, ..., filename = NULL, sha1 = NULL, quiet = FALSE) {
rlang::check_installed("gh")
stopifnot(length(id) == 1)
url_match <- "((^https://)|^)gist.github.com/([^/]+/)?([0-9a-f]+)$"
if (grepl(url_match, id)) {
id <- regmatches(id, regexec(url_match, id))[[1]][5]
url <- find_gist(id, filename)
} else if (is.numeric(id) || grepl("^[0-9a-f]+$", id)) {
url <- find_gist(id, filename)
} else {
stop("Unknown id: ", id)
}
if (!quiet) cli::cli_alert_info("Sourcing {url}")
check_dots_used(action = getOption("devtools.ellipsis_action", rlang::warn))
source_url(url, ..., sha1 = sha1)
}
find_gist <- function(id, filename) {
files <- gh::gh("GET /gists/:id", id = id)$files
r_files <- files[grepl("\\.[rR]$", names(files))]
if (length(r_files) == 0) {
stop("No R files found in gist", call. = FALSE)
}
if (!is.null(filename)) {
if (!is.character(filename) || length(filename) > 1 || !grepl("\\.[rR]$", filename)) {
stop("'filename' must be NULL, or a single filename ending in .R/.r", call. = FALSE)
}
which <- match(tolower(filename), tolower(names(r_files)))
if (is.na(which)) {
stop("'", filename, "' not found in this gist", call. = FALSE)
}
} else {
if (length(r_files) > 1) {
warning("Multiple R files in gist, using first", call. = FALSE)
}
which <- 1
}
r_files[[which]]$raw_url
}
|
place_order_options <- function(RH, option_id, direction, side, quantity, stop_price = NULL, type = "limit", time_in_force) {
check_rh(RH)
if (type != "limit") stop("Only limit orders are supported")
if (type == "limit" & is.null(stop_price)) stop("Limit orders must contain a price")
if (!direction %in% c("debit", "credit")) stop("direction must be 'debit' or 'credit'")
orders <- api_orders_options(RH = RH,
option_id = option_id,
direction = direction,
side = side,
quantity = quantity,
stop_price = stop_price,
type = type,
time_in_force = time_in_force,
action = "order")
return(orders)
}
|
toBibtex.BibEntry <- function(object,
note.replace.field = c('urldate', "pubsate",
"addendum"),
extra.fields = NULL, ...){
object <- .BibEntry_expand_crossrefs(unclass(object), to.bibtex = TRUE)
if (length(object)) {
object$.index <- NULL
rval <- head(unlist(lapply(object, ConvertToBibtex,
note.replace.field, extra.fields)),
-1L)
}
else rval <- character()
class(rval) <- "Bibtex"
rval
}
ConvertToBibtex <- function(object, note.replace.field, extra.fields){
object <- unclass(object)[[1L]]
bibtype <- tolower(attr(object, "bibtype"))
obj.names <- names(object)
if ("author" %in% obj.names)
object$author <- encoded_text_to_latex(format_author(object$author),
"UTF-8")
if ("editor" %in% obj.names)
object$editor <- encoded_text_to_latex(format_author(object$editor),
"UTF-8")
if (bibtype == "article" && 'journaltitle' %in% obj.names &&
is.null(object$journal))
object$journal <- object$journaltitle
if ("location" %in% obj.names && is.null(object$address))
object$address <- object$location
object <- ConvertDate(object)
if ("institution" %in% obj.names && bibtype == 'thesis' &&
is.null(object$school)){
object$school <- object$institution
object$institution <- NULL
}
if ("eprinttype" %in% obj.names && is.null(object$archiveprefix))
object$archiveprefix <- object$eprinttype
if ("eprintclass" %in% obj.names && is.null(object$primaryclass))
object$primaryclass <- object$eprintclass
if ("sortkey" %in% obj.names && !"key" %in% obj.names)
object$key <- object$sortkey
if ("maintitle" %in% obj.names && !"series" %in% obj.names)
object$series <- object$maintitle
if ("issuetitle" %in% obj.names && !"booktitle" %in% obj.names)
object$booktitle <- object$issuetitle
if ("eventtitle" %in% obj.names && !"booktitle" %in% obj.names)
object$booktitle <- object$eventtitle
if (!"note" %in% obj.names && length(note.replace.field)){
object <- FillNote(object, obj.names, note.replace.field)
}
if (bibtype == "thesis" && length(object$type)){
bibtype <- switch(object$type, mathesis = {
object$type <- NULL
"mastersthesis"
}, phdthesis = {
object$type <- NULL
"phdthesis"
}, "phdthesis")
}
bibtype <- ConvertBibtype(bibtype)
rval <- paste0("@", bibtype, "{", attr(object, "key"), ",")
rval <- c(rval, vapply(names(object)[names(object) %in% c(.Bibtex_fields,
extra.fields)],
function(n) paste0(" ", n, " = {", object[[n]],
"},"), ""), "}", "")
return(rval)
}
ConvertDate <- function(obj){
dat <- attr(obj, 'dateobj')
if (!is.null(dat) && is.null(obj$year)){
if (is.interval(dat)){
obj$year <- tolower(year(int_start(dat)))
}else{
obj$year <- tolower(year(dat))
}
}
if (!is.null(dat) && attr(dat, "day.mon") > 0 && is.null(obj$month)){
if (is.interval(dat)){
obj$month <- tolower(month(int_start(dat), TRUE, TRUE))
}else{
obj$month <- tolower(month(dat, TRUE, TRUE))
}
}
obj
}
FillNote <- function(obj, onames, nrf){
for (i in seq_along(nrf)){
if (nrf[i] %in% onames){
if (nrf[i] == 'urldate'){
fDate <- try(ProcessDate(obj$urldate, NULL), TRUE)
if (!is.null(fDate) && !inherits(fDate, 'try-error')){
obj$note <- paste0('Last visited on ',
MakeBibLaTeX()$DateFormatter(fDate, TRUE))
}else{
obj$note <- paste0('Last visited on ', obj$urldate)
}
}else{
obj$note <- obj[[nrf[i]]]
}
break
}
}
obj
}
ConvertBibtype <- function(bibtype){
types <- tolower(names(BibTeX_entry_field_db))
if (length(pos <- which(types %in% bibtype)))
types[pos]
else
switch(bibtype, "mvbook" = "Book", "bookinbook" = "InBook",
"suppbook" = "InBook", "collection" = "Book",
"mvcollection" = "Book",
"suppcollection" = "InCollection",
"reference" = "Book", "mvreference" = "Book",
"inreference" = "InBook", "report" = "TechReport",
"proceedings" = "Book", "mvproceedings" = "Book",
"periodical" = "Book", "suppperiodical" = "InBook",
"patent" = "TechReport", "Misc")
}
.Bibtex_fields <- c("address", "author", "annote", "booktitle", "chapter",
"crossref", "edition", "editor", "eprint", "year",
"howpublished", "institution", "journal", "month", "key",
"note", "primaryclass", "archiveprefix", "doi",
"number", "organization", "pages", "publisher", "school",
"series", "title", "type", "url", "volume")
|
cov2dist <- function(V,void=FALSE)
{
if((sum(dim(V))/2)^2 != length(V)) stop("Object 'V' must be a squared matrix")
if(!float::storage.mode(V) %in% c("double","float32")) storage.mode(V) <- "double"
p <- ncol(V)
isFloat <- float::storage.mode(V)=="float32"
if(void)
{
if(isFloat){
out <- .Call('cov2distance',as.integer(p),V@Data,isFloat)
}else{
out <- .Call('cov2distance',as.integer(p),V,isFloat)
}
}else{
if(isFloat){
out <- V@Data[]
}else out <- V[]
tmp <- .Call('cov2distance',as.integer(p),out,isFloat)
if(isFloat) out <- float::float32(out)
}
out
}
cov2cor2 <- function(V,a=1,void=FALSE)
{
if((sum(dim(V))/2)^2 != length(V)) stop("Object 'V' must be a squared matrix")
if(!float::storage.mode(V) %in% c("double","float32")) storage.mode(V) <- "double"
p <- ncol(V)
isFloat <- float::storage.mode(V)=="float32"
if(void)
{
if(isFloat){
nOK <- .Call('cov2correlation',as.integer(p),V@Data,isFloat,as.numeric(a))[[1]]
}else{
nOK <- .Call('cov2correlation',as.integer(p),V,isFloat,as.numeric(a))[[1]]
}
out <- NULL
}else{
if(isFloat){
out <- V@Data[]
}else out <- V[]
nOK <- .Call('cov2correlation',as.integer(p),out,isFloat,as.numeric(a))[[1]]
if(isFloat) out <- float::float32(out)
}
if(nOK != p) warning("Some diagonal values of 'V' are 0 or NA. Results are dobubtful",immediate.=TRUE)
out
}
add2diag <- function(V,a,void=FALSE)
{
if((sum(dim(V))/2)^2 != length(V)) stop("Object 'V' must be a squared matrix")
if(!float::storage.mode(V) %in% c("float32","double")) storage.mode(V) <- "double"
p <- ncol(V)
isFloat <- float::storage.mode(V)=="float32"
if(void)
{
if(isFloat){
out <- .Call('addvalue2diag',as.integer(p),V@Data,as.numeric(a),isFloat)
}else{
out <- .Call('addvalue2diag',as.integer(p),V,as.numeric(a),isFloat)
}
}else{
if(isFloat){
out <- V@Data[]
}else out <- V[]
tmp <- .Call('addvalue2diag',as.integer(p),out,as.numeric(a),isFloat)
if(isFloat) out <- float::float32(out)
}
out
}
getIndexCorrelated <- function(X,maxCor=0.8)
{
COV <- stats::cov(X)
p <- ncol(COV)
index <- .Call("getCorrelated",as.integer(p),COV,as.numeric(maxCor))
out <- NULL
if(index[[2]]>0) out <- index[[1]][1:index[[2]]]
out
}
collect <- function(prefix="")
{
filenames <- Sys.glob(paste0(prefix,"_*_of_*.RData"))
out <- NULL
if(length(filenames)>0){
nFiles <- as.numeric(unlist(lapply(strsplit(filenames,"_"),function(x) gsub(".RData","",x[length(x)]))))
if(length(unique(nFiles))>1)
stop(" Different subset output files were found for the given prefix='",prefix,
"'. Remove old files. No output was collected")
filenames <- paste0(prefix,"_",1:nFiles[1],"_of_",nFiles[1],".RData")
if(!all(file.exists(filenames))) stop("Some files are missing for the given prefix='",prefix,"'\n")
for(i in seq_along(filenames))
{
load(filenames[i])
if(i==1){
fm <- out
}else{
fm$file_beta <- c(fm$file_beta,out$file_beta)
fm$tst <- c(fm$tst,out$tst)
fm$df <- rbind(fm$df,out$df)
fm$lambda <- rbind(fm$lambda,out$lambda)
}
cat(" Loaded file: '",filenames[i],"'\n",sep="")
}
fm$subset[1] <- NA
}else stop(" No output files were found for the given prefix='",prefix,"'")
fm
}
backsolvet <- function(r, x, k=ncol(r))
{
float::backsolve(r,x,k,transpose=TRUE)
}
upDateR <- function(xtx, R = NULL, Xtx, eps = .Machine$double.eps)
{
norm.xnew <- sqrt(xtx)
if(is.null(R)) {
R <- float::t(norm.xnew)
attr(R, "rank") <- 1
R
}else{
r <- backsolvet(R, Xtx)
rpp <- norm.xnew^2 - sum(r^2)
rank <- attr(R, "rank")
if(rpp <= eps){
rpp <- eps
}else{
rpp <- sqrt(rpp)
rank <- rank + 1
}
if(float::storage.mode(R)=="float32") rpp <- float::fl(rpp)
R <- cbind(rbind(R, float::fl(0)), c(r, rpp))
attr(R, "rank") <- rank
R
}
}
downDateR <- function(R, k = p)
{
p <- dim(R)[1]
if(p == 1){
return(NULL)
}else{
R <- deleteCol(R, rep(1, p), k)[[1]][ - p, , drop = FALSE]
attr(R, "rank") <- p - 1
return(R)
}
}
deleteCol <- function(R, z, k = p)
{
p <- dim(R)[1]
R <- R[, -k, drop = FALSE]
z <- as.matrix(z)
pz <- dim(z)[2]
if(!float::storage.mode(R) %in% c("double","float32")) storage.mode(R) <- "double"
isFloat <- float::storage.mode(R) == "float32"
if(isFloat){
z <- float::fl(z)
}else storage.mode(z) <- "double"
if(isFloat){
tmp = .Call("delete_col",R@Data,as.integer(p),as.integer(k),z@Data,as.integer(pz),isFloat)
return(lapply(tmp,function(x)float::float32(x)))
}else{
return(.Call("delete_col",R,as.integer(p),as.integer(k),z,as.integer(pz),isFloat))
}
}
saveBinary <- function(X,file = paste0(tempdir(),"/file.bin"),
type = c("float","double"), verbose = TRUE)
{
type <- match.arg(type)
if(length(dim(X)) != 2L) stop("Object 'X' must be a matrix")
if(!float::storage.mode(X) %in% c("double","float32")) storage.mode(X) <- "double"
unlink(file)
if(float::storage.mode(X)=="float32" & type!='float'){
type <- 'float'
warning("Object can be only saved as type='float' when class(X)='float'\n",
" Variable type was changed to type='float'",immediate.=TRUE)
}
isFloat <- float::storage.mode(X)=="float32"
size <- ifelse(type=="float",4,8)
if(isFloat){
out <- .Call('writeBinFileFloat',file,nrow(X),ncol(X),
as.integer(size),X@Data,isFloat)
}else{
out <- .Call('writeBinFileFloat',file,nrow(X),ncol(X),
as.integer(size),X,isFloat)
}
if(verbose){
tmp <- c(Gb=1E9,Mb=1E6,Kb=1E3,b=1E0)
sz <- file.size(file)/tmp[min(which(file.size(file)/tmp>1))]
cat("Saved file '",file,"'\n")
cat(" nrow=",nrow(X),", ncol=",ncol(X),", type=",type,", size=",size,"bytes, file.size=",round(sz,2),names(sz),"\n")
}
}
readBinary <- function(file = paste0(tempdir(),"/file.bin"),
indexRow = NULL, indexCol = NULL, verbose = TRUE)
{
if(!file.exists(file)){
stop("File '",file,"' does not exist")
}
nsetRow <- as.integer(length(indexRow))
nsetCol <- as.integer(length(indexCol))
X <- .Call("readBinFileFloat",file,nsetRow,nsetCol,
as.integer(indexRow),as.integer(indexCol))
n <- X[[1]]; p <- X[[2]]; size <- X[[3]]
isFloat <- X[[4]]
nError <- X[[5]]
if(nError==0){
if(isFloat | size==4){
X <- float::float32(X[[6]])
type <- "float"
}else{
X <- X[[6]]
type <- "double"
}
if(verbose){
tmp <- c(Gb=1E9,Mb=1E6,Kb=1E3,b=1E0)
sz <- object.size(X)/tmp[min(which(object.size(X)/tmp>1))]
cat("Loaded file '",file,"'\n")
cat(" nrow=",n,", ncol=",p,", type=",type,", size=",size,"bytes, object.size=",round(sz,2),names(sz),"\n")
}
}else{
X <- NULL
}
return(X)
}
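## Illustrative round trip for saveBinary()/readBinary() (relies on the
## package's compiled routines; the file path is a temporary placeholder):
## f <- tempfile(fileext = ".bin")
## saveBinary(matrix(rnorm(20), 5, 4), file = f, type = "double")
## X <- readBinary(f)

## The recursive helpers below (atPib, atPia, atPiPib, atPiPia, tr_Pi)
## evaluate quadratic forms a'P_i b, a'P_i P_i b and the trace of the
## projection matrix P_i on the eigen-rotated scale, recursing on the
## number of covariate columns of UtX already projected out.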
atPib <- function(i,Uta,Utb,UtX,dbar){
if(i==1) {
aPw <- sum(Uta*UtX[,i]*dbar)
bPw <- sum(Utb*UtX[,i]*dbar)
wPw <- sum(UtX[,i]^2*dbar)
sum(Uta*Utb*dbar)-aPw*bPw/wPw
}else{
atPib(i-1,Uta,Utb,UtX,dbar) - atPib(i-1,Uta,UtX[,i],UtX,dbar)*atPib(i-1,Utb,UtX[,i],UtX,dbar)/atPib(i-1,UtX[,i],UtX[,i],UtX,dbar)
}
}
atPia <- function(i,Uta,UtX,dbar){
if(i==1) {
aPw <- sum(Uta*UtX[,i]*dbar)
wPw <- sum(UtX[,i]^2*dbar)
sum(Uta^2*dbar)-(aPw^2)/wPw
}else{
atPia(i-1,Uta,UtX,dbar) - atPib(i-1,Uta,UtX[,i],UtX,dbar)^2/atPia(i-1,UtX[,i],UtX,dbar)
}
}
atPiPib <- function(i,Uta,Utb,UtX,dbar){
if(i==1) {
aPw <- sum(Uta*UtX[,i]*dbar)
aPPw <- sum(Uta*UtX[,i]*dbar^2)
bPw <- sum(Utb*UtX[,i]*dbar)
bPPw <- sum(Utb*UtX[,i]*dbar^2)
wPw <- sum(UtX[,i]^2*dbar)
wPPw <- sum(UtX[,i]^2*dbar^2)
sum(Uta*Utb*dbar^2)+aPw*bPw*wPPw/(wPw^2) - aPw*bPPw/wPw - bPw*aPPw/wPw
}else{
atPiPib(i-1,Uta,Utb,UtX,dbar) +
atPib(i-1,Uta,UtX[,i],UtX,dbar)*atPib(i-1,Utb,UtX[,i],UtX,dbar)*atPiPib(i-1,UtX[,i],UtX[,i],UtX,dbar)/atPib(i-1,UtX[,i],UtX[,i],UtX,dbar)^2 -
atPib(i-1,Uta,UtX[,i],UtX,dbar)*atPiPib(i-1,Utb,UtX[,i],UtX,dbar)/atPib(i-1,UtX[,i],UtX[,i],UtX,dbar) -
atPib(i-1,Utb,UtX[,i],UtX,dbar)*atPiPib(i-1,Uta,UtX[,i],UtX,dbar)/atPib(i-1,UtX[,i],UtX[,i],UtX,dbar)
}
}
atPiPia <- function(i,Uta,UtX,dbar){
if(i==1) {
aPw <- sum(Uta*UtX[,i]*dbar)
aPPw <- sum(Uta*UtX[,i]*dbar^2)
wPw <- sum(UtX[,i]^2*dbar)
wPPw <- sum(UtX[,i]^2*dbar^2)
sum(Uta^2*dbar^2)+aPw^2*wPPw/(wPw^2) - 2*aPw*aPPw/wPw
}else{
atPiPia(i-1,Uta,UtX,dbar) +
atPib(i-1,Uta,UtX[,i],UtX,dbar)^2*atPiPia(i-1,UtX[,i],UtX,dbar)/atPia(i-1,UtX[,i],UtX,dbar)^2 -
2*atPib(i-1,Uta,UtX[,i],UtX,dbar)*atPiPib(i-1,Uta,UtX[,i],UtX,dbar)/atPia(i-1,UtX[,i],UtX,dbar)
}
}
tr_Pi <- function(i,UtX,dbar){
if(i==1) {
wPw <- sum(UtX[,i]^2*dbar)
wPPw <- sum(UtX[,i]^2*dbar^2)
sum(dbar) - wPPw/wPw
}else{
tr_Pi(i-1,UtX,dbar) - atPiPia(i-1,UtX[,i],UtX,dbar)/atPia(i-1,UtX[,i],UtX,dbar)
}
}
dlogLik <- function(ratio,n,c0,Uty,UtX,d)
{
dbar <- 1/(ratio*d+1)
Tr_Hinv_G <- (n-sum(dbar))/ratio
ytPy <- atPia(c0+1,Uty,UtX=UtX,dbar=dbar)
ytPPy <- atPiPia(c0+1,Uty,UtX=UtX,dbar=dbar)
ytPGPy <- (ytPy-ytPPy)/ratio
dd <- -0.5*Tr_Hinv_G + 0.5*n * ytPGPy/ytPy
return(dd)
}
dlogResLik <- function(ratio,n,c0,Uty,UtX,d)
{
dbar <- 1/(ratio*d+1)
Tr_Px <- tr_Pi(c0+1,UtX=UtX,dbar=dbar)
Tr_Px_G <- (n-c0-1-Tr_Px)/ratio
ytPy <- atPia(c0+1,Uty,UtX=UtX,dbar=dbar)
ytPPy <- atPiPia(c0+1,Uty,UtX=UtX,dbar=dbar)
ytPGPy <- (ytPy-ytPPy)/ratio
dd <- -0.5*Tr_Px_G + 0.5*(n-c0-1)* ytPGPy/ytPy
return(dd)
}
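## dlogLik() and dlogResLik() are the first derivatives of the profile
## (restricted) log-likelihood with respect to the variance ratio
## varU/varE; searchInt() below brackets their root with uniroot() over
## successive subintervals of 'interval'.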
searchInt <- function(method,interval,n,c0,Uty,UtX,d,maxIter,tol,lower,upper,varP)
{
flag <- TRUE; i <- 1
convergence <- ratio <- dbar <- varU <- varE <- bHat <- msg <- NA
while(flag)
{
i <- i + 1
if(method=="REML"){
tmp <- try(uniroot(f=dlogResLik,interval=c(interval[i-1],interval[i]),n=n,c0=c0,Uty=Uty,
UtX=UtX,d=d,tol=tol,maxiter=maxIter,trace=2),
silent = TRUE)
}else{
tmp <- try(uniroot(f=dlogLik,interval=c(interval[i-1],interval[i]),n=n,c0=c0,Uty=Uty,
UtX=UtX,d=d,tol=tol,maxiter=maxIter,trace=2),
silent = TRUE)
}
if(class(tmp) == "list")
{
ratio0 <- tmp$root
if(ratio0 <= lower){
ratio0 <- lower
msg <- paste0("Root varU/varE is the lower bound ",lower)
}else{
if(ratio0 >= upper){
ratio0 <- upper
msg <- paste0("Root varU/varE is the upper bound ",upper)
}
}
dbar <- 1/(ratio0*d + 1)
qq1 <- t(Uty*dbar)%*%UtX
qq2 <- solve(sweep(t(UtX),2L,dbar,FUN="*")%*%UtX)
ytPy <- drop(sum(dbar*Uty^2)-qq1%*%qq2%*%t(qq1))
bHat <- drop(qq2%*%t(qq1))
varE <- ifelse(method=="REML",ytPy/(n-c0-1),ytPy/n)
varU <- ratio0*varE
if(varU <= 2*varP){
convergence <- tmp$iter <= maxIter
ratio <- ratio0
}
}
if(i == length(interval) | !is.na(convergence)) flag <- FALSE
}
list(ratio=ratio,varU=varU,varE=varE,convergence=convergence,
dbar=dbar,bHat=bHat,msg=msg)
}
getSecondAxis <- function(lambda,df,maxLength=6)
{
loglambda <- -log(lambda)
labels0 <- sort(unique(round(df)))
if(min(labels0)<1) labels0[which.min(labels0)] <- 1
if(stats::IQR(df)>0)
{
breaks0 <- stats::predict(stats::smooth.spline(df, loglambda),labels0)$y
}else breaks0 <- NULL
index <- 1
while(any((breaks0-breaks0[max(index)])>1)){
dd <- breaks0-breaks0[max(index)]
index <- c(index,which(dd > 1)[1])
}
breaks0 <- breaks0[index]
labels0 <- labels0[index]
if(length(breaks0)>maxLength){
index <- unique(round(seq(1,length(breaks0),length=maxLength)))
breaks0 <- breaks0[index]
labels0 <- labels0[index]
}
return(list(breaks=breaks0,labels=labels0))
}
plotNet <- function(fm, B, Z = NULL, K, subsetG = NULL, tst = NULL,
U = NULL, d = NULL, group = NULL, group.shape = NULL,
set.color = NULL, set.size = NULL, df = NULL, title, axis.labels = TRUE,
curve = FALSE, bg.color = "gray20", unified = TRUE, ntst = 36,
line.color = "gray90", line.tick = 0.3, legend.pos="right",
point.color = "gray20", sets = c("Testing","Supporting","Non-active"))
{
set <- PC1 <- PC2 <- PC1_TRN <- PC1_TST <- PC2_TRN <- PC2_TST <- NULL
legend.pos <- match.arg(legend.pos,
choices=c("right","bottomright","bottomleft","topleft","topright","none"))
if(!inherits(fm, "SSI")) stop("Object 'fm' is not of the class 'SSI'")
if(is.null(U) & is.null(d))
{
if(is.character(K)){
K <- readBinary(K)
}
if(is.null(K))
stop("Matrix 'K' must be a positive semi definite matrix\n")
if(!is.null(Z)) {
if(length(dim(Z))!=2) stop("Object 'Z' must be a matrix with ncol(Z)=nrow(K)\n")
K <- float::tcrossprod(Z,float::tcrossprod(Z,K))
}
tmp <- float::svd(K,nu=2,nv=0)
d <- tmp$d
U <- tmp$u
expvarPC <- 100*d/sum(d)
}else{
if(is.null(U)){
stop("You are providing the eigevalues, but not the eigenvectors")
}else{
if(is.null(d)){
message("You are providing the eigenvectors, but not the eigenvalues\n",
"No variance explained can be calculated")
expvarPC <- NULL
}else{
if(nrow(U) == length(d)){
expvarPC <- 100*d/sum(d)
}else expvarPC <- NULL
}
}
}
tmp <- paste0(" (",sprintf('%.1f',expvarPC),"%)")
if(length(tmp)<2) tmp <- NULL
labelsPC <- paste0("PC ",1:2,tmp[1:2])
if(!is.null(tst)){
if(any(!tst %in% fm$tst))
stop("Some elements in 'tst' vector are not contained in set 'fm$tst'")
}else tst <- fm$tst
if(!unified & length(tst) >= ntst){
cat("Large number of testing individuals. Only the first",ntst,"are shown\n")
tst <- tst[1:ntst]
}
justx <- ifelse(length(grep("left",legend.pos))>0,0,1)
justy <- ifelse(length(grep("bottom",legend.pos))>0,0,1)
if(!legend.pos %in% c("none","right")) legend.pos <- c(abs(justx-0.01),abs(justy-0.01))
theme0 <- ggplot2::theme(
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
legend.box.spacing = ggplot2::unit(0.4, "lines"),
legend.background = ggplot2::element_rect(fill = "gray95"),
panel.background = ggplot2::element_rect(fill = bg.color),
legend.justification = c(justx,justy),
legend.position=legend.pos,
legend.key.height = ggplot2::unit(0.9,"line"),
legend.key.width = ggplot2::unit(0.9, "lines"),
legend.title = ggplot2::element_blank(),
legend.margin = ggplot2::margin(t=0,b=0.25,l=0.25,r=0.25,unit='line'),
strip.text = ggplot2::element_blank(), panel.spacing = ggplot2::unit(0.1,"lines")
)
if(missing(B)){
if(is.null(df)) df <- summary.SSI(fm)$optCOR$df
if(0 > df | df > range(fm$df)[2])
stop("Parameter 'df' must be greater than zero and no greater than nTRN")
B <- as.matrix(coef.SSI(fm,df=df))
}else{
stopifnot(length(dim(B))==2L)
df <- mean(do.call(c, lapply(1:nrow(B), function(i) sum(abs(B[i,]) > 0))))
}
flagGp <- !is.null(group)
if(is.null(group)) group <- data.frame(group=rep(1,nrow(U)))
gpName <- colnames(group)
if(is.null(subsetG)) subsetG <- 1:nrow(U)
if(!(class(sets) == "character" & length(sets) == 3))
stop("Parameter 'sets' must be a triplet of 'character' type")
dat <- data.frame(id=1:nrow(U),set=sets[3],group=group,float::dbl(U[,1:2]))
dat$set <- as.character(dat$set)
dat$set[subsetG[tst]] <- sets[1]
index <- do.call(c, lapply(1:ncol(B), function(j) any(abs(B[fm$tst %in% tst,,drop=FALSE][,j]) > 0)))
dat$set[subsetG[fm$trn[index]]] <- sets[2]
dat$set[subsetG[fm$trn[!index]]] <- sets[3]
colnames(dat) <- c("id","set","group","PC1","PC2")
dat$group <- factor(as.character(dat$group))
dat$set <- factor(dat$set,levels=c(sets))
if(!flagGp) dat$group <- dat$set
levelsGp <- levels(dat$group)
if(length(levelsGp) > 5)
stop("Number of levels of 'group' must be at most 5")
if(is.null(group.shape)){
if(flagGp){
group.shape <- c(21,22,23,24,25)
}else group.shape <- c(21,21,21)
}
group.shape <- group.shape[1:length(levelsGp)]
if(is.null(set.color)){
## Default hex color codes were lost in extraction (text after '#'
## was stripped); generic placeholders stand in for the originals
set.color <- c("red", "orange", "gray60")
}
set.color <- set.color[1:length(sets)]
if(is.null(set.size)){
set.size <- c(2.5,1.5,1)
}
set.size <- set.size[1:length(sets)]
if(any(is.na(group.shape)))
stop("Parameter 'group.shape' must be of length ",length(levelsGp))
if(any(is.na(set.size)) | any(is.na(set.color)))
stop("Parameters 'set.size' and 'set.color' must be of length ",length(sets))
if(missing(title)){
title0 <- bquote(.(fm$name)*". Support set size="*.(round(df)))
theme0 <- theme0 + ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
}else{
title0 <- title
if(is.null(title)){
theme0 <- theme0 + ggplot2::theme(plot.title = ggplot2::element_blank())
}else{
theme0 <- theme0 + ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
}
}
if(!axis.labels){
theme0 <- theme0 + ggplot2::theme(axis.text=ggplot2::element_blank(),
axis.ticks=ggplot2::element_blank())
}
names(group.shape) <- levelsGp
names(set.color) <- names(set.size) <- sets
if(unified)
{
pt <- ggplot2::ggplot(dat,ggplot2::aes(x=PC1,y=PC2)) +
ggplot2::geom_point(data=dat[dat$set==sets[3],],ggplot2::aes(shape=group,fill=set),
color=point.color,size=set.size[3])
for(i in 1:length(tst))
{
indexTRN <- which(abs(B[which(fm$tst == tst[i]), ]) > 0)
if(length(indexTRN)>0)
{
dat1 <- dat[subsetG[fm$trn],c("PC1","PC2")][indexTRN,]
dat2 <- dat[subsetG[tst],c("PC1","PC2")][i,]
colnames(dat1) <- paste0(colnames(dat1),"_TRN")
colnames(dat2) <- paste0(colnames(dat2),"_TST")
dat1 <- data.frame(dat2[rep(1,nrow(dat1)),],dat1)
if(curve){
pt <- pt + ggplot2::geom_curve(ggplot2::aes(x=PC1_TST,y=PC2_TST,xend=PC1_TRN,yend=PC2_TRN),
data=dat1,alpha=0.4,size=line.tick,color=line.color,curvature=0.4)
}else{
pt <- pt + ggplot2::geom_segment(ggplot2::aes(x=PC1_TST,y=PC2_TST,xend=PC1_TRN,yend=PC2_TRN),
data=dat1,alpha=0.4,size=line.tick,color=line.color)
}
}
}
pt <- pt +
ggplot2::geom_point(data=dat[dat$set==sets[1],],ggplot2::aes(shape=group,fill=set),color=point.color,size=set.size[1]) +
ggplot2::geom_point(data=dat[dat$set==sets[2],],ggplot2::aes(shape=group,fill=set),color=point.color,size=set.size[2]) +
ggplot2::theme_bw() + theme0
}else{
dat2 <- c()
for(i in 1:length(tst))
{
indexTRN <- which(abs(B[which(fm$tst == tst[i]), ]) > 0)
if(length(indexTRN) > 0)
{
tmp <- dat[subsetG[fm$trn], ][-indexTRN,]
tmp$set <- sets[3]
tmp <- rbind(dat[subsetG[fm$trn], ][indexTRN,], tmp, dat[subsetG[tst], ][i,])
dat2 <- rbind(dat2,data.frame(tmp, ind = i))
}
}
pt <- ggplot2::ggplot(dat2,ggplot2::aes(x=PC1,y=PC2)) + ggplot2::facet_wrap(~ind) +
ggplot2::geom_point(data=dat2[dat2$set==sets[3],],ggplot2::aes(fill=set,shape=group),color=point.color,size=set.size[3]) +
ggplot2::geom_point(data=dat2[dat2$set==sets[2],],ggplot2::aes(fill=set,shape=group),color=point.color,size=set.size[2]) +
ggplot2::geom_point(data=dat2[dat2$set==sets[1],],ggplot2::aes(fill=set,shape=group),color=point.color,size=set.size[1]) +
ggplot2::theme_bw() + theme0
}
pt <- pt + ggplot2::labs(title=title0, x=labelsPC[1],y=labelsPC[2]) +
ggplot2::scale_shape_manual(values = group.shape,
guide=ggplot2::guide_legend(override.aes=list(size=2,fill="white"))) +
ggplot2::scale_fill_manual(values = set.color,
guide=ggplot2::guide_legend(override.aes=list(shape=21,size=2)))
if(!flagGp) pt <- pt + ggplot2::guides(shape="none")
pt
}
plotPath <- function(fm, Z=NULL, K=NULL, tst=NULL, title=NULL, maxCor=0.85)
{
k <- NULL
flagKinship <- FALSE
if(!inherits(fm,c("LASSO","SSI"))) stop("Object 'fm' is not of the class 'LASSO' or 'SSI'")
theme0 <- ggplot2::theme(
panel.grid.minor = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust = 0.5),
legend.background = ggplot2::element_rect(fill = "gray95"),
legend.box.spacing = ggplot2::unit(0.4, "lines"),
legend.key.height= ggplot2::unit(3,"line"),
legend.key.width = ggplot2::unit(0.8, "lines")
)
if(inherits(fm,"SSI"))
{
if(!is.null(K)){
flagKinship <- TRUE
if(is.character(K)){
K <- readBinary(K)
}
if(!is.null(Z))
{
if(length(dim(Z)) != 2) stop("Object 'Z' must be a matrix")
K <- float::tcrossprod(Z,float::tcrossprod(Z,K))
}
if(length(dim(K))!=2 | (length(K) != length(fm$y)^2))
stop("Product Z %*% K %*% t(Z) must be a squared matrix with number of rows (and columns) equal to the number of elements in 'y'")
}
beta <- coef.SSI(fm)
if(!is.null(tst)){
if(any(!tst %in% fm$tst)) stop("Some elements in 'tst' vector are not contained in 'fm$tst'")
indexTST <- which(fm$tst %in% tst)
}else indexTST <- seq_along(fm$tst)
beta <- beta[indexTST]
lambda <- apply(fm$lambda,2,mean)
df <- apply(fm$df,2,mean)
}else{
beta <- fm$beta
lambda <- fm$lambda
df <- fm$df
}
nDF <- length(df)
if(min(lambda) < .Machine$double.eps*1000) lambda[which.min(lambda)] <- min(lambda[lambda>0])/2
if(nDF==1) stop("Coefficients path plot can not be generated for 'nLambda=1'")
if(inherits(fm,"SSI"))
{
dat <- c()
trim <- length(fm$trn)*length(beta) > 20000
for(i in seq_along(beta))
{
b0 <- as.matrix(beta[[i]])
if(trim){
indexOK <- getIndexCorrelated(t(b0),maxCor)
}else indexOK <- seq_along(fm$trn)
tmp <- matrix(NA,nrow=1,ncol=length(indexOK))
if(!is.null(K)){
tmp <- K[fm$tst[indexTST[i]],fm$trn[indexOK],drop=FALSE]
if(float::storage.mode(tmp)=='float32') tmp <- float::dbl(tmp)
}
dimnames(tmp) <- list(fm$tst[indexTST[i]],fm$trn[indexOK])
tmp <- reshape2::melt(tmp)
colnames(tmp) <- c("tst_i","trn_i","value")
tmp <- tmp[rep(1:nrow(tmp),nDF),]
df0 <- rep(df,each=length(indexOK))
lambda0 <- rep(lambda,each=length(indexOK))
b0 <- as.vector(b0[indexOK,])
id <- factor(tmp$tst_i):factor(tmp$trn_i)
dat <- rbind(dat,data.frame(df=df0,lambda=lambda0,beta=float::dbl(b0),k=tmp$value,id=id))
}
}else{
id <- factor(rep(seq(nrow(beta)),ncol(beta)))
dat <- data.frame(df=rep(df,each=nrow(beta)),lambda=rep(lambda,each=nrow(beta)),beta=as.vector(beta),id=id)
}
ax2 <- getSecondAxis(lambda,df)
brks0 <- ax2$breaks
labs0 <- ax2$labels
title0 <- bquote("Coefficients path. "*.(fm$name))
if(!is.null(title)) title0 <- title
if(flagKinship)
{
pt <- ggplot2::ggplot(dat,ggplot2::aes(-log(lambda),beta,color=k,group=id)) +
viridis::scale_color_viridis() + ggplot2::geom_line() + ggplot2::theme_bw() + theme0 +
ggplot2::labs(title=title0,y=expression(beta),x=expression("-log("*lambda*")"))
}else{
pt <- ggplot2::ggplot(dat,ggplot2::aes(-log(lambda),beta,color=id,group=id))+
ggplot2::geom_line() + ggplot2::theme_bw() + theme0 +
ggplot2::theme(legend.position = "none") +
ggplot2::labs(title=title0,y=expression(beta),x=expression("-log("*lambda*")"))
}
if(length(brks0)>3){
pt <- pt + ggplot2::scale_x_continuous(sec.axis=ggplot2::sec_axis(~.+0,"Number of predictors",breaks=brks0,labels=labs0))
}
pt
}
.onAttach <- function(libname, pkgname) {
packageStartupMessage("
|=======================================================================|
| ._______. ._______. ._______. ._______. |
| | ._____| | ._____| | ._____| |__. .__| |
| | |_____. | |___. | |_____. | | |
| |_____. | | .___| |_____. | | | Authors: |
| ._____| | | | ._____| | .__| |__. Marco Lopez-Cruz |
| |_______| |_| |_______| |_______| Gustavo de los Campos |
| |
| Sparse Family and Selection Index. Version 1.0.1 (Jan 26, 2022) |
| Type 'citation('SFSI')' to know how to cite SFSI |
| Type 'help(package='SFSI',help_type='html')' to see help |
| Type 'browseVignettes('SFSI')' to see documentation |
| Type 'demo(package='SFSI')' to see demos |
| |
|=======================================================================|
")
}
|
normM <- function(X){
  if (is.data.frame(X)) {
    X <- as.matrix(X)
  }
  normX <- sqrt(sum(diag(t(X) %*% X)))
  Y <- X / normX
  return(Y)
}
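## Example (illustrative): normM() rescales a matrix to unit Frobenius
## norm, since sqrt(sum(diag(t(X) %*% X))) equals sqrt(sum(X^2)).
X0 <- matrix(1:4, nrow = 2)
stopifnot(abs(sqrt(sum(normM(X0)^2)) - 1) < 1e-12)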
|
qread.annotated.matrix <- function(
file, type, annot.cols=1, header=TRUE, sep="\t", comment.char="", quote="", check.names=FALSE, stringsAsFactors=FALSE, ...
) {
x <- read.table(file,
header=header, sep=sep, comment.char=comment.char, quote=quote,
check.names=check.names, stringsAsFactors=stringsAsFactors, ...);
if (length(annot.cols) == 1) {
annot.cols <- 1:annot.cols;
}
meta <- data.frame(x[, annot.cols], check.names=check.names);
colnames(meta) <- colnames(x)[annot.cols];
data <- as.matrix(x[, -annot.cols]);
rownames(data) <- meta[, 1];
structure(list(meta=meta, data=data), class="annotated.matrix")
}
qwrite.annotated.matrix <- function(
x, file, type, quote=FALSE, sep="\t", row.names=FALSE, col.names=TRUE, ...
) {
d <- data.frame(x$meta, x$data, check.names=FALSE);
write.table(d, file,
quote=quote, sep=sep, row.names=row.names, col.names=col.names, ...)
}
qread.amtx <- function(file, type, annot.cols=1, ...) {
qread.annotated.matrix(file, annot.cols=annot.cols)
}
qwrite.amtx <- function(x, file, type, ...) {
qwrite.annotated.matrix(x, file)
}
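## Illustrative round trip for the annotated.matrix reader/writer pair
## (tab-delimited file with one annotation column; the path is a
## temporary placeholder):
## am <- structure(list(meta = data.frame(id = c("g1", "g2")),
##                      data = matrix(1:4, 2, dimnames = list(c("g1", "g2"),
##                                                            c("s1", "s2")))),
##                 class = "annotated.matrix")
## f <- tempfile(fileext = ".txt")
## qwrite.annotated.matrix(am, f)
## am2 <- qread.annotated.matrix(f, annot.cols = 1)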
|
c3mtc <-
function(dataset,null=NULL,mtc=TRUE,adj="bonferroni",alpha=0.05,nullit=NA, estimator="pearson", disc="none",adjacency=FALSE, igraph=TRUE){
mat.mim=mimwrap(dataset, estimator=estimator, disc=disc)
net=c3(mat.mim)
net[upper.tri(net)]=0
edges<-which(net>0,arr.ind=TRUE)
pval <- matrix(0,nrow=nrow(edges),ncol=1)
if(mtc==TRUE){
if(is.null(null)){
null=makenull(dataset, nullit=nullit, estimator=estimator, disc=disc)
}
maxn=max(null)
lnull=length(null)
for(e in 1:nrow(edges)){
G1 <- edges[e,1]
G2 <- edges[e,2]
if(mat.mim[G1,G2]>maxn){
pval[e]=1/lnull
}else{
pval[e]=.getpval(mat.mim[G1,G2],null)
}
}
padj=p.adjust(pval,method=adj)
for(e in which(padj>alpha)){
G1 <- edges[e,1]
G2 <- edges[e,2]
net[G1,G2]=0
net[G2,G1]=0
}
}else{
net=c3(mat.mim)
}
if(igraph==TRUE){
if(adjacency==TRUE){
net=.mat2igraph(net, weighted=FALSE)
}else{
net=.mat2igraph(net)
}
}else{
net=as.matrix(forceSymmetric(net,uplo="L"))
if(adjacency==TRUE){
net=(net>0)*1
}
}
return(net)
}
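## Illustrative call (mimwrap(), c3(), makenull() and .getpval() are
## assumed to ship with this package): infer a network with Bonferroni
## multiple-testing correction at alpha = 0.05.
## net <- c3mtc(dataset, mtc = TRUE, adj = "bonferroni", alpha = 0.05)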
|
library(arules)
datar2 <- read.csv("dar3b.csv", header = TRUE, row.names=1)
str(datar2)
datar2 <- as.matrix(datar2)
str(datar2)
head(datar2)
datar2.trans <- as(datar2, "transactions")
str(datar2.trans)
summary(datar2.trans)
inspect(datar2.trans)
itemFrequencyPlot(datar2.trans, ylim=c(0,1))
options(digits=2)
datar2.rule <- apriori(datar2.trans, parameter = list(maxlen=3, support=0.04, confidence=0.8, ext=TRUE))
datar2.rule = sort(datar2.rule, by='lift', decreasing=T)
summary(datar2.rule)
inspect(sort(subset(datar2.rule, subset= (support >0.5))))
inspect(datar2.rule[1:5])
rule1 <- subset(datar2.rule, subset=(rhs %in% "elective6") & (lift>1.0))
rule1
inspect(rule1)
rule2 <- subset(datar2.rule, subset=(rhs %in% c('elective3','elective4')) & (lift>1.5))
rule2 = sort(rule2, by='confidence', decreasing=T)
inspect(rule2)
rule3 <- subset(datar2.rule, subset=(rhs %in% "elective1") & (support >0.5))
rule3 = sort(rule3, by='support', decreasing=T)
inspect(rule3)
|
"psSignal" <-
function(y, x_signal, x_index = c(1: ncol(x_signal)), nseg = 10,
bdeg = 3, pord = 3, lambda = 1, wts = 1+ 0*y, family =
"gaussian", link = "default", m_binomial = 1 + 0*y,
r_gamma = wts, y_predicted = NULL, x_predicted
= x_signal, ridge_adj = 0, int = TRUE) {
x <- x_index
n <- length(y)
parms <- pspline_checker(
family, link, bdeg, pord,
nseg, lambda, ridge_adj, wts
)
family <- parms$family
link <- parms$link
q <- parms$bdeg
d <- parms$pord
ridge_adj <- parms$ridge_adj
lambda <- parms$lambda
nseg <- parms$nseg
wts <- parms$wts
xl <- min(x)
xr <- max(x)
xmax <- xr + 0.01 * (xr - xl)
xmin <- xl - 0.01 * (xr - xl)
dx <- (xmax - xmin) / nseg
knots <- seq(xmin - q * dx, xmax + q * dx, by = dx)
b <- bbase(x, xl, xr, nseg, q)
n_col <- ncol(b)
p_ridge <- NULL
if (ridge_adj > 0) {
nix_ridge <- rep(0, n_col)
p_ridge <- sqrt(ridge_adj) * diag(rep(1, n_col))
}
p <- diag(n_col)
p <- diff(p, diff = d)
p <- sqrt(lambda) * p
nix <- rep(0, n_col - d)
x_signal <- as.matrix(x_signal)
xb <- x_signal %*% as.matrix(b)
if (int) {
xb <- cbind(rep(1, n), xb)
p <- cbind(rep(0, nrow(p)), p)
if (ridge_adj > 0) {
p_ridge <- cbind(rep(0, nrow(p_ridge)), p_ridge)
}
}
ps_fit <- pspline_fitter(y, B = xb, family, link, P = p,
P_ridge = p_ridge, wts, m_binomial, r_gamma )
mu <- ps_fit$mu
coef <- ps_fit$coef
eta <- ps_fit$eta
bin_percent_correct <- NULL
if (family == "binomial") {
pcount <- 0
p_hat <- mu / m_binomial
for (ii in 1:n) {
if (p_hat[ii] > 0.5) {
count <- y[ii]
}
if (p_hat[ii] <= 0.5) {
count <- m_binomial[ii] - y[ii]
}
count <- pcount + count
pcount <- count
}
bin_percent_correct <- count / sum(m_binomial)
}
w <- ps_fit$w
w_aug <- c(w, (nix + 1))
qr = ps_fit$f$qr
h <- hat(qr, intercept = FALSE)[1:n]
trace <- sum(h)
R <- qr.R(qr)
bread = chol2inv(R)
dev_ = dev_calc(family, y, mu, m_binomial, r_gamma)
dev = dev_$dev
dispersion_parm = dev_$dispersion_parm
cv <- press_mu <- press_e <- NULL
if (family == "gaussian") {
dev <- sum(ps_fit$f$residuals[1:n]^2)
dispersion_parm <- dev / (n - trace)
press_e <- ps_fit$f$residuals[1:n] / (1 - h)
cv <- sqrt(sum((press_e)^2) / (n))
press_mu <- y - press_e
}
aic <- dev + 2 * trace
if (int) {
yint <- ps_fit$coef[1]
beta <- b %*% (as.vector(ps_fit$f$coef)[2:(n_col + 1)])
}
if (!int) {
yint <- NULL
beta <- b %*% as.vector(ps_fit$f$coef)
}
ii = c((1+int):(n_col+int))
var_beta = b%*% bread[ii, ii]%*%t(b)
stdev_beta = as.vector(sqrt(dispersion_parm*diag(var_beta)))
summary_predicted <- NULL
cv_predicted <- eta_predicted <- NULL
if (!missing(x_predicted)) {
x_predicted <- as.matrix(x_predicted)
if (!int) {
if (ncol(x_predicted) > 1) {
eta_predicted <- x_predicted %*% beta
var_pred <- x_predicted %*% b %*% bread %*% t(
x_predicted %*% b
)
}
if (ncol(x_predicted) == 1) {
eta_predicted <- t(x_predicted) %*% beta
var_pred <- t(x_predicted) %*% b %*% bread %*%
t(b) %*% x_predicted
}
}
if (int) {
dim_xp <- nrow(x_predicted)
if (ncol(x_predicted) > 1) {
one_xpred_b <- cbind(rep(1, dim_xp), (x_predicted %*% b))
eta_predicted <- x_predicted %*% beta + yint
var_pred <- one_xpred_b %*% bread %*% t(one_xpred_b)
}
if (ncol(x_predicted) == 1) {
one_xpred_b <- cbind(1, t(x_predicted) %*% b)
eta_predicted <- t(x_predicted) %*% beta + yint
var_pred <- (one_xpred_b) %*% bread %*% t(
one_xpred_b
)
}
}
stdev_pred <- as.vector(sqrt(diag(var_pred)))
stdev_pred <- sqrt(dispersion_parm) * stdev_pred
pivot <- as.vector(2 * stdev_pred)
upper <- eta_predicted + pivot
lower <- eta_predicted - pivot
summary_predicted <- cbind(lower, eta_predicted, upper)
if (!missing(y_predicted)) {
if (family == "gaussian") {
cv_predicted <- sqrt(sum((y_predicted -
eta_predicted)^2) / (length(y_predicted)))
}
}
bin_percent_correct <- NULL
if (link == "logit") {
summary_predicted <- 1 / (1 + exp(-summary_predicted))
pcount <- 0
p_hat <- exp(eta_predicted) / (1 + exp(eta_predicted))
if (!missing(y_predicted)) {
for (ii in 1:length(eta_predicted)) {
if (p_hat[ii] > 0.5) {
count <- y_predicted[ii]
}
if (p_hat[ii] <= 0.5) {
count <- 1 - y_predicted[ii]
}
count <- pcount + count
pcount <- count
}
bin_percent_correct <- count / length(y_predicted)
}
}
summary_predicted <- inverse_link(summary_predicted, link)
if (link == "reciprocal") {
summary_predicted <- summary_predicted[, 3:1]
}
summary_predicted <- as.matrix(summary_predicted)
dimnames(summary_predicted) <- list(NULL, c(
"-2std_Lower",
"Predicted", "+2std_Upper"
))
}
llist <- list(
  b = b, B = b, coef = coef, y_intercept = yint, int = int, x_index = x_index,
  x_signal = x_signal, y = y, press_mu = press_mu,
  bin_percent_correct = bin_percent_correct,
  family = family, link = link, nseg = nseg, pord = d, bdeg = q,
  lambda = lambda, aic = aic, deviance = dev, eff_df = trace - 1,
  df_resid = n - trace + 1,
  dispersion_param = dispersion_parm, summary_predicted = summary_predicted,
  eta_predicted = eta_predicted, cv_predicted = cv_predicted, cv = cv,
  mu = mu, eta = eta, beta = beta, stdev_beta = stdev_beta
)
class(llist) <- "pssignal"
return(llist)
}
|
LimDist <- function(X,rate,epsilon=0.01,iter){
if (missingArg(X)) {
stop("The X argument is missing.")
}
if (missingArg(rate)){
stop("The 'rate' argument is missing.")
}
if (missingArg(iter))
iter <- 1000
if(rate==TRUE){
rs <- rowSums(X)
r <- max(rs)
P_hat <- X/r
n <- dim(X)[1]
for(j in 1:n){
P_hat[j,j]<- 1 - (rs[j]/r)
}
P <- methods::new("markovchain", transitionMatrix= P_hat)
output <- P[1:dim(X)[1]]
if(markovchain::is.irreducible(P) & markovchain::period(P)==1){
j <- 2
condition <- 1
while(j < iter & condition > epsilon){
Pmas <- output
output <- Pmas%*%output
condition <- max(abs(output - Pmas))
j <- j+1
}
output <- list(indicator=rate,message="This is an irreducible Continuous Markov Chain.", Lim_dist = output[1,],error = condition,n_iter=j,P_hat = P[1:dim(X)[1]])
class(output) <- "modesto"
return(output)
}
else{
output <- list(indicator=!rate,message="This is not an irreducible Continuous Markov Chain.")
class(output) <- "modesto"
return(output)
}
}
if(rate==FALSE){
P <- new("markovchain", transitionMatrix= X)
output <- P[1:dim(X)[1]]
if(markovchain::is.irreducible(P) & markovchain::period(P)==1){
j <- 2
condition <- 1
while(j < iter & condition > epsilon){
Pmas <- output
output <- Pmas%*%output
condition <- max(abs(output - Pmas))
j <- j+1
}
output <- list(indicator=!rate,message="This is an irreducible Continuous Markov Chain.", Lim_dist = output[1,],error = condition, n_iter=j,P_hat = P[1:dim(X)[1]])
class(output) <- "modesto"
return(output)
}
else{
output <- list(indicator=rate,message="This is not an irreducible Continuous Markov Chain.")
class(output) <- "modesto"
return(output)
}
}
}
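## Illustrative use of LimDist() with a toy 2-state rate matrix
## (requires the 'markovchain' package; when rate=TRUE the matrix is
## uniformized by the largest exit rate before iterating):
## Q <- matrix(c(0, 2, 3, 0), nrow = 2, byrow = TRUE,
##             dimnames = list(c("a", "b"), c("a", "b")))
## LimDist(Q, rate = TRUE, epsilon = 1e-4, iter = 1000)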
|
demo("mouse_move", package="MSG")
|
model_evaluation <- function(object) {
check_object(object, type = "exp")
df <- data.frame(y_hat = object$y_hat, y = object$y)
df <- df[order(df$y_hat, decreasing = TRUE), ]
roc_y <- factor(df$y)
positive_label <- levels(roc_y)[2]
negative_label <- levels(roc_y)[1]
tp_duplicates <- cumsum(df$y == positive_label)
fp_duplicates <- cumsum(df$y == negative_label)
duplicates <- rev(duplicated(rev(df$y_hat)))
tp <- c(0, tp_duplicates[!duplicates])
fp <- c(0, fp_duplicates[!duplicates])
n_pos <- sum(df$y == positive_label)
n_neg <- sum(df$y == negative_label)
fn <- n_pos - tp
tn <- n_neg - fp
n_pos_pred <- tp + fp
n_neg_pred <- fn + tn
tpr <- tp / n_pos
fpr <- fp / n_neg
rpp <- (tp + fp) / (tp + fp + tn + fn)
precision <- tp / (tp + fp)
recall <- tp / n_pos
result <- data.frame(y_hat = c(0, object$y_hat[!duplicates]),
y = c(0, factor(object$y)[!duplicates]),
cutoffs = c(0, df$y_hat[!duplicates]),
tpr = tpr,
fpr = fpr,
rpp = rpp,
tp = tp,
precision = precision,
recall = recall,
label = factor(object$label), stringsAsFactors = TRUE)
colnames(result) <- paste0("_", colnames(result), "_")
class(result) <- c("auditor_model_evaluation", "data.frame")
result
}
modelEvaluation <- function(object) {
warning("Please note that 'modelEvaluation()' is now deprecated, it is better to use 'model_evaluation()' instead.")
model_evaluation(object)
}
|
"summaryNAltraj" <-
function(x)
{
if (!inherits(x,"ltraj"))
stop("x should be of class 'ltraj'")
nna <- unlist(lapply(x, function(i) length(i$x[is.na(i$x)])))
n <- unlist(lapply(x, function(i) length(i$x)))
ani <- unlist(lapply(x, function(i) attr(i, "id")))
burst <- unlist(lapply(x, function(i) attr(i, "burst")))
so <- data.frame(id=ani, burst=burst,
missing.values=nna, N.reloc=n, percent = 100*nna/n)
return(so)
}
|
model_parameters.bracl <- function(model,
ci = .95,
bootstrap = FALSE,
iterations = 1000,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
verbose = TRUE,
...) {
nl <- tryCatch(
{
nlevels(factor(insight::get_response(model)))
},
error = function(e) {
0
}
)
if (nl > 2) {
merge_by <- c("Parameter", "Response")
} else {
merge_by <- "Parameter"
}
out <- .model_parameters_generic(
model = model,
ci = ci,
bootstrap = bootstrap,
iterations = iterations,
merge_by = merge_by,
standardize = standardize,
exponentiate = exponentiate,
robust = FALSE,
p_adjust = p_adjust,
...
)
attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
out
}
ci.bracl <- function(x, ci = .95, method = NULL, robust = FALSE, ...) {
params <- insight::get_parameters(x)
out <- .ci_generic(model = x, ci = ci, method = method, robust = robust, ...)
if ("Response" %in% colnames(params)) {
out$Response <- params$Response
}
out
}
standard_error.bracl <- function(model, ...) {
smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
se <- smry[[2]]
names(se) <- rownames(smry)
params <- insight::get_parameters(model)
.data_frame(
Parameter = params$Parameter,
SE = as.vector(se),
Response = params$Response
)
}
p_value.bracl <- function(model, ...) {
smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
p <- smry[[4]]
names(p) <- rownames(smry)
params <- insight::get_parameters(model)
.data_frame(
Parameter = params$Parameter,
p = as.vector(p),
Response = params$Response
)
}
model_parameters.multinom <- model_parameters.bracl
ci.multinom <- ci.bracl
degrees_of_freedom.multinom <- function(model, method = NULL, ...) {
if (identical(method, "normal")) {
Inf
} else {
insight::n_obs(model) - model$edf
}
}
degrees_of_freedom.nnet <- degrees_of_freedom.multinom
standard_error.multinom <- function(model, ...) {
se <- tryCatch(
{
stderr <- summary(model)$standard.errors
if (is.null(stderr)) {
vc <- insight::get_varcov(model)
stderr <- as.vector(sqrt(diag(vc)))
} else {
if (is.matrix(stderr)) {
tmp <- c()
for (i in 1:nrow(stderr)) {
tmp <- c(tmp, as.vector(stderr[i, ]))
}
} else {
tmp <- as.vector(stderr)
}
stderr <- tmp
}
stderr
},
error = function(e) {
vc <- insight::get_varcov(model)
as.vector(sqrt(diag(vc)))
}
)
params <- insight::get_parameters(model)
if ("Response" %in% colnames(params)) {
.data_frame(
Parameter = params$Parameter,
SE = se,
Response = params$Response
)
} else {
.data_frame(
Parameter = params$Parameter,
SE = se
)
}
}
p_value.multinom <- function(model, method = "residual", ...) {
stat <- insight::get_statistic(model)
out <- p_value.default(model, method = method, ...)
if (!is.null(stat$Response)) {
out$Response <- stat$Response
}
out
}
simulate_parameters.multinom <- function(model,
iterations = 1000,
centrality = "median",
ci = .95,
ci_method = "quantile",
test = "p-value",
...) {
data <- simulate_model(model, iterations = iterations, ...)
out <- .summary_bootstrap(
data = data,
test = test,
centrality = centrality,
ci = ci,
ci_method = ci_method,
...
)
params <- insight::get_parameters(model)
out$Parameter <- params$Parameter
if ("Response" %in% colnames(params)) {
out$Response <- params$Response
}
class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out))
attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
attr(out, "iterations") <- iterations
attr(out, "ci") <- ci
attr(out, "ci_method") <- ci_method
attr(out, "centrality") <- centrality
out
}
model_parameters.brmultinom <- model_parameters.bracl
ci.brmultinom <- ci.bracl
standard_error.brmultinom <- standard_error.multinom
p_value.brmultinom <- p_value.multinom
|
readWKB <- function(wkb, id = NULL, proj4string = CRS(as.character(NA))) {
if(inherits(wkb, "raw") && (is.null(id) || length(id) == 1)) {
wkb <- list(wkb)
}
if(!is.list(wkb)) {
stop("wkb must be a list")
}
if(isTRUE(length(wkb) < 1)) {
stop("wkb must have length 1 or greater")
}
if(!all(vapply(X = wkb, FUN = inherits, FUN.VALUE = logical(1), "raw"))) {
stop("Each element of wkb must be a raw vector")
}
namesSpecified <- TRUE
if(is.null(id)) {
if(is.null(names(wkb))) {
namesSpecified <- FALSE
id <- as.character(seq_along(wkb))
} else {
id <- names(wkb)
}
}
if(!identical(length(wkb), length(id))) {
stop("wkb and id must have same length")
}
if(is.character(proj4string)) {
proj4string = CRS(proj4string)
}
if(length(proj4string) != 1) {
stop("proj4string must have length 1")
}
obj <- mapply(wkb, id, FUN = function(WkbGeom, Id) {
rc <- rawConnection(WkbGeom, "r")
on.exit(close(rc))
seek(rc, 0L)
byteOrder <- readByteOrder(rc)
if(byteOrder == as.raw(1L)) {
endian <- "little"
} else {
endian <- "big"
}
wkbType <- readWkbType(rc, endian)
if(wkbType == 1L) {
readWkbPoint(rc, endian)
} else if(wkbType == 2L) {
readWkbLineString(rc, Id, endian)
} else if(wkbType == 3L) {
readWkbPolygon(rc, Id, endian)
} else if(wkbType == 4L) {
readWkbMultiPoint(rc, endian)
} else if(wkbType == 5L) {
readWkbMultiLineString(rc, Id, endian)
} else if(wkbType == 6L) {
readWkbMultiPolygon(rc, Id, endian)
} else if(wkbType == 7L) {
stop("GeometryCollection is not a supported geometry type")
} else {
stop("Supported geometry types are Point, LineString, Polygon, MultiPoint, MultiLineString, and MultiPolygon")
}
}, SIMPLIFY = FALSE, USE.NAMES = FALSE)
objClass <- unique(vapply(obj, function(x) class(x)[1], character(1)))
if(isTRUE(length(objClass) > 1)) {
stop("Elements of wkb cannot have different geometry types")
}
if(objClass == "numeric") {
if(namesSpecified) {
names(obj) <- id
}
SpatialPoints(do.call("rbind", obj), proj4string = proj4string)
} else if(objClass == "matrix" || objClass == "data.frame") {
if(namesSpecified) {
names(obj) <- id
}
lapply(X = obj, FUN = SpatialPoints, proj4string = proj4string)
} else if(objClass == "Lines") {
SpatialLines(obj, proj4string = proj4string)
} else if(objClass == "Polygons") {
SpatialPolygons(obj, proj4string = proj4string)
} else {
stop("Unexpected object")
}
}
readWkbMultiPoint <- function(rc, endian) {
numPoints <- readInteger(rc, endian)
t(vapply(seq_len(numPoints), function(...) {
byteOrder <- readByteOrder(rc)
if(byteOrder == as.raw(1L)) {
endian <- "little"
} else {
endian <- "big"
}
wkbType <- readWkbType(rc, endian)
if(wkbType != 1L) {
stop("MultiPoints may contain only Points")
}
readPoint(rc, endian)
}, numeric(2)))
}
readWkbMultiLineString <- function(rc, multiLineStringId, endian) {
numLineStrings <- readInteger(rc, endian)
Lines(unlist(lapply(seq_len(numLineStrings), function(...) {
byteOrder <- readByteOrder(rc)
if(byteOrder == as.raw(1L)) {
endian <- "little"
} else {
endian <- "big"
}
wkbType <- readWkbType(rc, endian)
if(wkbType != 2L) {
stop("MultiLineStrings may contain only LineStrings")
}
numPoints <- readInteger(rc, endian)
Line(readPoints(rc, numPoints, endian))
})), multiLineStringId)
}
readWkbMultiPolygon <- function(rc, multiPolygonId, endian) {
numPolygons <- readInteger(rc, endian)
Polygons(unlist(lapply(seq_len(numPolygons), function(...) {
byteOrder <- readByteOrder(rc)
if(byteOrder == as.raw(1L)) {
endian <- "little"
} else {
endian <- "big"
}
wkbType <- readWkbType(rc, endian)
if(wkbType != 3L) {
stop("MultiPolygons may contain only Polygons")
}
numRings <- readInteger(rc, endian)
readLinearRings(rc, numRings, endian)
})), multiPolygonId)
}
readWkbPoint <- function(rc, endian) {
readPoint(rc, endian)
}
readWkbLineString <- function(rc, lineStringId, endian) {
numPoints <- readInteger(rc, endian)
Lines(list(Line(readPoints(rc, numPoints, endian))), lineStringId)
}
readWkbPolygon <- function(rc, polygonId, endian) {
numRings <- readInteger(rc, endian)
Polygons(readLinearRings(rc, numRings, endian), polygonId)
}
readLinearRings <- function(rc, numRings, endian) {
lapply(seq_len(numRings), function(ringId) {
readLinearRing(rc, endian)
})
}
readLinearRing <- function(rc, endian) {
numPoints <- readInteger(rc, endian)
Polygon(readPoints(rc, numPoints, endian))
}
readPoints <- function(rc, numPoints, endian) {
t(vapply(seq_len(numPoints), function(pointId) {
readPoint(rc, endian)
}, numeric(2)))
}
readPoint <- function(rc, endian) {
c(x = readDouble(rc, endian), y = readDouble(rc, endian))
}
readByteOrder <- function(rc) {
readByte(rc)
}
readWkbType <- function(rc, endian) {
readInteger(rc, endian)
}
readByte <- function(rc) {
readBin(rc, what = "raw", size = 1L)
}
readInteger <- function(rc, endian) {
readBin(rc, what = "integer", size = 4L, endian = endian)
}
readDouble <- function(rc, endian) {
readBin(rc, what = "double", size = 8L, endian = endian)
}
|
rasch_mml2_calcprob_missing1 <- function( theta.k, b, beta, delta.miss, pjk,
fixed.a=NULL, return_nonresponse_probs=FALSE, irtmodel="missing1")
{
I <- length(b)
if (is.null(fixed.a)){
fixed.a <- rep(1,I)
}
TP <- nrow(theta.k)
thetaM1 <- sirt_matrix2(theta.k[,1], nrow=I)
thetaM2 <- sirt_matrix2(theta.k[,2], nrow=I)
ab <- fixed.a*b
M1 <- stats::plogis(fixed.a*thetaM1-ab)
if (irtmodel=="missing1"){
M2a <- stats::plogis(thetaM2-beta)
M2b <- stats::plogis(thetaM2-beta-delta.miss)
} else {
M2a <- stats::plogis(thetaM2-beta+delta.miss)
M2b <- stats::plogis(thetaM2-beta)
}
pjk[,1,] <- 1 - M1
pjk[,2,] <- M1
pjk[,1,] <- M2a*pjk[,1,]
pjk[,2,] <- M2b*pjk[,2,]
m3 <- pjk[,3,] <- (1-M2a)*(1-M1) + (1-M2b)*M1
if (return_nonresponse_probs){
pjk[,1,] <- (1-M2a)*(1-M1) / m3
pjk[,2,] <- (1-M2b)*M1 / m3
pjk <- pjk[,1:2,]
}
return(pjk)
}
.calcprob.missing1 <- rasch_mml2_calcprob_missing1
|
get_cnp_checksum <- function(cnp) {
dec_cnp <- stringr::str_extract_all(as.character(cnp), "[0-9]") %>%
unlist()
mult_cnp <- as.integer(dec_cnp[1:12]) *
c(2, 7, 9, 1, 4, 6, 3, 5, 8, 2, 7, 9)
remainder <- sum(mult_cnp) %% 11
if (remainder == 10) {
11 - remainder
} else {
remainder
}
}
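## Illustrative check (the CNP below is made up, so the comparison may be
## FALSE): for a valid Romanian CNP, the 13th digit equals the checksum
## computed from the first 12.
## cnp <- "1960101223344"
## get_cnp_checksum(cnp) == as.integer(substr(cnp, 13, 13))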
|
context("na_mean")
test_that("All NA vector gives warning", {
expect_warning(na_mean(c(NA, NA, NA, NA, NA)))
})
test_that("Correct results for all options with a modifed tsAirgap dataset (additionalNAs at end)", {
skip_on_cran()
x <- tsAirgap
x[135:144] <- NA
expect_equal(round(mean(na_mean(x, option = "median")), digits = 1), 260.0)
expect_equal(round(mean(na_mean(x, option = "mean")), digits = 1), 264.1)
expect_equal(round(mean(na_mean(x, option = "mode")), digits = 1), 258.8)
})
test_that("Correct results for all options with a modifed tsAirgap dataset (additionalNAs at start)", {
skip_on_cran()
x <- tsAirgap
x[1:5] <- NA
expect_equal(round(mean(na_mean(x, option = "median")), digits = 1), 282.7)
expect_equal(round(mean(na_mean(x, option = "mean")), digits = 1), 284.8)
expect_equal(round(mean(na_mean(x, option = "mode")), digits = 1), 278.2)
})
test_that("Correct results for all options with the tsAirgap dataset", {
skip_on_cran()
x <- tsAirgap
expect_equal(round(mean(na_mean(x, option = "median")), digits = 1), 277.9)
expect_equal(round(mean(na_mean(x, option = "mean")), digits = 1), 279.8)
expect_equal(round(mean(na_mean(x, option = "mode")), digits = 1), 275.2)
})
test_that("Imputation works for data.frame", {
x <- data.frame(tsAirgap, tsAirgap, tsAirgapComplete)
expect_false(anyNA(na_mean(x, option = "mean")))
expect_false(anyNA(na_mean(x, option = "mode")))
expect_false(anyNA(na_mean(x, option = "median")))
})
test_that("Warning for wrong input for option parameter", {
expect_error(na_mean(tsAirgap, option = "Wrong"))
})
test_that("Test NA at beginning", {
x <- tsAirgap
x[1:4] <- NA
expect_false(anyNA(na_mean(x, option = "mean")))
expect_false(anyNA(na_mean(x, option = "mode")))
expect_false(anyNA(na_mean(x, option = "median")))
expect_false(anyNA(na_mean(x)))
})
test_that("Test NA at end", {
x <- tsAirgap
x[140:144] <- NA
expect_false(anyNA(na_mean(x, option = "mean")))
expect_false(anyNA(na_mean(x, option = "mode")))
expect_false(anyNA(na_mean(x, option = "median")))
expect_false(anyNA(na_mean(x)))
})
test_that("Multiple NAs in a row", {
x <- tsAirgap
x[40:80] <- NA
expect_false(anyNA(na_mean(x, option = "mean")))
expect_false(anyNA(na_mean(x, option = "mode")))
expect_false(anyNA(na_mean(x, option = "median")))
expect_false(anyNA(na_mean(x)))
})
test_that("Over 90% NAs", {
x <- tsAirgap
x[10:140] <- NA
expect_false(anyNA(na_mean(x, option = "mean")))
expect_false(anyNA(na_mean(x, option = "mode")))
expect_false(anyNA(na_mean(x, option = "median")))
expect_false(anyNA(na_mean(x)))
})
|
missDiag <- function(
original,
imputed,
formula=NULL,
skip_n=25,
scale=FALSE,
adjust='ebal',
use_imputed_cov = TRUE,
convert = c("numeric"),
categories = 3,
verbose=0,
output_diag=FALSE,
ebal_param=param_ebal(),
sbw_param=param_sbw(),
cobalt_param=param_cobalt()){
call_ <- sys.call()
if(!adjust %in% c("none", "sbw", "ebal")) {
stop("Parameter 'adjust' must be one of these: none, sbw or ebal") }
original <- as.data.frame(original)
if("mids" %in% class(imputed)){
imputed <- mice::complete(imputed,action='all')
M <- length(imputed)
} else if("list" %in% class(imputed)) {
M <- length(imputed)
} else {
imputed <- list(as.data.frame(imputed))
M <- 1
}
if(!is.null(formula)){
formulas <- construct_formulas(formula,
vars=colnames(original))
} else {
vars <- colnames(original)
vars_na <- apply(is.na(original), 2, sum)>0
vars <- vars[vars_na]
dv <- paste(vars, collapse = "|")
iv <- paste(colnames(original), collapse="+")
formula <- as.formula(paste0(dv, "~", iv))
formulas <- construct_formulas(formula, vars=vars)
}
out <- list()
for(m in 1:M){
if(verbose>0 & M>1) cat("Dataset number:",m,"\n")
imp <- as.data.frame(imputed[[m]])
for(f in formulas){
if( use_imputed_cov == FALSE){
imp_prep <- make_categorical_cov(
original_df = original,
imputed_df = imp,
formula = as.formula(f),
categories = categories,
convert = convert)
} else {
imp_prep <- imp
}
if(adjust=='sbw'){
w <- do.call(run_sbw, c(
list(original= original,
imputed = imp_prep,
formula = as.formula(f),
skip_n = skip_n,
scale = scale,
verbose = verbose),
sbw_param))
}
if(adjust=='ebal'){
w <- do.call(run_ebal, c(
list(original= original,
imputed = imp_prep,
formula = as.formula(f),
skip_n = skip_n,
scale = scale,
verbose = verbose),
ebal_param))
}
if(adjust=='none'){
if(verbose>0){
message("No weights constructed." )
}
w <- rep(1,nrow(imp_prep))
}
if(!is.null(w)){
yname <- all.vars(f)[1]
xname <- all.vars(f)
res <- do.call(bal.tab, c(
list(x=imp_prep[,xname,drop=FALSE],
treat = is.na(original[[yname]]),
weights = w),
cobalt_param))$Balance
res$m <- m
res$vname_lhs <- yname
res$vname <- rownames(res)
out[[length(out)+1]] <- res
}
}
}
out <- do.call(rbind,out)
rownames(out) <- NULL
out <- tolower_remove_dot_vars(out)
out <- out[,!grepl("_un|_threshold", colnames(out))]
out$diag <- !mapply(grepl, out$vname_lhs, out$vname)
attr(out, 'call') <- call_
if(output_diag==FALSE){
out <- subset(out, diag==FALSE)
}
return(out)
}
|
stop_nice <- function(...) {
stop(paste(strwrap(paste(...), exdent = 7), collapse = "\n"), call. = FALSE)
}
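## Example: stop_nice() re-flows a long error message to the console
## width with a hanging indent and suppresses the call in the output.
## try(stop_nice("A long error message that would otherwise run past",
##               "the console width is wrapped before being signalled."))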
|
chain <- function(.data, ...) {
UseMethod("chain")
}
chain.ExprBuilder <- function(.data, ..., .parent_env = rlang::caller_env()) {
start_expr(end_expr.ExprBuilder(.data, ..., .parent_env = .parent_env), ...)
}
|
context("LearningCars")
test_that("Cars weights", {
data(fusion_cars)
a <- NewFusionInput("µA", NewMfTrapezoidalInf(4, 20), "A")
v <- NewFusionInput("µV", NewMfTrapezoidalSup(100, 500), "V")
s <- NewFusionInput("µS", NewMfTrapezoidalSup(120, 220), "S")
c <- NewFusionInput("µC", NewMfTrapezoidalInf(6, 16), "C")
degrees <- .aggregate_node(fusion_cars, list(a, v, s, c))
target <- c(1, 0, 0, 0)
wam_weights <- LearnWamWeights(degrees, target)
expect_equal(wam_weights, c(0.5, 0, 0.5, 0))
owa_weights <- LearnOwaWeights(degrees, target)
expect_equal(owa_weights, c(1, 0, 0, 0))
})
|
context("adjacency spectral embedding")
std <- function(x) {
x <- zapsmall(x)
apply(x, 2, function(col) {
if (any(col < 0) && col[which(col != 0)[1]] < 0) { -col } else { col }
})
}
mag_order <- function(x) {
order(abs(x), sign(x), decreasing=TRUE)
}
mag_sort <- function(x) {
x[mag_order(x)]
}
test_that("Undirected, unweighted case works", {
library(igraph)
set.seed(42)
g <- random.graph.game(10, 15, type="gnm", directed=FALSE)
no <- 7
A <- g[]
A <- A + 1/2 * diag(degree(g))
ss <- eigen(A)
U <- std(ss$vectors)
X <- std(ss$vectors %*% sqrt(diag(abs(ss$values))))
au_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=TRUE)
as_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_la$D, equals(ss$values[1:no]))
expect_that(au_la$D, equals(ss$values[1:no]))
expect_that(std(as_la$X), equals(std(U[,1:no])))
expect_that(std(au_la$X), equals(X[,1:no]))
au_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=TRUE)
as_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_lm$D, equals(mag_sort(ss$values)[1:no]))
expect_that(au_lm$D, equals(mag_sort(ss$values)[1:no]))
expect_that(std(as_lm$X), equals(std(U[,mag_order(ss$values)][,1:no])))
expect_that(std(au_lm$X), equals(X[,mag_order(ss$values)][,1:no]))
au_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=TRUE)
as_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_sa$D, equals(ss$values[vcount(g)-1:no+1]))
expect_that(au_sa$D, equals(ss$values[vcount(g)-1:no+1]))
expect_that(std(as_sa$X), equals(std(U[,vcount(g)-1:no+1])))
expect_that(std(au_sa$X), equals(X[,vcount(g)-1:no+1]))
})
test_that("Undirected, weighted case works", {
library(igraph)
set.seed(42)
g <- random.graph.game(10, 20, type="gnm", directed=FALSE)
E(g)$weight <- sample(1:5, ecount(g), replace=TRUE)
no <- 3
A <- g[]
A <- A + 1/2 * diag(degree(g))
ss <- eigen(A)
U <- std(ss$vectors)
X <- std(ss$vectors %*% sqrt(diag(abs(ss$values))))
au_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=TRUE)
as_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_la$D, equals(ss$values[1:no]))
expect_that(std(as_la$X), equals(std(U[,1:no])))
expect_that(au_la$D, equals(ss$values[1:no]))
expect_that(std(au_la$X), equals(X[,1:no]))
au_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=TRUE)
as_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_lm$D, equals(mag_sort(ss$values)[1:no]))
expect_that(au_lm$D, equals(mag_sort(ss$values)[1:no]))
expect_that(std(as_lm$X), equals(std(U[,mag_order(ss$values)][,1:no])))
expect_that(std(au_lm$X), equals(X[,mag_order(ss$values)][,1:no]))
au_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=TRUE)
as_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=FALSE)
expect_that(std(as_sa$X), equals(std(U[,vcount(g)-1:no+1])))
expect_that(std(au_sa$X), equals(X[,vcount(g)-1:no+1]))
})
test_that("Directed, unweighted case works", {
library(igraph)
set.seed(42)
g <- random.graph.game(10, 20, type="gnm", directed=TRUE)
no <- 3
A <- g[]
A <- A + 1/2 * diag(degree(g))
ss <- svd(A)
U <- std(ss$u)
V <- std(ss$v)
X <- std(ss$u %*% sqrt(diag(ss$d)))
Y <- std(ss$v %*% sqrt(diag(ss$d)))
au_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=TRUE)
as_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_la$D, equals(ss$d[1:no]))
expect_that(au_la$D, equals(ss$d[1:no]))
expect_that(std(as_la$X), equals(std(U[,1:no])))
expect_that(std(as_la$Y), equals(std(V[,1:no])))
expect_that(std(au_la$X), equals(X[,1:no]))
expect_that(std(au_la$Y), equals(Y[,1:no]))
au_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=TRUE)
as_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_lm$D, equals(ss$d[1:no]))
expect_that(au_lm$D, equals(ss$d[1:no]))
expect_that(std(as_lm$X), equals(std(U[,1:no])))
expect_that(std(as_lm$Y), equals(std(V[,1:no])))
expect_that(std(au_lm$X), equals(X[,1:no]))
expect_that(std(au_lm$Y), equals(Y[,1:no]))
au_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=TRUE)
as_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=FALSE)
expect_that(as_sa$D, equals(ss$d[vcount(g)-1:no+1]))
expect_that(au_sa$D, equals(ss$d[vcount(g)-1:no+1]))
expect_that(std(as_sa$X), equals(std(U[,vcount(g)-1:no+1])))
expect_that(std(as_sa$Y), equals(std(V[,vcount(g)-1:no+1])))
expect_that(std(au_sa$X), equals(X[,vcount(g)-1:no+1]))
expect_that(std(au_sa$Y), equals(Y[,vcount(g)-1:no+1]))
})
test_that("Directed, weighted case works", {
library(igraph)
set.seed(42)
g <- random.graph.game(10, 20, type="gnm", directed=TRUE)
E(g)$weight <- sample(1:5, ecount(g), replace=TRUE)
no <- 3
A <- g[]
A <- A + 1/2 * diag(degree(g))
ss <- svd(A)
U <- std(ss$u)
V <- std(ss$v)
X <- std(ss$u %*% sqrt(diag(ss$d)))
Y <- std(ss$v %*% sqrt(diag(ss$d)))
au_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=TRUE)
as_la <- embed_adjacency_matrix(g, no=no, which="la",
cvec=degree(g)/2, scaled=FALSE)
expect_that(std(as_la$X), equals(std(U[,1:no])))
expect_that(std(as_la$Y), equals(std(V[,1:no])))
expect_that(std(au_la$X), equals(X[,1:no]))
expect_that(std(au_la$Y), equals(Y[,1:no]))
au_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=TRUE)
as_lm <- embed_adjacency_matrix(g, no=no, which="lm",
cvec=degree(g)/2, scaled=FALSE)
expect_that(std(as_lm$X), equals(std(U[,1:no])))
expect_that(std(as_lm$Y), equals(std(V[,1:no])))
expect_that(std(au_lm$X), equals(X[,1:no]))
expect_that(std(au_lm$Y), equals(Y[,1:no]))
au_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=TRUE)
as_sa <- embed_adjacency_matrix(g, no=no, which="sa",
cvec=degree(g)/2, scaled=FALSE)
expect_that(std(as_sa$X), equals(std(U[,vcount(g)-1:no+1])))
expect_that(std(as_sa$Y), equals(std(V[,vcount(g)-1:no+1])))
expect_that(std(au_sa$X), equals(X[,vcount(g)-1:no+1]))
expect_that(std(au_sa$Y), equals(Y[,vcount(g)-1:no+1]))
})
test_that("Issue
library(igraph)
set.seed(12345)
g <- erdos.renyi.game(15, .4)
w <- -log(runif(ecount(g)))
X1 <- embed_adjacency_matrix(g, 2, weights= w)
E(g)$weight <- w
X2 <- embed_adjacency_matrix(g, 2)
expect_that(X1$D, equals(X2$D))
})
test_that("Issue
library(igraph)
set.seed(12345)
pref.matrix <- diag(0.2, 2) + 0.2
block.sizes <- c(800, 800)
n <- sum(block.sizes)
g <- sbm.game(n, pref.matrix, block.sizes, directed=TRUE)
for (i in 1:25) {
ase <- embed_adjacency_matrix(g, 2)
expect_that(mean(ase$X %*% t(ase$Y)), equals(0.299981018354173))
}
})
|
context("subset kinship matrices")
test_that("subset.kinship works", {
iron <- read_cross2(system.file("extdata", "iron.zip", package="qtl2"))
iron <- iron[c("2","3","4","5","6","7"), 2:6]
pr <- calc_genoprob(iron)
k <- calc_kinship(pr)
k_loco <- calc_kinship(pr, "loco")
expect_equal(subset_kinship(k, ind=c(2,4)), k[c(2,4), c(2,4)])
expect_equal(subset_kinship(k, ind=c("2","4")), k[c("2","4"), c("2","4")])
expect_equal(subset_kinship(k_loco, ind=c(2,4)), lapply(k_loco, function(a) a[c(2,4), c(2,4)]))
expect_equal(subset_kinship(k_loco, ind=c("2","4")), lapply(k_loco, function(a) a[c("2","4"), c("2","4")]))
expect_equal(subset_kinship(k, ind=c(2,4), chr=5), k[c(2,4), c(2,4)])
expect_equal(subset_kinship(k, ind=c("2","4"), chr=5), k[c("2","4"), c("2","4")])
expect_equal(subset_kinship(k_loco, ind=c(2,4), chr=5), k_loco[["5"]][c(2,4), c(2,4)])
expect_equal(subset_kinship(k_loco, ind=c("2","4"), chr=5), k_loco[["5"]][c("2","4"), c("2","4")])
expect_equal(subset_kinship(k_loco, ind=c(2,4), chr=c(2,5)), lapply(k_loco[c("2","5")], function(a) a[c(2,4), c(2,4)]))
expect_equal(subset_kinship(k_loco, ind=c("2","4"), chr=c(2,5)), lapply(k_loco[c("2", "5")], function(a) a[c("2","4"), c("2","4")]))
expect_equal(subset_kinship(k_loco, ind=c(2,4), chr=-c(2,4)), lapply(k_loco[c("3","5","6")], function(a) a[c(2,4), c(2,4)]))
expect_equal(subset_kinship(k_loco, ind=c("2","4"), chr=c("-2","-4")), lapply(k_loco[c("3","5","6")], function(a) a[c("2","4"), c("2","4")]))
})
|
visitSequence.determine <- function( impMethod, vis, data, maxit=10)
{
is_char <- FALSE
if ( ! is.numeric(vis) ){
is_char <- TRUE
vis0 <- vis
vis <- match( colnames(data), vis )
names(vis) <- vis0
}
dat <- data
ind <- grep( "~ I", impMethod )
I1 <- length(ind)
vis1 <- vis0 <- vis
pass_vars <- names(impMethod)[ind]
iter <- 0
while (iter < maxit ){
vis <- vis1
for (var.ii in pass_vars){
vis1 <- visitSequence_determine_handle_variable( var.ii=var.ii, impMethod=impMethod,
vis1=vis1, dat=dat )
}
visit_constant <- visitSequence_determine_equal_vecs( v1=vis, v2=vis1 )
if ( visit_constant ){
iter <- maxit + 1
}
iter <- iter + 1
}
if (is_char){
vis1 <- names(vis1)
}
return(vis1)
}
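## Illustrative call (hypothetical 'mice' objects): re-derive a visit
## sequence so that passively imputed variables ("~ I(...)" methods) are
## visited after the variables they depend on:
## vis2 <- visitSequence.determine(impMethod = imp$method,
##                                 vis = imp$visitSequence, data = dat)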
|
survHIMA <- function(X, Z, M, OT, status, FDRcut = 0.05, verbose = FALSE){
MZ <- cbind(M,Z,X)
n <- length(X)
p <- dim(M)[2]
if(is.null(Z))
q <- 0
else
q <- dim(Z)[2]
message("Step 1: Mediators screening ...", " (", Sys.time(), ")")
d_0 <- 1*round(n/log(n))
beta_SIS <- matrix(0,1,p)
for (i in 1:p){
ID_S <- c(i, (p+1):(p+q+1))
MZ_SIS <- MZ[,ID_S]
fit <- survival::coxph(survival::Surv(OT, status) ~ MZ_SIS)
beta_SIS[i] <- fit$coefficients[1]
}
alpha_SIS <- matrix(0,1,p)
XZ <- cbind(X,Z)
for (i in 1:p){
fit_a <- lsfit(XZ,M[,i],intercept = TRUE)
est_a <- matrix(coef(fit_a))[2]
alpha_SIS[i] <- est_a
}
ab_SIS <- alpha_SIS*beta_SIS
ID_SIS <- which(-abs(ab_SIS) <= sort(-abs(ab_SIS))[min(p, d_0)])
d <- length(ID_SIS)
if(verbose) message(" ", d, " mediators selected from the screening.")
message("Step 2: De-biased Lasso estimates ...", " (", Sys.time(), ")")
P_beta_SIS <- matrix(0,1,d)
beta_DLASSO_SIS_est <- matrix(0,1,d)
beta_DLASSO_SIS_SE <- matrix(0,1,d)
MZ_SIS <- MZ[,c(ID_SIS, (p+1):(p+q+1))]
MZ_SIS_1 <- t(t(MZ_SIS[,1]))
for (i in 1:d){
V <- MZ_SIS
V[,1] <- V[,i]
V[,i] <- MZ_SIS_1
LDPE_res <- LDPE_func(ID = 1, X = V, OT = OT, status = status)
beta_LDPE_est <- LDPE_res[1]
beta_LDPE_SE <- LDPE_res[2]
V1_P <- abs(beta_LDPE_est)/beta_LDPE_SE
P_beta_SIS[i] <- 2*(1-pnorm(V1_P,0,1))
beta_DLASSO_SIS_est[i] <- beta_LDPE_est
beta_DLASSO_SIS_SE[i] <- beta_LDPE_SE
}
alpha_SIS_est <- matrix(0,1,d)
alpha_SIS_SE <- matrix(0,1,d)
P_alpha_SIS <- matrix(0,1,d)
XZ <- cbind(X,Z)
for (i in 1:d){
fit_a <- lsfit(XZ,M[,ID_SIS[i]],intercept = TRUE)
est_a <- matrix(coef(fit_a))[2]
se_a <- ls.diag(fit_a)$std.err[2]
sd_1 <- abs(est_a)/se_a
P_alpha_SIS[i] <- 2*(1-pnorm(sd_1,0,1))
alpha_SIS_est[i] <- est_a
alpha_SIS_SE[i] <- se_a
}
message("Step 3: Multiple-testing procedure ...", " (", Sys.time(), ")")
PA <- cbind(t(P_alpha_SIS), t(P_beta_SIS))
P_value <- apply(PA,1,max)
N0 <- dim(PA)[1]*dim(PA)[2]
input_pvalues <- PA + matrix(runif(N0,0,10^{-10}),dim(PA)[1],2)
nullprop <- HDMT::null_estimation(input_pvalues,lambda=0.5)
fdrcut <- HDMT::fdr_est(nullprop$alpha00,nullprop$alpha01,nullprop$alpha10, nullprop$alpha1,nullprop$alpha2,input_pvalues,exact=0)
ID_fdr <- which(fdrcut <= FDRcut)
  if (length(ID_fdr) == 0)
    stop("No significant mediator was identified at FDRcut = ", FDRcut)
  alpha_hat <- alpha_SIS_est[ID_fdr]
  alpha_se <- alpha_SIS_SE[ID_fdr]
  beta_hat <- beta_DLASSO_SIS_est[ID_fdr]
  beta_se <- beta_DLASSO_SIS_SE[ID_fdr]
  ID <- ID_SIS[ID_fdr]
  P_max <- P_value[ID_fdr]
  out_result <- data.frame(ID = ID,
                           alpha = alpha_hat,
                           alpha_se = alpha_se,
                           beta = beta_hat,
                           beta_se = beta_se,
                           pvalue = P_max)
}
|
print.ggmnonreg <- function(x, ...){
# these classes share identical printing of the weighted adjacency matrix
wadj_classes <- c("ggm_search", "ggm_inference", "ising_search",
"mixed_search", "ggm_compare")
if (inherits(x, wadj_classes)) {
colnames(x$wadj) <- 1:ncol(x$wadj)
print(as.data.frame(x$wadj), ...)
} else if (is(x, "enr")) {
print_enr(x, ...)
} else if (is(x, "predictability")) {
print_r2(round(x$r2, 2), ...)
} else if (is(x, "eip")) {
print_eip(x, 2)
}
}
|
message("\n---- Test ignorelist ----")
testthat::skip_on_cran()
testthat::skip_on_ci()
testthat::skip_if_not(is_gcloud_configured(), "Google account is not set")
testthat::skip_if_not(check_gcloud_connection(), "Google Cloud server is not reachable")
s2_l2a_list <- c(
"S2B_MSIL2A_20200801T100559_N0214_R022_T32TNR_20200801T135302.SAFE",
"S2B_MSIL2A_20200801T100559_N0214_R022_T32TNS_20200801T135302.SAFE"
)
testthat::test_that(
"Tests on safelist read/write", {
testthat::skip_if_not(file.exists(file.path(
safe_dir, s2_l2a_list[1],
"GRANULE/L2A_T32TNR_A017780_20200801T101400/IMG_DATA/R10m",
"T32TNR_20200801T100559_B08_10m.jp2"
)))
testthat::skip_if_not(file.exists(file.path(
safe_dir, s2_l2a_list[2],
"GRANULE/L2A_T32TNS_A017780_20200801T101400/IMG_DATA/R10m",
"T32TNS_20200801T100559_B08_10m.jp2"
)))
outdir_17 <- tempfile(pattern = "out_test17_")
dir.create(dirname(outdir_17), showWarnings = FALSE)
out_17 <- sen2r(
gui = FALSE,
online = FALSE,
step_atmcorr = "l2a",
extent = system.file("extdata/vector/barbellino.geojson", package = "sen2r"),
extent_name = "Barbellino",
timewindow = as.Date("2020-08-01"),
list_indices = c("NDVI","MSAVI2"),
list_rgb = c("RGB432B"),
mask_type = "land",
max_mask = 10,
path_l2a = safe_dir,
path_out = outdir_17,
thumbnails = FALSE
)
ignorelist_path <- file.path(outdir_17, ".ignorelist.txt")
expect_true(file.exists(ignorelist_path))
ignorelist <- RcppTOML::parseTOML(ignorelist_path)
expect_length(ignorelist, 4)
expect_equal(
sort(names(ignorelist)),
c("dates_cloudcovered", "mask_type", "max_mask", "names_missing")
)
expect_equal(ignorelist$max_mask, 10)
expect_equal(ignorelist$mask_type, "land")
expect_equal(ignorelist$dates_cloudcovered, as.Date("2020-08-01"))
out_17_2 <- sen2r(attr(out_17, "procpath"))
expect_equal(length(out_17_2), 0)
}
)
|
"print.svecest" <-
function(x, digits = max(3, getOption("digits") - 3), ...){
text1 <- "SVEC Estimation Results:"
cat(paste("\n", text1, "\n", sep = ""))
row <- paste(rep("=", nchar(text1)), collapse = "")
cat(row, "\n")
cat("\n")
cat("\nEstimated contemporaneous impact matrix:\n")
print(x$SR, digits = digits, ...)
cat("\nEstimated long run impact matrix:\n")
print(x$LR, digits = digits, ...)
invisible(x)
}
|
vol_splx <- function(S) {
# volume of a d-simplex whose d+1 vertices are the rows of S
d <- dim(S)[2]
# edge vectors from the first vertex to the remaining d vertices
A <- S[2:dim(S)[1], ] - matrix(1, nrow = d, ncol = 1) %*% S[1, ]
V <- (1 / factorial(d)) * abs(det(A))
return(V)
}
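# Quick sanity check: the 2-simplex with vertex rows (0,0), (1,0), (0,1)
# has area 1/2.
stopifnot(all.equal(vol_splx(rbind(c(0, 0), c(1, 0), c(0, 1))), 0.5))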
|
jDivKernelFitPdf <-
function(s,h,x){
f <- 1/h * jNormPdf((x-s)/h) * ((s-x)/h)
return(mean(f))
}
|
setGraph<- function(obj, ...)
{
UseMethod("setGraph")
}
setGraph.default<- function(obj,...)
{
warning("Default method is called on unrecognized object")
return(obj)
}
setGraph.Network<- function(obj, x, ...)
{
obj$graph<- x
return(obj)
}
setRootDepSet<- function(obj, subset, root, ...)
{
UseMethod("setRootDepSet", obj)
}
setRootDepSet.default<- function(obj, subset, root, ...)
{
return("Default method called on unrecognized object")
}
setRootDepSet.RootDepSet<- function(obj, subset, root, ...)
{
if (is.atomic(subset)&& is.null(dim(subset)))
{
if (!is.character(subset))
stop(" 'subset' must be a character vector")
if(!is.character(root))
stop(" 'root' must be a character")
if (! (root %in% subset))
stop("the root must be part of the subset")
} else if (is.list(subset)) {
vld<- sapply(subset, function(x) is.character(x))
if(sum(!vld)!=0)
stop(" 'subset' must be a list of character vectors, with characters corresponding to the names of the nodes ")
if(!is.character(root))
stop(" 'root' must be a character")
for (i in 1:length(subset))
{
if (! (root[i] %in% subset[[i]]))
stop("the root must be part of the subset")
}
} else stop("'subset' must be either one dimensional array or a list")
obj$value<- subset
names(obj$value)<- root
obj$root<- root
message("From setRootDepSet.RootDepSet: The order of the subset must correspond to the
order of its corresponding root")
return(obj)
}
setParams<- function(obj, value, ...)
{
UseMethod("setParams")
}
setParams.default<- function(obj, value, ...)
{
return("Default method called on unrecognized object")
}
setParams.HRMnetwork<- function(obj, value,...)
{
g<- obj$graph
ne<- get.edge.attribute(g, "name", E(g))
if (length(value)<length(E(g)))
stop("The number of parameters is smaller than the number of edges")
if (is.null(names(value)))
{
names(value) <- get.edge.attribute(g, "name", E(g))
message("From setParams.HRMtree: Names have been attributed to the vector 'value' in the order corresponding to the order of the edges: The fist element has the name of the first edge, the second element the name of the second edge, etc.")
}
obj$depParams[ne]<- value[ne]
message("From setParams.HRMtree: The parameters have been attached to the edges according to their names")
return(obj)
}
setParams.HRMtree<- function(obj, value,...)
{
g<- obj$graph
ne<- get.edge.attribute(g, "name", E(g))
if (length(value)<length(E(g)))
stop("The number of parameters is smaller than the number of edges")
if (is.null(names(value)))
{
names(value) <- get.edge.attribute(g, "name", E(g))
message("From setParams.HRMtree: Names have been attributed to the vector 'value' in the order corresponding to the order of the edges: The fist element has the name of the first edge, the second element the name of the second edge, etc.")
}
obj$depParams[ne]<- value[ne]
message("From setParams.HRMtree: The parameters have been attached to the edges according to their names")
return(obj)
}
resetParams<- function(obj, ...)
{
UseMethod("resetParams")
}
resetParams.MLE1params<- function(obj, cnames)
{
x<- rep(1,length(cnames))
names(x)<- cnames
class(x)<- class(obj)
return(x)
}
|
context("Tests for sensitivity and sensquery")
library(bnmonitor)
data("travel")
data("fire_alarm")
|
library(googleAuthR)
library(googleCloudStorageR)
something <- tryCatch({
gcs_get_object("schedule/test.csv",
bucket = "mark-edmondson-public-files")
}, error = function(ex) {
NULL
})
something_else <- data.frame(X1 = 1,
time = Sys.time(),
blah = paste(sample(letters, 10, replace = TRUE), collapse = ""))
something <- rbind(something, something_else)
googleAuthR::gar_gce_auth()
tmp <- tempfile(fileext = ".csv")
on.exit(unlink(tmp))
write.csv(something, file = tmp, row.names = FALSE)
gcs_upload(tmp,
bucket = "mark-edmondson-public-files",
name = "schedule/test.csv")
|
library(Bessel)
xs <- 100*(1:7)
nus <- 450 + 10*(0:9)
d.xn <- expand.grid(nu = nus, x = xs)
M <- with(d.xn,
cbind(K = besselK(x,nu),
K_exp = besselK(x,nu, expon.scaled = TRUE),
K_nA.2 = besselK.nuAsym(x, nu, log = TRUE, k.max=2),
K_nA.3 = besselK.nuAsym(x, nu, log = TRUE, k.max=3),
K_nA.4 = besselK.nuAsym(x, nu, log = TRUE, k.max=4))
)
A <- M
datt <- attr(d.xn, "out.attrs")
dim(A) <- c(datt$dim, ncol(M))
dimnames(A) <- c(datt$dimnames, list(colnames(M)))
A
stopifnot(
all.equal(M[,3], M[,4], tol=1e-12)
,
all.equal(M[,4], M[,5], tol=2e-15)
,
all.equal(M[,"K"], exp(M[,5]), tol= 1e-12)
)
cat('Time elapsed: ', proc.time(),'\n')
|
octileDistanceW <- function(cluster1,cluster2,weight){
D <- 1
D2 <- sqrt(D + D)
value <- 0
minimal <- c()
if(is.null(weight)){
for (index in c(1:ncol(cluster1))) {
aux <- abs(cluster1[index] - cluster2[index])
value <- value + aux
minimal <- c(minimal, aux)
}
} else {
for (index in c(1:ncol(cluster1))) {
aux <- weight[index] * abs(cluster1[index] - cluster2[index])
value <- value + aux
minimal <- c(minimal, aux)
}
}
dist <- D * value + (D2 - 2 * D) * min(minimal)
dist
}
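# Worked example: for single-row inputs with coordinate differences 3 and 4,
# the unweighted octile distance is 7 + (sqrt(2) - 2) * 3, about 5.24.
octileDistanceW(matrix(c(0, 0), nrow = 1), matrix(c(3, 4), nrow = 1), weight = NULL)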
|
ebic.fbed.glmm.ordinal.reps <- function(y, x, id, reps = NULL, univ = NULL, gam = NULL, wei = NULL, K = 0) {
dm <- dim(x)
n <- dm[1]
p <- dm[2]
ind <- 1:p
lik2 <- numeric(p)
sela <- NULL
card <- 0
zevar <- Rfast::check_data(x)
if ( sum( zevar > 0 ) > 0 ) x[, zevar] <- rnorm( n * length(zevar) )
if ( is.null(gam) ) {
con <- 2 - log(p) / log(n)
} else con <- 2 * gam
if ( (con) < 0 ) con <- 0
lik1 <- BIC( ordinal::clmm(y ~ 1 + reps + (1|id), weights = wei) )
runtime <- proc.time()
if ( is.null(univ) ) {
for ( i in ind ) {
fit2 <- ordinal::clmm( y ~ x[, i] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * log(p)
}
lik2[zevar] <- Inf
n.tests <- p
stat <- lik1 - lik2
univ <- list()
univ$ebic <- lik2
} else {
n.tests <- 0
lik2 <- univ$ebic
lik2[zevar] <- Inf
stat <- lik1 - lik2
}
s <- which(stat > 0)
if ( length(s) > 0 ) {
sel <- which.max(stat)
sela <- sel
s <- s[ - which(s == sel) ]
lik1 <- lik2[sel]
sa <- stat[sel]
lik2 <- rep( lik1, p )
while ( sum(s > 0) > 0 ) {
M <- length(sela) + 1
for ( i in ind[s] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests <- n.tests + length(ind[s])
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
}
card <- length(sela)
if (K == 1) {
M <- length(sela) + 1
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[2] <- length( ind[-c(sela, zevar)] )
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
while ( sum(s > 0) > 0 ) {
M <- length(sela) + 1
for ( i in ind[s] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[2] <- n.tests[2] + length( ind[s] )
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
}
card <- c(card, sum(sela > 0) )
}
if ( K > 1) {
M <- length(sela) + 1
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[2] <- length(ind[-c(sela, zevar)])
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
while ( sum(s > 0) > 0 ) {
M <- length(sela) + 1
for ( i in ind[s] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[2] <- n.tests[2] + length(ind[s])
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
}
vim <- 1
card <- c(card, sum(sela > 0) )
while ( vim < K & card[vim + 1] - card[vim] > 0 ) {
vim <- vim + 1
M <- length(sela) + 1
for ( i in ind[-c(sela, zevar)] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[vim + 1] <- length(ind[-c(sela, zevar)])
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
while ( sum(s > 0) > 0 ) {
M <- length(sela) + 1
for ( i in ind[s] ) {
fit2 <- ordinal::clmm( y ~ x[, c(sela, i)] + reps + (1|id), weights = wei )
lik2[i] <- BIC(fit2) + con * lchoose(p, M)
}
n.tests[vim + 1] <- n.tests[vim + 1] + length(ind[s])
stat <- lik1 - lik2
s <- which(stat > 0)
sel <- which.max(stat) * ( length(s)>0 )
sa <- c(sa, stat[sel])
sela <- c(sela, sel[sel>0])
s <- s[ - which(s == sel) ]
if (sel > 0) {
lik1 <- lik2[sel]
lik2 <- rep(lik1, p)
}
}
card <- c(card, sum(sela > 0) )
}
}
}
runtime <- proc.time() - runtime
len <- sum( sela > 0 )
if (len > 0) {
res <- cbind(sela[1:len], sa[1:len] )
info <- matrix(nrow = length(card), ncol = 2)
info[, 1] <- card
info[, 2] <- n.tests
} else {
res <- matrix(c(0, 0), ncol = 2)
info <- matrix(c(0, p), ncol = 2)
}
colnames(res) <- c("Vars", "eBIC difference")
rownames(info) <- paste("K=", 1:length(card)- 1, sep = "")
colnames(info) <- c("Number of vars", "Number of tests")
list(univ = univ, res = res, info = info, runtime = runtime)
}
|
setMethod('predict', signature(object='CirclesRange'),
function(object, x, ext=NULL, mask=FALSE, filename='', ...) {
if ( extends(class(x)[1], 'Raster')) {
if (! mask) {
x <- raster(x)
}
if (! is.null(ext)) {
x <- crop(x, ext)
}
if (mask) {
xx <- rasterize(object@polygons, raster(x), field=1, fun='sum', mask=FALSE, update=FALSE, updateValue="NA", getCover=FALSE, silent=TRUE, ...)
xx <- mask(xx, x, filename=filename, ...)
} else {
xx <- rasterize(object@polygons, raster(x), field=1, fun='sum', mask=FALSE, update=FALSE, updateValue="NA", getCover=FALSE, silent=TRUE, filename=filename, ...)
}
return(xx)
} else {
if (! inherits(x, 'SpatialPoints') ) {
x <- data.frame(x[,1:2])
colnames(x) <- c('x', 'y')
coordinates(x) <- ~ x + y
}
return( .pointsInPolygons(x, object@polygons, sum) )
}
}
)
|
visregLatticePlot <- function(v, partial, band, rug, whitespace, strip.names, top, line.par, fill.par, points.par, ...) {
x <- v$res[, v$meta$x]
y <- v$res$visregRes
b <- v$res[, v$meta$by]
xx <- v$fit[, v$meta$x]
yy <- v$fit$visregFit
bb <- v$fit[, v$meta$by]
lwr <- v$fit$visregLwr
upr <- v$fit$visregUpr
if (is.factor(bb)) {
b <- droplevels(b)
bb <- droplevels(bb)
}
xlim <- if (is.factor(xx)) c(0, 1) else range(xx)
if (partial) {
ylim <- range(c(y, lwr, upr), na.rm=TRUE)
} else {
ylim <- range(c(yy, lwr, upr), na.rm=TRUE)
}
pad <- 0.05*diff(ylim)
ylim[1] <- ylim[1]-pad
ylim[2] <- ylim[2]+pad
pad <- 0.04*diff(xlim)
xlim[1] <- xlim[1]-pad
xlim[2] <- xlim[2]+pad
ylab <- if (is.null(v$meta$yName)) paste("f(", v$meta$x, ")", sep="") else v$meta$yName
new.args <- list(...)
if (identical(strip.names, FALSE)) {
strip <- lattice::strip.custom(strip.names=FALSE, factor.levels=levels(as.factor(bb)), strip.levels=c(TRUE, TRUE), fg=lattice::trellis.par.get("strip.background")$col)
} else if (identical(strip.names, TRUE)) {
if (is.factor(v$fit[, v$meta$by])) {
strip <- lattice::strip.custom(strip.names=TRUE, strip.levels=c(TRUE, TRUE), var.name=v$meta$by)
} else {
strip <- lattice::strip.custom(strip.names=FALSE, factor.levels=paste(v$meta$by, abbrNum(bb), sep=": "), strip.levels=c(TRUE, TRUE), fg=lattice::trellis.par.get("strip.background")$col)
}
} else {
strip <- lattice::strip.custom(strip.names=FALSE, factor.levels=strip.names, strip.levels=c(TRUE, TRUE), fg=lattice::trellis.par.get("strip.background")$col)
}
lframe <- data.frame(fit=yy, lwr=lwr, upr=upr, xx=xx, by=bb)
lresids <- data.frame(r=y, x=x, by=b, pos=v$res$visregPos)
plot.args <- list(x=formula(lframe$fit~lframe$xx | lframe$by), type="l", ylim=ylim, xlab=v$meta$x, ylab=ylab, lframe=lframe, lresids=lresids, partial=partial, band=band, rug=rug, xlim=xlim, strip=strip, top=top, fill.par=fill.par)
if (length(new.args)) plot.args[names(new.args)] <- new.args
if (is.null(dev.list())) trellis.device()
opar <- lattice::trellis.par.get()
line.args <- list(lwd=3, col="
if (length(line.par)) line.args[names(line.par)] <- line.par
lattice::trellis.par.set(plot.line=line.args)
points.args <- list(cex=0.4, pch=19, col="gray50")
if (length(points.par)) points.args[names(points.par)] <- points.par
lattice::trellis.par.set(plot.symbol=points.args)
FUN <- getFromNamespace('xyplot', 'lattice')
if (is.factor(x)) {
K <- length(levels(x))
len <- K*(1-whitespace)+(K-1)*whitespace
scales <- list(x=list(at=((0:(K-1))/len+(1-whitespace)/(2*len)), labels=levels(x)))
if (is.null(plot.args$scales)) {
plot.args$scales <- scales
} else if (is.null(plot.args$scales$x)) {
plot.args$scales$x <- scales$x
} else {
plot.args$scales$x <- c(plot.args$scales$x, scales)
}
plot.args$panel <- visregFactorPanel
plot.args$w <- whitespace
tp <- do.call(FUN, plot.args)
plot(tp)
} else {
plot.args$panel <- visregPanel
tp <- do.call(FUN, plot.args)
plot(tp)
}
lattice::trellis.par.set(opar)
return(tp)
}
|
sort.by.cols <- function(x, cols = NULL, na.last = FALSE, decreasing = FALSE) {
if (length(dim(x)) < 2) {
stop("must provide a dataframe or matrix with more than one column")
}
if (length(cols) == 0) {
cols <- seq_len(dim(x)[2])
}
# build the order() call directly rather than pasting and parsing R code
ord_args <- c(unname(as.data.frame(x[, cols, drop = FALSE])),
list(na.last = na.last, decreasing = decreasing))
x[do.call(order, ord_args), ]
}
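# Example: sort by column 1, breaking ties with column 2
toy <- data.frame(a = c(2, 1, 2, 1), b = c(1, 2, 0, 0))
sort.by.cols(toy, cols = c(1, 2))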
|
get_expected_data_for_get_regional_data_tests_only_level_1_regions <- function() {
dates <- c("2020-01-31", "2020-02-01", "2020-02-02", "2020-02-03", "2020-02-04", "2020-02-05")
provinces <- c("Northland", "Eastland", "Southland", "Westland", "Virginia")
region_codes <- tibble::tibble(iso_3166_2 = c("NO", "EA", "SO", "WE", "VA"), region = provinces)
expected_data_for_provinces <- list()
for (i in 1:length(provinces)) {
province <- provinces[i]
dates_and_province <- data.frame(expand.grid(dates, province))
set.seed(417 + i)
count_data <- sort(sample(1:50, 30, replace = T))
set.seed(Sys.time())
count_data_frame <- data.frame(matrix(count_data, ncol = 5, byrow = TRUE))
set.seed(940 + i)
row_for_NA <- sample(1:6, 1, replace = FALSE)
set.seed(Sys.time())
count_data_frame[row_for_NA, ] <- 0
cumulative_counts_data_frame <- cumsum(count_data_frame)
expected_data_for_province <- cbind(dates_and_province, count_data_frame, cumulative_counts_data_frame)
colnames(expected_data_for_province) <- c(
"date", "province", "cases_new", "deaths_new", "recovered_new",
"hosp_new", "tested_new", "cases_total", "deaths_total",
"recovered_total", "hosp_total", "tested_total"
)
expected_data_for_province <- expected_data_for_province %>%
dplyr::select(
date, province, cases_new, cases_total, deaths_new, deaths_total,
recovered_new, recovered_total, hosp_new,
hosp_total, tested_new, tested_total
)
expected_data_for_province$tested_new <- as.numeric(NA_integer_)
expected_data_for_province$tested_total <- as.numeric(NA_integer_)
expected_data_for_province[row_for_NA, 3:12] <- as.numeric(NA_integer_)
expected_data_for_province <- expected_data_for_province %>% tidyr::fill(
cases_total,
deaths_total,
recovered_total,
hosp_total
)
expected_data_for_provinces[[i]] <- expected_data_for_province
}
expected_data <- suppressWarnings(dplyr::bind_rows(expected_data_for_provinces)) %>%
dplyr::mutate(
date = as.Date(date),
cases_new = as.numeric(cases_new),
cases_total = as.numeric(cases_total),
deaths_new = as.numeric(deaths_new),
deaths_total = as.numeric(deaths_total),
recovered_new = as.numeric(recovered_new),
recovered_total = as.numeric(recovered_total),
hosp_new = as.numeric(hosp_new),
hosp_total = as.numeric(hosp_total)
) %>%
dplyr::left_join(region_codes, by = c("province" = "region")) %>%
dplyr::select(
date, province, iso_3166_2, cases_new, cases_total, deaths_new,
deaths_total, recovered_new, recovered_total,
hosp_new, hosp_total, tested_new,
tested_total
) %>%
dplyr::arrange(date, province)
return(tibble::tibble(expected_data))
}
get_input_data_for_get_regional_data_tests_only_level_1_regions <- function() {
expected_data <- get_expected_data_for_get_regional_data_tests_only_level_1_regions()
input_data <- expected_data[, -c(3, 12, 13)]
input_data <- input_data[-which(rowSums(is.na(input_data)) > 0), ]
colnames(input_data)[2] <- "level_1_region"
return(input_data)
}
get_expected_totals_data_for_get_regional_data_tests_only_level_1_regions <- function() {
expected_data <- get_expected_data_for_get_regional_data_tests_only_level_1_regions()
totals_data <- expected_data[c(26:30), c(2, 3, 5, 7, 9, 11, 13)]
totals_data[, 7] <- rep(0, 5)
colnames(totals_data) <- c("province", "iso_3166_2", "cases_total", "deaths_total", "recovered_total", "hosp_total", "tested_total")
totals_data <- totals_data %>% dplyr::arrange(-cases_total)
return(tibble::tibble(totals_data))
}
get_input_data_for_get_regional_data_tests_with_level_2_regions <- function() {
data <- get_input_data_for_get_regional_data_tests_only_level_1_regions()
colnames(data)[2] <- "level_2_region"
regions_table <- tibble::tibble(
level_2_region = c("Northland", "Eastland", "Southland", "Westland", "Virginia"),
level_1_region = c("Oneland", "Oneland", "Twoland", "Twoland", "USA")
)
data <- data %>%
dplyr::left_join(regions_table, by = "level_2_region") %>%
dplyr::select(
date, level_2_region, level_1_region, cases_new, cases_total, deaths_new,
deaths_total, recovered_new, recovered_total,
hosp_new, hosp_total
)
return(data)
}
get_expected_data_for_get_regional_data_tests_with_level_2_regions <- function() {
data <- get_expected_data_for_get_regional_data_tests_only_level_1_regions()
data <- data[, -3]
data$region <- rep(c("Oneland", "Oneland", "Twoland", "USA", "Twoland"), 6)
region_codes <- tibble::tibble(
iso_3166_2 = c("ON", "TW", "US"),
region = c("Oneland", "Twoland", "USA")
)
level_2_region_codes <- tibble::tibble(
iso_3166_2_province = c("NO", "EA", "SO", "WE", "VA"),
region = c(
"Northland", "Eastland", "Southland",
"Westland", "Virginia"
)
)
data <- data %>%
dplyr::left_join(region_codes, by = "region") %>%
dplyr::left_join(level_2_region_codes, by = c("province" = "region")) %>%
dplyr::select(
date, province, iso_3166_2_province, region, iso_3166_2,
cases_new, cases_total, deaths_new, deaths_total,
recovered_new, recovered_total, hosp_new, hosp_total,
tested_new, tested_total
) %>%
dplyr::arrange(date, region, province)
return(data)
}
get_expected_totals_data_for_get_regional_data_tests_with_level_2_regions <- function() {
data <- get_expected_totals_data_for_get_regional_data_tests_only_level_1_regions()
data <- data[, -2]
data$region <- c("Oneland", "USA", "Twoland", "Twoland", "Oneland")
region_codes <- tibble::tibble(
iso_3166_2 = c("ON", "TW", "US"),
region = c("Oneland", "Twoland", "USA")
)
level_2_region_codes <- tibble::tibble(
iso_3166_2_province = c("NO", "EA", "SO", "WE", "VA"),
region = c(
"Northland", "Eastland", "Southland",
"Westland", "Virginia"
)
)
data <- data %>%
dplyr::left_join(region_codes, by = "region") %>%
dplyr::left_join(level_2_region_codes, by = c("province" = "region")) %>%
dplyr::select(
province, iso_3166_2_province, region, iso_3166_2, cases_total, deaths_total,
recovered_total, hosp_total, tested_total
)
return(tibble::tibble(data))
}
get_expected_data_for_fill_empty_dates_with_na_test <- function() {
dates <- c("2020-01-31", "2020-02-01", "2020-02-02", "2020-02-03")
regions <- c("Northland", "Eastland", "Wisconsin")
region_codes <- tibble::tibble(
region = regions,
level_1_region_code = c("NO", "EA", "WI")
)
expected_data <- data.frame(expand.grid(dates, regions))
colnames(expected_data) <- c("date", "level_1_region")
expected_data$date <- as.Date(expected_data$date)
expected_data$level_1_region <- as.character(expected_data$level_1_region)
expected_data <- expected_data %>%
dplyr::arrange(date, level_1_region) %>%
dplyr::left_join(region_codes, by = c("level_1_region" = "region"))
expected_data$cases <- c(1:5, rep(NA, 4), 10:12)
return(tibble::tibble(expected_data))
}
get_input_data_for_complete_cumulative_columns_test <- function() {
expected_data <- get_expected_data_for_fill_empty_dates_with_na_test()
partial_data <- expected_data[-c(6:9), ]
partial_data_with_cum_cases_na <- partial_data %>%
dplyr::group_by(level_1_region) %>%
dplyr::mutate(cases_total = cumsum(cases))
full_data_with_cum_cases_na <- covidregionaldata:::fill_empty_dates_with_na(partial_data_with_cum_cases_na)
return(full_data_with_cum_cases_na)
}
get_expected_data_for_complete_cumulative_columns_test <- function() {
expected_data <- get_expected_data_for_fill_empty_dates_with_na_test()
partial_data <- expected_data[-c(6:9), ]
full_data_with_cum_cases_filled <- covidregionaldata:::fill_empty_dates_with_na(partial_data)
full_data_with_cum_cases_filled <- dplyr::arrange(full_data_with_cum_cases_filled, level_1_region, date)
full_data_with_cum_cases_filled <- cbind(full_data_with_cum_cases_filled, as.integer(c(1, 5, 5, 15, 2, 7, 7, 18, 3, 3, 3, 15)))
colnames(full_data_with_cum_cases_filled)[5] <- "cases_total"
return(tibble::tibble(full_data_with_cum_cases_filled))
}
|
boxGrob <- function(label,
y = unit(.5, "npc"),
x = unit(.5, "npc"),
width,
height,
just = "center",
bjust = "center",
txt_gp = getOption("boxGrobTxt", default = gpar(color = "black",
cex = 1)),
box_gp = getOption("boxGrob", default = gpar(fill = "white")) ,
box_fn = roundrectGrob,
name = NULL) {
assert(
checkString(label),
checkNumeric(label),
is.language(label)
)
assert_unit(y)
assert_unit(x)
assert_unit(width)
assert_unit(height)
assert_just(just)
assert_just(bjust)
assert_class(txt_gp, "gpar")
assert_class(box_gp, "gpar")
x <- prAsUnit(x)
y <- prAsUnit(y)
txt_padding <- unit(4 * ifelse(is.null(txt_gp$cex), 1, txt_gp$cex), "mm")
txt <- textGrob(
label = label,
x = prGetX4Txt(just, txt_padding), y = .5,
just = just, gp = txt_gp,
name = "label"
)
if (missing(height)) {
height <- grobHeight(txt) + txt_padding + txt_padding
} else {
height <- prAsUnit(height)
}
if (missing(width)) {
width <- grobWidth(txt) + txt_padding + txt_padding
} else {
width <- prAsUnit(width)
}
vp_args <- list(
x = x,
y = y,
width = width,
height = height,
just = bjust
)
rect <- do.call(box_fn, list(x = .5, y = .5, gp = box_gp))
gl <- grobTree(
gList(
rect,
txt
),
vp = do.call(viewport, vp_args),
name = name,
cl = "box"
)
structure(gl,
coords = prCreateBoxCoordinates(viewport_data = vp_args),
viewport_data = vp_args
)
}
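# Minimal usage sketch, assuming the usual grid workflow for these grobs:
if (FALSE) {
  grid::grid.newpage()
  grid::grid.draw(boxGrob("A labelled box"))
}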
|
plotWriteFilenameToLaTexFile <-function (graphicFileName)
{
cat(paste0("\\includegraphics[width=0.95\\textwidth,height=0.63\\textwidth]{", graphicFileName, "}\n"))
}
|
addin_clearCache <- function() {
ui <- miniPage(
gadgetTitleBar("Clear recent cache entries"),
miniContentPanel(
textInput("cachePath", "Cache path:",
value = normalizePath(getOption("spades.cachePath", "."),
winslash = "/", mustWork = FALSE),
width = "100%"),
textInput("after", "After (default: Sys.time() - 60):",
value = Sys.time() - 60,
width = "100%")
)
)
server <- function(input, output, session) {
observeEvent(input$done, {
reproducible::clearCache(input$cachePath, after = input$after)
stopApp()
})
}
runGadget(ui, server, viewer = dialogViewer("Clear recent cache entries."))
}
|
meta.RiskD <-
function(data.mi, BB.grdnum=1000, B.sim=20000, cov.prob=0.95, midp=T, MH.imputation=F, print=T, studyCI=T)
{
n=length(data.mi[,1])
n1=data.mi[,3]; n2=data.mi[,4]
p1=data.mi[,1]/data.mi[,3]; p2=data.mi[,2]/data.mi[,4]
if(MH.imputation==T)
{id=(1:n)[p1*p2==0]
p1[id]=(data.mi[id,1]+0.5)/(data.mi[id,3]+1); p2[id]=(data.mi[id,2]+0.5)/(data.mi[id,4]+1)
n1[id]=data.mi[id,3]+1; n2[id]=data.mi[id,4]+1
}
deltap=p2-p1
varp=p1*(1-p1)/n1+p2*(1-p2)/n2
weight=(n1*n2/(n1+n2))/sum(n1*n2/(n1+n2))
mu.MH=sum(deltap*weight); sd.MH=sqrt(sum(weight^2*varp))
ci.MH=c(mu.MH-qnorm((1+cov.prob)/2)*sd.MH, mu.MH+qnorm((1+cov.prob)/2)*sd.MH)
p.MH=1-pchisq(mu.MH^2/sd.MH^2,1)
d0=max(abs(ci.MH))
delta.grd=seq(-min(1, 5*d0), min(1, d0*5),length=BB.grdnum-1); delta.grd=sort(c(0,delta.grd))
pv1.pool=pv2.pool=numeric(0)
for(kk in 1:n)
{ x1=data.mi[kk,1]
x2=data.mi[kk,2]
n1=data.mi[kk,3]
n2=data.mi[kk,4]
fit=priskD.exact(x1,x2,n1,n2, delta.grd, midp=midp)
pv1.pool=rbind(pv1.pool, fit$pv1); pv2.pool=rbind(pv2.pool, fit$pv2)
if(print==T) cat("study=", kk, "\n")
}
for(i in 1:n)
{ for(j in 1:BB.grdnum)
{ pv1.pool[i,(BB.grdnum-j+1)]=max(pv1.pool[i,1:(BB.grdnum-j+1)]);pv2.pool[i,j]=max(pv2.pool[i,j:BB.grdnum])
}
}
sigma0=1/data.mi[,3]+1/data.mi[,4]
set.seed(100)
tnull=matrix(0,B.sim,3)
y=matrix(runif(B.sim*n), n, B.sim)
y=y/(1+1e-2)
tnull[,1]=apply(-log(1-y)/sigma0, 2, sum)
tnull[,2]=apply(y/sigma0, 2, sum)
tnull[,3]=apply(asin(y)/sigma0, 2, sum)
alpha0=(1+cov.prob)/2;
cut=rep(0,3)
for(b in 1:3)
cut[b]=quantile(tnull[,b], 1-alpha0)
t1=t2=matrix(0,BB.grdnum,3)
pv1.pool=pv1.pool/(1+1e-2)
pv2.pool=pv2.pool/(1+1e-2)
t1[,1]=apply(-log(1-pv1.pool)/sigma0, 2, sum); t2[,1]=apply(-log(1-pv2.pool)/sigma0, 2, sum)
t1[,2]=apply(pv1.pool/sigma0, 2, sum); t2[,2]=apply(pv2.pool/sigma0, 2, sum)
t1[,3]=apply(asin(pv1.pool)/sigma0, 2, sum); t2[,3]=apply(asin(pv2.pool)/sigma0, 2, sum)
ci.fisher= c(min(delta.grd[t1[,1]>=cut[1]]),max(delta.grd[t2[,1]>=cut[1]]))
ci.cons= c(min(delta.grd[t1[,2]>=cut[2]]),max(delta.grd[t2[,2]>=cut[2]]))
ci.iv=c(min(delta.grd[t1[,3]>=cut[3]]),max(delta.grd[t2[,3]>=cut[3]]))
ci.MH=ci.MH
ci.range=c(min(delta.grd), max(delta.grd))
est.fisher=delta.grd[abs(t2[,1]-t1[,1])==min(abs(t2[,1]-t1[,1]))][1]
est.cons=delta.grd[abs(t2[,2]-t1[,2])==min(abs(t2[,2]-t1[,2]))][1]
est.iv=delta.grd[abs(t2[,3]-t1[,3])==min(abs(t2[,3]-t1[,3]))][1]
est.MH=mu.MH
est.range=NA
n0=(BB.grdnum+1)/2
c1=t1[n0,]; c2=t2[n0,]
p.fisher= min(1, 2*min(c(1-mean(tnull[,1]>=c1[1]), 1-mean(tnull[,1]>=c2[1]))))
p.cons= min(1, 2*min(c(1-mean(tnull[,2]>=c1[2]), 1-mean(tnull[,2]>=c2[2]))))
p.iv=min(1, 2*min(c(1-mean(tnull[,3]>=c1[3]), 1-mean(tnull[,3]>=c2[3]))))
pvalue=c(p.cons, p.iv, p.fisher, p.MH, NA)
ci=cbind(ci.cons, ci.iv, ci.fisher,ci.MH, ci.range)
ci=rbind(c(est.cons, est.iv, est.fisher, est.MH, est.range), ci, pvalue)
rownames(ci)=c("est", "lower CI", "upper CI", "p")
colnames(ci)=c("constant", "inverse-variance", "fisher", "asymptotical-MH", " range")
study.ci=NULL
if(studyCI==T)
{n=length(data.mi[,1])
study.ci=matrix(0, n, 5)
colnames(study.ci)=c("est", "lower CI", "upper CI", "p", "limit")
rownames(study.ci)=1:n
for(kk in 1:n)
{xx1=data.mi[kk,1]
xx2=data.mi[kk,2]
nn1=data.mi[kk,3]
nn2=data.mi[kk,4]
fit=ci.RiskD(xx1, xx2, nn1, nn2, cov.prob=cov.prob, BB.grdnum=BB.grdnum, midp=midp)
study.ci[kk,2]=fit$lower
study.ci[kk,3]=fit$upper
study.ci[kk,1]=fit$est
study.ci[kk,4]=fit$p
study.ci[kk,5]=fit$status
rownames(study.ci)[kk]=paste("study ", kk)
}
}
return(list(ci.fixed=ci, study.ci=study.ci, precision=paste("+/-", (max(delta.grd)-min(delta.grd))/BB.grdnum)))
}
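# Usage sketch on an invented 2x4 table of (events1, events2, n1, n2) per
# study; assumes the companion helpers priskD.exact() and ci.RiskD() are loaded.
if (FALSE) {
  data.mi <- rbind(c(5, 10, 100, 100),
                   c(3, 8, 90, 95))
  meta.RiskD(data.mi, BB.grdnum = 200, B.sim = 2000, studyCI = FALSE)
}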
|
library(purrr)
library(readr)
urls <- list(
example = "http://example.org",
asdf = "http://asdfasdasdkfjlda"
)
# without safely(), the unreachable URL makes map() error and halt the script
try(map(urls, read_lines))
safe_readlines <- safely(readLines)
safe_readlines
html <- map(urls, safe_readlines)
str(html)
html[["example"]][["result"]]
html[["asdf"]][["error"]]
str(transpose(html))
res <- transpose(html)[["result"]]
errs <- transpose(html)[["error"]]
|
library(knitr)
options(width = 150)
opts_chunk$set(
  comment = "",
  fig.width = 12,
  message = FALSE,
  warning = FALSE,
  tidy.opts = list(
    keep.blank.line = TRUE,
    width.cutoff = 150
  ),
  eval = TRUE
)
library("survminer")
library("survival")
fit<- survfit(Surv(time, status) ~ sex, data = lung)
ggsurvplot(fit, data = lung)
ggsurvplot(fit, data = lung,
title = "Survival curves", subtitle = "Based on Kaplan-Meier estimates",
caption = "created with survminer",
font.title = c(16, "bold", "darkblue"),
font.subtitle = c(15, "bold.italic", "purple"),
font.caption = c(14, "plain", "orange"),
font.x = c(14, "bold.italic", "red"),
font.y = c(14, "bold.italic", "darkred"),
font.tickslab = c(12, "plain", "darkgreen"))
ggsurvplot(fit, data = lung, risk.table = TRUE)
ggsurvplot(fit, data = lung,
title = "Survival curves", subtitle = "Based on Kaplan-Meier estimates",
caption = "created with survminer",
font.title = c(16, "bold", "darkblue"),
font.subtitle = c(15, "bold.italic", "purple"),
font.caption = c(14, "plain", "orange"),
font.x = c(14, "bold.italic", "red"),
font.y = c(14, "bold.italic", "darkred"),
font.tickslab = c(12, "plain", "darkgreen"),
risk.table = TRUE,
risk.table.title = "Note the risk set sizes",
risk.table.subtitle = "and remember about censoring.",
risk.table.caption = "source code: website.com",
risk.table.height = 0.45)
ggsurvplot(fit, data = lung, risk.table = TRUE, ncensor.plot = TRUE)
ggsurvplot(fit, data = lung,
title = "Survival curves", subtitle = "Based on Kaplan-Meier estimates",
caption = "created with survminer",
font.title = c(16, "bold", "darkblue"),
font.subtitle = c(15, "bold.italic", "purple"),
font.caption = c(14, "plain", "orange"),
font.x = c(14, "bold.italic", "red"),
font.y = c(14, "bold.italic", "darkred"),
font.tickslab = c(12, "plain", "darkgreen"),
risk.table = TRUE,
risk.table.title = "Note the risk set sizes",
risk.table.subtitle = "and remember about censoring.",
risk.table.caption = "source code: website.com",
risk.table.height = 0.35,
ncensor.plot = TRUE,
ncensor.plot.title = "Number of censorings",
ncensor.plot.subtitle = "over the time.",
ncensor.plot.caption = "data available at data.com",
ncensor.plot.height = 0.35)
|
is.fdiscd.misclass <-
function(x)
{
return(is(x, "fdiscd.misclass"))
}
|
fixDuplicates <- function(data, idVar="id", infusionDoseTimeVar=NULL, infusionDoseVar=NULL, moveBolus=FALSE, bolusDoseTimeVar=NULL, bolusDoseVar=NULL) {
colnames <- names(data)
if(!is.null(idVar) && !(idVar %in% colnames)) stop(sprintf("column %s does not exist", idVar))
if(!is.null(infusionDoseTimeVar) && !(infusionDoseTimeVar %in% colnames)) stop(sprintf("column %s does not exist", infusionDoseTimeVar))
if(!is.null(infusionDoseVar) && !(infusionDoseVar %in% colnames)) stop(sprintf("column %s does not exist", infusionDoseVar))
idcol <- match(idVar, colnames)
idtv <- match(infusionDoseTimeVar, colnames)
idv <- match(infusionDoseVar, colnames)
rtcol <- match(sprintf("%s.real", infusionDoseTimeVar), colnames)
if(is.na(rtcol)) stop("real time column is not present")
bdtv <- which(bolusDoseTimeVar == colnames)
bdv <- which(bolusDoseVar == colnames)
if(moveBolus && (!length(bdtv) || !length(bdv))) moveBolus <- FALSE
data[is.na(data[,idtv]) & !is.na(data[,idv]), idv] <- NA
real.time <- parse_dates(data[,rtcol])
inf.format <- guessDateFormat(data[,idtv])
infuse.time <- parse_dates(data[,idtv])
bolus.time <- NULL
if(length(bdtv)) bolus.time <- parse_dates(data[,bdtv])
data$tobolus <- 0
data$change <- 0
if(!isTRUE(all.equal(seq(nrow(data)), order(data[,idcol])))) stop(sprintf("data set should be ordered by %s", idVar))
duprows <- which(unlist(tapply(infuse.time, data[,idcol], FUN=function(i) c(FALSE, diff(i)==0))))
duprows <- duprows[!is.na(data[duprows, idtv])]
for(i in duprows) {
pair <- i-1
while(is.na(data[pair,idtv]) || as.character(data[i,idtv]) != as.character(data[pair,idtv])) {
pair <- pair-1
}
solved <- FALSE
if(lubridate::minute(real.time[pair]) != 0) {
index <- pair-1
while(index > 1 && is.na(data[index,idtv]) && data[index,idcol] == data[pair,idcol]) {
index <- index-1
}
tmp <- infuse.time[pair] - lubridate::dhours(1)
if(index == 0 || data[index,idcol] != data[pair,idcol] || tmp > infuse.time[index]) {
data[pair, idtv] <- format(tmp, inf.format)
solved <- TRUE
}
}
if(!solved && lubridate::minute(real.time[i]) != 0) {
index <- i+1
while(is.na(data[index,idtv]) && data[index,idcol] == data[i,idcol]) {
index <- index+1
}
tmp <- infuse.time[i] + lubridate::dhours(1)
if(data[index,idcol] != data[i,idcol] || infuse.time[index] > tmp) {
data[i, idtv] <- format(tmp, inf.format)
solved <- TRUE
}
}
if(!solved && !is.null(bolus.time)) {
if(lubridate::minute(real.time[pair]) != 0 && data$tobolus[pair] == 0) {
j <- pair
} else {
j <- i
}
if(all(is.na(data[j,c(bdtv,bdv)]))) {
if(moveBolus) {
data[j,bdtv] <- data[j,rtcol]
data[j,bdv] <- data[j,idv]
data[j,rtcol] <- NA
data[j,idtv] <- NA
data[j,idv] <- NA
}
data$tobolus[j] <- 1
}
}
}
if(!is.null(bolus.time)) {
duprows <- which(unlist(tapply(bolus.time, data[,idcol], FUN=function(i) c(FALSE, diff(i)==0))))
duprows <- duprows[!is.na(data[duprows, bdtv])]
if(length(duprows)) {
vals <- apply(data[, c(idcol, bdtv)], MARGIN=1, paste, collapse='|')
for(i in duprows) {
init.ix <- match(vals[i], vals)
data[init.ix, bdv] <- data[init.ix, bdv] + data[i, bdv]
}
data[duprows, c(bdtv, bdv)] <- c(NA, NA)
data[duprows, 'tobolus'] <- NA
}
}
if(any(is.na(data$tobolus))) data <- data[!is.na(data$tobolus),]
data
}
|
rrse = function(truth, response, na_value = NaN, ...) {
assert_regr(truth, response = response, na_value = na_value)
v = var(truth)
if (v < TOL) {
return(na_value)
}
sqrt(sum(se(truth, response)) / (v * (length(truth) - 1L)))
}
add_measure(rrse, "Root Relative Squared Error", "regr", 0, Inf, TRUE)
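# rrse is sqrt(SSE / SST); a sketch check against the plain formula,
# assuming the package helpers assert_regr(), se() and TOL are available.
if (FALSE) {
  truth <- c(1, 2, 3, 4); response <- c(1.1, 1.9, 3.2, 3.7)
  rrse(truth, response)
  sqrt(sum((truth - response)^2) / sum((truth - mean(truth))^2))  # same value
}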
|
fit.nls <-
function (lambda, p.names, estimates)
{
extrapolation <- list()
lambda0 <- c(0, max(lambda) / 2, max(lambda))
for (d in p.names) {
quad <- lm(estimates[, d] ~ lambda + I(lambda^2))
a.nls <- predict(quad,
newdata = data.frame(lambda = lambda0))
gamma.est.3 <- ((a.nls[2] - a.nls[3]) * lambda0[3] *
(lambda0[2] - lambda0[1]) - lambda0[1] *
(a.nls[1] - a.nls[2]) * (lambda0[3] - lambda0[2])) /
((a.nls[1] - a.nls[2]) * (lambda0[3] - lambda0[2]) -
(a.nls[2] - a.nls[3]) * (lambda0[2] - lambda0[1]))
gamma.est.2 <- ((a.nls[2] - a.nls[3]) * (gamma.est.3 + lambda0[2]) *
(gamma.est.3 + lambda0[3])) / (lambda0[3] - lambda0[2])
gamma.est.1 <- a.nls[1] - (gamma.est.2 / (gamma.est.3 + lambda0[1]))
extrapolation[[d]] <-
nls(estimates[, d] ~ gamma.1 + gamma.2 / (gamma.3 + lambda),
start = list(gamma.1 = gamma.est.1, gamma.2 = gamma.est.2,
gamma.3 = gamma.est.3))
}
return(extrapolation)
}
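# Sketch: recover a known extrapolant gamma.1 + gamma.2 / (gamma.3 + lambda)
# from slightly noisy estimates (values invented for illustration).
if (FALSE) {
  set.seed(2)
  lambda <- c(0.5, 1, 1.5, 2)
  est <- matrix(1 + 2 / (3 + lambda) + rnorm(4, sd = 0.005),
                ncol = 1, dimnames = list(NULL, "beta"))
  coef(fit.nls(lambda, "beta", est)[["beta"]])
}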
|
.tableAggregateload <- function(opts){
Mode <- NULL
linkTable <- try(data.table::fread(system.file("/format_output/tableOutput.csv", package = "antaresEditObject")),
silent = TRUE)
linkTable$progNam <- linkTable$Stats
linkTable$progNam[which(linkTable$progNam == "values")] <- "EXP"
if(opts$mode == "Economy") {
linkTable <- linkTable[Mode == "economy"]
}else{
linkTable <- linkTable[Mode != "economy"]
}
}
.updateData <- function(opts, dta, linkTable){
.N <- Name <- progNam <- NULL
idC <- antaresRead::getIdCols(dta)
calcC <- names(dta)[!names(dta)%in%idC]
idC <- idC[idC != "mcYear"]
dtamean <- dta[, lapply(.SD, function(X){(mean(X))}), .SDcols = calcC, by = idC]
dtamin <- dta[, lapply(.SD, function(X){(min(X))}), .SDcols = calcC, by = idC]
dtamax <- dta[, lapply(.SD, function(X){(max(X))}), .SDcols = calcC, by = idC]
dtastd <- dta[, lapply(.SD, function(X){(sd(X)/(.N*2+1)*(.N*2))}), .SDcols = calcC, by = idC]
outputData <- unique(dta[, .SD, .SDcols = idC])
for(i in calcC){
keep <- linkTable[Name == i]
if("EXP" %in% keep$progNam){
outputData[[i]] <- round(dtamean[[i]],keep[progNam == "EXP"]$digits)
}
if("std" %in% keep$progNam){
outputData[[paste0(i, "_std")]] <- round(dtastd[[i]],keep[progNam == "std"]$digits)
}
if("min" %in% keep$progNam){
outputData[[paste0(i, "_min")]] <- round(dtamin[[i]],keep[progNam == "min"]$digits)
}
if("max" %in% keep$progNam){
outputData[[paste0(i, "_max")]] <- round(dtamax[[i]],keep[progNam == "max"]$digits)
}
}
attributes(outputData)$synthesis <- TRUE
outputData
}
|
plot.surv_variable_response_explainer <- function(x, ..., split = "model"){
y <- color <- NULL
df <- data.frame(x)
dfl <- list(...)
if (length(dfl) > 0) {
for (resp in dfl) {
class(resp) <- "data.frame"
df <- rbind(df, resp)
}
}
if (is.numeric(df$value) & length(unique(df$value))>=4) {
df$value <- cut(df$value, quantile(df$value, prob = seq(0, 1, length.out = 6)), include.lowest = TRUE)
df <- aggregate(y~., data = df, mean)
}
if (split == "variable") {
add_facet <- facet_wrap(~value, ncol = 1)
df$color <- factor(df$label)
legend <- "model"
} else {
add_facet <- facet_wrap(~label, ncol = 1)
df$color <- factor(df$value)
legend <- x$var[1]
}
ggplot(df, aes(x, y, color = color)) +
geom_step() +
labs(title = paste0("Partial Dependency Plot of variable ", df$var[1]),
x = "time",
y = "mean survival probability",
col = legend) +
add_facet +
theme_mi2()+
scale_y_continuous(breaks = seq(0,1,0.1),
limits = c(0,1),
labels = paste(seq(0,100,10),"%"),
name = "survival probability")
}
|
setMethod(
f="OutFlux_by_PoolName",
signature=c(
func='function'
,sourceName='character'
)
,def=function(func,sourceName){
new(
'OutFlux_by_PoolName'
,sourceName=PoolName(sourceName)
,func=func
)
}
)
setMethod(
f="by_PoolIndex"
,signature=signature(
obj='OutFlux_by_PoolName'
,poolNames='character'
,timeSymbol='character'
)
,definition=function(obj,poolNames,timeSymbol){
fl_by_index<-new(
"OutFlux_by_PoolIndex"
,sourceIndex=PoolIndex(
obj@sourceName
,poolNames=poolNames
)
,func=by_PoolIndex(
obj@func
,poolNames=poolNames
,timeSymbol=timeSymbol
)
)
fl_by_index
}
)
|
context("aging of one portfolio of guaratee contracts")
library(vamc)
histFundScen <- genFundScen(fundMap, histIdxScen)
test_that("test for correctness of aging a portfolio of guarantee contracts", {
skip_on_cran()
tmp <- tempfile()
expect_equal_to_reference(agePortfolio(VAPort, mortTable, histFundScen,
histDates, dT = 1 / 12, "2014-01-01",
cForwardCurve), tmp)
})
|
filterEvaluator <- function(filter, params=list()){
if(!is.list(params)){
stop('The filter method parameters are not in a list.')
}
algorithms <- list('binaryConsistency', 'chiSquared', 'cramer', 'determinationCoefficient', 'fscore', 'gainRatio', 'giniIndex', 'IEConsistency', 'IEPConsistency', 'Jd', 'MDLC', 'mutualInformation', 'roughsetConsistency', 'relief', 'ReliefFeatureSetMeasure', 'symmetricalUncertain')
switch(filter,
'binaryConsistency'={
do.call(binaryConsistency,params)
},
'chiSquared'={
do.call(chiSquared,params)
},
'cramer'={
do.call(cramer,params)
},
'determinationCoefficient'={
do.call(determinationCoefficient,params)
},
'gainRatio'={
do.call(gainRatio,params)
},
'giniIndex'={
do.call(giniIndex,params)
},
'IEConsistency'={
do.call(IEConsistency,params)
},
'IEPConsistency'={
do.call(IEPConsistency,params)
},
'Jd'={
do.call(Jd,params)
},
'MDLC'={
do.call(MDLC,params)
},
'mutualInformation'={
do.call(mutualInformation,params)
},
'roughsetConsistency'={
do.call(roughsetConsistency,params)
},
'relief'={
do.call(relief,params)
},
'ReliefFeatureSetMeasure'={
do.call(ReliefFeatureSetMeasure,params)
},
'symmetricalUncertain'={
do.call(symmetricalUncertain,params)
},
{
stop('The filter method passed as a parameter is not supported by the package.')
}
)
}
attr(filterEvaluator,'shortName') <- "filterGenerator"
attr(filterEvaluator,'name') <- "Filter Measure Generator"
attr(filterEvaluator,'methods') <- c('binaryConsistency', 'chiSquared', 'cramer', 'determinationCoefficient', 'fscore', 'gainRatio', 'giniIndex', 'IEConsistency', 'IEPConsistency', 'Jd', 'MDLC', 'mutualInformation', 'roughsetConsistency', 'relief', 'RFSM', 'symmetricalUncertain')
|
library(crs)
set.seed(24)
n <- 10000
x <- runif(n,-1,1)
x.seq <- seq(min(x),max(x),length=100)
sd.x <- .1+abs(x)
sd.x.seq <- .1+abs(x.seq)
y <- rnorm(n,mean=x,sd=sd.x)
taus <- c(0.05,0.1,.25,0.5,.75,.9,0.95)
j.opt <- numeric()
s.opt <- numeric()
plot(x,y,cex=.25,col="lightgrey")
for(t in seq(along=taus)) {
model.rq <- crs(y~x,tau=taus[t],cv.threshold=0,nmulti=2)
lines(x.seq,predict(model.rq,newdata=data.frame(x=x.seq)),col=t,lty=t,lwd=2)
lines(x.seq,qnorm(taus[t],mean=x.seq,sd=sd.x.seq),lty=2)
j.opt[t] <- model.rq$degree
s.opt[t] <- model.rq$segments
}
legend(min(x),max(y),paste("tau=",taus,", d=",j.opt,", s=",s.opt,sep=""),
lty=1:length(taus),
col=1:length(taus),
lwd=rep(2,length(taus)),
bty="n")
|
createFormula <-
function(x,right=TRUE){
f<-'.~.'
if (length(x)==1){
if (right)
f<-paste('.',x,sep='~')
else
f<-paste(x,'.',sep='~')
} else if (length(x)>1){
half_length<-length(x)%/%2
if (right)
f<-paste(
paste(tail(x,half_length),collapse='+'),
paste(head(x,length(x)-half_length),
collapse='+')
,sep='~'
)
else
f<-paste(
paste(head(x,length(x)-half_length),collapse='+'),
paste(tail(x,half_length),
collapse='+')
,sep='~'
)
}
as.formula(f)
}
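# Examples: the variable list is split roughly in half around "~"
createFormula(c("a", "b", "c"))                 # c ~ a + b
createFormula(c("a", "b", "c"), right = FALSE)  # a + b ~ c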
|
generate.jackknife.blocks<-function(x,nsnp.per.bjack.block,verbose=TRUE){
tmp.pos=x@snp.info[,2]
det.idx.per.chr=matrix(unlist(by(1:x@nsnp,x@snp.info[,1],range)),ncol=2,byrow=T)
if(nrow(det.idx.per.chr)==0){
stop("Exit function: No chr/contigs available (information on SNP contig name might not have been provided)")
}
tmp.diff.pos=by(tmp.pos,x@snp.info[,1],diff,na.rm=T)
det.idx.per.chr=cbind(det.idx.per.chr,
det.idx.per.chr[,2]-det.idx.per.chr[,1]+1,
tmp.pos[det.idx.per.chr[,2]]-tmp.pos[det.idx.per.chr[,1]],
matrix(unlist(lapply(tmp.diff.pos,f<-function(x){if(length(x)>0){return(range(x))}else{return(rep(NA,2))}})),ncol=2,byrow=T))
colnames(det.idx.per.chr)=c("snp_idx1","snp_idx2","nsnps","size","min_intersnp_dist","max_intersnp_dist")
tmp=sum(det.idx.per.chr[,5]<0,na.rm=T)
if(tmp>0){
warning(paste(tmp,"contigs with unordered SNPs positions (these will be discarded)\n"))
}
ctg.sel=det.idx.per.chr[,3]>nsnp.per.bjack.block & det.idx.per.chr[,5]>=0
if(sum(ctg.sel)==0){
stop("Exit function: No contig available after applying filtering steps (e.g., try lowering nsnp.per.bjack.block)\n")
}
det.idx.per.chr=det.idx.per.chr[ctg.sel,]
if(sum(ctg.sel)==1){
block.start=seq(det.idx.per.chr[1],det.idx.per.chr[2]-nsnp.per.bjack.block+1,nsnp.per.bjack.block)
}else{
block.start=as.vector(unlist(apply(det.idx.per.chr[,1:2],1,f<-function(x){return(seq(x[1],x[2]-nsnp.per.bjack.block+1,nsnp.per.bjack.block))})))
}
nblocks=length(block.start)
if(nblocks<=1){
stop("Exit function: <=1 block available\n")
}
blocks.det=cbind(block.start,block.start+nsnp.per.bjack.block-1)
blocks.det=cbind(blocks.det,tmp.pos[blocks.det[,2]]-tmp.pos[blocks.det[,1]])
colnames(blocks.det)=c("idx.start","idx.end","size")
snp.block.id=rep(NA,x@nsnp)
for(i in 1:nblocks){snp.block.id[blocks.det[i,1]:blocks.det[i,2]]=i}
if(verbose){
cat(nblocks,"Jackknife blocks identified with",sum(!is.na(snp.block.id)),"SNPs (out of",x@nsnp,").\n SNPs map to",nrow(det.idx.per.chr),"different chrom/scaffolds\n")
cat("Average (min-max) Block Sizes:",round(mean(blocks.det[,3]*1e-6),3),"(",round(min(blocks.det[,3]*1e-6),3),"-",round(max(blocks.det[,3]*1e-6),3),") Mb\n")
}
return(list(blocks.det=blocks.det,snp.block.id=snp.block.id,nblocks=nblocks,nsnps=sum(!is.na(snp.block.id))))
}
|
source("ESEUR_config.r")
library("boot")
mean_diff=function()
{
s_ind=rnorm(len_est_2)
expert=c(est_2$expert[s_ind < 0], est_2$planning.poker[s_ind >= 0])
s_ind=rnorm(len_est_2)
poker=c(est_2$expert[s_ind < 0], est_2$planning.poker[s_ind >= 0])
return(mean(expert)-mean(poker))
}
est_1=read.csv(paste0(ESEUR_dir, "group-compare/16_1.csv.xz"), as.is=TRUE)
est_2=read.csv(paste0(ESEUR_dir, "group-compare/16_2.csv.xz"), as.is=TRUE)
est_mean_diff=abs(mean(est_2$expert)-mean(est_2$planning.poker))
len_est_2=nrow(est_2)
t=replicate(4999, mean_diff())
plot(density(t))
mean(t)
sd(t)
100*length(which(abs(t) >= est_mean_diff))/(1+length(t))
|
check_ssn <- function(path, predictions = NULL) {
out <- TRUE
message("Checking necessary files...")
if(file.exists(file.path(path, "edges.shp"))) {
message("\tedges.shp...OK")
} else {
out <- out & FALSE
message("\tedges.shp...FAIL!")
}
if(file.exists(file.path(path, "sites.shp"))) {
message("\tsites.shp...OK")
} else {
out <- out & FALSE
message("\tsites.shp...FAIL!")
}
if(!is.null(predictions)){
for(i in 1:length(predictions)){
if(file.exists(file.path(path, paste0(predictions[i], ".shp")))) {
message(paste0("\t", predictions[i], ".shp...OK"))
} else {
out <- out & FALSE
message(paste0("\t", predictions[i], ".shp...FAIL!"))
}
}
}
edges <- readOGR(path, "edges", verbose = FALSE)
netIDs <- unique(edges$netID)
bin_files <- list.files(path, pattern = "*.dat")
if (all(bin_files %in% paste0("netID", netIDs, ".dat"))) {
message("\tbinary files...OK")
} else {
out <- out & FALSE
message("\tbinary files...FAIL!")
}
message("Checking edges.shp...")
obl_cols <- c("rid", "netID", "OBJECTID", "upDist", "Length", "H2OArea", "rcaArea")
if (all(obl_cols %in% names(edges@data))) {
message("\tColumns...OK")
} else {
out <- out & FALSE
message("\tColumns...FAIL!
\tMissing columns: ", obl_cols[!obl_cols %in% names(edges@data)])
}
if (length(unique(edges$rid)) == nrow(edges)) {
message("\tUnique rids...OK")
} else {
out <- out & FALSE
message("\tUnique rids...FAIL!")
}
if (max(edges$rid) >= nrow(edges) - 1) {
message("\tMax rid...OK")
} else {
out <- out & FALSE
message("\tMax rid...FAIL!")
}
if (!any(is.na(edges$netID))) {
message("\tnetID...OK")
} else {
out <- out & FALSE
message("\tnetID > 0...FAIL!")
}
ssn <- importSSN(path)
ssn <- additive.function(ssn, "H2OArea", "afv_computed")
r_afv <- range(ssn@data$afv_computed)
if (all(r_afv >= 0 & r_afv <= 1)) {
message("\tadditive function value range...OK")
} else {
out <- out & FALSE
message("\tadditive function value range...FAIL!")
}
message("Checking sites.shp...")
sites <- readOGR(path, "sites", verbose = FALSE)
obl_cols <- c("rid", "pid", "locID", "netID", "upDist")
obl_cols2 <- "H2OArea"
if (all(obl_cols %in% names(sites@data)) & any(grepl(obl_cols2, names(sites@data)))) {
message("\tColumns...OK")
} else {
out <- out & FALSE
message("\tColumns...FAIL!
\tMissing columns: ", obl_cols[!obl_cols %in% names(sites@data)])
}
if(any(sites$ratio > 1) | any(sites$ratio < 0)) {
out <- out & FALSE
message("\tratio > 0 and < 1 ...FAIL!")
} else {
message("\tratio...OK")
}
if (!any(is.na(sites$netID))) {
message("\tnetID...OK")
} else {
out <- out & FALSE
message("\tnetID > 0...FAIL!")
}
if (!any(is.na(sites$rid))) {
message("\trid...OK")
} else {
out <- out & FALSE
message("\trid > 0...FAIL!")
}
if(!is.null(predictions)){
message("Checking Prediction sites...")
for(i in 1:length(predictions)){
preds <- readOGR(path, predictions[i], verbose = FALSE)
obl_cols <- c("rid", "pid", "locID", "netID", "upDist", "H2OArea")
if (all(obl_cols %in% names(preds@data))) {
message("\tColumns...OK")
} else {
out <- out & FALSE
message("\tColumns...FAIL!
\tMissing columns: ", obl_cols[!obl_cols %in% names(preds@data)])
}
if (any(preds$ratio > 1) | any(preds$ratio < 0)) {
out <- out & FALSE
message("\tratio > 0 and < 1 ...FAIL!")
} else {
message("\tratio...OK")
}
if (!any(is.na(preds$netID))) {
message("\tnetID...OK")
} else {
out <- out & FALSE
message("\tnetID > 0...FAIL!")
}
if (!any(is.na(preds$rid))) {
message("\trid...OK")
} else {
out <- out & FALSE
message("\trid > 0...FAIL!")
}
}
}
message("Checking Binary files...")
if (all(as.numeric(gsub("netID(.*)\\.dat", "\\1", bin_files)) %in% unique(edges@data$netID)) &
all(unique(edges@data$netID) %in% as.numeric(gsub("netID(.*)\\.dat", "\\1", bin_files)))) {
message("\tBinary files...OK")
} else {
out <- out & FALSE
message("\tBinary files...FAIL!")
}
netids <- as.numeric(gsub("netID(.*)\\.dat", "\\1", bin_files))
rid_ok <- TRUE
for(i in netids){
rids_bin <- utils::read.table(file.path(path, paste0("netID", i, ".dat")), header = T, sep = ",")[,1]
rids_edge <- edges$rid[edges$netID == i]
if(!(all(rids_bin %in% rids_edge) && all(rids_edge %in% rids_bin))){
rid_ok <- rid_ok & FALSE
}
}
if(rid_ok){
message("\trids in binary files...OK")
} else {
out <- out & FALSE
message("\trids in binary files...FAIL!")
}
return(out)
}
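# Usage sketch (assumes an SSN directory with edges.shp, sites.shp and
# netID*.dat binary files, e.g. as produced by openSTARS):
if (FALSE) {
  check_ssn("path/to/my.ssn", predictions = "preds")
}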
|
NULL
distr_impl <- function(pdqr_class, impl_funs, x, type, ...) {
assert_missing(x, "numeric vector or appropriate data frame")
assert_missing(type, 'pdqr type ("discrete" or "continuous")')
assert_pdqr_type(type)
x_tbl <- impute_x_tbl(x, type, ...)
rm(list = "x", envir = environment())
res <- switch(
type,
discrete = impl_funs[["discrete"]](x_tbl),
continuous = impl_funs[["continuous"]](x_tbl)
)
add_pdqr_class(res, pdqr_class)
}
add_pdqr_class <- function(f, pdqr_class) {
add_class(f, c(pdqr_class, "pdqr"))
}
unpdqr <- function(f) {
class(f) <- setdiff(class(f), c("p", "d", "q", "r", "pdqr"))
f
}
filter_numbers <- function(x) {
x_is_nan <- is.nan(x)
if (any(x_is_nan)) {
warning_collapse("`x` has `NaN`s, which are removed.")
}
x <- x[!x_is_nan]
x_is_na <- is.na(x)
if (any(x_is_na)) {
warning_collapse("`x` has `NA`s, which are removed.")
}
x <- x[!x_is_na]
x_is_inf <- is.infinite(x)
if (any(x_is_inf)) {
warning_collapse("`x` has infinite values, which are removed.")
}
x[!x_is_inf]
}
is_pdqr_fun <- function(f) {
enable_asserting_locally()
tryCatch(assert_pdqr_fun(f), error = function(e) {
FALSE
})
}
is_pdqr_type <- function(type) {
enable_asserting_locally()
tryCatch(assert_pdqr_type(type), error = function(e) {
FALSE
})
}
is_support <- function(supp, allow_na = FALSE) {
enable_asserting_locally()
tryCatch(assert_support(supp, allow_na), error = function(e) {
FALSE
})
}
is_x_tbl <- function(x, type) {
enable_asserting_locally()
tryCatch(assert_x_tbl(x, type), error = function(e) {
FALSE
})
}
is_x_tbl_meta <- function(x, type) {
tryCatch(assert_x_tbl_meta(x, type), error = function(e) {
FALSE
})
}
is_pdqr_class <- function(chr) {
chr %in% c("p", "d", "q", "r")
}
is_boolean_pdqr_fun <- function(f) {
(meta_type(f) == "discrete") && identical(meta_x_tbl(f)[["x"]], c(0, 1))
}
has_meta_type <- function(f) {
has_meta(f, "type") && is_pdqr_type(meta_type(f))
}
has_meta_support <- function(f) {
has_meta(f, "support") && is_support(meta_support(f))
}
has_meta_x_tbl <- function(f, type) {
has_meta(f, "x_tbl") && is_x_tbl(meta_x_tbl(f), type) &&
is_x_tbl_meta(meta_x_tbl(f), type)
}
density_piecelin <- function(x, ...) {
dens <- stats::density(x, ...)
x_dens <- dens[["x"]]
y_dens <- dens[["y"]]
tot_integral <- trapez_integral(x_dens, y_dens)
data.frame(x = x_dens, y = y_dens / tot_integral)
}
trapez_integral <- function(x, y) {
sum(trapez_piece_integral(x, y))
}
trapez_piece_integral <- function(x, y) {
n <- length(y)
(x[-1] - x[-n]) * (y[-1] + y[-n]) / 2
}
trapez_part_integral <- function(x, y) {
n <- length(y)
c(0, cumsum((x[-1] - x[-n]) * (y[-1] + y[-n])) / 2)
}
compute_piecelin_density_coeffs <- function(x_tbl, ind_vec) {
n <- length(ind_vec)
slope <- numeric(n)
intercept <- numeric(n)
x <- x_tbl[["x"]]
y <- x_tbl[["y"]]
ind_is_in <- (ind_vec >= 1) & (ind_vec < length(x))
inds_in <- ind_vec[ind_is_in]
slope[ind_is_in] <- (y[inds_in + 1] - y[inds_in]) /
(x[inds_in + 1] - x[inds_in])
intercept[ind_is_in] <- y[inds_in] - slope[ind_is_in] * x[inds_in]
list(slope = slope, intercept = intercept)
}
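# Sanity check for the trapezoid helpers: a triangle density on [0, 2]
# with peak 1 at x = 1 integrates to 1.
stopifnot(all.equal(trapez_integral(c(0, 1, 2), c(0, 1, 0)), 1))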
|
NULL
plot.tbl_regression <- function(x,
remove_header_rows = TRUE,
remove_reference_rows = FALSE, ...) {
assert_package("GGally", fn = "plot.tbl_regression()")
df_coefs <- x$table_body
if (isTRUE(remove_header_rows)) {
df_coefs <- df_coefs %>% filter(!.data$header_row %in% TRUE)
}
if (isTRUE(remove_reference_rows)) {
df_coefs <- df_coefs %>% filter(!.data$reference_row %in% TRUE)
}
df_coefs %>%
GGally::ggcoef_plot(exponentiate = x$inputs$exponentiate, ...)
}
plot.tbl_uvregression <- plot.tbl_regression
|
context("getGenotypes")
test_that("getGenotypes recognizes no file and wrong file arguments", {
expect_error(getGenotypes(), "\"fileName\" is missing, with no default")
})
test_that("getGenotypes recognizes and opens Excel files.", {
pedExcel <-
suppressWarnings(getGenotypes(
fileName = system.file("testdata", "qcPed.xlsx",
package =
"nprcgenekeepr")
))
expect_equal(nrow(pedExcel), 280)
})
test_that(
paste0(
"getGenotypes recognizes and opens CSV files with default ",
"comma separator."
),
{
pedCsv <-
getGenotypes(fileName = system.file("testdata", "qcPed.csv",
package =
"nprcgenekeepr"))
expect_equal(nrow(pedCsv), 280)
}
)
test_that(
paste0(
"getGenotypes recognizes and opens CSV files with specified ",
"comma separator."
),
{
pedCsv2 <-
getGenotypes(
fileName = system.file("testdata", "qcPed.csv",
package =
"nprcgenekeepr"),
sep = ","
)
expect_equal(nrow(pedCsv2), 280)
}
)
test_that(
paste0(
"getGenotypes recognizes and opens .txt files with specified ",
"tab separator."
),
{
pedTxt <-
getGenotypes(
fileName = system.file("testdata", "qcPed.txt",
package =
"nprcgenekeepr"),
sep = "\t"
)
expect_equal(nrow(pedTxt), 280)
}
)
|
IsoGenem <- function(x, y) {
  y <- as.matrix(y)
  if (nrow(y) == 1) {
    # single gene: delegate to the scalar version
    IsoGene1(x = x, y = y)
  } else {
    # sort samples by dose
    ordx <- order(x)
    x <- x[ordx]
    y <- y[, ordx]
    # per-dose sample means (genes x dose levels)
    ydf <- as.data.frame(t(y))
    y.m <- do.call("cbind", unclass(by(ydf, x, colMeans)))
    n.p <- table(x)       # replicates per dose level
    n.g <- length(n.p)    # number of dose levels
    n.pSum <- sum(n.p)    # total sample size
    # isotonic (monotone) means under increasing and decreasing order
    y.is.u <- t(apply(y.m, 1, function(x) pava(x, w = n.p)))
    y.is.d <- t(apply(y.m, 1, function(x) pava(x, w = n.p, decreasing = TRUE)))
    # expand dose-level means back to one column per sample
    rep.iso.u <- y.is.u[, rep(1:n.g, n.p)]
    rep.iso.d <- y.is.d[, rep(1:n.g, n.p)]
    y.m.all <- y.m[, rep(1:n.g, n.p)]
    # sums of squares per gene
    SST0 <- rowSums((y - rowMeans(y))^2)    # total, about the grand mean
    SSIS.u1 <- rowSums((rep.iso.u - y)^2)   # residual, increasing isotonic fit
    SSIS.d1 <- rowSums((rep.iso.d - y)^2)   # residual, decreasing isotonic fit
    SST <- rowSums((y - y.m.all)^2)         # within-dose (ANOVA residual)
    # direction with the better (smaller residual) isotonic fit
    direction <- ifelse(SSIS.u1 <= SSIS.d1, "u", "d")
    # shared standard error for the Williams/Marcus-type contrasts
    se.contrast <- sqrt(SST / (n.pSum - n.g) * (1 / n.p[1] + 1 / n.p[n.g]))
    # upward statistics
    Esquare.up <- 1 - SSIS.u1 / SST0
    w.up <- (y.is.u[, n.g] - y.m[, 1]) / se.contrast
    w.c.up <- (y.is.u[, n.g] - y.is.u[, 1]) / se.contrast
    m.up <- (y.is.u[, n.g] - y.is.u[, 1]) / sqrt(SSIS.u1 / (n.pSum - n.g))
    i.up <- (y.is.u[, n.g] - y.is.u[, 1]) /
      sqrt(SSIS.u1 / (n.pSum - apply(y.is.u, 1, function(x) length(unique(x)))))
    # downward statistics
    Esquare.dn <- 1 - SSIS.d1 / SST0
    w.dn <- (y.is.d[, n.g] - y.m[, 1]) / se.contrast
    w.c.dn <- (y.is.d[, n.g] - y.is.d[, 1]) / se.contrast
    m.dn <- (y.is.d[, n.g] - y.is.d[, 1]) / sqrt(SSIS.d1 / (n.pSum - n.g))
    i.dn <- (y.is.d[, n.g] - y.is.d[, 1]) /
      sqrt(SSIS.d1 / (n.pSum - apply(y.is.d, 1, function(x) length(unique(x)))))
    res <- list(E2.up = Esquare.up,
                Williams.up = as.numeric(w.up),
                Marcus.up = as.numeric(w.c.up),
                M.up = as.numeric(m.up),
                ModM.up = as.numeric(i.up),
                E2.dn = Esquare.dn,
                Williams.dn = as.numeric(w.dn),
                Marcus.dn = as.numeric(w.c.dn),
                M.dn = as.numeric(m.dn),
                ModM.dn = as.numeric(i.dn),
                direction = direction)
    return(res)
  }
}
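## Hedged usage sketch on simulated dose-response data (not in the original
## source); `pava()` and `IsoGene1()` are assumed to come from the same package:
# set.seed(1)
# x <- rep(c(0, 1, 2, 3), each = 3)            # 4 dose levels, 3 replicates each
# y <- matrix(rnorm(5 * length(x)), nrow = 5)  # 5 genes x 12 samples
# res <- IsoGenem(x, y)
# res$direction                                # "u" or "d" per gene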
|
.onLoad <- function(libname, pkgname) {
  if (ps::ps_is_supported()) {
    ps::ps_handle()
    # record the system boot time in the C layer; it is used when computing
    # process start times
    bt <- ps::ps_boot_time()
    .Call(c_processx__set_boot_time, bt)
  }
  supervisor_reset()
  # enable debug output when the DEBUGME environment variable is set
  if (Sys.getenv("DEBUGME", "") != "" &&
      requireNamespace("debugme", quietly = TRUE)) {
    debugme::debugme()
  }
  err$onload_hook()
}
.onUnload <- function(libpath) {
  rethrow_call(c_processx__unload_cleanup)
  supervisor_reset()
}
|
bootSummary2 <- function(out, per100 = TRUE) {
  out2 <- as.matrix(out)
  if (per100) {
    # rescale by 100/3 to put values on a per-100 basis
    out2 <- out2 * (100 / 3)
  }
  # column-wise five-number summaries plus the mean
  apply(out2, 2, summary)
}
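## Usage sketch with hypothetical bootstrap output (not in the original source):
# out <- matrix(rpois(300, 3), ncol = 3)
# bootSummary2(out)                  # summaries on the per-100 scale
# bootSummary2(out, per100 = FALSE)  # summaries on the raw scale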
|
library("testthat")
library("arules")
context("associations")
set.seed(20070611)
m <- matrix(as.integer(runif(100000) > 0.8), ncol = 20)
dimnames(m) <- list(NULL, paste("item", c(1:20), sep = ""))
t <- as(m, "transactions")
expect_identical(dim(t), dim(m))
r <-
apriori(t,
parameter = list(supp = 0.01, conf = 0.1),
control = list(verb = FALSE))
ss <- subset(r, subset = lift > 1.4 & lhs %in% "item3")
expect_identical(labels(lhs(ss)), "{item2,item3}")
expect_true(quality(ss)$lift > 1.4)
f <- eclat(t,
parameter = list(supp = 0.01),
control = list(verb = FALSE))
ss <- subset(f, subset = items %in% "item7")
expect_identical(labels(ss), grep("item7", labels(ss), value = TRUE))
lmat <- rbind(c(1, 1, 0), c(0, 0, 1))
rmat <- rbind(c(1, 0, 0), c(0, 1, 0))
colnames(lmat) <- c("a", "b", "c")
colnames(rmat) <- c("c", "a", "b")
lhs <- as(lmat, "itemMatrix")
rhs <- as(rmat, "itemMatrix")
is <-
new("itemsets", items = lhs, quality = data.frame(support = c(.1, .1)))
expect_equal(labels(is), c("{a,b}", "{c}"))
qual <-
data.frame(
support = c(.5, .5),
confidence = c(.5, .5),
lift = c(2, 1)
)
expect_warning(r <- rules(lhs, rhs, quality = qual))
context("subsetting")
take_r <- sample(nrow(t), 10)
take_c <- sample(ncol(t), 10)
expect_equal(dim(sub_n <- t[take_r, take_c]), c(10L, 10L))
expect_equal(dim(t[take_r]), c(10L, ncol(t)))
expect_equal(dim(t[take_r, ]), c(10L, ncol(t)))
expect_equal(dim(t[, take_c]), c(nrow(t), 10L))
take_rb <- rep(FALSE, nrow(t))
take_rb[take_r] <- TRUE
take_cb <- rep(FALSE, ncol(t))
take_cb[take_c] <- TRUE
expect_equal(dim(sub_b <- t[take_rb, take_cb]), c(10L, 10L))
expect_equal(dim(t[take_rb, ]), c(10L, ncol(t)))
expect_equal(dim(t[take_rb]), c(10L, ncol(t)))
expect_equal(dim(t[, take_cb]), c(nrow(t), 10L))
expect_true(setequal(sub_b, recode(sub_n, sub_b)))
take_cc <- itemLabels(t)[take_c]
expect_equal(dim(t[, take_cc]), c(nrow(t), 10L))
expect_warning(expect_equal(dim(t[NA, NA]), c(0L, 0L)))
expect_warning(expect_equal(dim(t[NA]), c(0L, ncol(t))))
expect_warning(expect_equal(dim(t[, NA]), c(nrow(t), 0L)))
take_rn <- take_r
take_rn[3:4] <- NA
take_cn <- take_c
take_cn[3:4] <- NA
expect_warning(expect_equal(dim(t[take_rn, take_cn]), c(8L, 8L)))
take_rbn <- take_rb
take_rbn[which(take_rbn)[3:4]] <- NA
take_cbn <- take_cb
take_cbn[which(take_cbn)[3:4]] <- NA
expect_warning(expect_equal(dim(t[take_rbn, take_cbn]), c(8L, 8L)))
take_ccn <- take_cc
take_ccn[3:4] <- NA
expect_warning(expect_equal(dim(t[, take_ccn]), c(nrow(t), 8L)))
r <-
apriori(t,
parameter = list(supp = 0.01, conf = 0.1),
control = list(verb = FALSE))
expect_warning(expect_equal(length(r[NA]), 0L))
expect_warning(expect_equal(length(r[c(1L, NA_integer_)]), 1L))
expect_warning(expect_equal(length(r[c(TRUE, NA, FALSE)]),
sum(rep(
c(TRUE, NA, FALSE), length.out = length(r)
), na.rm = TRUE)))
f <- eclat(t,
parameter = list(supp = 0.01),
control = list(verb = FALSE))
expect_warning(expect_equal(length(f[NA]), 0L))
expect_warning(expect_equal(length(f[c(1L, NA_integer_)]), 1L))
expect_warning(expect_equal(length(f[c(TRUE, NA, FALSE)]),
sum(rep(
c(TRUE, NA, FALSE), length.out = length(f)
), na.rm = TRUE)))
expect_identical(r[1:5], head(r, n = 5))
expect_identical(r[1:6], head(r))
expect_identical(r[1:2], head(r[1:2]))
expect_identical(r[0], head(r, n = 0))
expect_identical(r[1:(length(r) - 10)], head(r, n = -10))
expect_identical(r[1:length(r)], head(r, n = length(r)))
expect_identical(r[1:length(r)], head(r, n = length(r) + 100L))
expect_identical(sort(r, by = "lift")[1:5], head(r, n = 5, by = "lift"))
expect_identical(sort(r, by = "lift", decreasing = FALSE)[1:5],
head(
r,
n = 5,
by = "lift",
decreasing = FALSE
))
expect_identical(head(r[0]), r[0])
expect_identical(r[tail(1:length(r), n = 5)], tail(r, n = 5))
expect_identical(r[1:2], tail(r[1:2]))
expect_identical(r[0], tail(r, n = 0))
expect_identical(sort(r, by = "lift")[tail(1:length(r), n = 5)], tail(r, n = 5, by = "lift"))
expect_identical(tail(r[0]), r[0])
expect_identical(match(r[2:10], r), 2:10)
expect_identical(r[2:10] %in% r, 2:10)
expect_identical(match(f[2:10], f), 2:10)
expect_identical(f[2:10] %in% f, 2:10)
expect_identical(rhs(r[1:10]) %pin% "item1",
c(FALSE, FALSE, TRUE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, TRUE))
expect_error(rhs(r[1:10]) %pin% "")
expect_warning(rhs(r[1:10]) %pin% c("1", "2"))
|
context("pin_test_file")
load(file = "test_pin.Rdata")
test_that(desc="class in df",{
test_pins$pin <- as.pin(test_pins$pin_raw)
expect_is(test_pins$pin, "pin")
})
test_that(desc="Frequencies in test file: pin_sex",{
expect_equal(as.numeric(table(pin_sex(test_pins$pin))), c(9299,9328))
})
test_that(desc="Frequencies in test file: pin_age",{
expect_equal(as.numeric(table(pin_age(test_pins$pin, "2015-01-01")))[c(1,2,10,50,99)],
c(363,374,364,26,365))
})
test_that(desc="Frequencies in test file: pin_ctrl",{
expect_equal(as.numeric(table(pin_ctrl(test_pins$pin))), 18627)
})
test_that(desc="Frequencies in test file: pin_coordn",{
expect_equal(as.numeric(table(pin_coordn(test_pins$pin))), 18627)
})
test_that(desc="Frequencies in test file: pin_birthplace",{
expect_equal(as.numeric(table(pin_birthplace(test_pins$pin)))[c(1,2,10,27:28)], c(5,3,46,7724,9105))
})
test_that(desc="Frequencies in test file: pin_to_date",{
expect_equal(as.character(min(unique(pin_to_date(test_pins$pin)))), "1890-01-01")
expect_equal(as.character(max(unique(pin_to_date(test_pins$pin)))), "2014-12-31")
})
|
star_schema_as_tibble_list <-
function(st,
include_role_playing = FALSE) {
UseMethod("star_schema_as_tibble_list")
}
star_schema_as_tibble_list.star_schema <-
function(st,
include_role_playing = FALSE) {
star_schema_as_tl(st, include_role_playing = include_role_playing)
}
star_schema_as_tl <-
function(st,
tl_prev = NULL,
commondim = NULL,
include_role_playing) {
UseMethod("star_schema_as_tl")
}
star_schema_as_tl.star_schema <-
  function(st,
           tl_prev = NULL,
           commondim = NULL,
           include_role_playing) {
    # start from any previously accumulated tibbles and add the fact table
    names_prev <- names(tl_prev)
    tl <- c(tl_prev, list(tibble::as_tibble(st$fact[[1]])))
    names <- c(names_prev, attr(st$fact[[1]], "name"))
    # add every dimension that is not a shared (common) dimension
    dim <- get_all_dimensions(st)
    for (d in seq_along(dim)) {
      name_dim <- attr(dim[[d]], "name")
      if (!(name_dim %in% commondim)) {
        tl <- c(tl, list(tibble::as_tibble(dim[[d]])))
        names <- c(names, name_dim)
      }
    }
    # optionally append role-playing dimensions not already included
    if (include_role_playing) {
      rp_names <- get_name_of_role_playing_dimensions(st)
      for (d in rp_names) {
        if (!(d %in% names)) {
          tl <- c(tl, list(tibble::as_tibble(st$dimension[[d]])))
          names <- c(names, d)
        }
      }
    }
    names(tl) <- names
    tl
  }
get_name_of_role_playing_dimensions <- function(st) {
res <- c()
names <- names(st$dimension)
for (n in names) {
if (is_role_playing_dimension(st$dimension[[n]])) {
res <- c(res, n)
}
}
res
}
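## Usage sketch (hedged; assumes the starschemar example star schema
## `st_mrs_age` is available -- not part of the original source):
# tl <- star_schema_as_tibble_list(st_mrs_age)
# names(tl)  # fact table plus its non-role-playing dimensions
# tl_rp <- star_schema_as_tibble_list(st_mrs_age, include_role_playing = TRUE)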
|
gof.trial <- function(model, breaks = NULL, nc = NULL) {
  width <- model$meta.data$width
  left <- model$meta.data$left
  xmat <- model$mr$mr$data
  # restrict to observer 1 records for objects used in the fit
  data <- eval(model$data)
  data <- data[data$observer == 1 &
                 data$object %in% as.numeric(names(model$fitted)), ]
  n <- dim(xmat)[1]
  # omega = 2 when the object was detected by both observers
  xmat$omega <- rep(1, dim(xmat)[1])
  xmat$omega[xmat$timesdetected == 2] <- 2
  # default number of cells: sqrt of the smaller of the two sample sizes
  if (is.null(nc)) {
    nc <- round(sqrt(min(length(xmat$distance[xmat$observer == 1 &
                                                xmat$detected == 1]),
                         length(xmat$distance[xmat$observer == 1 &
                                                xmat$timesdetected == 2]))), 0)
  }
  # equally spaced distance bins unless breaks are supplied
  if (is.null(breaks)) {
    breaks <- left + ((width - left) / nc) * (0:nc)
  } else {
    nc <- length(breaks) - 1
  }
  xmat$detected <- 1
  # fitted duplicate-detection probabilities from the mark-recapture part
  p1 <- predict(model$mr, xmat, compute = TRUE, integrate = FALSE)$fitted
  p.omega <- data.frame(object = rep(1:n, 2),
                        omega = c(rep(1, n), rep(2, n)),
                        distance = rep(xmat$distance, 2),
                        prob = rep(0, 2 * n))
  p.omega$prob[p.omega$omega == 1] <- 1 - p1
  p.omega$prob[p.omega$omega == 2] <- p1
  # expected counts by capture history (omega) and distance bin
  expected.2 <- by(p.omega$prob,
                   list(as.factor(p.omega$omega),
                        cut(p.omega$distance, breaks, include.lowest = TRUE)),
                   sum, na.rm = TRUE)
  # expected counts per distance bin from the distance-sampling part
  expected.1 <- rep(0, nc)
  for (j in 1:nc) {
    expected.1[j] <- sum(predict(model, compute = TRUE,
                                 int.range = matrix(c(breaks[j], breaks[j + 1]),
                                                    nrow = 1))$fitted /
                           model$fitted, na.rm = TRUE)
  }
  observed.count.1 <- table(cut(data$distance, breaks, include.lowest = TRUE))
  observed.count.2 <- table(as.factor(xmat$omega),
                            cut(xmat$distance, breaks, include.lowest = TRUE))
  # chi-square statistics and degrees of freedom for the two model components
  chisq.1 <- sum((observed.count.1 - expected.1)^2 / expected.1, na.rm = TRUE)
  chisq.2 <- sum((observed.count.2 - expected.2)^2 / expected.2, na.rm = TRUE)
  df.1 <- nc - 1 - length(model$ds$ds$par)
  if (df.1 <= 0) {
    df.1 <- NA
    p.1 <- NA
  } else {
    p.1 <- 1 - pchisq(chisq.1, df.1)
  }
  df.2 <- nc - length(model$mr$par)
  if (df.2 <= 0) {
    df.2 <- NA
    p.2 <- NA
  } else {
    p.2 <- 1 - pchisq(chisq.2, df.2)
  }
  return(list(chi1 = list(observed = observed.count.1,
                          expected = expected.1,
                          chisq = chisq.1,
                          p = p.1,
                          df = df.1),
              chi2 = list(observed = observed.count.2,
                          expected = expected.2[1:2, ],
                          chisq = chisq.2,
                          p = p.2,
                          df = df.2),
              pooled.chi = list(chisq = chisq.1 + chisq.2,
                                df = 2 * nc - length(model$par) - 1,
                                p = 1 - pchisq(chisq.1 + chisq.2,
                                               2 * nc - length(model$par) - 1))))
}
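## Usage sketch (hedged; assumes `model` is a trial-configuration
## double-observer model fitted with mrds, e.g. ddf(method = "trial", ...) --
## not part of the original source):
# gof <- gof.trial(model, nc = 8)
# gof$chi1$p      # GOF p-value for the distance-sampling component
# gof$pooled.chi  # pooled chi-square over both components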
|
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  warning = FALSE,
  message = FALSE,
  fig.align = "center",
  fig.path = "../man/figures/",
  cache.path = "../inst/cache/")
knitr::include_graphics("../man/figures/hier.png")
knitr::include_graphics("../man/figures/mts.png")
knitr::include_graphics("../man/figures/mts2.png")
knitr::include_graphics("../man/figures/mts3.png")
knitr::include_graphics("../man/figures/mts4.png")
knitr::include_graphics("../man/figures/mts6.png")
knitr::include_graphics("../man/figures/mts12.png")
knitr::include_graphics("../man/figures/mts_base.png")
knitr::include_graphics("../man/figures/mts2_base.png")
knitr::include_graphics("../man/figures/mts3_base.png")
knitr::include_graphics("../man/figures/mts4_base.png")
knitr::include_graphics("../man/figures/mts6_base.png")
knitr::include_graphics("../man/figures/mts12_base.png")
knitr::include_graphics("../man/figures/ite.png")
knitr::include_graphics("../man/figures/mts_base_rec.png")
|