conditional_means.instrumental_forest <- function(object, ...) { tau.hat <- predict(object, ...)$predictions Y.hat.0 <- object$Y.hat - object$W.hat * tau.hat Y.hat.1 <- object$Y.hat + (1 - object$W.hat) * tau.hat cbind("control" = Y.hat.0, "treated" = Y.hat.1) } double_robust_scores.instrumental_forest <- function(object, compliance.score = NULL, ...) { if (is.null(compliance.score)) { compliance.forest <- grf::causal_forest( X = object$X.orig, Y = object$W.orig, W = object$Z.orig, Y.hat = object$W.hat, W.hat = object$Z.hat ) compliance.score <- predict(compliance.forest)$predictions } Z.orig <- object$Z.orig Z.hat <- object$Z.hat Y.orig <- object$Y.orig Y.hat <- object$Y.hat W.orig <- object$W.orig W.hat <- object$W.hat tau.hat <- predict(object)$predictions g.hat <- 1 / compliance.score * (Z.orig - Z.hat) / (Z.hat * (1 - Z.hat)) dr.correction <- g.hat * (Y.orig - Y.hat - (W.orig - W.hat) * tau.hat) gamma <- tau.hat + dr.correction cbind("control" = -gamma, "treated" = gamma) }
summary.rmlr <- function(object, ...){ if (object$Penalization==0) cat("Multinomial Logistic Regression\n") else{ cat("Ridge Multinomial Logistic Regression\n") cat("Penalization : ", object$Penalization,"\n\n")} cat("\nSummary of the response\n") print(object$itab) cat("\n Model Fit :\n") fit=matrix(c(object$Deviance, object$Difference, object$df , object$p), 1,4) rownames(fit)="Final Model" colnames(fit)=c(" -2 log Lik ", "Difference (Null)", " df", "P-value") print(fit) cat("\n\n") Ps=rbind(object$CoxSnell, object$Nagelkerke, object$MacFaden) rownames(Ps)=c("CoxSnell", "Nagelkerke", "McFadden") colnames(Ps)="Pseudo R-Squared" print(Ps) cat("\nOther fit indices (Information criteria)\n") cat("AIC: ", object$AIC,"\n") cat("BIC: ", object$BIC,"\n") nlevels=dim(object$beta)[1] lablevel=rownames(object$beta) cat("\nParameter estimates :\n") for (i in 1:nlevels){ cat("Response level : ", lablevel[i],"\n") inf=cbind(object$beta[i,], object$stderr[i,], (object$beta[i,]/object$stderr[i,])^2, 1 - pchisq((object$beta[i,]/object$stderr[i,])^2, df = 1), exp(object$beta[i,]), exp(object$beta[i,]-1.96*object$stderr[i,]), exp(object$beta[i,]+1.96*object$stderr[i,])) colnames(inf)=c(" Beta", " Std. Error", " Wald", " p-value", "exp(Beta)", "CI : Lower", "CI : Upper") print(round(inf, digits=4))} cat("\nClassification :\n") cat("\nTable :\n") print(object$Table) cat("\nPercentages :\n") percent=round((diag(object$Table)/object$itab[,1])*100, digits=2) print(percent) cat("\nGlobal Percentage :", object$PercentCorrect,"\n") }
context("df_to_x3p") dftest2 <- dftest[sample(1:42, size = 38), c(1, 2, 4)] test_that("df_to_x3p works as expected", { suppressWarnings( expect_error( df_to_x3p(dftest[, 1:3]), "!is.null.dframe\\$value. is not TRUE" ) ) expect_silent(tmp <- df_to_x3p(dftest[, c(1, 2, 4, 5)])) expect_equivalent( tmp$surface.matrix, matrix(dplyr::arrange(dftest, desc(y))$value, byrow = F, nrow = 6 ) ) expect_equivalent(tmp$header.info, list(sizeX = 6, sizeY = 7, incrementX = 1, incrementY = 1)) expect_message( x3ptest2 <- df_to_x3p(dftest2), "dframe has missing values ... they will be expanded" ) }) test_that("x3p_to_df works as expected", { expect_silent(tmp <- x3p_to_df(x3ptest_mask)) expect_equivalent( tmp %>% dplyr::mutate(x = x + 1, y = y + 1) %>% dplyr::arrange(y, x) %>% dplyr::select(-annotation), dftest[, c(1, 2, 4, 5)] ) x3ptest2 <- x3ptest x3ptest2$header.info <- list(sizeX = NULL, sizeY = NULL, incrementX = NULL, incrementY = NULL) expect_warning(tmp2 <- x3p_to_df(x3ptest2)) expect_equivalent(sort(tmp2$value), sort(dftest$value)) x3ptest2 <- x3ptest x3ptest2$header.info <- list( sizeX = NULL, sizeY = NULL, incrementX = NULL, incrementY = NULL, num_obs_per_profile = 6, num_profiles = 7, profile_inc = 1, obs_inc = 1 ) expect_silent(tmp3 <- x3p_to_df(x3ptest2)) expect_equivalent(sort(tmp3$value), sort(dftest$value)) })
get_parameters.zeroinfl <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) .return_zeroinf_parms(x, component) } get_parameters.hurdle <- get_parameters.zeroinfl get_parameters.zerotrunc <- get_parameters.default get_parameters.zcpglm <- function(x, component = c("all", "conditional", "zi", "zero_inflated"), ...) { component <- match.arg(component) cf <- stats::coef(x) cond <- data.frame( Parameter = names(cf$tweedie), Estimate = unname(cf$tweedie), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) zi <- data.frame( Parameter = names(cf$zero), Estimate = unname(cf$zero), Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) pars <- switch(component, all = rbind(cond, zi), conditional = cond, zi = , zero_inflated = zi ) if (component != "all") { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) } .return_zeroinf_parms <- function(x, component) { cf <- stats::coef(x) conditional <- grepl("^count_", names(cf), perl = TRUE) zero_inflated <- grepl("^zero_", names(cf), perl = TRUE) cond <- data.frame( Parameter = names(cf)[conditional], Estimate = unname(cf)[conditional], Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) zi <- data.frame( Parameter = names(cf)[zero_inflated], Estimate = unname(cf)[zero_inflated], Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) pars <- switch(component, all = rbind(cond, zi), conditional = cond, zi = , zero_inflated = zi ) if (component != "all") { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) } get_parameters.mhurdle <- function(x, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) cf <- stats::coef(x) cond_pars <- which(grepl("^h2\\.", names(cf))) zi_pars <- which(grepl("^h1\\.", names(cf))) ip_pars <- which(grepl("^h3\\.", names(cf))) aux_pars <- (1:length(names(cf)))[-c(cond_pars, zi_pars, ip_pars)] if (length(cond_pars)) { cond_dat <- data.frame( Parameter = names(cf)[cond_pars], Estimate = unname(cf[cond_pars]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ) } else { cond_dat <- NULL } if (length(zi_pars)) { zi_dat <- data.frame( Parameter = names(cf)[zi_pars], Estimate = unname(cf[zi_pars]), Component = "zero_inflated", stringsAsFactors = FALSE, row.names = NULL ) } else { zi_dat <- NULL } if (length(ip_pars)) { ip_dat <- data.frame( Parameter = names(cf)[ip_pars], Estimate = unname(cf[ip_pars]), Component = "infrequent_purchase", stringsAsFactors = FALSE, row.names = NULL ) } else { ip_dat <- NULL } if (length(aux_pars)) { aux_dat <- data.frame( Parameter = names(cf)[aux_pars], Estimate = unname(cf[aux_pars]), Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) } else { aux_dat <- NULL } pars <- rbind(cond_dat, zi_dat, ip_dat, aux_dat) pars <- .filter_component(pars, component) if (component != "all") { pars <- .remove_column(pars, "Component") } .remove_backticks_from_parameter_names(pars) }
frm_fb_sample_imputed_values_eval_likelihood <- function(mm, model_results, ind0, dat_vv, aggregation=FALSE, dat=NULL, ind_miss_vv=NULL, sampling_level_vv=NULL, use_sampling_level_vv=FALSE ) { ind_mm <- ind0[[mm]] mod <- model_results[[mm]] use_variable_level_mm <- ind_mm$use_variable_level model <- mod if (use_sampling_level_vv){ aggregation <- TRUE } if (aggregation){ dat_sel <- dat dat_sel[ ind_miss_vv, colnames(dat_vv) ] <- dat_vv } else { dat_sel <- dat_vv } y <- dat_sel[, ind_mm$dv_vars ] case <- dat_sel$case design_matrix <- dat_sel args <- list(model=model, y=y, case=case, design_matrix=design_matrix ) if (ind_mm$R_density_fct=="frm_linreg_density"){ args$use_in_frm_fb <- TRUE } dmod <- do.call( what=ind_mm$R_density_fct, args=args ) like <- dmod$like if (use_variable_level_mm){ variable_level <- ind_mm$variable_level idcluster <- dat_sel[, variable_level] freq_cluster <- rowsum( 1+0*idcluster, idcluster) freq_cluster <- freq_cluster[ match( idcluster, rownames(freq_cluster) ), 1] like <- like^( 1 / freq_cluster ) } if (aggregation & ( ! use_sampling_level_vv ) ){ like <- like[ ind_miss_vv ] } return(like) }
knitr::opts_chunk$set( fig.height = 4.5, fig.width = 4.5, fig.align = 'center', collapse = TRUE, comment = "#>" ) library(sensemakr) data("darfur") darfur.model <- lm(peacefactor ~ directlyharmed + village + female + age + farmer_dar + herder_dar + pastvoted + hhsize_darfur, data = darfur) stargazer::stargazer(darfur.model, keep = "directlyharmed", type = "text") darfur.sensitivity <- sensemakr(model = darfur.model, treatment = "directlyharmed", benchmark_covariates = "female", kd = 1:3, ky = 1:3, q = 1, alpha = 0.05, reduce = TRUE) darfur.sensitivity <- sensemakr(model = darfur.model, treatment = "directlyharmed", benchmark_covariates = "female", kd = 1:3) darfur.sensitivity ovb_minimal_reporting(darfur.sensitivity, format = "html") summary(darfur.sensitivity) plot(darfur.sensitivity) plot(darfur.sensitivity, sensitivity.of = "t-value") plot(darfur.sensitivity, type = "extreme")
mean_tmle <- function(ftime, ftype, trt, t0 = max(ftime[ftype > 0]), adjustVars = NULL, SL.ftime = NULL, SL.ctime = NULL, SL.trt = NULL, glm.ftime = NULL, glm.ctime = NULL, glm.trt = "1", glm.family = "binomial", returnIC = TRUE, returnModels = FALSE, ftypeOfInterest = unique(ftype[ftype != 0]), trtOfInterest = unique(trt), bounds = NULL, verbose = FALSE, Gcomp = FALSE, gtol = 1e-3, ...) { n <- length(ftime) id <- seq_len(n) dat <- data.frame(id = id, ftime = ftime, ftype = ftype, trt = trt) if (!is.null(adjustVars)) { dat <- cbind(dat, adjustVars) } nJ <- length(ftypeOfInterest) allJ <- sort(unique(ftype[ftype != 0])) ofInterestJ <- sort(ftypeOfInterest) ntrt <- length(trtOfInterest) uniqtrt <- sort(trtOfInterest) trtOut <- estimateTreatment( dat = dat, ntrt = ntrt, uniqtrt = uniqtrt, adjustVars = adjustVars, SL.trt = SL.trt, glm.trt = glm.trt, returnModels = returnModels, gtol = gtol ) dat <- trtOut$dat trtMod <- trtOut$trtMod dataList <- makeDataList( dat = dat, J = allJ, ntrt = ntrt, uniqtrt = uniqtrt, t0 = t0, bounds = bounds ) censOut <- estimateCensoring( dataList = dataList, ntrt = ntrt, uniqtrt = uniqtrt, t0 = t0, verbose = verbose, adjustVars = adjustVars, SL.ctime = SL.ctime, glm.ctime = glm.ctime, glm.family = glm.family, returnModels = returnModels, gtol = gtol ) dataList <- censOut$dataList ctimeMod <- censOut$ctimeMod wideDataList <- makeWideDataList( dat = dat, dataList = dataList, adjustVars = adjustVars, t0 = t0, allJ = allJ, ntrt = ntrt, uniqtrt = uniqtrt ) timeAndType <- expand.grid(rev(seq_len(t0)), ofInterestJ) ftimeMod <- vector(mode = "list", length = length(ofInterestJ)) names(ftimeMod) <- paste0("J", ofInterestJ) for (j in seq_along(ofInterestJ)) { ftimeMod[[j]] <- vector(mode = "list", length = t0) names(ftimeMod[[j]]) <- paste0("t", seq_len(t0)) } for (i in seq_len(nrow(timeAndType))) { estOut <- estimateIteratedMean( wideDataList = wideDataList, t = timeAndType[i, 1], whichJ = timeAndType[i, 2], ntrt = ntrt, uniqtrt = uniqtrt, allJ = allJ, t0 = t0, SL.ftime = SL.ftime, adjustVars = adjustVars, glm.ftime = glm.ftime, verbose = verbose, returnModels = returnModels, bounds = bounds ) wideDataList <- estOut$wideDataList eval(parse(text = paste0( "ftimeMod$J", timeAndType[i, 2], "$t", timeAndType[i, 1], "<-estOut$ftimeMod" ))) wideDataList <- fluctuateIteratedMean( wideDataList = wideDataList, t = timeAndType[i, 1], whichJ = timeAndType[i, 2], ntrt = ntrt, uniqtrt = uniqtrt, allJ = allJ, t0 = t0, SL.ftime = SL.ftime, glm.ftime = glm.ftime, returnModels = returnModels, bounds = bounds, Gcomp = Gcomp ) } est <- rowNames <- NULL for (j in ofInterestJ) { for (z in seq_along(uniqtrt)) { thisEst <- eval(parse(text = paste( "mean(wideDataList[[", z + 1, "]]$Q", j, "star.1)", sep = "" ))) est <- rbind(est, thisEst) rowNames <- c(rowNames, paste(c(uniqtrt[z], j), collapse = " ")) eval(parse(text = paste( "wideDataList[[1]]$Q", j, "star.0.Z", uniqtrt[z], " <- rep(thisEst,n)", sep = "" ))) eval(parse(text = paste( "wideDataList[[1]]$Q", j, "star.1.Z", uniqtrt[z], " <- wideDataList[[(z+1)]]$Q", j, "star.1", sep = "" ))) } } row.names(est) <- rowNames for (j in ofInterestJ) { for (z in seq_along(uniqtrt)) { for (t in rev(seq_len(t0))) { outcomeName <- ifelse(t == t0, paste("N", j, ".", t0, sep = ""), paste("Q", j, "star.", t + 1, sep = "") ) eval(parse(text = paste( "wideDataList[[1]]$D.Z", uniqtrt[z], ".", j, "star.", t, " <- wideDataList[[1]]$H", uniqtrt[z], ".", t, "*(wideDataList[[1]][,outcomeName] - wideDataList[[1]]$Q", j, "star.", t, ")", sep = "" ))) } eval(parse(text = 
paste( "wideDataList[[1]]$D.Z", uniqtrt[z], ".", j, "star.0 <- wideDataList[[1]]$Q", j, "star.1.Z", uniqtrt[z], " - wideDataList[[1]]$Q", j, "star.0.Z", uniqtrt[z], sep = "" ))) ind <- eval(parse(text = paste( "grep('D.Z", uniqtrt[z], ".", j, "star', names(wideDataList[[1]]))", sep = "" ))) eval(parse(text = paste( "wideDataList[[1]]$IC", j, "star.Z", uniqtrt[z], " <- rowSums(cbind(rep(0, nrow(wideDataList[[1]])),wideDataList[[1]][,ind]))", sep = "" ))) } } infCurves <- wideDataList[[1]][ , grep("IC", names(wideDataList[[1]])), drop = FALSE ] meanIC <- apply(infCurves, MARGIN = 2, FUN = mean) var <- t(as.matrix(infCurves)) %*% as.matrix(infCurves) / (n^2) row.names(var) <- colnames(var) <- rowNames out <- list( est = est, var = var, meanIC = meanIC, ic = infCurves, trtMod = trtMod, ftimeMod = ftimeMod, ctimeMod = ctimeMod, ftime = ftime, ftype = ftype, trt = trt, adjustVars = adjustVars ) class(out) <- "survtmle" return(out) }
PREFIX_XML <- "<?xml version=\"1.0\"?>\n" convert.samples.MAAT <- function(trait.samples, runid) { if (is.list(trait.samples)) { trait.samples <- as.data.frame(trait.samples) } trait.names <- colnames(trait.samples) trait.names[trait.names == "leaf_respiration_rate_m2"] <- "atref.rd" trait.names[trait.names == "Vcmax"] <- "atref.vcmax" trait.names[trait.names == "Jmax"] <- "atref.jmax" trait.names[trait.names == "Ev_Arrhenius"] <- "Ha.vcmax" trait.names[trait.names == "Ej_Arrhenius"] <- "Ha.jmax" trait.names[trait.names == "Ha_Modified_Arrhenius_Vcmax"] <- "Ha.vcmax" trait.names[trait.names == "Hd_Modified_Arrhenius_Vcmax"] <- "Hd.vcmax" trait.names[trait.names == "Ha_Modified_Arrhenius_Jmax"] <- "Ha.jmax" trait.names[trait.names == "Hd_Modified_Arrhenius_Jmax"] <- "Hd.jmax" trait.names[trait.names == "cuticular_cond"] <- "g0" trait.names[trait.names == "stomatal_slope"] <- "g1_leuning" trait.names[trait.names == "stomatal_slope.g1"] <- "g1_medlyn" trait.names[trait.names == "stomatal_slope.BB"] <- "g1_ball" trait.names[trait.names == "f_frac"] <- "f" trait.names[trait.names == "theta"] <- "theta_j" trait.names[trait.names == "leaf_respiration_Q10"] <- "q10.rd" colnames(trait.samples) <- trait.names if ("atref.rd" %in% names(trait.samples)) { trait.samples[["b_rdv_25"]] <- trait.samples[["atref.rd"]] / trait.samples[["atref.vcmax"]] } if ("Ha.vcmax" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, Ha.vcmax = udunits2::ud.convert(Ha.vcmax, "kJ", "J")) } if ("Hd.vcmax" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, Hd.vcmax = udunits2::ud.convert(Hd.vcmax, "kJ", "J")) } if ("Ha.jmax" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, Ha.jmax = udunits2::ud.convert(Ha.jmax, "kJ", "J")) } if ("Hd.jmax" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, Hd.jmax = udunits2::ud.convert(Hd.jmax, "kJ", "J")) } if ("leaf_reflect_vis" %in% names(trait.samples) & "leaf_trans_vis" %in% names(trait.samples) ){ leaf_abs <- 1-(trait.samples[["leaf_reflect_vis"]]+trait.samples[["leaf_trans_vis"]]) trait.samples[["a"]] <- leaf_abs remove <- which(colnames(trait.samples)=="leaf_trans_vis" | colnames(trait.samples)=="leaf_reflect_vis") trait.samples <- trait.samples[,-remove] } if ("leaf_width" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, leaf_width = udunits2::ud.convert(leaf_width, "mm", "m")) } if ("g0" %in% names(trait.samples)) { trait.samples <- transform(trait.samples, g0 = udunits2::ud.convert(g0, "umol H2O m-2 s-1", "mol H2O m-2 s-1")) } return(trait.samples) } write.config.MAAT <- function(defaults = NULL, trait.values, settings, run.id) { nest_entries <- function(x, pattern, new_name = pattern){ matches <- grepl(pattern, names(x)) if(!any(matches)){ return(x) } nested <- stats::setNames(x[matches], gsub(pattern, "", names(x[matches]))) x <- x[!matches] x[[new_name]] <- nested x } rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) maat_mod_obj <- as.character(settings$model$config$mod_obj) settings$model$config$mod_obj <- NULL system2(file.path(settings$model$binary, "run_scripts/setup_MAAT_project.bs"), c(maat_mod_obj, rundir, file.path(settings$model$binary, "run_scripts"), file.path(settings$model$binary, "src"))) unlink(file.path(rundir,"leaf_user_dynamic.xml"), recursive = FALSE) if (is.null(settings$run$inputs$met)) { unlink(file.path(rundir,"leaf_user_met.xml"), recursive = FALSE) } run_maat_script <- file.path(settings$model$binary, 
"src", "run_MAAT.R") if (!is.null(settings$model$config$mod_mimic)) { PEcAn.logger::logger.info(paste0("Running with model mimic: ",settings$model$config$mod_mimic)) mod_mimic <- as.character(settings$model$config$mod_mimic) settings$model$config$mod_mimic <- NULL xml <- PEcAn.settings::listToXml(settings$model$config, "default") } else { PEcAn.logger::logger.info("*** Model mimic not selected ***") mod_mimic <- 'NULL' xml <- PEcAn.settings::listToXml(settings$model$config, "default") } PEcAn.logger::logger.info("*** Convert input trait values to MAAT parameters and units ***") traits <- convert.samples.MAAT(trait.samples = trait.values[[settings$pfts$pft$name]],runid=run.id) traits <- as.list(traits) traits.list <- list() maat_param_prefix_list <- list(param=c("Ha.","Hd.","atref.","reftemp.","Topt.","deltaS.","a_deltaS_t.","b_deltaS_t.","q10.","a_q10_t.", "b_q10_t.","tupp_cox.","tlow_cox.","exp_cox."), xml=c("Ha","Hd","atref","reftemp","Topt","deltaS","a_deltaS_t","b_deltaS_t","q10","a_q10_t", "b_q10_t","tupp_cox","tlow_cox","exp_cox")) q <- 1 for (p in seq(seq_along(1:length(maat_param_prefix_list$param)))) { if (q==1) { traits.list <- nest_entries(traits, paste0(maat_param_prefix_list$param[p]), paste0(maat_param_prefix_list$xml[p])) } else { traits.list <- nest_entries(traits.list, paste0(maat_param_prefix_list$param[p]), paste0(maat_param_prefix_list$xml[p])) } q <- q+1 } traits.xml <- PEcAn.settings::listToXml(traits.list, "pars") rm(p,q) xml[[1]] <- XML::addChildren(xml[[1]], traits.xml) XML::saveXML(xml, file = file.path(settings$rundir, run.id, "leaf_user_static.xml"), indent = TRUE, prefix = PREFIX_XML) if (is.null(settings$run$inputs$met)) { PEcAn.logger::logger.info("-- No met selected. Running without a met driver --") jobsh <- paste0(" "\"srcdir <- ","'",file.path(settings$model$binary, "src"),"'","\""," ", "\"pdir <- ","'",rundir,"'","\""," ","\"mod_obj <- ","'",maat_mod_obj,"'","\""," ", "\"xml<-T","\""," ","\"uq<-F","\""," ", "\"factorial<-F","\""," ","\"mod_mimic<-",mod_mimic,"\""," ", "\"odir <- ","'",outdir,"'","\""," > ",rundir, "/logfile.txt","\n",'echo "', ' library(PEcAn.MAAT); model2netcdf.MAAT(', "'",rundir,"',","'",outdir,"',", settings$run$site$lat,",", settings$run$site$lon,", '", settings$run$start.date,"', '", settings$run$end.date,"') ", '" | R --vanilla') } else if (!is.null(settings$run$inputs$met)) { met.dir <- dirname(as.character(settings$run$inputs$met$path)) met.file <- basename(as.character(settings$run$inputs$met$path)) file.copy(file.path(met.dir, list.files(met.dir, "*.xml")), rundir, overwrite = TRUE, recursive = FALSE, copy.mode = TRUE, copy.date = TRUE) PEcAn.logger::logger.info("-- Met selected. 
Running with a met driver --") PEcAn.logger::logger.info(paste0("Running with met: ",met.file)) jobsh <- paste0("#!/bin/bash\n","Rscript ",run_maat_script," ","\"srcdir <- ","'",file.path(settings$model$binary, "src"),"'","\""," ", "\"pdir <- ","'",rundir,"'","\""," ","\"mod_obj <- ","'",maat_mod_obj,"'","\""," ", "\"xml<-T","\""," ","\"uq<-F","\""," ", "\"factorial<-F","\""," ","\"mod_mimic<-",mod_mimic,"\""," ", "\"odir <- ","'",outdir,"'","\""," ","\"mdir <- ","'",met.dir,"'", "\""," ","\"metdata <- ","'",met.file,"'","\""," > ",rundir, "/logfile.txt","\n",'echo "', ' library(PEcAn.MAAT); model2netcdf.MAAT(', "'",rundir,"',","'",outdir,"',", settings$run$site$lat,",", settings$run$site$lon,", '", settings$run$start.date,"', '", settings$run$end.date,"') ", '" | R --vanilla') } writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) }
context("Writing Posixct") test_that("Writing Posixct with writeData & writeDataTable", { options("openxlsx.datetimeFormat" = "dd/mm/yy hh:mm") tstart <- strptime("30/05/2017 08:30", "%d/%m/%Y %H:%M", tz = "CET") TimeDT <- c(0, 5, 10, 15, 30, 60, 120, 180, 240, 480, 720, 1440) * 60 + tstart df <- data.frame(TimeDT, TimeTxt = format(TimeDT, "%Y-%m-%d %H:%M")) wb <- createWorkbook() addWorksheet(wb, "writeData") addWorksheet(wb, "writeDataTable") writeData(wb, "writeData", df, startCol = 2, startRow = 3, rowNames = FALSE) writeDataTable(wb, "writeDataTable", df, startCol = 2, startRow = 3) wd <- as.numeric(wb$worksheets[[1]]$sheet_data$v) wdt <- as.numeric(wb$worksheets[[2]]$sheet_data$v) expected <- c( 0, 1, 42885.3541666667, 2, 42885.3576388889, 3, 42885.3611111111, 4, 42885.3645833333, 5, 42885.375, 6, 42885.3958333333, 7, 42885.4375, 8, 42885.4791666667, 9, 42885.5208333333, 10, 42885.6875, 11, 42885.8541666667, 12, 42886.3541666667, 13 ) expect_equal(object = round(wd, 12), expected = expected) expect_equal(object = round(wdt, 12), expected = expected) expect_equal(object = wd, expected = wdt) options("openxlsx.datetimeFormat" = "yyyy-mm-dd hh:mm:ss") }) test_that("Writing mixed EDT/EST Posixct with writeData & writeDataTable", { options("openxlsx.datetimeFormat" = "dd/mm/yy hh:mm") tstart1 <- as.POSIXct("12/03/2018 08:30", format = "%d/%m/%Y %H:%M") tstart2 <- as.POSIXct("10/03/2018 08:30", format = "%d/%m/%Y %H:%M") TimeDT1 <- c(NA, 0, 10, 30, 60, 120, 240, 720, 1440) * 60 + tstart1 TimeDT2 <- c(0, 10, 30, 60, 120, 240, 720, NA, 1440) * 60 + tstart2 df <- data.frame( timeval = c(TimeDT1, TimeDT2), timetxt = format(c(TimeDT1, TimeDT2), "%Y-%m-%d %H:%M") ) wb <- createWorkbook() addWorksheet(wb, "writeData") addWorksheet(wb, "writeDataTable") writeData(wb, "writeData", df, startCol = 2, startRow = 3, rowNames = FALSE) writeDataTable(wb, "writeDataTable", df, startCol = 2, startRow = 3) wd <- as.numeric(wb$worksheets[[1]]$sheet_data$v) wdt <- as.numeric(wb$worksheets[[2]]$sheet_data$v) wd <- wd[wb$worksheets[[1]]$sheet_data$cols == 2] wdt <- wdt[wb$worksheets[[2]]$sheet_data$cols == 2] wd <- wd[wd != 0 | is.na(wd)] wdt <- wdt[wdt != 0 | is.na(wdt)] wd <- convertToDateTime(wd[order(wd)]) wdt <- convertToDateTime(wdt[order(wdt)]) expected <- df$timeval[order(df$timeval)] expect_equal( object = wd, expected = expected, tolerance = 10 ^ -10, check.tzone = FALSE ) expect_equal( object = wdt, expected = expected, tolerance = 10 ^ -10, check.tzone = TRUE ) expect_equal( object = wd, expected = wdt, check.tzone = TRUE ) options("openxlsx.datetimeFormat" = "yyyy-mm-dd hh:mm:ss") })
cube <- function(x=0,y=0,z=0, bordered=TRUE, filled = TRUE, lwd=2, scale=1, fillcol = gray(.95), bordercol ='black', ...) { mycube <- cube3d() mycube$vb[4,] <- mycube$vb[4,]/scale*2 for (i in 1:length(x)) { if (bordered) { bcube <- mycube bcube$material$lwd <- lwd bcube$material$front <- 'line' bcube$material$back <- 'line' bcube %>% translate3d(x[i], y[i], z[i]) %>% shade3d } if (filled) { fcube <- mycube fcube$vb[4,] <- fcube$vb[4,]*1.01 fcube$material$col <- fillcol fcube %>% translate3d(x[i], y[i], z[i]) %>% shade3d } } }
SQLKeywords_SQLiteConnection <- function(dbObj, ...) { .SQL92Keywords } setMethod("SQLKeywords", "SQLiteConnection", SQLKeywords_SQLiteConnection)
var_imp.bagger <- function(object, ...) { object$imp }
bwadap.ts <- function(x, Kn = 5, c.thresh = 2, ...) { n <- length(x) thresh <- c.thresh*sqrt(log(n, 10)/n) ac <- as.vector(acf(x, type="correlation", plot=FALSE, lag.max = floor(n/2))$acf) l <- length(ac) pos <- 1 while(pos < n/10) { npos <- match(TRUE, abs(ac[pos:l]) < thresh) if( is.na(npos) ) break; pos <- pos+npos-1 if( pos+Kn-1 > floor(n/2) ) break; if(all(abs(ac[pos:(pos+Kn-1)]) < thresh)){ return(pos-2) } else { npos <- match(FALSE, abs(ac[pos:(pos+Kn-1)]) < thresh) pos <- pos + npos - 1 } } warning("No bandwidth found") return(floor(n/10)) } bwadap.numeric <- function(x, smax=13.49/IQR(x), n.points = 1000, Kn = 1.349*5/IQR(x), c.thresh = 2, ...) { n <- length(x) if(n <= 2) stop("x must have length greater than 2") thresh <- c.thresh*sqrt(log(n, 10)/n) dft <- function(s) { dftval <- complex( real = sum(cos(s * x)), imaginary = sum(sin(s * x)) ) return(Mod(dftval)/n) } dft <- Vectorize(dft, "s") svals <- seq(0, smax, length.out=n.points) ftvals <- dft(svals) pos <- 1 while(pos < n.points) { npos <- match(TRUE, ftvals[pos:n.points] < thresh) + pos - 1 if( is.na(npos) ) break; start.s <- uniroot(function(s) dft(s) - thresh, svals[c(npos-1,npos)])$root upcrosspos <- match(TRUE, ftvals[npos:n.points] > thresh) if( is.na(upcrosspos) ) { if(svals[n.points] - start.s < Kn) { warning("Not able to check Kn units beyond selected bandwidth") } return(1/start.s) } end.s <- uniroot(function(s) dft(s) - thresh, svals[c(npos + upcrosspos - 2, npos + upcrosspos - 1)])$root if( end.s - start.s > Kn ) { return(1/start.s) } else { pos <- npos + upcrosspos - 1 } } warning("No bandwidth found") return(NA) } bwadap <- function(x, ...) UseMethod("bwadap")
context("Tests for the mappingColumn R module") library(safetyGraphics) library(shinytest) library(testthat) library(stringr) skip_on_cran() app <- ShinyDriver$new("./module_examples/mappingColumn") initial<-app$getAllValues() test_that("UI function stops with invalid inputs (non-data.frame)",{ skip_on_cran() id_meta <- meta%>%filter(domain=="labs")%>%filter(col_key=="id_col") id_mapping_list<-list(id_col="USUBJID") expect_error(mappingColumnUI("test1.1", list(id_col="USUBJID"), safetyData::adam_adlbc)) expect_error(mappingColumnUI("test1.2", id_meta, "invalid_data_option")) expect_error(mappingColumnUI("test1.3", id_meta, labs, list(id_col="USUBJID"))) }) test_that("the correct number of inputs are created (1 per field/column)",{ skip_on_cran() inputs <- names(initial$input) expect_length(str_subset(inputs,"ex1"),1) expect_length(str_subset(inputs,"ex2"),1) expect_length(str_subset(inputs,"ex3"),5) expect_length(str_subset(inputs,"ex4"),5) }) test_that("default values for inputs are set correctly in example app",{ skip_on_cran() expect_equal(initial$input[["ex1-id_col-colSelect"]],"") expect_equal(initial$input[["ex2-id_col-colSelect"]],"USUBJID") expect_equal(initial$input[["ex3-measure_col-colSelect"]],"") expect_equal(initial$input[["ex3-measure_values--ALP-colSelect"]],"") expect_equal(initial$input[["ex3-measure_values--ALT-colSelect"]],"") expect_equal(initial$input[["ex3-measure_values--AST-colSelect"]],"") expect_equal(initial$input[["ex3-measure_values--TB-colSelect"]],"") expect_equal(initial$input[["ex4-measure_col-colSelect"]],"PARAM") expect_equal(initial$input[["ex4-measure_values--ALP-colSelect"]],"Alkaline Phosphatase (U/L)") expect_equal(initial$input[["ex4-measure_values--ALT-colSelect"]],"") expect_equal(initial$input[["ex4-measure_values--AST-colSelect"]],"") expect_equal(initial$input[["ex4-measure_values--TB-colSelect"]],"") expect_equal(initial$input[["ex5-measure_col-colSelect"]],"") expect_equal(initial$input[["ex5-measure_values--ALP-colSelect"]],"") expect_equal(initial$input[["ex5-measure_values--ALT-colSelect"]],"") expect_equal(initial$input[["ex5-measure_values--AST-colSelect"]],"") expect_equal(initial$input[["ex5-measure_values--TB-colSelect"]],"") expect_equal(initial$input[["ex6-measure_col-colSelect"]],"LBTEST") expect_equal(initial$input[["ex6-measure_values--ALP-colSelect"]],"Alkaline Phosphatase") expect_equal(initial$input[["ex6-measure_values--ALT-colSelect"]],"") expect_equal(initial$input[["ex6-measure_values--AST-colSelect"]],"") expect_equal(initial$input[["ex6-measure_values--TB-colSelect"]],"") }) test_that("changing column input updates clears the field input values and updates input list",{ skip_on_cran() app$setValue('ex4-measure_col-colSelect',"PARAMCD") expect_equal(app$getValue("ex4-measure_col-colSelect"),"PARAMCD") Sys.sleep(.5) expect_equal(app$getValue("ex4-measure_values--ALP-colSelect"),"") app$setValue('ex4-measure_values--ALP-colSelect',"ALP") Sys.sleep(.5) expect_equal(app$getValue("ex4-measure_values--ALP-colSelect"),"ALP") }) test_that("output are data frames with the expected values",{ skip_on_cran() expect_true(all(sapply(initial$export,is.data.frame))) expect_true(all(sapply(initial$export,function(x){names(x)==c("text_key","current")}))) expect_equal(nrow(initial$export$ex1_data),1) expect_equal(nrow(initial$export$ex2_data),1) expect_equal(nrow(initial$export$ex3_data),5) expect_equal(nrow(initial$export$ex4_data),5) expect_equal(nrow(initial$export$ex5_data),5) expect_equal(nrow(initial$export$ex6_data),5) ex2_id_col <- 
initial$export$ex2_data %>% filter(text_key=="id_col") %>% pull(current) %>% as.character() expect_equal(ex2_id_col, "USUBJID") ex4_measure_col <- initial$export$ex4_data %>% filter(text_key=="measure_col") %>% pull(current) %>% as.character() expect_equal(ex4_measure_col, "PARAM") ex4_measure_col_ALP <- initial$export$ex4_data %>% filter(text_key=="measure_values--ALP") %>% pull(current) %>% as.character() expect_equal(ex4_measure_col_ALP, "Alkaline Phosphatase (U/L)") ex6_measure_col <- initial$export$ex6_data %>% filter(text_key=="measure_col") %>% pull(current) %>% as.character() expect_equal(ex6_measure_col, "LBTEST") ex6_measure_col_ALP <- initial$export$ex6_data %>% filter(text_key=="measure_values--ALP") %>% pull(current) %>% as.character() expect_equal(ex6_measure_col_ALP, "Alkaline Phosphatase") updated<-app$getAllValues() ex2_id_col_updated <- updated$export$ex2_data %>% filter(text_key=="id_col") %>% pull(current) %>% as.character() expect_equal(ex2_id_col_updated, "USUBJID") ex4_measure_col_updated <- updated$export$ex4_data %>% filter(text_key=="measure_col") %>% pull(current) %>% as.character() expect_equal(ex4_measure_col_updated, "PARAMCD") ex4_measure_values_ALP_updated <- updated$export$ex4_data %>% filter(text_key=="measure_values--ALP") %>% pull(current) %>% as.character() expect_equal(ex4_measure_values_ALP_updated, "ALP") }) app$stop()
structure(list(url = "https://api.twitter.com/2/tweets?tweet.fields=attachments%2Cauthor_id%2Cconversation_id%2Ccreated_at%2Centities%2Cgeo%2Cid%2Cin_reply_to_user_id%2Clang%2Cpublic_metrics%2Cpossibly_sensitive%2Creferenced_tweets%2Csource%2Ctext%2Cwithheld&user.fields=created_at%2Cdescription%2Centities%2Cid%2Clocation%2Cname%2Cpinned_tweet_id%2Cprofile_image_url%2Cprotected%2Cpublic_metrics%2Curl%2Cusername%2Cverified%2Cwithheld&expansions=author_id%2Centities.mentions.username%2Cgeo.place_id%2Cin_reply_to_user_id%2Creferenced_tweets.id%2Creferenced_tweets.id.author_id&place.fields=contained_within%2Ccountry%2Ccountry_code%2Cfull_name%2Cgeo%2Cid%2Cname%2Cplace_type&ids=1266876474440761346%2C1266868259925737474%2C1266867327079002121%2C1266866660713127936%2C1266864490446012418%2C1266860737244336129%2C1266859737615826944%2C1266859455586676736%2C1266858090143588352%2C1266857669157097473%2C1266856357954756609%2C1266855807699861506%2C1266855344086663169%2C1266854627758276608%2C1266854586188476421%2C1266854533889757187%2C1266853931247906816%2C1266853419291234312%2C1266852781526302722%2C1266852099163291650%2C1266851746086899713%2C1266850733409931264%2C1266850092289572869%2C1266848161424969728%2C1266847955346210823%2C1266847040937525251%2C1266846743137837057%2C1266846080144211970%2C1266845377703149570%2C1266844755557842944%2C1266844314895777792%2C1266843758575923200%2C1266842696355897344%2C1266842682187546624%2C1266842675594104839%2C1266842473982197760%2C1266842400040849408%2C1266842270516629505%2C1266842200387784707%2C1266841835626024964%2C1266840513229074436%2C1266840285696405511%2C1266840115906777091%2C1266839491513331714%2C1266839242250084352%2C1266838724048031745%2C1266837987620524049%2C1266836992265842688%2C1266836364005388293%2C1266835587765538817%2C1266835559621767168%2C1266835230947782659%2C1266835214392799239%2C1266834647956275202%2C1266834072040587264%2C1266834042051342338%2C1266833865701756930%2C1266833807799455745%2C1266832536061214727%2C1266832245790302209%2C1266831975198986240%2C1266831225462890496%2C1266831181984813065%2C1266831085582913538%2C1266830382512685059%2C1266828500771778560%2C1266827399276224513%2C1266826884727406595%2C1266826815705939970%2C1266826792964435968%2C1266826240620662786%2C1266826036454580227%2C1266825574636535813%2C1266825188013936642%2C1266824926289371136%2C1266824597804064769%2C1266823704715157504%2C1266823295304949760%2C1266823244142821377%2C1266822697494986752%2C1266822103820574729%2C1266821913831235584%2C1266821749464858632%2C1266820820359352322%2C1266820479249244160%2C1266820145130987521%2C1266819364247998465%2C1266819340353101824%2C1266819279216877568%2C1266819132827328514%2C1266819115236364290%2C1266818671952957440%2C1266818221644144641%2C1266818177977274369%2C1266818019453435920%2C1266817292643467265%2C1266817176687779846%2C1266816778010845196%2C1266816466512297984%2C1266814311298629640", status_code = 200L, headers = structure(list(date = "Tue, 21 Dec 2021 13:36:57 UTC", server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8", `cache-control` = "no-cache, no-store, max-age=0", `content-length` = "37925", `x-access-level` = "read", `x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip", `x-xss-protection` = "0", `x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1640094716", `content-disposition` = "attachment; filename=json.json", `x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "299", `strict-transport-security` = "max-age=631138519", `x-response-time` = "1059", `x-connection-hash` = 
"b914cde170481ca75c4b9b9291d6d4593fbb9c37fd3a37f09fefae48ca90f4e7"), class = c("insensitive", "list")), all_headers = list(list(status = 200L, version = "HTTP/2", headers = structure(list(date = "Tue, 21 Dec 2021 13:36:57 UTC", server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8", `cache-control` = "no-cache, no-store, max-age=0", `content-length` = "37925", `x-access-level` = "read", `x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip", `x-xss-protection` = "0", `x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1640094716", `content-disposition` = "attachment; filename=json.json", `x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "299", `strict-transport-security` = "max-age=631138519", `x-response-time` = "1059", `x-connection-hash` = "b914cde170481ca75c4b9b9291d6d4593fbb9c37fd3a37f09fefae48ca90f4e7"), class = c("insensitive", "list")))), cookies = structure(list(domain = c(".twitter.com", ".twitter.com", ".twitter.com", ".twitter.com"), flag = c(TRUE, TRUE, TRUE, TRUE), path = c("/", "/", "/", "/"), secure = c(TRUE, TRUE, TRUE, TRUE), expiration = structure(c(1702744284, 1702744284, 1702744284, 1702744284), class = c("POSIXct", "POSIXt")), name = c("guest_id_marketing", "guest_id_ads", "personalization_id", "guest_id"), value = c("REDACTED", "REDACTED", "REDACTED", "REDACTED")), row.names = c(NA, -4L), class = "data.frame"), content = charToRaw("{\"data\":[{\"lang\":\"de\",\"text\":\"diese Petition mitzeichnen!!\\n. \\nKonsequente Ausrichtung eines zukünftigen Konjunkturpakets.\\nNur soziale und technische Innovationen, date = structure(1640093817, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0, namelookup = 0.058534, connect = 0.072659, pretransfer = 0.130836, starttransfer = 1.222355, total = 1.225534)), class = "response")
LV_pm_alpha_none_lambdacov_none_alphacov_none <- function(par, fitness, neigh_intra_matrix = NULL, neigh_inter_matrix, covariates, fixed_parameters){ pos <- 1 if(is.null(fixed_parameters[["lambda"]])){ lambda <- par[pos] pos <- pos + 1 }else{ lambda <- fixed_parameters[["lambda"]] } sigma <- par[length(par)] pred <- rep(lambda, times=length(fitness)) llik <- dnorm(fitness, mean = (log(pred)), sd = (sigma), log=TRUE) return(sum(-1*llik)) }
rqgauss <-function(n,q=0,mu=0,sig=1,meth="Box-Muller"){ nsam <- n qv <- q if(qv >= 3 || qv == 1) stop("q value must be < 3 and != 1") if(meth != "Quantile") if(meth != "Chaotic") if(meth != "Box-Muller") stop("invalid method") qPDF <- array(1:nsam) qPDF[] <-0 if (meth == "Box-Muller"){ logq <- function(x,qva) { mde<-.Machine$double.exponent-2 if(abs(qva-1)<10^-mde){ a<-log(x) }else{ b<-as.double(x/x^qva/(1-qva)) a<-as.double(b-1/(1-qva)) } return(a) } qgen<-as.double((1+qv)/(3-qv)) for(i in 1:nsam){ u1<-as.double(runif(1,0,1)) u2<-as.double(runif(1,0,1)) qPDF[i]<-mu+sig*as.double(sqrt(abs(2*logq(u1,qgen)))*sin(2*pi*u2)) } }else{ if (meth == "Quantile") qPDF<-cqgauss(runif(nsam),qv,mu,sig) } if (meth == "Chaotic") qPDF<-Chaotic(nsam,qv,mu,sig) return(qPDF) }
dataBuilder <- function(dataset, resp, subject, method, time, gs = NULL){ Data <- data.frame(dataset) Data <- try(rename.vars(Data, from = c(resp, subject, method, time), to = c("resp", "subject", "method", "time"), info = FALSE), TRUE) if (is.null(gs) == FALSE) { gold <- which.max(levels(Data$method) == gs) others <- seq(1, length(levels(Data$method)))[-gold] Data$method <- factor(Data$method, levels = levels(Data$method)[c(gold, others)]) } return(Data) }
'dse03ee'
"as.treeshape.treebalance" <- function(x, ...) { tree=x height=nrow(tree$merge) merge=matrix(NA, height, 2) current.tip=-1 for (node in 1:height) { new.node=height+1-node if (tree$merge[node,2]==1) { res1=current.tip current.tip=current.tip-1 } else { res1=new.node-1 } if ((tree$merge[node,1]-tree$merge[node,2])==1) { res2=current.tip current.tip=current.tip-1 } else { res2=new.node-tree$merge[node,2] } merge[new.node,]=c(res1,res2) } res=treeshape(merge) }
library(ManifoldOptim) library(mvtnorm) set.seed(1234) n <- 2000 p <- 5 mu.true <- rep(0,p) Sigma.true <- diag(2,p) + 0.01 y <- rmvnorm(n, mean = mu.true, sigma = Sigma.true) tx <- function(x) { S <- matrix(x, p, p) S[lower.tri(S)] <- t(S)[lower.tri(S)] return(S) } f <- function(x) { Sigma <- tx(x) -sum(dmvnorm(y, mean = mu.true, sigma = Sigma, log = TRUE)) } mod <- Module("ManifoldOptim_module", PACKAGE = "ManifoldOptim") prob <- new(mod$RProblem, f) X0 <- diag(1, p) x0 <- as.numeric(X0) prob$objFun(x0) mani.params <- get.manifold.params(IsCheckParams = TRUE) solver.params <- get.solver.params(DEBUG = 0, Tolerance = 1e-4, Max_Iteration = 1000, IsCheckParams = TRUE, IsCheckGradHess = FALSE) mani.defn <- get.spd.defn(p) res <- manifold.optim(prob, mani.defn, method = "LRBFGS", mani.params = mani.params, solver.params = solver.params, x0 = x0) print(res) head(tx(res$xopt))
"mgp154" "mgp2592"
smooth_gifti <- function( original_fname, target_fname, surf_fname=NULL, surf_FWHM=5, hemisphere=c("left", "right"), ROI_fname=NULL, zeroes_as_NA=FALSE) { stopifnot(file.exists(original_fname)) if (is.null(surf_fname)) { ciftiTools_warn(paste( "No surface provided to `smooth_gifti`,", "so using the surface included in `ciftiTools`." )) hemisphere <- match.arg(hemisphere, c("left", "right")) x_res <- nrow(readgii(original_fname)$data[[1]]) surf_fname <- file.path(tempdir(), paste0(hemisphere, ".surf.gii")) surf_fname <- resample_gifti( ciftiTools.files()$surf[hemisphere], surf_fname, hemisphere=hemisphere, file_type="surface", resamp_res=x_res ) } stopifnot(file.exists(surf_fname)) cmd <- paste( "-metric-smoothing", sys_path(surf_fname), sys_path(original_fname), surf_FWHM / (2*sqrt(2*log(2))), sys_path(target_fname) ) if (!is.null(ROI_fname)) { cmd <- paste(cmd, "-roi", sys_path(ROI_fname)) } if (zeroes_as_NA) { cmd <- paste(cmd, "-fix-zeros") } run_wb_cmd(cmd) return(invisible(target_fname)) } smoothGIfTI <- function( original_fname, target_fname, surf_fname, surf_FWHM=5, zeroes_as_NA=FALSE) { smooth_gifti( original_fname, target_fname, surf_fname, surf_FWHM, zeroes_as_NA = zeroes_as_NA ) } smoothgii <- function( original_fname, target_fname, surf_fname, surf_FWHM=5, zeroes_as_NA=FALSE) { smooth_gifti( original_fname, target_fname, surf_fname, surf_FWHM, zeroes_as_NA = zeroes_as_NA ) }
A1inv <- function(x) { ifelse (0 <= x & x < 0.53, 2 * x + x^3 + (5 * x^5)/6, ifelse (x < 0.85, -0.4 + 1.39 * x + 0.43/(1 - x), 1/(x^3 - 4 * x^2 + 3 * x))) }
match_order_by_labels <- function(dend_change, dend_template, check_that_labels_match = TRUE) { tree_to_change_labels <- labels(dend_change) tree_template_labels <- labels(dend_template) if (check_that_labels_match) { if (!identical(sort(tree_to_change_labels), sort(tree_template_labels))) { stop("labels do not match in both trees. Please make sure to fix the labels names! (make sure also that the labels of BOTH trees are 'character')") } } tree_template_order <- order.dendrogram(dend_template) ss_order_change_leaf_numbers_to_match_template <- match(x = tree_to_change_labels, table = tree_template_labels) tree_new_leaf_numbers <- tree_template_order[ss_order_change_leaf_numbers_to_match_template] order.dendrogram(dend_change) <- tree_new_leaf_numbers return(dend_change) } match_order_dendrogram_by_old_order <- function(dend_change, dend_template, dend_change_old_order, check_that_labels_match = FALSE, check_that_leaves_order_match = FALSE) { if (check_that_labels_match) { if (any(sort(labels(dend_change)) != sort(labels(dend_template)))) stop("labels do not match in both trees. Please make sure to fix the labels names!") } if (check_that_leaves_order_match) { if (any(sort(order.dendrogram(dend_change)) != sort(order.dendrogram(dend_template)))) stop("order.dendrogram do not match in both trees. Please make sure to fix the labels names!") } tree_to_change_order <- order.dendrogram(dend_change) tree_template_order <- order.dendrogram(dend_template) ss_order_change_leaf_numbers_to_match_template <- match(x = tree_to_change_order, table = dend_change_old_order) new_leaves_order <- tree_template_order[ss_order_change_leaf_numbers_to_match_template] order.dendrogram(dend_change) <- new_leaves_order return(dend_change) } entanglement <- function(dend1, ...) { UseMethod("entanglement") } entanglement.default <- function(dend1, dend2, ...) { stop("no default function for entanglement") } entanglement.hclust <- function(dend1, dend2, ...) { dend1 <- as.dendrogram(dend1) dend2 <- as.dendrogram(dend2) entanglement(dend1, dend2, ...) } entanglement.phylo <- function(dend1, dend2, ...) { dend1 <- as.dendrogram(dend1) dend2 <- as.dendrogram(dend2) entanglement(dend1, dend2, ...) } entanglement.dendlist <- function(dend1, which = c(1L, 2L), ...) { if (length(dend1) == 1) stop("Your dendlist has only 1 dendrogram - entanglement can not be calculated") if (all(which %in% seq_len(length(dend1)))) { entanglement.dendrogram(dend1[[which[1]]], dend1[[which[2]]], ...) } else { stop("You are trying to calculate the entanglement for trees which are outside the range of trees in your dendlist") } } entanglement.dendrogram <- function(dend1, dend2, L = 1.5, leaves_matching_method = c("labels", "order"), ...) { if (L == 0) L <- L + 1e-50 n_leaves <- nleaves(dend1) one_to_n_leaves <- seq_len(n_leaves) leaves_matching_method <- match.arg(leaves_matching_method) if (leaves_matching_method == "order") { dend1_old_order <- order.dendrogram(dend1) order.dendrogram(dend1) <- one_to_n_leaves dend2 <- match_order_dendrogram_by_old_order(dend2, dend1, dend1_old_order) } else { order.dendrogram(dend1) <- one_to_n_leaves dend2 <- match_order_by_labels(dend2, dend1) } sum_abs_diff_L <- function(x, y, L) { sum(abs(x - y)^L) } entanglement_result <- sum_abs_diff_L(order.dendrogram(dend1), order.dendrogram(dend2), L) worse_entanglement_result <- sum_abs_diff_L(one_to_n_leaves, rev(one_to_n_leaves), L) normalized_entanglement_result <- entanglement_result / worse_entanglement_result return(normalized_entanglement_result) }
Arrow <- R6::R6Class( classname = "Arrow", inherit = Edge, private = list( ), public = list( initialize = function(source, target, label="") { super$initialize(v1=source, v2=target, label=label) return(invisible(self)) }, source = function() { return(private$v1) }, target = function() { return(private$v2) } ) )
DEL <- function (mu.link ="log", sigma.link="log", nu.link="logit") { mstats <- checklink("mu.link", "DEL", substitute(mu.link), c("1/mu^2", "log", "identity")) dstats <- checklink("sigma.link", "DEL", substitute(sigma.link), c("inverse", "log", "identity")) vstats <- checklink("nu.link", "DEL", substitute(nu.link), c("logit", "probit", "cloglog", "cauchit", "log")) structure( list(family = c("DEL", "Delaporte"), parameters = list(mu = TRUE, sigma = TRUE, nu = TRUE), nopar = 3, type = "Discrete", mu.link = as.character(substitute(mu.link)), sigma.link = as.character(substitute(sigma.link)), nu.link = as.character(substitute(nu.link)), mu.linkfun = mstats$linkfun, sigma.linkfun = dstats$linkfun, nu.linkfun = vstats$linkfun, mu.linkinv = mstats$linkinv, sigma.linkinv = dstats$linkinv, nu.linkinv = vstats$linkinv, mu.dr = mstats$mu.eta, sigma.dr = dstats$mu.eta, nu.dr = vstats$mu.eta, dldm = function(y,mu,sigma,nu) { logty <-log(y+1)+dDEL(y+1, mu=mu, sigma=sigma, nu=nu, log=TRUE)- dDEL(y, mu=mu, sigma=sigma, nu=nu, log=TRUE) ty <- exp(logty) dldm <- (y-ty)/mu dldm }, d2ldm2 = function(y,mu,sigma,nu) { logty <-log(y+1)+dDEL(y+1, mu=mu, sigma=sigma, nu=nu, log=TRUE)- dDEL(y, mu=mu, sigma=sigma, nu=nu, log=TRUE) ty <- exp(logty) dldm <- (y-ty)/mu d2ldm2 <- - dldm * dldm d2ldm2 <- ifelse(d2ldm2 < -1e-15, d2ldm2,-1e-15) d2ldm2 }, dldd = function(y,mu,sigma,nu) { nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "sigma", delta=0.01) dldd <- as.vector(attr(nd, "gradient")) dldd }, d2ldd2 = function(y,mu,sigma,nu) { nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "sigma", delta=0.01) dldd <- as.vector(attr(nd, "gradient")) d2ldd2 <- -dldd*dldd d2ldd2 <- ifelse(d2ldd2 < -1e-15, d2ldd2,-1e-15) d2ldd2 }, d2ldmdd = function(y,mu,sigma,nu) { logty <-log(y+1)+dDEL(y+1, mu=mu, sigma=sigma, nu=nu, log=TRUE)- dDEL(y, mu=mu, sigma=sigma, nu=nu, log=TRUE) ty <- exp(logty) dldm <- (y-ty)/mu nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "sigma", delta=0.01) dldd <- as.vector(attr(nd, "gradient")) d2ldmdd <- -dldm *dldd d2ldmdd <- ifelse(d2ldmdd < -1e-15, d2ldmdd,-1e-15) d2ldmdd }, d2ldmdv = function(y,mu,sigma,nu) { logty <-log(y+1)+dDEL(y+1, mu=mu, sigma=sigma, nu=nu, log=TRUE)- dDEL(y, mu=mu, sigma=sigma, nu=nu, log=TRUE) ty <- exp(logty) dldm <- (y-ty)/mu nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "nu", delta=0.01) dldv <- as.vector(attr(nd, "gradient")) d2ldmdv <- -dldm *dldv d2ldmdv <- ifelse(d2ldmdv < -1e-15, d2ldmdv,-1e-15) d2ldmdv }, d2ldddv = function(y,mu,sigma,nu) { nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "sigma", delta=0.01) dldd <- as.vector(attr(nd, "gradient")) nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "nu", delta=0.01) dldv <- as.vector(attr(nd, "gradient")) d2ldddv <- -dldd *dldv d2ldddv <- ifelse(d2ldddv < -1e-15, d2ldddv,-1e-15) d2ldddv }, dldv = function(y,mu,sigma,nu) { nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "nu", delta=0.01) dldv <- as.vector(attr(nd, "gradient")) dldv }, d2ldv2 = function(y,mu,sigma,nu) { nd <- numeric.deriv(dDEL(y, mu, sigma, nu, log=TRUE), "nu", delta=0.01) dldv <- as.vector(attr(nd, "gradient")) d2ldv2 <- -dldv*dldv d2ldv2 <- ifelse(d2ldv2 < -1e-15, d2ldv2,-1e-15) d2ldv2 } , G.dev.incr = function(y,mu,sigma,nu, pw=1,...) 
-2*dDEL(y, mu, sigma, nu, log=TRUE), rqres = expression( rqres(pfun="pDEL", type="Discrete", ymin=0, y=y, mu=mu, sigma=sigma, nu=nu) ), mu.initial = expression(mu<- (y+mean(y)/2)), sigma.initial = expression( sigma <- rep( max( ((var(y)-mean(y))/(mean(y)^2)),0.1),length(y))), nu.initial = expression({ nu <- rep(0.5,length(y)) }), mu.valid = function(mu) all(mu > 0) , sigma.valid = function(sigma) all(sigma > 0), nu.valid = function(nu) all(nu > 0) && all(nu < 1), y.valid = function(y) all(y >= 0), mean = function(mu, sigma, nu) mu, variance = function(mu, sigma, nu) mu + mu^2 * sigma * (1 - nu)^2 ), class = c("gamlss.family","family")) } dDEL<-function(x, mu=1, sigma=1, nu=.5, log=FALSE) { if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", "")) if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", "")) if (any(nu <= 0) | any(nu >= 1)) stop(paste("nu must be between 0 and 1", "\n", "")) if (any(x < 0) ) stop(paste("x must be >=0", "\n", "")) ly <- max(length(x),length(mu),length(sigma),length(nu)) x <- rep(x, length = ly) sigma <- rep(sigma, length = ly) mu <- rep(mu, length = ly) nu <- rep(nu, length = ly) logpy0 <- -mu*nu-(1/sigma)*(log(1+mu*sigma*(1-nu))) S <- tofyDEL2(x, mu, sigma, nu) logfy <- logpy0-lgamma(x+1)+S if(log==FALSE) fy <- exp(logfy) else fy <- logfy if (length(sigma)>1) fy <- ifelse(sigma>0.0001, fy, dPO(x, mu = mu, log = log) ) else fy <- if (sigma<0.0001) dPO(x, mu = mu, log = log) else fy fy } tofyDEL1 <- function (y, mu, sigma, nu) { ly <- max(length(y),length(mu),length(sigma),length(nu)) y <- rep(y, length = ly) sigma <- rep(sigma, length = ly) mu <- rep(mu, length = ly) nu <- rep(nu, length = ly) sumlty <- as.double(.C("tofydel1", as.double(y), as.double(mu), as.double(sigma), as.double(nu), ans=double(ly), as.integer(length(y)), as.integer(max(y)+1), PACKAGE="gamlss.dist")$ans) sumlty } tofyDEL2 <- function (y, mu, sigma, nu) { ly <- max(length(y),length(mu),length(sigma),length(nu)) y <- rep(y, length = ly) sigma <- rep(sigma, length = ly) mu <- rep(mu, length = ly) nu <- rep(nu, length = ly) sumlty <- as.double(.C("tofydel2", as.double(y), as.double(mu), as.double(sigma), as.double(nu), ans=double(ly), as.integer(length(y)), as.integer(max(y)+1), PACKAGE="gamlss.dist")$ans) sumlty } pDEL <- function(q, mu = 1, sigma = 1, nu = .5, lower.tail = TRUE, log.p = FALSE) { if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", "")) if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", "")) if (any(nu <= 0) | any(nu >= 1)) stop(paste("nu must be between 0 and 1", "\n", "")) if (any(q < 0) ) stop(paste("q must be >=0", "\n", "")) ly <- max(length(q),length(mu),length(sigma),length(nu)) q <- rep(q, length = ly) sigma <- rep(sigma, length = ly) mu <- rep(mu, length = ly) nu <- rep(nu, length = ly) fn <- function(q, mu, sigma, nu) sum(dDEL(0:q, mu=mu, sigma=sigma, nu=nu)) Vcdf <- Vectorize(fn) cdf <- Vcdf(q=q, mu=mu, sigma=sigma, nu=nu) cdf <- if(lower.tail==TRUE) cdf else 1-cdf cdf <- if(log.p==FALSE) cdf else log(cdf) cdf } qDEL <- function(p, mu=1, sigma=1, nu=0.5, lower.tail = TRUE, log.p = FALSE, max.value = 10000) { if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", "")) if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", "")) if (any(nu <= 0) | any(nu >= 1)) stop(paste("nu must be between 0 and 1", "\n", "")) if (any(p < 0) | any(p > 1.0001)) stop(paste("p must be between 0 and 1", "\n", "")) if (log.p==TRUE) p <- exp(p) else p <- p if (lower.tail==TRUE) p <- p else p <- 
1-p ly <- length(p) QQQ <- rep(0,ly) nsigma <- rep(sigma, length = ly) nmu <- rep(mu, length = ly) nnu <- rep(nu, length = ly) for (i in seq(along=p)) { cumpro <- 0 if (p[i]+0.000000001 >= 1) QQQ[i] <- Inf else { for (j in seq(from = 0, to = max.value)) { cumpro <- pDEL(j, mu = nmu[i], sigma = nsigma[i], nu = nnu[i], log.p = FALSE) QQQ[i] <- j if (p[i] <= cumpro ) break } } } QQQ } rDEL <- function(n, mu=1, sigma=1, nu=0.5, max.value = 10000) { if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", "")) if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", "")) if (any(nu <= 0) | any(nu >= 1)) stop(paste("nu must be between 0 and 1", "\n", "")) if (any(n <= 0)) stop(paste("n must be a positive integer", "\n", "")) n <- ceiling(n) p <- runif(n) r <- qDEL(p, mu=mu, sigma=sigma, nu=nu, max.value = max.value ) as.integer(r) }
pt2d <- function(x, y = x, rho = 0, nu = 4) { if (nu == Inf) return(pnorm2d(x = x, y = y, rho = rho)) sigma <- diag(2) sigma[1, 2] <- sigma[2, 1] <- rho X <- cbind(x, y) .pmvt <- function(x, delta, sigma, df) mvtnorm::pmvt( lower = -Inf, upper = x, delta = delta, sigma = sigma, df = df) ans <- apply(X, 1, ".pmvt", delta = c(0,0), sigma = sigma, df = nu) attr(ans, "control") <- c(rho = rho, nu = nu) ans } dt2d <- function(x, y = x, rho = 0, nu = 4) { if (nu == Inf) return(dnorm2d(x = x, y = y, rho = rho)) xoy <- (x^2 - 2*rho*x*y + y^2)/ (2*(1 - rho^2)) density <- (1 + 2*xoy/nu)^(-(nu+2)/2) / (2*pi*sqrt(1-rho^2)) attr(density, "control") <- c(rho = rho, nu = nu) density } rt2d <- function(n, rho = 0, nu = 4) { if (nu == Inf) return(rnorm2d(n = n, rho = rho)) ans <- rnorm2d(n, rho)/sqrt(rchisq(n, nu)/nu) attr(ans, "control") <- c(rho = rho, nu = nu) ans }
month_cnv=function(monthinput, up=FALSE, short=FALSE){ numelem = length(monthinput) monthnames = c('january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december') monthshort = substr(monthnames,1,3) if (is.character(monthinput)){ monthinput = gsub('(^ +)|( +$)','',monthinput) shortinput = tolower(substr(monthinput,1,3)) return(which(shortinput==monthshort)) } else { if(any(monthinput<1 | monthinput>12)) { stop("bad input values. month numbers must be 1-12.") } else { result = monthnames[monthinput] if(short) result = substr(result,1,3) if(up) result = toupper(result) } } return(result) }
rrum = function(Y, Q, chain_length = 10000L, as = 1, bs = 1, ag = 1, bg = 1, delta0 = rep(1, 2 ^ ncol(Q))) { if(length(as) != 1 | length(ag) != 1 | length(bs) != 1 | length(bg) != 1) { stop("as, ag, bs, and bg must all be numeric and of length 1") } rrum_helper(Y, Q, delta0, chain_length, as, bs, ag, bg) }
plotFsiland=function(res,land,data) { requireNamespace("ggplot2") requireNamespace("ggforce") namesland=colnames(res$landcontri) landraster=landtoraster(land,landname=namesland ,wd=res$wd) lx=range(unlist(lapply(landraster,function(x){range(x[,"X"])}))) ly=range(unlist(lapply(landraster,function(x){range(x[,"Y"])}))) pp=c("X","Y") Area=Fsiland.quantile(res,c(0.5,0.95)) MR=max(Area) Area=rbind(0,Area) ldata=NULL DA=NULL xx=lx[2]+MR yy=ly[1] for(i in 1:length(landraster)) { ldata=rbind(ldata,data.frame(landraster[[i]][,pp],LandVar=names(landraster)[i])) yy=yy+Area[i,2]+Area[i+1,2]+0.1*(ly[2]-ly[1]) DA=rbind(DA,data.frame(x=xx,y=yy,r=Area[i+1,],LandVar=names(landraster)[i],Type=as.factor(c(0.5,0.95)))) } cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7") g=ggplot(ldata,aes_string(x="X",y="Y",color="LandVar"))+ geom_rect(aes(xmin =lx[1],xmax= lx[2],ymin=ly[1],ymax=ly[2]),fill="grey95",inherit.aes = FALSE)+geom_point( size=0.4)+ scale_fill_manual(values=cbPalette)+scale_color_manual(values=cbPalette)+ theme_classic()+theme(axis.title=element_blank(),legend.title=element_blank(),legend.position="bottom")+ geom_point(data=data,aes_string(x="X",y="Y"),color=1)+coord_fixed() g2=g+geom_circle(data=DA,aes_string(x0="x", y0="y", r="r",color="LandVar",fill="LandVar",linetype="Type",alpha=0.5),inherit.aes = FALSE,show.legend = FALSE) g2 }
context("write_disk_path behavior") test_that("with crul", { skip_on_cran() skip_if_not_installed("vcr") library("vcr") dir <- tempdir() invisible(vcr_configure(dir = dir)) library(crul) f <- tempfile(fileext = ".json") webmockr_net_connect_allowed() expect_error( suppressWarnings(use_cassette("write_disk_path_not_set_crul_error", { out <- HttpClient$new("https://httpbin.org/get")$get(disk = f) })), "write_disk_path must be given" ) wdp <- file.path(dir, "files") invisible(vcr_configure(dir = dir, write_disk_path = wdp)) expect_error( use_cassette("write_disk_path_not_set_crul_noerror", { out <- HttpClient$new("https://httpbin.org/get")$get(disk = f) }), NA ) unlink(f) unlink(wdp, TRUE) unlink(file.path(dir, "write_disk_path_not_set_crul_error.yml")) unlink(file.path(dir, "write_disk_path_not_set_crul_noerror.yml")) webmockr_disable_net_connect() unloadNamespace("vcr") }) test_that("if relative path set its not expanded to full path anymore", { skip_on_cran() skip_if_not_installed("vcr") library("vcr") dir <- tempdir() f <- "stuff.json" wdp <- "../files" invisible(vcr_configure(dir = dir, write_disk_path = wdp)) og <- getwd() setwd(dir) on.exit(setwd(og)) expect_error( use_cassette("write_disk_path_is_relative", { out <- HttpClient$new("https://httpbin.org/get?foo=foo")$get(disk = f) }), NA ) txt <- readLines(file.path(dir, "write_disk_path_is_relative.yml")) expect_true(any(grepl("../files/stuff.json", txt))) unlink("stuff.json") webmockr_disable_net_connect() unloadNamespace("vcr") }) test_that("with httr", { skip_on_cran() skip_if_not_installed("vcr") library("vcr") enable() dir <- tempdir() invisible(vcr_configure(dir = dir)) library(httr) f <- tempfile(fileext = ".json") webmockr_net_connect_allowed() expect_error( suppressWarnings(use_cassette("write_disk_path_not_set_crul_error", { out <- GET("https://httpbin.org/get", write_disk(f)) })), "write_disk_path must be given" ) f <- tempfile(fileext = ".json") wdp <- file.path(dir, "files") invisible(vcr_configure(dir = dir, write_disk_path = wdp)) expect_error( use_cassette("write_disk_path_not_set_crul_noerror", { out <- GET("https://httpbin.org/get", write_disk(f)) }), NA ) unlink(f) unlink(wdp, TRUE) unlink(file.path(dir, "write_disk_path_not_set_crul_error.yml")) unlink(file.path(dir, "write_disk_path_not_set_crul_noerror.yml")) webmockr_disable_net_connect() unloadNamespace("vcr") }) test_that("if relative path set its not expanded to full path anymore: httr", { skip_on_cran() skip_if_not_installed("vcr") library("vcr") dir <- tempdir() f <- "stuff.json" wdp <- "../files" invisible(vcr_configure(dir = dir, write_disk_path = wdp)) og <- getwd() setwd(dir) on.exit(setwd(og)) expect_error( use_cassette("write_disk_path_is_relative", { out <- GET("https://httpbin.org/get?foo=foo", write_disk(f)) }), NA ) txt <- readLines(file.path(dir, "write_disk_path_is_relative.yml")) expect_true(any(grepl("../files/stuff.json", txt))) unlink("stuff.json") webmockr_disable_net_connect() unloadNamespace("vcr") })
library(dynbenchmark) library(tidyverse) experiment("01-datasets") qsub::rsync_remote( remote_src = TRUE, path_src = derived_file(remote = TRUE, experiment = "01-datasets"), remote_dest = FALSE, path_dest = derived_file(remote = FALSE, experiment = "01-datasets"), verbose = TRUE, compress = FALSE )
context("plotting") memory <- list( ReducedDimensionPlot( LegendPointSize = 2 ), ColumnDataPlot(), FeatureAssayPlot(), RowDataPlot(), SampleAssayPlot(), SampleAssayPlot(), SampleAssayPlot() ) pObjects <- mimic_live_app(sce, memory) sce <- iSEE:::.set_colormap(sce, ExperimentColorMap()) test_that(".make_redDimPlot/.scatter_plot produce a valid list",{ p.out <- .generateOutput(pObjects$memory$ReducedDimensionPlot1, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("X","Y")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_redDimPlot/.scatter_plot produce a valid xy with color", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("X","Y","ColorBy")) }) test_that(".make_colDataPlot/.scatter_plot produce a valid list",{ cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "NALIGNED" p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y","X")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_colDataPlot/.scatter_plot produce a valid xy with color", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c('Y', 'X', 'ColorBy')) }) test_that(".make_colDataPlot/.violin_plot produce a valid list",{ p.out <- .generateOutput(pObjects$memory$ColumnDataPlot1, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y", "X", "GroupBy", "jitteredX")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_colDataPlot/.violin_plot produce a valid xy with color", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("Y","X","ColorBy","GroupBy","jitteredX")) }) test_that(".make_colDataPlot/.square_plot produce a valid list",{ cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") 
expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y","X","jitteredX","jitteredY")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_colDataPlot/.square_plot produce a valid xy with color", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" cdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("Y","X","ColorBy","jitteredX","jitteredY")) }) test_that(".make_rowDataPlot/.scatter_plot produce a valid list",{ rdp <- pObjects$memory$RowDataPlot rdp[[iSEE:::.rowDataXAxis]] <- iSEE:::.rowDataXAxisRowDataTitle rdp[[iSEE:::.rowDataXAxisRowData]] <- "num_cells" rdp[[iSEE:::.rowDataYAxis]] <- "mean_count" p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y","X")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_rowDataPlot/.violin_plot produce a valid xy with color", { rdp <- pObjects$memory$RowDataPlot rdp[[iSEE:::.rowDataXAxis]] <- iSEE:::.rowDataXAxisRowDataTitle rdp[[iSEE:::.rowDataXAxisRowData]] <- "num_cells" rdp[[iSEE:::.rowDataYAxis]] <- "mean_count" rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByRowDataTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c('Y', 'X', 'ColorBy')) rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByFeatNameTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c('Y', 'X', 'ColorBy')) }) test_that(".make_rowDataPlot/.violin_plot produce a valid list",{ rdp <- pObjects$memory$RowDataPlot p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y", "X", "GroupBy", "jitteredX")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_rowDataPlot/.violin_plot produce a valid xy with color", { rdp <- pObjects$memory$RowDataPlot rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByRowDataTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("Y","X","ColorBy","GroupBy","jitteredX")) rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByFeatNameTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("Y","X","ColorBy","GroupBy","jitteredX")) }) test_that(".make_rowDataPlot/.square_plot produce a valid list",{ rowData(sce)[, "LETTERS"] <- sample(LETTERS[1:3], nrow(sce), replace=TRUE) rdp <- pObjects$memory$RowDataPlot rdp[[iSEE:::.rowDataXAxis]] <- 
iSEE:::.rowDataXAxisRowDataTitle rdp[[iSEE:::.rowDataXAxisRowData]] <- "letters" rdp[[iSEE:::.rowDataYAxis]] <- "LETTERS" p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c('Y', 'X', 'jitteredX', 'jitteredY')) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_rowDataPlot/.square_plot produce a valid xy with color",{ rowData(sce)[, "LETTERS"] <- sample(LETTERS[1:3], nrow(sce), replace=TRUE) rdp <- pObjects$memory$RowDataPlot rdp[[iSEE:::.rowDataXAxis]] <- iSEE:::.rowDataXAxisRowDataTitle rdp[[iSEE:::.rowDataXAxisRowData]] <- "letters" rdp[[iSEE:::.rowDataYAxis]] <- "LETTERS" rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByRowDataTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c('Y', 'X', 'ColorBy', 'jitteredX', 'jitteredY')) expect_s3_class(p.out$plot, c("gg", "ggplot")) rdp[[iSEE:::.colorByField]] <- iSEE:::.colorByFeatNameTitle p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c('Y', 'X', 'ColorBy', 'jitteredX', 'jitteredY')) }) test_that(".make_featAssayPlot/.violin_plot produce a valid list",{ fdp <- pObjects$memory$FeatureAssayPlot1 p.out <- .generateOutput(fdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y", "X", "GroupBy", "jitteredX")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_featAssayPlot/.violin_plot produce a valid xy with color", { fdp <- pObjects$memory$FeatureAssayPlot1 fdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle p.out <- .generateOutput(fdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_named(p.out$contents, c("Y","X","ColorBy","GroupBy","jitteredX")) }) test_that(".make_featAssayPlot works for XAxis set to Column data", { fdp <- pObjects$memory$FeatureAssayPlot1 fdp[[iSEE:::.featAssayXAxis]] <- iSEE:::.featAssayXAxisColDataTitle fdp[[iSEE:::.featAssayXAxisColData]] <- "dissection_s" p.out <- .generateOutput(fdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl("dissection_s", unlist(p.out$commands)))) }) test_that(".make_featAssayPlot works for XAxis set to a character feature name", { selected_gene <- "Lamp5" fdp <- pObjects$memory$FeatureAssayPlot1 fdp[[iSEE:::.featAssayXAxis]] <- iSEE:::.featAssayXAxisFeatNameTitle fdp[[iSEE:::.featAssayXAxisFeatName]] <- selected_gene p.out <- .generateOutput(fdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl(selected_gene, unlist(p.out$commands)))) }) test_that(".make_featAssayPlot works for groupable colour covariate", { selected_coldata <- "dissection_s" fdp <- pObjects$memory$FeatureAssayPlot1 
fdp[[iSEE:::.colorByField]] <- iSEE:::.colorByColDataTitle fdp[[iSEE:::.colorByColData]] <- selected_coldata p.out <- .generateOutput(fdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl(selected_coldata, unlist(p.out$commands)))) expect_named(p.out$contents, c("Y", "X", "ColorBy", "GroupBy", "jitteredX")) }) test_that(".make_sampAssayPlot works with X covariate set to None", { sap <- pObjects$memory$SampleAssayPlot1 p.out <- .generateOutput(sap, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_s3_class(p.out$contents, "data.frame") expect_named(p.out$contents, c("Y", "X", "GroupBy", "jitteredX")) expect_true(all(p.out$contents$X=="")) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".make_sampAssayPlot works with X variable set to Row data", { selected_rowdata <- "num_cells" sap <- pObjects$memory$SampleAssayPlot1 sap[[iSEE:::.rowDataXAxis]] <- iSEE:::.sampAssayXAxisRowDataTitle sap[[iSEE:::.rowDataXAxisRowData]] <- selected_rowdata p.out <- .generateOutput(sap, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl(selected_rowdata, unlist(p.out$commands)))) }) test_that(".make_sampAssayPlot works with X variable set to Sample name", { selected_sample <- colnames(sce)[2] sap <- pObjects$memory$SampleAssayPlot1 sap[[iSEE:::.rowDataXAxis]] <- iSEE:::.sampAssayXAxisSampNameTitle sap[[iSEE:::.sampAssayXAxisSampName]] <- selected_sample p.out <- .generateOutput(sap, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl(selected_sample, unlist(p.out$commands)))) }) test_that(".make_colDataPlot/.create_plot can produce horizontal violins", { selected_coldataX <- "NREADS" selected_coldataY <- "driver_1_s" cdp <- pObjects$memory$ColumnDataPlot cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colorByColDataTitle cdp1 <- cdp cdp1[[iSEE:::.colDataXAxisColData]] <- selected_coldataX cdp1[[iSEE:::.colDataYAxis]] <- selected_coldataY cdp2 <- cdp cdp2[[iSEE:::.colDataXAxisColData]] <- selected_coldataY cdp2[[iSEE:::.colDataYAxis]] <- selected_coldataX p.out1 <- .generateOutput(cdp1, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) p.out2 <- .generateOutput(cdp2, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_identical(p.out1$contents, p.out2$contents) expect_true(any(grepl("coord_flip", unlist(p.out1$commands)))) expect_false(any(grepl("coord_flip", unlist(p.out2$commands)))) }) test_that(".scatter_plot works with zoom",{ params <- pObjects$memory$ReducedDimensionPlot1 ref <- .generateOutput(params, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) rd <- reducedDim(sce, params[[iSEE:::.redDimType]]) x_range <- range(head(rd[, params[[iSEE:::.redDimXAxis]]]), 10) y_range <- range(head(rd[, params[[iSEE:::.redDimYAxis]]]), 10) zoom_range <- c(x_range, y_range) names(zoom_range) <- c("xmin","xmax","ymin","ymax") params[[iSEE:::.zoomData]] <- zoom_range p.out <- .generateOutput(params, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_identical(p.out$contents, ref$contents) expect_true(any(grepl("coord_cartesian.*xmin.*xmax", unlist(p.out$commands)))) expect_false(any(grepl("coord_cartesian.*xmin.*xmax", unlist(ref$commands)))) }) test_that(".make_colDataPlot/.violin_plot works with zoom",{ cdp <- 
pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle chosen_x <- "driver_1_s" cdp[[iSEE:::.colDataXAxisColData]] <- chosen_x ref <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) x_unique <- unique(as.numeric(as.factor(colData(sce)[,chosen_x]))) chosen_y <- cdp[[iSEE:::.colDataYAxis]] y_range <- range(head(colData(sce)[,chosen_y], 10)) zoom_range <- c(sort(head(x_unique, 2)), y_range) zoom_range <- zoom_range + c(-0.5, 0.5, 0, 0) names(zoom_range) <- c("xmin","xmax","ymin","ymax") cdp[[iSEE:::.zoomData]] <- zoom_range p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_identical(p.out$contents, ref$contents) expect_true(any(grepl("coord_cartesian.*xmin.*xmax", unlist(p.out$commands)))) expect_false(any(grepl("coord_cartesian.*xmin.*xmax", unlist(ref$commands)))) }) test_that(".make_colDataPlot/.violin_plot works with zoom",{ cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle chosen_x <- "NREADS" cdp[[iSEE:::.colDataXAxisColData]] <- chosen_x chosen_y <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- chosen_y ref <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) x_range <- range(head(colData(sce)[,chosen_x], 10)) y_unique <- unique(as.numeric(as.factor(colData(sce)[,chosen_y]))) zoom_range <- c(x_range, sort(head(y_unique, 2))) zoom_range <- zoom_range + c(0, 0, -0.5, 0.5) names(zoom_range) <- c("xmin","xmax","ymin","ymax") cdp[[iSEE:::.zoomData]] <- zoom_range p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_identical(p.out$contents, ref$contents) expect_true(any(grepl("coord_flip.*xmin.*xmax", unlist(p.out$commands)))) expect_false(any(grepl("coord_flip.*xmin.*xmax", unlist(ref$commands)))) }) test_that(".make_colDataPlot/.square_plot works with zoom",{ cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle chosen_x <- "passes_qc_checks_s" cdp[[iSEE:::.colDataXAxisColData]] <- chosen_x chosen_y <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- chosen_y ref <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) x_unique <- unique(as.numeric(as.factor(colData(sce)[,chosen_x]))) y_unique <- unique(as.numeric(as.factor(colData(sce)[,chosen_y]))) zoom_range <- c( sort(head(x_unique, 2)), sort(head(y_unique, 2)) ) zoom_range <- zoom_range + rep(c(-0.5, 0.5), times=2) names(zoom_range) <- c("xmin","xmax","ymin","ymax") cdp[[iSEE:::.zoomData]] <- zoom_range p.out <- .generateOutput(cdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_identical(p.out$contents, ref$contents) expect_true(any(grepl("coord_cartesian.*xmin.*xmax", unlist(p.out$commands)))) expect_false(any(grepl("coord_cartesian.*xmin.*xmax", unlist(ref$commands)))) }) test_that("define_shapeby_for_column_plot produces the expected commands", { params <- pObjects$memory$ReducedDimensionPlot1 params[[iSEE:::.shapeByField]] <- iSEE:::.shapeByColDataTitle params[[iSEE:::.shapeByColData]] <- "driver_1_s" env <- new.env() env$se <- sce .generateDotPlotData(params, env) shape_out <- iSEE:::.addDotPlotDataShape(params, env) expect_true(!is.null(env$plot.data$ShapeBy)) expect_identical(shape_out$labels$ShapeBy, "driver_1_s") expect_match(shape_out$commands, "driver_1_s", fixed=TRUE) }) test_that(".define_shapeby_for_row_plot produces the expected commands", { params <- 
pObjects$memory$RowDataPlot1 params[[iSEE:::.shapeByField]] <- iSEE:::.shapeByRowDataTitle params[[iSEE:::.shapeByRowData]] <- "letters" env <- new.env() env$se <- sce .generateDotPlotData(params, env) shape_out <- iSEE:::.addDotPlotDataShape(params, env) expect_true(!is.null(env$plot.data$ShapeBy)) expect_identical(shape_out$labels$ShapeBy, "letters") expect_match(shape_out$commands, "letters", fixed=TRUE) }) test_that("define_sizeby_for_column_plot produces the expected commands", { params <- pObjects$memory$ReducedDimensionPlot1 params[[iSEE:::.sizeByField]] <- iSEE:::.sizeByColDataTitle params[[iSEE:::.sizeByColData]] <- "NREADS" env <- new.env() env$se <- sce .generateDotPlotData(params, env) size_out <- iSEE:::.addDotPlotDataSize(params, env) expect_true(!is.null(env$plot.data$SizeBy)) expect_identical(size_out$labels$SizeBy, "NREADS") expect_match(size_out$commands, "NREADS", fixed=TRUE) }) test_that(".define_sizeby_for_row_plot produces the expected commands", { params <- pObjects$memory$RowDataPlot1 params[[iSEE:::.sizeByField]] <- iSEE:::.sizeByRowDataTitle params[[iSEE:::.sizeByRowData]] <- "mean_count" env <- new.env() env$se <- sce .generateDotPlotData(params, env) size_out <- iSEE:::.addDotPlotDataSize(params, env) expect_true(!is.null(env$plot.data$SizeBy)) expect_identical(size_out$labels$SizeBy, "mean_count") expect_match(size_out$commands, "mean_count", fixed=TRUE) }) test_that(".coerce_type handles various inputs correctly", { input_field <- "XYZ" expect_warning( lab_out <- iSEE:::.coerce_type(letters, input_field, max_levels=0), "covariate has too many unique values, coercing to numeric" ) expect_identical(lab_out, "plot.data$XYZ <- as.numeric(as.factor(plot.data$XYZ));") expect_warning( lab_out <- iSEE:::.coerce_type(factor(letters), input_field, max_levels=0), "covariate has too many unique values, coercing to numeric" ) expect_identical(lab_out, "plot.data$XYZ <- as.numeric(plot.data$XYZ);") lab_out <- iSEE:::.coerce_type(1:10, input_field) expect_identical(lab_out, NULL) lab_out <- iSEE:::.coerce_type(letters, input_field) expect_identical(lab_out, 'plot.data[["XYZ"]] <- factor(plot.data[["XYZ"]]);') lab_out <- iSEE:::.coerce_type(factor(letters), input_field) expect_identical(lab_out, NULL) }) test_that(".create_points handles selection effects", { all_memory <- pObjects$memory rdp <- all_memory$ReducedDimensionPlot1 fap <- all_memory$FeatureAssayPlot1 fap[[iSEE:::.selectColSource]] <- .getEncodedName(rdp) rd <- reducedDim(sce, rdp[[iSEE:::.redDimType]]) x_10 <- head(rd[, rdp[[iSEE:::.redDimXAxis]]], 10) y_10 <- head(rd[, rdp[[iSEE:::.redDimYAxis]]], 10) all_memory$ReducedDimensionPlot1[[iSEE:::.brushData]] <- list( xmin=min(x_10), xmax=max(x_10), ymin=min(y_10), ymax=max(y_10), direction="xy", mapping=list(x="X", y="Y"), brushId="dummy_brush", outputId="dummy_plot" ) out <- .generateOutput(fap, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(!is.null(out$contents$SelectBy)) expect_true(any(grepl("geom_point.*SelectBy.*alpha", unlist(out$commands)))) fap[[iSEE:::.colorByField]] <- iSEE:::.colorByColSelectionsTitle fap[[iSEE:::.selectTransAlpha]] <- 1 out <- .generateOutput(fap, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(!is.null(out$contents$SelectBy)) expect_false(any(grepl("geom_point.*SelectBy.*alpha", unlist(out$commands)))) expect_true(any(grepl("columnSelectionColorMap", unlist(out$commands)))) fap[[iSEE:::.selectColRestrict]] <- TRUE out <- .generateOutput(fap, sce, all_memory=all_memory, 
all_contents=pObjects$contents) expect_true(!is.null(out$contents$SelectBy)) expect_true(any(grepl("plot.data.all", unlist(out$commands)))) expect_true(any(grepl("subset.*SelectBy", unlist(out$commands)))) }) test_that(".create_points handles sizing effects", { all_memory <- pObjects$memory rdp <- all_memory$ReducedDimensionPlot1 rdp[[iSEE:::.sizeByField]] <- iSEE:::.sizeByColDataTitle out <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(!is.null(out$contents$SizeBy)) expect_true(any(grepl("geom_point.*SizeBy.*alpha", unlist(out$commands)))) }) test_that(".self_brush_box draw multiple shiny brushes", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "driver_1_s" brushHistory <- list( list(xmin=1, xmax=2, ymin=3, ymax=4), list(xmin=2, xmax=3, ymin=4, ymax=5) ) cdp[[iSEE:::.multiSelectHistory]] <- brushHistory out <- iSEE:::.self_select_boxes(cdp, flip=TRUE) expect_length(out, 2*length(brushHistory)) expect_type(out, "character") expect_match(out[1], "geom_rect", fixed=TRUE) expect_match(out[2], "geom_text", fixed=TRUE) expect_match(out[3], "geom_rect", fixed=TRUE) expect_match(out[4], "geom_text", fixed=TRUE) }) test_that(".self_brush_box can flip axes", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "driver_1_s" brushData <- list(xmin=1, xmax=2, ymin=3, ymax=4) cdp[[iSEE:::.brushData]] <- brushData out <- iSEE:::.self_select_boxes(cdp, flip=TRUE) expect_match(out, "aes(xmin=ymin, xmax=ymax, ymin=xmin, ymax=xmax)", fixed=TRUE) }) test_that(".self_brush_box flip axes when faceting on both X and Y", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "driver_1_s" cdp[[iSEE:::.facetRow]] <- "Column data" cdp[[iSEE:::.facetRowByColData]] <- "Core.Type" cdp[[iSEE:::.facetColumn]] <- "Column data" cdp[[iSEE:::.facetColumnByColData]] <- "passes_qc_checks_s" brushData <- list(xmin=1, xmax=2, ymin=3, ymax=4) cdp[[iSEE:::.brushData]] <- brushData out <- iSEE:::.self_select_boxes(cdp, flip=TRUE) expect_match( out, "list(FacetRow=all_active[['ColumnDataPlot1']][['panelvar2']], FacetColumn=all_active[['ColumnDataPlot1']][['panelvar1']])", fixed=TRUE) }) test_that(".self_lasso_path work with a single point", { rdp <- pObjects$memory$ReducedDimensionPlot1 rd <- reducedDim(sce, rdp[[iSEE:::.redDimType]]) x_10 <- head(rd[, rdp[[iSEE:::.redDimXAxis]]], 10) y_10 <- head(rd[, rdp[[iSEE:::.redDimYAxis]]], 10) new_lasso <- list(lasso=NULL, closed=FALSE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y")) new_lasso$coord <- matrix( data=c( min(x_10), min(y_10) ), ncol=2, byrow=TRUE ) rdp[[iSEE:::.brushData]] <- new_lasso lasso_cmd <- iSEE:::.self_select_boxes(rdp, flip=FALSE) expect_match(lasso_cmd, "geom_point", fixed=TRUE) }) test_that(".self_lasso_path work with an open path", { rdp <- pObjects$memory$ReducedDimensionPlot1 rd <- reducedDim(sce, rdp[[iSEE:::.redDimType]]) x_10 <- head(rd[, rdp[[iSEE:::.redDimXAxis]]], 10) y_10 <- head(rd[, rdp[[iSEE:::.redDimYAxis]]], 10) new_lasso <- list(lasso=NULL, closed=FALSE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y")) new_lasso$coord <- matrix( data=c( min(x_10), min(y_10), max(x_10), min(y_10), max(x_10), 
max(y_10) ), ncol=2, byrow=TRUE ) rdp[[iSEE:::.brushData]] <- new_lasso lasso_cmd <- iSEE:::.self_select_boxes(rdp, flip=FALSE) expect_match(lasso_cmd[1], "geom_path", fixed=TRUE) expect_match(lasso_cmd[2], "geom_point", fixed=TRUE) expect_identical(lasso_cmd[3], "scale_shape_manual(values=c('TRUE'=22, 'FALSE'=20))") expect_identical(lasso_cmd[4], "guides(shape='none')") }) test_that(".self_lasso_path work with an open path and a ShapeBy covariate", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.shapeByField]] <- iSEE:::.shapeByColDataTitle rd <- reducedDim(sce, rdp[[iSEE:::.redDimType]]) x_10 <- head(rd[, rdp[[iSEE:::.redDimXAxis]]], 10) y_10 <- head(rd[, rdp[[iSEE:::.redDimYAxis]]], 10) new_lasso <- list(lasso=NULL, closed=FALSE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y")) new_lasso$coord <- matrix( data=c( min(x_10), min(y_10), max(x_10), min(y_10), max(x_10), max(y_10) ), ncol=2, byrow=TRUE ) rdp[[iSEE:::.brushData]] <- new_lasso lasso_cmd <- iSEE:::.self_select_boxes(rdp, flip=FALSE) expect_match(lasso_cmd[1], "geom_path", fixed=TRUE) expect_match(lasso_cmd[2], "geom_point", fixed=TRUE) expect_identical(lasso_cmd[3], "scale_size_manual(values=c('TRUE'=1.5, 'FALSE'=0.25))") expect_identical(lasso_cmd[4], "guides(size='none')") }) test_that(".self_lasso_path work with a closed path", { rdp <- pObjects$memory$ReducedDimensionPlot1 rd <- reducedDim(sce, rdp[[iSEE:::.redDimType]]) x_10 <- head(rd[, rdp[[iSEE:::.redDimXAxis]]], 10) y_10 <- head(rd[, rdp[[iSEE:::.redDimYAxis]]], 10) new_lasso <- list(lasso=NULL, closed=TRUE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y")) new_lasso$coord <- matrix( data=c( min(x_10), min(y_10), max(x_10), min(y_10), max(x_10), max(y_10), min(x_10), max(y_10), min(x_10), min(y_10) ), ncol=2, byrow=TRUE ) rdp[[iSEE:::.brushData]] <- new_lasso lasso_cmd <- iSEE:::.self_select_boxes(rdp, flip=FALSE) expect_match(lasso_cmd[1], "geom_polygon", fixed=TRUE) }) test_that(".self_lasso_path works with multiple lassos", { cdp <- pObjects$memory$ColumnDataPlot cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "driver_1_s" LASSO_CLOSED <- list( lasso=NULL, closed=TRUE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y"), coord=matrix(c(1, 2, 2, 1, 1, 1, 1, 2, 2, 1), ncol=2)) lassoHistory <- list(LASSO_CLOSED, LASSO_CLOSED) cdp[[iSEE:::.multiSelectHistory]] <- lassoHistory lasso_cmd <- iSEE:::.self_select_boxes(cdp, flip=FALSE) expect_type(lasso_cmd, "character") expect_length(lasso_cmd, 2*length(lassoHistory)) expect_match(lasso_cmd[1], "geom_polygon", fixed=TRUE) expect_match(lasso_cmd[2], "geom_text", fixed=TRUE) }) test_that(".self_lasso_path flip axes when faceting on both X and Y", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "driver_1_s" cdp[[iSEE:::.facetRow]] <- "Column data" cdp[[iSEE:::.facetRowByColData]] <- "Core.Type" cdp[[iSEE:::.facetColumn]] <- "Column data" cdp[[iSEE:::.facetColumnByColData]] <- "passes_qc_checks_s" LASSO_CLOSED <- list( lasso=NULL, closed=TRUE, panelvar1=NULL, panelvar2=NULL, mapping=list(x="X", y="Y"), coord=matrix(c(1, 2, 2, 1, 1, 1, 1, 2, 2, 1), ncol=2)) cdp[[iSEE:::.brushData]] <- LASSO_CLOSED lasso_cmd <- iSEE:::.self_select_boxes(cdp, flip=FALSE) expect_match( lasso_cmd, "FacetRow=all_active[['ColumnDataPlot1']][['panelvar2']], 
FacetColumn=all_active[['ColumnDataPlot1']][['panelvar1']]", fixed=TRUE) }) test_that(".addFacets works correctly plots", { params <- pObjects$memory$ReducedDimensionPlot1 out <- iSEE:::.addFacets(params) expect_null(out) params[["FacetRowBy"]] <- "Column data" params[["FacetRowByColData"]] <- "driver_1_s" params[["FacetColumnBy"]] <- "Column data" params[["FacetColumnByColData"]] <- "Core.Type" out <- iSEE:::.addFacets(params) expect_identical(out, "facet_grid(FacetRow ~ FacetColumn)") params <- pObjects$memory$RowDataPlot1 out <- iSEE:::.addFacets(params) expect_null(out) params[["FacetRowBy"]] <- "Row data" params[["FacetRowByRowData"]] <- "letters" out <- iSEE:::.addFacets(params) expect_identical(out, "facet_grid(FacetRow ~ .)") params[["FacetRowBy"]] <- "None" params[["FacetColumnBy"]] <- "Row data" params[["FacetColumnByRowData"]] <- "letters" out <- iSEE:::.addFacets(params) expect_identical(out, "facet_grid(. ~ FacetColumn)") }) test_that(".choose_plot_type flips both full and restricted plot.data for horizontal violins", { plot.data <- data.frame(X=runif(10), Y=factor(letters[1:10])) envir <- new.env() assign("plot.data", plot.data, envir=envir) assign("plot.data.all", plot.data, envir=envir) out <- iSEE:::.choose_plot_type(envir=envir) expect_identical(envir$plot.data$X, plot.data$Y) expect_identical(envir$plot.data$Y, plot.data$X) expect_identical(envir$plot.data.all$X, plot.data$Y) expect_identical(envir$plot.data.all$Y, plot.data$X) }) test_that("Jitter is properly performed for faceted plots", { plot.data <- data.frame(Y=runif(10), X=factor(letters[1:10]), FacetRow=factor(letters[1:10]), FacetColumn=factor(LETTERS[1:10])) out <- iSEE:::.violin_setup(plot_data=plot.data, horizontal=FALSE) expect_match(out[3], "jitterViolinPoints") expect_match(out[3], "FacetRow") expect_match(out[3], "FacetColumn") plot.data <- data.frame(Y=factor(letters[1:10]), X=factor(letters[1:10]), FacetRow=factor(letters[1:10]), FacetColumn=factor(LETTERS[1:10])) out <- iSEE:::.square_setup(plot_data=plot.data) expect_match(out, "jitterSquarePoints") expect_match(out, "FacetRow") expect_match(out, "FacetColumn") }) test_that(".downsample_points produces the appropriate code for scatter plots", { rdp <- pObjects$memory$ReducedDimensionPlot1 sce <- .cacheCommonInfo(rdp, sce) rdp <- .refineParameters(rdp, sce) ref <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl("subsetPointsByGrid", unlist(ref$commands)))) expect_false(any(grepl("plot.data.pre", unlist(ref$commands)))) rdp[[iSEE:::.plotPointDownsample]] <- TRUE out <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("subsetPointsByGrid.*X.*Y", unlist(out$commands)))) expect_true(any(grepl("plot.data.pre", unlist(out$commands)))) }) test_that(".downsample_points produces the appropriate code for square plots", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" ref <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl("subsetPointsByGrid", unlist(ref$commands)))) expect_false(any(grepl("plot.data.pre", unlist(ref$commands)))) cdp[[iSEE:::.plotPointDownsample]] <- TRUE out <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("subsetPointsByGrid.*jitteredX.*jitteredY", 
unlist(out$commands)))) expect_true(any(grepl("plot.data.pre", unlist(out$commands)))) }) test_that(".downsample_points produces the appropriate code for violin plots", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "passes_qc_checks_s" cdp[[iSEE:::.colDataYAxis]] <- "NREADS" ref <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl("subsetPointsByGrid", unlist(ref$commands)))) expect_false(any(grepl("plot.data.pre", unlist(ref$commands)))) cdp[[iSEE:::.plotPointDownsample]] <- TRUE out <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("subsetPointsByGrid.*jitteredX", unlist(out$commands)))) expect_true(any(grepl("plot.data.pre", unlist(out$commands)))) }) test_that(".downsample_points produces the appropriate code for horizontal violin plots", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "NREADS" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" ref <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl("subsetPointsByGrid", unlist(ref$commands)))) expect_false(any(grepl("plot.data.pre", unlist(ref$commands)))) cdp[[iSEE:::.plotPointDownsample]] <- TRUE out <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("subsetPointsByGrid.*jitteredX", unlist(out$commands)))) expect_true(any(grepl("plot.data.pre", unlist(out$commands)))) }) test_that(".downsample_points interacts correctly with selection of a specific sample/feature", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.colorByField]] <- iSEE:::.colorBySampNameTitle rdp[[iSEE:::.plotPointDownsample]] <- TRUE sce <- .cacheCommonInfo(rdp, sce) rdp <- .refineParameters(rdp, sce) out <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl(".subsetted | as.logical(plot.data$ColorBy)", unlist(out$commands), fixed=TRUE))) }) setClass("ColumnDataPlotPrioritized", contains="ColumnDataPlot") setMethod(".prioritizeDotPlotData", "ColumnDataPlotPrioritized", function(x, envir) { cmds <- c( ".priority <- rep(letters[1:5], length.out=ncol(se));", ".priority <- factor(.priority, ordered=TRUE);", ".rescaled <- c(a=1, b=0.5, c=2, d=3, e=1);" ) eval(parse(text=cmds), envir=envir) list(commands=cmds, rescaled=TRUE) }) test_that(".generateDotPlot responds to priority", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" ref <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl('plot.data\\[order\\(.priority\\)', unlist(ref$commands)))) cdpp <- as(cdp, "ColumnDataPlotPrioritized") out <- .generateOutput(cdpp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl('plot.data\\[order\\(.priority\\)', unlist(out$commands)))) expect_identical(out$contents, ref$contents) expect_identical(sort(rownames(out$plot$data)), sort(rownames(ref$plot$data))) expect_false(identical(out$plot$data, ref$plot$data)) }) test_that(".downsample_points responds to priority", { cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle 
cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" cdp[[iSEE:::.colDataYAxis]] <- "passes_qc_checks_s" cdp[[iSEE:::.plotPointDownsample]] <- TRUE cdp[[iSEE:::.plotPointSampleRes]] <- 50 ref <- .generateOutput(cdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_false(any(grepl('grouping=\\.priority', unlist(ref$commands)))) expect_false(any(grepl('resolution=50\\*\\.rescaled', unlist(ref$commands)))) cdpp <- as(cdp, "ColumnDataPlotPrioritized") out <- .generateOutput(cdpp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl('grouping=\\.priority', unlist(out$commands)))) expect_true(any(grepl('resolution=50\\*\\.rescaled', unlist(out$commands)))) expect_identical(out$contents, ref$contents) expect_false(identical(out$plot$data, ref$plot$data)) }) test_that(".create_plot can add faceting commands", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[["FacetColumnBy"]] <- "Column data" rdp[["FacetColumnByColData"]] <- "driver_1_s" out <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("facet_grid(. ~ FacetColumn)", out$commands$plot, fixed=TRUE))) }) test_that("2d density contours can be added to scatter plots ", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.contourAdd]] <- TRUE out <- .generateOutput(rdp, sce, all_memory=all_memory, all_contents=pObjects$contents) expect_true(any(grepl("geom_density_2d", out$commands$plot, fixed=TRUE))) }) test_that("plots subsetted to no data contain a geom_blank command", { geom_blank_cmd <- "geom_blank(data=plot.data.all, inherit.aes=FALSE, aes(x=X, y=Y)) +" out <- iSEE:::.scatter_plot( plot_data=data.frame(), param_choices=pObjects$memory$ReducedDimensionPlot1, "x_lab", "y_lab", "color_lab", "shape_lab", "size_lab", "title", by_row=FALSE, is_subsetted=TRUE, is_downsampled=FALSE) expect_identical(out[["select_blank"]], geom_blank_cmd) cdp <- pObjects$memory$ColumnDataPlot1 cdp[[iSEE:::.colDataXAxis]] <- iSEE:::.colDataXAxisColDataTitle cdp[[iSEE:::.colDataXAxisColData]] <- "driver_1_s" out <- iSEE:::.violin_plot( plot_data=data.frame(), param_choices=cdp, "x_lab", "y_lab", "color_lab", "shape_lab", "size_lab", "title", by_row=FALSE, is_subsetted=TRUE, is_downsampled=FALSE) expect_identical(out[["select_blank"]], geom_blank_cmd) cdp[[iSEE:::.colDataYAxis]] <- "dissection_s" out <- iSEE:::.square_plot( plot_data=data.frame(), param_choices=cdp, "x_lab", "y_lab", "color_lab", "shape_lab", "size_lab", "title", by_row=FALSE, is_subsetted=TRUE) expect_identical(out[["select_blank"]], geom_blank_cmd) }) test_that(".buildLabs returns NULL for NULL inputs", { expect_null(iSEE:::.buildLabs()) }) test_that(".add_selectby_column handles NAs correctly", { rdp <- pObjects$memory$ReducedDimensionPlot1 env <- new.env() env$plot.data <- data.frame(X=1, Y=2, FacetRow=1, FacetColumn=2) out <- iSEE:::.add_selectby_column(rdp, env) expect_false(any(grepl("subset.*is.na", unlist(out)))) expect_identical(nrow(env$plot.data), 1L) env$plot.data <- data.frame(X=1, Y=NA_real_) out <- iSEE:::.add_selectby_column(rdp, env) expect_true(any(grepl("subset.*is.na", unlist(out)))) expect_identical(nrow(env$plot.data), 0L) env$plot.data <- data.frame(X=1, Y=1, FacetRow=NA_real_) out <- iSEE:::.add_selectby_column(rdp, env) expect_true(any(grepl("subset.*is.na", unlist(out)))) expect_identical(nrow(env$plot.data), 0L) }) test_that(".create_guides_command produces a command when expected", { x <- ReducedDimensionPlot(PointSize = 1, LegendPointSize = 2) out <- 
iSEE:::.create_guides_command(x, factor(sce$driver_1_s)) expect_identical( out, "guides(colour = guide_legend(override.aes = list(size=2)), fill = guide_legend(override.aes = list(size=2))) +" ) x <- ReducedDimensionPlot(LegendPointSize = 2, PointSize = 2) out <- iSEE:::.create_guides_command(x, factor(sce$driver_1_s)) expect_null(out) x <- ReducedDimensionPlot(PointSize = 1, LegendPointSize = 2) out <- iSEE:::.create_guides_command(x, sce$NREADS) expect_null(out) }) test_that(".generateDotPlot handles custom labels", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.plotCustomLabels]] <- TRUE cn <- colnames(sce)[1:3] rdp[[iSEE:::.plotCustomLabelsText]] <- paste0(cn, collapse = "\n") p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_type(p.out, "list") expect_named(p.out, c("commands", "contents", "plot", "varname")) expect_type(p.out$commands, "list") expect_true(all(vapply(p.out$commands, is.character, TRUE))) expect_true(any(grepl("LabelBy", p.out$commands))) expect_s3_class(p.out$plot, c("gg", "ggplot")) }) test_that(".generateDotPlot handles centered labels", { rdp <- pObjects$memory$ReducedDimensionPlot1 rdp[[iSEE:::.plotLabelCenters]] <- TRUE p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl("\\.aggregated", p.out$commands))) expect_s3_class(p.out$plot, c("gg", "ggplot")) rdp[["FacetRowBy"]] <- "Column data" rdp[["FacetColumnBy"]] <- "Column data" rdp[["FacetColumnByColData"]] <- "dissection_s" rdp[["FacetRowByColData"]] <- "dissection_s" p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl("LabelCenters.*FacetRow", p.out$commands))) expect_true(any(grepl("LabelCenters.*FacetColumn", p.out$commands))) rdp <- pObjects$memory$RowDataPlot1 rdp[["XAxis"]] <- "Row data" rdp[[iSEE:::.plotLabelCenters]] <- TRUE p.out <- .generateOutput(rdp, sce, all_memory=pObjects$memory, all_contents=pObjects$contents) expect_true(any(grepl("\\.aggregated", p.out$commands))) })
"ccme_glyphosate"
siaf <- function (f, F, Fcircle, effRange, deriv, Deriv, simulate, npars, validpars = NULL) { npars <- as.integer(npars) if (length(npars) != 1 || npars < 0L) { stop("'siaf$npars' must be a single nonnegative number") } f <- .checknargs3(f, "siaf$f") F <- if (missing(F) || is.null(F)) siaf.fallback.F else { F <- match.fun(F) if (length(formals(F)) < 4L) stop("siaf$F() must accept >=4 arguments ", "(polydomain, f, pars, type)") F } haspars <- npars > 0L if (!haspars || missing(deriv)) deriv <- NULL if (!is.null(deriv)) deriv <- .checknargs3(deriv, "siaf$deriv") if (missing(effRange)) effRange <- NULL if (missing(Fcircle) || is.null(Fcircle)) { Fcircle <- NULL if (!is.null(effRange)) { message("'siaf$effRange' only works in conjunction with 'siaf$Fcircle'") effRange <- NULL } } if (!is.null(Fcircle)) Fcircle <- .checknargs3(Fcircle, "siaf$Fcircle") if (!is.null(effRange)) { effRange <- match.fun(effRange) if (length(formals(effRange)) < 1L) { stop("the 'siaf$effRange' function must accept a parameter vector") } } Deriv <- if (is.null(deriv)) NULL else if (missing(Deriv) || is.null(Deriv)) siaf.fallback.Deriv else { Deriv <- match.fun(Deriv) if (length(formals(Deriv)) < 4L) stop("siaf$Deriv() must accept >=4 arguments ", "(polydomain, deriv, pars, type)") Deriv } if (missing(simulate)) simulate <- NULL if (!is.null(simulate)) { simulate <- .checknargs3(simulate, "siaf$simulate") if (length(formals(simulate)) == 3L) formals(simulate) <- c(formals(simulate), alist(ub=)) } validpars <- if (!haspars || is.null(validpars)) NULL else match.fun(validpars) list(f = f, F = F, Fcircle = Fcircle, effRange = effRange, deriv = deriv, Deriv = Deriv, simulate = simulate, npars = npars, validpars = validpars) } siaf.constant <- function () { res <- list( f = as.function(c(alist(s=, pars=NULL, types=NULL), quote(rep.int(1, length(s)/2))), envir = .GlobalEnv), Fcircle = as.function(c(alist(r=, pars=NULL, type=NULL), quote(pi*r^2)), envir = .GlobalEnv), simulate = as.function(c(alist(n=, pars=NULL, type=NULL, ub=), quote(runifdisc(n, ub))), envir = getNamespace("surveillance")), npars = 0L ) attr(res, "constant") <- TRUE res } siaf.fallback.F <- function (polydomain, f, pars, type, method = "SV", ...) { if (identical(method,"SV")) { polyCub.SV(polyregion = polydomain, f = f, pars, type, alpha = 0, ...) } else { polyCub(polyregion = polydomain, f = f, method = method, pars, type, ...) } } getFcircle <- function (siaf, control.F = list()) { if (is.null(siaf$Fcircle)) { function (r, pars, type) { disc <- discpoly(c(0,0), r, npoly = 64, class = "owin") do.call(siaf$F, c(alist(disc, siaf$f, pars, type), control.F)) } } else { siaf$Fcircle } } siaf.fallback.Deriv <- function (polydomain, deriv, pars, type, method = "SV", ...) { deriv1 <- function (s, paridx) deriv(s, pars, type)[,paridx,drop=TRUE] intderiv1 <- function (paridx) polyCub(polyregion = polydomain, f = deriv1, method = method, paridx = paridx, ...) vapply(X = seq_along(pars), FUN = intderiv1, FUN.VALUE = 0, USE.NAMES = FALSE) } siaf.simulatePC <- function (intrfr) { as.function(c(alist(n=, siafpars=, type=, ub=), substitute({ stopifnot(is.finite(ub)) normconst <- intrfr(ub, siafpars, type) CDF <- function (q) intrfr(q, siafpars, type) / normconst QF <- function (p) uniroot(function(q) CDF(q)-p, lower=0, upper=ub)$root r <- vapply(X=runif(n), FUN=QF, FUN.VALUE=0, USE.NAMES=FALSE) theta <- runif(n, 0, 2*pi) r * cbind(cos(theta), sin(theta)) })), envir=parent.frame()) } checksiaf <- function (siaf, pargrid, type = 1, tolerance = 1e-5, method = "SV", ...) 
{ stopifnot(is.list(siaf), is.numeric(pargrid), !is.na(pargrid), length(pargrid) > 0) pargrid <- as.matrix(pargrid) stopifnot(siaf$npars == ncol(pargrid)) if (!is.null(siaf$F)) { cat("'F' vs. cubature using method = \"", method ,"\" ... ", sep="") comp.F <- checksiaf.F(siaf$F, siaf$f, pargrid, type=type, method=method, ...) cat(attr(comp.F, "all.equal") <- all.equal(comp.F[,1], comp.F[,2], check.attributes=FALSE, tolerance=tolerance), "\n") } if (!is.null(siaf$Fcircle)) { cat("'Fcircle' vs. cubature using method = \"",method,"\" ... ", sep="") comp.Fcircle <- checksiaf.Fcircle(siaf$Fcircle, siaf$f, pargrid, type=type, method=method, ...) cat(attr(comp.Fcircle, "all.equal") <- all.equal(comp.Fcircle[,1], comp.Fcircle[,2], check.attributes=FALSE, tolerance=tolerance), "\n") } if (!is.null(siaf$deriv)) { cat("'deriv' vs. numerical derivative ... ") if (requireNamespace("maxLik", quietly=TRUE)) { maxRelDiffs.deriv <- checksiaf.deriv(siaf$deriv, siaf$f, pargrid, type=type) cat(attr(maxRelDiffs.deriv, "all.equal") <- if (any(maxRelDiffs.deriv > tolerance)) paste("maxRelDiff =", max(maxRelDiffs.deriv)) else TRUE, "\n") } else cat("Failed: need package", sQuote("maxLik"), "\n") } if (!is.null(siaf$Deriv)) { cat("'Deriv' vs. cubature using method = \"", method ,"\" ... ", sep="") comp.Deriv <- checksiaf.Deriv(siaf$Deriv, siaf$deriv, pargrid, type=type, method=method, ...) if (siaf$npars > 1) cat("\n") attr(comp.Deriv, "all.equal") <- sapply(seq_len(siaf$npars), function (j) { if (siaf$npars > 1) cat("\tsiaf parameter ", j, ": ", sep="") ae <- all.equal(comp.Deriv[,j], comp.Deriv[,siaf$npars+j], check.attributes=FALSE, tolerance=tolerance) cat(ae, "\n") ae }) } if (interactive() && !is.null(siaf$simulate)) { cat("Simulating ... ") checksiaf.simulate(siaf$simulate, siaf$f, pargrid[1,], type=type) cat("(-> check the plot)\n") } invisible(mget(c("comp.F", "comp.Fcircle", "maxRelDiffs.deriv", "comp.Deriv"), ifnotfound=list(NULL), inherits=FALSE)) } checksiaf.F <- function (F, f, pargrid, type=1, method="SV", ...) { res <- t(apply(pargrid, 1, function (pars) { given <- F(LETTERR, f, pars, type) num <- siaf.fallback.F(polydomain = LETTERR, f = f, pars = pars, type = type, method = method, ...) c(given, num) })) colnames(res) <- c("F", method) res } checksiaf.Fcircle <- function (Fcircle, f, pargrid, type=1, rs=c(1,5,10,50,100), method="SV", ...) { pargrid <- pargrid[rep(1:nrow(pargrid), each=length(rs)),,drop=FALSE] rpargrid <- cbind(rs, pargrid, deparse.level=0) res <- t(apply(rpargrid, 1, function (x) { disc <- discpoly(c(0,0), x[1L], npoly = 128, class = "owin") c(ana = Fcircle(x[1L], x[-1L], type), num = siaf.fallback.F(polydomain = disc, f = f, pars = x[-1L], type = type, method = method, ...)) })) res } checksiaf.deriv <- function (deriv, f, pargrid, type=1, rmax=100) { rgrid <- seq(-rmax,rmax,len=21) / sqrt(2) rgrid <- rgrid[rgrid != 0] sgrid <- cbind(rgrid, rgrid) apply(pargrid, 1, function (pars) { maxLik::compareDerivatives(f, deriv, t0=pars, s=sgrid, print=FALSE)$maxRelDiffGrad }) } checksiaf.Deriv <- function (Deriv, deriv, pargrid, type=1, method="SV", ...) { res <- t(apply(pargrid, 1, function (pars) { given <- Deriv(LETTERR, deriv, pars, type) num <- siaf.fallback.Deriv(polydomain = LETTERR, deriv = deriv, pars = pars, type = type, method = method, ...) 
c(given, num) })) paridxs <- seq_len(ncol(pargrid)) colnames(res) <- c(paste("Deriv",paridxs,sep="."), paste(method,paridxs,sep=".")) res } checksiaf.simulate <- function (simulate, f, pars, type=1, B=3000, ub=10, plot=interactive()) { simpoints <- simulate(B, pars, type=type, ub=ub) if (plot) { opar <- par(mfrow=c(2,1), mar=c(4,3,2,1)); on.exit(par(opar)) plot(as.im.function(function(x,y,...) f(cbind(x,y), pars, type), W=discpoly(c(0,0), ub, class="owin")), axes=TRUE, main="Simulation from the spatial kernel") points(simpoints, cex=0.2) kdens <- kde2d(simpoints[,1], simpoints[,2], n=100) contour(kdens, add=TRUE, col=2, lwd=2, labcex=1.5, vfont=c("sans serif", "bold")) truehist(sqrt(rowSums(simpoints^2)), xlab="Distance") rfr <- function (r) r*f(cbind(r,0), pars, type) rfrnorm <- integrate(rfr, 0, ub)$value do.call("curve", list(quote(rfr(x)/rfrnorm), add=TRUE, col=2, lwd=2)) } invisible(simpoints) }
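# Usage sketch (added for illustration, not part of the original source): the
# constant spatial kernel returned by siaf.constant() has no parameters, its
# f() is identically 1 for every coordinate pair, and its Fcircle() is the disc area.
siaf_const <- siaf.constant()
siaf_const$f(cbind(c(0, 1), c(0, 2)))   # rep(1, 2): one value per coordinate pair
siaf_const$Fcircle(2)                    # pi * 2^2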
plotSEMM_setup <- function(pi, alpha1, alpha2, beta21, psi11, psi22, points = 50) { if (!is.vector(pi)) { print("Error:Probabilities must be provided as a vector") } if (!is.vector(alpha2)) { print("Error: Alpha values must be provided as a vector") } if (!is.vector(alpha1)) { print("Error: Kappa values must be provided as a vector") } if (!is.vector(beta21)) { print("Error: Beta values must be provided as a vector") } if (!is.vector(psi11)) { print("Error: Psi values must be provided as a vector") } if (!is.vector(psi22)) { print("Error: Psi values must be provided as a vector") } classes <- length(beta21) alphaarray <- array(data = 0, c(2, 1, classes)) j <- 0 for (i in 1:classes) { alphaarray[1, 1, i] <- alpha1[i] alphaarray[2, 1, i] <- alpha2[i] j <- j+1 } gammaarray <- array(data = 0, c(2, 2, classes)) j <- 0 for (i in 1:classes) { gammaarray[2, 1, i] <- beta21[i] j <- j+1 } psiarray <- array(data = 0, c(2, 2, classes)) j <- 0 for (i in 1:classes) { psiarray[1, 1, i] <- psi11[i] psiarray[2, 2, i] <- psi22[i] j <- j+1 } IMPCOV <- array(data = NA, c(2, 2, classes)) IMPMEAN <- array(data = NA, c(2, 2, classes)) for (i in 1:classes) { IMPCOV[, , i] <- solve(diag(x = 1, nrow = 2, ncol = 2) - gammaarray[, , i]) %*% (psiarray[, , i]) %*% t(solve(diag(x = 1, nrow = 2, ncol = 2) - gammaarray[, , i])) IMPMEAN[, , i] <- solve(diag(x = 1, nrow = 2, ncol = 2) - gammaarray[, , i]) %*% (alphaarray[, , i]) } MuKsi <- vector(mode = "numeric", length = classes) MuEta <- vector(mode = "numeric", length = classes) VKsi <- vector(mode = "numeric", length = classes) VEta <- vector(mode = "numeric", length = classes) COVKSIETA <- vector(mode = "numeric", length = classes) alpha <- vector(mode = "numeric", length = classes) gamma <- vector(mode = "numeric", length = classes) for (i in 1:classes) { MuKsi[i] = IMPMEAN[1, 1, i] MuEta[i] = IMPMEAN[2, 2, i] VKsi[i] = IMPCOV[1, 1, i] VEta[i] = IMPCOV[2, 2, i] COVKSIETA[i] = IMPCOV[1, 2, i] alpha[i] = alphaarray[2, 1, i] gamma[i] = gammaarray[2, 1, i] } overallmuKSI <- 0 overallmuETA <- 0 for (i in 1:classes) { overallmuKSI = overallmuKSI + pi[i] * MuKsi[i] overallmuETA = overallmuETA + pi[i] * MuEta[i] } overallvKSI <- 0 overallvETA <- 0 for (i in 1:classes) { for (j in 1:classes) { if (i < j) { overallvKSI <- overallvKSI + pi[i] * pi[j] * (MuKsi[i] - MuKsi[j]) * t(MuKsi[i] - MuKsi[j]) overallvETA <- overallvETA + pi[i] * pi[j] * (MuEta[i] - MuEta[j]) * t(MuEta[i] - MuEta[j]) } } } for (i in 1:classes) { overallvKSI = overallvKSI + (VKsi[i] * pi[i]) overallvETA = overallvETA + (VEta[i] * pi[i]) } upperboundKsi <- overallmuKSI + 3 * sqrt(overallvKSI) lowerboundKsi <- overallmuKSI - 3 * sqrt(overallvKSI) upperboundEta <- overallmuETA + 3 * sqrt(overallvETA) lowerboundEta <- overallmuETA - 3 * sqrt(overallvETA) Ksi <- seq(lowerboundKsi, upperboundKsi, length = points) Eta <- seq(lowerboundEta, upperboundEta, length = points) pKsi <- matrix(data = 0, nrow = length(Ksi), ncol = classes) for (i in 1:classes) { pKsi[, i] <- pi[i] * dnorm(Ksi, mean = MuKsi[i], sd = sqrt(VKsi[i])) } pEta <- matrix(data = 0, nrow = length(Eta), ncol = classes) for (i in 1:classes) { pEta[, i] <- pi[i] * dnorm(Eta, mean = MuEta[i], sd = sqrt(VEta[i])) } post <- matrix(data = 0, nrow = length(Ksi), ncol = classes) sumpKsi = matrix(data = 0, nrow = length(Ksi), ncol = 1) for (i in 1:classes) { sumpKsi[, 1] = sumpKsi[, 1] + pKsi[, i] } for (i in 1:classes) { post[, i] <- pKsi[, i]/sumpKsi[, 1] } denKsi <- sumpKsi[, 1] sumpEta <- matrix(data = 0, nrow = length(Eta), ncol = classes) for (i in 
1:classes) { sumpEta[, 1] <- sumpEta[, 1] + pEta[, i] } denEta <- sumpEta[, 1] etahmat <- matrix(data = 0, nrow = length(Ksi), ncol = classes) for (i in 1:classes) { etahmat[, i] <- alpha[i] + gamma[i] * Ksi } etah <- vector(mode = "numeric", length = length(Ksi)) for (i in 1:classes) { etah <- etah + post[, i] * etahmat[, i] } etah_ <- etah etah_[denKsi <= 0.02] <- NA etah_[denEta <= 0.02] <- NA r <- vector(mode = "numeric", length = classes) for (i in 1:classes) { r[i] <- COVKSIETA[i]/sqrt(VKsi[i] * VEta[i]) } denKE <- function(Ksi, Eta) { placeholder <- 0 denKE_ <- matrix(data = 0, nrow = length(Ksi), ncol = classes) for (i in 1:classes) { z <- ((Ksi - MuKsi[i])^2)/VKsi[i] + ((Eta - MuEta[i])^2)/VEta[i] - 2 * r[i] * (Ksi - MuKsi[i]) * (Eta - MuEta[i])/sqrt(VKsi[i] * VEta[i]) denKE_[, i] <- (1/(2 * 22/7 * sqrt(VKsi[i]) * sqrt(VEta[i]) * sqrt(1 - r[i]^2))) * exp(-z/(2 * (1 - r[i]^2))) } for (i in 1:classes) { placeholder <- placeholder + pi[i] * denKE_[, i] } denKE <- placeholder } z <- outer(Ksi, Eta, denKE) SEMLIdatapks <- data.frame(Eta1=Ksi, Eta2=Eta, agg_denEta1=denKsi, agg_denEta2=denEta, agg_pred=etah_, etah=etah, class_pred=I(etahmat), contour=I(z), classes=classes, class_prob=I(post), class_denEta1=I(pKsi), class_denEta2=I(pEta), setup2=FALSE) return(SEMLIdatapks) }
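# Usage sketch (illustrative only; the parameter values below are made up): build the
# mixture-implied densities and aggregate regression line for a two-class model with
# equal class probabilities.
setup_df <- plotSEMM_setup(
  pi = c(0.5, 0.5),
  alpha1 = c(-1, 1), alpha2 = c(0, 1),
  beta21 = c(0.4, 0.8),
  psi11 = c(1, 1), psi22 = c(0.5, 0.5),
  points = 50
)
head(setup_df[, c("Eta1", "Eta2", "agg_pred")])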
hai_data_poly <- function(.recipe_object = NULL, ..., .p_degree = 2) {
  if (is.null(.recipe_object)) {
    rlang::abort("`.recipe_object` must be passed, please add.")
  } else {
    rec_obj <- .recipe_object
  }
  terms <- rlang::enquos(...)
  # Validate the degree before coercing it; the original test ran after as.double()
  # and therefore could never fail.
  if (!is.numeric(.p_degree)) {
    stop(call. = FALSE, "(.p_degree) must be a number (the polynomial degree).")
  }
  degree <- as.double(.p_degree)
  scale_obj <- recipes::step_poly(
    recipe = rec_obj,
    degree = degree,
    !!!terms
  )
  output <- list(
    rec_base = rec_obj,
    scale_rec_obj = scale_obj
  )
  return(output)
}
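# Usage sketch (illustrative; assumes the recipes package and uses the built-in
# mtcars data): add second-degree polynomial terms for two predictors to a recipe.
rec <- recipes::recipe(mpg ~ ., data = mtcars)
poly_out <- hai_data_poly(.recipe_object = rec, disp, hp, .p_degree = 2)
poly_out$scale_rec_obj   # the recipe with step_poly() appended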
plot_sdp <- function(pos, sdp, strain_labels=names(qtl2::CCcolors), ...) { n_str <- length(strain_labels) stopifnot(length(pos) == length(sdp)) stopifnot(all(sdp < 2^n_str & sdp >= 0)) alleles <- invert_sdp(sdp, n_str) y <- seq_len(n_str) plot_sdp_internal <- function(xlim=range(pos), ylim=c(max(y)+0.5, min(y)-0.5), xlab="Position (Mbp)", ylab="", xaxs="i", yaxs="i", main="", mgp.x=c(2.6, 0.5, 0), mgp.y=c(2.6, 0.5, 0), mgp=NULL, las=1, hlines=NULL, hlines_col="black", hlines_lwd=1, hlines_lty=1, vlines=NULL, vlines_col="white", vlines_lwd=1, vlines_lty=1, bgcolor="gray90", lwd=2, col="darkslateblue", sub="", ...) { dots <- list(...) if(!is.null(mgp)) mgp.x <- mgp.y <- mgp plot(0, 0, xlab="", ylab="", xlim=xlim, ylim=ylim, xaxs=xaxs, yaxs=yaxs, xaxt="n", yaxt="n", type="n", main=main, sub=sub) u <- par("usr") if(!is.null(bgcolor)) rect(u[1], u[3], u[2], u[4], col=bgcolor, border=NA) if(is.null(dots$xaxt)) dots$xaxt <- par("xaxt") if(is.null(dots$yaxt)) dots$yaxt <- par("yaxt") if(dots$xaxt != "n") { axis(side=1, at=pretty(xlim), mgp=mgp.x, las=las, tick=FALSE) } if(dots$yaxt != "n") { axis(side=2, at=y, strain_labels, mgp=mgp.y, las=las, tick=FALSE) } title(xlab=xlab, mgp=mgp.x) title(ylab=ylab, mgp=mgp.y) if(!(length(hlines)==1 && is.na(hlines))) { if(is.null(hlines)) hlines <- seq(0.5, n_str+0.5) abline(h=hlines, col=hlines_col, lwd=hlines_lwd, lty=hlines_lty) } if(!(length(vlines)==1 && is.na(vlines))) { if(is.null(vlines)) vlines <- pretty(xlim) abline(v=vlines, col=vlines_col, lwd=vlines_lwd, lty=vlines_lty) } for(i in seq_len(n_str)) { if(all(alleles[,i] == 1)) next wh <- which(alleles[,i]==3) yy <- rep(y[i], length(wh)) segments(pos[wh], yy-0.5, pos[wh], yy+0.5, lwd=lwd, col=col) } box() } plot_sdp_internal(...) }
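# Usage sketch (illustrative; relies on the companion invert_sdp() helper and the
# qtl2 package for the default strain labels, neither of which is shown here):
# plot the strain distribution patterns of three made-up variants.
example_pos <- c(12.3, 45.8, 97.1)    # positions in Mbp
example_sdp <- c(1L, 36L, 170L)       # SDP codes, each in [0, 2^8) for the 8 founders
plot_sdp(example_pos, example_sdp, main = "SDP example")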
with_mock_dir <- function(dir, expr, simplify = TRUE, replace = TRUE) { if (dir.exists("tests/testthat") && !(substr(dir, 1, 1) %in% c("/", "\\"))) { dir <- file.path("tests", "testthat", dir) } with_mock_path(dir, replace = replace, { if (dir.exists(dir)) { verbose_message("Using mocks found in ", dir) with_mock_api(expr) } else { verbose_message("Recording responses to ", dir) capture_requests(expr, simplify = simplify) } }) }
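# Usage sketch (illustrative; assumes an httptest-style setup in which
# with_mock_path(), with_mock_api(), capture_requests() and the httr package are
# available): the first run records fixtures into the directory, later runs replay them.
with_mock_dir("example-mocks", {
  resp <- httr::GET("https://httpbin.org/get")
})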
cdf.bundle <- function (bundle, qout = NA, extrap = FALSE,quietly=FALSE) { if (!inherits(bundle, "bundle") && !inherits(bundle, "restricted")) stop("Function needs 'expectreg' estimated by bundle or restricted.") basis = bundle$design np = length(bundle$intercepts) pp <- bundle$asymmetries rst <- (bundle$response - (basis %*% bundle$trend.coef))/(basis %*% bundle$residual.coef) u <- seq(1.2 * min(rst), 1.2 * max(rst), length = 100) m <- length(u) bundle$asymmetry.coef = sort(bundle$asymmetry.coef) A <- matrix(0, np + 1, m) A[np + 1, ] <- 1 for (k in 1:np) { a1 <- (1 - pp[k]) * (u - bundle$asymmetry.coef[k]) * (u <= bundle$asymmetry.coef[k]) a2 <- pp[k] * (u - bundle$asymmetry.coef[k]) * (u > bundle$asymmetry.coef[k]) A[k, ] <- a1 + a2 } D <- diff(diag(m), diff = 2) lambda <- 100 P <- lambda * t(D) %*% D v1 <- solve(t(A) %*% A + P, t(A) %*% c(rep(0, np), 1)) q <- c(rep(0, np), 1) lambda2 <- 1 D2 <- diff(diag(m), diff = 3) P2 <- lambda2 * t(D2) %*% D2 z <- log(v1 - min(v1) + 0.02 * max(v1)) for (it in 1:20) { g <- exp(z) r <- q - A %*% g B <- A * outer(rep(1, np + 1), as.vector(g)) Q <- t(B) %*% B znew <- solve(Q + P2, t(B) %*% r + Q %*% z) dz <- max(abs(z - znew)) z <- znew if(!quietly) cat("iteration: ", it, ", convergence: ", dz, "\n") if (dz < 1e-06) break } dens = g/(u[2] - u[1]) F = cumsum(dens) dens = dens/max(F) F = F/max(F) if (any(is.na(qout))) qout = pp if (extrap) quant <- as.vector(my.approx(F, u, xout = qout, rule = 3)$y) else quant <- as.vector(my.approx(F, u, xout = qout, rule = 2)$y) result = list(x = u, density = dens, cdf = F, quantiles = quant, qout = qout, random = rst) class(result) = c("expectilecdf", "bundledensity") result }
petitr<-function(tabvie,niter=100,eps=1E-07,m=1,alpha=0.05,s=1) { if (!is.data.frame(tabvie)) stop("**** Parameter tabvie must be a data frame ****\n") n=dim(tabvie)[2]-1 if( n %% m != 0) stop ("**** m must divide n ****\n") tablif=xlxmx(tabvie,s) rm=r(tablif,eps) cat("-----------------------------------------------------------------------\n") cat(" petitr : intrinsic rate of increase calculation \n") cat("-----------------------------------------------------------------------\n") cat("\nraw estimator : r= \t",rm,"\n\n") rpart=NULL range=1:n for(i in 1:(n %/% m)) { index=range<((i-1)*m+1) | range>(i*m) index=c(TRUE,index) partlife=xlxmx(tabvie[,index],s) rminus=r(partlife,eps) rpart=c(rpart,rminus) } psv=(n*rm-(n-m)*rpart)/m rmj=mean(psv) se = sqrt(var(psv)) lower = rmj-qnorm(1-alpha)*se upper = rmj+qnorm(1-alpha)*se res=data.frame(t(c(rm,rmj,se,lower,upper))) names(res)=c("rm raw","rm jackknife","se","lower","upper") print(res) invisible(psv) }
heckitrob.control <- function(acc = 1e-04, test.acc = "coef", maxit = 50, maxitO = 50, weights.x1 = c("none", "hat", "robCov", "covMcd"), weights.x2 = c("none", "hat", "robCov", "covMcd"), tcc = 1.345, t.c = 1.345) { if (!is.numeric(acc) || acc <= 0) stop("value of acc must be > 0") if (test.acc != "coef") stop("Only 'test.acc = \"coef\"' is currently implemented") if (!is.numeric(maxit) || maxit <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(maxitO) || maxitO <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(tcc) || tcc <= 0) stop("value of the tuning constant c (tcc) must be > 0") if (!is.numeric(t.c) || t.c <= 0) stop("value of the tuning constant c (t.c) must be > 0") if (!is.character(weights.x1)) stop("choose the implemented method of the weight function") if (!is.character(weights.x2)) stop("choose the implemented method of the weight function") list(acc = acc, test.acc = test.acc, maxit = maxit, maxitO = maxitO, weights.x1 = weights.x1[1], weights.x2 = weights.x2[1], tcc = tcc, t.c = t.c) }
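# Hedged usage sketch (not taken from the package documentation): heckitrob.control() can be
# called on its own to build the control list; all values below are illustrative.
ctrl <- heckitrob.control(acc = 1e-06, maxit = 100, weights.x1 = "hat", weights.x2 = "robCov")
str(ctrl)
# ctrl$weights.x1 is "hat" and ctrl$weights.x2 is "robCov"; the remaining entries keep their defaults.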
isStrictlyNegativeNumberScalar <- function(argument, default = NULL, stopIfNot = FALSE, message = NULL, argumentName = NULL) { checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = FALSE, n = 1, zeroAllowed = FALSE, negativeAllowed = TRUE, positiveAllowed = FALSE, nonIntegerAllowed = TRUE, naAllowed = FALSE, nanAllowed = FALSE, infAllowed = FALSE, message = message, argumentName = argumentName) }
context("parameter objects") test_that('param ranges', { expect_equal(frac_common_cov(c(.1, .5))$range, list(lower = .1, upper = .5)) expect_equal(frac_identity(c(.1, .5))$range, list(lower = .1, upper = .5)) expect_equal(smoothness(c(.1, .5))$range, list(lower = .1, upper = .5)) })
compute_p <- function(test_type, test_stat, df1, df2, two_tailed){ stopifnot(test_type %in% c("t", "F", "Z", "r", "Chi2", "Q", "Qb", "Qw")) if(test_type == "t"){ computed <- stats::pt(-1 * abs(test_stat), df2) } else if(test_type == "F"){ computed <- stats::pf(test_stat, df1, df2, lower.tail = FALSE) } else if(test_type == "Z"){ computed <- stats::pnorm(abs(test_stat), lower.tail = FALSE) } else if(test_type == "r"){ t <- r2t(test_stat, df2) computed <- stats::pt(-1 * abs(t), df2) } else if(test_type == "Chi2" | test_type == "Q" | test_type == "Qb" | test_type == "Qw"){ computed <- stats::pchisq(test_stat, df1, lower.tail = FALSE) } if (!is.na(computed) & (test_type == "t" | test_type == "Z" | test_type == "r") & two_tailed) { computed <- computed * 2 } return(computed) } r2t <- function(r, df){ t <- r / (sqrt((1 - r^2) / df)) return(t) }
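# Hedged usage sketch for compute_p()/r2t(): the statistics below are made up for illustration.
# A two-tailed t-test with t = 2.1 on 28 degrees of freedom:
compute_p("t", test_stat = 2.1, df1 = NA, df2 = 28, two_tailed = TRUE)   # ~0.045, i.e. 2 * pt(-2.1, 28)
# A correlation of r = 0.40 with df = 28 is first converted to a t statistic via r2t():
r2t(0.40, 28)                                                            # ~2.31
compute_p("r", test_stat = 0.40, df1 = NA, df2 = 28, two_tailed = TRUE)  # ~0.03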
sim_simName <- function(simSetup, name) { slot(simSetup, "simName") <- name simSetup }
prep_log_AUC <- function(dat, x_axis, log_base = 2, type = "adjust", correction = 1, dec_offset = TRUE) { { if (!tibble::is_tibble(dat)) { stop("dat must be a tibble") } if (!base::is.character(x_axis)) { base::stop("x_axis must be a string indicating the x-axis variable") } if (!base::is.numeric(log_base) | (log_base <= 0)) { base::stop("log_base must be a number greater than 0") } if (!(type %in% (c("corr", "adjust", "IHS")))) { base::stop("type must be a string of either \"corr\", \"adjust\", or \"IHS\".") } if (!base::is.numeric(correction) | (correction <= 0)) { base::stop("correction must be a number greater than 0") } if (!base::is.logical(dec_offset)) { base::stop("dec_offset must be either TRUE or FALSE") } } x_vals <- dat %>% dplyr::pull({{ x_axis }}) %>% base::unique() log_vals <- dplyr::tibble(orig = x_vals) %>% dplyr::arrange(.data$orig) %>% dplyr::mutate( log_val = base::log(.data$orig, log_base), log_diff = .data$log_val - dplyr::lag(.data$log_val) ) if (type == "adjust") { mean_diff <- log_vals %>% dplyr::filter(!base::is.na(.data$log_diff) & !base::is.infinite(.data$log_diff)) %>% dplyr::pull(.data$log_diff) %>% base::mean() } else if (type == "corr") { mean_diff <- 1 } inc_zero <- base::min(x_vals) == 0 if (!inc_zero) { mean_diff <- 0 } if (dec_offset) { log_offset <- log_vals %>% dplyr::filter(!base::is.infinite(.data$log_val)) %>% dplyr::pull(.data$log_val) %>% base::min() if (log_offset < 0) { log_offset <- base::abs(log_offset) } else { log_offset <- 0 } } else { log_offset <- 0 } if (type == "adjust") { log_vals <- log_vals %>% dplyr::mutate(log_val_adj = .data$log_val + log_offset + mean_diff) } else if (type == "corr") { log_vals <- log_vals %>% dplyr::mutate(log_val_adj = base::log(.data$orig + correction, base = log_base )) } else if (type == "IHS") { log_vals <- log_vals %>% dplyr::mutate(log_val_adj = base::asinh(.data$orig)) } if (inc_zero) { log_vals[1, "log_val_adj"] <- 0 } new_col <- glue::glue("log_{x_axis}") log_vals <- log_vals %>% dplyr::select(.data$orig, .data$log_val_adj) %>% dplyr::rename( {{ x_axis }} := "orig", {{ new_col }} := "log_val_adj" ) dat <- dplyr::left_join(dat, log_vals, by = x_axis ) base::return(dat) }
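# Hedged usage sketch for prep_log_AUC(): 'dose' and 'resp' are invented column names and the
# tibble below is toy data. With a doubling dose series that includes 0, the default
# type = "adjust" maps dose 0 to 0 and doses 1, 2, 4, 8 to 1, 2, 3, 4 on the log2 scale.
toy <- tibble::tibble(dose = c(0, 1, 2, 4, 8), resp = c(0.1, 0.3, 0.5, 0.8, 0.9))
prep_log_AUC(toy, x_axis = "dose")   # adds a 'log_dose' column: 0, 1, 2, 3, 4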
expected <- eval(parse(text="1")); test(id=0, code={ argv <- list(); do.call(`prod`, argv); }, o=expected);
relativeWeights = function(price, index.weights) { relative_weights = (price * index.weights) / sum(price * index.weights) return(relative_weights) }
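# Hedged usage sketch: the prices and index weights below are invented. Each relative weight is
# price * weight, normalised so the result sums to 1.
relativeWeights(price = c(10, 20, 30), index.weights = c(0.5, 0.3, 0.2))
# -> 0.294, 0.353, 0.353  (i.e. c(5, 6, 6) / 17)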
plot.see_check_collinearity <- function(x, data = NULL, colors = c("#3aaf85", "#1b6ca8", "#cd201f"), ...) {
  # NOTE: the default 'colors' vector was truncated in the source; the palette above (one colour
  # each for low/moderate/high VIF) is a reconstruction and may differ from the package default.
  if (is.null(data)) {
    dat <- datawizard::compact_list(.retrieve_data(x))
  } else {
    dat <- data
  }
  if (is.null(dat)) {
    return(NULL)
  }
  dat$group <- "low"
  dat$group[dat$VIF >= 5 & dat$VIF < 10] <- "moderate"
  dat$group[dat$VIF >= 10] <- "high"
  if (ncol(dat) == 5) {
    colnames(dat) <- c("x", "y", "se", "facet", "group")
    dat <- dat[, c("x", "y", "facet", "group")]
  } else {
    colnames(dat) <- c("x", "y", "se", "group")
    dat <- dat[, c("x", "y", "group")]
  }
  if (length(unique(dat$facet)) == 1) {
    dat <- dat[, -which(colnames(dat) == "facet")]
  }
  .plot_diag_vif(dat, colors = colors)
}
createConstraints <- function(whichConstraint) { if (length(whichConstraint) == 1) { createConstraint(whichConstraint = whichConstraint) } else if (length(whichConstraint) > 1) { constraints <- data.frame() for (cons in seq_along(whichConstraint)) { constraint <- createConstraint(whichConstraint[cons]) constraints <- rbind(constraints, constraint) } return(constraints) } } createConstraint <- function(whichConstraint) { constraintEffect <- names(whichConstraint) constraintEffect <- cleanName(constraintEffect) constraintElement <- gsub("\\s", "", whichConstraint[1]) if (grepl("<", constraintElement, fixed = TRUE)) { constraintUpper <- sub(".*<", "", constraintElement) constraintLower <- sub("<.*", "", constraintElement) } else if (grepl(">", constraintElement, fixed = TRUE)) { constraintUpper <- sub(">.*", "", constraintElement) constraintLower <- sub(".*>", "", constraintElement) } return(data.frame(constraintEffect = constraintEffect, constraintElement = constraintElement, constraintUpper = constraintUpper, constraintLower = constraintLower)) }
cumulated <- function(x, ...) { UseMethod("cumulated") } cumulated.default <- function(x, method = c("continuous", "discrete", "compound", "simple"), percentage = FALSE, ...) { stopifnot(is.timeSeries(x)) method <- match.arg(method) Title <- x@title Documentation <- x@documentation if (percentage) x <- x/100 positions <- time(x) if(method == "geometric") { ans <- colCumsums(x) } if(method == "compound" || method == "continuous") { ans <- exp(colCumsums(x)) } if(method == "simple" || method == "discrete") { ans <- colCumprods(1+x) } ans@title <- Title ans@documentation <- Documentation ans }
peacock3 <- function(x, y) { xx <- as.matrix(x) yy <- as.matrix(y) n1 <- nrow(xx) n2 <- nrow(yy) n <- n1 + n2 dd <- gcd(n1, n2) L <- n1/dd*n2 d1 <- L/n1 d2 <- L/n2 dim1 <- ncol(xx) dim2 <- ncol(yy) dmin <- min(dim1, dim2) if( dmin < 3 ) stop('The dimensions of both samples should be at least three') xy1 <- c(xx[,1], yy[,1]) xy2 <- c(xx[,2], yy[,2]) xy3 <- c(xx[,3], yy[,3]) I1 <- order( xy1 ) I2 <- order( xy2 ) I3 <- order( xy3 ) max_hnnn <- 0 max_hnpn <- 0 max_hpnn <- 0 max_hppn <- 0 max_hnnp <- 0 max_hnpp <- 0 max_hpnp <- 0 max_hppp <- 0 for(zu in xy1[I1]){ for(zv in xy2[I2]){ hnnn <- 0 hnpn <- 0 hpnn <- 0 hppn <- 0 t <- 1 while( t <= n ){ w <- I3[t] if(xy1[w] <= zu){ if(xy2[w] <= zv){ if(w <= n1) hnnn <- hnnn + d1 else hnnn <- hnnn - d2 max_hnnn <- max(max_hnnn, abs(hnnn)) }else{ if(w <= n1) hnpn <- hnpn + d1 else hnpn <- hnpn - d2 max_hnpn <- max(max_hnpn, abs(hnpn)) } }else{ if(xy2[w] <= zv){ if(w <= n1) hpnn <- hpnn + d1 else hpnn <- hpnn - d2 max_hpnn <- max(max_hpnn, abs(hpnn)) }else{ if(w <= n1) hppn <- hppn + d1 else hppn <- hppn - d2 max_hppn <- max(max_hppn, abs(hppn)) } } t <- t + 1 } hnnp <- 0 hnpp <- 0 hpnp <- 0 hppp <- 0 t <- n while( t > 1 ){ w <- I3[t] if(xy1[w] <= zu){ if(xy2[w] <= zv){ if(w <= n1) hnnp <- hnnp + d1 else hnnp <- hnnp - d2 max_hnnp <- max(max_hnnp, abs(hnnp)) }else{ if(w <= n1) hnpp <- hnpp + d1 else hnpp <- hnpp - d2 max_hnpp <- max(max_hnpp, abs(hnpp)) } }else{ if(xy2[w] <= zv){ if(w <= n1) hpnp <- hpnp + d1 else hpnp <- hpnp - d2 max_hpnp <- max(max_hpnp, abs(hpnp)) }else{ if(w <= n1) hppp <- hppp + d1 else hppp <- hppp - d2 max_hppp <- max(max_hppp, abs(hppp)) } } t <- t - 1 } } } return (max(max_hnnn, max_hnpn, max_hpnn, max_hppn, max_hnnp, max_hnpp, max_hpnp, max_hppp)/L); }
require(PortfolioEffectHFT) portfolio=portfolio_create(fromTime="2014-04-13 9:30:01", toTime="2014-04-16 16:00:00") portfolio_settings(portfolio,windowLength = '360m',portfolioMetricsMode='price') positionGOOG=position_add(portfolio,'GOOG',1) positionAAPL=position_add(portfolio,'AAPL',1) positionC=position_add(portfolio,'C',1) plot(portfolio) portfolio_settings(portfolio,resultsSamplingInterval='last') resultLintner=data.frame(Variance=0,ExpectedReturn=0) for(x in seq(0.004,0.016,0.004)){ optimizer=optimization_goal(variance(portfolio),"min") optimizer=optimization_constraint(optimizer,value(portfolio),'=',10^9) optimizer=optimization_constraint(optimizer,expected_return(portfolio),"=",x) optimPortfolio=optimization_run(optimizer) resultLintner=rbind(resultLintner,c(compute(variance(optimPortfolio))[[1]][2],compute(expected_return(optimPortfolio))[[1]][2])) } resultLintner=resultLintner[-1,] resultLintner=data.frame(Variance=spline(resultLintner$Variance, n=100)$y, ExpectedReturn=spline(resultLintner$ExpectedReturn, n=100)$y) ggplot()+geom_path(data=resultLintner, aes(x=Variance,y=ExpectedReturn),size=1.2)+util_plotTheme()+ggtitle("Efficient Frontier")+ylab("Expected Return") resultLintner3000Portfolio=data.frame(Variance=0,ExpectedReturn=0) for(x in seq(0.004,0.016,0.004)){ optimizer=optimization_goal(variance(portfolio),"min") optimizer=optimization_constraint(optimizer,value(portfolio),'=',3000) optimizer=optimization_constraint(optimizer,expected_return(portfolio),"=",x) optimPortfolio=optimization_run(optimizer) resultLintner3000Portfolio=rbind(resultLintner3000Portfolio,c(compute(variance(optimPortfolio))[[1]][2],compute(expected_return(optimPortfolio))[[1]][2])) } resultLintner3000Portfolio=resultLintner3000Portfolio[-1,] resultLintner20000Portfolio=data.frame(Variance=0,ExpectedReturn=0) for(x in seq(0.004,0.016,0.004)){ optimizer=optimization_goal(variance(portfolio),"min") optimizer=optimization_constraint(optimizer,value(portfolio),'=',20000) optimizer=optimization_constraint(optimizer,expected_return(portfolio),"=",x) optimPortfolio=optimization_run(optimizer) resultLintner20000Portfolio=rbind(resultLintner20000Portfolio,c(compute(variance(optimPortfolio))[[1]][2],compute(expected_return(optimPortfolio))[[1]][2])) } resultLintner20000Portfolio=resultLintner20000Portfolio[-1,] resultLintner3000Portfolio$legend="$3000 Portfolio" resultLintner20000Portfolio$legend="$20000 Portfolio" resultLintner$legend="Theoretical Portfolio" result=rbind(resultLintner3000Portfolio,resultLintner20000Portfolio,resultLintner) ggplot()+geom_path(data=result, aes(x=Variance,y=ExpectedReturn,col=legend),size=1.2)+ util_plotTheme()+ggtitle("Efficient Frontier of Theoretical/$20000/$3000 portfolio")+ ylab("Expected Return")+util_colorScheme() portfolio_settings(portfolio, windowLength = '360m', resultsSamplingInterval='last', shortSalesMode = 'markowitz',portfolioMetricsMode='price') resultMarkowitz=data.frame(Variance=0,ExpectedReturn=0) for(x in seq(0.004,0.016,0.004)){ optimizer=optimization_goal(variance(portfolio),"min") optimizer=optimization_constraint(optimizer,value(portfolio),'=',10^9) optimizer=optimization_constraint(optimizer,expected_return(portfolio),"=",x) optimPortfolio=optimization_run(optimizer) resultMarkowitz=rbind(resultMarkowitz,c(compute(variance(optimPortfolio))[[1]][2],compute(expected_return(optimPortfolio))[[1]][2])) } resultMarkowitz=resultMarkowitz[-1,] resultMarkowitz=data.frame(Variance=spline(resultMarkowitz$Variance, n=100)$y, 
ExpectedReturn=spline(resultMarkowitz$ExpectedReturn, n=100)$y) resultMarkowitz$legend="Markowitz" resultLintner$legend="Lintner" result=rbind(resultMarkowitz,resultLintner) ggplot()+geom_path(data=result, aes(x=Variance,y=ExpectedReturn,col=legend),size=1.2)+ util_plotTheme()+ggtitle("Markowitz and Lintner Efficient Frontier")+ylab("Expected Return")+ util_colorScheme()
write_fitted_parameters <- function(model, parhistory, csv.output) { setup <- elt(model, "setup") read.only <- elt(setup, "read.only") model.path <- elt(setup, "model.path") identifier <- elt(setup, "model.ident") if (read.only & csv.output==TRUE) { message("Warning: cannot write fitted parameters back to the model input folders - model is read-only") message("Warning: to fix this, make a copy of the model using e2e_copy() into your own workspace.") stop("Model is not writable!") } preference_matrix_input <- get.model.file(model.path, PARAMETERS_DIR, file.pattern=FITTED_PARAMETERS_PREFERENCE, row.names=1) uptake_mort_input <- get.model.file(model.path, PARAMETERS_DIR, file.pattern=FITTED_PARAMETERS_UPTAKE_MORT) microbiology_input <- get.model.file(model.path, PARAMETERS_DIR, file.pattern=FITTED_PARAMETERS_MICROBIOLOGY) preference_matrix_input_NEW<-preference_matrix_input uptake_mort_input_NEW<-uptake_mort_input microbiology_input_NEW<-microbiology_input ROW_TO_USE <- nrow(parhistory) preference_matrix_input_NEW$kelp[which(rownames(preference_matrix_input_NEW)=="nitrate")] <- parhistory$PREF_NIT_kelp[ROW_TO_USE] preference_matrix_input_NEW$kelp[which(rownames(preference_matrix_input_NEW)=="ammonia")] <- parhistory$PREF_AMM_kelp[ROW_TO_USE] preference_matrix_input_NEW$phyt[which(rownames(preference_matrix_input_NEW)=="nitrate")] <- parhistory$PREF_NIT_phyt[ROW_TO_USE] preference_matrix_input_NEW$phyt[which(rownames(preference_matrix_input_NEW)=="ammonia")] <- parhistory$PREF_AMM_phyt[ROW_TO_USE] preference_matrix_input_NEW$omnivzoo[which(rownames(preference_matrix_input_NEW)=="phyt")] <- parhistory$PREF_phyt_omni[ROW_TO_USE] preference_matrix_input_NEW$omnivzoo[which(rownames(preference_matrix_input_NEW)=="suspdet")] <- parhistory$PREF_det_omni[ROW_TO_USE] preference_matrix_input_NEW$omnivzoo[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_omni[ROW_TO_USE] preference_matrix_input_NEW$omnivzoo[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_omni[ROW_TO_USE] preference_matrix_input_NEW$carnzoo[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_carn[ROW_TO_USE] preference_matrix_input_NEW$carnzoo[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_carn[ROW_TO_USE] preference_matrix_input_NEW$carnzoo[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_carn[ROW_TO_USE] preference_matrix_input_NEW$carnzoo[which(rownames(preference_matrix_input_NEW)=="fishplar")] <- parhistory$PREF_fishplar_carn[ROW_TO_USE] preference_matrix_input_NEW$carnzoo[which(rownames(preference_matrix_input_NEW)=="fishdlar")] <- parhistory$PREF_fishdlar_carn[ROW_TO_USE] preference_matrix_input_NEW$fishplar[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_fishplar[ROW_TO_USE] preference_matrix_input_NEW$fishplar[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_fishplar[ROW_TO_USE] preference_matrix_input_NEW$fishplar[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_fishplar[ROW_TO_USE] preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_fishp[ROW_TO_USE] preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_fishp[ROW_TO_USE] 
preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_fishp[ROW_TO_USE] preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_fishp[ROW_TO_USE] preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="fishdlar")] <- parhistory$PREF_fishdlar_fishp[ROW_TO_USE] preference_matrix_input_NEW$fishp[which(rownames(preference_matrix_input_NEW)=="fishplar")] <- parhistory$PREF_fishplar_fishp[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="fishdlar")] <- parhistory$PREF_fishdlar_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishm[which(rownames(preference_matrix_input_NEW)=="fishplar")] <- parhistory$PREF_fishplar_fishm[ROW_TO_USE] preference_matrix_input_NEW$fishdlar[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_fishdlar[ROW_TO_USE] preference_matrix_input_NEW$fishdlar[which(rownames(preference_matrix_input_NEW)=="benthslar")] <- parhistory$PREF_benthslar_fishdlar[ROW_TO_USE] preference_matrix_input_NEW$fishdlar[which(rownames(preference_matrix_input_NEW)=="benthclar")] <- parhistory$PREF_benthclar_fishdlar[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="benths")] <- parhistory$PREF_benths_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="benthc")] <- parhistory$PREF_benthc_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="fishplar")] <- parhistory$PREF_fishplar_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="fishdlar")] <- parhistory$PREF_fishdlar_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="fishp")] <- parhistory$PREF_fishp_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="fishm")] <- parhistory$PREF_fishm_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="fishd")] <- parhistory$PREF_fishd_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="discards")] <- parhistory$PREF_disc_fishd[ROW_TO_USE] preference_matrix_input_NEW$fishd[which(rownames(preference_matrix_input_NEW)=="corpses")] <- parhistory$PREF_corp_fishd[ROW_TO_USE] preference_matrix_input_NEW$benthslar[which(rownames(preference_matrix_input_NEW)=="phyt")] <- parhistory$PREF_phyt_benthslar[ROW_TO_USE] preference_matrix_input_NEW$benthclar[which(rownames(preference_matrix_input_NEW)=="phyt")] <- parhistory$PREF_phyt_benthclar[ROW_TO_USE] preference_matrix_input_NEW$benthslar[which(rownames(preference_matrix_input_NEW)=="suspdet")] <- 
parhistory$PREF_det_benthslar[ROW_TO_USE] preference_matrix_input_NEW$benthclar[which(rownames(preference_matrix_input_NEW)=="suspdet")] <- parhistory$PREF_det_benthclar[ROW_TO_USE] preference_matrix_input_NEW$benths[which(rownames(preference_matrix_input_NEW)=="phyt")] <- parhistory$PREF_phyt_benths[ROW_TO_USE] preference_matrix_input_NEW$benths[which(rownames(preference_matrix_input_NEW)=="suspdet")] <- parhistory$PREF_det_benths[ROW_TO_USE] preference_matrix_input_NEW$benths[which(rownames(preference_matrix_input_NEW)=="seddet")] <- parhistory$PREF_sed_benths[ROW_TO_USE] preference_matrix_input_NEW$benthc[which(rownames(preference_matrix_input_NEW)=="kelp")] <- parhistory$PREF_kelp_benthc[ROW_TO_USE] preference_matrix_input_NEW$benthc[which(rownames(preference_matrix_input_NEW)=="kelpdebris")] <- parhistory$PREF_kelpdebris_benthc[ROW_TO_USE] preference_matrix_input_NEW$benthc[which(rownames(preference_matrix_input_NEW)=="benths")] <- parhistory$PREF_benths_benthc[ROW_TO_USE] preference_matrix_input_NEW$benthc[which(rownames(preference_matrix_input_NEW)=="corpses")] <- parhistory$PREF_corp_benthc[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="benths")] <- parhistory$PREF_benths_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="benthc")] <- parhistory$PREF_benthc_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="fishp")] <- parhistory$PREF_fishp_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="fishm")] <- parhistory$PREF_fishm_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="fishd")] <- parhistory$PREF_fishd_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="discards")] <- parhistory$PREF_disc_bird[ROW_TO_USE] preference_matrix_input_NEW$bird[which(rownames(preference_matrix_input_NEW)=="corpses")] <- parhistory$PREF_corp_bird[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="benths")] <- parhistory$PREF_benths_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="benthc")] <- parhistory$PREF_benthc_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="fishp")] <- parhistory$PREF_fishp_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="fishm")] <- parhistory$PREF_fishm_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="fishd")] <- parhistory$PREF_fishd_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="discards")] <- parhistory$PREF_disc_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="corpses")] <- parhistory$PREF_corp_seal[ROW_TO_USE] preference_matrix_input_NEW$seal[which(rownames(preference_matrix_input_NEW)=="bird")] <- parhistory$PREF_bird_seal[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="omnivzoo")] <- parhistory$PREF_omni_ceta[ROW_TO_USE] 
preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="carnzoo")] <- parhistory$PREF_carn_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="benths")] <- parhistory$PREF_benths_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="benthc")] <- parhistory$PREF_benthc_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="fishp")] <- parhistory$PREF_fishp_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="fishm")] <- parhistory$PREF_fishm_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="fishd")] <- parhistory$PREF_fishd_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="discards")] <- parhistory$PREF_disc_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="bird")] <- parhistory$PREF_bird_ceta[ROW_TO_USE] preference_matrix_input_NEW$ceta[which(rownames(preference_matrix_input_NEW)=="seal")] <- parhistory$PREF_seal_ceta[ROW_TO_USE] uptake_mort_input_NEW$Cumax[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$uC_kelp[ROW_TO_USE] uptake_mort_input_NEW$Cddexud[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$ddexudC_kelp[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$u_kelp[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="phyt_s")] <- parhistory$u_phyt[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="omnivzoo")] <- parhistory$u_omni[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="carnzoo")] <- parhistory$u_carn[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="fishplar")] <- parhistory$u_fishplar[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="fishp")] <- parhistory$u_fishp[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="fishm")] <- parhistory$u_fishm[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="fishdlar")] <- parhistory$u_fishdlar[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="fishd")] <- parhistory$u_fishd[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="benthslar")] <- parhistory$u_benthslar[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="benthclar")] <- parhistory$u_benthclar[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="benths")] <- parhistory$u_benths[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="benthc")] <- parhistory$u_benthc[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$u_bird[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$u_seal[ROW_TO_USE] uptake_mort_input_NEW$Numax[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$u_ceta[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$h_kelp[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="phyt_s")] <- parhistory$h_phyt[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="omnivzoo")] <- parhistory$h_omni[ROW_TO_USE] 
uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="carnzoo")] <- parhistory$h_carn[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="fishplar")] <- parhistory$h_fishplar[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="fishp")] <- parhistory$h_fishp[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="fishm")] <- parhistory$h_fishm[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="fishdlar")] <- parhistory$h_fishdlar[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="fishd")] <- parhistory$h_fishd[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="benthslar")] <- parhistory$h_benthslar[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="benthclar")] <- parhistory$h_benthclar[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="benths")] <- parhistory$h_benths[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="benthc")] <- parhistory$h_benthc[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$h_bird[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$h_seal[ROW_TO_USE] uptake_mort_input_NEW$Nhsat[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$h_ceta[ROW_TO_USE] uptake_mort_input_NEW$BdeApar[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$bda_par_bird[ROW_TO_USE] uptake_mort_input_NEW$BdeApar[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$bda_par_seal[ROW_TO_USE] uptake_mort_input_NEW$BdeApar[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$bda_par_ceta[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$xxwave_kelp[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="phyt_s")] <- parhistory$xxst[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="phyt_d")] <- parhistory$xxdt[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="omnivzoo")] <- parhistory$xxomni[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="carnzoo")] <- parhistory$xxcarn[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="benthslar")] <- parhistory$xxbenthslar[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="benthclar")] <- parhistory$xxbenthclar[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="benths")] <- parhistory$xxbenths[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="benthc")] <- parhistory$xxbenthc[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="fishplar")] <- parhistory$xxpfishlar[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="fishdlar")] <- parhistory$xxdfishlar[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="fishp")] <- parhistory$xxpfish[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="fishm")] <- parhistory$xxmfish[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="fishd")] <- parhistory$xxdfish[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$xxbird[ROW_TO_USE] 
uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$xxseal[ROW_TO_USE] uptake_mort_input_NEW$ddmort[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$xxceta[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="fishp")] <- parhistory$xpfish_migcoef[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="fishm")] <- parhistory$xmfish_migcoef[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="fishd")] <- parhistory$xdfish_migcoef[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$xbird_migcoef[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$xseal_migcoef[ROW_TO_USE] uptake_mort_input_NEW$migration_coef[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$xceta_migcoef[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="kelp")] <- parhistory$xmax_exploitable_f_KP[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="fishp")] <- parhistory$xmax_exploitable_f_PF[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="fishd")] <- parhistory$xmax_exploitable_f_DF[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="fishm")] <- parhistory$xmax_exploitable_f_MF[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="benths")] <- parhistory$xmax_exploitable_f_SB[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="benthc")] <- parhistory$xmax_exploitable_f_CB[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="carnzoo")] <- parhistory$xmax_exploitable_f_CZ[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="bird")] <- parhistory$xmax_exploitable_f_BD[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="seal")] <- parhistory$xmax_exploitable_f_SL[ROW_TO_USE] uptake_mort_input_NEW$max_exploitable_f[which(uptake_mort_input_NEW$consumer=="ceta")] <- parhistory$xmax_exploitable_f_CT[ROW_TO_USE] microbiology_input_NEW$Value[1] <- parhistory$xmt[ROW_TO_USE] microbiology_input_NEW$Value[2] <- parhistory$xnst[ROW_TO_USE] microbiology_input_NEW$Value[3] <- parhistory$xdst[ROW_TO_USE] microbiology_input_NEW$Value[4] <- parhistory$xndt[ROW_TO_USE] microbiology_input_NEW$Value[5] <- parhistory$xddt[ROW_TO_USE] microbiology_input_NEW$Value[6] <- parhistory$xqs_p1[ROW_TO_USE] microbiology_input_NEW$Value[7] <- parhistory$xqs_p2[ROW_TO_USE] microbiology_input_NEW$Value[8] <- parhistory$xqs_p3[ROW_TO_USE] microbiology_input_NEW$Value[9] <- parhistory$xmsedt[ROW_TO_USE] microbiology_input_NEW$Value[10] <- parhistory$xmsens[ROW_TO_USE] microbiology_input_NEW$Value[11] <- parhistory$xnsedt[ROW_TO_USE] microbiology_input_NEW$Value[12] <- parhistory$xnsens[ROW_TO_USE] microbiology_input_NEW$Value[13] <- parhistory$xdsedt[ROW_TO_USE] microbiology_input_NEW$Value[14] <- parhistory$xdsens[ROW_TO_USE] microbiology_input_NEW$Value[15] <- parhistory$xdisc_corp[ROW_TO_USE] microbiology_input_NEW$Value[16] <- parhistory$xxcorp_det[ROW_TO_USE] microbiology_input_NEW$Value[17] <- parhistory$xkelpdebris_det[ROW_TO_USE] microbiology_input_NEW$Value[18] <- parhistory$xdsink_s[ROW_TO_USE] microbiology_input_NEW$Value[19] <- 
parhistory$xdsink_d[ROW_TO_USE] microbiology_input_NEW$Value[20] <- parhistory$xkelpshade[ROW_TO_USE] microbiology_input_NEW$Value[21] <- parhistory$xwave_kelpdebris[ROW_TO_USE] microbiology_input_NEW$Value[22] <- parhistory$xdfdp[ROW_TO_USE] parameterpath <- makepath(model.path, PARAMETERS_DIR) if(read.only==FALSE & csv.output==TRUE) { filename = csvname(parameterpath, "fitted_preference_matrix", identifier) writecsv(preference_matrix_input_NEW, filename, row.names=TRUE) print(paste("Writing preference matrix file :",filename)) } if(read.only==FALSE & csv.output==TRUE) { filename = csvname(parameterpath, "fitted_uptake_mort_rates", identifier) writecsv(uptake_mort_input_NEW, filename, row.names=FALSE) print(paste("Writing uptake and mortality parameter file :",filename)) } if(read.only==FALSE & csv.output==TRUE) { filename = csvname(parameterpath, "fitted_microbiology_others", identifier) writecsv(microbiology_input_NEW, filename, row.names=FALSE) print(paste("Writing microbiology parameter file :",filename)) } new_parameter_set<-list(new_preference_matrix = preference_matrix_input_NEW, new_uptake_mort_rate_parameters = uptake_mort_input_NEW, new_microbiology_parameters = microbiology_input_NEW) new_parameter_set }
context("64bit-support") test_that("Dataset with more than 2^31 rows", { large_space <- H5S$new(type="simple", dim=as.integer64(2)^33) large_space$select_hyperslab(start=1, count=1, stride=1, block=as.integer64(2)^32) expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(1, 2^32), c("block_1_start", "block_1_end"))) large_space$select_none() large_space[2:2^32] expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(2, 2^32), c("block_1_start", "block_1_end"))) large_space$select_none() large_space[as.integer64(2):(as.integer64(2)^32)] expect_equal(large_space$get_select_hyper_blocklist()[,1], setNames(c(2, 2^32), c("block_1_start", "block_1_end"))) test_file <- tempfile(fileext=".h5") file.h5 <- H5File$new(test_file, mode="w") large_ds <- file.h5$create_dataset("large_ds", dtype=h5types$H5T_NATIVE_INT, space=large_space, chunk_dims=1e6, gzip_level = 4) large_ds[(2^32-10):(2^32+10)] <- 1:21 expect_equal(large_ds[(2^32-20):(2^32+20)], c(rep(0L,10), 1:21, rep(0L, 10))) file.h5$close_all() file.remove(test_file) }) truncateVec <- function(x, min, max) { x[!is.na(x) & x < min] <- min x[!is.na(x) & x > max] <- max return(x) } value_LLONG_MAX <- function() { res <- bit64::as.integer64(0) for(i in 0:62) { res <- res + bit64::as.integer64(2)^i } return(res) } test_that("Datatype conversion with 64bit", { LLONG_MAX <- value_LLONG_MAX() dtype_uint64 <- h5types$H5T_NATIVE_ULLONG dtype_int64 <- h5types$H5T_NATIVE_LLONG dbl_vec_pos <- c(1, 2, 2^31-1, 2^31, 2^32, 2^33, 2^62, 2^63, 1.5 * 2^63, 2^65) dbl_vec <- c(-dbl_vec_pos, dbl_vec_pos) dbl_vec_int64 <- suppressWarnings(bit64::as.integer64(dbl_vec)) res_dbl_uint64_default <- hdf5r:::convertRoundTrip(dbl_vec, dtype_uint64, flags=h5const$H5TOR_CONV_NONE) res_dbl_uint64_na <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_uint64, flags=h5const$H5TOR_CONV_UINT64_NA)) res_dbl_uint64_force <- hdf5r:::convertRoundTrip(dbl_vec, dtype_uint64, flags=h5const$H5TOR_CONV_INT64_FLOAT_FORCE) dbl_vec_int64_trunc <- suppressWarnings(bit64::as.integer64(dbl_vec)) dbl_vec_int64_trunc[dbl_vec < 0] <- 0 dbl_vec_int64_trunc[is.na(dbl_vec_int64_trunc)] <- LLONG_MAX dbl_vec_int64_na <- suppressWarnings(bit64::as.integer64(dbl_vec)) suppressWarnings({ dbl_vec_int64_na[18] <- dbl_vec_int64_na[18] + 1 }) dbl_vec_int64_na[dbl_vec < 0] <- 0 expect_equal(suppressWarnings(truncateVec(dbl_vec_int64_trunc, 0, LLONG_MAX)), res_dbl_uint64_default$output) expect_equal(suppressWarnings(truncateVec(dbl_vec_int64_na, 0, LLONG_MAX)), res_dbl_uint64_na$output) dbl_vec_force <- truncateVec(dbl_vec, 0, 2^64) expect_equal(dbl_vec_force, res_dbl_uint64_force$output) res_dbl_int64_default <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_NONE)) expect_equal(dbl_vec_int64, res_dbl_int64_default$output) res_dbl_int64_int_noloss_short_int <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^31], dtype_int64, flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS) expect_equal(as.integer(dbl_vec[abs(dbl_vec) < 2^31]), res_dbl_int64_int_noloss_short_int$output) res_dbl_int64_int_noloss_short_int_withNA <- hdf5r:::convertRoundTrip(c(dbl_vec[abs(dbl_vec) < 2^31], NA), dtype_int64, flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS) expect_equal(as.integer(c(dbl_vec[abs(dbl_vec) < 2^31], NA)), res_dbl_int64_int_noloss_short_int_withNA$output) res_dbl_int64_int_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_INT_NOLOSS)) expect_equal(dbl_vec_int64, res_dbl_int64_int_noloss$output) 
res_dbl_int64_float_noloss_short_float <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^51], dtype_int64, flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS) expect_equal(as.numeric(dbl_vec[abs(dbl_vec) < 2^51]), res_dbl_int64_float_noloss_short_float$output) res_dbl_int64_float_noloss_short_float_withNA <- hdf5r:::convertRoundTrip(c(dbl_vec[abs(dbl_vec) < 2^51], NA), dtype_int64, flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS) expect_equal(as.numeric(c(dbl_vec[abs(dbl_vec) < 2^51], NA)), res_dbl_int64_float_noloss_short_float_withNA$output) res_dbl_int64_float_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_FLOAT_NOLOSS)) expect_equal(dbl_vec_int64, res_dbl_int64_int_noloss$output) res_dbl_int64_noloss <- suppressWarnings(hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_NOLOSS)) expect_equal(dbl_vec_int64, res_dbl_int64_noloss$output) res_dbl_int64_noloss_short_int <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^31], dtype_int64, flags=h5const$H5TOR_CONV_INT64_NOLOSS) expect_equal(as.integer(dbl_vec[abs(dbl_vec) < 2^31]), res_dbl_int64_noloss_short_int$output) res_dbl_int64_noloss_short_float <- hdf5r:::convertRoundTrip(dbl_vec[abs(dbl_vec) < 2^51], dtype_int64, flags=h5const$H5TOR_CONV_INT64_NOLOSS) expect_equal(as.numeric(dbl_vec[abs(dbl_vec) < 2^51]), res_dbl_int64_noloss_short_float$output) suppressWarnings(res_dbl_int64_force <- hdf5r:::convertRoundTrip(dbl_vec, dtype_int64, flags=h5const$H5TOR_CONV_INT64_FLOAT_FORCE)) expect_equal(suppressWarnings(as.numeric(dbl_vec_int64)), res_dbl_int64_force$output) })
knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width = 7, fig.align = "center" ) if (!requireNamespace("microbenchmark", quietly = TRUE)) { stop("Package \"microbenchmark\" needed for this vignette to work. Please install it.", call. = FALSE) } library(EFAtools) DOSPERT_sub <- DOSPERT_raw[1:500,] BARTLETT(DOSPERT_sub) KMO(DOSPERT_sub) PARALLEL(DOSPERT_sub, eigen_type = "SMC") print(PARALLEL(DOSPERT_sub, eigen_type = "SMC"), plot = FALSE) print(EKC(DOSPERT_sub), plot = FALSE) N_FACTORS(DOSPERT_sub, criteria = c("PARALLEL", "EKC", "SMT"), eigen_type_other = c("SMC", "PCA")) N_FACTORS(DOSPERT_sub, method = "ULS") N_FACTORS(test_models$baseline$cormat, N = 500, method = "ULS", eigen_type_other = c("SMC", "PCA")) EFA(DOSPERT_sub, n_factors = 6) EFA(DOSPERT_sub, n_factors = 6, rotation = "promax") EFA(DOSPERT_sub, n_factors = 6, rotation = "promax", type = "psych") EFA(DOSPERT_sub, n_factors = 6, rotation = "promax", type = "SPSS") COMPARE( EFA(DOSPERT_sub, n_factors = 6, rotation = "promax", type = "psych")$rot_loadings, EFA(DOSPERT_sub, n_factors = 6, rotation = "promax", type = "SPSS")$rot_loadings ) EFA(DOSPERT_sub, n_factors = 6, rotation = "oblimin", method = "ULS") COMPARE( EFA(DOSPERT_sub, n_factors = 6, rotation = "promax")$rot_loadings, EFA(DOSPERT_sub, n_factors = 6, rotation = "oblimin", method = "ULS")$rot_loadings, x_labels = c("PAF and promax", "ULS and oblimin") ) EFA_mod <- EFA(DOSPERT_sub, n_factors = 6, rotation = "promax") fac_scores <- FACTOR_SCORES(DOSPERT_sub, f = EFA_mod) microbenchmark::microbenchmark( PARALLEL(DOSPERT_sub, eigen_type = "SMC", n_datasets = 25), psych::fa.parallel(DOSPERT_sub, SMC = TRUE, plot = FALSE, n.iter = 25) ) microbenchmark::microbenchmark( EFA(DOSPERT_raw, 6), psych::fa(DOSPERT_raw, 6, rotate = "none", fm = "pa") ) EFA_AV <- EFA_AVERAGE(test_models$baseline$cormat, n_factors = 3, N = 500, method = c("PAF", "ML", "ULS"), rotation = "oblique", show_progress = FALSE) EFA_AV efa_dospert <- EFA(DOSPERT_sub, n_factors = 6, rotation = "promax") efa_dospert sl_dospert <- SL(efa_dospert) sl_dospert OMEGA(sl_dospert, type = "psych") OMEGA(sl_dospert, factor_corres = matrix(c(rep(0, 18), rep(1, 6), rep(0, 30), rep(1, 6), rep(0, 6), 1, 0, 1, 0, 1, rep(0, 19), rep(1, 6), rep(0, 31), 1, 0, 1, 0, 1, rep(0, 30), rep(1, 6), rep(0, 12)), ncol = 6, byrow = FALSE))
"all_valid_nuts_codes"
library(pointblank) create_informant( read_fn = ~ palmerpenguins::penguins, tbl_name = "penguins", label = "Les données `penguins` du paquet **palmerpenguins**.", lang = "fr" ) %>% info_columns( columns = "species", `ℹ️` = "Un facteur désignant les espèces de manchots ({species_snippet})." ) %>% info_columns( columns = "island", `ℹ️` = "L'île de l'archipel de Palmer, Antarctique ({island_snippet})." ) %>% info_columns( columns = "year", `ℹ️` = "L'année d'étude: {year_snippet}." ) %>% info_snippet( snippet_name = "species_snippet", fn = snip_list(column = "species", and_or = "and") ) %>% info_snippet( snippet_name = "island_snippet", fn = snip_list(column = "island", and_or = "or") ) %>% info_snippet( snippet_name = "year_snippet", fn = snip_list(column = "year", limit = 1) ) %>% incorporate()
"tac_metadata"
"frames.time" <- function(dataset, datanum) { if(is.matrix(dataset$ftime) == FALSE) dataset$ftime <- rbind(dataset$ftime) if(is.matrix(dataset$index) == FALSE) dataset$index <- rbind(dataset$index) nums <- seq(1, nrow(dataset$ftime)) incl <- dataset$index[, 1] <= datanum & dataset$index[, 2] >= datanum retv <- NULL segnum <- nums[incl] percent <- (datanum - dataset$index[segnum, 1])/ (dataset$index[segnum, 2] - dataset$index[segnum, 1]) retv$segnum <- segnum retv$time <- dataset$ftime[segnum, 1] + percent * (dataset$ftime[segnum, 2] - dataset$ftime[segnum, 1]) retv } "get.time.element"<- function(timeval, dataset) { numrows <- nrow(dataset$ftime) left <- dataset$ftime[1, 1] right <- dataset$ftime[numrows, 2] left.i <- dataset$index[1, 1] right.i <- dataset$index[numrows, 2] round(((timeval - left)/(right - left)) * (right.i - left.i)) + 1 }
"print.howmany" <- function(x,...) { summary(x) }
rpygeo_build_env <- function(path = NULL, overwrite = TRUE, extensions = NULL, x64 = FALSE, pro = FALSE, arcgisAPI = FALSE, workspace = NULL, scratch_workspace = NULL) { if (is.null(path)) { if (x64) { dirs1 <- list.files( path = "C:/Python27", pattern = "64", recursive = FALSE, full.names = TRUE ) dirs <- list.files( path = dirs1, pattern = "python.exe", recursive = TRUE, full.names = TRUE ) } if (pro) { dirs <- list.files( path = "C:/Program Files/ArcGIS/Pro/bin/Python/envs/arcgispro-py3", pattern = "python.exe", full.names = TRUE ) } if (!pro && !x64) { dirs <- list.files( path = "C:/Python27", pattern = "python.exe", recursive = TRUE, full.names = TRUE ) } if (length(dirs) == 1) { path <- dirs } if (length(dirs) > 1) { stop("multiple paths found, define ArcGIS Path\n") } if (length(dirs) < 1) { stop("No python version found in 'C:/Python27' - please define python path\n") } } if (!is.null(path)) { path == path } reticulate::use_python(python = path, required = TRUE) if (!arcgisAPI) { reticulate::py_run_string("import arcpy") } if (arcgisAPI) { reticulate::py_run_string("import arcgis") } if (!arcgisAPI) { input_check(overwrite = overwrite, extensions = extensions) } if (!arcgisAPI) { if (!is.null(workspace)) { set_workspace(workspace) } } if (!arcgisAPI) { if (!is.null(workspace) & !is.null(scratch_workspace)) { set_scratch_workspace(scratch_workspace) } else if (!is.null(workspace) & is.null(scratch_workspace)) { if (tools::file_ext(basename(workspace)) == "gdb") { dir.create(paste0(dirname(workspace), "/scratch"), showWarnings = FALSE) set_scratch_workspace(paste0(dirname(workspace), "/scratch")) } else { dir.create(paste0(workspace, "/scratch"), showWarnings = FALSE) set_scratch_workspace(paste0(workspace, "/scratch")) } } } if (!arcgisAPI) { return(reticulate::py_run_string("import arcpy")$arcpy) } if (arcgisAPI) { return(reticulate::py_run_string("import arcgis")$arcpy) } } rpygeo_search <- function(search_term = NULL) { modules <- reticulate::py_run_file(system.file("python", "get_modules.py", package = "RPyGeo")) if(is.null(search_term)) { return(modules$module) } search_result <- modules$module %>% purrr::map(function(a) stringr::str_subset(a, stringr::regex(search_term, ignore_case = TRUE))) %>% purrr::keep(function(a) length(a) > 0) if (length(search_result) < 1) { return(NULL) } search_result } rpygeo_load <- function(data) { path <- data %>% utils::type.convert() %>% as.character() info <- reticulate::py_run_string(paste0("info = arcpy.Describe('", path, "')")) if (tools::file_ext(basename(info$info$path)) == "gdb") { if (info$info$dataType == "FeatureClass") { sf::st_read(dsn = info$info$path, layer = info$info$baseName, quiet = TRUE) %>% return() } else if (info$info$dataType == "RasterDataset") { tempdir() %>% paste0("/r", paste0(floor(stats::runif(7, min = 0, max = 9)), collapse = ""), ".asc") -> temp_file reticulate::py_run_string(paste0("arcpy.RasterToASCII_conversion('", info$info$baseName, "', '", temp_file, "')")) raster::raster(temp_file) %>% return() } else { stop("Unsupported dataset. 
rpygeo_load supports Feature Class and Raster Dataset.") } } else { if (any(info$info$extension %in% c("tif", "img", "asc"))) { raster::raster(paste0(info$info$path, "/", info$info$file)) %>% return() } else if (any(info$info$extension %in% c("shp"))) { sf::st_read(paste0(info$info$path, "/", info$info$file), quiet = TRUE) %>% return() } else if (info$info$extension == "" & file.exists(paste0(info$info$path, "/", info$info$file, "/hdr.adf"))) { raster::raster(paste0(info$info$path, "/", info$info$file)) %>% return() } else { stop("Unsupported data type. rpygeo_load supports Tagged Image File Format (.tif), Erdas Imagine Images (.img), Arc/Info Binary Grid (.adf), Esri ASCII Raster (.asc) and Shapefiles (.shp)") } } } rpygeo_help <- function(arcpy_function) { doc <- substitute(arcpy_function) %>% deparse() %>% reticulate::py_function_docs() help_type <- arcpy_function$func_doc %>% stringr::str_match("(INPUTS:|Arguments:)") if (is.na(help_type[[1]])) { parameters <- c("No input parameters", "No output parameters") template <- "help_template_generic.Rmd" template_parameter <- list( name = doc$name, description = arcpy_function$func_doc ) } else if (help_type[[1]] == "INPUTS:") { output_type <- arcpy_function$func_doc %>% stringr::str_match("OUTPUTS:") if (is.na(output_type)) { res <- arcpy_function$func_doc %>% stringr::str_match("(INPUTS:)([\\S\\s]*)") %>% stringr::str_replace_all("\\n {6}", "\\\n") %>% stringr::str_replace("^\\n", "") %>% stringr::str_replace("\\s*$", "") template <- "help_template_no_output.Rmd" template_parameter <- list( name = doc$name, input = res[[3]], example = doc$signature ) } else { res <- arcpy_function$func_doc %>% stringr::str_match("(INPUTS:)([\\S\\s]*)(OUTPUTS:)([\\S\\s]*)") %>% stringr::str_replace_all("\\n {6}", "\\\n") %>% stringr::str_replace("^\\n", "") %>% stringr::str_replace("\\s*$", "") template <- "help_template.Rmd" template_parameter <- list( name = doc$name, input = res[[3]], output = res[[5]], example = doc$signature ) } } else if (help_type[[1]] == "Arguments:") { res <- arcpy_function$func_doc %>% stringr::str_match("(Arguments:)([\\S\\s]*)(Results:)([\\S\\s]*)") %>% stringr::str_replace_all("\\n {4}", "\\\n") %>% stringr::str_replace("^\\n", "") %>% stringr::str_replace("\\s*$", "") template <- "help_template.Rmd" template_parameter <- list( name = doc$name, input = res[[3]], output = res[[5]], example = doc$signature ) } help_file <- rmarkdown::render(system.file("template", template, package = "RPyGeo"), output_file = "help.html", output_dir = tempdir(), params = template_parameter, quiet = TRUE) if (!is.null(getOption("viewer"))) { rstudioapi::viewer(help_file) } else { utils::browseURL(help_file) } } rpygeo_save <- function(data, filename) { path <- data %>% utils::type.convert() %>% as.character() overwrite <- reticulate::py_run_string("overwrite = arcpy.env.overwriteOutput") workspace <- reticulate::py_run_string("workspace = arcpy.env.workspace") info <- reticulate::py_run_string(paste0("info = arcpy.Describe('", path, "')")) if (info$info$dataType != "RasterDataset") { stop("Only raster files or raster datasets in file geodatabases are supported.") } if (tools::file_ext(basename(info$info$path)) == "gdb" & tools::file_ext(basename(workspace$workspace)) == "gdb") { reticulate::py_run_string(paste0("arcpy.Copy_management('", info$info$catalogpath, "', '", workspace$workspace, "/", filename, "')")) } else if (tools::file_ext(basename(workspace$workspace)) == "gdb") { reticulate::py_run_string(paste0("arcpy.Copy_management('", 
info$info$catalogpath, "', '", workspace$workspace, "/", filename, "')")) } else if (tools::file_ext(basename(info$info$path)) == "gdb") { reticulate::py_run_string(paste0("arcpy.RasterToOtherFormat_conversion('", info$info$catalogpath, "', '", workspace$workspace, "')")) } else { raster::raster(info$info$catalogpath) %>% raster::writeRaster(paste0(workspace$workspace, "/", filename), overwrite = overwrite$overwrite) } }
hypergraph.union <- function(h1,h2,reduce=TRUE) { edges <- c(hypergraph_as_edgelist(h1),hypergraph_as_edgelist(h2)) h <- as.hypergraph(edges) if(reduce){ h <- reduce.hypergraph(h) } h }
context("test news creation") test_that("existing file is checked", { tmp_file <- file.path(paste0(tempdir(), "/tmp_NEWS.md")) writeLines(text = "test NEWS.md", con = tmp_file) this_warning <- paste0( "version check not yet implemented! ", "Consider given an initial version. ", "It is set to 0.0.0.9000 for now.") expect_warning(news$new(file = tmp_file), this_warning) unlink(tmp_file) expect_error(news$new(file = "this/does/not/exists.md"), "given file does not exists") })
print.summary.GORMC <- function(x,...){ cat("Call:\n") print(x$call) cat("\n") printCoefmat(x$coefficients,P.values=TRUE,has.Pvalue=TRUE) print(paste("Loglik=",round(x$loglik,2))) class(x)<-"summary.GORMC" }
context("isd_parse_line") path <- system.file('extdata/024130-99999-2016.gz', package = "isdparser") lns <- readLines(path, encoding = "latin1") test_that("isd_parse_line", { aa <- isd_parse_line(lns[1]) expect_is(aa, "tbl_df") expect_is(aa, "data.frame") expect_type(aa$total_chars, "character") expect_type(aa$usaf_station, "character") expect_type(aa$wban_station, "character") expect_type(aa$date, "character") expect_type(aa$time, "character") expect_equal(NCOL(aa), 38) expect_true(any(grepl("REM", names(aa)))) bb <- isd_parse_line(lns[1], as_data_frame = FALSE) expect_is(bb, "list") expect_type(aa$total_chars, "character") expect_type(aa$usaf_station, "character") expect_type(aa$wban_station, "character") expect_type(aa$date, "character") expect_type(aa$time, "character") expect_equal(length(bb), 38) expect_true(any(grepl("REM", names(bb)))) }) test_that("isd_parse_line - additional param works", { aa <- isd_parse_line(lns[1], additional = FALSE) expect_is(aa, "tbl_df") expect_is(aa, "data.frame") expect_equal(NCOL(aa), 31) expect_false(any(grepl("REM", names(aa)))) bb <- isd_parse_line(lns[1], additional = FALSE, as_data_frame = FALSE) expect_is(bb, "list") expect_equal(length(bb), 31) expect_false(any(grepl("REM", names(bb)))) }) test_that("isd_parse_line fails well", { expect_error(isd_parse_line(), "argument \"x\" is missing") expect_error(isd_parse_line(5), "'x' must be class character") })
BMTmoment <- function(p3, p4, type.p.3.4 = "t w", p1 = 0, p2 = 1, type.p.1.2 = "c-d", order, type = "standardised", method = "quadrature"){ is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol if (any(!is.wholenumber(order)) || any(order < 1)) stop("order should be a vector of integers greater or equal than 1") type <- match.arg(type, c("raw","central","standardised")) method <- match.arg(method, c("quadrature","exact")) TYPE.P.3.4 <- c("t w", "a-s") int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4) if (is.na(int.type.p.3.4)) stop("invalid type of parametrization for parameters 3 and 4") if (int.type.p.3.4 == -1) stop("ambiguous type of parametrization for parameters 3 and 4") TYPE.P.1.2 <- c("c-d", "l-s") int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2) if (is.na(int.type.p.1.2)) stop("invalid type of parametrization for parameters 1 and 2") if (int.type.p.1.2 == -1) stop("ambiguous type of parametrization for parameters 1 and 2") len1 <- max(length(p3),length(p4)) p3 <- rep(p3, len=len1) p4 <- rep(p4, len=len1) len2 <- max(length(p1),length(p2)) p1 <- rep(p1, len=len2) p2 <- rep(p2, len=len2) if(int.type.p.1.2 == 1){ min <- replace(p1, p1 >= p2, NaN) max <- replace(p2, p1 >= p2, NaN) a <- max - min b <- min } else{ mu <- p1 sigma <- replace(p2, p2 <= 0, NaN) a <- sigma/BMTsd(p3, p4, type.p.3.4) b <- mu - a * BMTmean(p3, p4, type.p.3.4) } if(method=="quadrature"){ if(int.type.p.3.4 == 1){ kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN) kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN) a_3 <- 3*kappa_l+3*kappa_r-2 a_2 <- (-6*kappa_l-3*kappa_r+3) a_1 <- (3*kappa_l) } else{ zeta <- replace(p3, p3 < -1 | p3 > 1, NaN) xi <- replace(p4, p4 < 0 | p4 > 1, NaN) abs.zeta <- abs(zeta) aux1 <- 0.5-xi a_3 <- 6*(xi+abs.zeta*aux1)-2 a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3 a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta } funct1 <- function(order,a_3,a_2,a_1,a,b){ t <- 0.5*.GL.10.points + 0.5 x.t <- .x.t(t, a_3, a_2, a_1) if(a!=1) x.t <- a * x.t if(b!=0) x.t <- x.t + b yFp.t <- 6*t*(1-t) return(0.5*sum(.GL.10.weights*(x.t^order)*yFp.t)) } if(type=="raw"){ m <- mapply(funct1,order,a_3,a_2,a_1,a,b) } else{ mean <- BMTmean(p3, p4, type.p.3.4) m <- mapply(funct1,order,a_3,a_2,a_1,rep(1,len=len2),-mean) if(type=="central"){ m <- a^order * m } else{ sigma <- BMTsd(p3, p4, type.p.3.4) m <- m / ( sigma^order ) } } } else{ if(int.type.p.3.4 == 1){ kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN) kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN) } else{ p <- BMTchangepars(p3, p4, type.p.3.4) kappa_l <- p$p3 kappa_r <- p$p4 } funct2 <- function(kappa_l,kappa_r,order,a,b){ K <- partitions::compositions(order, 4, include.zero=TRUE) term4 <- function(v,kappa_l,kappa_r,order,a,b){ term4 <- factorial(order) * 3^(v[2]+v[3]) * ifelse(v[1]==0,1,(b)^v[1]) * ifelse(v[2]==0,1,(a*kappa_l+b)^v[2]) * ifelse(v[3]==0,1,(a*(1-kappa_r)+b)^v[3]) * ifelse(v[4]==0,1,(a+b)^v[4]) / factorial(v[1]) / factorial(v[2]) / factorial(v[3]) / factorial(v[4]) / choose((3*order+2),(1+v[2]+2*v[3]+3*v[4])) return(term4) } return(2/(order+1) * sum(apply(K,2,term4,kappa_l,kappa_r,order,a,b))) } if(type=="raw"){ m <- mapply(funct2,kappa_l,kappa_r,order,a,b) } else{ mean <- BMTmean(kappa_l, kappa_r) m <- mapply(funct2,kappa_l,kappa_r,order,rep(1,len=len2),-mean) if(type=="central"){ m <- a^order * m } else{ sigma <- BMTsd(kappa_l, kappa_r) m <- m / ( sigma^order ) } } } return(m) } BMTmgf <- function(s, p3, p4, type.p.3.4 = "t w", p1 = 0, p2 = 1, type.p.1.2 = "c-d"){ TYPE.P.3.4 <- c("t w", "a-s") int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4) if 
(is.na(int.type.p.3.4)) stop("invalid type of parametrization for parameters 3 and 4") if (int.type.p.3.4 == -1) stop("ambiguous type of parametrization for parameters 3 and 4") TYPE.P.1.2 <- c("c-d", "l-s") int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2) if (is.na(int.type.p.1.2)) stop("invalid type of parametrization for parameters 1 and 2") if (int.type.p.1.2 == -1) stop("ambiguous type of parametrization for parameters 1 and 2") len1 <- max(length(p3),length(p4)) p3 <- rep(p3, len=len1) p4 <- rep(p4, len=len1) len2 <- max(length(s),length(p1),length(p2)) s <- rep(s, len=len2) p1 <- rep(p1, len=len2) p2 <- rep(p2, len=len2) if(int.type.p.3.4 == 1){ kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN) kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN) a_3 <- 3*kappa_l+3*kappa_r-2 a_2 <- (-6*kappa_l-3*kappa_r+3) a_1 <- (3*kappa_l) } else{ zeta <- replace(p3, p3 < -1 | p3 > 1, NaN) xi <- replace(p4, p4 < 0 | p4 > 1, NaN) abs.zeta <- abs(zeta) aux1 <- 0.5-xi a_3 <- 6*(xi+abs.zeta*aux1)-2 a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3 a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta } funct3 <- function(s,a_3,a_2,a_1){ t <- 0.5*.GL.10.points + 0.5 x.t <- .x.t(t, a_3, a_2, a_1) yFp.t <- 6*t*(1-t) return(0.5*sum(.GL.10.weights*exp(s*x.t)*yFp.t)) } if(int.type.p.1.2 == 1){ min <- replace(p1, p1 >= p2, NaN) max <- replace(p2, p1 >= p2, NaN) range <- max - min y <- mapply(funct3,range*s,a_3,a_2,a_1)*exp(min*s) } else{ mu <- p1 sigma <- replace(p2, p2 <= 0, NaN) range <- sigma/BMTsd(p3, p4, type.p.3.4) y <- mapply(funct3,range*s,a_3,a_2,a_1)*exp((mu-range*BMTmean(p3, p4, type.p.3.4))*s) } return(y) } BMTchf <- function(s, p3, p4, type.p.3.4 = "t w", p1 = 0, p2 = 1, type.p.1.2 = "c-d"){ y <- BMTmgf(1i*s, p3, p4, type.p.3.4, p1, p2, type.p.1.2) return(y) } mBMT <- function(order, p3, p4, type.p.3.4, p1, p2, type.p.1.2){ fun <- switch(order,BMTmean,BMTsd,BMTskew,BMTkurt) return(fun(p3, p4, type.p.3.4, p1, p2, type.p.1.2)) } .GL.10.points <- c(-0.973906528517171720078, -0.8650633666889845107321, -0.6794095682990244062343, -0.4333953941292471907993, -0.148874338981631210885, 0.1488743389816312108848, 0.433395394129247190799, 0.6794095682990244062343, 0.8650633666889845107321, 0.973906528517171720078) .GL.10.weights <- c(0.0666713443086881375936, 0.149451349150580593146, 0.2190863625159820439955, 0.2692667193099963550912, 0.295524224714752870174, 0.295524224714752870174, 0.2692667193099963550913, 0.219086362515982043995, 0.1494513491505805931458, 0.0666713443086881375936)
x <- function() 3
x<- function() 3
if (x) 3 else 4
if (x) { 3 } else 4
if (x) 3 else 4
while (x) "x"
while (x) "x"
context("test-twoxtwo") test_that("summary computes measures", { tmp_twoxtwo <- titanic %>% twoxtwo::twoxtwo(., exposure = Crew, outcome = Survived, retain = TRUE) expect_equal(summary(tmp_twoxtwo)$odds_ratio, odds_ratio(titanic, exposure = Crew, outcome = Survived)) expect_equal(summary(tmp_twoxtwo)$risk_ratio, risk_ratio(titanic, exposure = Crew, outcome = Survived)) expect_equal(summary(tmp_twoxtwo)$risk_difference, risk_diff(titanic, exposure = Crew, outcome = Survived)) }) test_that("summary does not try to compute measures with retain FALSE", { tmp_twoxtwo <- titanic %>% twoxtwo::twoxtwo(., exposure = Crew, outcome = Survived, retain = FALSE) expect_null(summary(tmp_twoxtwo)$odds_ratio) expect_null(summary(tmp_twoxtwo)$risk_ratio) expect_null(summary(tmp_twoxtwo)$risk_difference) }) test_that("levels argument can flip orientation", { tmp_twoxtwo <- titanic %>% twoxtwo::twoxtwo(., exposure = Crew, outcome = Survived, levels = list(exposure = c(FALSE,TRUE), outcome = c("Yes","No"))) expect_equal(tmp_twoxtwo$cells$A, 499) expect_equal(tmp_twoxtwo$cells$B, 817) expect_equal(tmp_twoxtwo$cells$C, 212) expect_equal(tmp_twoxtwo$cells$D, 673) tmp_twoxtwo <- titanic %>% twoxtwo::twoxtwo(., exposure = Crew, outcome = Survived, levels = list(exposure = c(FALSE,TRUE), outcome = c("No","Yes"))) expect_equal(tmp_twoxtwo$cells$A, 817) expect_equal(tmp_twoxtwo$cells$B, 499) expect_equal(tmp_twoxtwo$cells$C, 673) expect_equal(tmp_twoxtwo$cells$D, 212) tmp_twoxtwo <- titanic %>% dplyr::mutate(Survived = ifelse(Survived == "Yes", 1, 0)) %>% twoxtwo(., exposure = Crew, outcome = Survived, levels = list(exposure = c(FALSE,TRUE), outcome = c(1,0))) expect_equal(tmp_twoxtwo$cells$A, 499) expect_equal(tmp_twoxtwo$cells$B, 817) expect_equal(tmp_twoxtwo$cells$C, 212) expect_equal(tmp_twoxtwo$cells$D, 673) }) test_that("levels argument errors with level that does not exist", { expect_error({ tmp_twoxtwo <- titanic %>% twoxtwo(., exposure = Crew, outcome = Survived, levels = list(exposure = c(FALSE,TRUE), outcome = c("Survived","Died"))) }) }) test_that("cell count with 0 triggers warning", { tmp <- dplyr::tribble(~exposed, ~diseased,~n, TRUE, TRUE, 7, TRUE, FALSE,15, FALSE, TRUE, 0, FALSE, FALSE, 19) %>% tidyr::uncount(n) wrng_msg <- "\nAt least one of the cells in the two-by-two table is 0.\nEstimates may be uninformative." expect_warning(odds_ratio(tmp, exposed, diseased), wrng_msg) expect_warning(risk_ratio(tmp, exposed, diseased), wrng_msg) expect_warning(risk_diff(tmp, exposed, diseased), wrng_msg) expect_warning(twoxtwo(tmp, exposed, diseased), wrng_msg) })
e <- expression vague_dt_default <- list( list(c = e(seconds < 10), s = "moments ago"), list(c = e(seconds < 45), s = "less than a minute ago"), list(c = e(seconds < 90), s = "about a minute ago"), list(c = e(minutes < 45), s = e("%d minutes ago" %s% round(minutes))), list(c = e(minutes < 90), s = "about an hour ago"), list(c = e(hours < 24), s = e("%d hours ago" %s% round(hours))), list(c = e(hours < 42), s = "a day ago"), list(c = e(days < 30), s = e("%d days ago" %s% round(days))), list(c = e(days < 45), s = "about a month ago"), list(c = e(days < 335), s = e("%d months ago" %s% round(days / 30))), list(c = e(years < 1.5), s = "about a year ago"), list(c = TRUE, s = e("%d years ago" %s% round(years))) ) vague_dt_short <- list( list(c = e(seconds < 50), s = "<1 min"), list(c = e(minutes < 50), s = e("%d min" %s% round(minutes))), list(c = e(hours < 1.5), s = "1 hour"), list(c = e(hours < 18), s = e("%d hours" %s% round(hours))), list(c = e(hours < 42), s = "1 day"), list(c = e(days < 30), s = e("%d day" %s% round(days))), list(c = e(days < 45), s = "1 mon"), list(c = e(days < 335), s = e("%d mon" %s% round(days / 30))), list(c = e(years < 1.5), s = "1 year"), list(c = TRUE, s = e("%d years" %s% round(years))) ) vague_dt_terse <- list( list(c = e(seconds < 50), s = e("%2ds" %s% round(seconds))), list(c = e(minutes < 50), s = e("%2dm" %s% round(minutes))), list(c = e(hours < 18), s = e("%2dh" %s% round(hours))), list(c = e(days < 30), s = e("%2dd" %s% round(days))), list(c = e(days < 335), s = e("%2dM" %s% round(days / 30))), list(c = TRUE, s = e("%2dy" %s% round(years))) ) vague_dt_formats <- list( "default" = vague_dt_default, "short" = vague_dt_short, "terse" = vague_dt_terse ) time_ago <- function(date, format = c("default", "short", "terse")) { date <- as.POSIXct(date) if (length(date) > 1) return(sapply(date, time_ago, format = format)) seconds <- difftime(Sys.time(), date, units = "secs") vague_dt(seconds, format = format) } vague_dt <- function(dt, format = c("default", "short", "terse")) { assert_diff_time(dt) units(dt) <- "secs" seconds <- as.vector(dt) if (!length(seconds)) return(character()) pieces <- list( minutes = seconds / 60, hours = seconds / 60 / 60, days = seconds / 60 / 60 / 24, years = seconds / 60 / 60 / 24 / 365.25 ) format <- match.arg(format) for (p in vague_dt_formats[[format]]) { if (eval(p$c, pieces)) return(eval(p$s, pieces)) } }
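# Hypothetical usage sketch -- not part of the original source. It shows how the
# lookup tables above are evaluated: each `c` expression is tested against a list
# of time pieces and the first match supplies the label. `%s%` is assumed to be
# the package's sprintf-style helper; a stand-in is defined here so the sketch is
# self-contained.
`%s%` <- function(fmt, x) sprintf(fmt, x)   # assumed stand-in for the package helper
pieces <- list(seconds = 7200, minutes = 120, hours = 2, days = 2 / 24, years = 0)
for (p in vague_dt_default) {
  if (eval(p$c, pieces)) {
    print(eval(p$s, pieces))   # first matching rule wins, e.g. "2 hours ago"
    break
  }
}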
seqBIC <- function(seqdata, seqdata2=NULL, group=NULL, set=NULL, s=100, seed=36963, squared="LRTonly", weighted=TRUE, opt=NULL, BFopt=NULL, method, ...) { return(seqCompare(seqdata, seqdata2, group, set, s, seed, stat="BIC", squared, weighted, opt, BFopt, method, ...)) } seqLRT <- function(seqdata, seqdata2=NULL, group=NULL, set=NULL, s=100, seed=36963, squared="LRTonly", weighted=TRUE, opt=NULL, BFopt=NULL, method, ...) { return(seqCompare(seqdata, seqdata2, group, set, s, seed, stat="LRT", squared, weighted, opt, BFopt, method, ...)) } seqCompare <- function(seqdata, seqdata2=NULL, group=NULL, set=NULL, s=100, seed=36963, stat="all", squared="LRTonly", weighted=TRUE, opt=NULL, BFopt=NULL, method, ...) { gc(FALSE) ptime.begin <- proc.time() if (is.null(seqdata2) & is.null(group)){ stop("'seqdata2' and 'group' cannot both be NULL!") } if (!is.null(set) & is.null(group)){ stop("'set' not NULL while 'group' is NULL!") } if (!is.logical(weighted)) { if (weighted != 'by.group') stop("weighted must be logical or 'by.group'") weight.by <- weighted weighted <- TRUE } else { weight.by <- 'global' } if (is.logical(squared)) LRTpow <- 1 else { if (squared != "LRTonly") stop("squared must be logical or 'LRTonly'") LRTpow <- 2 squared <- FALSE } is1.stslist <- inherits(seqdata,"stslist") is2.stslist <- inherits(seqdata2,"stslist") if (is.list(seqdata) & !is1.stslist) { if (is2.stslist | length(seqdata) != length(seqdata2)) stop("When 'seqdata' is a list, seqdata2 must be a list of same length") else { l <- length(seqdata) i <- 1 while (i <= l) { if(!inherits(seqdata[[i]], "stslist") | !inherits(seqdata2[[i]], "stslist")) stop("At least one element of the seqdata lists is not a state sequence object!") i=i+1 } } } else if (!is1.stslist) { stop("If not a list, 'seqdata' must be a state sequence object (stslist) created with seqdef()") } else if (!is.null(seqdata2) & !is2.stslist) { stop("If not a list, 'seqdata2' must be a state sequence object (stslist) created with seqdef()") } if (any(!stat %in% c("LRT","BIC","all"))) stop("Bad stat value, must be one of 'LRT', 'BIC', or 'all'") if (any(stat=="all")) { is.LRT <- is.BIC <- TRUE } else{ is.LRT <- "LRT" %in% stat is.BIC <- "BIC" %in% stat } if (!is1.stslist){ seq1 <- seqdata seq2 <- seqdata2 } else if (is1.stslist & !is.null(seqdata2)) { seq1 <- list(seqdata) seq2 <- list(seqdata2) } else if (is1.stslist & is.null(seqdata2)) { gvar <- as.vector(group) if (!is.null(set)){ setvar <- as.vector(set) inotna <- which(!is.na(gvar) & !is.na(setvar)) setvar <- setvar[inotna] setvar <- factor(setvar) lev.set <- levels(setvar) } else { inotna <- which(!is.na(gvar)) } ina <- nrow(seqdata) - length(inotna) if(ina > 0) message("[!!] 
", ina, " sequences removed because of NA values of the grouping variable(s)\n") gvar <- gvar[inotna] gvar <- factor(gvar) lev.g <- levels(gvar) if (length(lev.g) > 2) stop("Currently seqLRT supports only 2 groups!") seqdata <- seqdata[inotna,] seq1 <- list() seq2 <- list() if (is.null(set)){ seq1[[1]] <- seqdata[gvar==lev.g[1],] seq2[[1]] <- seqdata[gvar==lev.g[2],] } else { for (i in 1:length(lev.set)){ seq1[[i]] <- seqdata[gvar==lev.g[1] & setvar==lev.set[i],] seq2[[i]] <- seqdata[gvar==lev.g[2] & setvar==lev.set[i],] } } } G = length(seq1) n = matrix(NA,nrow=G,ncol=2) seq.a = seq.b <- list(rep(NA,G)) for (i in 1:G) { if (nrow(seq1[[i]])>=nrow(seq2[[i]])) { n[i,1] <- nrow(seq1[[i]]) n[i,2] <- nrow(seq2[[i]]) seq.a[[i]] <- seq1[[i]] seq.b[[i]] <- seq2[[i]] } else { n[i,1] <- nrow(seq2[[i]]) n[i,2] <- nrow(seq1[[i]]) seq.a[[i]] <- seq2[[i]] seq.b[[i]] <- seq1[[i]] } } if (s>0) { m.n = apply(n,1,max) n.n = apply(n,1,min) f.n1 <- floor(s/m.n) ff.n1 <- sapply(f.n1, g<-function(x){max(1,x)}) r.n1 = ifelse(s<m.n, s - m.n%%s, s - f.n1*m.n) k.n = floor((ff.n1*m.n+r.n1)/n.n) r.n2 = (ff.n1*m.n+r.n1)-k.n*n.n if(any(m.n<r.n1)) { ii <- which(m.n<r.n1) stop("rest r.n1 values greater than max m.n for i= ", ii, " s= ", s) } if(any(n.n<r.n2)) { ii <- which(n.n<r.n2) stop("rest r.n2 values greater than min n.n for i= ", ii, " s= ", s) } } nc <- ifelse(is.LRT & is.BIC, 4, 2) Results <- matrix(NA,G,nc) oopt <- opt multsple <- FALSE for (i in 1:G) { if (s==0) { r1 <- 1:nrow(seq.a[[i]]) r2 <- 1:nrow(seq.b[[i]]) + nrow(seq.a[[i]]) suppressMessages(diss <- seqdist(rbind(seq.a[[i]],seq.b[[i]]), method=method, weighted=weighted, ...)) weights <- c(attr(seq.a[[i]],"weights"),attr(seq.b[[i]],"weights")) suppressMessages( Results[i,] <- seq.comp(r1, r2, diss, weights, is.LRT=is.LRT, is.BIC=is.BIC, squared=squared, weighted=weighted, weight.by=weight.by, LRTpow=LRTpow, ...)) } else { set.seed(seed) r.s1 <- c(permute(rep(1:m.n[i],ff.n1[i])),sample(1:m.n[i],r.n1[i],F)) r.s2 <- c(permute(rep(1:n.n[i],k.n[i])),sample(1:n.n[i],r.n2[i],F)) r.s1 = matrix(r.s1,ncol=s) r.s2 = matrix(r.s2,ncol=s) if (is.null(oopt)) opt <- ifelse(nrow(seq.a[[i]]) + nrow(seq.b[[i]]) > 2*s, 1, 2) if (opt==2) { suppressMessages(diss <- seqdist(rbind(seq.a[[i]],seq.b[[i]]), method=method, weighted=weighted, ...)) weights <- c(attr(seq.a[[i]],"weights"),attr(seq.b[[i]],"weights")) } multsple <- nrow(r.s1) > 1 || multsple t<-matrix(NA,nrow=nrow(r.s1),ncol=nc) for (j in 1:nrow(r.s1)) { if (opt==2) { r1 <- r.s1[j,] r2 <- r.s2[j,] + nrow(seq.a[[i]]) } else { seqA<-seq.a[[i]][r.s1[j,],] seqB<-seq.b[[i]][r.s2[j,],] seqAB <- rbind(seqA, seqB) wA <- attr(seqA,"weights") wB <- attr(seqB,"weights") weights <- c(wA,wB) r1 <- 1:length(r.s1[j,]) r2 <- length(r.s1[j,]) + 1:length(r.s2[j,]) suppressMessages(diss <- seqdist(seqAB, method=method, weighted=weighted, ...)) } suppressMessages(t[j,] <- seq.comp(r1, r2, diss, weights, is.LRT=is.LRT, is.BIC=is.BIC, squared=squared, weighted=weighted, weight.by=weight.by, LRTpow=LRTpow, ...)) } Results[i,]<-apply(t,2,mean) } } colnames <- NULL if (is.LRT) colnames <- c("LRT", "p-value") if (is.BIC) { if (is.null(BFopt) && multsple) { BF2 <- exp(Results[,nc-1]/2) Results <- cbind(Results, BF2) colnames <- c(colnames, "BIC diff.", "Bayes Factor (Avg)", "Bayes Factor (From Avg BIC)") } else if (BFopt==1 && multsple) { colnames <- c(colnames, "BIC diff.", "Bayes Factor (Avg)") } else if (BFopt==2 && multsple) { BF2 <- exp(Results[,nc-1]/2) Results[,nc] <- BF2 colnames <- c(colnames, "BIC diff.", "Bayes Factor (From Avg BIC)") 
} else { colnames <- c(colnames, "BIC diff.", "Bayes Factor") } } colnames(Results) <- colnames if(!is.null(set)) rownames(Results) <- lev.set ptime.end <- proc.time() time.begin <- as.POSIXct(sum(ptime.begin[1:2]), origin = "1960-01-01") time.end <- as.POSIXct(sum(ptime.end[1:2]), origin = "1960-01-01") time.elapsed <- format(round(difftime(time.end, time.begin), 3)) message("elapsed time:", time.elapsed) return(Results) } seq.comp <- function(r1, r2, diss, weights, is.LRT,is.BIC, squared, weighted, weight.by, LRTpow,...) { n1 <- length(r1) n2 <- length(r2) n0 <- n1+n2 dist.S=dist.S1=dist.S2<-vector() weighted <- weighted && !any(is.null(weights)) if (weighted) { w1 <- weights[r1] w2 <- weights[r2] w <- c(w1,w2) if (weight.by == 'by.group') { w1 <- n1/sum(w1) * w1 w2 <- n2/sum(w2) * w2 w <- c(w1,w2) } nw <- sum(w) nw1 <- sum(w1) nw2 <- sum(w2) } else { nw <- n0 nw1 <- n1 nw2 <- n2 w <- rep(1,nw) w1 <- rep(1,nw1) w2 <- rep(1,nw2) } dist.S <-disscenter(diss[c(r1,r2),c(r1,r2)], weights=w, squared=squared) dist.S1<-disscenter(diss[r1,r1], weights=w1, squared=squared) dist.S2<-disscenter(diss[r2,r2], weights=w2, squared=squared) SS <- sum(w*dist.S^LRTpow) SS1 <- sum(w1*dist.S1^LRTpow) SS2 <- sum(w2*dist.S2^LRTpow) res <- NULL LRT <- n0*(log(SS/n0) - log((SS1+SS2)/n0)) if (is.LRT) { p.LRT <- pchisq(LRT,1,lower.tail=FALSE) res <- cbind(LRT, p.LRT) } if (is.BIC) { BIC <- LRT - 1*log(n0) BF <- exp(BIC/2) res <- cbind(res, BIC, BF) } return(res) }
context("Spatial modeling functions") skip_on_cran() set.seed(567) dat_occ <- data.frame(cov1=rnorm(500), x=runif(500, 0,10), y=runif(500,0,10)) dat_p <- data.frame(x2=rnorm(500*5)) y <- matrix(NA, 500, 5) z <- rep(NA, 500) b <- c(0.4, -0.5, 0, 0.5) idx <- 1 for (i in sample(1:500, 300, replace=FALSE)){ z[i] <- rbinom(1,1, plogis(b[1] + b[2]*dat_occ$cov1[i])) for (j in 1:5){ y[i,j] <- z[i]*rbinom(1,1, plogis(b[3] + b[4]*dat_p$x2[idx])) idx <- idx + 1 } } umf <- unmarkedFrameOccu(y=y, siteCovs=dat_occ, obsCovs=dat_p) fit <- suppressMessages(suppressWarnings(stan_occu(~1~cov1+RSR(x,y,1), data=umf[1:20,], chains=2, iter=200, refresh=0))) fit2 <- suppressWarnings(stan_occu(~1~1, data=umf[1:10,], chains=2, iter=200, refresh=0)) test_that("spatial model output structure", { expect_is(fit, "ubmsFitOccu") expect_true(has_spatial(fit@submodels@submodels$state)) expect_equal(names(coef(fit))[3], "state[RSR [tau]]") }) test_that("methods for spatial model work", { pr <- suppressMessages(predict(fit, "state")) expect_is(pr, "data.frame") expect_equal(dim(pr), c(20,4)) nd <- data.frame(cov1=c(0,1)) expect_error(suppressMessages(predict(fit, "state", newdata=nd))) pr <- suppressMessages(predict(fit, "state", newdata=nd, re.form=NA)) expect_equal(dim(pr), c(2,4)) ss <- suppressMessages(sim_state(fit, samples=1:2)) expect_equal(dim(ss), c(2,13)) expect_warning(ppred <- suppressMessages(posterior_predict(fit, "z", draws=2))) expect_equal(dim(ppred), c(2, 13)) expect_warning(ppred <- suppressMessages(posterior_predict(fit, "y", draws=2))) expect_equal(dim(ppred), c(2, 65)) fitted <- getMethod("fitted", "ubmsFit") ft <- suppressMessages(fitted(fit, "state", draws=2)) expect_is(ft, "matrix") expect_equal(dim(ft), c(2, 13)) }) test_that("RSR() generates spatial matrices", { rsr_out <- RSR(dat_occ$x, dat_occ$y, threshold=1) rsr_out2 <- RSR(dat_occ$x, dat_occ$y, threshold=5) expect_is(rsr_out, "list") expect_equal(names(rsr_out), c("A","Q","n_eig","coords")) expect_equal(as.matrix(rsr_out$coords), as.matrix(dat_occ[,c("x","y")])) expect_equal(rsr_out$Q[1], -sum(rsr_out$Q[1,2:500])) expect_true(sum(diag(rsr_out$Q)) < sum(diag(rsr_out2$Q))) expect_equal(rsr_out$n_eig, nrow(dat_occ)*0.1) rsr_out3 <- RSR(dat_occ$x, dat_occ$y, threshold=1, moran_cut=100) expect_equal(rsr_out3$n_eig, 100) expect_error(RSR(dat_occ$x, dat_occ$y, threshold=1, moran_cut=1000)) expect_equal(dim(rsr_out$A), c(nrow(dat_occ), nrow(dat_occ))) expect_true(max(rsr_out$A)==1) }) test_that("RSR() can generate a plot", { pdf(NULL) rsr_out <- RSR(dat_occ$x, dat_occ$y, threshold=1) gg <- RSR(dat_occ$x, dat_occ$y, threshold=1, plot_site=1) expect_is(gg, "gg") dev.off() }) test_that("RSR info can be extracted from submodel", { sm <- fit@submodels@submodels$state inf <- get_rsr_info(sm) expect_is(inf, "list") expect_equal(names(inf), c("A","Q","n_eig","coords")) }) test_that("remove_RSR removes spatial component of formula", { nf <- remove_RSR(fit@submodels@submodels$state@formula) expect_equal(as.formula(~cov1), nf) expect_equal(~1, remove_RSR(~RSR(x,y,1))) expect_error(remove_RSR(~cov1+RSR(x,y,1)+(1|fake))) expect_error(remove_RSR(~cov1+(1|fake)+RSR(x,y,1))) }) test_that("has_spatial identifies spatial submodels", { expect_true(has_spatial(fit@submodels@submodels$state)) expect_false(has_spatial(fit@submodels@submodels$det)) }) test_that("has_spatial works on lists of formulas", { expect_true(has_spatial(list(det=~1,state=~RSR(x,y,1)))) expect_error(has_spatial(list(state=~1,det=~RSR(x,y,1)))) 
expect_error(has_spatial(list(state=~RSR(x,y,1),det=~RSR(x,y,1)))) expect_error(has_spatial(list(det=~1,state=~RSR(x,y,1)),support=FALSE)) }) test_that("construction of ubmsSubmodelSpatial objects", { ex <- extract_missing_sites(umf) sm <- ubmsSubmodelSpatial("Test","test", ex$umf@siteCovs, ~1+RSR(x,y,1), "plogis", uniform(-5,5), normal(0,2.5), gamma(1,1), ex$sites_augment, ex$data_aug) expect_is(sm, "ubmsSubmodelSpatial") }) test_that("extract_missing_sites identifies augmented sites", { es <- extract_missing_sites(umf) expect_is(es, "list") expect_equivalent(es$sites_augment, apply(umf@y, 1, function(x) all(is.na(x)))) expect_equal(nrow(es$data_aug), sum(es$sites_augment)) expect_equal(nrow(siteCovs(es$umf)), numSites(umf) - sum(es$sites_augment)) expect_true(!any(apply(es$umf@y, 1, function(x) all(is.na(x))))) umf2 <- umf umf2@siteCovs$cov1[1] <- NA expect_error(extract_missing_sites(umf2)) }) test_that("spatial_matrices builds correct RSR matrices", { sm <- fit@submodels@submodels$state mats <- suppressMessages(spatial_matrices(sm)) expect_is(mats, "list") n_eig <- get_rsr_info(sm)$n_eig expect_equal(dim(mats$Qalpha), c(n_eig, n_eig)) expect_equal(mats$Qalpha[1,1:2], c(0.08389,0.44826), tol=1e-4) expect_equal(dim(mats$Kmat), c(20, n_eig)) expect_equal(mats$Kmat[1,1:2], c(-0.0197,0.0250), tol=1e-4) expect_equal(mats$n_eigen, n_eig) }) test_that("get_pars method for ubmsSubmodelSpatial adds tau param", { sm <- fit@submodels@submodels$state expect_equal(get_pars(sm), c("beta_state","b_state","tau")) }) test_that("get_stan_data for ubmsSubmodelSpatial includes spatial data", { sm <- fit@submodels@submodels$state dat <- suppressMessages(get_stan_data(sm)) expect_is(dat, "list") expect_equal(dat$n_random_state[1], 2) expect_true(all(c("Kmat","Qalpha","n_eigen","n_aug_sites","X_aug","offset_aug") %in% names(dat))) expect_equal(nrow(dat$X_aug), 7) expect_equal(length(dat$offset_aug), 7) }) test_that("stanfit_names returns correct names for ubmsSubmodelSpatial", { sm <- fit@submodels@submodels$state sname <- stanfit_names(sm) expect_equal(length(sname), 5) expect_equal(sname[5], "tau") }) test_that("plot_spatial returns ggplot", { pdf(NULL) gg1 <- suppressMessages(plot_spatial(fit, "state")) expect_is(gg1, "gg") gg2 <- suppressMessages(plot_spatial(fit, "eta")) expect_is(gg2, "gg") gg3 <- suppressMessages(plot_spatial(fit, "state", sites=TRUE)) expect_is(gg3, "gg") dev.off() expect_error(plot_spatial(umf)) expect_error(plot_spatial(fit2)) })
annual_range <- function(swmpr_in, ...) UseMethod('annual_range') annual_range.swmpr <- function(swmpr_in , param = NULL , target_yr = NULL , criteria = NULL , free_y = FALSE , log_trans = FALSE , converted = FALSE , criteria_lab = 'WQ Threshold' , plot_title = FALSE , plot = TRUE , ...) { dat <- swmpr_in parm <- sym(param) conv <- converted seas <- sym('season') res <- sym('result') dt <- sym('date') avg <- sym('mean') mini <- sym('min') maxi <- sym('max') mini_avg <- sym('min_avg') maxi_avg <- sym('max_avg') parameters <- attr(dat, 'parameters') station <- attr(dat, 'station') data_type <- substr(station, 6, nchar(station)) if(data_type == 'nut') warning('Nutrient data detected. Consider specifying seasons > 1 month. See `?assign_season` for details.') x <- dat[ , c('datetimestamp', param)] x <- x[complete.cases(x), ] if(is.null(target_yr)) { warning('No target year specified. Maximum year in data set will be used.') target_yr <- max(lubridate::year(x$datetimestamp)) } if(!is.null(target_yr)) { if(!(target_yr %in% unique(year(x$datetimestamp)))) { warning('User-specified target year is not present in the data set. target_yr argument will be set to max year in the data set') target_yr <- max(year(x$datetimestamp)) } } if(!any(param %in% parameters)) stop('Param argument must name input column') y_trans <- ifelse(log_trans, 'log10', 'identity') y_label <- y_labeler(param = param, converted = conv) if(attr(dat, 'qaqc_cols')) warning('QAQC columns present. QAQC not performed before analysis.') dat <- dat %>% filter(lubridate::year(.data$datetimestamp) == target_yr) dat$season <- assign_season(dat$datetimestamp, abb = TRUE, ...) dat$date <- lubridate::floor_date(dat$datetimestamp, unit = 'days') dat <- dat %>% dplyr::select(.data$datetimestamp, date, .data$season, !!parm) dat <- dat %>% dplyr::filter(!is.na(!! parm)) dat_day <- dat %>% group_by(!! seas, !! dt) %>% summarise(mean = mean(!! parm, na.rm = TRUE) , min = min(!! parm, na.rm = TRUE) , max = max(!! parm, na.rm = TRUE) , .groups = "drop_last") dat_month <- dat_day %>% group_by(!! seas) %>% summarise(mean = mean(!! avg, na.rm = TRUE) , min_avg = mean(!! mini, na.rm = TRUE) , max_avg = mean(!! maxi, na.rm = TRUE) , min = min(!! mini, na.rm = TRUE) , max = max(!! maxi, na.rm = TRUE) , .groups = "drop_last") dat_month <- tidyr::complete(dat_month, !! 
seas) if(plot){ mn <- min(dat_month$min, na.rm = TRUE) mn <- ifelse(mn < 0 , min(pretty(mn)), 0) mn <- ifelse(log_trans, ifelse(substr(station, 6, nchar(station)) == 'nut', 0.001, 0.1), mn) lab_ln <- ifelse(data_type == 'nut', paste('Monthly Sample \n(', target_yr, ')', sep = ''), paste('Daily Average \n(', target_yr, ')', sep = '')) plt <- ggplot(data = dat_month, aes_(x = seas, y = avg, group = 1)) + geom_line(lwd = 1, color = 'steelblue3') + geom_point(aes_(fill = lab_ln, shape = lab_ln), color = 'black', size = 2) + labs(x = NULL, y = eval(y_label)) + theme_bw() + theme(legend.position = 'top', legend.direction = 'horizontal') if(data_type != 'nut') { lab_rng_avg <- paste('Avg Daily Range \n(', target_yr, ')', sep = '') lab_rng_mx <- paste('Daily Range \n(', target_yr, ')', sep = '') plt <- plt + geom_ribbon(aes_(x = seas, ymax = maxi_avg, ymin = mini_avg, fill = lab_rng_avg, alpha = lab_rng_avg)) + geom_ribbon(aes_(x = seas, ymax = maxi, ymin = mini, group = 1, fill = lab_rng_mx, alpha = lab_rng_mx)) } if(!log_trans) { plt <- plt + scale_y_continuous(labels = format_format(digits = 2, big.mark = ",", decimal.mark = ".", scientific = FALSE) , breaks = pretty_breaks(n = 8)) if(!free_y){plt <- plt + expand_limits(y = mn)} } else { plt <- plt + scale_y_continuous(trans = y_trans , labels = format_format(digits = 2, big.mark = ",", decimal.mark = ".", scientific = FALSE) , breaks = pretty_breaks(n = 8)) if(!free_y) {plt <- plt + expand_limits(y = mn)} } plt <- plt + scale_fill_manual('', values = c(rep('steelblue3', 3)), guide = "none") + scale_shape_manual('', values = c(21)) + scale_alpha_manual('', values = c(0.4, 0.15)) plt <- plt + guides(alpha = guide_legend(override.aes = list(fill = 'steelblue3')) , shape = guide_legend(override.aes = list(fill = 'steelblue3'))) if(!is.null(criteria)) { plt <- plt + geom_hline(aes(yintercept = criteria, color = factor(criteria_lab) , linetype = factor(criteria_lab)) , show.legend = TRUE) + scale_color_manual('', values = c('red')) + scale_linetype_manual('', values = c('longdash')) } if(plot_title) { ttl <- title_labeler(nerr_site_id = station) plt <- plt + ggtitle(ttl) + theme(plot.title = element_text(hjust = 0.5)) } plt <- plt + guides(alpha = guide_legend(override.aes = list(fill = 'steelblue3', linetype = 0), order = 2) , shape = guide_legend(override.aes = list(fill = 'steelblue3', linetype = 0), order = 1) , 'WQ Threshold' = guide_legend(order = 3)) plt <- plt + theme(strip.background = element_blank(), panel.border = element_rect(color = 'black')) + theme(axis.title.y = element_text(margin = unit(c(0, 8, 0, 0), 'pt'), angle = 90)) + theme(text = element_text(size = 16)) plt <- plt + theme(legend.key.height = unit(0.1, 'cm') , legend.key.width = unit(0.5, 'cm')) + theme(legend.text = element_text(size = 10) , legend.text.align = 0.5) + theme(legend.spacing.x = unit(3, 'pt')) return(plt) } else { tbl <- dat_month } }
spherical.flipping <- function(C, mesh, param1, param2) {
  # Spherical flipping of mesh vertices about the viewpoint C
  C <- matrix(as.vector(C), ncol = 3, nrow = 1)
  P <- t(mesh$vb)[, -4]                 # vertex coordinates, one row per vertex
  numVer <- dim(P)[1]                   # number of vertices
  P2 <- P - repmat(C, numVer, 1)        # vertices expressed relative to the viewpoint C
  normp <- sqrt(rowSums(P2^2))          # distance of each vertex from C
  R2 <- matrix(repmat(max(normp) * (param2^param1), numVer, 1))   # flipping sphere radius
  SF <- P2 + 2 * repmat(R2 - cbind(normp), 1, 3) * P2 / repmat(cbind(normp), 1, 3)
  return(SF)
}
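# Hypothetical usage sketch -- not part of the original source. It assumes a
# pracma-style repmat() helper (a stand-in is defined below so the sketch is
# self-contained) and an rgl/mesh3d-style object whose $vb slot holds
# homogeneous vertex coordinates (4 x n).
repmat <- function(a, n, m) kronecker(matrix(1, n, m), a)      # assumed helper
toy_mesh <- list(vb = rbind(matrix(rnorm(30), nrow = 3), 1))   # 10 random vertices
flipped <- spherical.flipping(C = c(0, 0, 5), mesh = toy_mesh, param1 = 1, param2 = 2)
dim(flipped)   # 10 x 3: one spherically flipped coordinate triple per vertex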
slitFrame <- structure(function ( rd, lv = cClass(rd,'factor') ) { rd11 <- rd[lv] if(is.null(lv)){ rd1 <- Filter(is.factor, rd) rd11 <- rd1[1:length(rd1)] } lrd <- split(rd, rd11, drop = TRUE) spn <- data.frame( do.call(rbind, strsplit(names(lrd), '\\.'))) nmr <- apply(spn[rev(names(spn))], 1, paste, collapse = '.') options(warn=-1) anm <- all(is.na(as.numeric(as.character(nmr)))) options(warn=0) if(anm) names(lrd) <- nmr lrd <- lrd[order(names(lrd))] return(lrd) }, ex=function() { data(Prings05, envir = environment()) data(PTclim05, envir = environment()) spl <- slitFrame(Prings05) str(spl) spl <- slitFrame(Prings05,'year') str(spl) spl <- slitFrame(PTclim05,'year') str(spl) })
expected <- eval(parse(text="2L")); test(id=0, code={ argv <- eval(parse(text="list(FALSE)")); .Internal(sink.number(argv[[1]])); }, o=expected);
polychoric2 <- function( dat, maxiter=100, cor.smooth=TRUE, use_pbv=1, conv=1e-10,
        rho_init=NULL, weights=NULL )
{
    dat1 <- as.matrix(dat)
    NV <- ncol(dat1)
    N <- nrow(dat1)
    if (is.null(rho_init)){
        rho_init <- matrix(0, nrow=NV, ncol=NV)
    }
    if (is.null(weights)){
        weights <- rep(1,N)
    }
    min_val <- apply(dat1, 2, min, na.rm=TRUE)
    if (any(min_val>0)){
        stop("Minimum value must always be zero.\n")
    }
    maxK <- max(dat1, na.rm=TRUE)
    res0 <- sirt_rcpp_polychoric2( dat=dat1, maxK=maxK, maxiter=maxiter,
                use_pbv=use_pbv, conv=conv, rho_init=rho_init, weights=weights)
    iter <- res0$iter
    rho <- res0$rho
    Nobs <- res0$Nobs
    maxcat <- res0$maxcat
    thresh <- res0$thresh
    tau <- thresh[, c(2:(maxK+1))]
    tau[ tau==99 ] <- Inf
    rownames(rho) <- colnames(rho) <- colnames(dat1)
    if ( maxK > 1 ){
        rownames(tau) <- rownames(rho)
        colnames(tau) <- paste0("Cat", 1:maxK)
    }
    if ( maxK==1 ){
        names(tau) <- rownames(rho)
    }
    rho[ Nobs==0 ] <- NA
    diag(rho) <- 1
    if ( any(is.na(rho)) ){
        cor.smooth <- FALSE
    }
    if (cor.smooth){
        rho <- sirt_import_psych_cor.smooth(x=rho)
    }
    res <- list(tau=tau, rho=rho, Nobs=Nobs, maxcat=maxcat, cor.smooth=cor.smooth,
                iter=iter)
    return(res)
}
context("Install from GitHub") test_that("github_resolve_ref.github_release", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() expect_error( github_resolve_ref.github_release( NA, list(username = "hadley", repo = "devtools"), host = "api.github.com" ), NA ) }) test_that("github_resolve_ref.NULL", { expect_equal( github_resolve_ref(NULL, list()), list(ref = "HEAD") ) }) test_that("github_resolve_ref.github_pull", { expect_error( github_resolve_ref( github_pull("1"), list(userame = "gaborcsardi", repo = "pkgconfig") ), "Cannot find GitHub pull request" ) }) test_that("github_resolve_ref.github_release", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() expect_error( github_resolve_ref( github_release(), list(userame = "gaborcsardi", repo = "xxxxxxxxxx") ), "Cannot find repo" ) }) test_that("github_release", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() Sys.unsetenv("R_TESTS") lib <- tempfile() on.exit(unlink(lib, recursive = TRUE), add = TRUE) dir.create(lib) install_github( "gaborcsardi/falsy", ref = github_release(), lib = lib, quiet = TRUE ) expect_silent(packageDescription("falsy", lib.loc = lib)) expect_equal( packageDescription("falsy", lib.loc = lib)$RemoteRepo, "falsy") }) test_that("install_github", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() Sys.unsetenv("R_TESTS") lib <- tempfile() on.exit(unlink(lib, recursive = TRUE), add = TRUE) dir.create(lib) install_github("cran/falsy", lib = lib, quiet = TRUE) expect_silent(packageDescription("falsy", lib.loc = lib)) expect_equal( packageDescription("falsy", lib.loc = lib)$RemoteRepo, "falsy") remote <- package2remote("falsy", lib = lib) expect_s3_class(remote, "remote") expect_s3_class(remote, "github_remote") expect_equal(format(remote), "GitHub") expect_equal(remote$host, "api.github.com") expect_equal(remote$username, "cran") expect_equal(remote$repo, "falsy") expect_equal(remote$ref, "HEAD") expect_equal(remote$subdir, NULL) expect_true(!is.na(remote$sha) && nzchar(remote$sha)) }) test_that("error if not username, warning if given as argument", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() Sys.unsetenv("R_TESTS") lib <- tempfile() on.exit(unlink(lib, recursive = TRUE), add = TRUE) dir.create(lib) expect_error( install_github("falsy", lib = lib, quiet = TRUE), "Invalid git repo specification" ) }) test_that("remote_download.github_remote messages", { mockery::stub(remote_download.github_remote, "download", TRUE) expect_message( remote_download.github_remote( list( host = "api.github.com", username = "cran", repo = "falsy", ref = "HEAD" ) ), "Downloading GitHub repo" ) }) test_that("remote_metadata.github_remote", { expect_equal( remote_metadata.github_remote(list(), sha = "foobar")$RemoteSha, "foobar" ) }) test_that("remote_sha.github_remote", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() expect_equal( remote_sha.github_remote( list( username = "cran", repo = "falsy", ref = "1.0", host = "api.github.com" ) ), "0f39d9eb735bf16909831c0bb129063dda388375" ) }) test_that("github_remote with deleted branch", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() skip_if(is_standalone() || !pkg_installed("curl")) expect_equal( remote_sha.github_remote( list( username = "tidyverse", repo = "purrr", ref = "rc-0.3.1", host = "api.github.com" ) ), NA_character_ ) }) test_that("github_pull", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() Sys.unsetenv("R_TESTS") lib <- tempfile() on.exit(unlink(lib, recursive = TRUE), add = TRUE) dir.create(lib) 
install_github( "r-lib/desc", ref = github_pull(64), lib = lib, quiet = TRUE ) expect_silent(packageDescription("desc", lib.loc = lib)) expect_equal( packageDescription("desc", lib.loc = lib)$RemoteRepo, "desc") }) test_that("remote_sha.github_remote errors if remote doesn't exist", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() expect_error(remote_sha(github_remote("arst/arst"))) }) test_that("remote_sha.github_remote returns expected value if remote does exist", { skip_on_cran() skip_if_offline() skip_if_over_rate_limit() expect_equal(remote_sha(github_remote("r-lib/[email protected]")), "ad9aac7b9a522354e1ff363a86f389e32cec181b") })
vegindex <- function( x, index, returnHCR = "auto", L = 0.5, weighted = TRUE, ... ) { vegindex_available <- function() { av <- c("NDVI","OSAVI","SAVI","MTVI","NDWI","PWI", "MSI", "SRWI","GMI1","GMI2","MCARI","TVI", "Vogelmann4","Boochs","Boochs2", "CARI","CI","Carter","Carter2","Carter3","Carter4", "Carter5","Carter6","Datt","Datt2","Datt3","Datt4", "Datt5","Datt6","DD","DDn","D1","D2","EVI","EGFR","EGFN", "GI","Gitelson","Gitelson2","Green NDVI","MCARI/OSAVI", "MCARI2","MCARI2/OSAVI2","mNDVI","mND705","Maccioni", "mREIP","MSAVI","mSR","mSR705","mSR2","MTCI","NDVI2", "NDVI3","NPCI","OSAVI2","RDVI","REP_LE","REP_Li", "SIPI","SPVI","SR","SR1","SR2","SR3","SR4","SR5","SR6", "SR7", "SR8","SRPI","Sum_Dr1","Sum_Dr2","TCARI","TCARI2", "TCARI/OSAVI","TCARI2/OSAVI2","Vogelmann","NDLI", "Vogelmann2","Vogelmann3","PRI","CAI","NDNI", "PSSR", "PSND", "CRI1", "CRI2", "CRI3", "CRI4", "MPRI", "PRI*CI2", "CI2", "PSRI", "ClAInt", "TGI", "PRI_norm","PARS","DPI","Datt7","Datt8", "GDVI_2","GDVI_3","GDVI_4","LWVI1","LWVI2", "DWSI1","DWSI2","DWSI3","DWSI4","DWSI5", "SWIR FI", "SWIR LI", "SWIR SI", "SWIR VI" ) return(sort(av)) } return_index <- function(x) { if (eval.parent(convertSpatialGrid)) { spec <- speclib(x, 1) spec@rastermeta <- gridMeta result <- HyperSpecRaster(spec) } return (x) } if (length(names(match.call()))==0) { return(vegindex_available()) } if (x@spectra@fromRaster) return(.blockwise(speclib_obj = "x", pos = 1)) x_back <- x if (!is.speclib(x)) stop("x is not of class 'Speclib'") if (returnHCR == "auto") returnHCR <- .is.rastermeta(x) convertSpatialGrid <- returnHCR gridMeta <- x@rastermeta if (returnHCR) { if (!.is.rastermeta(x)) stop("If returnHCR, x must contain meta information") } if (length(index)>1) { result <- as.data.frame(matrix(data = NA, nrow = dim(x)[1], ncol = length(index))) for (i in 1:length(index)) { temp <- vegindex(x, index[i], returnHCR=FALSE, ...) if (!is.null(temp)) { result[,i] <- temp } } if (nspectra(x) > 1 & nspectra(x) < 10000) { names(result) <- index row.names(result) <- idSpeclib(x) } if (returnHCR) { spec <- speclib(result, c(1:ncol(result))) if (.is.rastermeta(x)) spec@rastermeta <- x@rastermeta result <- HyperSpecRaster(spec) } return(result) } d_indexs <- c("Boochs","Boochs2","Datt3","D1","D2","EGFR","EGFN", "Vogelmann3","Sum_Dr1","Sum_Dr2","REP_LE","DPI") m <- c(rep.int(1,length(d_indexs))) if (any(index==d_indexs)) x <- derivative.speclib(x, m=m[d_indexs==index], ...) 
wlunit <- x@wlunit y <- spectra(x) x <- wavelength(x) if (index=="NDVI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,680,weighted)) / (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,680,weighted)))) } if (index=="OSAVI") { return(return_index((1+0.16) * (get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,670,weighted)+0.16) )) } if (index=="RDVI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))/ sqrt(get_reflectance(y,x,800,weighted)+get_reflectance(y,x,670,weighted)))) } if (index=="SAVI") { return(return_index((1+L)*(get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,670,weighted)+L))) } if (index=="MTVI") { return(return_index(1.2*(1.2*(get_reflectance(y,x,800,weighted)-get_reflectance(y,x,550,weighted))- 2.5*(get_reflectance(y,x,670,weighted)-get_reflectance(y,x,550,weighted))))) } if (index=="NDWI") { return(return_index((get_reflectance(y,x,860,weighted)-get_reflectance(y,x,1240,weighted)) / (get_reflectance(y,x,860,weighted)+get_reflectance(y,x,1240,weighted)))) } if (index=="PWI") { return(return_index(get_reflectance(y,x,900,weighted)/get_reflectance(y,x,970,weighted))) } if (index=="MSI") { return(return_index(get_reflectance(y,x,1600,weighted)/ get_reflectance(y,x,817,weighted))) } if (index=="WBI") { return(return_index(get_reflectance(y,x,970,weighted)/ get_reflectance(y,x,900,weighted))) } if (index=="SRWI") { return(return_index(get_reflectance(y,x,850,weighted)/get_reflectance(y,x,1240,weighted))) } if (index=="GMI1") { return(return_index(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,550,weighted))) } if (index=="GMI2") { return(return_index(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,700,weighted))) } if (index=="MCARI") { return(return_index(((get_reflectance(y,x,700,weighted)-get_reflectance(y,x,670,weighted))- 0.2*(get_reflectance(y,x,700,weighted)-get_reflectance(y,x,550,weighted)))* (get_reflectance(y,x,700,weighted)/get_reflectance(y,x,670,weighted)))) } if (index=="TVI") { return(return_index(0.5*(120*(get_reflectance(y,x,750,weighted)-get_reflectance(y,x,550,weighted))- 200*(get_reflectance(y,x,670,weighted)-get_reflectance(y,x,550,weighted))))) } if (index=="Vogelmann4") { return(return_index((get_reflectance(y,x,734,weighted)-get_reflectance(y,x,747,weighted))/ (get_reflectance(y,x,715,weighted)+get_reflectance(y,x,720,weighted)))) } if (index=="Boochs") { return(return_index(get_reflectance(y,x,703,weighted))) } if (index=="Boochs2") { return(return_index(get_reflectance(y,x,720,weighted))) } if (index=="CARI") { a = (get_reflectance(y,x,700,weighted)-get_reflectance(y,x,550,weighted)) / 150 b = get_reflectance(y,x,550,weighted)-(a*550) return(return_index(get_reflectance(y,x,700,weighted)*abs(a*670+get_reflectance(y,x,670,weighted)+b)/ get_reflectance(y,x,670,weighted)*(a^2+1)^0.5)) } if (index=="CI") { return(return_index(get_reflectance(y,x,675,weighted)*get_reflectance(y,x,690,weighted)/ get_reflectance(y,x,683,weighted)^2)) } if (index=="Carter") { return(return_index(((get_reflectance(y,x,695,weighted))/(get_reflectance(y,x,420,weighted))))) } if (index=="Carter2") { return(return_index(((get_reflectance(y,x,695,weighted))/(get_reflectance(y,x,760,weighted))))) } if (index=="Carter3") { return(return_index(((get_reflectance(y,x,605,weighted))/(get_reflectance(y,x,760,weighted))))) } if (index=="Carter4") { 
return(return_index(((get_reflectance(y,x,710,weighted))/(get_reflectance(y,x,760,weighted))))) } if (index=="Carter5") { return(return_index(((get_reflectance(y,x,695,weighted))/(get_reflectance(y,x,670,weighted))))) } if (index=="Carter6") { return(return_index((get_reflectance(y,x,550,weighted)))) } if (index=="Datt") { return(return_index(((get_reflectance(y,x,850,weighted)-get_reflectance(y,x,710,weighted))/ (get_reflectance(y,x,850,weighted)-get_reflectance(y,x,680,weighted))))) } if (index=="Datt2") { return(return_index(((get_reflectance(y,x,850,weighted))/(get_reflectance(y,x,710,weighted))))) } if (index=="Datt3") { return(return_index(((get_reflectance(y,x,754,weighted))/(get_reflectance(y,x,704,weighted))))) } if (index=="Datt4") { return(return_index(((get_reflectance(y,x,672,weighted))/ (get_reflectance(y,x,550,weighted)*get_reflectance(y,x,708,weighted))))) } if (index=="Datt5") { return(return_index(((get_reflectance(y,x,672,weighted))/(get_reflectance(y,x,550,weighted))))) } if (index=="Datt6") { return(return_index(((get_reflectance(y,x,860,weighted))/ (get_reflectance(y,x,550,weighted)*get_reflectance(y,x,708,weighted))))) } if (index=="Datt7") { return(return_index((get_reflectance(y,x,860,weighted) - get_reflectance(y,x,2218,weighted))/ (get_reflectance(y,x,860,weighted) - get_reflectance(y,x,1928,weighted)))) } if (index=="Datt8") { return(return_index((get_reflectance(y,x,860,weighted) - get_reflectance(y,x,1788,weighted))/ (get_reflectance(y,x,860,weighted) - get_reflectance(y,x,1928,weighted)))) } if (index=="DD") { return(return_index((get_reflectance(y,x,749,weighted)-get_reflectance(y,x,720,weighted))- (get_reflectance(y,x,701,weighted)-get_reflectance(y,x,672,weighted)))) } if (index=="DDn") { return(return_index(2*(get_reflectance(y,x,710,weighted)-get_reflectance(y,x,660,weighted)- get_reflectance(y,x,760,weighted)))) } if (index=="D1") { return(return_index(get_reflectance(y,x,730,weighted)/get_reflectance(y,x,706,weighted))) } if (index=="D2") { return(return_index(get_reflectance(y,x,705,weighted)/get_reflectance(y,x,722,weighted))) } if (index=="DPI") { return(return_index(get_reflectance(y,x,688,weighted)*get_reflectance(y,x,710,weighted)/get_reflectance(y,x,697,weighted)^2)) } if (index=="EVI") { return(return_index(2.5*((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))/ (get_reflectance(y,x,800,weighted)-(6*get_reflectance(y,x,670,weighted))- (7.5*get_reflectance(y,x,475,weighted))+1)))) } if (index=="EGFR") { if (x[1] > 500) return(NULL) if (x[length(x)] < 750) return(NULL) dG <- apply(y[,x>=500 & x<=550],1,max) dRE <- apply(y[,x>=650 & x<=750],1,max) return(return_index(dRE/dG)) } if (index=="EGFN") { if (x[1] > 500) return(NULL) if (x[length(x)] < 750) return(NULL) dG <- apply(y[,x>=500 & x<=550],1,max) dRE <- apply(y[,x>=650 & x<=750],1,max) return(return_index((dRE-dG)/(dRE+dG))) } if (index=="GI") { return(return_index(get_reflectance(y,x,554,weighted)/get_reflectance(y,x,677,weighted))) } if (index=="Gitelson") { return(return_index(1/get_reflectance(y,x,700,weighted))) } if (index=="Gitelson2") { return(return_index((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,800,weighted)/ get_reflectance(y,x,695,weighted)-get_reflectance(y,x,740,weighted))-1)) } if (index=="Green NDVI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,550,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,550,weighted)))) } if (index=="MCARI/OSAVI") { x <- speclib(spectra=y,wavelength=x) 
return(return_index(vegindex(x,"MCARI",weighted=weighted)/vegindex(x,"OSAVI",weighted=weighted))) } if (index=="MCARI2") { return(return_index(((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,705,weighted))- 0.2*(get_reflectance(y,x,750,weighted)-get_reflectance(y,x,550,weighted)))* (get_reflectance(y,x,750,weighted)/get_reflectance(y,x,705,weighted)))) } if (index=="MCARI2/OSAVI2") { x <- speclib(spectra=y,wavelength=x) return(return_index(vegindex(x,"MCARI2",weighted=weighted)/vegindex(x,"OSAVI2",weighted=weighted))) } if (index=="mNDVI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,680,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,680,weighted)- 2*get_reflectance(y,x,445,weighted)))) } if (index=="mND705") { return(return_index((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,705,weighted))/ (get_reflectance(y,x,750,weighted)+get_reflectance(y,x,705,weighted)- 2*get_reflectance(y,x,445,weighted)))) } if (index=="Maccioni") { return(return_index((get_reflectance(y,x,780,weighted)-get_reflectance(y,x,710,weighted))/ (get_reflectance(y,x,780,weighted)-get_reflectance(y,x,680,weighted)))) } if (index=="mREIP") { mREIP_fun <- function(x, wl) { Rs <- x[length(x)-1] R0 <- x[length(x)] x <- x[1:(length(x)-2)] Bl <- -1*log(sqrt((Rs-x)/(Rs-R0))) if (all(is.finite(Bl))) { coef <- summary(lm(Bl~wl))$coefficients c(-1*coef[1,1]/coef[2,1]) } else { c(NA, NA) } } if (x[1] > 670) return(rep.int(NA,nrow(y))) if (x[length(x)] < 795) return(rep.int(NA,nrow(y))) if (nrow(y)==1) { R0 <- matrix(data=apply(matrix(y[,x>=670&x<=685], nrow = 1),1,mean),ncol=1) Rs <- matrix(data=apply(matrix(y[,x>=780&x<=795], nrow = 1),1,mean),ncol=1) Rl <- matrix(y[,x>=670&x<=685], nrow = 1) } else { R0 <- matrix(data=apply(y[,x>=670&x<=685],1,mean),ncol=1) Rs <- matrix(data=apply(y[,x>=780&x<=795],1,mean),ncol=1) Rl <- as.matrix(y[,x>=670&x<=685]) } dat <- cbind(Rl,Rs,R0) Bl <- apply(dat,1,mREIP_fun,x[x>=670&x<=685]) return(return_index(as.vector(t(Bl)))) } if (index=="MSAVI") { return(return_index(0.5 * (2*get_reflectance(y,x,800,weighted)+1-((2*get_reflectance(y,x,800,weighted)+1)^2- 8*(get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted)))^0.5))) } if (index=="mSR") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,445,weighted))/ (get_reflectance(y,x,680,weighted)-get_reflectance(y,x,445,weighted)))) } if (index=="mSR705") { return(return_index((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,445,weighted))/ (get_reflectance(y,x,705,weighted)-get_reflectance(y,x,445,weighted)))) } if (index=="mSR2") { return(return_index((get_reflectance(y,x,750,weighted)/get_reflectance(y,x,705,weighted))- 1/(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,705,weighted)+1)^0.5)) } if (index=="MTCI") { return(return_index((get_reflectance(y,x,754,weighted)-get_reflectance(y,x,709,weighted))/ (get_reflectance(y,x,709,weighted)-get_reflectance(y,x,681,weighted)))) } if (index=="NDVI2") { return(return_index((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,705,weighted))/ (get_reflectance(y,x,750,weighted)+get_reflectance(y,x,705,weighted)))) } if (index=="NDVI3") { return(return_index((get_reflectance(y,x,682,weighted)-get_reflectance(y,x,553,weighted))/ (get_reflectance(y,x,682,weighted)+get_reflectance(y,x,553,weighted)))) } if (index=="NPCI") { return(return_index((get_reflectance(y,x,680,weighted)-get_reflectance(y,x,430,weighted))/ (get_reflectance(y,x,680,weighted)+get_reflectance(y,x,430,weighted)))) } if 
(index=="OSAVI2") { return(return_index((1+0.16) * (get_reflectance(y,x,750,weighted)-get_reflectance(y,x,705,weighted))/ (get_reflectance(y,x,750,weighted)+get_reflectance(y,x,705,weighted)+0.16) )) } if (index=="RDVI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,670,weighted))^0.5)) } if (index=="REP_LE") { if (x[1] > 680) return(NULL) if (x[length(x)] < 760) return(NULL) mFR <- (get_reflectance(y,x,680,weighted)-get_reflectance(y,x,700,weighted))/(680-700) tFR <- get_reflectance(y,x,680,weighted) - mFR * 680 mNIR <- (get_reflectance(y,x,725,weighted)-get_reflectance(y,x,760,weighted))/(725-760) tNIR <- get_reflectance(y,x,725,weighted) - mNIR * 725 return(return_index((tNIR-tFR)/(mFR-mNIR))) } if (index=="REP_Li") { Rre <- (get_reflectance(y,x,670,weighted)+get_reflectance(y,x,780,weighted))/2 return(return_index(700 + 40*(((Rre - get_reflectance(y,x,700,weighted))/ (get_reflectance(y,x,740,weighted)-get_reflectance(y,x,700,weighted)))))) } if (index=="SIPI") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,445,weighted))/ (get_reflectance(y,x,800,weighted)-get_reflectance(y,x,680,weighted)))) } if (index=="SPVI") { return(return_index(0.4*3.7*(get_reflectance(y,x,800,weighted)-get_reflectance(y,x,670,weighted))- 1.2*((get_reflectance(y,x,530,weighted)-get_reflectance(y,x,670,weighted))^2)^0.5)) } if (index=="SR") { return(return_index(get_reflectance(y,x,800,weighted)/get_reflectance(y,x,680,weighted))) } if (index=="SR1") { return(return_index(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,700,weighted))) } if (index=="SR2") { return(return_index(get_reflectance(y,x,752,weighted)/get_reflectance(y,x,690,weighted))) } if (index=="SR3") { return(return_index(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,550,weighted))) } if (index=="SR4") { return(return_index(get_reflectance(y,x,700,weighted)/get_reflectance(y,x,670,weighted))) } if (index=="SR5") { return(return_index(get_reflectance(y,x,675,weighted)/get_reflectance(y,x,700,weighted))) } if (index=="SR6") { return(return_index(get_reflectance(y,x,750,weighted)/get_reflectance(y,x,710,weighted))) } if (index=="SR7") { return(return_index(get_reflectance(y,x,440,weighted)/get_reflectance(y,x,690,weighted))) } if (index=="SR8") { return(return_index(get_reflectance(y,x,515,weighted)/get_reflectance(y,x,550,weighted))) } if (index=="SRPI") { return(return_index(get_reflectance(y,x,430,weighted)/get_reflectance(y,x,680,weighted))) } if (index=="Sum_Dr1") { if (x[1] > 626) return(NULL) if (x[length(x)] < 795) return(NULL) y <- abs(y[,x>=626&x<=795]) return(return_index(as.vector(rowSums(y)))) } if (index=="Sum_Dr2") { if (x[1] > 680) return(NULL) if (x[length(x)] < 780) return(NULL) y <- y[,x>=680&x<=780] return(return_index(as.vector(rowSums(y)))) } if (index=="TCARI") { return(return_index(3*((get_reflectance(y,x,700,weighted)-get_reflectance(y,x,670,weighted))- 0.2*(get_reflectance(y,x,700,weighted)-get_reflectance(y,x,550,weighted))* (get_reflectance(y,x,700,weighted)/get_reflectance(y,x,670,weighted))))) } if (index=="TCARI2") { return(return_index(3*((get_reflectance(y,x,750,weighted)-get_reflectance(y,x,705,weighted))- 0.2*(get_reflectance(y,x,750,weighted)-get_reflectance(y,x,550,weighted))* (get_reflectance(y,x,750,weighted)/get_reflectance(y,x,705,weighted))))) } if (index=="TCARI/OSAVI") { x <- speclib(spectra=y,wavelength=x) 
return(return_index(vegindex(x,"TCARI",weighted=weighted)/vegindex(x,"OSAVI",weighted=weighted))) } if (index=="TCARI2/OSAVI2") { x <- speclib(spectra=y,wavelength=x) return(return_index(vegindex(x,"TCARI2",weighted=weighted)/vegindex(x,"OSAVI2",weighted=weighted))) } if (index=="TVI") { return(return_index(0.5*(120*(get_reflectance(y,x,750,weighted)-get_reflectance(y,x,550,weighted))- 200*(get_reflectance(y,x,670,weighted)-get_reflectance(y,x,550,weighted))))) } if (index=="Vogelmann") { return(return_index(get_reflectance(y,x,740,weighted)/get_reflectance(y,x,720,weighted))) } if (index=="Vogelmann2") { return(return_index((get_reflectance(y,x,734,weighted)-get_reflectance(y,x,747,weighted))/ (get_reflectance(y,x,715,weighted)+get_reflectance(y,x,726,weighted)))) } if (index=="Vogelmann3") { return(return_index(get_reflectance(y,x,715,weighted)/get_reflectance(y,x,705,weighted))) } if (index=="PRI") { return(return_index((get_reflectance(y,x,531,weighted)-get_reflectance(y,x,570,weighted))/ (get_reflectance(y,x,531,weighted)+get_reflectance(y,x,570,weighted)))) } if (index=="PRI_norm") { xx <- speclib(spectra=y,wavelength=x) return(return_index(vegindex(xx,"PRI",weighted=weighted)*(-1)/(vegindex(xx,"RDVI",weighted=weighted)* get_reflectance(y,x,700,weighted)/get_reflectance(y,x,670,weighted)))) } if (index=="TCARI") { return(return_index(3*((get_reflectance(y,x,700,weighted)-get_reflectance(y,x,670,weighted))- 0.2*(get_reflectance(y,x,700,weighted)-get_reflectance(y,x,550,weighted))* (get_reflectance(y,x,700,weighted)/get_reflectance(y,x,670,weighted))))) } if (index=="CAI") { return(return_index(0.5*(get_reflectance(y,x,2000,weighted)+get_reflectance(y,x,2200,weighted))- get_reflectance(y,x,2100,weighted))) } if (index=="NDNI") { return(return_index((log(1/get_reflectance(y,x,1510,weighted)) - log(1/get_reflectance(y,x,1680,weighted)))/ (log(1/get_reflectance(y,x,1510,weighted)) + log(1/get_reflectance(y,x,1680,weighted))))) } if (index=="NDLI") { return(return_index((log(1/get_reflectance(y,x,1754,weighted)) - log(1/get_reflectance(y,x,1680,weighted)))/ (log(1/get_reflectance(y,x,1754,weighted)) + log(1/get_reflectance(y,x,1680,weighted))))) } if (index=="PARS") { return(return_index(get_reflectance(y,x,746,weighted)/get_reflectance(y,x,513,weighted))) } if (index=="PSSR") { return(return_index(get_reflectance(y,x,800,weighted)/get_reflectance(y,x,635,weighted))) } if (index=="PSND") { return(return_index((get_reflectance(y,x,800,weighted)-get_reflectance(y,x,470,weighted))/ (get_reflectance(y,x,800,weighted)+get_reflectance(y,x,470,weighted)))) } if (index=="CRI1") { return(return_index(1/get_reflectance(y,x,515,weighted)-1/get_reflectance(y,x,550,weighted))) } if (index=="CRI2") { return(return_index(1/get_reflectance(y,x,515,weighted)-1/get_reflectance(y,x,700,weighted))) } if (index=="CRI3") { return(return_index(1/get_reflectance(y,x,515,weighted)-1/get_reflectance(y,x,550,weighted)* get_reflectance(y,x,770,weighted))) } if (index=="CRI4") { return(return_index(1/get_reflectance(y,x,515,weighted)-1/get_reflectance(y,x,700,weighted)* get_reflectance(y,x,770,weighted))) } if (index=="MPRI") { return(return_index((get_reflectance(y,x,515,weighted)-get_reflectance(y,x,530,weighted)/ (get_reflectance(y,x,515,weighted)+get_reflectance(y,x,530,weighted))))) } if (index=="PRI*CI2") { x <- speclib(spectra=y,wavelength=x) return(return_index(vegindex(x,"PRI",weighted=weighted)*vegindex(x,"CI2",weighted=weighted))) } if (index=="CI2") { 
return(return_index(get_reflectance(y,x,760,weighted)/get_reflectance(y,x,700,weighted)-1)) } if (index=="PSRI") { return(return_index((get_reflectance(y,x,678,weighted)-get_reflectance(y,x,500,weighted))/ get_reflectance(y,x,750,weighted))) } if (index=="ClAInt") { if (x[1] > 600) return(NULL) if (x[length(x)] < 735) return(NULL) y <- abs(y[,x>=600&x<=735]) return(return_index(as.vector(rowSums(y)))) } if (index=="TGI") { return(return_index(-0.5*(190*(get_reflectance(y,x,670,weighted)-get_reflectance(y,x,550,weighted)) - 120*(get_reflectance(y,x,670,weighted)-get_reflectance(y,x,480,weighted))))) } if (substr(index, 1, 4) == "GDVI") { pow <- strsplit(index, "_") if (length(pow[[1]]) < 2) { pow <- 2 warning("Exponent of GDVI missing. Use 2 for exponent") } pow <- try(as.numeric(pow[[1]][2]), silent = TRUE) if (inherits(pow, "try-error")) { pow <- 2 warning("Exponent of GDVI not numeric. Use 2 for exponent") } return(return_index((get_reflectance(y,x,800,weighted)^pow-get_reflectance(y,x,680,weighted)^pow) / (get_reflectance(y,x,800,weighted)^pow+get_reflectance(y,x,680,weighted)^pow))) } if (index=="LWVI1") { return(return_index((get_reflectance(y,x,1094,weighted)-get_reflectance(y,x,983,weighted)) / (get_reflectance(y,x,1094,weighted)+get_reflectance(y,x,983,weighted)))) } if (index=="LWVI2") { return(return_index((get_reflectance(y,x,1094,weighted)-get_reflectance(y,x,1205,weighted)) / (get_reflectance(y,x,1094,weighted)+get_reflectance(y,x,1205,weighted)))) } if (index=="DWSI1") { return(return_index(get_reflectance(y,x,800,weighted)/get_reflectance(y,x,1660,weighted))) } if (index=="DWSI2") { return(return_index(get_reflectance(y,x,1660,weighted)/get_reflectance(y,x,550,weighted))) } if (index=="DWSI3") { return(return_index(get_reflectance(y,x,1660,weighted)/get_reflectance(y,x,680,weighted))) } if (index=="DWSI4") { return(return_index(get_reflectance(y,x,550,weighted)/get_reflectance(y,x,680,weighted))) } if (index=="DWSI5") { return(return_index((get_reflectance(y,x,800,weighted)+get_reflectance(y,x,550,weighted)) / (get_reflectance(y,x,1660,weighted)+get_reflectance(y,x,680,weighted)))) } if (index=="SWIR FI") { return(return_index((get_reflectance(y,x,2133,weighted)^2) / (get_reflectance(y,x,2225,weighted)*get_reflectance(y,x,2209,weighted)^3))) } if (index=="SWIR LI") { return(return_index(3.87* (get_reflectance(y,x,2210,weighted)-get_reflectance(y,x,2090,weighted)) - 27.51*(get_reflectance(y,x,2280,weighted)-get_reflectance(y,x,2090,weighted)) - 0.2)) } if (index=="SWIR SI") { return(return_index(-41.59* (get_reflectance(y,x,2210,weighted)-get_reflectance(y,x,2090,weighted)) + 1.24*(get_reflectance(y,x,2280,weighted)-get_reflectance(y,x,2090,weighted)) + 0.64)) } if (index=="SWIR VI") { return(return_index(37.72* (get_reflectance(y,x,2210,weighted)-get_reflectance(y,x,2090,weighted)) + 26.27*(get_reflectance(y,x,2280,weighted)-get_reflectance(y,x,2090,weighted)) + 0.57)) } x <- x * .ConvWlBwd(wlunit) index <- gsub("R", "", gsub("(R[.0-9]+)", "get_reflectance(y,x,\\1,weighted)", index, perl = TRUE) ) index <- gsub("D", "", gsub("(D[.0-9]+)", "get_reflectance(spectra(derivative.speclib(x_back, m=1, ...)),x,\\1,weighted)", index, perl = TRUE) ) index <- gsub("S", "", gsub("(S[.0-9]+)", "get_reflectance(spectra(derivative.speclib(x_back, m=2, ...)),x,\\1,weighted)", index, perl = TRUE) ) index_val <- try(return_index(eval(parse(text = index))), silent = TRUE) if (inherits(index_val, "try-error")) { cat("Error in self-defined index string or unimplemented index selected\n") 
cat("Index string evals to:\n") cat(paste(index, "\n")) return(NULL) } return(index_val) }
KNN_C <- function(train, test, k=1, distance="Euclidean"){ alg <- RKEEL::R6_KNN_C$new() alg$setParameters(train, test, k, distance) return (alg) } R6_KNN_C <- R6::R6Class("R6_KNN_C", inherit = ClassificationAlgorithm, public = list( k = 1, distance = "Euclidean", setParameters = function(train, test, k=1, distance="Euclidean"){ super$setParameters(train, test) self$k <- k if((tolower(distance) == "euclidean") || (tolower(distance) == "manhattan") || (tolower(distance) == "hvdm")){ self$distance <- distance } else{ self$distance <- "Euclidean" } } ), private = list( jarName = "KNN.jar", algorithmName = "KNN-C", algorithmString = "K Nearest Neighbors Classifier", getParametersText = function(){ text <- "" text <- paste0(text, "K Value = ", self$k, "\n") text <- paste0(text, "Distance Function = ", self$distance, "\n") return(text) } ) )
rbind_columnwise <- function(...) { input <- lapply(list(...), function(x) slam::as.simple_triplet_matrix(x)) .convert_internal_result_to_seqR_Matrix_class(.cpp_merge_kmer_results(input)) } .convert_internal_result_to_seqR_Matrix_class <- function(seqR_list) { .convert_seqR_list_to_Matrix_class(seqR_list) } .convert_seqR_list_to_Matrix_class <- function(seqR_list) { if (length(seqR_list$i) == 0) { Matrix::Matrix(nrow=seqR_list$nrow, ncol=0, sparse = TRUE) } else { dimnames <- .get_dimnames(seqR_list) Matrix::sparseMatrix( i = seqR_list$i, j = seqR_list$j, x = seqR_list$v, dims = c(seqR_list$nrow, seqR_list$ncol), dimnames = dimnames ) } } .get_dimnames <- function(seqR_list) { list(NULL, seqR_list$names) }
pargroup <- function (A) { nr <- nrow(A) nc <- ncol(A) tol <- 2.220446e-16 r <- 1 for (i in 1:nc) { pivot <- which.max(abs(A[r:nr, i])) pivot <- r + pivot - 1 m <- abs(A[pivot, i]) if (m <= tol) { A[r:nr, i] <- 0 } else { A[c(pivot, r), i:nc] <- A[c(r, pivot), i:nc] A[r, i:nc] <- A[r, i:nc]/A[r, i] if (r == 1) { ridx <- c((r + 1):nr) } else if (r == nr) { ridx <- c(1:(r - 1)) } else { ridx <- c(1:(r - 1), (r + 1):nr) } A[ridx, i:nc] <- A[ridx, i:nc] - A[ridx, i, drop = FALSE] %*% A[r, i:nc, drop = FALSE] if (r == nr) break r <- r + 1 } } A[abs(A) < tol] <- 0 pos <- c() for(i in 1:nc) { if(!all(A[,i] %in% c(0,1)) | sum(A[,i] == 1) > 1) { pos <- c(pos, i) } } A2 <- A while(length(pos) > 1) { A2[,pos[1]] <- 0 temp <- rep(0, ncol(A2)) temp[pos[1]] <- 1 pos2 <- c(1) for(j in 2:length(pos)) { if(all(A[,pos[j]] == A[,pos[1]])) { temp[pos[j]] <- 1 A2[,pos[j]] <- 0 pos2 <- c(pos2, j) } } A2 <- rbind(A2, temp) pos <- setdiff(pos, pos[pos2]) } if(length(pos) == 1) { temp <- rep(0, ncol(A2)) temp[pos[1]] <- 1 A2[,pos[1]] <- 0 A2 <- rbind(A2, temp) } rownames(A2)<- NULL A2 }
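# Hedged usage sketch (not part of the original source): pargroup() row-reduces a
# matrix and, for columns that do not receive their own pivot (i.e. columns that are
# linear combinations of earlier ones), zeroes them in the reduced matrix and appends
# an indicator row marking them. The matrix below is made up; its first column is the
# sum of the other two, so one column cannot be pivoted on its own.
A <- cbind(c(1, 1, 1, 1), c(1, 0, 1, 0), c(0, 1, 0, 1))
pargroup(A)   # reduced matrix plus one appended indicator row for the dependent column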
tidy.margins <- function(x, conf.int = FALSE, conf.level = 0.95, ...) {
  ret <- as_tibble(summary(x, level = conf.level))
  if ("at" %in% names(attributes(x))) {
    at_vars <- setdiff(names(attributes(x)$at), "index")
    std_cols <- c("factor", "AME", "SE", "z", "p", "lower", "upper")
    # Reshape the "at" variables into long format; if the columns have mixed types,
    # coerce them to character first and tell the user.
    ret <- tryCatch(
      tidyr::pivot_longer(ret, dplyr::all_of(at_vars),
                          names_to = "at.variable", values_to = "at.value"),
      error = function(e) {
        message("Warning: `at.value` column coerced to character.")
        ret %>%
          dplyr::mutate(dplyr::across(dplyr::all_of(at_vars), as.character)) %>%
          tidyr::pivot_longer(dplyr::all_of(at_vars),
                              names_to = "at.variable", values_to = "at.value")
      }
    )
  }
  ret <- ret %>%
    dplyr::select(
      term = .data$factor,
      dplyr::contains("at."),
      estimate = .data$AME,
      std.error = .data$SE,
      statistic = .data$z,
      p.value = .data$p,
      conf.low = .data$lower,
      conf.high = .data$upper
    )
  if (!conf.int) {
    ret <- dplyr::select(ret, -c(conf.low, conf.high))
  }
  return(ret)
}
glance.margins <- function(x, ...) {
  # glance() is computed by re-evaluating the original model call stored on the object
  orig_mod_call <- attributes(x)$call
  ret <- broom::glance(eval(orig_mod_call), ...)
  return(ret)
}
augment.margins <- augment.default
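# Hedged usage sketch (not part of the original source); it assumes the margins,
# broom, tibble and dplyr packages are installed, and attaches dplyr/tibble so the
# bare `%>%` and as_tibble() used above can be found.
if (requireNamespace("margins", quietly = TRUE)) {
  library(dplyr)
  library(tibble)
  fit <- glm(am ~ hp + wt, data = mtcars, family = binomial)
  mar <- margins::margins(fit)
  tidy.margins(mar)     # average marginal effects in broom's term/estimate layout
  glance.margins(mar)   # model-level statistics recomputed from the original glm call
}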
extract_xpdf <- function(...){ .Defunct(package = "crminer", msg = "Removed - see crminer::crm_extract()") }
vis_value_plot <- vis_value(airquality) test_that("vis_value creates the right plot",{ skip_on_cran() skip_on_ci() vdiffr::expect_doppelganger("vis_value vanilla", vis_value_plot) }) test_that("vis_value sends an error when used with the wrong data",{ expect_snapshot( error = TRUE, vis_value(iris) ) })
`vrais.LDL.add` <- function(moyenne.pere, alpha.Q, s, CD, perf, PLA, DL.m, DL.chrom1, DL.chrom2, desc.pere, mean.gene) {
  # Log-likelihood ("vraisemblance") of the additive LDL model, accumulated over sires
  # ("pere" = sire/father); desc.pere[i, ] holds the first and last record index of sire i's offspring.
  nb.pere <- length(desc.pere[, 1])
  logvrais <- 0
  for (i in 1:nb.pere) {
    deb <- desc.pere[i, 1]   # first offspring record of sire i
    fin <- desc.pere[i, 2]   # last offspring record of sire i
    vrais.intra.pere <- vrais.LDL.add.pere(moyenne.pere[i], alpha.Q, s, CD[deb:fin], perf[deb:fin],
                                           PLA[deb:fin], DL.m[deb:fin], DL.chrom1[i], DL.chrom2[i], mean.gene)
    logvrais <- logvrais + vrais.intra.pere
  }
  logvrais
}
epi.pooled <- function(se, sp, P, m, r){
  # Herd-level test characteristics when r pools of m samples each are tested,
  # given individual-level prevalence P and test sensitivity se / specificity sp.
  PlSp <- sp^m      # pool-level specificity
  HSp <- (PlSp)^r   # herd-level specificity: all r pools test negative in a disease-free herd
  HSe <- 1 - ((1 - (1 - P)^m) * (1 - se) + (1 - P)^m * PlSp)^r   # herd-level sensitivity
  HAPneg <- 1 - HSp # probability of at least one positive pool in a disease-free herd
  rval <- list(HAPneg = HAPneg, HSe = HSe, HSp = HSp)
  rval
}
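# Hedged usage sketch (not part of the original source), with made-up inputs:
# a test with se = 0.95 and sp = 0.98, individual prevalence of 0.05, and a herd
# sampled as r = 4 pools of m = 5 animals each.
pooled.res <- epi.pooled(se = 0.95, sp = 0.98, P = 0.05, m = 5, r = 4)
pooled.res$HSe      # herd-level sensitivity
pooled.res$HSp      # herd-level specificity
pooled.res$HAPneg   # chance of at least one positive pool in a disease-free herd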
SeeDist <- function(x, title = "Default", subtitle = "Default", numbins = 0, xlab = NULL, var_explain = NULL, data.fill.color = "deepskyblue", mean.line.color = "darkgreen", median.line.color = "yellow", mode.line.color = "orange", mean.line.type = "longdash", median.line.type = "dashed", mode.line.type = "dashed", mean.line.size = 1.5, median.line.size = 1.5, mean.point.shape = 21, median.point.shape = 23, mean.point.size = 4, median.point.size = 4, zcurve.color = "red", zcurve.type = "twodash", zcurve.size = 1, tcurve.color = "black", tcurve.type = "dotted", tcurve.size = 1, mode.line.size = 1, whatplots = c("d", "b", "h", "v"), k = 2, add_jitter = TRUE, add_rug = TRUE, xlim_left = NULL, xlim_right = NULL, ggtheme = ggplot2::theme_bw() ) { ggplot2::theme_set(ggtheme) if (!is.numeric(x)) { stop("Sorry the data must be numeric") } x_name <- deparse(substitute(x)) if (is.null(xlab)) { xlab <- x_name } binnumber <- nclass.FD(x) binnumber <- ifelse(numbins == 0, binnumber, numbins) desc.output <- DescTools::Desc(x, plotit = FALSE, main = xlab, digits = k) if (sum(is.na(x)) != 0) { missing_count <- sum(is.na(x)) warning(paste("Removing", missing_count, "missing values"), call. = FALSE) x <- x[!is.na(x)] } x_mean <- desc.output[[1]]$mean x_sd <- desc.output[[1]]$sd x_median <- desc.output[[1]]$quant['median'] x_mode <- CGPfunctions::Mode(x) x_skew <- desc.output[[1]]$skew x_kurtosis <- desc.output[[1]]$kurt if (length(x_mode) >= 4) { warning(paste("There are", length(x_mode)), " modal values displaying just the first 3", call. = FALSE) x_mode <- x_mode[c(1, 2, 3)] } my_jitter_geom <- list() if (add_jitter) { my_jitter_geom <- list( geom_jitter(aes(x = "", y = x), width = 0.05, height = 0, alpha = .5) ) } my_rug_geom <- list() if (add_rug) { my_rug_geom <- list( geom_rug(aes(y = 0), sides = "b") ) } if (is.null(xlim_left)) { xlim_left <- -3 * x_sd + x_mean } if (is.null(xlim_right)) { xlim_right <- +3 * x_sd + x_mean } if (!is.null(title) && title == "Default") { my_title <- paste0("Distribution of the variable ", x_name, " ", var_explain ) } else { my_title <- title } make_subtitle <- function(x, mean_x, sd_x, median_x, Skew_x, Kurtosis_x, k = k) { ret_subtitle <- bquote("N =" ~ .(length(x)) * "," ~ bar(X) ~ "=" ~ .(round(mean_x, k)) * ", SD =" ~ .(round(sd_x, k)) * ", Median =" ~ .(round(median_x, k)) * ", Skewness =" ~ .(round(Skew_x, k)) * ", Kurtosis =" ~ .(round(Kurtosis_x, k) ) ) } if (!is.null(subtitle) && subtitle == "Default") { my_subtitle <- make_subtitle(x, x_mean, x_sd, x_median, x_skew, x_kurtosis, k) } else { my_subtitle <- subtitle } mycaption <- bquote(bar(X) ~ "is" ~ .(mean.line.color) ~ ", Median is" ~ .(median.line.color) ~ ", Mode is" ~ .(mode.line.color) ~ ", z curve is" ~ .(zcurve.color) ~ ", t curve is" ~ .(tcurve.color) ) custom_t_function <- function(x, mu, nu, df, ncp) { dt((x - mu)/nu, df, ncp) / nu } if ("d" %in% tolower(whatplots)) { p <- ggplot(data.frame(x)) + aes(x) + geom_density(fill = data.fill.color, ) + stat_function(fun = dnorm, color = zcurve.color, linetype = zcurve.type, size = zcurve.size, args = list(mean = x_mean, sd = x_sd) ) + stat_function(fun = custom_t_function, color = tcurve.color, linetype = tcurve.type, size = tcurve.size, args = list(mu = x_mean, nu = x_sd, df = length(x) - 1, ncp = 0) ) + geom_vline(xintercept = x_mean, colour = mean.line.color, linetype = mean.line.type, size = mean.line.size) + geom_vline(xintercept = x_median, colour = median.line.color, linetype = median.line.type, size = median.line.size) + geom_vline(xintercept = 
x_mode, colour = mode.line.color, linetype = mode.line.type, size = mode.line.size) + my_rug_geom + labs( title = my_title, subtitle = my_subtitle, x = xlab, caption = mycaption ) + xlim(xlim_left, xlim_right) + theme( axis.title.y = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.line.y = element_blank(), panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank() ) print(p) } if ("b" %in% tolower(whatplots)) { pp <- ggplot(data.frame(x)) + aes(x) + labs( title = my_title, subtitle = my_subtitle, y = xlab, caption = mycaption ) + stat_boxplot(aes(x = "", y = x), geom = "errorbar", width = 0.2) + geom_boxplot(aes(x = "", y = x), fill = data.fill.color, outlier.color = data.fill.color) + coord_flip() + geom_point(aes(x = "", y = x_mean), shape = mean.point.shape, size = mean.point.size, fill = mean.line.color) + theme( axis.title.y = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.line.y = element_blank(), panel.grid.major.y = element_blank() ) print(pp) } if ("h" %in% tolower(whatplots)) { ppp <- ggplot(data.frame(x)) + aes(x) + labs( title = my_title, subtitle = my_subtitle, x = xlab, caption = mycaption ) + geom_histogram(bins = binnumber, color = "black", fill = data.fill.color) + my_rug_geom + geom_vline(xintercept = x_mean, colour = mean.line.color, linetype = mean.line.type, size = mean.line.size) + geom_vline(xintercept = x_median, colour = median.line.color, linetype = median.line.type, size = median.line.size) + geom_vline(xintercept = x_mode, colour = mode.line.color, linetype = mode.line.type, size = mode.line.size) print(ppp) } if ("v" %in% tolower(whatplots)) { pppp <- ggplot(data.frame(x)) + aes(x) + labs( title = my_title, subtitle = my_subtitle, y = xlab, caption = mycaption ) + geom_violin(aes(x = "", y = x), fill = data.fill.color) + my_jitter_geom + coord_flip() + geom_point(aes(x = "", y = x_mean), shape = mean.point.shape, size = mean.point.size, fill = mean.line.color) + geom_point(aes(x = "", y = x_median), shape = median.point.shape, size = median.point.size, fill = median.line.color) + theme( axis.title.y = element_blank(), axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.line.y = element_blank(), panel.grid.major.y = element_blank() ) print(pppp) } return(desc.output) }
lifeTable <- function (dataSet, timeColumn, censColumn, intervalBorders = NULL) {
  if (!is.data.frame(dataSet)) {
    stop("Data set is not of type data.frame! Please convert the data!")
  }
  if (!(length(timeColumn) == 1 & is.character(timeColumn))) {
    stop("The column name is not correctly specified! The argument must be a single character string.")
  }
  if (!(length(censColumn) == 1 & is.character(censColumn))) {
    stop("The column name is not correctly specified! The argument must be a single character string.")
  }
  if (!is.null(intervalBorders)) {
    if (!is.character(intervalBorders)) {
      stop("The interval borders are not in character format! Please give the appropriate format, e.g. [0, a_1), [a_1, a_2), ..., [a_{q-1}, a_q) with a_q being the number of intervals")
    }
    MaxTime <- max(dataSet[, timeColumn])
    if (!(length(intervalBorders) == MaxTime)) {
      stop("The interval borders do not have the same length as the number of unique intervals! Please give the appropriate format, e.g. [0, a_1), [a_1, a_2), ..., [a_{q-1}, a_q) with a_q being the number of intervals")
    }
  }
  atRiskInitial <- dim(dataSet)[1]
  dataSet <- dataSet[order(dataSet[, timeColumn]), ]
  # Events and censored observations (dropouts) per observed time point
  formulaInput <- as.formula(paste(censColumn, timeColumn, sep = "~"))
  events <- aggregate(formulaInput, data = dataSet, FUN = function(x) sum(x))[, 2]
  dropouts <- aggregate(formulaInput, data = dataSet, FUN = function(x) sum(1 - x))[, 2]
  atRiskInput <- c(atRiskInitial, atRiskInitial - cumsum(events + dropouts))
  atRiskInput <- atRiskInput[-length(atRiskInput)]
  times <- as.numeric(names(table(as.numeric(as.character(dataSet[, timeColumn])))))
  # Fill in intermediate intervals without any observed event or dropout
  Index <- which(diff(times) > 1)
  while (any(diff(times) > 1)) {
    Index <- which(diff(times) > 1)[1]
    atRiskInput <- c(atRiskInput[1:Index], atRiskInput[Index] - (events[Index] + dropouts[Index]), atRiskInput[(Index + 1):length(atRiskInput)])
    events <- c(events[1:Index], 0, events[(Index + 1):length(events)])
    dropouts <- c(dropouts[1:Index], 0, dropouts[(Index + 1):length(dropouts)])
    times <- c(times[1:Index], times[Index] + 1, times[(Index + 1):length(times)])
  }
  if (times[1] != 1) {
    atRiskInput <- c(rep(atRiskInput[1], times[1] - 1), atRiskInput)
    events <- c(rep(0, times[1] - 1), events)
    dropouts <- c(rep(0, times[1] - 1), dropouts)
    times <- c(1:(times[1] - 1), times)
  }
  # Actuarial life table quantities
  atRisk <- atRiskInput - dropouts / 2
  haz <- events / atRisk
  S <- cumprod(1 - haz)
  sehaz <- sqrt((haz - haz^2) / atRisk)
  seS <- S * sqrt(cumsum(haz / (1 - haz) / (atRisk)))
  cumHazard <- cumsum(haz)
  seCumHazard <- sqrt(cumsum(events / atRisk^2))
  if (is.null(intervalBorders)) {
    RowNames <- paste("[", c(0, times[-length(times)]), ", ", times, ")", sep = "")
  } else {
    RowNames <- intervalBorders
  }
  margProb <- haz * c(1, S[-length(S)])
  Output <- data.frame(n = atRiskInput, events = events, dropouts = dropouts, atRisk = atRisk,
                       hazard = haz, seHazard = sehaz, S = S, seS = seS,
                       cumHazard = cumHazard, seCumHazard = seCumHazard, margProb = margProb,
                       row.names = RowNames)
  Output <- list(Output = Output)
  class(Output) <- "discSurvLifeTable"
  return(Output)
}
print.discSurvLifeTable <- function (x, ...) {
  x <- x[[1]]
  for (i in 1:dim(x)[2]) {
    x[, i] <- round(x[, i], 4)
  }
  print(x)
}
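# Hedged usage sketch (not part of the original source): a small artificial
# discrete-time data set with integer survival times and an event indicator
# (1 = event, 0 = censored/dropout).
toy <- data.frame(obsTime = c(1, 1, 2, 3, 3, 4, 5, 5),
                  status  = c(1, 0, 1, 1, 0, 1, 0, 1))
lt <- lifeTable(dataSet = toy, timeColumn = "obsTime", censColumn = "status")
lt   # printed via print.discSurvLifeTable, values rounded to 4 decimals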
OptimiseParametersFNB <- function(area, observed, extent, model = "FNB", starting.params = NULL) {
  # Look up the model-specific residual and prediction functions, e.g. ResidFNB() / PredictFNB()
  resid.fun <- getFunction(paste("Resid", model, sep = ""))
  pred.fun <- getFunction(paste("Predict", model, sep = ""))   # not used below
  # Use the model's default starting values unless the caller supplies some
  if (is.null(starting.params)) {
    starting.pars <- get(paste("Params", model, sep = ""))
  } else {
    starting.pars <- starting.params
  }
  # Fit by Levenberg-Marquardt non-linear least squares on the log of the observations
  optimisation <- minpack.lm::nls.lm(par = starting.pars, fn = resid.fun,
                                     area = area, extent = extent, observed = log(observed),
                                     lower = c("N" = -Inf, "k" = 0),
                                     upper = c("N" = Inf, "k" = min(area, na.rm = TRUE) * 100),
                                     control = minpack.lm::nls.lm.control(maxiter = 1000))
  optim.pars <- as.list(coef(optimisation))
  return(optim.pars)
}
midint <- function(data) {
  if (!is.data.frame(data)) stop("Invalid input: 'data' must be a data.frame")
  if (!all(c("Start.Date", "End.Date") %in% names(data)))
    stop("Error: data does not contain columns named Start.Date/End.Date")
  # Parse the start and end dates as UTC timestamps; a parsing failure yields NA
  start <- tryCatch(as.POSIXct(as.character(data$Start.Date), tz = "UTC"),
                    error = function(e) return(NA))
  end <- tryCatch(as.POSIXct(as.character(data$End.Date), tz = "UTC"),
                  error = function(e) return(NA))
  # any() is needed because start/end are vectors with one entry per row
  if (any(is.na(start)) || any(is.na(end))) stop("Error: check the date format in data")
  # Midpoint of each interval: start plus half the interval length in seconds (lubridate)
  start + lubridate::time_length(lubridate::interval(start, end)) / 2
}
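# Hedged usage sketch (not part of the original source); requires the lubridate
# package to be installed. The two monitoring periods below are made up.
periods <- data.frame(Start.Date = c("2020-01-01 00:00:00", "2020-06-01 00:00:00"),
                      End.Date   = c("2020-01-03 00:00:00", "2020-06-11 00:00:00"))
midint(periods)   # POSIXct midpoint of each Start.Date/End.Date pair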
names_list <- function(df, len){ df %>% names %>% gtools::combinations(n = length(.), r = len, v = . ) %>% as.data.frame() %>% as.list() } make_distincts <- function(df, ...){ df %>% dplyr::select(...) -> id_cols nms_list <- list() for(i in seq_along(id_cols)) { nms_list %>% append(list(names_list(id_cols, i))) -> nms_list } nms_list } determine_distinct <- function(df, ..., listviewer = TRUE){ n_dupes(df) -> d_rows if(d_rows > 0) { print(stringr::str_glue("database has {d_rows} duplicate rows, and will eliminate them")) df <- dplyr::distinct(df)} get_unique_col_names(df) -> unique_names df %>% framecleaner::select_otherwise(..., otherwise = tidyselect::everything(), return_type = "names") %>% setdiff(unique_names) -> db_names df %>% dplyr::select(-tidyselect::any_of(unique_names)) -> df make_distincts(df, tidyselect::any_of(db_names)) -> dst_list distinct_combos <- list() new_list <- list() for(j in seq_along(dst_list)){ stringr::str_c("LEVEL ", j) -> col_nm dst_list %>% purrr::pluck(j) -> the_lev filter_list(smaller_list = distinct_combos, bigger_list = data.table::transpose(the_lev)) %>% data.table::transpose() -> the_lev utils::capture.output( the_lev %>% purrr::pmap_lgl(., ~confirm_distinct(df, ...)) -> dst_nms) the_lev %>% as.data.frame() %>% dplyr::filter(dst_nms) -> d1 d1 %>% rows_to_list() -> l1 l1 %>% append(distinct_combos) -> distinct_combos if(nrow(d1) != 0){ d1 %>% tidyr::unite(col = !!col_nm, sep = ", ") %>% append(new_list) -> new_list} } new_list[["LEVEL 1"]] <- as.list(unique_names) new_list %>% purrr::map(~if(rlang::is_empty(.)) {. <- 'no primary keys'} else{.}) -> output if(listviewer){ output %>% listviewer::jsonedit(.) } else{ output } } pivot_summary <- function(sumr, ...){ column <- rowname <- NULL if (!missing(..1)) { sumr %>% tidyr::unite(col = "column", ..., remove = T) %>% dplyr::relocate(column) -> sumr1 sumr1 %>% dplyr::select(-1) %>% as.matrix() %>% mode -> output_mode } else{ sumr -> sumr1 } sumr1 %>% t %>% as.data.frame() %>% tibble::rownames_to_column() %>% tibble::as_tibble() %>% dplyr::rename(column = rowname) %>% dplyr::arrange(column) -> sumr2 if (!missing(..1)) { sumr2 %>% janitor::row_to_names(row_number = 1) %>% dplyr::mutate(dplyr::across(-1, ~as(., output_mode)))-> sumr3 } else{ sumr2 -> sumr3 } sumr3 } get_unique_col_names <- function(df){ nrow(df) -> rws V1 <- column <- NULL df %>% dplyr::summarize(dplyr::across(.fns = ~dplyr::n_distinct(.) == rws)) %>% pivot_summary() %>% dplyr::filter(V1) %>% dplyr::pull(column) } rows_to_list <- function(df){ df %>% t() %>% as.data.frame() %>% lapply(unlist)} is_subset_list <- function(chr, chr_list){ any(purrr::map_lgl(chr_list, ~all(is.element(el = ., set = chr)))) } filter_list <- function(smaller_list, bigger_list){ bigger_list %>% purrr::map_lgl(~is_subset_list(chr = ., chr_list = smaller_list)) -> logical_vec purrr::discard(bigger_list, logical_vec) }
cste_surv <- function(x, y, z, s, h) {
  n <- nrow(z)
  p <- ncol(z)
  sep <- 20   # kept from the original; not used below
  # Risk-set style indicator matrix: R[i, j] = 1 if y[j] >= y[i]
  myfun <- function(w) { return(as.numeric(y >= w)) }
  R <- matrix(sapply(y, myfun), n, n, byrow = TRUE)
  stdel <- matrix(0, nrow = n, ncol = p)
  # Minimise the local objective lpl() at each observation with a derivative-free
  # Nelder-Mead search; nmk() is assumed to come from the dfoptim package
  for (i in 1:n) {
    tempfun <- function(t) { return(lpl(t, x, x[i], R, z, s, h)) }
    ans <- dfoptim::nmk(rep(0, 2 * p + 1), tempfun)$par
    stdel[i, ] <- ans[1:p]
  }
  return(stdel)
}
spDynLM <- function(formula, data = parent.frame(), coords, knots, starting, tuning, priors, cov.model, get.fitted=FALSE, n.samples, verbose=TRUE, n.report=100, ...){ formal.args <- names(formals(sys.function(sys.parent()))) elip.args <- names(list(...)) for(i in elip.args){ if(! i %in% formal.args) warning("'",i, "' is not an argument") } if(missing(formula)){stop("error: formula must be specified")} if(class(formula) == "list"){ holder <- mkspDynMats(formula, data) Y <- holder[[1]] X <- as.matrix(holder[[2]]) x.names <- holder[[3]] }else{ stop("error: formula is misspecified") } p <- ncol(X) n <- nrow(Y) N.t <- length(formula) miss <- as.vector(is.na(Y)) storage.mode(Y) <- "double" storage.mode(X) <- "double" storage.mode(p) <- "integer" storage.mode(n) <- "integer" storage.mode(N.t) <- "integer" storage.mode(miss) <- "integer" if(!is.matrix(coords)){stop("error: coords must n-by-2 matrix of xy-coordinate locations")} if(ncol(coords) != 2 || nrow(coords) != n){ stop("error: either the coords have more than two columns or then number of rows is different than data used in the model formula") } is.pp <- FALSE if(!missing(knots)){ if(is.vector(knots) && length(knots) %in% c(2,3)){ if(knots[1] > 1){ x.knots <- seq(min(coords[,1]), max(coords[,1]), length.out=knots[1]) }else{ x.knots <- (max(coords[,1])-min(coords[,1]))/2 } if(knots[2] > 1){ y.knots <- seq(min(coords[,2]), max(coords[,2]), length.out=knots[2]) }else{ y.knots <- (max(coords[,2])-min(coords[,2]))/2 } if(length(knots) == 2){ if(knots[1] > 1){ x.int <- (x.knots[2]-x.knots[1])/2 x.knots <- seq(min(x.knots)-x.int, max(x.knots)+x.int, length.out=knots[1]) } if(knots[2] > 1){ y.int <- (y.knots[2]-y.knots[1])/2 y.knots <- seq(min(y.knots)-y.int, max(y.knots)+y.int, length.out=knots[2]) } knot.coords <- as.matrix(expand.grid(x.knots, y.knots)) is.pp <- TRUE }else{ if(knots[1] > 1){ x.int <- knots[3] x.knots <- seq(min(x.knots)-x.int, max(x.knots)+x.int, length.out=knots[1]) } if(knots[2] > 1){ y.int <- knots[3] y.knots <- seq(min(y.knots)-y.int, max(y.knots)+y.int, length.out=knots[2]) } knot.coords <- as.matrix(expand.grid(x.knots, y.knots)) is.pp <- TRUE } }else if(is.matrix(knots) && ncol(knots) == 2){ knot.coords <- knots is.pp <- TRUE }else{ stop("error: knots is misspecified") } } m <- 0 coords.D <- 0 knots.D <- 0 coords.knots.D <- 0 if(is.pp){ knots.D <- iDist(knot.coords) m <- nrow(knots.D) coords.knots.D <- iDist(coords, knot.coords) if(min(coords.knots.D) == 0){ stop("error: knots and observation coordinates cannot coincide. 
At least one knot location coincides with an observed coordinate.") } }else{ coords.D <- iDist(coords) } storage.mode(m) <- "integer" storage.mode(coords.D) <- "double" storage.mode(knots.D) <- "double" storage.mode(coords.knots.D) <- "double" if(missing(cov.model)){stop("error: cov.model must be specified")} if(!cov.model%in%c("gaussian","exponential","matern","spherical")) {stop("error: specified cov.model '",cov.model,"' is not a valid option; choose, from gaussian, exponential, matern, spherical.")} if(missing(priors)) {stop("error: prior list for the parameters must be specified")} names(priors) <- tolower(names(priors)) if(!"beta.0.norm" %in% names(priors)){stop("error: beta.0.norm must be specified")} beta.0.Norm <- priors[["beta.0.norm"]] if(!is.list(beta.0.Norm) || length(beta.0.Norm) != 2){stop("error: beta.0.Norm must be a list of length 2")} if(length(beta.0.Norm[[1]]) != p ){stop(paste("error: beta.0.Norm[[1]] must be a vector of length, ",p, "",sep=""))} if(length(beta.0.Norm[[2]]) != p^2 ){stop(paste("error: beta.0.Norm[[2]] must be a ",p,"x",p," covariance matrix",sep=""))} if(!"sigma.sq.ig" %in% names(priors)){stop("error: sigma.sq.IG must be specified")} sigma.sq.IG <- priors[["sigma.sq.ig"]] if(!is.list(sigma.sq.IG) || length(sigma.sq.IG) != 2){stop("error: sigma.sq.IG must be a list of length 2")} if(length(sigma.sq.IG[[1]]) != N.t){stop(paste("error: sigma.sq.IG[[1]] must be a vector of length, ",N.t, "",sep=""))} if(length(sigma.sq.IG[[2]]) != N.t){stop(paste("error: sigma.sq.IG[[2]] must be a vector of length, ",N.t, "",sep=""))} sigma.sq.IG <- as.vector(t(cbind(sigma.sq.IG[[1]],sigma.sq.IG[[2]]))) if(!"tau.sq.ig" %in% names(priors)){stop("error: tau.sq.IG must be specified")} tau.sq.IG <- priors[["tau.sq.ig"]] if(!is.list(tau.sq.IG) || length(tau.sq.IG) != 2){stop("error: tau.sq.IG must be a list of length 2")} if(length(tau.sq.IG[[1]]) != N.t){stop(paste("error: tau.sq.IG[[1]] must be a vector of length, ",N.t, "",sep=""))} if(length(tau.sq.IG[[2]]) != N.t){stop(paste("error: tau.sq.IG[[2]] must be a vector of length, ",N.t, "",sep=""))} tau.sq.IG <- as.vector(t(cbind(tau.sq.IG[[1]],tau.sq.IG[[2]]))) if(!"phi.unif" %in% names(priors)){stop("error: phi.Unif must be specified")} phi.Unif <- priors[["phi.unif"]] if(!is.list(phi.Unif) || length(phi.Unif) != 2){stop("error: phi.Unif must be a list of length 2")} if(length(phi.Unif[[1]]) != N.t){stop(paste("error: phi.Unif[[1]] must be a vector of length, ",N.t, "",sep=""))} if(length(phi.Unif[[2]]) != N.t){stop(paste("error: phi.Unif[[2]] must be a vector of length, ",N.t, "",sep=""))} if(any(phi.Unif[[2]]-phi.Unif[[1]] <= 0)){stop("error: phi.Unif has zero support")} phi.Unif <- as.vector(t(cbind(phi.Unif[[1]],phi.Unif[[2]]))) nu.Unif <- 0 if(cov.model == "matern"){ if(!"nu.unif" %in% names(priors)){stop("error: nu.Unif must be specified")} nu.Unif <- priors[["nu.unif"]] if(!is.list(nu.Unif) || length(nu.Unif) != 2){stop("error: nu.Unif must be a list of length 2")} if(length(nu.Unif[[1]]) != N.t){stop(paste("error: nu.Unif[[1]] must be a vector of length, ",N.t, "",sep=""))} if(length(nu.Unif[[2]]) != N.t){stop(paste("error: nu.Unif[[2]] must be a vector of length, ",N.t, "",sep=""))} if(any(nu.Unif[[2]]-nu.Unif[[1]] <= 0)){stop("error: nu.Unif has zero support")} nu.Unif <- as.vector(t(cbind(nu.Unif[[1]],nu.Unif[[2]]))) } if(!"sigma.eta.iw" %in% names(priors)){stop("error: Sigma.eta.IW must be specified")} sigma.eta.IW <- priors[["sigma.eta.iw"]] if(!is.list(sigma.eta.IW) || length(sigma.eta.IW) != 2){stop("error: 
Sigma.eta.IW must be a list of length 2")} if(length(sigma.eta.IW[[1]]) != 1){stop("error: Sigma.eta.IW[[1]] must be of length 1 (i.e., the IW df hyperparameter)")} if(length(sigma.eta.IW[[2]]) != p^2){stop(paste("error: Sigma.eta.IW[[2]] must be a vector or matrix of length, ",p^2, ", (i.e., the IW scale matrix hyperparameter)",sep=""))} storage.mode(sigma.sq.IG) <- "double" storage.mode(tau.sq.IG) <- "double" storage.mode(phi.Unif) <- "double" storage.mode(nu.Unif) <- "double" storage.mode(sigma.eta.IW[[1]]) <- "double"; storage.mode(sigma.eta.IW[[2]]) <- "double" if(missing(starting)){stop("error: starting value list for the parameters must be specified")} names(starting) <- tolower(names(starting)) if(!"beta" %in% names(starting)){stop("error: beta must be specified in starting value list")} beta.starting <- starting[["beta"]] if(length(beta.starting) != N.t*p){stop(paste("error: beta starting must be of length ",N.t,"*",p,sep=""))} if(!"sigma.sq" %in% names(starting)){stop("error: sigma.sq must be specified in starting value list")} sigma.sq.starting <- starting[["sigma.sq"]] if(length(sigma.sq.starting) != N.t){stop(paste("error: sigma.sq starting must be a vector of length, ",N.t, "",sep=""))} if(!"tau.sq" %in% names(starting)){stop("error: a prior was spcified for tau.sq therefore tau.sq must be specified in starting value list")} tau.sq.starting <- starting[["tau.sq"]] if(length(tau.sq.starting) != N.t){stop(paste("error: tau.sq starting must be a vector of length, ",N.t, "",sep=""))} if(!"phi" %in% names(starting)){stop("error: phi must be specified in starting value list")} phi.starting <- starting[["phi"]] if(length(phi.starting) != N.t){stop(paste("error: phi starting must be a vector of length, ",N.t, "",sep=""))} nu.starting <- 0 if(cov.model == "matern"){ if(!"nu" %in% names(starting)){stop("error: nu must be specified in starting value list")} nu.starting <- starting[["nu"]] if(length(nu.starting) != N.t){stop(paste("error: nu starting must be a vector of length, ",N.t, "",sep=""))} } if(!"sigma.eta" %in% names(starting)){stop("error: Sigma.eta must be specified in starting value list")} sigma.eta.starting <- as.vector(starting[["sigma.eta"]]) if(length(sigma.eta.starting) != p^2){stop(paste("error: Sigma.eta must be a positive definite matrix of length, ",p^2, sep=""))} storage.mode(beta.starting) <- "double" storage.mode(phi.starting) <- "double" storage.mode(sigma.sq.starting) <- "double" storage.mode(tau.sq.starting) <- "double" storage.mode(nu.starting) <- "double" storage.mode(sigma.eta.starting) <- "double" if(missing(tuning)){stop("error: tuning value vector for the spatial parameters must be specified")} names(tuning) <- tolower(names(tuning)) if(!"phi" %in% names(tuning)){stop("error: phi must be specified in tuning value list")} phi.tuning <- tuning[["phi"]] if(length(phi.tuning) != N.t){stop(paste("error: phi tuning must be a vector of length, ",N.t, "",sep=""))} nu.tuning <- 0 if(cov.model == "matern"){ if(!"nu" %in% names(tuning)){stop("error: nu must be specified in tuning value list")} nu.tuning <- tuning[["nu"]] if(length(nu.tuning) != N.t){stop(paste("error: nu tuning must be a vector of length, ",N.t, "",sep=""))} } storage.mode(phi.tuning) <- "double" storage.mode(nu.tuning) <- "double" if(missing(n.samples)){stop("error: n.samples needs to be specified")} storage.mode(n.samples) <- "integer" storage.mode(get.fitted) <- "integer" storage.mode(n.report) <- "integer" storage.mode(verbose) <- "integer" ptm <- proc.time() if(is.pp){ out <- 
.Call("spPPDynLM", Y, t(X), p, n, m, N.t, knots.D, coords.knots.D, beta.0.Norm, sigma.sq.IG, tau.sq.IG, nu.Unif, phi.Unif, sigma.eta.IW, beta.starting, phi.starting, sigma.sq.starting, tau.sq.starting, nu.starting, sigma.eta.starting, phi.tuning, nu.tuning, cov.model, n.samples, miss, get.fitted, verbose, n.report) }else{ out <- .Call("spDynLM", Y, t(X), p, n, N.t, coords.D, beta.0.Norm, sigma.sq.IG, tau.sq.IG, nu.Unif, phi.Unif, sigma.eta.IW, beta.starting, phi.starting, sigma.sq.starting, tau.sq.starting, nu.starting, sigma.eta.starting, phi.tuning, nu.tuning, cov.model, n.samples, miss, get.fitted, verbose, n.report) } run.time <- proc.time() - ptm out$p.beta.0.samples <- mcmc(t(out$p.beta.0.samples)) colnames(out$p.beta.0.samples) <- x.names out$p.beta.samples <- mcmc(t(out$p.beta.samples)) colnames(out$p.beta.samples) <- as.vector(t(sapply(paste(x.names,".t",sep=""),paste,1:N.t,sep=""))) out$p.theta.samples <- mcmc(t(out$p.theta.samples)) if(cov.model != "matern"){ colnames(out$p.theta.samples) <- as.vector(t(sapply(paste(c("sigma.sq", "tau.sq", "phi"),".t",sep=""),paste,1:N.t,sep=""))) }else{ colnames(out$p.theta.samples) <- as.vector(t(sapply(paste(c("sigma.sq", "tau.sq", "phi", "nu"),".t",sep=""),paste,1:N.t,sep=""))) } out$p.sigma.eta.samples <- mcmc(t(out$p.sigma.eta.samples)) colnames(out$p.sigma.eta.samples) <- paste("eta[",matrix(apply(cbind(expand.grid(x.names,x.names)), 1, function(x) paste(x, collapse=",")),p,p),"]",sep="") out$Y <- Y out$X <- X out$coords <- coords out$cov.model <- cov.model out$x.names <- x.names out$run.time <- run.time out$missing.indx <- miss if(is.pp){ out$knot.coords <- knot.coords } class(out) <- "spDynLM" out }
sum_up <- function(df, ..., d = FALSE, wt = NULL) { wt = dplyr::enquo(wt) if (rlang::is_null(rlang::f_rhs(wt))) { wtvar <- character(0) } else{ wtvar <- names(tidyselect::vars_select(names(df), !!wt)) } byvars <- dplyr::group_vars(df) vars <- setdiff(names(tidyselect::vars_select(names(df), ...)), c(wtvar, byvars)) if (length(vars) == 0) { vars <- setdiff(names(df), c(byvars, wtvar)) } nums <- sapply(df, is.numeric) nums_name <- names(nums[nums == TRUE]) vars <- intersect(vars, nums_name) if (!length(vars)) stop("Please select at least one numeric variable", call. = FALSE) df <- dplyr::select(df, dplyr::all_of(c(vars, byvars, wtvar))) df <- dplyr::summarize(df, describe(dplyr::across(), d = d, wtvar = wtvar, byvars = byvars)) out <- dplyr::arrange(df, dplyr::across(dplyr::all_of(c(byvars, "Variable")))) if (d) { out1 <- dplyr::select(out, dplyr::all_of(c(byvars, "Variable", "Obs", "Missing", "Mean", "StdDev", "Skewness", "Kurtosis"))) out2 <- dplyr::select(out, dplyr::all_of(c(byvars, "Variable", "Min", "p1", "p5", "p10", "p25", "p50"))) out3 <- dplyr::select(out, dplyr::all_of(c(byvars, "Variable", "p50", "p75", "p90", "p95", "p99", "Max"))) statascii(out1, n_groups = length(byvars) + 1) cat("\n") statascii(out2, n_groups = length(byvars) + 1) cat("\n") statascii(out3, n_groups = length(byvars) + 1) } else{ out <- dplyr::select(out, dplyr::all_of(c(byvars, "Variable", setdiff(names(out), c(byvars, "Variable"))))) statascii(out, n_groups = length(byvars) + 1) } invisible(out) } describe <- function(df, d = FALSE, wtvar = character(0), byvars = character(0)){ if (length(byvars)){ df <- dplyr::select(df, dplyr::all_of(setdiff(names(df), byvars))) } if (length(wtvar)){ w <- df[[wtvar]] df <- dplyr::select(df, dplyr::all_of(setdiff(names(df), wtvar))) } else{ w <- NULL } names <- names(df) if (d==FALSE) { if (!is.null(w)){ sum <- lapply(df ,function(x){ take <- !is.na(x) & !is.na(w) & w > 0 x_omit <- x[take] w_omit <- w[take] m <- matrixStats::weightedMean(x_omit, w = w_omit) c(length(x_omit), length(x)-length(x_omit), m, sqrt(matrixStats::weightedMean((x_omit-m)^2, w = w_omit)), matrixStats::colRanges(x_omit, dim = c(length(x_omit), 1))) }) }else{ sum <- lapply(df ,function(x){ x_omit <- stats::na.omit(x) c(length(x_omit), length(x) - length(x_omit), mean(x_omit), stats::sd(x_omit), matrixStats::colRanges(x_omit, dim = c(length(x_omit), 1))) }) } sum <- do.call(cbind, sum) sum <- as.data.frame(t(sum)) sum <- dplyr::bind_cols(dplyr::tibble(names), sum) sum <- stats::setNames(sum, c("Variable", "Obs","Missing","Mean","StdDev","Min", "Max")) } else { N <- nrow(df) f=function(x){ if (!is.null(w)){ take <- !is.na(x) & !is.na(w) & w > 0 x_omit <- x[take] w_omit <- w[take] m <- matrixStats::weightedMean(x_omit, w = w_omit) sum_higher <- matrixStats::colWeightedMeans(cbind((x_omit-m)^2,(x_omit-m)^3,(x_omit-m)^4), w = w_omit) sum_higher[1] <- sqrt(sum_higher[1]) sum_higher[2] <- sum_higher[2]/sum_higher[1]^3 sum_higher[3] <- sum_higher[3]/sum_higher[1]^4 sum_quantile <- pctile(x_omit, c(0, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 0.9, 0.95, 0.99, 1), wt = w_omit) } else{ x_omit <- stats::na.omit(x) m <- mean(x_omit) sum_higher <- colMeans(cbind((x_omit-m)^2,(x_omit-m)^3,(x_omit-m)^4)) sum_higher[1] <- sqrt(sum_higher[1]) sum_higher[2] <- sum_higher[2]/sum_higher[1]^3 sum_higher[3] <- sum_higher[3]/sum_higher[1]^4 sum_quantile= pctile(x_omit, c(0, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 0.9, 0.95, 0.99, 1)) } n_NA <- length(x) - length(x_omit) sum <- c(N-n_NA, n_NA, m, sum_higher, sum_quantile) } sum <- 
parallel::mclapply(df, f) sum <- do.call(cbind, sum) sum <- as.data.frame(t(sum)) sum <- dplyr::bind_cols(dplyr::tibble(names), sum) sum <- stats::setNames(sum, c("Variable", "Obs","Missing","Mean","StdDev","Skewness","Kurtosis","Min","p1","p5","p10","p25","p50","p75","p90","p95","p99","Max")) } sum }
context("image") test_that("fails well with no input", { expect_error(image(), "argument \"id\" is missing") }) test_that("fails well with non-existent droplet", { skip_on_cran() expect_error(image("bearbearbear"), "The resource you were accessing could not be found") }) test_that("httr curl options work", { skip_on_cran() library("httr") expect_error(image("asdfadf", config = timeout(seconds = 0.001))) })
gefs <- function(...) { .Defunct(msg = "`gefs` is defunct; it may return later") } gefs_dimension_values <- function(...) { .Defunct(msg = "`gefs_dimension_values` is defunct; it may return later") } gefs_dimensions <- function(...) { .Defunct(msg = "`gefs_dimensions` is defunct; it may return later") } gefs_ensembles <- function(...) { .Defunct(msg = "`gefs_ensembles` is defunct; it may return later") } gefs_latitudes <- function(...) { .Defunct(msg = "`gefs_latitudes` is defunct; it may return later") } gefs_longitudes <- function(...) { .Defunct(msg = "`gefs_longitudes` is defunct; it may return later") } gefs_times <- function(...) { .Defunct(msg = "`gefs_times` is defunct; it may return later") } gefs_variables <- function(...) { .Defunct(msg = "`gefs_variables` is defunct; it may return later") } NULL
NULL NULL methods::setGeneric("number_of_total_units", function(x) standardGeneric("number_of_total_units")) methods::setMethod("number_of_total_units", "ConservationProblem", function(x) x$number_of_total_units())