cdm_pem_inits_assign_parmlist <- function(pem_pars, envir) {
  parmlist <- list()
  NP <- length(pem_pars)
  for (pp in 1:NP) {
    p1 <- get(pem_pars[pp], pos = envir)
    parmlist[[pp]] <- p1
  }
  names(parmlist) <- pem_pars
  return(parmlist)
}
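A minimal usage sketch of the helper above (the object names "b" and "a" are hypothetical): it simply gathers the named objects from the supplied environment into a named list.
b <- c(0.1, 0.2)
a <- 1.5
cdm_pem_inits_assign_parmlist(pem_pars = c("b", "a"), envir = environment())
# returns list(b = c(0.1, 0.2), a = 1.5)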
library("RUnit") library("krm") test.getSeqKernel <- function() { tolerance=1e-3 if (R.Version()$system %in% c("x86_64, mingw32")) { tolerance=1e-6 } RNGkind("Mersenne-Twister", "Inversion") fileName=paste(system.file(package="krm")[1],'/misc/SETpfamseed_aligned_for_testing.fasta', sep="") K=getSeqKernel (fileName, kern.type="mi", tau=.01, call.C=T) checkEqualsNumeric( c(K[1:2, 1:2]) , c(1.0000000, 0.1038921, 0.1038921, 1.0000000) , tolerance = tolerance ) K=getSeqKernel (fileName, kern.type="mi", tau=1, call.C=T) checkEqualsNumeric( c((K^0.01)[1:2, 1:2]) , c(1.0000000, 0.1038921, 0.1038921, 1.0000000) , tolerance = tolerance ) seq.alignment <- readFastaFile(fileName) K=getSeqKernel (seq.alignment[1:2], kern.type="mi", tau=.01) checkEqualsNumeric( c(K[1:2, 1:2]) , c(1.0000000, 0.1038921, 0.1038921, 1.0000000) , tolerance = tolerance ) seq.alignment <- readFastaFile(fileName) K=getSeqKernel (seq.alignment[1:2], kern.type="mi", tau=.01, seq.start=100, seq.end=200) K.1=getSeqKernel (seq.alignment[1:2], kern.type="mi", tau=.01, seq.start=100, seq.end=200, call.C=FALSE) checkEqualsNumeric( c(K[1:2, 1:2]) , c(K.1[1:2, 1:2]) , tolerance = tolerance ) }
get_Rversion <- function() {
  rinfo <- data.frame(
    Name = "Version",
    Value = R.version.string
  )
  return(rinfo)
}
match.data.frame <- function(x, y, by, by.x=by, by.y=by, grep., split, sep=':'){ if((missing(by.x) || missing(by.y)) && missing(by)){ by <- names(x) } kx <- length(by.x) nx <- nrow(x) if(kx<1){ warning('length(by.x)==0; nothing to match. Returning NAs') return(rep(NA, nx)) } chk.x <- which(!(by.x %in% names(x))) if(length(chk.x)>0){ stop('by.x not in names(x); first error = ', by.x[chk.x[1]]) } if((ky <- length(by.y))!= kx){ stop('length(by.x) = ', kx, ' != length(by.y) = ', ky) } chk.y <- which(!(by.y %in% names(y))) if(length(chk.y)>0){ stop('by.y not in names(y); first error = ', by.y[chk.y[1]]) } if(missing(grep.)){ grep. <- rep(NA, kx) } else { if((kg <- length(grep.)) == 1){ grep. <- rep(grep., kx) } else if(kg != kx) stop('length(by.x) = ', kx, ' != length(grep.) = ', kg) } if(missing(split)){ split <- c(NA, ' ')[1+!is.na(grep.)] } else { if((ks <- length(split))==1){ split <- rep(split, kx) } else if(ks != kx) stop('length(by.x) = ', kx, ' != length(split) = ', ks) } oops <- which(is.na(grep.) != is.na(split)) if(length(oops)>0){ stop('grep. cannot be NA when split is not and vice versa;', ' first error in position number ', oops[1], ' where grep. = ', grep[oops[1]], ' and split = ', split[oops[1]]) } fullMatch <- (is.na(grep.) & is.na(split)) afM <- any(fullMatch) if(afM){ matchx <- c(as.list(x[, by.x[fullMatch], drop=FALSE]), sep=sep) keyfx <- with(x, do.call(paste, matchx)) matchy <- c(as.list(y[, by.y[fullMatch], drop=FALSE]), sep=sep) keyfy <- with(y, do.call(paste, matchy)) } anf <- any(!fullMatch) if(anf){ parMatch <- ((1:kx)[!fullMatch]) x. <- mapply(function(z, s){ z. <- strsplit(z, s) sapply(z., '[', 1) }, x[, by.x[parMatch], drop=FALSE], split[parMatch]) } xyi <- rep(NA, nx) ny <- nrow(y) iy. <- 1:ny for(ix in 1:nx){ if(afM){ iy <- which(regexpr(keyfx[ix], keyfy, fixed=TRUE)>0) if((kiy <- length(iy))>0){ if(kiy<2) { xyi[ix] <- iy } } } else iy <- iy. if(anf & (kiy>1)){ for(j in parMatch){ xij <- x.[ix, by.x[j]] xiju <- do.call(grep.[j], list(xij, y[iy, by.y[j]], fixed=TRUE) ) if((kiju <- length(xiju))<1) next if(kiju<2){ xyi[ix] <- iy[xiju] next } else iy <- iy[xiju] } } } xyi }
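A hedged usage sketch of match.data.frame() for the exact-match case (no grep. or split arguments): each row of x is matched against y on a shared key and the index of the matching row in y (or NA) is returned. Note that full matching is substring-based via regexpr(), so keys should be distinctive.
x <- data.frame(id = c("a", "b", "c"), stringsAsFactors = FALSE)
y <- data.frame(id = c("c", "a"), val = 1:2, stringsAsFactors = FALSE)
match.data.frame(x, y, by = "id")
# expected: 2 NA 1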
test_that("hover_reload_button has expected structure", { test_me <- hover_reload_button("id", "label") expect_equal( test_me$attribs$type, "button" ) expect_true(any("onClick" %in% names(test_me$attribs))) expect_equal( test_me$attribs$onClick, "location.reload();" ) })
amanida_read <- function(file, mode, coln, separator=NULL) { . = NULL; foldchange = NULL; pvalue = NULL; N = NULL; set.seed(123) VAR_NAMES <- c('id', 'pvalue', 'foldchange', 'N', 'ref') ext <- tools::file_ext(file) if (ext %in% c("csv", "tsv", "txt")) { stopifnot("Please, specify a separator."=!is.null(separator)) datafile <- readr::read_delim(file, delim = separator, col_types = readr::cols()) %>% mutate( across(coln[2:3], function(x) sub(",", ".", x, fixed = TRUE)) ) } else if (ext %in% c("xlsx", "xls")) { datafile <- readxl::read_excel(file) } else { stop("Format not compatible; try csv, tsv, excel or txt. Aborting.") } misrow <- sum(!complete.cases(datafile)) if (mode == "quan") { datafile <- datafile %>% select(all_of(coln)) %>% filter(complete.cases(.)) %>% rename_with(.cols = everything(), .fn = ~ VAR_NAMES) %>% mutate( `foldchange` = as.numeric(`foldchange`), `pvalue` = as.numeric(`pvalue`), `N` = as.integer(`N`), `foldchange` = case_when( `foldchange` < 0 ~ 1 / abs(`foldchange`), T ~ foldchange ), trend = case_when( `foldchange` < 1 ~ -1, `foldchange` == 1 ~ 0, T ~ 1 ) ) } else if (mode == "qual") { VAR_NAMES <- c('id', 'trend', 'ref') datafile <- datafile %>% select(all_of(coln)) %>% filter(complete.cases(.)) %>% rename_with(.cols = everything(), .fn = ~ VAR_NAMES) %>% mutate(trend = case_when( tolower(trend) == "down" ~ -1, T ~ 1 )) } else { stop("Please, indicate mode: 'quan' for quantitative and 'qual' for qualitative") } message(paste("Loaded dataset with", nrow(datafile),"rows which contains", length(unique(datafile$id)), "different identifiers. There are", misrow, "rows skipped from original dataset because contained NA values.")) return(datafile) }
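A hedged usage sketch of amanida_read() in quantitative mode, reading a small temporary CSV. The column names are illustrative only; coln names the columns holding, in order, the identifier, p-value, fold-change, N and reference. dplyr must be attached because the function calls its verbs unqualified.
library(dplyr)
tmp <- tempfile(fileext = ".csv")
write.csv(data.frame(metabolite = c("ala", "gly"), p = c(0.01, 0.20),
                     fc = c(2.0, -1.5), n = c(10, 12), study = "S1"),
          tmp, row.names = FALSE)
amanida_read(tmp, mode = "quan",
             coln = c("metabolite", "p", "fc", "n", "study"), separator = ",")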
get.decision.obd.kb <- function(toxicity.low, toxicity.moderate,toxicity.high, efficacy.low, efficacy.moderate, efficacy.high, target.toxicity, target.efficacy, cohortsize, ncohort, cutoff.eli.toxicity= 0.95, cutoff.eli.efficacy=0.3){ decision.matrix <- matrix(c("E","E","E","E","E","E","E","S","D","S","S","S", "D","D","D","D"),ncol=1, byrow=TRUE) efficacy <-matrix(rep( c(0,efficacy.low,efficacy.low,efficacy.moderate,efficacy.moderate,efficacy.high,efficacy.high,1),4), ncol=2, byrow=TRUE) toxicity <- matrix( c(0,toxicity.low,0,toxicity.low,0,toxicity.low,0,toxicity.low,toxicity.low,toxicity.moderate,toxicity.low,toxicity.moderate,toxicity.low,toxicity.moderate,toxicity.low,toxicity.moderate,toxicity.moderate,toxicity.high,toxicity.moderate,toxicity.high,toxicity.moderate,toxicity.high,toxicity.moderate,toxicity.high,toxicity.high,1,toxicity.high,1,toxicity.high,1,toxicity.high,1), ncol=2, byrow=TRUE) decision.table <- cbind(toxicity,efficacy, decision.matrix) colnames(decision.table) <- c("T1", "T2", "EF1","EF2","DECISION") ntvector <- c() ndvector <- c() nrvector <- c() for (i in 1:ncohort) { nt<-rep(i*cohortsize,(i*cohortsize+1)^2) ntvector <- append(ntvector, nt) for (j in 0:(i*cohortsize)){ nd <- rep(j, (i*cohortsize+1)) ndvector <- append(ndvector, nd) nr <- c(0:(i*cohortsize)) nrvector <- append(nrvector,nr) } } ntmatrix <- matrix(ntvector,ncol=1) ndmatrix <- matrix(ndvector,ncol=1) nrmatrix <- matrix(nrvector,ncol=1) outmatrix <- cbind(ntmatrix,ndmatrix,nrmatrix) colnames(outmatrix)<- c("N", "T","R") outmatrix <- as.data.frame(outmatrix) pinfor <- function(mout, i){ out <- list( n = mout[i,1], d = mout[i,2], r = mout[i,3]) } pdecision <- function(decisont, i){ out <- list ( t1 = as.numeric(decisont[i,1]), t2 = as.numeric(decisont[i,2]), e1 = as.numeric(decisont[i,3]), e2 = as.numeric(decisont[i,4]), d = decisont[i,5] ) } maxjupm <- function(decision.table, outmatrix, i){ outputoutm <- pinfor(outmatrix,i) nt = outputoutm$n dt = outputoutm$d rt = outputoutm$r jupmv <- c() for (rowd in 1:nrow(decision.table)){ a = as.numeric(decision.table[rowd,1]) b = as.numeric(decision.table[rowd,2]) c = as.numeric(decision.table[rowd,3]) d = as.numeric(decision.table[rowd,4]) jupm = round((pbeta(b, (1+dt), (1+nt-dt))-pbeta(a, (1+dt), (1+nt-dt)))*(pbeta(d, (1+rt), (1+nt-rt))-pbeta(c, (1+rt), (1+nt-rt)))/((b-a)*(d-c)),2) jupmv <- append(jupmv, jupm) } jupmv_matrix <- matrix(jupmv, ncol=1) colnames(jupmv_matrix)<- "jupm" decision.table_jupm <- cbind(decision.table,jupmv_matrix) decision.table_jupm <- as.data.frame(decision.table_jupm) decision.table_jupm$jupm <- as.numeric(as.character(decision.table_jupm$jupm)) jupm_max <-max(decision.table_jupm$jupm) max_index <- which.max(decision.table_jupm$jupm) decision <- decision.table_jupm$D[max_index] out <- list(decision.table_jupm=decision.table_jupm, chosen_decision=decision, jupm_max_value =jupm_max) } decision_picking <-c() jupm_picking <-c() for (rowoutm in 1:nrow(outmatrix)){ outjupm <- maxjupm(decision.table, outmatrix, rowoutm) decision_p <- outjupm$chosen_decision decision_picking <- append(decision_picking,as.character(decision_p)) jupm_mv <- outjupm$jupm_max_value jupm_picking <- append(jupm_picking,jupm_mv) } decision_matrix <- matrix(decision_picking, ncol=1) colnames(decision_matrix)<- "Decision" jupm_max_matrix <- matrix(jupm_picking, ncol=1) colnames(jupm_max_matrix) <- "Jupm" output.matrix <- cbind(outmatrix, decision_matrix,jupm_max_matrix) output.matrix <- output.matrix[,-5] output.matrix <- as.data.frame(output.matrix) i <- 
sapply(output.matrix, is.factor) output.matrix[i] <- lapply(output.matrix[i], as.character) for (rowindex in 1:nrow(output.matrix)){ if (output.matrix$Decision[rowindex] == "D") { if (1- pbeta(target.toxicity,output.matrix$T[rowindex]+1, output.matrix$N[rowindex]-output.matrix$T[rowindex]+1) > cutoff.eli.toxicity) { output.matrix$Decision[rowindex] = "DUT" } else if (1- pbeta(target.efficacy,output.matrix$R[rowindex]+1, output.matrix$N[rowindex]-output.matrix$R[rowindex]+1)< cutoff.eli.efficacy){ output.matrix$Decision[rowindex] = "DUE" } } else if (output.matrix$Decision[rowindex] == "E"){ if (1- pbeta(target.efficacy,output.matrix$R[rowindex]+1, output.matrix$N[rowindex]-output.matrix$R[rowindex]+1)< cutoff.eli.efficacy){ output.matrix$Decision[rowindex] = "EUE" } } } outlist =list( boundary.table=decision.table, decision.matrix=output.matrix) return (outlist) }
t.test.mult <- function(mean1, var1, n1, mean2=NULL, var2=NULL, n2=NULL, samples=NULL, alternative=c("two.sided", "less", "greater"), mu=0, var.equal=FALSE, conf.level=0.95, as.vector=FALSE) { alternative <- match.arg(alternative) if (length(mean1) != length(var1)) { stop("'mean1' and 'var1' must have the same length.") } if (any(is.na(var1))) { stop("'var1' must not be null or NA") } if (!is.null(mean2) && (length(mean2) != length(var2))) { stop("'mean2' and 'var2' must have the same length.") } if (!is.null(mean2) && any(is.na(var2))) { stop("'var2' must not be null or NA if 'mean2' is not null") } if (!is.null(mean2) && length(mean1)!=length(mean2)) { stop("'mean1' and 'mean2' must have the same length if both are not null") } if (length(n1)!=1) { stop("'n1' must have length 1") } if ((length(n2)!=1) && (length(n2)!=NA)) { stop("'n2' must be null or have length 1") } if (samples == 1) { estimate <- mean1 df <- rep(n1-1, length(mean1)) stderr <- sqrt(var1/n1) tstat <- (mean1-mu)/stderr } else { estimate <- mean1 - mean2 if (var.equal) { df <- rep(n1+n2-2, length(mean1)) v <- 0 if(n1 > 1) v <- v + (n1-1)*var1 if(n2 > 1) v <- v + (n2-1)*var2 v <- v/df stderr <- sqrt(v*(1/n1+1/n2)) } else { stderr1 <- sqrt(var1/n1) stderr2 <- sqrt(var2/n2) stderr <- sqrt(stderr1^2 + stderr2^2) df <- stderr^4/(stderr1^4/(n1-1) + stderr2^4/(n2-1)) } tstat <- (estimate - mu)/stderr } if (alternative == "less") { cint <- cbind(-Inf, tstat + qt(conf.level, df) ) } else if (alternative == "greater") { cint <- cbind(tstat - qt(conf.level, df), Inf) } else { cint <- qt((1+conf.level)/2, df) cint <- tstat + cbind(-cint, cint) } cint <- mu + cint * stderr rval <- cbind(estimate, cint) if (as.vector && length(mean1)==1) { rval <- as.vector(rval); names(rval) <- c("point","lower","upper") } else dimnames(rval)[[2]] <- c("point","lower","upper") return(rval) }
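A hedged usage sketch of t.test.mult(): a Welch-type two-sample confidence interval computed from summary statistics only (group means, variances and sizes), returned as a named point/lower/upper vector.
t.test.mult(mean1 = 5.2, var1 = 1.1, n1 = 30,
            mean2 = 4.6, var2 = 0.9, n2 = 30,
            samples = 2, as.vector = TRUE)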
lda_shrink_mean <- function(x, ...) { UseMethod("lda_shrink_mean") } lda_shrink_mean.default <- function(x, y, prior = NULL, ...) { x <- pred_to_matrix(x) y <- outcome_to_factor(y) complete <- complete.cases(x) & complete.cases(y) x <- x[complete,,drop = FALSE] y <- y[complete] obj <- diag_estimates(x = x, y = y, prior = prior, pool = TRUE, est_mean = "tong") obj$col_names <- colnames(x) obj <- new_discrim_object(obj, "lda_shrink_mean") obj } lda_shrink_mean.formula <- function(formula, data, prior = NULL, ...) { formula <- no_intercept(formula, data) mf <- model.frame(formula = formula, data = data) .terms <- attr(mf, "terms") x <- model.matrix(.terms, data = mf) y <- model.response(mf) est <- lda_shrink_mean.default(x = x, y = y, prior = prior) est$.terms <- .terms est <- new_discrim_object(est, class(est)) est } print.lda_shrink_mean <- function(x, ...) { cat("Shrinkage-Mean-Based Diagonal LDA\n\n") print_basics(x, ...) invisible(x) } predict.lda_shrink_mean <- function(object, newdata, type = c("class", "prob", "score"), ...) { type <- rlang::arg_match0(type, c("class", "prob", "score"), arg_nm = "type") newdata <- process_newdata(object, newdata) scores <- apply(newdata, 1, function(obs) { sapply(object$est, function(class_est) { with(class_est, sum((obs - xbar)^2 / object$var_pool) + log(prior)) }) }) if (type == "prob") { means <- lapply(object$est, "[[", "xbar") covs <- replicate(n=object$num_groups, object$var_pool, simplify=FALSE) priors <- lapply(object$est, "[[", "prior") res <- posterior_probs(x = newdata, means = means, covs = covs, priors = priors) res <- as.data.frame(res) } else if (type == "class") { res <- score_to_class(scores, object) } else { res <- t(scores) res <- as.data.frame(res) } res }
NULL plot.SysConfig <- function(x, y, ...) { plot(as.raster(x$PointerMatrix, background = x$WBColours[[1]]), ...) } print.SysConfig <- function(x, ...) { cat(sprintf("Amiga system-configuration\nFontHeight:\t%i\nPrinter:\t%s %s\nInterlaced:\t%s", x$FontHeight, tolower(strsplit(as.character(x$PrinterPort), "_")[[1]][[1]]), x$PrinterFilename, as.character(x$LaceWB == "LACE")), ...) } as.raw.SysConfig <- function(x, ...) { class(x) <- NULL x$KeyRptSpeed <- as.raw(x$KeyRptSpeed) x$KeyRptDelay <- as.raw(x$KeyRptDelay) x$DoubleClick <- as.raw(x$DoubleClick) x$PointerMatrix <- as.raw(x$PointerMatrix) x$PointerMatrix[3] <- raw(1) x$WBColours <- colourToAmigaRaw(x$WBColours, colour.depth = "12 bit", n.bytes = "2") x$spriteColours <- colourToAmigaRaw(x$spriteColours, colour.depth = "12 bit", n.bytes = "2") x$PrinterFilename <- charToRaw(x$PrinterFilename)[1:30] x$PrtDevName <- charToRaw(x$PrtDevName)[1:16] x[names(.SysConfigFactors)] <- lapply(names(.SysConfigFactors), function(y) { .match.factor.inv(x, y, .SysConfigFactors[[y]]$vals, .SysConfigFactors[[y]]$levs) }) x$PrintFlags <- .match.multi.factor.inv(x, "PrintFlags", .SysConfigMultiFactors[["PrintFlags"]]$vals, .SysConfigMultiFactors[["PrintFlags"]]$levs) x$SerRWBits <- .bitmapToRaw(x$SerRWBits, F, F) x$SerParShk <- .amigaIntToRaw( 16*.match.factor.inv(x$SerParShk, "SerialParity", 0:4, c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", "SPARITY_MARK", "SPARITY_SPACE")) + .match.factor.inv(x$SerParShk, "HandshakeMode", 0:2, c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE")), 8, F) x$SerStopBuf <- .amigaIntToRaw( 16*(x$SerStopBuf$N.StopBits - 1) + .match.factor.inv(x$SerStopBuf, "BufSize", 0:5, c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000")), 8, F) x <- .write.amigaData(x, .SysConfigData$byte, .SysConfigData$signed, .SysConfigData$par.names) return(x) } .SysConfigMultiFactors <- list( PrintFlags= data.frame( vals = c(0x0001, 0x0002, 0x0004, 0x0008, 0x0000, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0000, 0x0200, 0x0400, 0x0800, 0x1000), levs = c("CORRECT_RED", "CORRECT_GREEN", "CORRECT_BLUE", "CENTER_IMAGE", "IGNORE_DIMENSIONS", "BOUNDED_DIMENSIONS", "ABSOLUTE_DIMENSIONS", "PIXEL_DIMENSIONS", "MULTIPLY_DIMENSIONS", "INTEGER_SCALING", "ORDERED_DITHERING", "HALFTONE_DITHERING", "FLOYD_DITHERING", "ANTI_ALIAS", "GREY_SCALE2"), stringsAsFactors = F ) ) .SysConfigFactors <- list( PrinterPort = data.frame(vals = c(0x00, 0x01), levs = c("PARALLEL_PRINTER", "SERIAL_PRINTER"), stringsAsFactors = F), BaudRate = data.frame(vals = 0:7, levs = c("BAUD_110", "BAUD_300", "BAUD_1200", "BAUD_2400", "BAUD_4800", "BAUD_9600", "BAUD_19200", "BAUD_MIDI"), stringsAsFactors = F), PaperType = data.frame(vals = c(0x00, 0x80), levs = c("FANFOLD", "SINGLE"), stringsAsFactors = F), PrintPitch = data.frame(vals = c(0x000, 0x400, 0x800), levs = c("PICA", "ELITE", "FINE"), stringsAsFactors = F), PrintQuality = data.frame(vals = c(0x000, 0x100), levs = c("DRAFT", "LETTER")), PrintSpacing = data.frame(vals = c(0x000, 0x200), levs = c("SIX_LPI", "EIGHT_LPI"), stringsAsFactors = F), PrintImage = data.frame(vals = c(0x00, 0x01), levs = c("IMAGE_POSITIVE", "IMAGE_NEGATIVE"), stringsAsFactors = F), PrintAspect = data.frame(vals = c(0x00, 0x01), levs = c("ASPECT_HORIZ", "ASPECT_VERT"), stringsAsFactors = F), PrintShade = data.frame(vals = c(0x00, 0x01, 0x02), levs = c("SHADE_BW", "SHADE_GREYSCALE", "SHADE_COLOR"), stringsAsFactors = F), PaperSize = data.frame(vals = (0:13)*16, levs = c("US_LETTER", "US_LEGAL", "N_TRACTOR", "W_TRACTOR", "CUSTOM", 
paste0("EURO_A", 0:8)), stringsAsFactors = F), PrinterType = data.frame(vals = 0:12, levs = c("CUSTOM_NAME", "ALPHA_P_101", "BROTHER_15XL", "CBM_MPS1000", "DIAB_630", "DIAB_ADV_D25", "DIAB_C_150", "EPSON", "EPSON_JX_80", "OKIMATE_20", "QUME_LP_20", "HP_LASERJET", "HP_LASERJET_PLUS"), stringsAsFactors = F), LaceWB = data.frame(vals = 0:1, levs = c("NO_LACE", "LACE"), stringsAsFactors = F) ) .SysConfigData <- data.frame( byte = c(1, 1, 2, -8, -8, -8, -72, 1, 1, -6, 2, -8, 1, 1, 2, 2, -2, 2, -30, rep(2, 12), rep(-1, 3), 1, -12, -16, 1, 1, 1, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1), signed = c(T, F, F, F, F, F, F, T, T, F, F, F, T, T, T, T, F, T, F, rep(F, 8), T, F, F, F, rep( F, 3), F, F, F, T, T, F, F, F, F, F, F, F, F, F, F, F), par.names = c("FontHeight", "PrinterPort", "BaudRate", "KeyRptSpeed", "KeyRptDelay", "DoubleClick", "PointerMatrix", "XOffset", "YOffset", "spriteColours", "PointerTicks", "WBColours", "ViewXOffset", "ViewYOffset", "ViewInitX", "ViewInitY", "EnableCLI", "PrinterType", "PrinterFilename", "PrintPitch", "PrintQuality", "PrintSpacing", "PrintLeftMargin", "PrintRightMargin", "PrintImage", "PrintAspect", "PrintShade", "PrintThreshold", "PaperSize", "PaperLength", "PaperType", "SerRWBits", "SerStopBuf", "SerParShk", "LaceWB", "Pad", "PrtDevName", "DefaultPrtUnit", "DefaultSerUnit", "RowSizeChange", "ColumnSizeChange", "PrintFlags", "PrintMaxWidth", "PrintMaxHeight", "PrintDensity", "PrintXOffset", "wb_Width", "wb_Height", "wb_Depth", "ext_size"), stringsAsFactors = F ) read.SysConfig <- function(file, disk = NULL) { dat <- .read.generic(file, disk) rawToSysConfig(dat) } write.SysConfig <- function(x, file, disk = NULL) { if (!("SysConfig" %in% class(x))) stop("x should be of class SysConfig.") .write.generic(x, file, disk) } rawToSysConfig <- function(x) { system.configuration <- .read.amigaData(x, .SysConfigData$byte, .SysConfigData$signed, .SysConfigData$par.names) system.configuration$KeyRptSpeed <- timeval(system.configuration$KeyRptSpeed) system.configuration$KeyRptDelay <- timeval(system.configuration$KeyRptDelay) system.configuration$DoubleClick <- timeval(system.configuration$DoubleClick) system.configuration$PointerMatrix <- rawToHWSprite(system.configuration$PointerMatrix) system.configuration$WBColours <- amigaRawToColour(system.configuration$WBColours, colour.depth = "12 bit", n.bytes = "2") system.configuration$spriteColours <- amigaRawToColour(system.configuration$spriteColours, colour.depth = "12 bit", n.bytes = "2") system.configuration$PointerMatrix@colours <- system.configuration$spriteColours system.configuration$PrinterFilename <- .rawToCharNull(system.configuration$PrinterFilename) system.configuration$PrtDevName <- .rawToCharNull(system.configuration$PrtDevName) system.configuration[names(.SysConfigFactors)] <- lapply(names(.SysConfigFactors), function(y) { .match.factor(system.configuration, y, .SysConfigFactors[[y]]$vals, .SysConfigFactors[[y]]$levs) }) system.configuration$PrintFlags <- .match.multi.factor(system.configuration, "PrintFlags", .SysConfigMultiFactors[["PrintFlags"]]$vals, .SysConfigMultiFactors[["PrintFlags"]]$levs) if (sum(grepl("DIMENSIONS", system.configuration$PrintFlags)) > 1) system.configuration$PrintFlags <- system.configuration$PrintFlags[!grepl("IGNORE_DIMENSIONS", system.configuration$PrintFlags)] if (sum(grepl("DITHERING", system.configuration$PrintFlags)) > 1) system.configuration$PrintFlags <- system.configuration$PrintFlags[!grepl("ORDERED_DITHERING", system.configuration$PrintFlags)] system.configuration$SerRWBits <- 
as.logical(.rawToBitmap(system.configuration$SerRWBits, F, F)) names(system.configuration$SerRWBits) <- c(t(outer(c("write.bit", "read.bit"), 0:3, paste0))) system.configuration$SerParShk <- list( SerialParity = .match.factor(list(SerialParity = ProTrackR::hiNybble(system.configuration$SerParShk)), "SerialParity", 0:4, c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", "SPARITY_MARK", "SPARITY_SPACE")), HandshakeMode = .match.factor(list(HandshakeMode = ProTrackR::loNybble(system.configuration$SerParShk)), "HandshakeMode", 0:2, c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE")) ) system.configuration$SerStopBuf <- list( N.StopBits = ProTrackR::hiNybble(system.configuration$SerStopBuf) + 1L, BufSize = .match.factor(list(BufSize = ProTrackR::loNybble(system.configuration$SerStopBuf)), "BufSize", 0:5, c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000")) ) class(system.configuration) <- "SysConfig" return(system.configuration) } simpleSysConfig <- function(options) { result <- paste0("789ce3606060650083c301608a53fd0090646460f0538088373430341c3c", "e078e0b142f2836b02e51fce70d4ff38c152ffe70053fdbf038c60fac701", "96fa0f0738ea1f1c1000d320fe9f06260686ffff4126fcffc8e7c2c0c0f7", "8681916b150303ff7fb66ea06023830e482e3d352fb5283399012f6065f0", "86b2d81914189cf02b46058c501a004f782dc6") pos <- 1:(nchar(result)/2) pos <- pos*2 - 1 result <- as.raw(as.numeric( paste0("0x",c(sapply(result, substring, first = pos, last = pos + 1))))) result <- memDecompress(result, "gzip") result <- rawToSysConfig(result) if (!missing(options)) { for (opt in names(options)) { result[[opt]] <- options[[opt]] } } return(result) } `$<-.SysConfig` <- function(x, i, value) { x[[i]] <- value x } `[[<-.SysConfig` <- function(x, i, value) { if (!("character" %in% class(i))) stop("Refer to elements by name, not by index number, when replacing them.") cl <- class(x) x <- x[.SysConfigData$par.names] if (!(i %in% .SysConfigData$par.names)) stop(sprintf("Element \"%s\" is not part of SysConfig and cannot be assigned.", i)) class(x) <- NULL if (i %in% c("WBColours", "spriteColours") && !all(.is.colour(value))) stop(sprintf("Can only assign colours to %s.", i)) if (i == "WBColours" && length(value) != 4) stop("WBColours needs a vector of 4 colours.") if (i == "spriteColours" && length(value) != 3) stop("spriteColours needs a vector of 3 colours.") if (i == "Pad") { value <- as.raw(value) if (length(value) != 12) stop("'Pad' should be a vector of 12 raw values.") } if (i == "SerRWBits") { value <- as.logical(value) if (length(value) != 12) stop("'SerRWBits' should be a vector of 8 logical values.") names(sc$SerRWBits) <- c(paste0("write.bit", 0:3), paste0("read.bit", 0:3)) } if (i == "SerStopBuf") { if (typeof(value) == "list" && all(names(value) == c("N.StopBits", "BufSize"))) { value$N.StopBits <- as.numeric(value$N.StopBits) if (value$N.StopBits < 0 || value$N.StopBits > 15) stop("value is out of range.") bfs <- c("SBUF_512", "SBUF_1024", "SBUF_2048", "SBUF_4096", "SBUF_8000", "SBUF_16000") if (is.numeric(value$BufSize)) value$BufSize <- bfs[match(value$BufSize, 0:5)] if (is.factor(value$BufSize)) value$BufSize <- as.character(value$BufSize) value$BufSize <- factor(value$BufSize[1], bfs) if (is.na(value$BufSize)) stop("Illegal value for SerStopBuf.") } else { stop("SerStopBuf should be a list with elements N.StopBits and BufSize") } } if (i == "SerParShk") { if (typeof(value) == "list" && all(names(value) == c("SerialParity", "HandshakeMode"))) { sp <- c("SPARITY_NONE", "SPARITY_EVEN", "SPARITY_ODD", 
"SPARITY_MARK", "SPARITY_SPACE") if (is.numeric(value$SerialParity)) value$SerialParity <- sp[match(value$SerialParity, 0:4)] if (is.factor(value$SerialParity)) value$SerialParity <- as.character(value$SerialParity) value$SerialParity <- factor(value$SerialParity[1], sp) hs <- c("SHSHAKE_XON", "SHSHAKE_RTS", "SHSHAKE_NONE") if (is.numeric(value$HandshakeMode)) value$HandshakeMode <- hs[match(value$HandshakeMode, 0:2)] if (is.factor(value$HandshakeMode)) value$HandshakeMode <- as.character(value$HandshakeMode) value$HandshakeMode <- factor(value$HandshakeMode[1], hs) if (is.na(value$HandshakeMode) || is.na(value$SerialParity)) stop("Illegal value for SerParShk") } else { stop("SerStopBuf should be a list with elements N.StopBits and BufSize") } } if (i == "EnableCLI") { value <- as.raw(value) if (length(value) != 2) stop("'EnableCLI' should be a vector of 2 raw values.") } if (i %in% c("PrtDevName", "PrinterFilename")) value <- as.character(value) if (i %in% c("KeyRptSpeed", "KeyRptDelay", "DoubleClick")) { if (is.numeric(value)) { if (value < 0) stop(sprintf("Negative numbers are not allowed for %s", i)) class(value) <- "AmigaTimeVal" } if (!"AmigaTimeVal" %in% class(value)) stop("Value cannot be cast to 'timeval' class object.") } if (i == "PointerMatrix") { if ("raster" %in% class(value)) value <- rasterToHWSprite(value) if (!("hardwareSprite" %in% class(value))) stop ("PointerMatrix element and its replacement should be an S4 class hardwareSprite object.") if (!all(dim(value) == 16)) stop("The pointer sprite should be 16 pixels wide and 16 pixels high.") x[["spriteColours"]] <- value@colours } fct <- .SysConfigFactors[[i]] if (!is.null(fct)) { if (is.factor(value)) { if (!all(levels(value) == fct$levs)) stop(sprintf("Illegal levels for factor %s.", i)) value <- value[[1]] } if (is.numeric(value)) { if (!(value[[1]] %in% fct$vals)) stop (sprintf("Illegal value for %s.", i)) value <- factor(fct$levs[fct$vals == value[[1]]], fct$levs) } if (is.character(value)) { if (!(value[[1]] %in% fct$levs)) stop(sprintf("Illegal level for factor %s.", i)) value <- factor(value[[1]], fct$levs) } } else { fct <- .SysConfigMultiFactors[[i]] if (!is.null(fct)) { if (is.factor(value)) value <- as.character(value) if (is.character(value)) value <- match(value, .SysConfigMultiFactors[[i]]$levs) if (is.numeric(value)) { if (any(is.na(value))) stop(sprintf("Illegal value for %s.", i)) value <- .bitwOrAll(value) if (value < 0) stop(sprintf("Illegal value for %s.", i)) temp <- eval(parse(text = sprintf("list(%s = %i)", i, value))) value <- .match.multi.factor(temp, i, .SysConfigMultiFactors[[i]]$vals, .SysConfigMultiFactors[[i]]$levs) } } else { bt <- .SysConfigData$byte[.SysConfigData$par.names == i] sn <- .SysConfigData$signed[.SysConfigData$par.names == i] if (bt > 0) { value <- value[[1]] rn <- c(0, 2^(bt*8) - 1) if (sn) rn <- rn - ceiling(rn[2]/2) if (!sn && value < 0) stop("Negative values are not allowed for %s", i) if (value < rn[1] || value > rn[2]) stop("Value is out of range.") } } } x[[i]] <- value class(x) <- cl return(x) }
decision.finding <- function(out.matrix, n, t, r) {
  rowindex <- which(out.matrix$N == n & out.matrix$T == t & out.matrix$R == r)
  decision <- out.matrix$Decision[rowindex]
  decision <- as.character(decision)
  return(decision)
}
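A hedged sketch tying the two functions above together: build the decision tables once with get.decision.obd.kb(), then look up the recommendation for n treated patients with t toxicities and r responses (the interval cut-offs below are illustrative only).
tables <- get.decision.obd.kb(toxicity.low = 0.15, toxicity.moderate = 0.25,
                              toxicity.high = 0.35, efficacy.low = 0.25,
                              efficacy.moderate = 0.45, efficacy.high = 0.65,
                              target.toxicity = 0.30, target.efficacy = 0.30,
                              cohortsize = 3, ncohort = 3)
decision.finding(tables$decision.matrix, n = 6, t = 1, r = 3)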
step_subset <- function(parent, vars = parent$vars, groups = parent$groups, locals = parent$locals, arrange = parent$arrange, i = NULL, j = NULL, on = character(), allow_cartesian = NULL, needs_copy = FALSE ) { stopifnot(is_step(parent)) stopifnot(is_expression(i) || is_call(i) || is_step(i)) stopifnot(is_expression(j) || is_call(j)) stopifnot(is.character(on)) new_step( parent = parent, vars = vars, groups = groups, locals = locals, arrange = arrange, i = i, j = j, on = on, allow_cartesian = allow_cartesian, implicit_copy = !is.null(i) || !is.null(j), needs_copy = needs_copy || parent$needs_copy, class = "dtplyr_step_subset" ) } step_subset_i <- function(parent, i) { if (is_empty(i)) { return(parent) } if (length(parent$groups) > 0) { parent <- compute(parent) nm <- sym(parent$name) i <- expr((!!nm)[, .I[!!i]]) i <- add_grouping_param(i, parent, FALSE) i <- call("$", i, quote(V1)) } step_subset(parent, i = i) } step_subset_j <- function(parent, vars = parent$vars, groups = parent$groups, arrange = parent$arrange, j = NULL) { if (can_merge_subset(parent)) { i <- parent$i on <- parent$on parent <- parent$parent } else { i <- NULL on <- character() } step_subset( parent, vars = vars, groups = groups, arrange = arrange, i = i, j = j, on = on ) } can_merge_subset <- function(x) { if (!inherits(x, "dtplyr_step_subset")) { return(FALSE) } is.null(x$j) } dt_sources.dtplyr_step_subset <- function(x) { if (is_step(x$i)) { utils::modifyList(dt_sources(x$parent), dt_sources(x$i)) } else { dt_sources(x$parent) } } dt_call.dtplyr_step_subset <- function(x, needs_copy = x$needs_copy) { if (is.null(x$i) && is.null(x$j)) { return(dt_call(x$parent)) } i <- if (is_step(x$i)) dt_call(x$i) else x$i parent <- dt_call(x$parent, needs_copy) if (is.null(i) && is.null(x$j)) { out <- parent } else if (is.null(i) && !is.null(x$j)) { out <- call2("[", parent, , x$j) } else if (!is.null(i) && is.null(x$j)) { out <- call2("[", parent, i) } else { out <- call2("[", parent, i, x$j) } if (!is.null(x$j)) { out <- add_grouping_param(out, x) } if (length(x$on) > 0) { out$on <- call2(".", !!!syms(x$on)) out$allow.cartesian <- x$allow_cartesian } out }
collapse_tree_at_resolution = function( tree, resolution = 0, by_edge_count = FALSE, shorten = TRUE, rename_collapsed_nodes = FALSE, criterion = 'max_tip_depth'){ Ntips = length(tree$tip.label); Nnodes = tree$Nnode; Nedges = nrow(tree$edge); if(!(criterion %in% c('max_tip_depth','sum_tip_paths','max_tip_pair_dist'))) stop(sprintf("criterion must be one of 'max_tip_depth', 'sum_tip_paths', 'max_tip_pair_dist' (got '%s' instead)",criterion)); results = collapse_tree_at_resolution_CPP( Ntips = Ntips, Nnodes = Nnodes, Nedges = Nedges, tree_edge = as.vector(t(tree$edge)) - 1, edge_length = (if(by_edge_count || is.null(tree$edge.length)) numeric() else tree$edge.length), resolution = resolution, shorten = shorten, criterion = criterion); Ntips_new = results$Ntips_new Nnodes_new = results$Nnodes_new Nclades_new = Ntips_new+Nnodes_new new2old_clade = results$new2old_clade + 1; new2old_edge = results$new2old_edge + 1; clade_labels = c(tree$tip.label, tree$node.label) collapsed_nodes = results$collapsed_nodes + 1; collapsed_tree = list( Nnode = Nnodes_new, tip.label = clade_labels[new2old_clade[1:Ntips_new]], node.label = (if(is.null(tree$node.label)) NULL else clade_labels[new2old_clade[(Ntips_new+1):Nclades_new]]), edge = matrix(results$new_tree_edge,ncol=2,byrow=TRUE) + 1, edge.length = (if(is.null(tree$edge.length)) NULL else (if(shorten) tree$edge.length[new2old_edge] else results$new_edge_length)), root = results$new_root+1) if(rename_collapsed_nodes){ old2new_clade = results$old2new_clade + 1; collapsed_tree$tip.label[old2new_clade[Ntips+results$collapsed_nodes+1]] = tree$tip.label[results$farthest_tips+1]; } class(collapsed_tree) = "phylo"; attr(collapsed_tree,"order") = NULL return(list(tree = collapsed_tree, root_shift = results$root_shift, collapsed_nodes = collapsed_nodes, farthest_tips = results$farthest_tips+1, new2old_clade = new2old_clade, new2old_edge = new2old_edge)) }
library("bigmemory") context("backingpath option when attaching") tmp <- tempfile() tmp.dir <- dirname(tmp) tmp.file <- basename(tmp) desc.path <- paste0(tmp.file, ".desc") X <- big.matrix(10, 10, backingfile = tmp.file, backingpath = tmp.dir, descriptorfile = desc.path, init = 0) X.desc <- describe(X) test_that("Format_path puts an '/' at the end if there isn't", { expect_equal(format_path("test"), "test/") expect_equal(format_path("test/"), "test/") expect_equal(format_path("test/test/"), "test/test/") }) test_that("New element 'dirname' in description", { expect_equal(X.desc@description$dirname, format_path(tmp.dir)) }) test_that("you can attach from a full path", { X2 <- attach.big.matrix(file.path(tmp.dir, desc.path)) expect_false(is.nil(X2@address), info = "the matrix exists") X2[] <- 1 expect_equal(X[,], matrix(1, 10, 10), info = "modifying X2 modifies also X") }) test_that("you can attach from a composed path", { X3 <- attach.big.matrix(desc.path, backingpath = tmp.dir) expect_false(is.nil(X3@address), info = "the matrix exists") X3[] <- 2 expect_equal(X[,], matrix(2, 10, 10), info = "modifying X3 modifies also X") }) test_that("you can attach with or without backingpath", { X4 <- attach.big.matrix(X.desc) expect_false(is.nil(X4@address), info = "the matrix exists") X4[] <- 3 expect_equal(X[,], matrix(3, 10, 10), info = "modifying X4 modifies also X") X5 <- attach.big.matrix(X.desc, backingpath = tmp.dir) expect_false(is.nil(X5@address), info = "the matrix exists") X5[] <- 4 expect_equal(X[,], matrix(4, 10, 10), info = "modifying X5 modifies also X") }) test_that("you can sub with or without backingpath", { X6 <- sub.big.matrix(X.desc, lastCol = 5) expect_false(is.nil(X6@address), info = "the matrix exists") X6[] <- 1 expect_equal(X[,], cbind(matrix(1, 10, 5), matrix(4, 10, 5)), info = "modifying X6 modifies also X") X7 <- sub.big.matrix(X.desc, firstCol = 6, backingpath = tmp.dir) expect_false(is.nil(X7@address), info = "the matrix exists") X7[] <- 2 expect_equal(X[,], cbind(matrix(1, 10, 5), matrix(2, 10, 5)), info = "modifying X7 modifies also X") })
which_points_on_edge <- function(front) { which(.Call(do_which_points_on_edge, front)) }
print_dust_latex <- function(x, ..., asis=TRUE) { if (!is.null(x$caption) & x$caption_number) increment_pixie_count() label <- if (is.null(x[["label"]])) { chunk_label <- knitr::opts_current$get("label") if (is.null(chunk_label)) paste0("tab:pixie-", getOption("pixie_count")) else paste0("tab:", chunk_label) } else { paste0("tab:", x[["label"]]) } label <- if (x[["bookdown"]]) { paste0("(\\ } else { paste0("\\label{", label, "}") } if (!is.numeric(x$longtable) & x$longtable) longtable_rows <- 25L else if (!is.numeric(x$longtable) & !x$longtable) longtable_rows <- as.integer(max(x$body$row)) else longtable_rows <- as.integer(x$longtable) tab_env <- if (is.numeric(x$longtable) || x$longtable) "longtable" else "tabular" Joint <- joint_reference_table(x) col_width <- determine_column_width(Joint) col_halign_default <- get_column_halign(Joint) row_height <- lapply(list(x$head, x$body, x$foot, x$interfoot), determine_row_height) head <- part_prep_latex(x$head, col_width, col_halign_default, head = TRUE) body <- part_prep_latex(x$body, col_width, col_halign_default) foot <- if (!is.null(x$foot)) part_prep_latex(x$foot, col_width, col_halign_default) else NULL interfoot <- if (!is.null(x$interfoot)) part_prep_latex(x$interfoot, col_width, col_halign_default) else NULL prebegin <- numeric_longtable_newline(longtable_rows, is.numeric(x$longtable)) prebegin <- paste0(prebegin, "\\setlength{\\tabcolsep}{", x$tabcolsep, "pt}", sep = "\n") if (tab_env == "longtable") { begin <- paste0("\\begin{longtable}[", gsub("n", "l", substr(x[["justify"]], 1, 1)), "]{", paste0(col_halign_default$default_halign, collapse = ""), "}\n", if (!is.null(x$caption)) paste0("\\caption", if (x$caption_number) "" else "*", "{", x$caption, "}") else "", "\n", label, "\\\\ \n") end <- "\\end{longtable}" } else if (x$float) { begin <- paste0("\\begin{table}\n", if (x[["justify"]] == "center") "\\centering\n" else "", if (!is.null(x$caption)) paste0("\\caption", if (x$caption_number) "" else "*", "{", x$caption, "}") else "", "\n", label, "\\begin{tabular}{", paste0(col_halign_default$default_halign, collapse = ""), "}\n") end <- paste0("\\end{tabular}\n\\end{table}\n") } else { begin <- paste0(if (x[["justify"]] == "center") "\\begin{center}\n" else "", if (!is.null(x$caption)) paste0("\\captionof{table}{", x$caption, "}") else "", "\n", label, "\\begin{tabular}{", paste0(col_halign_default$default_halign, collapse = ""), "}\n") end <- paste0("\\end{tabular}\n", if (x[["justify"]] == "center") "\\end{center}\n" else "") } tbl <- mapply(paste_latex_part, list(head, body, foot, interfoot), row_height, MoreArgs = list(newline = if (is.numeric(x$longtable)) " \\ltabnewline" else " \\\\")) if (is.numeric(x$longtable) || x$longtable){ tbl <- paste0(tbl[c(1, 4, 3, 2)], c("\n\\endhead\n", "\n\\endfoot\n", "\n\\endlastfoot\n", "")) } tbl <- paste(tbl, collapse = "\n") if (asis) knitr::asis_output(paste(prebegin, begin, tbl, end, collapse = "\n")) else paste(prebegin, begin, tbl, end, collapse = "\n") } part_prep_latex <- function(part, col_width, col_halign_default, head=FALSE) { part <- part[!names(part) %in% "width"] part <- merge(part, col_width, by = "col", all.x = TRUE, sort = FALSE) part <- merge(part, col_halign_default, by = "col", all.x = TRUE, sort = FALSE) part$width_units <- rep("pt", nrow(part)) part$halign <- ifelse(part$halign == "", part$default_halign, part$halign) Widths <- part[c("html_row", "html_col", "width", "merge")] Widths <- Widths[!duplicated(Widths), ] Widths <- split(Widths, Widths[c("html_row", "html_col")]) 
Widths <- lapply(Widths, function(x){ x$width <- ifelse(x$merge == TRUE, sum(x$width[x$merge]), x$width) x }) Widths <- do.call(".rbind_internal", Widths) numeric_classes <- c("double", "numeric") part <- perform_function(part) logic <- part$round == "" & part$col_class %in% numeric_classes part$round[logic] <- getOption("digits") logic <- part$col_class %in% numeric_classes if (any(logic)) part$value[logic] <- as.character(roundSafe(part$value[logic], as.numeric(part$round[logic]))) logic <- !is.na(part[["replace"]]) part[["value"]][logic] <- part[["replace"]][logic] logic <- is.na(part$value) & !is.na(part$na_string) part$value[logic] <- part$na_string[logic] logic <- part[["sanitize"]] part[["value"]][logic] <- sanitize(part[["value"]][logic], part[["sanitize_args"]][logic]) boldify <- part$bold part$value[boldify] <- paste0("\\textbf{", part$value[boldify], "}") italicize <- part$italic part$value[italicize] <- paste0("\\emph{", part$value[italicize], "}") logic <- part$font_color != "" part$font_color <- vapply(part$font_color, convertColor, character(1)) part$value[logic] <- paste0("\\textcolor", part$font_color[logic], "{", part$value[logic], "}") logic <- part$font_size != "" part$font_size_units[logic] <- ifelse(part$font_size_units[logic] %in% c("%", "px"), "pt", part$font_size_units[logic]) part$value[logic] <- paste0("{\\fontsize{", part$font_size[logic], part$font_size_units[logic], "}{1em}\\selectfont ", part$value[logic], "}") logic <- part$rotate_degree != "" part$value[logic] <- paste0("\\rotatebox{", part$rotate_degree[logic], "}{", part$value[logic], "}") logic <- part$bg != "" part$bg[logic] <- paste0("{\\cellcolor", vapply(part$bg[logic], convertColor, character(1)), "}") part$value[logic] <- paste(part$bg[logic], part$value[logic]) logic <- part$left_border != "" part$left_border[logic] <- vapply(part$left_border[logic], latex_vertical_border_code, character(1)) logic <- part$right_border != "" part$right_border[logic] <- vapply(part$right_border[logic], latex_vertical_border_code, character(1)) logic <- part$bottom_border != "" part$bottom_border[logic] <- mapply(latex_horizontal_border_code, part$bottom_border[logic], part$col[logic]) bottom_borders <- part[c("row", "col", "bottom_border")] bottom_borders <- reshape2::dcast(bottom_borders, row ~ col, value.var = "bottom_border") bottom_borders <- bottom_borders[!names(bottom_borders) %in% "row"] bottom_borders <- apply(bottom_borders, MARGIN = 1, paste0, collapse = "") logic <- part$top_border != "" part$top_border[logic] <- mapply(latex_horizontal_border_code, part$top_border[logic], part$col[logic]) top_borders <- part[c("row", "col", "top_border")] top_borders <- reshape2::dcast(top_borders, row ~ col, value.var = "top_border", fill = "") top_borders <- top_borders[!names(top_borders) %in% "row"] top_borders <- apply(top_borders, MARGIN = 1, FUN = paste0, collapse = "") parbox <- needs_parbox(part) part$halign_parbox <- part$halign part$halign_parbox[parbox] <- c("r" = "\\raggedleft", "c" = "\\centering", "l" = "\\raggedright", "p" = "\\raggedright")[substr(part$halign[parbox], 1, 1)] part$value[parbox] <- paste0("\\parbox[", substr(part$valign[parbox], 1, 1), "]{", part$width[parbox], "pt}{", part$halign_parbox[parbox], " ", part$value[parbox], "}") logic <- part$rowspan > 1 part$value[logic] <- paste0("\\multirow{", part$rowspan[logic], "}{*}{", part$value[logic], "}") logic <- part$html_row != part$row & part$html_col == part$col part$value[logic] <- paste0("\\multicolumn{", part$colspan[logic], "}", "{", 
part$left_border[logic], "c", part$right_border[logic], "}{}") part$rowspan[logic] <- -1 part$colspan[logic] <- part$colspan[logic] * -1 logic <- part$colspan > 1 | (part$left_border != "" | part$right_border != "") | !(part$html_row != part$row & part$html_col == part$col) part$value[logic] <- paste0("\\multicolumn{", part$colspan[logic], "}{", part$left_border[logic], sub("p", "r", substr(part$halign[logic], 1, 1)), part$right_border[logic], "}{", part$value[logic], "}") ncol <- max(part$col) part <- part[!(part$rowspan == 0 | part$colspan == 0), ] proper_multirow <- part[part$colspan != 1, ] proper_multirow$group <- paste0(proper_multirow$html_row, proper_multirow$html_col) proper_multirow <- split(proper_multirow, proper_multirow$group) proper_multirow <- lapply(proper_multirow, function(x){ x[order(x$colspan, decreasing = TRUE), ] x$row <- sort(x$row) x }) proper_multirow <- do.call(".rbind_internal", proper_multirow) part <- part[part$colspan == 1, ] part <- .rbind_internal(part, proper_multirow) part <- .make_dataframe_wide(part) cbind(top_borders, bottom_borders, part) } paste_latex_part <- function(part, row_height, newline = " \\\\"){ paste_row <- function(r) paste(r[!is.na(r)], collapse = " & ") if (is.null(part)) return("") apply(part[, -(1:2), drop = FALSE], 1, paste_row) %>% paste(row_height) %>% paste(newline) %>% paste(part[, 2]) %>% paste(part[, 1], .) %>% paste0(collapse = "\n") } convertColor <- function(color){ if (length(color) == 0) return(character(0)) color <- gsub("rgba[(]255,255,255,0[)]", "", color) if (grepl(" return(paste0("[HTML]{", sub(" } else if (grepl("rgb", color, ignore.case = TRUE)){ rgb <- str_extract_base(color, "\\d{1,3}")[1, 1:3] return(paste0("[RGB]{", paste0(rgb, collapse=","), "}")) } else return(paste0("{", color, "}")) } numeric_longtable_newline <- function(n, redefine = FALSE){ if (redefine) return(paste0("\\newcount\\mylineno \n", "\\mylineno=0 \n", "\\def\\ltabnewline{% \n", "\\global\\advance\\mylineno by 1 \n", "\\ifnum\\mylineno=", n, " \n", "\\global\\mylineno=0 \n", "\\\\ \n", "\\newpage \n", "\\else \n", "\\\\ \n", "\\fi \n", "}")) else return("") } needs_parbox <- function(x) { is.finite(x$width) | (x$halign != x$default_halign) | x$valign != "" | x$merge } joint_reference_table <- function(x){ numeric_classes <- c("double", "numeric") addPartCol <- function(p, part_name) { if (is.null(p)) return(NULL) p$part <- part_name return(p) } Joint <- mapply(addPartCol, x[c("head", "body", "foot", "interfoot")], part_name = c("head", "body", "foot", "interfoot"), SIMPLIFY = FALSE) Joint <- do.call(".rbind_internal", Joint) Joint$width <- as.numeric(Joint$width) Joint$table_width <- x$table_width * 72.27 Joint$width <- ifelse(Joint$width_units == "in", Joint$width * 72.27, ifelse(Joint$width_units == "cm", Joint$width * 27.45, ifelse(Joint$width_units == "%", Joint$width/100 * Joint$table_width, Joint$width))) Joint <- perform_function(Joint) logic <- Joint$round != "" & Joint$col_class %in% numeric_classes if (any(logic)) Joint$value[logic] <- as.character(roundSafe(Joint$value[logic], as.numeric(Joint$round[logic]))) Joint$halign[Joint$halign == ""] <- vapply(Joint$col_class[Joint$halign == ""], default_halign, character(1)) Joint$halign <- substr(Joint$halign, 1, 1) Joint <- split(Joint, Joint$col) Joint <- lapply(Joint, function(x){ x$default_halign <- names(sort(table(x$halign), decreasing = TRUE))[1] x }) Joint <- do.call(".rbind_internal", Joint) Joint$parbox <- needs_parbox(Joint) Joint$width_by_char <- nchar(Joint$value) * 4.5 Joint 
<- split(Joint, Joint$col) Joint <- lapply(Joint, function(x){ x$replace <- all(is.na(x$width)) && any(Joint$parbox) x$width_by_char <- max(x$width_by_char, na.rm = TRUE) x }) Joint <- do.call(".rbind_internal", Joint) Joint$width <- ifelse(Joint$replace, Joint$width_by_char, Joint$width) Joint <- Joint[c("col", "row", "width", "default_halign")] Joint } determine_column_width <- function(Joint, x) { Joint <- Joint[c("row", "col", "width")] suppressWarnings(Joint <- tapply(Joint$width, Joint$col, max, na.rm = TRUE)) Joint <- data.frame(col = names(Joint), width = unname(Joint), stringsAsFactors = FALSE) Joint$width <- ifelse(is.finite(Joint$width), Joint$width, NA) Joint } determine_row_height <- function(part) { if (is.null(part)) return("") part <- part[c("row", "col", "height", "height_units")] part$height <- as.numeric(part$height) part$height <- ifelse(part$height_units == "in", part$height * 72.27, ifelse(part$height_units == "cm", part$height * 28.45, part$height)) suppressWarnings(part <- tapply(part$height, INDEX = part$row, FUN = max, na.rm = TRUE)) part <- data.frame(row = names(part), height = unname(part), stringsAsFactors = FALSE) part$height <- ifelse(!is.finite(part$height), "", paste0("\\\\[", part$height, "pt]")) part$height } get_column_halign <- function(Joint){ Joint$default_halign <- ifelse(is.na(Joint$width), Joint$default_halign, paste0("p{", Joint$width, "pt}")) Joint <- Joint[c("row", "col", "default_halign")] Joint <- tapply(Joint$default_halign, Joint$col, function(x) x[1]) Joint <- data.frame(col = names(Joint), default_halign = unname(Joint), stringsAsFactors = FALSE) Joint } default_halign <- function(col_class, print_method = "latex"){ tag <- if (print_method == "latex") c("r", "l") else c("right", "left") if (col_class %in% c("numeric", "int", "double")) tag[1] else tag[2] } latex_vertical_border_code <- function(x){ border <- str_split_fixed_base(x, " ", 3) border[, 1] <- gsub("px", "pt", border[, 1]) border[, 2] <- ifelse(border[, 2] %in% c("dashed", "dotted"), "dashed", ifelse(border[, 2] %in% c("groove", "ridge", "inset", "outset", "hidden"), "solid", border[, 2])) if (border[, 2] %in% c("hidden", "none")) return("") if (border[, 2] == "dashed"){ border_code <- paste("!{\\color", convertColor(border[, 3]), "\\vdashline}") return(border_code) } if (border[, 2] %in% c("solid", "double")){ border_code <- paste0("!{\\color", convertColor(border[, 3]), "\\vrule width ", border[, 1], "}") return(border_code) } } latex_horizontal_border_code <- function(x, col){ border <- str_split_fixed_base(x, " ", 3) border[, 1] <- gsub("px", "pt", border[, 1]) border[, 2] <- ifelse(border[, 2] %in% c("dashed", "dotted"), "dashed", ifelse(border[, 2] %in% c("groove", "ridge", "inset", "outset", "hidden"), "solid", border[, 2])) if (border[, 2] %in% c("hidden", "none")) return("") if (border[, 2] == "dashed"){ border_code <- paste0("\\arrayrulecolor", convertColor(border[, 3]), "\\cdashline{", col, "-", col, "}") return(border_code) } if (border[, 2] %in% c("solid", "double")){ border_code <- paste0("\\arrayrulecolor", convertColor(border[, 3]), "\\cline{", col, "-", col, "}") return(border_code) } } sanitize <- function(x, args) { sanitize_index <- !is.na(x) if (sum(sanitize_index)) { x[sanitize_index] <- do.call(what = sanitize_latex, args = c(list(object = x[sanitize_index]), eval(parse(text = args[sanitize_index]))) ) } x } utils::globalVariables(c("halign", "left_border", "right_border", "bottom_border", "top_border", "require_multicol", "height", "width", 
"height_units", "width_units", "table_width", "parbox", "width_by_char", "html_row", "html_col", "rowspan", "colspan", "value", "col_name", "col_class", "group", "."))
import_mplus <- function(font_dir = system.file("fonts", "mplus", package = "fontMPlus")) {
  suppressWarnings(suppressMessages(extrafont::font_import(font_dir, prompt = FALSE)))
  message(sprintf("You will likely need to install these fonts on your system as well. You can find them in [%s]", font_dir))
}
"mplus.fonttable"
"mplus.fontfamilies"
dsample <- function(expr, rpmat, n=1e3, nk=1e4, wconst){ variables <- all.vars(expr) if(missing(wconst)) wconst <- 1 cl <- match.call(expand.dots=FALSE) stopifnot(is.expression(expr), is.list(rpmat), is.numeric(nk), is.numeric(n), is.numeric(wconst)) y <- eval(expr=expr, envir=rpmat) fmla <- stats::as.formula(paste("y~", expr, sep="")) X <- as.data.frame(rpmat) yX <- cbind(y, X) yX <- yX[which(y>0),] yX <- yX[order(yX$y, decreasing=TRUE), ] cnt <- graphics::hist(yX$y, breaks=seq(from=min(yX$y), to=max(yX$y), length.out=nk+1), plot=FALSE) cnames <- paste("e", seq_len(nk), sep="") yX$cid <- cut(yX$y, cnt$breaks, include.lowest=TRUE) yX$cnt.name <- cnames[match(yX$cid, names(table(yX$cid)))] cnt.counts <- cnt$counts cnt.mids <- cnt$mids names(cnt.mids) <- names(cnt.counts) <- cnames gpdf <- rev(cnt.counts * cnt.mids) gpdf[nk] <- wconst*gpdf[nk] rev.cntc <- rev(cnt.counts) cdf <- cumsum(gpdf)/sum(gpdf) cumcdf <- cumsum(cdf) pptns <- graphics::hist(stats::runif(n), breaks=c(0,cdf), plot=FALSE)$counts names(pptns) <- rev(paste("e", seq_len(nk), sep="")) scnt <- mapply(FUN=sample, MoreArgs=list(replace=TRUE), rev.cntc, pptns) idx <- unlist( mapply("+", as.list( c(0, cumsum(rev.cntc))[-(nk+1)] ), scnt) ) yX <- yX[order(yX$y, decreasing=TRUE)[idx], ] robj <- list(formula=fmla, expr=expr, yX=yX, X=yX[all.vars(expr)], cnt.counts=cnt.counts, cnt.mids=cnt.mids, gpdf=gpdf, cdf=cdf, cumcdf=cumcdf, pptns=pptns, scnt=scnt, idx=idx) class(robj) <- "dsample" return(robj) } summary.dsample <- function(object, n=5, ...) { stopifnot(inherits(object, "dsample")) fmla <- object$formula X <- object$X hc <- stats::hclust(stats::dist(X)) grp <- stats::cutree(hc, ...) cdf <- object$cdf means <- colMeans(X) stdevs <- do.call(c, lapply(X, stats::sd, na.rm=TRUE)) modes <- cbind(X)[1:n,] robj <- list(formula=fmla, means=means, stdevs=stdevs, modes=modes, hc=hc, grp=grp, X=X, cdf=cdf) class(robj) <- "dsample" return(robj) } plot.dsample <- function(x, ...){ X <- x$X cdf <- x$cdf grp <- x$grp graphics::par(mfrow=c(2,2)) graphics::plot(cdf, main="CDF", xlab="E", ylab="F(E)", cex=0.5) graphics::plot(X, cex=0.5, main="Scatter and Contour Plots", xlab="x1", ylab="x2", col=grp) density <- MASS::kde2d(X[,1], X[,2], n=1e3) graphics::contour(density, nlevels=5, add=TRUE) graphics::hist(X[,1], main="Histogram", ylab="density", xlab=expression(x[1]), prob=TRUE, breaks=20) graphics::hist(X[,2], main="Histogram", ylab="density", xlab=expression(x[2]), prob=TRUE, breaks=20) }
library(difNLR)

data(GMAT, package = "difNLR")
Data <- GMAT[, 1:20]
zscore <- scale(rowSums(Data))

fun <- function(x, a, b, c, d) {
  c + (d - c) * exp(a * (x - b)) / (1 + exp(a * (x - b)))
}

start <- startNLR(
  Data, GMAT[, "group"], model = "4PLcgdg",
  parameterization = "classic"
)[[1]][, 1:4]

fit2PL <- nls(Data[, 1] ~ fun(zscore, a, b, c = 0, d = 1),
              algorithm = "port", start = start[1:2])
fit3PL <- nls(Data[, 1] ~ fun(zscore, a, b, c, d = 1),
              algorithm = "port", start = start[1:3],
              lower = c(-Inf, -Inf, 0), upper = c(Inf, Inf, 1))
fit4PL <- nls(Data[, 1] ~ fun(zscore, a, b, c, d),
              algorithm = "port", start = start,
              lower = c(-Inf, -Inf, 0, 0), upper = c(Inf, Inf, 1, 1))

AIC(fit2PL)
AIC(fit3PL)
AIC(fit4PL)
BIC(fit2PL)
BIC(fit3PL)
BIC(fit4PL)
combine_factor <- function(fac, variable = levels(fac), other.label = "Other") {
  n <- length(levels(fac))
  if (length(variable) < n) {
    nvar <- c(seq(1, length(variable)), rep(length(variable) + 1, n - length(variable)))
    factor(nvar[as.numeric(fac)], labels = c(levels(fac)[variable], other.label))
  } else {
    factor(variable[as.numeric(fac)], labels = levels(fac)[!duplicated(variable)])
  }
}
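A hedged usage sketch of combine_factor(): keep the first two levels of a factor and pool the remaining levels into "Other".
f <- factor(c("a", "b", "c", "d", "a"))
combine_factor(f, variable = 1:2)
# a b Other Other a   (levels: a, b, Other)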
test_that("ansi_html", { str <- c( "\033[1mbold\033[22m", "\033[2mfaint", "\033[3mitalic\033[0m", "\033[4munderline", "\033[5mblink", "\033[7minverse", "\033[8mhide", "\033[9mcrossedout", "\033[30mblack", "\033[31mred", "\033[32mgreen", "\033[33myellow", "\033[34mblue", "\033[35mmagenta", "\033[36mcyan", "\033[37mwhite", "\033[90mbblack", "\033[91mbred", "\033[92mbgreen", "\033[93mbyellow", "\033[94mbblue", "\033[95mbmagenta", "\033[96mbcyan", "\033[97mbwhite", "\033[38;5;156mcolor-156", "\033[38;2;1;22;255mcolor-1-22-255", "\033[40mbg-black", "\033[41mbg-red", "\033[42mbg-green", "\033[43mbg-yellow", "\033[44mbg-blue", "\033[45mbg-magenta", "\033[46mbg-cyan", "\033[47mbg-white", "\033[100mbg-bblack", "\033[101mbg-bred", "\033[102mbg-bgreen", "\033[103mbg-byellow", "\033[104mbg-bblue", "\033[105mbg-bmagenta", "\033[106mbg-bcyan", "\033[107mbg-bwhite", "\033[48;5;156mbg-color-156", "\033[48;2;1;22;255mbg-color-1-22-255" ) expect_snapshot( ansi_html(str) ) }) test_that("multiple styles", { expect_snapshot( ansi_html("\033[1;2;35;45mmultiple") ) }) test_that("CSI", { expect_equal( ansi_html("foo\033[10Abar", csi = "drop"), "foobar" ) expect_equal( ansi_html("\033[1mfoo\033[0m\033[10Abar", csi = "drop"), "<span class=\"ansi ansi-bold\">foo</span>bar" ) expect_equal( ansi_html("foo\033[10Abar", csi = "keep"), "foo\033[10Abar" ) expect_equal( ansi_html("\033[1mfoo\033[0m\033[10Abar", csi = "keep"), "<span class=\"ansi ansi-bold\">foo</span>\033[10Abar" ) }) test_that("ansi_html_style", { expect_snapshot( ansi_html_style(colors = 8) ) expect_snapshot( ansi_html_style(colors = 256, palette = "ubuntu") ) })
optimbase.checkx0 <- function(this = NULL) {
  this <- optimbase.log(this = this, msg = 'Checking initial guess...')
  tmp <- optimbase.isfeasible(this = this, x = this$x0)
  this <- tmp$this
  isfeasible <- tmp$isfeasible
  rm(tmp)
  isok <- (isfeasible == 1)
  if (isok) {
    this <- optimbase.log(this = this, msg = '... initial guess is feasible.')
  } else {
    this <- optimbase.log(this = this, msg = '... initial guess is not feasible.')
  }
  varargout <- list(this = this, isok = isok)
  return(varargout)
}
`pair.sum` <- function(X) {
  d <- dim(X)
  matrix(.C("pairsum", as.double(X), as.integer(d),
            res = double(choose(d[1], 2) * d[2]), PACKAGE = "ICSNP")$res,
         ncol = d[2], byrow = T)
}
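A hedged usage sketch, assuming the ICSNP package is installed and loaded so the compiled "pairsum" routine is available to .C(): the result has one row per pair of rows of X, holding their element-wise sum.
library(ICSNP)
X <- matrix(1:6, nrow = 3)
pair.sum(X)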
library(dynbenchmark)
library(fs)
library(tidyverse)

readme_paths <- c(
  fs::dir_ls("scripts", regexp = "README\\.Rmd", recursive = TRUE),
  fs::dir_ls("derived", regexp = "README\\.Rmd", recursive = TRUE),
  c("./README.Rmd", "results/README.Rmd")
)

walk(readme_paths, function(readme_path) {
  print(paste0("Processing ", readme_path))
  rmarkdown::render(
    readme_path,
    output_format = dynbenchmark::github_markdown_nested(html_preview = FALSE),
    quiet = TRUE
  )
})
plot.twostageTE <- function(x, ...){ if(!inherits(x,"twostageTE")){ stop("Error: Object is not of class twostageTE") } plot_gpava <- function (x, main = "PAVA Plot", xlab = "Predictor", ylab = "Response", col = "lightblue",...) { o <- order(x$z) xval <- x$z[o] yval <- x$x[o] xcum <- c(xval[1] - mean(diff(xval)), xval) jumps <- ((1:length(yval))[!duplicated(yval)]-1)[-1] jumps <- c(1, jumps, length(xval)) lines(xval, yval, col = col, lwd = 1, type <- "S") points(xval[jumps], yval[jumps], col = col, pch = 13) } pava1 <- gpava(z=x$X1, y=x$Y1) if (!is.na(x$L2)) { pava2 <- gpava(z=x$X2, y=x$Y2) } if (!is.na(x$L2)) { plot(x=x$X1,y=x$Y1, pch="1", cex=1.5, xlab="", ylab="", ylim=range(c(x$Y1,x$Y2)), col="grey80") abline(h=x$threshold, lty=3, lwd=1, col=2) points(x=x$X2,y=x$Y2, pch="2", cex=1.5, col="grey65") plot_gpava(pava2, col="blue") } else { plot(x=x$X1,y=x$Y1, pch="1", cex=1.5, xlab="", ylab="", col="grey80") abline(h=x$threshold, lty=3, lwd=1, col=2) plot_gpava(pava1, col=1) } abline(v=x$L1, lty=2, lwd=2) abline(v=x$U1, lty=2, lwd=2) if (!is.na(x$L2)) { abline(v=x$L2, col="blue", lwd=2) abline(v=x$U2, col="blue", lwd=2) } points(x=x$estimate, y=x$threshold, col="blue", pch=4, cex=1.5) if (!is.na(x$L2)) { segments(x$estimate,min(c(x$Y1,x$Y2))-1,x$estimate, x$threshold , lwd=2, col="blue") } else { segments(x$estimate,min(x$Y1)-1,x$estimate, x$threshold, lwd=2, col="blue") } mtext("Explanatory", side=1, line=2.5, cex=1.65) mtext("Response", side=2 , line=2, cex=1.65) if (!is.na(x$L2)) { legend("topleft", c("Estimate", "1st Stage CI", "2nd Stage CI", "2nd Stage Iso-Regression"), pch=c(4, NA, NA, 13), col=c("blue",1, "blue","blue"), lty=c(NA,2,1,1), lwd=c(NA,2,2,1), bg="white") } else { legend("topleft", c("Estimate", "1st Stage CI", "1st Stage Iso-Regression"), pch=c(4, NA, 13), col=c("blue",1,1), lty=c(NA,2,1), lwd=c(NA,2,1), bg="white") } }
droplet <- function(id, ...) { x <- do_GET(sprintf("droplets/%s", id), ...) structure(x$droplet, class = "droplet") } as.droplet <- function(x) UseMethod("as.droplet") as.droplet.numeric <- function(x) droplet(x) as.droplet.character <- function(x) droplets()[[x]] as.droplet.droplet <- function(x) x as.droplet.action <- function(x) { if (x$resource_type != "droplet") { stop("Resource type: ", x$resource_type, call. = FALSE) } action_wait(x) } print.droplet <- function(x, ...) { cat("<droplet>", x$name, " (", x$id, ")\n", sep = "") cat(" IP: ", droplet_ip_safe(x), "\n", sep = "") cat(" Status: ", x$status, "\n", sep = "") cat(" Region: ", x$region$name, "\n", sep = "") cat(" Image: ", x$image$name, "\n", sep = "") cat(" Size: ", x$size_slug, "\n", sep = "") cat(" Volumes: ", paste0(unlist(x$volume_ids), collapse = ", "), "\n", sep = "") } summary.droplet <- function(object, ...) { price <- object$size$price_hourly crat <- as.POSIXct(strptime(object$created_at, "%Y-%m-%dT%H:%M:%S", "UTC")) now <- as.POSIXlt(Sys.time(), "UTC") cost <- round(difftime(now, crat, units = "hours")[[1]] * price, 3) cat("<droplet_detail>", object$name, " (", object$id, ")\n", sep = "") cat(" Status: ", object$status, "\n", sep = "") cat(" Region: ", object$region$name, "\n", sep = "") cat(" Image: ", object$image$name, "\n", sep = "") cat(" Size: ", object$size_slug, " ($", price, " / hr)" ,"\n", sep = "") cat(" Estimated cost ($): ", cost, "\n", sep = "") cat(" Locked: ", object$locked, "\n", sep = "") cat(" Created at: ", object$created_at, " UTC", "\n", sep = "") cat(" Networks: ", "\n", sep = "") cat(" v4: ", make_list(object$networks$v4), "\n", sep = "") cat(" v6: ", make_list(object$networks$v6), "\n", sep = "") cat(" Kernel: ", make_list(list(object$kernel)), "\n") cat(" Snapshots: ", unlist(object$snapshot_ids), "\n") cat(" Backups: ", unlist(object$backup_ids), "\n") cat(" Tags: ", paste0(unlist(object$tags), collapse = ", "), "\n") } make_list <- function(y){ if(length(y) > 0){ y <- y[[1]] out <- list() for(i in seq_along(y)){ out[[i]] <- paste0(names(y)[i], " (", y[i], ")") } paste0(out, collapse = ", ") } else { "none" } }
addNodeWeights = function(x, weights = NULL) { n = getNumberOfNodes(x) checkmate::assertNumeric(weights, len = n, any.missing = FALSE, all.missing = FALSE) x$node.weights = weights return(x) }
MEA = function(dataframe, sampRate, filter = "raw", id, session, group, s1Name, s2Name, uid = paste(group,id,session,sep="_") ){ x = list("MEA"=dataframe, "ccf" = NULL, "ccfRes" = NULL ) attributes(x) = c(attributes(x),list( id = id, session = session, group = group, sampRate = sampRate, filter = filter, ccf = "", s1Name = s1Name, s2Name = s2Name, uid = uid )) class(x) = c("MEA",class(x)) return(x) } MEAlist = function(listOfMea){ if(!is.list(listOfMea) || any(!sapply(listOfMea, is.MEA) ) ) stop("The supplied object must be a list containing only MEA objects", call.=F) if(length(listOfMea)==0) stop("The supplied list is empty",call.=F) class(listOfMea) = "MEAlist" attributes(listOfMea) = c(attributes(listOfMea), list( nId = length(unique(sapply(listOfMea, attr, "id"))), n = length(listOfMea), groups = unique(sapply(listOfMea, attr, "group")), sampRate = ifelse(length(unique(sapply(listOfMea, attr, "sampRate")))==1, unique(sapply(listOfMea, attr, "sampRate")), stop("Cannot construct a MEAlist with different sampling rates") ), filter = ifelse(length(unique(sapply(listOfMea, attr, "filter")))==1, unique(sapply(listOfMea, attr, "filter")), stop("Cannot construct a MEAlist with different filtering procedures") ), s1Name = ifelse(length(unique(sapply(listOfMea, attr, "s1Name")))==1, unique(sapply(listOfMea, attr, "s1Name")), stop("Cannot construct a MEAlist with multiple s1Name labels") ), s2Name = ifelse(length(unique(sapply(listOfMea, attr, "s2Name")))==1, unique(sapply(listOfMea, attr, "s2Name")), stop("Cannot construct a MEAlist with multiple s2Name labels") ), ccf = if(all( sapply(listOfMea, function(x) !is.null(x$ccf)) )) { list( filter = ifelse(length(unique(sapply(listOfMea, function(x) attr(x, "ccf")$filter )))==1, unique(sapply(listOfMea, function(x) attr(x, "ccf")$filter)), stop("Cannot construct a MEAlist with CCF computed with different settings") ), lag = ifelse(length(unique(sapply(listOfMea, function(x) attr(x, "ccf")$lag )))==1, unique(sapply(listOfMea, function(x) attr(x, "ccf")$lag )), stop("Cannot construct a MEAlist with CCF computed with different settings") ), win = ifelse(length(unique(sapply(listOfMea, function(x) attr(x, "ccf")$win )))==1, unique(sapply(listOfMea, function(x) attr(x, "ccf")$win )), stop("Cannot construct a MEAlist with CCF computed with different settings") ), inc = ifelse(length(unique(sapply(listOfMea, function(x) attr(x, "ccf")$inc )))==1, unique(sapply(listOfMea, function(x) attr(x, "ccf")$inc )), stop("Cannot construct a MEAlist with CCF computed with different settings") ) ) } else if(all( sapply(listOfMea, function(x) is.null(x$ccf)) )){ "" } else stop("Cannot construct a MEAlist when only a part of the MEA objects have ccf analyses") )) names(listOfMea) = sapply(listOfMea, attr, "uid") return(listOfMea) } is.MEA = function(x) inherits(x,"MEA") && length(x) hasCCF = function(x){ if(is.MEA(x)) !is.null(x$ccf) else if(is.list(x)) all(sapply(x, hasCCF)) else stop() } is.MEAlist = function(x) inherits(x,"MEAlist") && length(x) c.MEAlist = function(...){ dots = list(...) 
dots = lapply(dots, function(x){attributes(x)=NULL;x}) MEAlist(do.call("c",dots)) } "[.MEAlist" <- function(x,...,drop=FALSE){ y = NextMethod("[") MEAlist(y) } setGroup = function(mea,group){ UseMethod("setGroup", mea) } setGroup.MEA = function(mea,group){ attr(mea,"uid") = paste(group, attr(mea,"id"), attr(mea, "session"),sep ="_") attr(mea,"group") = group mea } setGroup.default = function(mea,group){ if(is.list(mea)){ newNames = vector("character",length(mea)) for(i in 1:length(mea)){ if(is.MEA(mea[[i]])) { mea[[i]] = setGroup(mea[[i]],group) newNames[i] = attr(mea[[i]],"uid") } else stop("An object of a class different from MEA was found in the list") } } else stop("unrecognized format") MEAlist(mea) } summary.MEAlist = function(object, ...){ filters = sapply(object, attr, "filter") if (length(unique(filters))>1) stop("Different processing pipeline found in MEAlist:\r\n",unique(filters),call. = F) s1_perc = sapply(object, function(x){length(x$MEA[[1]][x$MEA[[1]]>0])/nrow(x$MEA)}) s2_perc = sapply(object, function(x){length(x$MEA[[2]][x$MEA[[2]]>0])/nrow(x$MEA)}) noCCF = any(sapply(object, function(x){is.null(x$ccf)})) if(noCCF){ Q = data.frame( "dyad" = factor(sapply(object, function(mea){attr(mea, "id")})), "session" = factor(sapply(object, function(mea){attr(mea, "session")})), "group" = factor(sapply(object, function(mea){attr(mea, "group")})), "s1_perc" = round(s1_perc*100,1), "s2_perc" = round(s2_perc*100,1) ) names(Q)[names(Q) == 's1_perc'] = paste0(attr(object, "s1Name"),"_%") names(Q)[names(Q) == 's2_perc'] = paste0(attr(object, "s2Name"),"_%") cat("\r\nMEA analysis results:\r\n") print(Q) cat("\r\nData processing: ",attr(object, "filter")) } else { pace = sapply(object, function(mea){mean(unlist(mea$ccf[, 1:(attr(mea, "ccf")$lag*attr(mea, "sampRate"))]),na.rm=T)} ) zero = sapply(object, function(mea){mean(unlist(mea$ccf[, (attr(mea, "ccf")$lag*attr(mea, "sampRate")+1)]),na.rm=T)} ) lead = sapply(object, function(mea){mean(unlist(mea$ccf[, (attr(mea, "ccf")$lag*attr(mea, "sampRate")+2):(attr(mea, "ccf")$lag*attr(mea, "sampRate")*2+1) ]),na.rm=T)} ) pace0 = sapply(object, function(mea){mean(unlist(mea$ccf[, 1:(attr(mea, "ccf")$lag*attr(mea, "sampRate") +1)]),na.rm=T)} ) lead0 = sapply(object, function(mea){mean(unlist(mea$ccf[, (attr(mea, "ccf")$lag*attr(mea, "sampRate") +1):(attr(mea, "ccf")$lag*attr(mea, "sampRate")*2+1) ]),na.rm=T)} ) grandAver = unlist(lapply(object, function(mea) { mean(unlist(mea$ccf),na.rm = T)})) Q = data.frame( "dyad" = factor(sapply(object, function(mea){attr(mea, "id")})), "session" = factor(sapply(object, function(mea){attr(mea, "session")})), "group" = factor(sapply(object, function(mea){attr(mea, "group")})), "s1_perc" = round(s1_perc*100,1), "s2_perc" = round(s2_perc*100,1), "all_lags" = round(grandAver,4), "s1_lead" = round(lead,4), "s2_lead" = round(pace,4), "lag_zero" = round(zero,4) ) names(Q)[names(Q) == 's1_perc'] = paste0(attr(object, "s1Name"),"_%") names(Q)[names(Q) == 's2_perc'] = paste0(attr(object, "s2Name"),"_%") names(Q)[names(Q) == 's1_lead'] = paste0(attr(object, "s1Name"),"_lead") names(Q)[names(Q) == 's2_lead'] = paste0(attr(object, "s2Name"),"_lead") cat("\r\nMEA analysis results:\r\n") print(Q) cat("\r\nData processing: ",attr(object, "filter")) cat0("\r\nCCF settings:\r\nWindow = ",attr(object, "ccf")$win, " s | Increments = ",attr(object, "ccf")$inc," s | Lag = ",attr(object, "ccf")$lag, " s.") cat0("\r\n",attr(object, "ccf")$filter) } invisible(Q) }
RMGline <- function(y) { abline(h=y,col="gray",lwd=3) }
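# Hedged usage sketch (not part of the original source): RMGline() simply adds
# a thick gray horizontal reference line to whatever plot is currently open.
plot(1:10, type = "p")
RMGline(5)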
plot.equivalence_test <- function(x, ...) { insight::check_if_installed("see", "to plot results from equivalence-test") NextMethod() } plot.p_direction <- function(x, ...) { insight::check_if_installed("see", "to plot results from p_direction()") NextMethod() } plot.point_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } plot.map_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } plot.rope <- function(x, ...) { insight::check_if_installed("see", "to plot ROPE") NextMethod() } plot.bayestestR_hdi <- function(x, ...) { insight::check_if_installed("see", "to plot HDI") NextMethod() } plot.bayestestR_eti <- function(x, ...) { insight::check_if_installed("see", "to plot credible intervals") NextMethod() } plot.bayestestR_si <- function(x, ...) { insight::check_if_installed("see", "to plot support intervals") NextMethod() } plot.bayesfactor_parameters <- function(x, ...) { insight::check_if_installed("see", "to plot Savage-Dickey Bayes factor") NextMethod() } plot.bayesfactor_models <- function(x, ...) { insight::check_if_installed("see", "to plot models' Bayes factors") NextMethod() } plot.estimate_density <- function(x, ...) { insight::check_if_installed("see", "to plot densities") NextMethod() } plot.estimate_density_df <- function(x, ...) { insight::check_if_installed("see", "to plot models' densities") NextMethod() } plot.p_significance <- function(x, ...) { insight::check_if_installed("see", "to plot practical significance") NextMethod() } plot.describe_posterior <- function(x, stack = FALSE, ...) { insight::check_if_installed("see", "to plot posterior samples") insight::check_if_installed("ggplot2", "to plot posterior samples") model <- .retrieve_model(x) if (!is.null(model)) { plot(estimate_density(model), stack = stack, ...) + ggplot2::labs(title = "Posterior Samples", x = NULL, y = NULL) } else { warning(insight::format_message("Could not find model-object. Try ' plot(estimate_density(model))' instead."), call. = FALSE) } }
are_equivalent_xml_files <- function( filename_1, filename_2, section = NA ) { beautier::check_file_exists(filename_1, "filename_1") beautier::check_file_exists(filename_2, "filename_2") are_equivalent_xml_lines( readLines(filename_1), readLines(filename_2), section = section ) } are_equivalent_xml_lines <- function( lines_1, lines_2, section = NA, verbose = FALSE ) { if (beautier::is_one_na(section)) { return( beautier::are_equivalent_xml_lines_all( lines_1 = lines_1, lines_2 = lines_2, verbose = verbose ) ) } else { testit::assert(!beautier::is_one_na(section)) return( beautier::are_equivalent_xml_lines_section( lines_1 = lines_1, lines_2 = lines_2, section = section, verbose = verbose ) ) } } are_equivalent_xml_lines_all <- function( lines_1, lines_2, verbose = FALSE ) { lines_1 <- stringr::str_subset( string = lines_1, pattern = "^[:blank:]*$", negate = TRUE ) lines_2 <- stringr::str_subset( string = lines_2, pattern = "^[:blank:]*$", negate = TRUE ) if (length(lines_1) != length(lines_2)) { if (verbose) { message( "different lengths: ", length(lines_1), " vs ", length(lines_2) ) } return(FALSE) } for (line in lines_1) { if (!line %in% lines_2) { if (verbose) { message("line '", line, "' not found") } return(FALSE) } } TRUE } are_equivalent_xml_lines_section <- function( lines_1, lines_2, section, verbose = FALSE ) { assertive::assert_is_a_string(section) if (section == "operators") { return( beautier::are_equivalent_xml_lines_operators(lines_1, lines_2, verbose) ) } if (section == "loggers") { return( beautier::are_equivalent_xml_lines_loggers(lines_1, lines_2, verbose) ) } if (!beautier::has_xml_opening_tag(lines = lines_1, section = section)) { stop( "Opening tag for 'section' could not be found in 'lines_1', ", "'section' has value '", section, "'" ) } if (!beautier::has_xml_closing_tag(lines = lines_1, section = section)) { stop( "Closing tag for 'section' could not be found in 'lines_1', ", "'section' has value '", section, "'" ) } if (!beautier::has_xml_opening_tag(lines = lines_2, section = section)) { stop( "Opening tag for 'section' could not be found in 'lines_2', ", "'section' has value '", section, "'" ) } if (!beautier::has_xml_closing_tag(lines = lines_2, section = section)) { stop( "Closing tag for 'section' could not be found in 'lines_2', ", "'section' has value '", section, "'" ) } section_1 <- beautier::extract_xml_section_from_lines( lines = lines_1, section = section) section_2 <- beautier::extract_xml_section_from_lines( lines = lines_2, section = section) beautier::are_equivalent_xml_lines_all( section_1, section_2, verbose = verbose ) } are_equivalent_xml_lines_operators <- function( lines_1, lines_2, verbose = FALSE ) { section_1 <- beautier::extract_xml_operators_from_lines(lines_1) section_2 <- beautier::extract_xml_operators_from_lines(lines_2) beautier::are_equivalent_xml_lines_all( section_1, section_2, verbose = verbose ) } are_equivalent_xml_lines_loggers <- function( lines_1, lines_2, verbose = FALSE ) { section_1 <- beautier::extract_xml_loggers_from_lines(lines_1) section_2 <- beautier::extract_xml_loggers_from_lines(lines_2) beautier::are_equivalent_xml_lines_all( section_1, section_2, verbose = verbose ) }
CheckInputs <- function(Y, DM, W, Time, Starting, Hypers, Tuning, MCMC, Family, Distance, Weights, Rho, ScaleY, ScaleDM) { N <- length(Y) M <- length(DM) Nu <- length(Time) if (!Family %in% c("normal", "probit", "tobit")) stop('Family: must be one of "normal", "probit" or "tobit"') if (!Distance %in% c("euclidean", "circumference")) stop('Distance: must be one of "euclidean" or "circumference"') if (!Weights %in% c("continuous", "binary")) stop('Weights: must be one of "continuous" or "binary"') if (missing(Rho)) stop("Rho: missing") if (!is.scalar(Rho)) stop('Rho must be a scalar') if (is.na(Rho)) stop('Rho cannot be NA') if (!is.finite(Rho)) stop('Rho cannot be infinite') if (!((Rho < 1) & (Rho > 0))) stop('Rho must be in (0, 1)') if (missing(ScaleY)) stop("ScaleY: missing") if (!is.scalar(ScaleY)) stop('ScaleY must be a scalar') if (is.na(ScaleY)) stop('ScaleY cannot be NA') if (!is.finite(ScaleY)) stop('ScaleY cannot be infinite') if (!(ScaleY > 0)) stop('ScaleY must be positive') if (missing(ScaleDM)) stop("ScaleDM: missing") if (!is.scalar(ScaleDM)) stop('ScaleDM must be a scalar') if (is.na(ScaleDM)) stop('ScaleDM cannot be NA') if (!is.finite(ScaleDM)) stop('ScaleDM cannot be infinite') if (!(ScaleDM > 0)) stop('ScaleDM must be positive') if (!is.numeric(Y)) stop('Y must be a vector') if (length(Y) != N) stop(paste0('Y must have length ', N)) if (any(is.na(Y))) stop("Y may have no missing values") if (any(!is.finite(Y))) stop("Y must have strictly finite entries") if ((Family == "probit") & ((sum(Y == 1) + sum(Y == 0)) != N)) stop('Y: for "probit" observed data must be binary') if ((Family == "tobit") & (any(Y < 0))) stop('Y: for "tobit" observed data must be non-negative') if (!is.numeric(DM)) stop('DM must be a vector') if (length(DM) != M) stop(paste0('DM must have length ', M)) if (any(is.na(DM))) stop("DM may have no missing values") if (any(!is.finite(DM))) stop("DM must have strictly finite entries") if (!is.matrix(W)) stop('W must be a matrix') if (!dim(W)[1] == M) stop(paste0('W must be a ',M ,' x ', M, ' dimensional matrix')) if (!dim(W)[2] == M) stop('W must be square') if (sum(!((W) == t(W))) > 0) stop('W must be symmetric') if (length(table(W)) > 2) stop('W must only contain binaries (i.e. 
0\'s or 1\'s)') if (any(diag(W) != 0)) stop('W must have only zeros on the diagonal') if (!is.numeric(Time)) stop('Time must be a vector') if (length(Time) != Nu) stop(paste0('Time must have length ', Nu)) if (any(is.na(Time))) stop("Time may have no missing values") if (any(!is.finite(Time))) stop("Time must have strictly finite entries") if (is.unsorted(Time)) stop('Time vector is not in increasing order') if (!all(Time >= 0)) stop('Time vector has at least one negative point') M_W <- dim(W)[1] if (M != M_W) stop('DM and W have contradictory dimensions') if ((N / M) != Nu) stop('Time, DM and Y have contradictory dimensions') if (!is.null(Hypers)) { if (!is.list(Hypers)) stop('Hypers must be a list') if (!all(names(Hypers) %in% c("Delta", "Sigma", "Alpha"))) stop('Hypers: Can only contain lists with names "Delta", "Sigma" and "Alpha"') if ("Delta" %in% names(Hypers)) { if (!is.list(Hypers$Delta)) stop('Hypers: "Delta" must be a list') if (!"Kappa2" %in% names(Hypers$Delta)) stop('Hypers: "Kappa2" value missing') if (!is.scalar(Hypers$Delta$Kappa2)) stop('Hypers: "Kappa2" must be a scalar') if (is.na(Hypers$Delta$Kappa2)) stop('Hypers: "Kappa2" cannot be NA') if (!is.finite(Hypers$Delta$Kappa2)) stop('Hypers: "Kappa2" cannot be infinite') if (Hypers$Delta$Kappa2 <= 0) stop('Hypers: "Kappa2" must be strictly positive') } if ("Sigma" %in% names(Hypers)) { if (!is.list(Hypers$Sigma)) stop('Hypers: "Sigma" must be a list') if (!"Xi" %in% names(Hypers$Sigma)) stop('Hypers: "Xi" value missing') if (!is.scalar(Hypers$Sigma$Xi)) stop('Hypers: "Xi" must be a scalar') if (is.na(Hypers$Sigma$Xi)) stop('Hypers: "Xi" cannot be NA') if (!is.finite(Hypers$Sigma$Xi)) stop('Hypers: "Xi" cannot be infinite') if (Hypers$Sigma$Xi < 3) stop('Hypers: "Xi" must be greater than or equal to 5') if (!"Psi" %in% names(Hypers$Sigma)) stop('Hypers: "Psi" value missing') if (!is.matrix(Hypers$Sigma$Psi)) stop('Hypers: "Psi" must be a matrix') if (!dim(Hypers$Sigma$Psi)[1] == 5) stop('Hypers: "Psi" must be 5 dimensional') if (!all(!is.na(Hypers$Sigma$Psi))) stop('Hypers: "Psi" cannot have missing values') if (!all(is.finite(Hypers$Sigma$Psi))) stop('Hypers: "Psi" cannot have infinite values') if (!dim(Hypers$Sigma$Psi)[2] == 5) stop('Hypers: "Psi" must be square') if (sum( !( (Hypers$Sigma$Psi) == t(Hypers$Sigma$Psi) ) ) > 0) stop('Hypers: "Psi" must be symmetric') if ((det(Hypers$Sigma$Psi) - 0) < 0.00001) stop('Hypers: "Psi" is close to singular') } if ("Alpha" %in% names(Hypers)) { if (!is.list(Hypers$Alpha)) stop('Hypers: "Alpha" must be a list') if (!"AAlpha" %in% names(Hypers$Alpha)) stop('Hypers: "AAlpha" value missing') if (!is.scalar(Hypers$Alpha$AAlpha)) stop('Hypers: "AAlpha" must be a scalar') if (is.na(Hypers$Alpha$AAlpha)) stop('Hypers: "AAlpha" cannot be NA') if (!is.finite(Hypers$Alpha$AAlpha)) stop('Hypers: "AAlpha" cannot be infinite') if (!"BAlpha" %in% names(Hypers$Alpha)) stop('Hypers: "BAlpha" value missing') if (!is.scalar(Hypers$Alpha$BAlpha)) stop('Hypers: "BAlpha" must be a scalar') if (is.na(Hypers$Alpha$BAlpha)) stop('Hypers: "BAlpha" cannot be NA') if (!is.finite(Hypers$Alpha$BAlpha)) stop('Hypers: "BAlpha" cannot be infinite') if (Hypers$Alpha$AAlpha < 0) stop('Hypers: "AAlpha" must be non-negative') if (Hypers$Alpha$BAlpha <= 0) stop('Hypers: "BAlpha" must be strictly positive') if (Hypers$Alpha$BAlpha < Hypers$Alpha$AAlpha) stop('Hypers: "BAlpha" must be greater than "AAlpha"') } } if (!is.null(Starting)) { if (!is.list(Starting)) stop('Starting must be a list') if (!all(names(Starting) 
%in% c("Delta", "Sigma", "Alpha"))) stop('Starting: Can only contain objects with names "Delta", "Sigma" and "Alpha"') if ("Delta" %in% names(Starting)) { if (!is.numeric(Starting$Delta)) stop('Starting: "Delta" must be a vector') if (length(Starting$Delta) != 5) stop('Starting: "Delta" must be length 5') if (!all(!is.na(Starting$Delta))) stop('Starting: "Delta" cannot have missing values') if (!all(is.finite(Starting$Delta))) stop('Starting: "Delta" cannot have infinite values') } if ("Sigma" %in% names(Starting)) { if (!is.matrix(Starting$Sigma)) stop('Starting: "Sigma" must be a matrix') if (!dim(Starting$Sigma)[1] == 5) stop('Starting: "Sigma" must be 5 dimensional') if (!dim(Starting$Sigma)[2] == 5) stop('Starting: "Sigma" must be square') if (!all(!is.na(Starting$Sigma))) stop('Starting: "Sigma" cannot have missing values') if (!all(is.finite(Starting$Sigma))) stop('Starting: "Sigma" cannot have infinite values') if (sum( !( (Starting$Sigma) == t(Starting$Sigma) ) ) > 0) stop('Starting: "Sigma" must be symmetric') if ((det(Starting$Sigma) - 0) < 0.0000000001) stop('Starting: "Sigma" is close to singular') } if ("Alpha" %in% names(Starting)) { if (!is.scalar(Starting$Alpha)) stop('Starting: "Alpha" must be a scalar') if (is.na(Starting$Alpha)) stop('Starting: "Alpha" cannot be NA') if (!is.finite(Starting$Alpha)) stop('Starting: "Alpha" cannot be infinite') } } if (!is.null(Tuning)) { if (!is.list(Tuning)) stop('Tuning must be a list') if (!all(names(Tuning) %in% c("Lambda0Vec", "Lambda1Vec", "EtaVec", "Alpha"))) stop('Tuning: Can only contain objects with names "Lambda0Vec", "Lambda1Vec", "EtaVec" or "Alpha"') if ("Lambda0Vec" %in% names(Tuning)) { if (!is.numeric(Tuning$Lambda0Vec)) stop('Tuning: "Lambda0Vec" must be a vector') if (length(Tuning$Lambda0Vec) != M) stop(paste0('Tuning: "Lambda0Vec" must have length ', M)) if (!all(!is.na(Tuning$Lambda0Vec))) stop('Tuning: "Lambda0Vec" cannot have missing values') if (!all(is.finite(Tuning$Lambda0Vec))) stop('Tuning: "Lambda0Vec" cannot have infinite values') if (any(Tuning$Lambda0Vec < 0)) stop('Tuning: "Lambda0Vec" must have non-negative components') } if ("Lambda1Vec" %in% names(Tuning)) { if (!is.numeric(Tuning$Lambda1Vec)) stop('Tuning: "Lambda1Vec" must be a vector') if (length(Tuning$Lambda1Vec) != M) stop(paste0('Tuning: "Lambda1Vec" must have length ', M)) if (!all(!is.na(Tuning$Lambda1Vec))) stop('Tuning: "Lambda1Vec" cannot have missing values') if (!all(is.finite(Tuning$Lambda1Vec))) stop('Tuning: "Lambda1Vec" cannot have infinite values') if (any(Tuning$Lambda1Vec < 0)) stop('Tuning: "Lambda1Vec" must have non-negative components') } if ("EtaVec" %in% names(Tuning)) { if (!is.numeric(Tuning$EtaVec)) stop('Tuning: "EtaVec" must be a vector') if (length(Tuning$EtaVec) != M) stop(paste0('Tuning: "EtaVec" must have length ', M)) if (!all(!is.na(Tuning$EtaVec))) stop('Tuning: "EtaVec" cannot have missing values') if (!all(is.finite(Tuning$EtaVec))) stop('Tuning: "EtaVec" cannot have infinite values') if (any(Tuning$EtaVec < 0)) stop('Tuning: "EtaVec" must have non-negative components') } if ("Alpha" %in% names(Tuning)) { if (!is.scalar(Tuning$Alpha)) stop('Tuning: "Alpha" must be a scalar') if (is.na(Tuning$Alpha)) stop('Tuning: "Alpha" cannot be NA') if (!is.finite(Tuning$Alpha)) stop('Tuning: "Alpha" cannot be infinite') if (Tuning$Alpha < 0) stop('Tuning: "Alpha" must be non-negative') } } if (!is.null(MCMC)) { if (!is.list(MCMC)) stop('MCMC must be a list') if (!all(names(MCMC) %in% c("NBurn", "NSims", "NThin", "NPilot"))) 
stop('MCMC: Can only contain objects with names "NBurn", "NSims", "NThin" and "NPilot"') if ("NBurn" %in% names(MCMC)) { if (!is.scalar(MCMC$NBurn)) stop('MCMC: "NBurn" must be a scalar') if (is.na(MCMC$NBurn)) stop('MCMC: "NBurn" cannot be NA') if (!is.finite(MCMC$NBurn)) stop('MCMC: "NBurn" cannot be infinite') if (!is.wholenumber(MCMC$NBurn) | MCMC$NBurn < 0) stop('MCMC: "NBurn" must be a non-negative integer') if (MCMC$NBurn < 100) stop('MCMC: "NBurn" must be at least 100') } if ("NSims" %in% names(MCMC)) { if (!is.scalar(MCMC$NSims)) stop('MCMC: "NSims" must be a scalar') if (is.na(MCMC$NSims)) stop('MCMC: "NSims" cannot be NA') if (!is.finite(MCMC$NSims)) stop('MCMC: "NSims" cannot be infinite') if (!is.wholenumber(MCMC$NSims) | MCMC$NSims <= 0) stop('MCMC: "NSims" must be a positive integer') if (MCMC$NSims < 100) stop('MCMC: "NSims" must be at least 100') } if ("NThin" %in% names(MCMC)) { if (!is.scalar(MCMC$NThin)) stop('MCMC: "NThin" must be a scalar') if (is.na(MCMC$NThin)) stop('MCMC: "NThin" cannot be NA') if (!is.finite(MCMC$NThin)) stop('MCMC: "NThin" cannot be infinite') if (!is.wholenumber(MCMC$NThin) | MCMC$NThin <= 0) stop('MCMC: "NThin" must be a positive integer') } if ("NPilot" %in% names(MCMC)) { if (!is.scalar(MCMC$NPilot)) stop('MCMC: "NPilot" must be a scalar') if (is.na(MCMC$NPilot)) stop('MCMC: "NPilot" cannot be NA') if (!is.finite(MCMC$NPilot)) stop('MCMC: "NPilot" cannot be infinite') if (!is.wholenumber(MCMC$NPilot) | MCMC$NPilot < 0) stop('MCMC: "NPilot" must be a non-negative integer') } } } is.scalar <- function(x) ((is.numeric(x)) & (length(x) == 1)) is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) knitr::include_graphics("figures/fig2.png") knitr::include_graphics("figures/fig3.png") knitr::include_graphics("figures/fig4.png")
extendMPT <- function(fittedModel, n.iter = 10000, n.adapt = 1000, n.burnin = 0, ...){ args <- list(...) if ("n.thin" %in% names(args)) warning("Thinnning interval cannot be changed and is ignored!") args$n.thin <- args$burnin <- args$adapt <- args$sample <- args$summarise <- args$model <- NULL sel.cor <- grep("cor_", varnames(fittedModel$runjags$mcmc), fixed=TRUE) if (inherits(fittedModel, "betaMPT")) sel.cor <- c(sel.cor, grep("rho", varnames(fittedModel$runjags$mcmc), fixed=TRUE)) if (length(sel.cor)>0) fittedModel$runjags$mcmc <- fittedModel$runjags$mcmc[,- sel.cor] args_extend <- c(list(runjags.object = fittedModel$runjags, burnin = n.burnin, sample = ceiling((n.iter-n.burnin)/fittedModel$runjags$thin), adapt = n.adapt, summarise = FALSE), args) fittedModel$runjags <- do.call("extend.jags", args_extend) covData <- fittedModel$mptInfo$covData predTable <- fittedModel$mptInfo$predTable if (!is.null(covData) | fittedModel$mptInfo$model == "betaMPT"){ if (!is.null(predTable) & fittedModel$mptInfo$model == "traitMPT"){ isPred <- (1:ncol(covData)) %in% predTable$covIdx } else { isPred <- rep(FALSE, length(fittedModel$mptInfo$predType)) } sel <- fittedModel$mptInfo$predType == "c" & !isPred if (any(sel) || inherits(fittedModel, "betaMPT")){ cdat <- covData[,sel,drop = FALSE] fittedModel$runjags$mcmc <- as.mcmc.list( lapply(fittedModel$runjags$mcmc, corSamples, covData=cdat, thetaUnique=fittedModel$mptInfo$thetaUnique, rho = ifelse(inherits(fittedModel, "betaMPT"), TRUE, FALSE), corProbit = fittedModel$mptInfo$corProbit)) } } fittedModel$mcmc.summ <- summarizeMCMC(fittedModel$runjags$mcmc, batchSize = 10) fittedModel$summary <- summarizeMPT(mcmc = fittedModel$runjags$mcmc, summ = fittedModel$mcmc.summ, mptInfo = fittedModel$mptInfo) fittedModel$call <- c(fittedModel$call, match.call()) fittedModel }
Cens.SMN.PCR = function(x, y, c, cens="left", tt, nu=NULL, error=10^-6, iter.max=200, type = "Normal", alpha.FIX = TRUE, nu.FIX = TRUE, alpha.in = 10^-3, k = 1, Diagnostic = TRUE, a = 2) { namesx = ('x1 ') if(ncol(as.matrix(x))>1) { for(i in 2:ncol(as.matrix(x))){namesx = cbind(namesx, paste("x",i," ",sep=""))} } if(ncol(as.matrix(y)) > 1) stop("Only univariate partially regression models, argument must be one-dimensional.") if(ncol(as.matrix(tt)) > 1) stop("Only univariate partially regression models, argument must be one-dimensional.") if(ncol(as.matrix(c)) > 1) stop("Only univariate partially regression models, argument must be one-dimensional.") if( length(y) != nrow(as.matrix(x)) ) stop("The number of rows in the matrix X it must be the same than Y.") if( length(c) != nrow(as.matrix(x)) ) stop("The number of rows in the matrix X it must be the same than c.") if( length(tt) != nrow(as.matrix(x)) ) stop("The number of rows in the matrix X it must be the same than tt.") if( (length(x) == 0) | (length(y) == 0) ) stop("All parameters must be provided.") if(sum(is.na(y)==TRUE) > 0) stop("NA's values in y") if(sum(is.na(tt)==TRUE) > 0) stop("NA's values in tt") if(sum(is.na(x)==TRUE) > 0) stop("NA's values in X") if( (type != "Normal") && (type != "T") && (type != "Slash") && (type != "NormalC")) stop("Family not recognized. Please check documentation.") if( (cens != "left") && (cens != "right")) stop("Censoring not recognized: 'left' for left censoring and 'right' for right censoring.") if(cens=="left"){cens = 1}else{cens = 2} if(type == "Normal" && !is.null(nu)){warning("Nu parameter not considered for normal case.",immediate. = TRUE)} if( type == "T") { if(length(nu) > 1 | nu <= 0) stop("For the Student's-t distribution, nu parameter must be a positive scalar") if(is.null(nu)) stop("nu parameter must be provided.") } if( type == "Slash") { if(length(nu) > 1 | nu <= 0) stop("For the Slash distribution, nu parameter must be a positive scalar") if(is.null(nu)) stop("nu parameter must be provided.") } if(type == "NormalC") { warning("When nu parameters are close to the bounds, i.e., 0 or 1, computational problems could arrise.",immediate. 
= TRUE) if(length(nu) != 2) stop("For the Contaminated Normal distribution, nu must be a bidimensional vector.") if(nu[1] <=0 | nu[1] >= 1) stop("nu1 must belong to the interval (0,1)") if(nu[2] <=0 | nu[2] >= 1) stop("nu2 must belong to the interval (0,1)") } if(alpha.in <= 0) stop("alpha parameter must be positive.") if(!any(k == seq(1,ncol(as.matrix(x))))) stop("k must be positive integer <= ncol(x).") if(a <= 0) stop("a must be positive constant.") if(iter.max <= 0 | iter.max%%1 != 0) stop("iter.max must be a positive integer.") if(error <=0 | error > 1) stop("error must belong to the interval (0,1]") if(!is.logical(alpha.FIX) | !is.logical(nu.FIX) | !is.logical(Diagnostic)) stop("Parameters lambda.FIX, nu.FIX and Diagnostic must be logical (TRUE/FALSE) variables.") out.EM = EMSpline.censFinal_MobsSMN(x, y, c, cens, tt, nu, error, iter.max, type, delta.in=NA, alpha.FIX, nu.FIX, alpha.in, k) betas = round(out.EM$beta, 4) sigma2 = round(out.EM$sigma2, 4) MI_obs = sqrt(diag(solve(out.EM$MI))) SEbeta = MI_obs[1:length(betas)] SEsigma2 = MI_obs[length(betas)+length(out.EM$ff)+1] SE = round(c(SEbeta,SEsigma2), 4) Estimates = cbind(rbind(betas,sigma2),SE) namesEstimates = colnames(x) colx = ncol(as.matrix(x)) greeks = c(alpha='\u03b1', sigma='\u03c3\u00B2', nu='\u03BD') if(length(namesEstimates)==0) namesEstimates = namesx[1:colx] dimnames(Estimates) = list(c(namesEstimates,paste0(greeks['sigma'])),c("Estimates", "SE")) if( (type=="T") || (type=="Slash")) { param1 = matrix(round(out.EM$nu, 5),ncol=1,nrow=1) dimnames(param1) = list(c(paste0(greeks['nu'])),"") } if( type=="NormalC") { param2 = matrix(round(out.EM$nu, 5),ncol=1,nrow=2) dimnames(param2) = list(c(paste0(greeks['nu'],"1"), paste0(greeks['nu'],"2")),"") } if( type=="Normal") { alpha = t(as.matrix(out.EM$Alpha)) row.names(alpha) = paste0(greeks['alpha']) colnames(alpha) = " " } if( (type=="T") || (type=="Slash")) { alpha1 = matrix(out.EM$Alpha,ncol=1,nrow=1) dimnames(alpha1) = list(c(paste0(greeks['alpha'])),"") } if( type=="NormalC") { alpha2 = matrix(out.EM$Alpha,ncol=1,nrow=1) dimnames(alpha2) = list(c(paste0(greeks['alpha'])),"") } cat('\n') cat('---------------------------------------------------------------------\n') cat(' Partially linear censored regression models with SMN errors \n') cat('---------------------------------------------------------------------\n') print(Estimates) if(type!="Normal") { if(type=="T"|type=="Slash") { print(param1) } else { print(param2) } } if ( type=="Normal") { print(alpha) } else if ( type=="T"|type=="Slash") { print(alpha1) } else print(alpha2) cat('--------------------------------------------------------------\n') cat('\r \n') criteriaPCR = c(out.EM$loglik, out.EM$AIC) criteriaFin = round(t(as.matrix(criteriaPCR)),digits=3) dimnames(criteriaFin) = list(c("Value"),c("Loglik", "AIC")) cat('\n') cat('Model selection criteria\n') cat('------------------------------------\n') print(criteriaFin) cat('------------------------------------\n') cat('\r \n') if(Diagnostic=="TRUE") { CaseDeletion = plot(out.EM$D$GD1, las=1, ylab=expression(paste("GD"[i]^{1}))) dev.new() par(mfrow=c(2,2), mar=c(4,4.5,1.5,1.5) + 0.1) WeightScheme = plot(out.EM$D$Curvature_W, las=1, ylab="Case weight, M(0)") abline(h=mean(out.EM$D$Curvature_W)+a*sd(out.EM$D$Curvature_W), col="red", lty=2) ScaleScheme = plot(out.EM$D$Curvature_S, las=1, ylab="Case scale, M(0)") abline(h=mean(out.EM$D$Curvature_S)+a*sd(out.EM$D$Curvature_S), col="red", lty=2) ExplanatoryScheme = plot(out.EM$D$Curvature_E, las=1, ylab="Case explanatory, M(0)") 
abline(h=mean(out.EM$D$Curvature_E)+a*sd(out.EM$D$Curvature_E), col="red", lty=2) ResponseScheme = plot(out.EM$D$Curvature_R, las=1, ylab="Case response, M(0)") abline(h=mean(out.EM$D$Curvature_R)+a*sd(out.EM$D$Curvature_R), col="red", lty=2) } out.EM }
GeneticFuzzyAprioriDC_A <- function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){ alg <- RKEEL::R6_GeneticFuzzyAprioriDC_A$new() alg$setParameters(dat,seed,NumberofEvaluations,PopulationSize,ProbabilityofMutation,ProbabilityofCrossover,ParameterdforMMACrossover,NumberofFuzzyRegionsforNumericAttributes,UseMaxOperatorfor1FrequentItemsets,MinimumSupport,MinimumConfidence) return (alg) } R6_GeneticFuzzyAprioriDC_A <- R6::R6Class("R6_GeneticFuzzyAprioriDC_A", inherit = AssociationRulesAlgorithm, public = list( seed=1286082570, NumberofEvaluations=10000, PopulationSize=50, ProbabilityofMutation=0.01, ProbabilityofCrossover=0.8, ParameterdforMMACrossover=0.35, NumberofFuzzyRegionsforNumericAttributes=3, UseMaxOperatorfor1FrequentItemsets="false", MinimumSupport=0.1, MinimumConfidence=0.8, setParameters = function(dat, seed=1286082570,NumberofEvaluations=10000,PopulationSize=50,ProbabilityofMutation=0.01,ProbabilityofCrossover=0.8,ParameterdforMMACrossover=0.35,NumberofFuzzyRegionsforNumericAttributes=3,UseMaxOperatorfor1FrequentItemsets="false",MinimumSupport=0.1,MinimumConfidence=0.8){ super$setParameters(dat) self$seed <- seed self$NumberofEvaluations <- NumberofEvaluations self$PopulationSize <- PopulationSize self$ProbabilityofMutation <- ProbabilityofMutation self$ProbabilityofCrossover <- ProbabilityofCrossover self$ParameterdforMMACrossover <- ParameterdforMMACrossover self$NumberofFuzzyRegionsforNumericAttributes <- NumberofFuzzyRegionsforNumericAttributes self$UseMaxOperatorfor1FrequentItemsets <- UseMaxOperatorfor1FrequentItemsets self$MinimumSupport <- MinimumSupport self$MinimumConfidence <- MinimumConfidence } ), private = list( jarName = "GeneticFuzzyAprioriDC.jar", algorithmName = "GeneticFuzzyAprioriDC_A", algorithmString = "GeneticFuzzyAprioriDC_A", algorithmOutputNumTxt = 2, getParametersText = function(){ text <- "" text <- paste0(text, "seed = ", self$seed, "\n") text <- paste0(text, "Number of Evaluations = ", self$NumberofEvaluations, "\n") text <- paste0(text, "Population Size = ", self$PopulationSize, "\n") text <- paste0(text, "Probability of Mutation = ", self$ProbabilityofMutation, "\n") text <- paste0(text, "Probability of Crossover = ", self$ProbabilityofCrossover, "\n") text <- paste0(text, "Parameter d for MMA Crossover = ", self$ParameterdforMMACrossover, "\n") text <- paste0(text, "Number of Fuzzy Regions for Numeric Attributes = ", self$NumberofFuzzyRegionsforNumericAttributes, "\n") text <- paste0(text, "Use Max Operator for 1-Frequent Itemsets = ", self$UseMaxOperatorfor1FrequentItemsets, "\n") text <- paste0(text, "Minimum Support = ", self$MinimumSupport, "\n") text <- paste0(text, "Minimum Confidence = ", self$MinimumConfidence, "\n") return(text) } ) )
context("Serialisation") reportr::setOutputLevel(Warning) options(reportrStderrLevel=reportr::OL$Fatal) test_that("images can be serialised and deserialised", { path <- system.file("extdata", "nifti", "maskedb0.nii.gz", package="tractor.base") image <- readImageFile(path) file <- tempfile() image$serialise(file) raw <- deserialiseReferenceObject(file, raw=TRUE) object <- deserialiseReferenceObject(file) expect_equal(serialiseReferenceObject(image), raw) expect_true(isDeserialisable(raw)) expect_type(raw, "list") expect_false(is.nilObject(object)) expect_s4_class(object, "MriImage") })
rpartXse <- function(form,data,se=1,cp=0,minsplit=6,verbose=F,...) { tree <- rpart::rpart(form,data,cp=cp,minsplit=minsplit,...) if (verbose && ncol(tree$cptable) < 5) warning("No pruning will be carried out because no estimates were obtained.") rt.prune(tree,se,verbose) } rt.prune <- function(tree,se=1,verbose=T,...) { if (ncol(tree$cptable) < 5) tree else { lin.min.err <- which.min(tree$cptable[,4]) if (verbose && lin.min.err == nrow(tree$cptable)) warning("Minimal Cross Validation Error is obtained at the largest tree.\n Further tree growth (achievable through smaller 'cp' parameter value),\n could produce more accurate tree.\n") tol.err <- tree$cptable[lin.min.err,4] + se * tree$cptable[lin.min.err,5] se.lin <- which(tree$cptable[,4] <= tol.err)[1] rpart::prune.rpart(tree,cp=tree$cptable[se.lin,1]+1e-9) } }
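# Hedged usage sketch (not part of the original source): growing an SE-rule
# pruned regression tree with rpartXse(). The formula, the built-in 'mtcars'
# data and se = 1 are illustrative assumptions, not values from the package.
if (requireNamespace("rpart", quietly = TRUE)) {
  se_tree <- rpartXse(mpg ~ ., mtcars, se = 1)
  rpart::printcp(se_tree)
}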
check_factorstructure <- function(x, ...) { kmo <- check_kmo(x, ...) sphericity <- check_sphericity_bartlett(x, ...) text <- paste0(" - KMO: ", attributes(kmo)$text, "\n - Sphericity: ", attributes(sphericity)$text) if (attributes(kmo)$color == "red" | attributes(sphericity)$color == "red") { color <- "red" } else { color <- "green" } out <- list(KMO = kmo, sphericity = sphericity) attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Is the data suitable for Factor Analysis?" class(out) <- c("easystats_check", class(out)) out } check_kmo <- function(x, ...) { cormatrix <- stats::cor(x, use = "pairwise.complete.obs", ...) Q <- solve(cormatrix) Q <- stats::cov2cor(Q) diag(Q) <- 0 diag(cormatrix) <- 0 sumQ2 <- sum(Q^2) sumr2 <- sum(cormatrix^2) MSA <- sumr2 / (sumr2 + sumQ2) MSA_variable <- colSums(cormatrix^2) / (colSums(cormatrix^2) + colSums(Q^2)) out <- list(MSA = MSA, MSA_variable = MSA_variable) if (MSA < 0.5) { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that factor analysis is likely to be inappropriate (KMO = %.2f).", MSA) color <- "red" } else { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that data seems appropriate for factor analysis (KMO = %.2f).", MSA) color <- "green" } attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "KMO Measure of Sampling Adequacy" class(out) <- c("easystats_check", class(out)) out } check_sphericity_bartlett <- function(x, ...) { cormatrix <- stats::cor(x, use = "pairwise.complete.obs", ...) n <- nrow(x) p <- dim(cormatrix)[2] detR <- det(cormatrix) statistic <- -log(detR) * (n - 1 - (2 * p + 5) / 6) df <- p * (p - 1) / 2 pval <- stats::pchisq(statistic, df, lower.tail = FALSE) out <- list(chisq = statistic, p = pval, dof = df) if (pval < 0.001) { text <- sprintf("Bartlett's test of sphericity suggests that there is sufficient significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", df, statistic, insight::format_p(pval)) color <- "green" } else { text <- sprintf("Bartlett's test of sphericity suggests that there is not enough significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", df, statistic, insight::format_p(pval)) color <- "red" } attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Test of Sphericity" class(out) <- c("easystats_check", class(out)) out }
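# Hedged usage sketch (not part of the original source): running both checks
# on the built-in 'mtcars' data, an illustrative choice; the functions need a
# numeric data frame and the 'insight' package for p-value formatting.
if (requireNamespace("insight", quietly = TRUE)) {
  check_factorstructure(mtcars)
}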
exb_to_df <- function(file_name) { l <- xml2::read_xml(file_name) t <- xml2::xml_find_all(l, "basic-body/tier") tier_names <- xml2::xml_attr(t, "display-name") tier_types <- xml2::xml_attr(t, "type") tier_categories <- xml2::xml_attr(t, "category") tier_speakers <- xml2::xml_attr(t, "speaker") r <- lapply(seq_along(t), function(i) { content <- xml2::xml_text(xml2::xml_find_all(t[[i]], "event")) ts1 <- xml2::xml_attr(xml2::xml_children(t[[i]]), "start") ts2 <- xml2::xml_attr(xml2::xml_children(t[[i]]), "end") if (length(content) == 0) { content <- "" } if (length(ts1) == 0) { ts1 <- "" } if (length(ts2) == 0) { ts2 <- "" } data.frame( tier = i, id = seq_along(content), content = content, tier_name = tier_names[i], tier_type = tier_types[i], tier_category = tier_categories[i], tier_speaker = tier_speakers[i], ts_start = ts1, ts_end = ts2, stringsAsFactors = FALSE ) }) r <- do.call(rbind, r) tli <- xml2::xml_find_all(l, "basic-body/common-timeline/tli") ts <- data.frame( ts_id = xml2::xml_attr(tli, "id"), time_value = xml2::xml_attr(tli, "time"), stringsAsFactors = FALSE ) r <- merge(r, ts, by.x = "ts_start", by.y = "ts_id") names(r)[names(r) == "time_value"] <- "time_start" r <- merge(r, ts, by.x = "ts_end", by.y = "ts_id") names(r)[names(r) == "time_value"] <- "time_end" r <- r[order(r$tier, r$id), -c(1:2)] r$time_start <- as.double(r$time_start) r$time_end <- as.double(r$time_end) r$source <- basename(file_name) return(r) }
comment_width <- function(width = "option", a4portrait_width = 80){ valid_width_arguments <- c("option", "script_width", "a4portrait", "a4landscape", "a3portrait", "a3landscape") if ( !(is.numeric(width) | width %in% valid_width_arguments)){ stop("Invalid width argument! See ?comment_width for help!") } if (width == "option" & !is.null(getOption("comment_width"))){ width <- getOption("comment_width") } else if (width == "option" & is.null(getOption("comment_width"))){ width <- "a4portrait" } a4portrait <- a4portrait_width golden_ratio <- 8 / 5 text2num <- function(width_text){ if (is.character(width_text)){ switch(width_text, "script_width" = getOption("width") - 5, "a4portrait" = a4portrait, "a4landscape" = a4portrait * golden_ratio, "a3portrait" = a4portrait * golden_ratio, "a3landscape" = 2 * a4portrait) } else{ width_text } } if (width %in% valid_width_arguments){ width <- text2num(width) } min(width, text2num("script_width")) }
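# Hedged usage sketch (not part of the original source): the width can come
# from the "comment_width" option or from a named page format, and is always
# capped at the current console width minus 5.
options(comment_width = 60)
comment_width()               # 60, because the option is set and fits the console
comment_width("a4landscape")  # 80 * 8/5 = 128 before capping at the console width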
defer <- function(expr, envir = parent.frame()) { handler <- renv_exit_handlers_add( list(expr = substitute(expr), envir = parent.frame()), envir = envir ) invisible(handler) }
xgb.opt.depth <- function(initial = 8, min_depth = 1, max_depth = 25, patience = 2, sd_effect = 0.001, worst_score = 0, learner = NA, better = max_better) { Laurae.xgb.opt.depth.df = Laurae.xgb.opt.depth.iter = Laurae.xgb.opt.depth.best = NULL Laurae.xgb.opt.depth.df <<- data.frame(depth = 1:max_depth, nrounds = rep(NA, max_depth), mean = rep(NA, max_depth), sd = rep(NA, max_depth), score = rep(NA, max_depth)) Laurae.xgb.opt.depth.iter <<- data.frame(Iteration = 1:3, Depth = c(initial, initial - 2, initial + 2), Score = rep(worst_score, 3), Best = rep(worst_score, 3)) for (i in 1:3) { xgb.opt.depth.callback(i, learner, better, sd_effect) } if (Laurae.xgb.opt.depth.iter[1, "Score"] == better(Laurae.xgb.opt.depth.df[, "score"])) { Laurae.xgb.opt.depth.iter[4, ] <<- c(4, initial - 1, worst_score, worst_score) Laurae.xgb.opt.depth.iter[5, ] <<- c(5, initial + 1, worst_score, worst_score) for (i in 4:5) { xgb.opt.depth.callback(i, learner, better, sd_effect) } cat("\n") cat("Best depth found was: ", Laurae.xgb.opt.depth.best, ".\n", sep = "") return() } else if (Laurae.xgb.opt.depth.iter[2, "Score"] == better(Laurae.xgb.opt.depth.df[, "score"])) { Laurae.xgb.opt.depth.iter[4, ] <<- c(4, initial - 1, worst_score, worst_score) xgb.opt.depth.callback(4, learner, better, sd_effect) if (Laurae.xgb.opt.depth.iter[4, "Best"] == Laurae.xgb.opt.depth.iter[4, "Score"]) { cat("\n") cat("Best depth found was: ", Laurae.xgb.opt.depth.best, ".\n", sep = "") return() } j <- 0 for (i in 5:99999) { Laurae.xgb.opt.depth.iter[i, ] <<- c(i, initial - (i - 2), worst_score, worst_score) xgb.opt.depth.callback(i, learner, better, sd_effect) if ((Laurae.xgb.opt.depth.best == (initial - (i - 2))) & (Laurae.xgb.opt.depth.iter[i, "Depth"] > min_depth)) { j <- 0 } else { j <- j + 1 if ((j == patience) | (Laurae.xgb.opt.depth.iter[i, "Depth"] == min_depth)) { cat("\n") cat("Best depth found was: ", Laurae.xgb.opt.depth.best, ".\n", sep = "") return() } } } } else { Laurae.xgb.opt.depth.iter[4, ] <<- c(4, initial + 1, worst_score, worst_score) xgb.opt.depth.callback(4, learner, better, sd_effect) if (Laurae.xgb.opt.depth.iter[4, "Best"] == Laurae.xgb.opt.depth.iter[4, "Score"]) { cat("\n") cat("Best depth found was: ", Laurae.xgb.opt.depth.best, ".\n", sep = "") return() } j <- 0 for (i in 5:99999) { Laurae.xgb.opt.depth.iter[i, ] <<- c(i, initial + (i - 2), worst_score, worst_score) xgb.opt.depth.callback(i, learner, better, sd_effect) if ((Laurae.xgb.opt.depth.best == (initial + (i - 2))) & (Laurae.xgb.opt.depth.iter[i, "Depth"] < max_depth)) { j <- 0 } else { j <- j + 1 if ((j == patience) | (Laurae.xgb.opt.depth.iter[i, "Depth"] == max_depth)) { cat("\n") cat("Best depth found was: ", Laurae.xgb.opt.depth.best, ".\n", sep = "") return() } } } } }
gg_partial_coplot.rfsrc <- function(object, xvar, groups, surv_type=c("mort", "rel.freq", "surv", "years.lost", "cif", "chf"), time, ...){ if (inherits(object, "rfsrc") == FALSE){ stop(paste("This function only works for Forests grown with the", "randomForestSRC package.")) } if (is.null(object$forest)) { stop(paste("The function requires the \"forest = TRUE\"", "attribute when growing the randomForest")) } arg_list <- list(...) surv_type <- match.arg(surv_type) dta.train <- object$xvar if(missing(groups)){ if(is.null(arg_list$subset)) stop(paste("partial_coplot requires a groups argument to", "stratify the partial plots.")) else{ } } dta.train$group <- groups[1:nrow(dta.train)] lvl <- levels(groups) lng <- length(lvl) sbst <- parallel::mclapply(1:lng, function(ind){ st <- which(dta.train$group == levels(groups)[ind]) if(length(st) == 0) NULL else st }) for (ind in lng:1){ if(is.null(sbst[[ind]])){ sbst[[ind]] <- NULL lvl <- lvl[-ind] } } pdat.partlist <- lapply(1:length(sbst), function(ind){ randomForestSRC::plot.variable(object, surv.type=surv_type, time=time, subset = sbst[[ind]], xvar.names=xvar, partial=TRUE) }) gg_part <- parallel::mclapply(pdat.partlist, gg_partial) for(ind in 1:length(gg_part)){ gg_part[[ind]]$group <- lvl[ind] } gg_merge <- do.call(rbind, gg_part) gg_merge$group <- factor(gg_merge$group, levels=unique(gg_merge$group)) class(gg_merge) <- c("gg_partial_coplot", class(gg_merge)) gg_merge } gg_partial_coplot.default <- gg_partial_coplot.rfsrc gg_partial_coplot <- function (object, xvar, groups, surv_type=c("mort", "rel.freq", "surv", "years.lost", "cif", "chf"), time, ...) { UseMethod("gg_partial_coplot", object) } gg_partial_coplot.randomForest <- function(object, xvar, groups, surv_type=NULL, time = NULL, ...){ stop("gg_partial_coplot is not yet support for randomForest objects") }
population_transitions <- function(populations, demographic_stochasticity, fecundity_matrix, fecundity_max, survival_matrix) { stages <- nrow(fecundity_matrix) multiple_survival_columns <- which(.colSums(+(survival_matrix > 0), m = stages, n = stages) > 1) abundance_array_indices <- array(1:(stages*populations), c(stages, populations))[, rep(1:populations, each = stages)] t_array_indices <- aperm(array(1:(stages*stages*populations), c(stages, stages, populations)), c(2, 1, 3)) t_fecundity_indices <- matrix(which(array(t(fecundity_matrix), c(stages, stages, populations)) > 0), ncol = populations) fecundity_t_indices <- matrix(t_array_indices[t_fecundity_indices], ncol = populations) abundance_fecundity_indices <- matrix(abundance_array_indices[t_fecundity_indices], ncol = populations) t_survival_indices <- matrix(which(array(t(survival_matrix), c(stages, stages, populations)) > 0), ncol = populations) survival_t_indices <- matrix(t_array_indices[t_survival_indices], ncol = populations) abundance_survival_indices <- matrix(abundance_array_indices[t_survival_indices], ncol = populations) abundance_array_indices <- NULL t_array_indices <- NULL if (demographic_stochasticity && is.numeric(fecundity_max)) { fecundity_lookup <- data.frame(original = NA, shifted = seq(0.00, fecundity_max, 0.01)) for (i in 1:nrow(fecundity_lookup)) { fecundity_lookup$original[i] <- mean(pmin(stats::rpois(100000, fecundity_lookup$shifted[i]), fecundity_max)) } } calculate <- function(fecundity_array, survival_array, stage_abundance, occupied_indices) { occupied_populations <- length(occupied_indices) generated_newborns <- array(0, c(stages, stages, populations)) occupied_t_fecundity_indices <- as.vector(t_fecundity_indices[, occupied_indices]) if (demographic_stochasticity) { if (is.numeric(fecundity_max)) { original_fecundity <- fecundity_array[as.vector(fecundity_t_indices[, occupied_indices])] shifted_fecundity <- array(0, length(original_fecundity)) for (i in 1:length(original_fecundity)) { if (is.finite(original_fecundity[i])) { shifted_fecundity[i] <- fecundity_lookup$shifted[which.min(abs(fecundity_lookup$original - original_fecundity[i]))] } } selected_abundance <- stage_abundance[as.vector(abundance_fecundity_indices[, occupied_indices])] generated_newborns[occupied_t_fecundity_indices] <- apply(matrix(1:length(shifted_fecundity)), 1, function(i) sum(pmin(stats::rpois(selected_abundance[i], shifted_fecundity[i]), fecundity_max))) } else { generated_newborns[occupied_t_fecundity_indices] <- stats::rpois(length(occupied_t_fecundity_indices), stage_abundance[as.vector(abundance_fecundity_indices[, occupied_indices])]* fecundity_array[as.vector(fecundity_t_indices[, occupied_indices])]) } } else { if (is.numeric(fecundity_max)) { selected_abundance <- stage_abundance[as.vector(abundance_fecundity_indices[, occupied_indices])] generated_newborns[occupied_t_fecundity_indices] <- pmin(round(selected_abundance*fecundity_array[as.vector(fecundity_t_indices[, occupied_indices])]), trunc(selected_abundance*fecundity_max)) } else { generated_newborns[occupied_t_fecundity_indices] <- round(stage_abundance[as.vector(abundance_fecundity_indices[, occupied_indices])]* fecundity_array[as.vector(fecundity_t_indices[, occupied_indices])]) } } generated_survivals <- array(0, c(stages, stages, populations)) occupied_t_survival_indices <- as.vector(t_survival_indices[, occupied_indices]) if (demographic_stochasticity) { generated_survivals[occupied_t_survival_indices] <- stats::rbinom(length(occupied_t_survival_indices), 
stage_abundance[as.vector(abundance_survival_indices[, occupied_indices])], survival_array[as.vector(survival_t_indices[, occupied_indices])]) } else { generated_survivals[occupied_t_survival_indices] <- round(stage_abundance[as.vector(abundance_survival_indices[, occupied_indices])]* survival_array[as.vector(survival_t_indices[, occupied_indices])]) } for (mult_surv_col in multiple_survival_columns) { excessive_survivals <- (.colSums(generated_survivals[mult_surv_col, , occupied_indices], m = stages, n = occupied_populations) - stage_abundance[mult_surv_col, occupied_indices]) for (i in which(excessive_survivals > 0)) { pop_index <- occupied_indices[i] sample_indices <- sample(1:stages, size = excessive_survivals[i], replace = TRUE, prob = survival_array[, mult_surv_col, pop_index]) for (sample_index in sample_indices) { generated_survivals[mult_surv_col, sample_index, pop_index] <- generated_survivals[mult_surv_col, sample_index, pop_index] - 1 } } } stage_abundance[, occupied_indices] <- .colSums(generated_newborns[, , occupied_indices] + generated_survivals[, , occupied_indices], m = stages, n = stages*occupied_populations) return(stage_abundance) } return(calculate) }
BP <- Rcpp::setRcppClass("BP") LVQs <- Rcpp::setRcppClass("LVQs") MAM <- Rcpp::setRcppClass("MAM") NN <- Rcpp::setRcppClass("NN")
ts_daily_decomp <- function(data, start){ data$date <- as.Date(data$date, "%m/%d/%Y %H:%M") dates <- unique(data$date) z <- as.data.frame(table(data$date)) z$Var1 <- lubridate::parse_date_time(z$Var1, orders = "%Y-%m-%d") z <- z[order(as.Date(z$Var1)), ] dataseries <- stats::ts(z$Freq, frequency=365, start=c(start,1)) ds_decomposed <- stats::stl(dataseries, "per") tssmooth <- stats::HoltWinters(ds_decomposed$time.series[,1], beta=FALSE, gamma=FALSE) ds_decomposed$time.series[2:nrow(ds_decomposed$time.series),1] <- tssmooth$fitted[,1] return(ds_decomposed) }
diagnostics <- function(project = NULL) { renv_scope_error_handler() project <- renv_project_resolve(project) renv_scope_lock(project = project) if (renv_file_type(project, symlinks = FALSE) != "directory") { fmt <- "project %s is not a directory" stopf(fmt, renv_path_pretty(project)) } renv_scope_options(renv.verbose = TRUE) reporters <- list( renv_diagnostics_session, renv_diagnostics_project, renv_diagnostics_status, renv_diagnostics_packages, renv_diagnostics_abi, renv_diagnostics_profile, renv_diagnostics_settings, renv_diagnostics_options, renv_diagnostics_envvars, renv_diagnostics_path, renv_diagnostics_cache ) fmt <- "Diagnostics Report [renv %s]" title <- sprintf(fmt, renv_package_version("renv")) lines <- paste(rep.int("=", nchar(title)), collapse = "") vwritef(c(title, lines, "")) for (reporter in reporters) { tryCatch(reporter(project), error = renv_error_handler) vwritef() } } renv_diagnostics_session <- function(project) { vwritef(header("Session Info")) renv_scope_options(width = 80) print(sessionInfo()) } renv_diagnostics_project <- function(project) { vwritef(header("Project")) vwritef("Project path: %s", renv_path_pretty(project)) } renv_diagnostics_status <- function(project) { vwritef(header("Status")) status(project = project) } renv_diagnostics_packages <- function(project) { vwritef(header("Packages")) lockfile <- renv_diagnostics_packages_lockfile(project) libstate <- renv_diagnostics_packages_library(project) used <- unique(renv_diagnostics_packages_dependencies(project)$Package) recdeps <- renv_package_dependencies( packages = used, project = project ) all <- c( names(lockfile$Packages), names(libstate$Packages), names(recdeps), used ) renv_scope_locale(category = "LC_COLLATE", locale = "C") all <- sort(unique(all)) deps <- rep.int(NA_character_, length(all)) names(deps) <- all deps[names(recdeps)] <- "indirect" deps[used] <- "direct" libpaths <- dirname(map_chr(all, renv_package_find)) flibpaths <- factor(libpaths, levels = .libPaths()) libcodes <- as.integer(flibpaths) libcodes[!is.na(libcodes)] <- sprintf("[%i]", libcodes[!is.na(libcodes)]) data <- data.frame( Library = renv_diagnostics_packages_version(libstate, all), Source = renv_diagnostics_packages_sources(libstate, all), Lockfile = renv_diagnostics_packages_version(lockfile, all), Source = renv_diagnostics_packages_sources(lockfile, all), Path = libcodes, Dependency = deps, stringsAsFactors = FALSE, check.names = FALSE ) renv_scope_options(width = 9000) print(data) fmt <- "[%s]: %s" vwritef() vwritef(fmt, format(seq_along(levels(flibpaths))), format(levels(flibpaths))) } renv_diagnostics_packages_version <- function(lockfile, all) { data <- rep.int(NA_character_, length(all)) names(data) <- all formatted <- map_chr(lockfile$Packages, `[[`, "Version") data[names(formatted)] <- formatted data } renv_diagnostics_packages_sources <- function(lockfile, all) { data <- rep.int(NA_character_, length(all)) names(data) <- all sources <- map_chr(lockfile$Packages, function(record) { record$Repository %||% record$Source %||% "<unknown>" }) data[names(sources)] <- sources data } renv_diagnostics_packages_lockfile <- function(project) { lockpath <- renv_lockfile_path(project = project) if (!file.exists(lockpath)) { vwritef("This project has not yet been snapshotted: 'renv.lock' does not exist.") return(list()) } renv_lockfile_read(lockpath) } renv_diagnostics_packages_library <- function(project) { library <- renv_paths_library(project = project) if (!file.exists(library)) { fmt <- "The project library %s does not 
exist." vwritef(fmt, renv_path_pretty(library)) } snapshot(project = project, lockfile = NULL, type = "all") } renv_diagnostics_packages_dependencies <- function(project) { dependencies(project, progress = FALSE, errors = "reported", dev = TRUE) } renv_diagnostics_abi <- function(project) { vwritef(header("ABI")) tryCatch( renv_abi_check(), error = function(e) { vwritef(conditionMessage(e)) } ) } renv_diagnostics_profile <- function(project) { vwritef(header("User Profile")) userprofile <- "~/.Rprofile" if (!file.exists(userprofile)) return(vwritef("[no user profile detected]")) deps <- dependencies(userprofile, progress = FALSE, errors = "reported", dev = TRUE) if (empty(deps)) return(vwritef("[no R packages referenced in user profile")) renv_scope_options(width = 200) print(deps) } renv_diagnostics_settings <- function(project) { vwritef(header("Settings")) str(renv_settings_get(project)) } renv_diagnostics_options <- function(project) { vwritef(header("Options")) keys <- c( "defaultPackages", "download.file.method", "download.file.extra", "install.packages.compile.from.source", "pkgType", "repos", grep("^renv[.]", names(.Options), value = TRUE) ) vals <- .Options[keys] names(vals) <- keys str(vals) } renv_diagnostics_envvars <- function(project) { vwritef(header("Environment Variables")) envvars <- convert(as.list(Sys.getenv()), "character") useful <- c( "R_LIBS_USER", "R_LIBS_SITE", "R_LIBS", "HOME", "LANG", "MAKE", grep("^RENV_", names(envvars), value = TRUE) ) matches <- envvars[useful] if (empty(matches)) return(vwritef("[no renv environment variables available]")) names(matches) <- useful matches[is.na(matches)] <- "<NA>" matches <- matches[order(names(matches))] keys <- names(matches) vals <- matches formatted <- paste(format(keys), vals, sep = " = ") vwritef(formatted) } renv_diagnostics_path <- function(project) { vwritef(header("PATH")) path <- strsplit(Sys.getenv("PATH"), .Platform$path.sep, fixed = TRUE)[[1]] vwritef(paste("-", path)) } renv_diagnostics_cache <- function(project) { vwritef(header("Cache")) fmt <- "There are a total of %i package(s) installed in the renv cache." cachelist <- renv_cache_list() vwritef(fmt, length(cachelist)) vwritef("Cache path: %s", renv_path_pretty(renv_paths_cache())) }
smoothm <- function(y, method="smhuber", k=0.862, sn=sqrt(2.046/length(y)), tol = 1e-06, s=mad(y), init="median") { estim <- switch(method, huber=sehuber(y,k,tol,s,init), smhuber=smhuber(y,k,sn,tol,s,init=init), bisquare=mbisquare(y,k,tol,s,init), smbisquare=smbisquare(y,k,tol,sn,s,init), cauchy=mlcauchy(y,tol,s), smcauchy=smcauchy(y,tol,sn,s), smmed=smhuber(y,k,sn,tol,s,smmed=TRUE,init)) out <- list(mu=estim$mu,method=method,k=k,sn=sn,tol=tol,s=s) out } sehuber <- function (y, k = 0.862, tol = 1e-06, s=mad(y), init="median") { y <- y[!is.na(y)] n <- length(y) if (init=="median") mu <- median(y) if (init=="mean") mu <- mean(y) repeat { yy <- pmin(pmax(mu - k * s, y), mu + k * s) mu1 <- sum(yy)/n if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } psicauchy <- function(x) 2*x/(1+x^2) psidcauchy <- function(x) (2*(1+x^2)-4*x^2)/(1+x^2)^2 likcauchy <- function(x,mu) prod(dcauchy(x-mu)) flikcauchy <- function(y,x,mu,sn){ out <- c() for (i in 1:length(y)) out[i] <- sum(log(dcauchy(x-y[i]-mu)))*dnorm(y[i],sd=sn) out } smtfcauchy <- function(x,mu,sn) integrate(flikcauchy, -Inf, Inf, x=x, mu=mu, sn=sn)$value smcipsi <- function(y, x, sn=sqrt(2/length(x))) psicauchy(x-y)*dnorm(y,sd=sn) smcipsid <- function(y, x, sn=sqrt(2/length(x))) psidcauchy(x-y)*dnorm(y,sd=sn) smcpsi <- function(x, sn=sqrt(2/length(x))){ out <- c() for (y in x) out <- c(out,integrate(smcipsi, -Inf, Inf, x=y, sn=sn)$value) out } smcpsid <- function(x, sn=sqrt(2/length(x))){ out <- c() for (y in x) out <- c(out,integrate(smcipsid, -Inf, Inf, x=y, sn=sn)$value) out } smbpsi <- function(y, x, k=4.685, sn=sqrt(2/length(x))){ require(MASS) (x-y)*psi.bisquare(x-y,c=k)*dnorm(y,sd=sn) } smbpsid <- function(y, x, k=4.685, sn=sqrt(2/length(x))){ require(MASS) psi.bisquare(x-y,c=k,deriv=1)*dnorm(y,sd=sn) } smbpsii <- function(x, k=4.685, sn=sqrt(2/length(x))){ out <- c() for (y in x) out <- c(out,integrate(smbpsi, -Inf, Inf, x=y, k=k, sn=sn)$value) out } smbpsidi <- function(x, k=4.685, sn=sqrt(2/length(x))){ out <- c() for (y in x) out <- c(out,integrate(smbpsid, -Inf, Inf, x=y, k=k, sn=sn)$value) out } mbisquare <- function (y, k=4.685, tol = 1e-06, s=mad(y), init="median") { require(MASS) y <- y[!is.na(y)] n <- length(y) if (init=="median") mu <- median(y) if (init=="mean") mu <- mean(y) ic <- 0 repeat { ic <- ic+1 s1 <- sum((y-mu)*psi.bisquare((y-mu)/s,c=k)) s2 <- sum(psi.bisquare((y-mu)/s,c=k,deriv=1)) if (abs(s2)>tol) mu1 <- mu+s1/s2 else mu1 <- mu if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } smbisquare <- function (y, k=4.685, tol = 1e-06, sn=sqrt(1.0526/length(y)), s=mad(y), init="median") { y <- y[!is.na(y)] n <- length(y) if (init=="median") mu <- median(y) if (init=="mean") mu <- mean(y) ic <- 0 repeat { ic <- ic+1 s1 <- sum(s*smbpsii((y-mu)/s, k=k, sn=sn)) s2 <- sum(smbpsidi((y-mu)/s, k=k, sn=sn)) if (abs(s2)>tol) mu1 <- mu+s1/s2 else mu1 <- mu if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } mlcauchy <- function (y, tol = 1e-06, s=mad(y)) { y <- y[!is.na(y)] n <- length(y) me <- mu <- median(y) ic <- 0 lmed <- likcauchy(y,mu) repeat { ic <- ic+1 s1 <- sum(s*psicauchy((y-mu)/s)) s2 <- sum(psidcauchy((y-mu)/s)) mu1 <- mu+s1/s2 if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } lmu <- likcauchy(y,mu) if (lmu<=lmed){ md <- abs(rank(y)-(n+1)/2) bi <- 0 md[md==0] <- n mus <- me for (q in (1+2*(0:(ceiling(n/2)-2)))){ for (j in 0:1){ i <- 
order(md)[q+j] mu <- y[i] lmunew <- likcauchy(y,mu) if (lmunew>lmed){ bi <- i lmed <- lmunew } ic <- 0 repeat { ic <- ic+1 s1 <- sum(s*psicauchy((y-mu)/s)) s2 <- sum(psidcauchy((y-mu)/s)) mu1 <- mu+s1/s2 if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } lk <- likcauchy(y,mu) if(lk>lmu){ lmu <- lk mus <- mu } } mu <- mus if (lmu>lmed) break } if (lmu<=lmed){ mu <- max(y) mu1 <- min(y) sa <- sum(psicauchy((y-mu)/s)) sb <- sum(psicauchy((y-mu1)/s)) repeat{ mu2 <- (mu+mu1)/2 sc <- sum(psicauchy((y-mu2)/s)) if (sign(sc)==sign(sa)) mu <- mu2 else mu1 <- mu2 if (abs(mu - mu1) < tol * s) break } lmu <- likcauchy(y,mu) } } if (lmu>lmed) mu1 <- mu else mu1 <- ifelse(bi==0,me,y[bi]) list(mu = mu1, s = s) } smcauchy <- function (y, tol = 1e-06, sn=sqrt(2/length(y)), s=mad(y)) { y <- y[!is.na(y)] n <- length(y) me <- mu <- median(y) ic <- 0 lmed <- smtfcauchy(y,mu,sn) repeat { ic <- ic+1 mu1 <- mu+sum(s*smcpsi((y-mu)/s, sn=sn))/ sum(smcpsid((y-mu)/s, sn=sn)) if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } lmu <- smtfcauchy(y,mu,sn) if (lmu<=lmed){ md <- abs(rank(y)-(n+1)/2) bi <- 0 md[md==0] <- n mus <- me for (q in (1+2*(0:(ceiling(n/2)-2)))){ for (j in 0:1){ i <- order(md)[q+j] mu <- y[i] lmunew <- smtfcauchy(y,mu,sn) if (lmunew>lmed){ bi <- i lmed <- lmunew } ic <- 0 repeat { ic <- ic+1 mu1 <- mu+sum(s*smcpsi((y-mu)/s, sn=sn))/ sum(smcpsid((y-mu)/s, sn=sn)) if (mu1>max(y) | mu1<min(y) | ic>200) break if (abs(mu - mu1) < tol * s) break mu <- mu1 } lk <- smtfcauchy(y,mu,sn) if(lk>lmu){ lmu <- lk mus <- mu } } mu <- mus if (lmu>lmed) break } } if (lmu>lmed) mu1 <- mu else mu1 <- ifelse(bi==0,me,y[bi]) list(mu = mu1, s = s) } smpsi <- function(x,k=0.862,sn=sqrt(2/length(x))) { k*pnorm(x,k,sn)-k*(1-pnorm(x,-k,sn))+x*(pnorm(x,-k,sn)-pnorm(x,k,sn))+ sn*(dnorm((x+k)/sn)-dnorm((x-k)/sn)) } smpmed <- function(x,sn=sqrt(1/5)) { -pnorm(0,x,sn)+pnorm(0,-x,sn) } smhuber <- function (y, k = 0.862, sn=sqrt(2.046/length(y)), tol = 1e-06, s=mad(y), smmed=FALSE, init="median") { y <- y[!is.na(y)] n <- length(y) if (init=="median") mu <- median(y) if (init=="mean") mu <- mean(y) repeat { if(smmed){ w <- smpmed(y-mu,sn=sn)/(y-mu) w[abs(w)==Inf] <- NA w[is.na(w)] <- max(w,na.rm=TRUE) w[is.na(w)] <- 1 } else{ w <- smpsi(y-mu,k=k,sn=sn)/(y-mu) w[abs(w)==Inf] <- NA w[is.na(w)] <- max(w,na.rm=TRUE) w[is.na(w)] <- 1 } yy <- w*y mu1 <- sum(yy)/sum(w) if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } dens <- function(x, dfunction, ...){ prod(dfunction(x, ...)) } pdens <- function(z, x, dfunction, ...) { out <- c() for (j in 1:length(z)){ out[j] <- z[j]*dens(x-z[j], dfunction, ...) } out } sdens <- function(z, x, dfunction, ...) { out <- c() for (j in 1:length(z)) out[j] <- dens(x-z[j], dfunction, ...) out } pitman <- function(y, d=ddoublex, lower=-Inf, upper=Inf, s=mad(y), ...) 
{ n <- length(y) z <- y/s out <- s*integrate(pdens, lower, upper, x=z, dfunction=d, ...)$value/ integrate(sdens, lower, upper, x=z, dfunction=d, ...)$value if (is.na(out)) out <- median(y) out } edhuber <- function(x, k=0.862, mu=0, sigma=1) { z <- (x-mu)/sigma fk <- dnorm(k) eps <- 1-1/(pnorm(k)-pnorm(-k)+2*fk/k) val=c() for (y in z){ if (y<(-k)) val <- c(val,(1-eps)*fk*exp(k*(y+k))) else{ if (y>k) val <- c(val,(1-eps)*fk*exp(-k*(y-k))) else val <- c(val,(1-eps)*dnorm(y)) } } out <- list(val=val/sigma,eps=eps) out } dhuber <- function(x, k=0.862, mu=0, sigma=1) edhuber(x, k, mu, sigma)$val rhuber <- function(n,k=0.862, mu=0, sigma=1) { l <- c() while(length(l)<n){ x <- rexp(1) s <- sample(c(-1,1),size=1) y <- s*x/k u <- runif(1) if (abs(y)>=k | u<=exp(k*abs(y)-(k*k+y*y)/2)) l <- c(l,y) } sigma*l+mu } ddoublex <- function(x, mu=0, lambda=1) exp(-abs(x-mu)/lambda)/(2*lambda) rdoublex <- function(n,mu=0,lambda=1) { x <- rexp(n) s <- sample(c(-1,1),size=n,replace=TRUE) y <- s*x lambda*y+mu }
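# Illustrative calls of the robust location estimators defined above (a sketch,
# not package documentation); the data are made up and include one outlier.
set.seed(1)
y <- c(rnorm(20), 8)
smoothm(y, method = "smhuber")$mu   # smoothed Huber M-estimate of location
smoothm(y, method = "huber")$mu     # ordinary Huber M-estimate
pitman(y)                           # Pitman estimator under the double-exponential model (d = ddoublex)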
flexit <- function(x, par=NULL, P=NULL, S=NULL, K1=NULL, K2=NULL, zero=1E-9, error0 = 0, error1 = 1) {
  if (!is.null(par)) {
    if (is.na(par["K1"])) par <- c(par, K1=1)
    if (is.na(par["K2"])) par <- c(par, K2=1)
    K1 <- ifelse(par["K1"] == 0, zero, par["K1"])
    K2 <- ifelse(par["K2"] == 0, zero, par["K2"])
    S <- par["S"]
    P <- par["P"]
  }
  if (is.null(K1)) K1 <- 1
  if (is.null(K2)) K2 <- 1
  K1 <- ifelse(K1 == 0, zero, K1)
  K2 <- ifelse(K2 == 0, zero, K2)
  if (is.infinite(2^(K1))) {
    S1 <- K1*S/2
  } else {
    S1 <- (2^(K1 - 1)*K1*S)/(2^(K1) - 1)
  }
  Test1 <- (1 + (2^K1 - 1) * exp(4 * S1 * (P - x)))
  Test1_p <- ifelse(!is.infinite(Test1), Test1^(-1/K1), exp((-1/K1)*(K1*log(2)+(4*S1*(P-x)))))
  if (is.infinite(2^(K2))) {
    S2 <- K2*S/2
  } else {
    S2 <- (2^(K2 - 1)*K2*S)/(2^(K2) - 1)
  }
  Test2 <- (1 + (2^K2 - 1) * exp(4 * S2 * (x - P)))
  Test2_p <- ifelse(!is.infinite(Test2), 1 - Test2^(-1/K2), 1 - exp((-1/K2)*(K2*log(2)+(4*S2*(x - P)))))
  p <- ifelse(x < P,
              ifelse(Test1 < 0, ifelse(S1 < 0, error1, error0), Test1_p),
              ifelse(Test2 < 0, ifelse(S2 < 0, error0, error1), Test2_p))
  return(p)
}
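# Illustrative call of flexit() defined above (parameter values chosen arbitrarily):
# P is the transition point where the curve crosses 0.5, S controls the steepness of
# the transition, and K1/K2 shape the lower and upper halves of the curve.
x <- seq(0, 10, by = 0.1)
p <- flexit(x, par = c(P = 5, S = 0.3, K1 = 2, K2 = 0.5))
plot(x, p, type = "l", ylab = "flexit(x)")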
print.EFA_AVERAGE <- function(x, stat = c("average", "range"), plot = TRUE, ...) { checkmate::assert_subset(stat, c("average", "sd", "range", "min", "max"), empty.ok = FALSE) settings <- x$settings method <- settings$method rotation <- settings$rotation N <- settings$N grid <- x$implementations_grid averaging <- settings$averaging varied_settings <- grid varied_settings[, c("errors", "error_m", "converged", "heywood", "chisq", "p_chi", "cfi", "caf", "rmsea", "aic", "bic")] <- NULL varied_settings <- apply(varied_settings, 2, function(x)unique(x[!is.na(x)])) varied_settings <- sapply(varied_settings, length) varied_settings <- names(varied_settings[varied_settings > 1]) no_efas <- nrow(grid) cat("\n") cat("Averaging performed with averaging method ", crayon::bold(ifelse(averaging == "median", "median", paste0("mean (trim = ", settings$trim, ")", sep = ""))), " across ", crayon::bold(no_efas), " EFAs, ", "varying the following settings: ", .settings_string(varied_settings), ".", sep = "") cat("\n") cat("\n") cat("The error rate is at ", crayon::bold(round(mean(grid$errors, na.rm = TRUE) * 100), "%", sep = ""), ". Of the solutions that did not result in an error, ", crayon::bold(round(mean(grid$converged == 0, na.rm = TRUE) * 100), "%", sep = ""), " converged, ", crayon::bold(round(mean(grid$heywood, na.rm = TRUE) * 100), "%", sep = ""), " contained Heywood cases, and ", crayon::bold(round(mean(grid$admissible, na.rm = TRUE) * 100), "%", sep = ""), " were admissible.", sep = "") cat("\n") cat("\n") if(all(grid$converged != 0 | grid$errors | grid$heywood)){ warning(crayon::yellow$bold("!"), crayon::yellow(" No solutions were achieved across which averaging was possible. Best try again with a different number of factors.\n")) } else { fit <- x$fit_indices rownames(fit) <- fit$index cat("\n") cat(cli::rule(left = crayon::bold("Indicator-to-Factor Correspondences"), col = "blue", line = 2)) cat("\n") cat("\n") cat("For each cell, the proportion of solutions including the respective indicator-to-factor correspondence. A salience threshold of", crayon::bold(settings$salience_threshold), "was used to determine indicator-to-factor correspondences.") cat("\n") cat("\n") cat(print.LOADINGS(x$ind_fac_corres, cutoff = 1e-4, digits = 2)) cat("\n") cat("\n") cat(cli::rule(left = crayon::bold("Loadings"), col = "blue", line = 2)) cat("\n") .print_average(x, what = c("loadings"), stat = stat, averaging = averaging) cat("\n") if(!all(is.na(x$Phi))){ cat("\n") cat(cli::rule(left = crayon::bold("Factor Intercorrelations from Oblique Solutions"), col = "blue", line = 2)) cat("\n") .print_average(x, what = c("Phi"), stat = stat, averaging = averaging) cat("\n") } cat("\n") cat(cli::rule(left = crayon::bold("Variances Accounted for"), col = "blue", line = 2)) cat("\n") .print_average(x, what = c("vars_accounted"), stat = stat, averaging = averaging) cat("\n") if (fit["df", "average"] == 0) { cat("\n") cat(crayon::yellow$bold("!"), crayon::yellow(" The model is just identified (df = 0). 
Goodness of fit indices may not be interpretable.")) cat("\n") } cat("\n") cat(cli::rule(left = crayon::bold("Model Fit"), col = "blue", line = 2)) cat("\n") cat("\n") cat(crayon::blue(" ", ifelse(averaging == "mean", "M", "Md"), " (SD) [Min; Max]", sep = "")) cat("\n") if(all(method == "PAF") || is.na(N)){ .print_gof(fit, ind = "caf", ind_name = "CAF: ", print_zero = FALSE, digits = 2) cat(crayon::blue("df: "), .numformat(fit["df", "average"], 0, print_zero = TRUE), "\n", sep = "") } else { .print_gof(fit, ind = c("chisq"), ind_name = "\U1D712\U00B2: ", print_zero = TRUE, digits = 2) cat(crayon::blue("df: "), .numformat(fit["df", "average"], 0, print_zero = TRUE), "\n", sep = "") .print_gof(fit, ind = c("p_chi", "cfi", "rmsea", "aic", "bic", "caf"), ind_name = c(crayon::italic("p: "), "CFI: ", "RMSEA: ", "AIC: ", "BIC: ", "CAF: "), print_zero = c(FALSE, FALSE, FALSE, TRUE, TRUE, FALSE), digits = c(3, 2, 2, 2, 2, 2)) } if(isTRUE(plot)){ if(ncol(x$loadings$average) <= 10){ plot(x) } else { message(cli::col_cyan(cli::symbol$info, " The factor solution contained more than 10 factors, no plot was generated. If you still want to create the plot, use 'plot(", substitute(x) ,")'.\n")) } } } } .print_average <- function(x, what, stat, averaging){ if("average" %in% stat){ if(averaging == "mean"){ cat("\n") cat(cli::rule(left = crayon::bold("Mean"), col = "blue")) cat("\n") cat("\n") } else { cat("\n") cat(cli::rule(left = crayon::bold("Median"), col = "blue")) cat("\n") cat("\n") } if(what == "loadings"){ print(x$loadings$average) } else if(what == "Phi"){ cat(.get_compare_matrix(x$Phi$average, r_red = Inf, n_char = 17, var_names = paste0("F", seq_len(ncol(x$Phi$average))))) } else { cat(.get_compare_matrix(x$vars_accounted$average, r_red = Inf, n_char = 17)) } } if("sd" %in% stat){ cat("\n") cat(cli::rule(left = crayon::bold("Standard Deviation"), col = "blue")) cat("\n") cat("\n") if(what == "loadings"){ cat(.get_compare_matrix(x$loadings$sd, r_red = Inf, n_char = 17)) } else if(what == "Phi"){ cat(.get_compare_matrix(x$Phi$sd, r_red = Inf, n_char = 17, var_names = paste0("F", seq_len(ncol(x$Phi$sd))))) } else { cat(.get_compare_matrix(x$vars_accounted$sd, r_red = Inf, n_char = 17)) } } if("range" %in% stat){ cat("\n") cat(cli::rule(left = crayon::bold("Range"), col = "blue")) cat("\n") cat("\n") if(what == "loadings"){ cat(.get_compare_matrix(x$loadings$range, r_red = Inf, n_char = 17)) } else if(what == "Phi"){ cat(.get_compare_matrix(x$Phi$range, r_red = Inf, n_char = 17, var_names = paste0("F", seq_len(ncol(x$Phi$range))))) } else { cat(.get_compare_matrix(x$vars_accounted$range, r_red = Inf, n_char = 17)) } } if("min" %in% stat){ cat("\n") cat(cli::rule(left = crayon::bold("Minimum"), col = "blue")) cat("\n") cat("\n") if(what == "loadings"){ print(x$loadings$min) } else if(what == "Phi"){ cat(.get_compare_matrix(x$Phi$min, r_red = Inf, n_char = 17, var_names = paste0("F", seq_len(ncol(x$Phi$min))))) } else { cat(.get_compare_matrix(x$vars_accounted$min, r_red = Inf, n_char = 17)) } } if("max" %in% stat){ cat("\n") cat(cli::rule(left = crayon::bold("Maximum"), col = "blue")) cat("\n") cat("\n") if(what == "loadings"){ print(x$loadings$max) } else if(what == "Phi"){ cat(.get_compare_matrix(x$Phi$max, r_red = Inf, n_char = 17, var_names = paste0("F", seq_len(ncol(x$Phi$max))))) } else { cat(.get_compare_matrix(x$vars_accounted$max, r_red = Inf, n_char = 17)) } } } .print_gof <- function(fit, ind, ind_name, print_zero, digits){ for(i in seq_along(ind)){ if(ind[i] %in% c("p_chi", "cfi", "rmsea", 
"caf")){ cat(crayon::blue(ind_name[i], sep = ""), ifelse(round(fit[ind[i], "average"], digits[i]) < 1, substr(.numformat(fit[ind[i], "average"], digits = digits[i], print_zero = print_zero[i]), 2, digits + 2), .numformat(fit[ind[i], "average"], digits = digits[i], print_zero = print_zero[i])), " (", ifelse(round(fit[ind[i], "sd"], digits[i]) < 1, substr(.numformat(fit[ind[i], "sd"], digits = digits[i], print_zero = print_zero[i]), 2, digits + 2), .numformat(fit[ind[i], "sd"], digits = digits[i], print_zero = print_zero[i])), ") [", ifelse(round(fit[ind[i], "min"], digits[i]) < 1, substr(.numformat(fit[ind[i], "min"], digits = digits[i], print_zero = print_zero[i]), 2, digits + 2), .numformat(fit[ind[i], "min"], digits = digits[i], print_zero = print_zero[i])), "; ", ifelse(round(fit[ind[i], "max"], digits[i]) < 1, substr(.numformat(fit[ind[i], "max"], digits = digits[i], print_zero = print_zero[i]), 2, digits + 2), .numformat(fit[ind[i], "max"], digits = digits[i], print_zero = print_zero[i])), "]\n", sep = "") } else { cat(crayon::blue(ind_name[i], sep = ""), .numformat(fit[ind[i], "average"], digits = digits[i], print_zero = print_zero[i]), " (", .numformat(fit[ind[i], "sd"], digits = digits[i], print_zero = print_zero[i]), ") [", .numformat(fit[ind[i], "min"], digits = digits[i], print_zero = print_zero[i]), "; ", .numformat(fit[ind[i], "max"], digits = digits[i], print_zero = print_zero[i]), "]\n", sep = "") } } }
# Zero-size placeholder grob; unit() and the dispatch generics used below
# (widthDetails, heightDetails, grobWidth, grobHeight, drawDetails) come from the grid package.
zeroGrob <- function() .zeroGrob

.zeroGrob <- NULL

widthDetails.zeroGrob <- function(x) unit(0, "cm")
heightDetails.zeroGrob <- function(x) unit(0, "cm")
grobWidth.zeroGrob <- function(x) unit(0, "cm")
grobHeight.zeroGrob <- function(x) unit(0, "cm")
drawDetails.zeroGrob <- function(x, recording) {}

is.zero <- function(x) is.null(x) || inherits(x, "zeroGrob")
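# Illustrative check of the zero-grob helpers above (assumes the grid package,
# which supplies unit(), textGrob() and the grob dispatch generics):
library(grid)
is.zero(zeroGrob())         # TRUE: the NULL placeholder counts as a zero grob
is.zero(textGrob("hello"))  # FALSE: an ordinary grob does not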
library(tibble)
library(data.table)

data(din32645)

test_that("calibration() handles data.tables and tibbles well", {
  expect_silent(calibration(Area ~ Conc, data = as_tibble(din32645), check_assumptions = FALSE))
  expect_silent(calibration(Area ~ Conc, data = data.table(din32645), check_assumptions = FALSE))
})

data(clayloam)

test_that("texture() handles data.tables and tibbles well", {
  expect_silent(texture(reading ~ blank + time + temperature, as_tibble(clayloam), model = "W1.2"))
  expect_silent(texture(reading ~ blank + time + temperature, data.frame(clayloam), model = "W1.2"))
})
centdis <- function(C, num, Meth) {
  # Distance between the centroids of two groups stacked in C.
  # The input is transposed first, so C is expected with observations in
  # columns; after t(C), rows 1:num form group A and the rest form group B.
  C <- t(C)
  A <- as.matrix(C[1:num, ])
  B <- as.matrix(C[(1 + num):nrow(C), ])
  if (Meth == "stat") {
    # "stat": rescale each variable by the standard deviation of the combined
    # groups, then take the Euclidean distance between the group means
    C <- rbind(A, B)
    cr <- nrow(C)
    cc <- ncol(C)
    for (n in 1:cc) {
      A[, n] <- A[, n] * (1 / sd(C[, n]))
      B[, n] <- B[, n] * (1 / sd(C[, n]))
    }
    AA <- apply(A, 2, mean)
    BB <- apply(B, 2, mean)
    C <- rbind(AA, BB)
    D <- as.matrix(dist(C, method = "euclidean", diag = TRUE, upper = TRUE))
  } else {
    # otherwise Meth is passed straight to dist() (e.g. "euclidean", "manhattan")
    AA <- apply(A, 2, mean)
    BB <- apply(B, 2, mean)
    C <- rbind(AA, BB)
    D <- as.matrix(dist(C, method = Meth, diag = TRUE, upper = TRUE))
  }
  return(D[1, 2])
}
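# Illustrative call of centdis() defined above (a sketch; the data are made up).
# Pass variables in rows and observations in columns; 'num' is the size of the first group.
set.seed(1)
g1 <- matrix(rnorm(20), nrow = 5)            # 5 observations, 4 variables
g2 <- matrix(rnorm(20, mean = 2), nrow = 5)  # second group, shifted mean
X  <- t(rbind(g1, g2))                       # variables in rows, observations in columns
centdis(X, num = 5, Meth = "euclidean")      # centroid distance on the raw scale
centdis(X, num = 5, Meth = "stat")           # centroid distance after scaling by SD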
`studentGrowthProjections` <- function(panel.data, sgp.labels, grade.progression, content_area.progression=NULL, year_lags.progression=NULL, grade.projection.sequence=NULL, content_area.projection.sequence=NULL, year_lags.projection.sequence=NULL, max.forward.progression.years=NULL, max.forward.progression.grade=NULL, max.order.for.progression=NULL, use.my.knots.boundaries, use.my.coefficient.matrices, panel.data.vnames, achievement.level.prior.vname=NULL, performance.level.cutscores, calculate.sgps=TRUE, convert.0and100=TRUE, trajectories.chunk.size=50000L, sgp.projections.equated=NULL, projection.unit="YEAR", projection.unit.label=NULL, percentile.trajectory.values=NULL, return.percentile.trajectory.values=NULL, return.projection.group.identifier=NULL, return.projection.group.scale.scores=NULL, return.projection.group.dates=NULL, isotonize=TRUE, lag.increment=0L, sgp.exact.grade.progression=FALSE, projcuts.digits=NULL, sgp.projections.use.only.complete.matrices=NULL, SGPt=NULL, print.time.taken=TRUE) { started.at=proc.time() started.date <- prettyDate() GRADE <- NULL .smooth.bound.iso.row <- function(tmp.dt, grade, tmp.year, tmp.content_area, iso=isotonize, missing.taus, na.replace, equated.year) { X <- NULL if (!is.null(equated.year)) tmp.year <- equated.year bnd <- eval(parse(text=paste0("panel.data[['Knots_Boundaries']]", get.my.knots.boundaries.path(tmp.content_area, tmp.year), "[['loss.hoss_", grade, "']]"))) tmp.dt[X < bnd[1L], X:=bnd[1L]] tmp.dt[X > bnd[2L], X:=bnd[2L]] if (!iso) return(round(tmp.dt[['X']], digits=5)) if (iso & missing.taus) { na.row <- rep(NA,length(tmp.dt[['X']])) na.row[na.replace] <- round(data.table(tmp.dt[!is.na(X)], key=c("ID", "X"))[['X']], digits=5) return(na.row) } else { setkey(tmp.dt, ID, X) return(round(tmp.dt[c(matrix(seq.int(dim(tmp.dt)[1L]), ncol=100L, byrow=TRUE))][['X']], digits=5)) } } .create.path <- function(labels, pieces=c("my.subject", "my.year", "my.extra.label")) { sub(' ', '_', toupper(sub('\\.+$', '', paste(unlist(lapply(labels[pieces], as.character)), collapse=".")))) } .get.trajectory.chunks <- function(seq.for.data) { split(seq.for.data, ceiling(seq.for.data/trajectories.chunk.size)) } get.my.knots.boundaries.path <- function(content_area, year) { tmp.path.knots.boundaries <- paste(sgp.labels[['my.subject']], sgp.labels[['my.year']], sep=".") if (is.null(sgp.projections.equated)) { tmp.knots.boundaries.names <- names(panel.data[['Knots_Boundaries']][[tmp.path.knots.boundaries]])[content_area==sapply(strsplit(names(panel.data[['Knots_Boundaries']][[tmp.path.knots.boundaries]]), "[.]"), '[', 1L)] if (length(tmp.knots.boundaries.names)==0L) { return(paste0("[['", tmp.path.knots.boundaries, "']]")) } else { tmp.knots.boundaries.years <- sapply(strsplit(tmp.knots.boundaries.names, "[.]"), function(x) x[2L]) tmp.sum <- sum(year >= sort(tmp.knots.boundaries.years), na.rm=TRUE) return(paste0("[['", tmp.path.knots.boundaries, "']][['", paste(c(content_area, sort(tmp.knots.boundaries.years)[tmp.sum]), collapse="."), "']]")) } } else { return(paste0("[['", tmp.path.knots.boundaries, "']][['", content_area, ".", sgp.projections.equated[['Year']], "']]")) } } .get.panel.data <- function(tmp.data, grade.progression, content_area.progression, num.prior=NULL, completed.ids=NULL, bound.data=TRUE, equated.year=NULL) { if (is.null(num.prior)) num.prior <- length(grade.progression) if (is.character(tmp.data[[1L+num.panels]])) { tmp.data <- eval(parse(text=paste0("na.omit(tmp.data[.(", paste(rev(paste0("'", grade.progression, "'"))[seq(num.prior)], 
collapse=", "), "), on=names(tmp.data)[c(", paste(1+num.panels-(1L:num.prior-1L), collapse=", ") , ")]], cols=names(tmp.data)[c(",paste(1L+2*num.panels-(1L:num.prior-1L), collapse=", "), ")])[,c(1, ", paste(rev(1+2*num.panels-(1L:num.prior-1L)), collapse=", "), ")]"))) } else { tmp.data <- eval(parse(text=paste0("na.omit(tmp.data[.(", paste(rev(grade.progression)[seq(num.prior)], collapse=", "), "), on=names(tmp.data)[c(", paste(1+num.panels-(1L:num.prior-1L), collapse=", ") , ")]], cols=names(tmp.data)[c(",paste(1+2*num.panels-(1L:num.prior-1L), collapse=", "), ")])[,c(1, ", paste(rev(1+2*num.panels-(1L:num.prior-1L)), collapse=", "), ")]"))) } if (!is.null(completed.ids)) tmp.data <- tmp.data[!ID %in% completed.ids] if (bound.data) { if (!is.null(equated.year)) tmp.year <- equated.year else tmp.year <- as.character(sgp.labels$my.year) for (i in seq(dim(tmp.data)[2L]-1L)) { bnd <- eval(parse(text=paste0("panel.data[['Knots_Boundaries']]", get.my.knots.boundaries.path(content_area.progression[i], tmp.year), "[['loss.hoss_", grade.progression[i], "']]"))) eval(parse(text=paste0("tmp.data[", names(tmp.data)[i+1L], "<bnd[1L], names(tmp.data)[i+1L] := bnd[1L]]"))) eval(parse(text=paste0("tmp.data[", names(tmp.data)[i+1L], ">bnd[2L], names(tmp.data)[i+1L] := bnd[2L]]"))) } } return(tmp.data) } get.my.cutscore.state.year.sgprojection <- function(Cutscores, content_area, year, my.state) { if (!is.na(my.state)) { tmp.cutscore.state <- sapply(strsplit(names(Cutscores)[grep(content_area, names(Cutscores))], "[.]"), function(x) x[2L]) if (my.state %in% tmp.cutscore.state) { content_area <- paste(content_area, my.state, sep=".") year.split.index <- -2L } else year.split.index <- -1L } else year.split.index <- -1L tmp.cutscore.years <- sapply(sapply(strsplit(names(Cutscores)[grep(content_area, names(Cutscores))], "[.]"), function(x) tail(x, year.split.index)), paste, collapse=".") tmp.cutscore.years[tmp.cutscore.years==""] <- NA if (any(!is.na(tmp.cutscore.years))) { if (year %in% tmp.cutscore.years) { return(paste(content_area, year, sep=".")) } else { if (year==sort(c(year, tmp.cutscore.years))[1L]) { return(content_area) } else { return(paste(content_area, sort(tmp.cutscore.years)[which(year==sort(c(year, tmp.cutscore.years)))-1L], sep=".")) } } } else { return(content_area) } } get.grade.projection.sequence.matrices <- function( grade.progression, content_area.progression, year_lags.progression, grade.projection.sequence, content_area.projection.sequence, year_lags.projection.sequence, sgp.exact.grade.progression, SGPt) { add.missing.taus.to.matrix <- function(my.matrix) { augmented.mtx <- matrix(NA, nrow=dim(my.matrix)[1L], ncol=100L) tau.num <- ceiling(as.numeric(substr(colnames(my.matrix), 6, nchar(colnames(my.matrix))))*100L) na.replace <- seq.int(100) %in% tau.num augmented.mtx[,na.replace] <- my.matrix return(augmented.mtx) } if (sgp.exact.grade.progression) grade.progression.index <- length(grade.progression) else grade.progression.index <- seq_along(grade.progression) tmp.list <- list() for (i in seq_along(grade.progression.index)) { tmp.list[[i]] <- list() for (j in seq_along(grade.projection.sequence)) { tmp.years_lags <- c(tail(year_lags.progression, grade.progression.index[i]-1L), head(year_lags.projection.sequence, j)) if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0) { tmp.years <- rep("BASELINE", length(tmp.years_lags)+1L) } else { tmp.years <- yearIncrement(sgp.labels$my.year, rev(c(0L, -cumsum(rev(tmp.years_lags))))) } tmp.matrix <- getsplineMatrices( tmp.matrices, 
c(tail(content_area.progression, grade.progression.index[i]), head(content_area.projection.sequence, j)), c(tail(grade.progression, grade.progression.index[i]), head(grade.projection.sequence, j)), tmp.years, tmp.years_lags, return.highest.order.matrix=TRUE, my.matrix.highest.order=max.order.for.progression, my.matrix.time.dependency=if (is.null(SGPt)) NULL else list(TIME="TIME", TIME_LAG="TIME_LAG")) if (length(tmp.matrix)==0L) { if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) == 0) { tmp.years2 <- yearIncrement(sgp.labels$my.year, rev(c(0L, -cumsum(rev(tmp.years_lags))))) } else { tmp.years2 <- rep("BASELINE", length(tmp.years_lags)+1L) } tmp.matrix <- getsplineMatrices( tmp.matrices, c(tail(content_area.progression, grade.progression.index[i]), head(content_area.projection.sequence, j)), c(tail(grade.progression, grade.progression.index[i]), head(grade.projection.sequence, j)), tmp.years2, tmp.years_lags, return.highest.order.matrix=TRUE, my.matrix.highest.order=max.order.for.progression, my.matrix.time.dependency=if (is.null(SGPt)) NULL else list(TIME="TIME", TIME_LAG="TIME_LAG")) if (length(tmp.matrix)==0L) next tmp.matrix[[1L]]@Time[[1L]] <- tmp.years } tmp.list[[i]][[j]] <- tmp.matrix[[1L]] names(tmp.list[[i]])[j] <- content_area.projection.sequence[j] if (dim(tmp.list[[i]][[j]]@.Data)[2L] != 100L) { tmp.list[[i]][[j]]@.Data <- add.missing.taus.to.matrix(tmp.list[[i]][[j]]@.Data) missing.taus <- TRUE } } } for (f in seq_along(tmp.list)) tmp.list[[f]] <- tmp.list[[f]][which(!sapply(tmp.list[[f]], is.null))] return(rev(tmp.list)) } .get.percentile.trajectories <- function(ss.data, projection.matrices) { tmp.percentile.trajectories <- list() completed.ids <- TEMP_1 <- TEMP_2 <- TIME <- TIME_LAG <- TMP_KEY <- NULL for (i in seq_along(projection.matrices)) { if (any(!ss.data[[1L]] %in% completed.ids)) { tmp.dt <- .get.panel.data( ss.data, head(projection.matrices[[i]][[1L]]@Grade_Progression[[1L]], -1L), head(projection.matrices[[i]][[1L]]@Content_Areas[[1L]], -1L), completed.ids=completed.ids, equated.year=yearIncrement(sgp.projections.equated[['Year']], -1L)) if (dim(tmp.dt)[1L] > 0L) { completed.ids <- c(unique(tmp.dt, by=(1L))[[1L]], completed.ids) tmp.dt <- tmp.dt[list(rep(tmp.dt[[1L]], 100L))] missing.taus <- FALSE; na.replace <- NULL label.iter <- 1L for (j in seq_along(projection.matrices[[i]])) { tmp.matrix <- projection.matrices[[i]][[j]] mod <- character() int <- "data.table(ID=tmp.dt[[1L]], INT=1L," for (k in seq_along(projection.matrices[[i]][[j]]@Time_Lags[[1L]])) { knt <- paste0("tmp.matrix@Knots[[", k, "]]") bnd <- paste0("tmp.matrix@Boundaries[[", k, "]]") mod <- paste0(mod, ", bs(tmp.dt[[", dim(tmp.dt)[2L]-k+1L, "]], knots=", knt, ", Boundary.knots=", bnd, ")") } tmp.scores <- eval(parse(text=paste0(int, substring(mod, 2L), ", key='ID')"))) if (!is.null(SGPt)) { grade.projection.sequence.labels <- c(paste(tail(grade.progression, 1L), "EOW", sep="."), grade.projection.sequence) content_area.projection.sequence.labels <- c(tail(content_area.progression, 1L), content_area.projection.sequence) if (j==1L) { tmp.scores <- panel.data$Panel_Data[,c("ID", SGPt), with=FALSE][tmp.scores, on="ID"] for (k in unlist(tmp.matrix@Version[['Matrix_Information']][['SGPt']][c("MAX_TIME_PRIOR", "MAX_TIME")])) { tmp.scores[,TIME:=k] tmp.time.shift.index <- getTimeShiftIndex(as.numeric(max(tmp.scores[[SGPt]])), tmp.matrix) tmp.scores[,TIME_LAG:=(k+365*tmp.time.shift.index)-as.numeric(get(SGPt))] tmp.scores[,TMP_KEY:=rep(seq.int(100), dim(tmp.scores)[1L]/100)] 
tmp.dt[,TEMP_1:=tmp.scores[, as.matrix(.SD) %*% tmp.matrix@.Data[,TMP_KEY], by=TMP_KEY, .SDcols=3:(dim(tmp.scores)[2L]-1L)][['V1']]]
tmp.dt[,TEMP_2:=.smooth.bound.iso.row( data.table(ID=tmp.dt[[1L]], X=TEMP_1), tail(grade.progression, 1L), yearIncrement(sgp.labels[['my.year']], j, lag.increment), content_area.projection.sequence[j], missing.taus=missing.taus, na.replace=na.replace, equated.year=yearIncrement(sgp.projections.equated[['Year']], -1L))]
setnames(tmp.dt, "TEMP_2", paste("SS", grade.projection.sequence.labels[label.iter], content_area.projection.sequence.labels[label.iter], sep="."))
tmp.dt[,TEMP_1:=NULL]
label.iter <- label.iter + 1 }
tmp.scores[,(SGPt):=NULL]
tmp.max.time <- k } else {
tmp.scores[,TIME:=tmp.matrix@Version[['Matrix_Information']][['SGPt']][['MAX_TIME']]]
tmp.time.shift.index <- getTimeShiftIndex(as.numeric(tmp.max.time), tmp.matrix)
tmp.scores[,TIME_LAG:=(tmp.matrix@Version[['Matrix_Information']][['SGPt']][['MAX_TIME']]+365L*tmp.time.shift.index)-tmp.max.time]
tmp.scores[,TMP_KEY:=rep(seq.int(100), dim(tmp.scores)[1L]/100)]
tmp.max.time <- tmp.matrix@Version[['Matrix_Information']][['SGPt']][['MAX_TIME']]
tmp.dt[,TEMP_1:=tmp.scores[, as.matrix(.SD) %*% tmp.matrix@.Data[,TMP_KEY], by=TMP_KEY, .SDcols=2:(dim(tmp.scores)[2L]-1L)][['V1']]]
tmp.dt[,TEMP_2:=.smooth.bound.iso.row( data.table(ID=tmp.dt[[1L]], X=TEMP_1), grade.projection.sequence[j], yearIncrement(sgp.labels[['my.year']], j, lag.increment), content_area.projection.sequence[j], missing.taus=missing.taus, na.replace=na.replace, equated.year=yearIncrement(sgp.projections.equated[['Year']], -1L))]
setnames(tmp.dt, "TEMP_2", paste("SS", grade.projection.sequence.labels[label.iter], content_area.projection.sequence.labels[label.iter], sep="."))
tmp.dt[,TEMP_1:=NULL]
label.iter <- label.iter + 1L } } else {
grade.projection.sequence.labels <- grade.projection.sequence
content_area.projection.sequence.labels <- content_area.projection.sequence
tmp.scores[,TMP_KEY:=rep(seq.int(100), dim(tmp.scores)[1L]/100)]
tmp.dt[,TEMP_1:=tmp.scores[, as.matrix(.SD) %*% tmp.matrix@.Data[,TMP_KEY], by=TMP_KEY, .SDcols=2:(dim(tmp.scores)[2L]-1L)][['V1']]]
tmp.dt[,TEMP_2:=.smooth.bound.iso.row( data.table(ID=tmp.dt[[1L]], X=TEMP_1), grade.projection.sequence[j], yearIncrement(sgp.labels[['my.year']], j, lag.increment), content_area.projection.sequence[j], missing.taus=missing.taus, na.replace=na.replace, equated.year=yearIncrement(sgp.projections.equated[['Year']], -1L))]
setnames(tmp.dt, "TEMP_2", paste("SS", grade.projection.sequence.labels[label.iter], content_area.projection.sequence.labels[label.iter], sep="."))
tmp.dt[,TEMP_1:=NULL]
label.iter <- label.iter + 1L } }
setkeyv(tmp.dt, names(tmp.dt)[1L])
tmp.percentile.trajectories[[i]] <- tmp.dt[,c("ID", intersect(names(tmp.dt), paste("SS", grade.projection.sequence.labels, content_area.projection.sequence.labels, sep="."))), with=FALSE] } } }
return(rbindlist(tmp.percentile.trajectories)) }
.sgp.targets <- function(data, cut, convert.0and100) { if (is.na(cut)) { return(as.integer(NA)) } else { tmp <- which.min(c(data < cut, FALSE)) if (tmp==101L) tmp <- 100L if (convert.0and100 && tmp==0L) return(1L) if (convert.0and100 && tmp==100L) return(99L) return(tmp) } }
.get.trajectories.and.cuts <- function(percentile.trajectories, trajectories.tf, cuts.tf, projection.unit=projection.unit) { CUT <- STATE <- NULL if (!is.null(SGPt)) { content_area.projection.sequence <- c(tail(content_area.progression, 1L), content_area.projection.sequence) grade.projection.sequence.labels <-
c(paste(tail(grade.progression, 1L), "EOW", sep="."), grade.projection.sequence) grade.projection.sequence <- c(tail(grade.progression, 1L), grade.projection.sequence) } else { grade.projection.sequence.labels <- grade.projection.sequence } if (trajectories.tf) { if (is.numeric(percentile.trajectory.values)) { tmp.name.prefix <- "P" tmp.traj <- percentile.trajectories[rep(seq.int(100), dim(percentile.trajectories)[1L]/100L) %in% percentile.trajectory.values] tmp.num.years.forward <- length(grade.projection.sequence) } if (is.character(percentile.trajectory.values)) { tmp.name.prefix <- "SCALE_SCORE_" tmp.num.years.forward <- min(length(grade.projection.sequence), lapply(strsplit(percentile.trajectory.values, "_")[[1L]], type.convert)[sapply(lapply(strsplit(percentile.trajectory.values, "_")[[1L]], type.convert), is.numeric)][[1L]]) if (!any(grepl("CURRENT", percentile.trajectory.values))) tmp.num.years.forward <- min(length(grade.projection.sequence), tmp.num.years.forward+1L) tmp.indices <- as.integer(rep(dim(percentile.trajectories)[1L]/uniqueN(percentile.trajectories[['ID']])*(seq(uniqueN(percentile.trajectories[['ID']]))-1L), each=length(percentile.trajectory.values)) + c(t(as.matrix(data.table(panel.data[["Panel_Data"]], key="ID")[list(unique(percentile.trajectories, by='ID')[['ID']])][,percentile.trajectory.values, with=FALSE])))) tmp.traj <- percentile.trajectories[tmp.indices, 1L:(2L+tmp.num.years.forward-1L), with=FALSE][,ID:=rep(unique(percentile.trajectories, by='ID')[['ID']], each=length(percentile.trajectory.values))] if (tmp.num.years.forward==1L) { tmp.target.name <- tail(names(tmp.traj), 1L) if ("STATE" %in% names(panel.data[["Panel_Data"]])) { included.states <- unique(panel.data[["Panel_Data"]][['STATE']]) content_area.index <- grep(sgp.labels$my.subject, sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][1L], USE.NAMES=FALSE)) available.states <- unique(sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][2L], USE.NAMES=FALSE)[content_area.index]) unavailable.states <- included.states[!included.states %in% available.states] percentile.trajectories <- na.omit(data.table(panel.data[["Panel_Data"]][,c("ID", "STATE"), with=FALSE], key="ID")[STATE %in% available.states][percentile.trajectories], cols="STATE") tmp.traj <- percentile.trajectories[which(!duplicated(percentile.trajectories[['ID']]))] if (length(percentile.trajectory.values)==2L) tmp.traj <- data.table(rbindlist(list(tmp.traj, tmp.traj)), key="ID") for (state.iter in unique(tmp.traj$STATE)) { my.cutscore.year <- get.my.cutscore.state.year.sgprojection(Cutscores, content_area.projection.sequence[1L], yearIncrement(sgp.labels$my.year, 1L, lag.increment), my.state=state.iter) tmp.cutscores.by.grade <- tmp.cutscores[[my.cutscore.year]][[paste0("GRADE_", grade.projection.sequence[1L])]] if (length(percentile.trajectory.values)==1L) { tmp.state.level <- which(sapply(lapply(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscore_Information"]][['State_Levels']], '[[', 1L), function(x) state.iter %in% x)) cuku.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscore_Information"]][[ 'State_Levels']][[tmp.state.level]][["Levels"]]=="Proficient")-1L tmp.traj[which(STATE==state.iter), (tmp.target.name):=tmp.cutscores.by.grade[cuku.level.to.get]] } if (length(percentile.trajectory.values)==2L) { 
tmp.state.level <- which(sapply(lapply(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscore_Information"]][['State_Levels']], '[[', 1L), function(x) state.iter %in% x)) cuku.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscore_Information"]][[ 'State_Levels']][[tmp.state.level]][["Levels"]]=="Proficient")-1L musu.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscore_Information"]][[ 'State_Levels']][[tmp.state.level]][["Levels"]]=="Proficient") tmp.traj[which(STATE==state.iter), (tmp.target.name):=c(tmp.cutscores.by.grade[cuku.level.to.get], tmp.cutscores.by.grade[musu.level.to.get])] } } tmp.traj[,STATE:=NULL] } else { my.cutscore.year <- get.my.cutscore.state.year.sgprojection(Cutscores, content_area.projection.sequence[1L], yearIncrement(sgp.labels$my.year, 1L, lag.increment), my.state=NA) tmp.cutscores.by.grade <- tmp.cutscores[[my.cutscore.year]][[paste0("GRADE_", grade.projection.sequence[1L])]] if (length(percentile.trajectory.values)==1L) { cuku.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Levels"]][["Proficient"]]=="Proficient")-1L tmp.target.scores <- rep(tmp.cutscores.by.grade[cuku.level.to.get], uniqueN(tmp.traj[['ID']])) } if (length(percentile.trajectory.values)==2L) { cuku.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Levels"]][["Proficient"]]=="Proficient")-1L musu.level.to.get <- which.max(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Levels"]][["Proficient"]]=="Proficient") tmp.target.scores <- rep(c(tmp.cutscores.by.grade[cuku.level.to.get], tmp.cutscores.by.grade[musu.level.to.get]), uniqueN(tmp.traj[['ID']])) } tmp.target.scores[is.na(tmp.traj[[tmp.target.name]])] <- NA tmp.traj[,(tmp.target.name):=tmp.target.scores] } } } tmp.traj[,(2:dim(tmp.traj)[2L]):=round(tmp.traj[,2:dim(tmp.traj)[2L], with=FALSE], digits=projcuts.digits)] trajectories <- ddcast(tmp.traj[, CUT:=rep(percentile.trajectory.values, dim(tmp.traj)[1L]/length(percentile.trajectory.values))], ID ~ CUT, value.var=setdiff(names(tmp.traj), c("ID", "CUT")), sep=".") if (any(grepl("CURRENT", percentile.trajectory.values))) percentile.trajectory.values <- unlist(strsplit(percentile.trajectory.values, "_CURRENT")) if (projection.unit=="GRADE") { tmp.vec <- expand.grid(tmp.name.prefix, percentile.trajectory.values, paste0("_PROJ_", projection.unit.label, "_"), paste(grade.projection.sequence.labels, content_area.projection.sequence, sep="_"), lag.increment.label)[seq.int(length(percentile.trajectory.values)*tmp.num.years.forward),] } else { tmp.vec <- expand.grid(tmp.name.prefix, percentile.trajectory.values, paste0("_PROJ_", projection.unit.label, "_"), seq_along(grade.projection.sequence.labels), lag.increment.label)[seq.int(length(percentile.trajectory.values)*tmp.num.years.forward),] } setnames(trajectories, c("ID", do.call(paste0, tmp.vec))) if (!cuts.tf) return(trajectories) } if (cuts.tf) { setkey(percentile.trajectories, ID) tmp.cuts.list <- list() if ("STATE" %in% names(panel.data[["Panel_Data"]])) { included.states <- unique(panel.data[["Panel_Data"]][['STATE']]); state.arg <- "STATE == states[n.state]" content_area.index <- grep(sgp.labels$my.subject, sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][1L], USE.NAMES=FALSE)) available.states <- 
unique(sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][2L], USE.NAMES=FALSE)[content_area.index]) unavailable.states <- included.states[!included.states %in% available.states] percentile.trajectories <- na.omit(data.table(panel.data[["Panel_Data"]][,c("ID", "STATE"), with=FALSE], key="ID")[STATE %in% available.states][percentile.trajectories], cols="STATE") states <- included.states[included.states %in% available.states] } else { states <- NA; state.arg <- "is.na(STATE)" percentile.trajectories[, STATE:=NA] } for (n.state in seq(states)) { k <- 1L cuts.arg <- names.arg <- character() for (i in seq_along(grade.projection.sequence)) { my.cutscore.state.year <- get.my.cutscore.state.year.sgprojection(Cutscores, content_area.projection.sequence[i], yearIncrement(sgp.labels[['my.year']], i, lag.increment), my.state=states[n.state]) tmp.cutscores.by.grade <- tmp.cutscores[[my.cutscore.state.year]][[paste0("GRADE_", grade.projection.sequence[i])]] if (!is.null(tmp.cutscores.by.grade)) { for (j in seq_along(tmp.cutscores.by.grade)) { cuts.arg[k] <- paste0(".sgp.targets(SS", ".", grade.projection.sequence.labels[i], ".", content_area.projection.sequence[i], ", ", tmp.cutscores.by.grade[j], ", ", convert.0and100, ")") if (projection.unit=="GRADE") { names.arg[k] <- paste0("LEVEL_", j, "_SGP_TARGET_", projection.unit.label, "_", grade.projection.sequence.labels[i], lag.increment.label) } else { names.arg[k] <- paste0("LEVEL_", j, "_SGP_TARGET_", projection.unit.label, "_", i, lag.increment.label) } k <- k+1L } } } arg <- paste0("list(", paste(cuts.arg, collapse=", "), ")") tmp.cuts.list[[n.state]] <- eval(parse(text=paste0("percentile.trajectories[which(", state.arg, "),", arg, ", by=ID]"))) setnames(tmp.cuts.list[[n.state]], c("ID", names.arg)) if (!is.na(states[n.state])) { tmp.cuts.list[[n.state]][,STATE:=states[n.state]] setcolorder(tmp.cuts.list[[n.state]], c("ID", "STATE", names.arg)) } } tmp.cuts <- rbindlist(tmp.cuts.list, fill=TRUE) if (dim(tmp.cuts)[1L]==0L) { if (!trajectories.tf) { return(NULL) } else { return(trajectories) } } else { setcolorder(tmp.cuts, names(tmp.cuts.list[[which.max(sapply(tmp.cuts.list, ncol))]])) setkey(tmp.cuts, ID) if (!trajectories.tf) { return(tmp.cuts) } else { return(merge(tmp.cuts, trajectories, all=TRUE)) } } } } ID <- tmp.messages <- SGP_PROJECTION_GROUP <- SGP_PROJECTION_GROUP_SCALE_SCORES <- SGP_PROJECTION_GROUP_DATES <- index <- NULL if (!calculate.sgps) { tmp.messages <- c(tmp.messages, paste("\t\tNOTE: Student growth projections not calculated for", sgp.labels$my.year, sgp.labels$my.subject, "due to argument calculate.sgps=FALSE.\n")) return(panel.data) } if (missing(panel.data)) { stop("User must supply student achievement data for student growth percentile calculations. See help page for details.") } if (!is.list(panel.data)) { stop("Supplied panel.data not of a supported class. See help for details of supported classes") } else { if (!(all(c("Panel_Data", "Coefficient_Matrices", "Knots_Boundaries") %in% names(panel.data)))) { stop("Supplied panel.data missing Panel_Data, Coefficient_Matrices, and/or Knots_Boundaries. See help page for details") } if (identical(class(panel.data[["Panel_Data"]]), "data.frame")) { panel.data[["Panel_Data"]] <- as.data.table(panel.data[["Panel_Data"]]) }} if (missing(sgp.labels)) { stop("User must supply a list of SGP function labels (sgp.labels). 
See help page for details.") } else { if (!is.list(sgp.labels)) { stop("Please specify an appropriate list of SGP function labels (sgp.labels). See help page for details.") } if (!all(names(sgp.labels) %in% c("my.year", "my.subject")) & !all(names(sgp.labels) %in% c("my.year", "my.subject", "my.grade")) & !all(names(sgp.labels) %in% c("my.year", "my.subject", "my.extra.label")) & !all(names(sgp.labels) %in% c("my.year", "my.subject", "my.grade", "my.extra.label"))) { stop("Please specify an appropriate list for sgp.labels. See help page for details.") } sgp.labels <- lapply(sgp.labels, toupper) tmp.path <- .create.path(sgp.labels) } if (missing(grade.progression)) { stop("User must supply a grade progression from which projections/trajectories will be derived. See help page for details.") } grade.progression <- as.character(grade.progression) if (!missing(use.my.knots.boundaries)) { if (!is.list(use.my.knots.boundaries) & !is.character(use.my.knots.boundaries)) { stop("use.my.knots.boundaries must be supplied as a list or character abbreviation. See help page for details.") } if (is.list(use.my.knots.boundaries)) { if (!is.list(panel.data)) { stop("use.my.knots.boundaries is only appropriate when panel data is of class list. See help page for details.") } if (!identical(names(use.my.knots.boundaries), c("my.year", "my.subject")) & !identical(names(use.my.knots.boundaries), c("my.year", "my.subject", "my.extra.label"))) { stop("Please specify an appropriate list for use.my.knots.boundaries. See help page for details.") } if (is.null(panel.data[["Knots_Boundaries"]]) | is.null(panel.data[["Knots_Boundaries"]][[.create.path(use.my.knots.boundaries, pieces=c("my.subject", "my.year"))]])) { stop("Knots and Boundaries indicated by use.my.knots.boundaries are not included.") } } if (is.character(use.my.knots.boundaries)) { if (!use.my.knots.boundaries %in% objects(SGP::SGPstateData)) { stop(paste0("Knots and Boundaries are currently not implemented for the state (", use.my.knots.boundaries, ") indicated. Please contact the SGP package administrator to have your Knots and Boundaries included in the package")) } } } if (!missing(use.my.coefficient.matrices) & is.null(content_area.projection.sequence)) { if (!is.list(use.my.coefficient.matrices)) { stop("Please specify an appropriate list for use.my.coefficient.matrices. See help page for details.") } if (!identical(names(use.my.coefficient.matrices), c("my.year", "my.subject")) & !identical(names(use.my.coefficient.matrices), c("my.year", "my.subject", "my.extra.label"))) { stop("Please specify an appropriate list for use.my.coefficient.matrices. 
See help page for details.") } tmp.path.coefficient.matrices <- .create.path(use.my.coefficient.matrices) if (is.null(panel.data[["Coefficient_Matrices"]]) | is.null(panel.data[["Coefficient_Matrices"]][[tmp.path.coefficient.matrices]])) { if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0 & !is.null(SGP::SGPstateData[[performance.level.cutscores]][["Baseline_splineMatrix"]])) { panel.data[["Coefficient_Matrices"]][[tmp.path.coefficient.matrices]] <- SGP::SGPstateData[[performance.level.cutscores]][["Baseline_splineMatrix"]][["Coefficient_Matrices"]] } else { messageSGP("\t\tNOTE: Coefficient matrices indicated by argument use.my.coefficient.matrices are not included.") return(NULL) } } } if (missing(use.my.coefficient.matrices) & is.null(content_area.projection.sequence)) { if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0) { tmp.path.coefficient.matrices <- paste(sgp.labels$my.subject, "BASELINE", sep=".") if (is.null(panel.data[["Coefficient_Matrices"]]) | is.null(panel.data[["Coefficient_Matrices"]][[tmp.path.coefficient.matrices]])) { stop(paste0("\t\tNOTE: Coefficient matrices indicated by argument sgp.labels, '", tmp.path.coefficient.matrices, "', are not included. Please check supplied list to make sure appropriate coefficient matrices are included.\n")) return(NULL) } } else { tmp.path.coefficient.matrices <- tmp.path if (is.null(panel.data[["Coefficient_Matrices"]]) | is.null(panel.data[["Coefficient_Matrices"]][[tmp.path.coefficient.matrices]])) { messageSGP(paste0("\t\tNOTE: Coefficient matrices indicated by argument sgp.labels, '", tmp.path.coefficient.matrices, "', are not included. Bypassing plot production.\n")) return(NULL) } } } if (!is.null(content_area.projection.sequence)) { if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0) { tmp.path.coefficient.matrices <- paste(unique(content_area.projection.sequence), "BASELINE", sep=".") } else { tmp.path.coefficient.matrices <- paste(unique(content_area.projection.sequence), sgp.labels$my.year, sep=".") } } if (!missing(performance.level.cutscores)) { if (is.character(performance.level.cutscores)) { if (!(performance.level.cutscores %in% objects(SGP::SGPstateData))) { tmp.messages <- c(tmp.messages, "\t\tNOTE: To use state cutscores, supply an appropriate two letter state abbreviation. \nRequested state may not be included. See help page for details.\n") tf.cutscores <- FALSE } if (is.null(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]))) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Cutscores are currently not implemented for the state indicated.\n\t\t\tPlease contact the SGP package administrator to have your cutscores included in the package.\n") tf.cutscores <- FALSE } if (!sgp.labels$my.subject %in% unique(sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][1L], USE.NAMES=FALSE))) { tmp.messages <- c(tmp.messages, paste("\t\tNOTE: Cutscores provided in SGPstateData does not include", sgp.labels$my.subject, "(CASE SENSITIVE). 
See help page for details.\n")) tf.cutscores <- FALSE } else { tmp.cutscores <- SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]] if (!is.character(percentile.trajectory.values)) tf.cutscores <- TRUE else tf.cutscores <- FALSE }} if (is.list(performance.level.cutscores)) { if (any(names(performance.level.cutscores) %in% sgp.labels$my.subject)) { tmp.cutscores <- performance.level.cutscores tf.cutscores <- TRUE } else { stop("\nList of cutscores provided in performance.level.cutscores must include a subject name that matches my.subject in sgp.labels (CASE SENSITIVE). See help page for details.\n\n") tf.cutscores <- FALSE }}} else { tf.cutscores <- FALSE } if (!(toupper(projection.unit)=="YEAR" | toupper(projection.unit)=="GRADE")) { stop("Projection unit must be specified as either YEAR or GRADE. See help page for details.") } if (is.null(percentile.trajectory.values) & !tf.cutscores) { stop("\t\tNOTE: Either percentile trajectories and/or performance level cutscores must be supplied for the analyses.\n") } if (!is.null(percentile.trajectory.values) && is.numeric(percentile.trajectory.values) && !all(percentile.trajectory.values %in% seq.int(100))) { messageSGP("\t\tNOTE: Integer supplied 'percentile.trajectory.values' must be between 1 and 100. Only supplied values in that range will be used.\n") percentile.trajectory.values <- intersect(percentile.trajectory.values, seq.int(100)) } if (!is.null(percentile.trajectory.values) && is.character(percentile.trajectory.values) && !all(percentile.trajectory.values %in% names(panel.data[["Panel_Data"]]))) { messageSGP("\t\tNOTE: Character 'percentile.trajectory.values' must correspond to individual specific variable in panel.data[['Panel_Data']]. Please check for appropriate variables.\n") } if (!is.null(achievement.level.prior.vname)) { if (!achievement.level.prior.vname %in% names(panel.data[["Panel_Data"]])) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied achievement.level.prior.vname is not in supplied panel.data. 
No ACHIEVEMENT_LEVEL_PRIOR variable will be produced.\n") achievement.level.prior.vname <- NULL } } if (lag.increment==0) lag.increment.label <- "_CURRENT" else lag.increment.label <- "" if (!is.null(grade.projection.sequence) & !is.null(content_area.projection.sequence) && length(grade.projection.sequence) != length(content_area.projection.sequence)) { stop("\t\tNOTE: Supplied 'grade.projection.sequence' and 'content_area.projection.sequence' must be of the same length.\n") } if (!is.null(grade.projection.sequence) & !is.null(year_lags.projection.sequence) && length(grade.projection.sequence)-1L != length(year_lags.projection.sequence)) { stop("\t\tNOTE: Supplied 'year_lags.projection.sequence' must have length 1 less than 'grade.projection.sequence'.\n") } if (is.null(projcuts.digits)) { projcuts.digits <- 3L } if (is.null(projection.unit.label)) { projection.unit.label <- projection.unit } if (!is.null(return.projection.group.dates) && is.null(SGPt)) { return.projection.group.dates <- NULL } if (identical(return.projection.group.dates, TRUE)) { return.projection.group.dates <- "DATE[.]" } tmp.objects <- c("SGProjections", "Cutscores") for (i in tmp.objects) { if (!is.null(panel.data[[i]])) { assign(i, panel.data[[i]]) } else { assign(i, list()) } } if ((tf.cutscores || is.character(percentile.trajectory.values)) && exists("tmp.cutscores")) { Cutscores <- tmp.cutscores } if (dim(panel.data[['Panel_Data']])[1L] == 0L) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied data together with grade progression contains no data for analysis. Check data, function arguments and see help page for details.\n") messageSGP(paste("\tStarted studentGrowthProjections", started.date)) messageSGP(paste0("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label)) messageSGP(c(tmp.messages, "\tFinished studentGrowthProjections: ", prettyDate(), " in ", convertTime(timetakenSGP(started.at)), "\n")) return( list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=panel.data[["Cutscores"]], Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=NULL, SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=panel.data[["SGProjections"]], Simulated_SGPs=panel.data[["Simulated_SGPs"]])) } if (!missing(panel.data.vnames)) { if (!all(panel.data.vnames %in% names(panel.data[["Panel_Data"]]))) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied 'panel.data.vnames' are not all in the supplied 'Panel_Data'.\n\t\t\tAnalyses will utilize the variables contained in both Panel_Data and those provided in the supplied argument 'panel.data.vnames'.\n") } ss.data <- panel.data[["Panel_Data"]][,intersect(panel.data.vnames, names(panel.data[["Panel_Data"]])), with=FALSE] } else { ss.data <- panel.data[["Panel_Data"]] } if (dim(ss.data)[2L] %% 2L != 1L) { stop(paste("Number of columns of supplied panel data (", dim(ss.data)[2L], ") does not conform to data requirements. 
See help page for details.")) } num.panels <- (dim(ss.data)[2L]-1L)/2L if (length(grade.progression) > num.panels) { tmp.messages <- c(tmp.messages, paste0("\t\tNOTE: Supplied 'grade progression', grade.progression=c(", paste(grade.progression, collapse=","), "), exceeds number of panels (", num.panels, ") in provided data.\n\t\t\tAnalyses will utilize maximum number of priors supplied by the data.\n")) grade.progression <- tail(grade.progression, num.panels) if (!is.null(content_area.progression)) content_area.progression <- tail(content_area.progression, length(grade.progression)) if (!is.null(year_lags.progression)) year_lags.progression <- tail(year_lags.progression, length(grade.progression)-1L) } if (!is.null(max.order.for.progression)) { grade.progression <- tail(grade.progression, max.order.for.progression) if (!is.null(content_area.progression)) content_area.progression <- tail(content_area.progression, length(grade.progression)) if (!is.null(year_lags.progression)) year_lags.progression <- tail(year_lags.progression, length(grade.progression)-1L) } tmp.last <- tail(grade.progression, 1L) ss.data <- data.table(ss.data[,c(1L, (1L+num.panels-(length(grade.progression)-1L)):(1L+num.panels), (1L+2L*num.panels-(length(grade.progression)-1L)):(1L+2L*num.panels)), with=FALSE], key=names(ss.data)[1L]) num.panels <- (dim(ss.data)[2L]-1L)/2L setnames(ss.data, c(1L, (1L+num.panels-length(grade.progression)+1L):(1L+num.panels), (1L+2L*num.panels-length(grade.progression)+1L):(1L+2L*num.panels)), c("ID", paste("GD", grade.progression, content_area.progression, sep="."), paste("SS", grade.progression, content_area.progression, sep="."))) if (is.logical(sgp.projections.use.only.complete.matrices)) { if (sgp.projections.use.only.complete.matrices) sgp.projections.use.only.complete.matrices <- NULL } if (is.null(sgp.projections.use.only.complete.matrices)) { if (any(is.na(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]]))))) { tmp.fix.index <- which(is.na(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]])))) if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0) { tmp.path.coefficient.matrices2 <- paste(unique(content_area.projection.sequence), sgp.labels$my.year, sep=".")[tmp.fix.index] } else { tmp.path.coefficient.matrices2 <- paste(unique(content_area.projection.sequence), "BASELINE", sep=".")[tmp.fix.index] } if (any(is.na(match(tmp.path.coefficient.matrices2, names(panel.data[["Coefficient_Matrices"]]))))) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Requested grade & content area progression are missing one or more coefficient matrices.\n") messageSGP(paste("\tStarted studentGrowthProjections", started.date)) messageSGP(paste0("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label, " ", return.projection.group.identifier)) messageSGP(paste(tmp.messages, "\tStudent Growth Projections NOT RUN", prettyDate(), "\n")) return( list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=panel.data[["Cutscores"]], Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=NULL, SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=panel.data[["SGProjections"]], Simulated_SGPs=panel.data[["Simulated_SGPs"]])) } else { if (length(grep("BASELINE", sgp.labels[['my.extra.label']])) > 0) { tmp.messages <- c(tmp.messages, paste0("\t\tNOTE: Not all 
CONTENT_AREA values in content_area.progression have associated BASELINE referenced coefficient matrices.\n\tCOHORT referenced matrices for missing content areas (", paste(gsub(paste0(".", sgp.labels$my.year), "", tmp.path.coefficient.matrices2), collapse=", "), ") have been found and will be used.\n\t\tPlease note the inconsistency and ensure this is correct!\n")) } else { tmp.messages <- c(tmp.messages, paste0("\t\tNOTE: Not all CONTENT_AREA values in content_area.progression have associated COHORT referenced coefficient matrices.\n\tBASELINE referenced matrices for missing content areas (", paste(gsub(".BASELINE", "", tmp.path.coefficient.matrices2), collapse=", "), ") have been found and will be used.\n\t\tPlease note the inconsistency and ensure this is correct!\n")) } } tmp.matrices <- unlist(panel.data[["Coefficient_Matrices"]][c(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]])), match(tmp.path.coefficient.matrices2, names(panel.data[["Coefficient_Matrices"]])))], recursive=FALSE) } else { tmp.matrices <- unlist(panel.data[["Coefficient_Matrices"]][match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]]))], recursive=FALSE) } } else { if (any(is.na(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]]))))) { tmp.fix.index <- which(is.na(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]])))) tmp.messages <- c(tmp.messages, paste0("\t\tNOTE: Not all CONTENT_AREA values in content_area.progression have associated COHORT referenced coefficient matrices:\n\t\t", paste(tmp.path.coefficient.matrices[tmp.fix.index], collapse=", "), ".\n")) } tmp.match <- sort(match(tmp.path.coefficient.matrices, names(panel.data[["Coefficient_Matrices"]]))) tmp.matrices <- unlist(panel.data[["Coefficient_Matrices"]][tmp.match], recursive=FALSE) tmp.tf <- content_area.projection.sequence %in% unique(unlist(lapply(tmp.matrices, function(x) x@Content_Areas))) & grade.projection.sequence %in% unique(unlist(lapply(tmp.matrices, function(x) x@Grade_Progression))) grade.projection.sequence <- grade.projection.sequence[tmp.tf] content_area.projection.sequence <- content_area.projection.sequence[tmp.tf] } if (is.null(content_area.progression)) { content_area.progression <- rep(sgp.labels[['my.subject']], length(grade.progression)) } else { if (!identical(class(content_area.progression), "character")) { stop("content_area.progression should be a character vector. See help page for details.") } if (length(content_area.progression) != length(grade.progression)) { tmp.messages <- c(tmp.messages, "\t\tNOTE: The content_area.progression vector does not have the same number of elements as the grade.progression vector.\n") } } if (is.null(year_lags.progression)) { year_lags.progression <- rep(1L, length(grade.progression)-1L) } if (is.null(grade.projection.sequence)) { grade.projection.sequence <- as.character(unique(sort(type.convert(sapply(tmp.matrices, function(x) tail(slot(x, "Grade_Progression")[[1L]], 2L)), as.is=TRUE)))) } if (identical(grade.projection.sequence, numeric(0))) { stop("Supplied grade.progression and coefficient matrices do not allow projection. 
See help page for details.") } if (is.null(content_area.projection.sequence)) { content_area.projection.sequence <- rep(tail(content_area.progression, 1L), length(grade.projection.sequence)) } grade.content_area.progression <- paste(content_area.progression, paste("GRADE", grade.progression, sep="_"), sep=".") grade.content_area.projection.sequence <- paste(content_area.projection.sequence, paste("GRADE", grade.projection.sequence, sep="_"), sep=".") tmp.index <- seq(which(tail(grade.content_area.progression, 1L)==grade.content_area.projection.sequence)+1L, length(grade.projection.sequence)) if (!is.null(max.forward.progression.grade)) { tmp.index <- intersect(tmp.index, which(sapply(grade.projection.sequence, function(x) type.convert(x, as.is=TRUE) <= type.convert(as.character(max.forward.progression.grade), as.is=TRUE)))) } if (!is.null(max.forward.progression.years)) tmp.index <- head(tmp.index, max.forward.progression.years) grade.projection.sequence <- grade.projection.sequence[tmp.index] content_area.projection.sequence <- content_area.projection.sequence[tmp.index] if (is.null(year_lags.projection.sequence)) { if (is.numeric(type.convert(grade.projection.sequence))) { year_lags.projection.sequence <- diff(as.numeric(c(tail(grade.progression, 1L), grade.projection.sequence))) } else { year_lags.projection.sequence <- rep(1L, length(grade.projection.sequence)) } } else { year_lags.projection.sequence <- year_lags.projection.sequence[tmp.index-1L] } grade.content_area.projection.sequence <- grade.content_area.projection.sequence[tmp.index] if (dim(.get.panel.data(ss.data, grade.progression, content_area.progression, 1L, bound.data=FALSE))[1L] == 0L | length(tmp.index)==0L) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied data together with grade progression contains no data for analysis. 
Check data, function arguments and see help page for details.\n") messageSGP(paste("\tStarted studentGrowthProjections", started.date)) messageSGP(paste0("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label)) messageSGP(c(tmp.messages, "\tFinished studentGrowthProjections: ", prettyDate(), " in ", convertTime(timetakenSGP(started.at)), "\n")) return( list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=panel.data[["Cutscores"]], Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=NULL, SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=panel.data[["SGProjections"]], Simulated_SGPs=panel.data[["Simulated_SGPs"]])) } grade.projection.sequence.matrices <- get.grade.projection.sequence.matrices( grade.progression, content_area.progression, year_lags.progression, grade.projection.sequence, content_area.projection.sequence, year_lags.projection.sequence, sgp.exact.grade.progression, SGPt) if (length(grade.projection.sequence.matrices[[1L]]) < 1L) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Requested grade & content area progression are missing one or more coefficient matrices.\n") messageSGP(paste("\tStarted studentGrowthProjections", started.date)) messageSGP(paste0("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label, " ", return.projection.group.identifier)) messageSGP(paste(tmp.messages, "\tStudent Growth Projections NOT RUN", prettyDate(), "\n")) return( list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=panel.data[["Cutscores"]], Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=NULL, SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=panel.data[["SGProjections"]], Simulated_SGPs=panel.data[["Simulated_SGPs"]])) } num.matrices <- max(sapply(grade.projection.sequence.matrices, length)) if (num.matrices < length(grade.projection.sequence)) { if (is.null(sgp.projections.use.only.complete.matrices)) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Requested grade & content area progression are missing one or more coefficient matrices.\n") messageSGP(paste("\tStarted studentGrowthProjections", started.date)) messageSGP(paste0("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label, " ", return.projection.group.identifier)) messageSGP(paste(tmp.messages, "\tStudent Growth Projections NOT RUN", prettyDate(), "\n")) return( list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=panel.data[["Cutscores"]], Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=NULL, SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=panel.data[["SGProjections"]], Simulated_SGPs=panel.data[["Simulated_SGPs"]])) } tmp.matrices.tf <- content_area.projection.sequence %in% names(grade.projection.sequence.matrices[[1L]]) tmp.messages <- c(tmp.messages, paste0("\t\tNOTE: Not all CONTENT_AREA values in content_area.progression have the appropriate coefficient matrices - MISSING:\n\t\t\t", paste(content_area.projection.sequence[!tmp.matrices.tf], collapse=", "), ".\n")) grade.projection.sequence <- 
grade.projection.sequence[tmp.matrices.tf] content_area.projection.sequence <- content_area.projection.sequence[tmp.matrices.tf] grade.content_area.projection.sequence <- grade.content_area.projection.sequence[tmp.matrices.tf] } if (dim(ss.data)[1L]/trajectories.chunk.size > 1.5) { percentile.trajectories <- rbindlist(lapply(.get.trajectory.chunks(seq.int(dim(ss.data)[1L])), function(index) .get.percentile.trajectories(ss.data[index], grade.projection.sequence.matrices))) } else { percentile.trajectories <- .get.percentile.trajectories(ss.data, grade.projection.sequence.matrices) } if (tf.cutscores) { tmp.cutscore.grade.content_area <- unlist(lapply(seq_along(tmp.cutscores), function(x) paste(unlist(strsplit(names(tmp.cutscores)[x], '[.]'))[1L], names(tmp.cutscores[[x]]), sep="."))) if ("STATE" %in% names(panel.data[["Panel_Data"]])) { included.states <- unique(panel.data[["Panel_Data"]][['STATE']]); state.arg <- "STATE == states[n.state]" content_area.index <- grep(sgp.labels$my.subject, sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][1L], USE.NAMES=FALSE)) available.states <- unique(sapply(names(SGP::SGPstateData[[performance.level.cutscores]][["Achievement"]][["Cutscores"]]), function(x) strsplit(x, "[.]")[[1L]][2L], USE.NAMES=FALSE)[content_area.index]) unavailable.states <- included.states[!included.states %in% available.states] if (length(unavailable.states) > 0L) { tmp.messages <- c(tmp.messages, paste("\t\tNOTE: The required state specific cutscores for ", sgp.labels$my.subject, " provided in SGPstateData do not include:\n\t\t\t", paste(unavailable.states[order(unavailable.states)], collapse = ", "), ".\n\t\t\tTarget projections will not be produced for students in these states.\n", sep = "")) } tmp.grade.content_area.projection.sequence <- sapply(available.states, function(x) paste(content_area.projection.sequence, x, paste("GRADE", grade.projection.sequence, sep="_"), sep=".")) if (!all(tmp.grade.content_area.projection.sequence %in% tmp.cutscore.grade.content_area)) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Cutscores provided do not include cutscores for all grades/content areas in projection.\n\t\tProjections to grades/content areas without cutscores will be missing.\n") } } else { if (!all(grade.content_area.projection.sequence %in% tmp.cutscore.grade.content_area)) { tmp.messages <- c(tmp.messages, "\t\tNOTE: Cutscores provided do not include cutscores for all grades/content areas in projection.\n\t\tProjections to grades/content areas without cutscores will be missing.\n") }}} trajectories.and.cuts <- .get.trajectories.and.cuts(percentile.trajectories, !is.null(percentile.trajectory.values), tf.cutscores, toupper(projection.unit)) if (!is.null(achievement.level.prior.vname)) { trajectories.and.cuts <- panel.data[["Panel_Data"]][,c("ID", achievement.level.prior.vname), with=FALSE][trajectories.and.cuts, on="ID"] setnames(trajectories.and.cuts, achievement.level.prior.vname, "ACHIEVEMENT_LEVEL_PRIOR") } if (!is.null(return.percentile.trajectory.values) && percentile.trajectory.values %in% names(panel.data$Panel_Data)) { trajectories.and.cuts <- panel.data[["Panel_Data"]][,c("ID", percentile.trajectory.values), with=FALSE][trajectories.and.cuts, on="ID"] } if (!is.null(return.projection.group.identifier)) { trajectories.and.cuts[,SGP_PROJECTION_GROUP:=return.projection.group.identifier] } if (!is.null(return.projection.group.scale.scores)) { my.tmp <- ss.data[,c("ID", grep("SS[.]", 
names(ss.data), value=TRUE)), with=FALSE][list(trajectories.and.cuts$ID),-1L,with=FALSE,on="ID"] trajectories.and.cuts[,SGP_PROJECTION_GROUP_SCALE_SCORES:=gsub("NA; ", "", do.call(paste, c(my.tmp, list(sep="; "))))] } if (!is.null(return.projection.group.dates)) { my.tmp <- panel.data$Panel_Data[,c("ID", grep(return.projection.group.dates, names(panel.data$Panel_Data), value=TRUE)), with=FALSE][list(trajectories.and.cuts$ID),-1L,with=FALSE,on="ID"] trajectories.and.cuts[,SGP_PROJECTION_GROUP_DATES:=gsub("NA; ", "", do.call(paste, c(my.tmp, list(sep="; "))))] } if ("YEAR_WITHIN" %in% names(panel.data[["Panel_Data"]])) { trajectories.and.cuts <- panel.data[["Panel_Data"]][,c("ID", "YEAR_WITHIN"), with=FALSE][trajectories.and.cuts, on="ID"] } if (!is.null(sgp.labels$my.grade)) { trajectories.and.cuts[, GRADE := sgp.labels$my.grade] } SGProjections[[tmp.path]] <- rbindlist(list(SGProjections[[tmp.path]], trajectories.and.cuts), fill=TRUE) if (print.time.taken) { messageSGP(paste("\tStarted studentGrowthProjections:", started.date)) messageSGP(paste0("\t\tContent Area: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ", paste(grade.progression, collapse=", "), " ", sgp.labels$my.extra.label, " (N=", format(dim(trajectories.and.cuts)[1L], big.mark=","), ")")) messageSGP(c(tmp.messages, "\tFinished studentGrowthProjections: ", prettyDate(), " in ", convertTime(timetakenSGP(started.at)), "\n")) } list(Coefficient_Matrices=panel.data[["Coefficient_Matrices"]], Cutscores=Cutscores, Goodness_of_Fit=panel.data[["Goodness_of_Fit"]], Knots_Boundaries=panel.data[["Knots_Boundaries"]], Panel_Data=panel.data[["Panel_Data"]], SGPercentiles=panel.data[["SGPercentiles"]], SGProjections=SGProjections, Simulated_SGPs=panel.data[["Simulated_SGPs"]]) }
bw.dnrd <- function(y, sig, error = 'normal') {
  homo <- FALSE
  sig <- sig^2
  if (length(sig) == 1) homo <- TRUE
  if (!homo) {
    if (length(y) != length(sig)) stop("Different length of 'y' and the variances.")
    if (any(is.na(y)) || any(is.na(sig))) {
      sele <- (!is.na(y)) & (!is.na(sig))
      y <- y[sele]
      sig <- sig[sele]
    }
    s2bar <- mean(sig)
    sbar <- sqrt(s2bar)
  } else {
    if (is.na(sig) || is.null(sig)) stop("SD(s) can not be empty!")
    s2bar <- sig
    sbar <- sqrt(s2bar)
  }
  if (length(y) < 3) {
    stop("Data set is too small!")
  } else {
    n <- length(y)
  }
  result <- switch(substr(tolower(error), 1, 3),
    lap = (5 * sbar^4 / n)^(1/9),
    nor = sbar * (log(n) / 2)^(-0.5),
    stop("This error type is not supported yet!")
  )
  return(result)
}
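# Illustrative only: a possible call to bw.dnrd() above on simulated data
# contaminated with homoskedastic normal measurement error. The sample size
# and the error SD are made up for this sketch; the function returns a
# rule-of-thumb bandwidth for deconvolution density estimation.
set.seed(1)
true_x <- rnorm(500)                     # unobserved signal
err_sd <- 0.4                            # assumed known error SD
y_obs  <- true_x + rnorm(500, sd = err_sd)
bw.dnrd(y_obs, sig = err_sd, error = "normal")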
NAM <- function(level){ x <- NULL if(level==1){ x1 <- github.cssegisanddata.covid19(country = "Namibia") x2 <- ourworldindata.org(id = "NAM") x <- full_join(x1, x2, by = "date") } return(x) }
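# Illustrative only: NAM() above merges the Namibia time series returned by
# the two data-source helpers it calls (defined elsewhere in this code base),
# so it can only run where those helpers and dplyr::full_join() are available.
# x <- NAM(level = 1)
# head(x)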
bondpv <- function(coupon, mat, yield, principal = 1000, freq = 1) {
  couponpd <- coupon / freq
  numpd <- mat * freq
  yieldpd <- yield / freq
  cf <- c(rep(couponpd, numpd - 1), principal + couponpd)
  pv <- sum(cf * (1 + yieldpd)^(-(1:numpd)))
  return(pv)
}

bondyield <- function(price, coupon, mat, principal = 1000, freq = 1) {
  pricediff <- function(x) bondpv(coupon, mat, x, principal, freq) - price
  irr <- uniroot(pricediff, c(-.25, 20), tol = 1e-08)
  return(irr$root)
}

duration <- function(price, coupon, mat, principal = 1000, freq = 1, modified = FALSE) {
  yield <- bondyield(price, coupon, mat, principal, freq)
  yieldpd <- yield / freq
  couponpd <- coupon / freq
  numpd <- mat * freq
  cf <- c(rep(couponpd, numpd - 1), principal + couponpd)
  dur <- sum((1:numpd) * cf / (1 + yieldpd)^(1:numpd)) /
    price / freq / (1 + yieldpd * modified)
  return(dur)
}

convexity <- function(price, coupon, mat, principal = 1000, freq = 1) {
  numpd <- mat * freq
  couponpd <- coupon / freq
  yield <- bondyield(price, coupon, mat, principal, freq)
  yieldpd <- yield / freq
  cf <- c(rep(couponpd, numpd - 1), principal + couponpd)
  pv <- sum((1:numpd + (1:numpd)^2) * cf / (1 + yieldpd)^(1:numpd))
  conv <- pv / price / (1 + yieldpd)^2 / freq^2
  return(conv)
}
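# Illustrative only: made-up bond terms showing how the helpers above fit
# together. A 10-year bond paying a 60-per-year coupon semi-annually, priced
# at a 6% yield, comes back at par; that price can then be fed to bondyield(),
# duration() and convexity().
px  <- bondpv(coupon = 60, mat = 10, yield = 0.06, principal = 1000, freq = 2)
ytm <- bondyield(price = px, coupon = 60, mat = 10, principal = 1000, freq = 2)
dur <- duration(price = px, coupon = 60, mat = 10, principal = 1000, freq = 2, modified = TRUE)
cvx <- convexity(price = px, coupon = 60, mat = 10, principal = 1000, freq = 2)
c(price = px, yield = ytm, mod_duration = dur, convexity = cvx)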
.representationPlot<-function(data,period,tm,varNames){ plotW<-tktoplevel() tktitle(plotW)<-tm$titlePlotWindow selectionFrame<-tkwidget(plotW,"labelframe",borderwidth = 0) tkgrid(selectionFrame,column=0, row=0) varList<-tk2listbox(selectionFrame, height=20, selectmode="single", background="white") for(i in 1:length(varNames)){ tkinsert(varList,"end",varNames[i]) } tkselection.set(varList, 0) tkgrid(varList, row=0,column=0) displayButton<-tk2button(selectionFrame,text=tm$displayButtonLabel,command=function(){ if (tclvalue(tkcurselection(varList))!="") { selection<-as.numeric(tkcurselection(varList))+1 tempSelect<-varNames[selection] myPlot<-tkrplot(plotW,hscale=2.5,vscale=2,function()plot(data[,tempSelect]~chron(period),ylab="Level",xlab="Date",main=tempSelect)) tkgrid(myPlot,row=0,column=1,sticky="w") tkconfigure(myPlot, bg="white") } else { tkmessageBox(message=tm$noParamMsg,type="ok",icon="info", title=tm$warningLabel) } }) tkgrid(displayButton,row=1,column=0) } .savePlot<-function(data,period,tm,output){ for(i in 1:ncol(data)){ figure<-paste(output,colnames(data)[i],".jpg",sep="") jpeg(figure,750,600) plot(data[,i]~chron(period),xlab="Dates",ylab="Level",main=colnames(data)[i]) dev.off() figure<-paste(output,"boxplot_",colnames(data)[i],".jpg",sep="") jpeg(figure,750,600) boxplot(data[,i],main=colnames(data)[i]) dev.off() } } .boxplotWindow<-function(data,varNames,tm){ boxplotWin<-tktoplevel() tktitle(boxplotWin)<-tm$titleBoxplotWindow selectionFrame<-tkwidget(boxplotWin,"labelframe",borderwidth = 0) tkgrid(selectionFrame,column=0, row=0) varList<-tk2listbox(selectionFrame, height=20, selectmode="single", background="white") for(i in 1:length(varNames)){tkinsert(varList,"end",varNames[i])} tkselection.set(varList, 0) tkgrid(varList, row=0,column=0) displayButton<-tk2button(selectionFrame,text=tm$displayButtonLabel,command=function(){ if (tclvalue(tkcurselection(varList))!="") { selection<-as.numeric(tkcurselection(varList))+1 tempVarNames<-varNames[selection] myBoxplot<-tkrplot(boxplotWin,hscale=2.5,vscale=2,function(){boxplot(data[,tempVarNames],main=tempVarNames)}) tkgrid(myBoxplot,row=0,column=1) tkconfigure(myBoxplot, bg="white") } else { tkmessageBox(message=tm$noParamMsg,type="ok",icon="info", title=tm$warningLabel) } }) tkgrid(displayButton,row=1,column=0) } .PCAwindow<-function(data,quanti.sup,tm){ fenetrePlotACP<-tktoplevel() tktitle(fenetrePlotACP)<-tm$titlePcaCircle ToRemove<-apply(is.na(data),MARGIN=1, FUN=any); data <- data[!ToRemove,] if (is.null(quanti.sup)){ PCAdudi<-PCA(X=data,scale.unit = TRUE,ncp=ncol(data),graph=FALSE) }else{ PCAdudi<-PCA(X=data,quanti.sup=quanti.sup,scale.unit = TRUE,ncp=ncol(data),graph=FALSE) } monACP<-tkrplot(fenetrePlotACP,hscale=2.5,vscale=2,function()plot(PCAdudi, axes = c(1, 2), choix = "var",cex.axis=1.5,cex.lab=1.5,cex=1.5)) tkgrid(monACP,row=0,sticky="w") tkconfigure(monACP, bg="white") textblanc<-tklabel(fenetrePlotACP,text=" ") tkgrid(textblanc,row=1, sticky="w") dimensionFrame <- tkwidget(fenetrePlotACP,"labelframe",borderwidth = 0) tkgrid(dimensionFrame , row=2) textDim1 <-tklabel(dimensionFrame ,text="x : Dim ") tkgrid(textDim1 ,row=0, column=0) laDim1<-tkentry(dimensionFrame, width=3, textvariable=tclVar("1")) tkgrid(laDim1, row=0, column=1, sticky="w") textDim1 <-tklabel(dimensionFrame ,text=" ") tkgrid(textDim1 ,row=0, column=2) textDim2 <-tklabel(dimensionFrame ,text="y : Dim ") tkgrid(textDim2 ,row=0, column=3) laDim2<-tkentry(dimensionFrame, width=3, textvariable=tclVar("2")) tkgrid(laDim2, row=0, column=4, sticky="w") textDim1 
<-tklabel(dimensionFrame ,text=" ") tkgrid(textDim1 ,row=0, column=5) boutonVisualiser<-tk2button(dimensionFrame,text=tm$displayButtonLabel,command=function(){ Dim1Ecrit<-as.numeric(tclvalue(tkget(laDim1))); Dim2Ecrit<-as.numeric(tclvalue(tkget(laDim2))); if(Dim1Ecrit>0){ if(Dim2Ecrit>0){ if(Dim1Ecrit<(ncol(data)+1)){ if(Dim2Ecrit<(ncol(data)+1)){ monACP<-tkrplot(fenetrePlotACP,hscale=2.5,vscale=2,function()plot(PCAdudi, axes = c(Dim1Ecrit, Dim2Ecrit), choix = "var",cex.axis=1.5,cex.lab=1.5,cex=1.5)) tkgrid(monACP,row=0,sticky="w") tkconfigure(monACP, bg="white") } else{ tkmessageBox(message=paste(tm$dimensionWarning,ncol(data),"!",sep=""),type="ok",icon="info", title=tm$warningLabel) } } else{ tkmessageBox(message=paste(tm$dimensionWarning,ncol(data),"!",sep=""),type="ok",icon="info", title=tm$warningLabel) } } else{ tkmessageBox(message=paste(tm$dimensionWarning,ncol(data),"!",sep=""),type="ok",icon="info", title=tm$warningLabel) } } else{ tkmessageBox(message=paste(tm$dimensionWarning,ncol(data),"!",sep=""),type="ok",icon="info", title=tm$warningLabel) } }) tkgrid(boutonVisualiser,row=0,column=6) return(PCAdudi) }
plotNominalFittedVariable <- function(nameVar,numcateg,beta,varstudyC,rowCoords,levelsVar=NULL,numFactors=2,planex = 1,planey = 2,xi=-3.5,xu=3.5,yi=-3.5,yu=3.5, CexVar=0.7,ColorVar="blue",PchVar=0.7,addToPlot=FALSE,QuitNotPredicted=TRUE,ShowResults=TRUE, linesVoronoi=TRUE,LabelVar=TRUE){ mvv = mvvSingleVariable(nameVar,numcateg,beta,varstudyC,rowCoords,planex,planey,numFactors, QuitNotPredicted,ShowResults) AtLeastR2 = 0.01 LabValVar=levelsVar if(addToPlot == FALSE){ dev.new() plot(0, 0, cex = 0,asp=1, xaxt = "s", yaxt = "s" ,xlim=c(xi,xu),ylim=c(yi,yu), main="Nominal Logistic Biplot", xlab=paste("Axis ",planex,sep=""), ylab=paste("Axis ",planey,sep="")) } if(mvv$numFit==1){ Barx=sum(rowCoords[,1])/nrow(rowCoords) Bary=sum(rowCoords[,2])/nrow(rowCoords) points(Barx,Bary,pch=PchVar,cex=CexVar,col=ColorVar) text(Barx,Bary, paste(mvv$equivFit,"_",nameVar ,sep="") , col = ColorVar, cex = CexVar,pos=1,offset=0.1) }else if(mvv$numFit==2){ if(numcateg == 2){ x = cbind(rowCoords[,1],rowCoords[,2]) plot2CategLine(mvv,x,AtLeastR2,line=linesVoronoi,LabelVar=LabelVar,CexVar=CexVar,ColorVar=ColorVar,PchVar=PchVar,LabValVar=LabValVar) }else if(QuitNotPredicted == TRUE){ x = cbind(rowCoords[,1],rowCoords[,2]) plot2CategLine(mvv,x,AtLeastR2,line=linesVoronoi,LabelVar=LabelVar,CexVar=CexVar,ColorVar=ColorVar,PchVar=PchVar,LabValVar=LabValVar) }else{ plot.voronoiprob(mvv,LabelVar=LabelVar,CexVar=CexVar,ColorVar=ColorVar,PchVar=PchVar,AtLeastR2=AtLeastR2,lines=linesVoronoi,LabValVar=LabValVar) } }else{ plot.voronoiprob(mvv,LabelVar=LabelVar,CexVar=CexVar,ColorVar=ColorVar,PchVar=PchVar,AtLeastR2=AtLeastR2,lines=linesVoronoi,LabValVar=LabValVar) } }
makeListInstance <- function(object,
                             targetClassName,
                             targetListClassName,
                             permittedValueClassName,
                             key_value_func) {
  if (allElementsAreOfClass(object, targetClassName)) {
    return(as(object, targetListClassName))
  } else {
    if (allElementsAreOfClass(object, permittedValueClassName)) {
      keys <- names(object)
      l <- lapply(
        keys,
        function(key) {
          value <- object[[key]]
          key_value_func(key, value)
        }
      )
      return(as(l, targetListClassName))
    } else {
      stop(
        paste(
          'The list must contain either instances of class',
          targetClassName,
          'or must have values of class:',
          permittedValueClassName,
          'but I got:',
          object
        )
      )
    }
  }
}
continuous_summary <- function(variable) {
  variable_to_summarize <- variable
  function(data, stat_display, ...) {
    summarize_continuous(
      data = data,
      variable = variable_to_summarize,
      by = NULL,
      stat_display = stat_display,
      summary_type = "continuous"
    ) %>%
      dplyr::select(-.data$variable, -.data$stat_display)
  }
}

ratio_summary <- function(numerator, denominator, na.rm = TRUE, conf.level = 0.95) {
  function(data, ...) {
    num <- sum(data[[numerator]], na.rm = na.rm)
    denom <- sum(data[[denominator]], na.rm = na.rm)
    ratio <- num / denom
    # the exact Poisson CI is only defined for an integer event count
    if (num %% 1 == 0) {
      ci_poisson <- stats::poisson.test(num, denom, conf.level = conf.level)$conf.int
    } else {
      ci_poisson <- c(NA, NA)
    }
    dplyr::tibble(
      num = num,
      denom = denom,
      ratio = ratio,
      conf.low = ci_poisson[1],
      conf.high = ci_poisson[2]
    )
  }
}

proportion_summary <- function(variable, value, weights = NULL, na.rm = TRUE,
                               conf.level = 0.95,
                               method = c("wilson", "wilson.no.correct", "exact", "asymptotic")) {
  method <- match.arg(method)
  variable_to_summarize <- variable
  function(data, ...) {
    if (is.null(weights)) {
      n <- sum(data[[variable_to_summarize]] %in% value, na.rm = na.rm)
      N <- sum(!is.na(data[[variable_to_summarize]]), na.rm = na.rm)
    } else {
      n <- sum((data[[variable_to_summarize]] %in% value) * data[[weights]], na.rm = na.rm)
      N <- sum((!is.na(data[[variable_to_summarize]])) * data[[weights]], na.rm = na.rm)
    }
    # anyNA(n, N) passed N as the 'recursive' argument; wrap both counts instead
    if (anyNA(c(n, N))) {
      ci <- c(NA, NA)
    } else {
      if (method %in% c("wilson", "wilson.no.correct")) {
        ci <- stats::prop.test(n, N, conf.level = conf.level,
                               correct = isTRUE(method == "wilson")) %>%
          purrr::pluck("conf.int")
      } else if (method %in% c("exact", "asymptotic")) {
        assert_package("Hmisc", fn = 'proportion_summary(method = c("exact", "asymptotic"))')
        ci <- Hmisc::binconf(n, N, method = method, alpha = 1 - conf.level)[2:3]
      }
    }
    dplyr::tibble(
      n = n,
      N = N,
      prop = n / N,
      conf.low = ci[1],
      conf.high = ci[2]
    )
  }
}
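# Illustrative only: these factories return functions meant to be supplied as
# custom statistics (e.g. in gtsummary tables). The sketch below, assuming
# dplyr, purrr and the magrittr pipe are attached, calls one returned function
# directly on a toy data frame to show the shape of its output (n, N, prop and
# a Wilson confidence interval).
toy <- data.frame(response = c(1, 0, 1, 1, NA, 0, 1, 0))
prop_fun <- proportion_summary(variable = "response", value = 1)
prop_fun(toy)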
metacont <- function(n.e, mean.e, sd.e, n.c, mean.c, sd.c, studlab, data = NULL, subset = NULL, exclude = NULL, id = NULL, median.e, q1.e, q3.e, min.e, max.e, median.c, q1.c, q3.c, min.c, max.c, method.mean = "Luo", method.sd = "Shi", approx.mean.e, approx.mean.c = approx.mean.e, approx.sd.e, approx.sd.c = approx.sd.e, sm = gs("smcont"), pooledvar = gs("pooledvar"), method.smd = gs("method.smd"), sd.glass = gs("sd.glass"), exact.smd = gs("exact.smd"), method.ci = gs("method.ci.cont"), level = gs("level"), level.ma = gs("level.ma"), fixed = gs("fixed"), random = gs("random") | !is.null(tau.preset), overall = fixed | random, overall.hetstat = fixed | random, hakn = gs("hakn"), adhoc.hakn = gs("adhoc.hakn"), method.tau = gs("method.tau"), method.tau.ci = gs("method.tau.ci"), tau.preset = NULL, TE.tau = NULL, tau.common = gs("tau.common"), prediction = gs("prediction"), level.predict = gs("level.predict"), method.bias = gs("method.bias"), backtransf = gs("backtransf"), text.fixed = gs("text.fixed"), text.random = gs("text.random"), text.predict = gs("text.predict"), text.w.fixed = gs("text.w.fixed"), text.w.random = gs("text.w.random"), title = gs("title"), complab = gs("complab"), outclab = "", label.e = gs("label.e"), label.c = gs("label.c"), label.left = gs("label.left"), label.right = gs("label.right"), subgroup, subgroup.name = NULL, print.subgroup.name = gs("print.subgroup.name"), sep.subgroup = gs("sep.subgroup"), test.subgroup = gs("test.subgroup"), byvar, keepdata = gs("keepdata"), warn = gs("warn"), warn.deprecated = gs("warn.deprecated"), control = NULL, ...) { chknull(sm) sm <- setchar(sm, gs("sm4cont")) chklevel(level) chklogical(hakn) adhoc.hakn <- setchar(adhoc.hakn, gs("adhoc4hakn")) method.tau <- setchar(method.tau, gs("meth4tau")) missing.id <- missing(id) if (is.null(method.tau.ci)) if (method.tau == "DL") method.tau.ci <- "J" else if (!missing.id) method.tau.ci <- "PL" else method.tau.ci <- "QP" method.tau.ci <- setchar(method.tau.ci, gs("meth4tau.ci")) chklogical(tau.common) chklogical(prediction) chklevel(level.predict) method.bias <- setmethodbias(method.bias, c(1:3, if (sm == "SMD") 8)) if (!is.null(text.fixed)) chkchar(text.fixed, length = 1) if (!is.null(text.random)) chkchar(text.random, length = 1) if (!is.null(text.predict)) chkchar(text.predict, length = 1) if (!is.null(text.w.fixed)) chkchar(text.w.fixed, length = 1) if (!is.null(text.w.random)) chkchar(text.w.random, length = 1) chklogical(keepdata) fun <- "metacont" if (sm != "MD") method.ci <- "z" method.ci <- setchar(method.ci, gs("ci4cont")) method.mean <- setchar(method.mean, c("Luo", "Wan")) method.sd <- setchar(method.sd, c("Shi", "Wan")) chklogical(pooledvar) method.smd <- setchar(method.smd, c("Hedges", "Cohen", "Glass")) sd.glass <- setchar(sd.glass, c("control", "experimental")) chklogical(warn) args <- list(...) 
chklogical(warn.deprecated) level.ma <- deprecated(level.ma, missing(level.ma), args, "level.comb", warn.deprecated) chklevel(level.ma) fixed <- deprecated(fixed, missing(fixed), args, "comb.fixed", warn.deprecated) chklogical(fixed) random <- deprecated(random, missing(random), args, "comb.random", warn.deprecated) chklogical(random) missing.subgroup.name <- missing(subgroup.name) subgroup.name <- deprecated(subgroup.name, missing.subgroup.name, args, "bylab", warn.deprecated) print.subgroup.name <- deprecated(print.subgroup.name, missing(print.subgroup.name), args, "print.byvar", warn.deprecated) print.subgroup.name <- replaceNULL(print.subgroup.name, FALSE) chklogical(print.subgroup.name) sep.subgroup <- deprecated(sep.subgroup, missing(sep.subgroup), args, "byseparator", warn.deprecated) if (!is.null(sep.subgroup)) chkchar(sep.subgroup, length = 1) chklogical(overall) chklogical(overall.hetstat) nulldata <- is.null(data) if (nulldata) data <- sys.frame(sys.parent()) mf <- match.call() missing.mean.e <- missing(mean.e) missing.sd.e <- missing(sd.e) missing.mean.c <- missing(mean.c) missing.sd.c <- missing(sd.c) missing.median.e <- missing(median.e) missing.q1.e <- missing(q1.e) missing.q3.e <- missing(q3.e) missing.min.e <- missing(min.e) missing.max.e <- missing(max.e) missing.median.c <- missing(median.c) missing.q1.c <- missing(q1.c) missing.q3.c <- missing(q3.c) missing.min.c <- missing(min.c) missing.max.c <- missing(max.c) if (!missing.id & is.null(id)) missing.id <- TRUE if (missing.mean.e & missing.median.e) stop("Provide either argument 'mean.e' or 'median.e'.", call. = FALSE) if (missing.mean.c & missing.median.c) stop("Provide either argument 'mean.c' or 'median.c'.", call. = FALSE) if (missing.sd.e & !((!missing.q1.e & !missing.q3.e) | (!missing.min.e & !missing.max.e))) stop("Provide either argument 'sd.e' and ", "arguments 'q1.e' & 'q3.e' or 'min.e & 'max.e'.", call. = FALSE) if (missing.sd.c & !((!missing.q1.c & !missing.q3.c) | (!missing.min.c & !missing.max.c))) stop("Provide either argument 'sd.c' and ", "arguments 'q1.c' & 'q3.c' or 'min.c & 'max.c'.", call. 
= FALSE) n.e <- eval(mf[[match("n.e", names(mf))]], data, enclos = sys.frame(sys.parent())) chknull(n.e) k.All <- length(n.e) mean.e <- eval(mf[[match("mean.e", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.mean.e) chknull(mean.e) else mean.e <- rep(NA, k.All) sd.e <- eval(mf[[match("sd.e", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.sd.e) chknull(sd.e) else sd.e <- rep(NA, k.All) n.c <- eval(mf[[match("n.c", names(mf))]], data, enclos = sys.frame(sys.parent())) chknull(n.c) mean.c <- eval(mf[[match("mean.c", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.mean.c) chknull(mean.c) else mean.c <- rep(NA, k.All) sd.c <- eval(mf[[match("sd.c", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.sd.c) chknull(sd.c) else sd.c <- rep(NA, k.All) id <- eval(mf[[match("id", names(mf))]], data, enclos = sys.frame(sys.parent())) studlab <- eval(mf[[match("studlab", names(mf))]], data, enclos = sys.frame(sys.parent())) studlab <- setstudlab(studlab, k.All) missing.subgroup <- missing(subgroup) subgroup <- eval(mf[[match("subgroup", names(mf))]], data, enclos = sys.frame(sys.parent())) missing.byvar <- missing(byvar) byvar <- eval(mf[[match("byvar", names(mf))]], data, enclos = sys.frame(sys.parent())) subgroup <- deprecated2(subgroup, missing.subgroup, byvar, missing.byvar, warn.deprecated) by <- !is.null(subgroup) subset <- eval(mf[[match("subset", names(mf))]], data, enclos = sys.frame(sys.parent())) missing.subset <- is.null(subset) exclude <- eval(mf[[match("exclude", names(mf))]], data, enclos = sys.frame(sys.parent())) missing.exclude <- is.null(exclude) median.e <- eval(mf[[match("median.e", names(mf))]], data, enclos = sys.frame(sys.parent())) q1.e <- eval(mf[[match("q1.e", names(mf))]], data, enclos = sys.frame(sys.parent())) q3.e <- eval(mf[[match("q3.e", names(mf))]], data, enclos = sys.frame(sys.parent())) min.e <- eval(mf[[match("min.e", names(mf))]], data, enclos = sys.frame(sys.parent())) max.e <- eval(mf[[match("max.e", names(mf))]], data, enclos = sys.frame(sys.parent())) median.c <- eval(mf[[match("median.c", names(mf))]], data, enclos = sys.frame(sys.parent())) q1.c <- eval(mf[[match("q1.c", names(mf))]], data, enclos = sys.frame(sys.parent())) q3.c <- eval(mf[[match("q3.c", names(mf))]], data, enclos = sys.frame(sys.parent())) min.c <- eval(mf[[match("min.c", names(mf))]], data, enclos = sys.frame(sys.parent())) max.c <- eval(mf[[match("max.c", names(mf))]], data, enclos = sys.frame(sys.parent())) missing.approx.mean.e <- missing(approx.mean.e) approx.mean.e <- eval(mf[[match("approx.mean.e", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.approx.mean.e) missing.approx.mean.c <- FALSE else missing.approx.mean.c <- missing(approx.mean.c) approx.mean.c <- eval(mf[[match("approx.mean.c", names(mf))]], data, enclos = sys.frame(sys.parent())) missing.approx.sd.e <- missing(approx.sd.e) approx.sd.e <- eval(mf[[match("approx.sd.e", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!missing.approx.sd.e) missing.approx.sd.c <- FALSE else missing.approx.sd.c <- missing(approx.sd.c) approx.sd.c <- eval(mf[[match("approx.sd.c", names(mf))]], data, enclos = sys.frame(sys.parent())) if (!by & tau.common) { warning("Value for argument 'tau.common' set to FALSE as ", "argument 'subgroup' is missing.") tau.common <- FALSE } if (by & !tau.common & !is.null(tau.preset)) { warning("Argument 'tau.common' set to TRUE as ", "argument tau.preset is not NULL.") tau.common <- TRUE } arg <- "n.e" 
chklength(mean.e, k.All, arg) chklength(sd.e, k.All, arg) chklength(n.c, k.All, arg) chklength(mean.c, k.All, arg) chklength(sd.c, k.All, arg) chklength(studlab, k.All, arg) if (!missing.id) chklength(id, k.All, arg) if (!missing.median.e) chklength(median.e, k.All, arg) if (!missing.q1.e) chklength(q1.e, k.All, arg) if (!missing.q3.e) chklength(q3.e, k.All, arg) if (!missing.min.e) chklength(min.e, k.All, arg) if (!missing.max.e) chklength(max.e, k.All, arg) if (!missing.median.c) chklength(median.c, k.All, arg) if (!missing.q1.c) chklength(q1.c, k.All, arg) if (!missing.q3.c) chklength(q3.c, k.All, arg) if (!missing.min.c) chklength(min.c, k.All, arg) if (!missing.max.c) chklength(max.c, k.All, arg) if (!missing.approx.mean.e) { if (length(approx.mean.e) == 1) rep_len(approx.mean.e, k.All) else chklength(approx.mean.e, k.All, arg) approx.mean.e <- setchar(approx.mean.e, c("", "iqr.range", "iqr", "range")) } if (!missing.approx.mean.c) { if (length(approx.mean.c) == 1) rep_len(approx.mean.c, k.All) else chklength(approx.mean.c, k.All, arg) approx.mean.c <- setchar(approx.mean.c, c("", "iqr.range", "iqr", "range")) } if (!missing.approx.sd.e) { if (length(approx.sd.e) == 1) rep_len(approx.sd.e, k.All) else chklength(approx.sd.e, k.All, arg) approx.sd.e <- setchar(approx.sd.e, c("", "iqr.range", "iqr", "range")) } if (!missing.approx.sd.c) { if (length(approx.sd.c) == 1) rep_len(approx.sd.c, k.All) else chklength(approx.sd.c, k.All, arg) approx.sd.c <- setchar(approx.sd.c, c("", "iqr.range", "iqr", "range")) } if (by) { chklength(subgroup, k.All, arg) chklogical(test.subgroup) } if (!missing.subset) if ((is.logical(subset) & (sum(subset) > k.All)) || (length(subset) > k.All)) stop("Length of subset is larger than number of studies.") if (!missing.exclude) { if ((is.logical(exclude) & (sum(exclude) > k.All)) || (length(exclude) > k.All)) stop("Length of argument 'exclude' is larger than number of studies.") exclude2 <- rep(FALSE, k.All) exclude2[exclude] <- TRUE exclude <- exclude2 } else exclude <- rep(FALSE, k.All) if (keepdata) { if (nulldata) data <- data.frame(.n.e = n.e) else data$.n.e <- n.e data$.mean.e <- mean.e data$.sd.e <- sd.e data$.n.c <- n.c data$.mean.c <- mean.c data$.sd.c <- sd.c data$.studlab <- studlab if (!missing.median.e) data$.median.e <- median.e if (!missing.q1.e) data$.q1.e <- q1.e if (!missing.q3.e) data$.q3.e <- q3.e if (!missing.min.e) data$.min.e <- min.e if (!missing.max.e) data$.max.e <- max.e if (!missing.median.c) data$.median.c <- median.c if (!missing.q1.c) data$.q1.c <- q1.c if (!missing.q3.c) data$.q3.c <- q3.c if (!missing.min.c) data$.min.c <- min.c if (!missing.max.c) data$.max.c <- max.c if (!missing.approx.mean.e) data$.approx.mean.e <- approx.mean.e if (!missing.approx.mean.c) data$.approx.mean.c <- approx.mean.c if (!missing.approx.sd.e) data$.approx.sd.e <- approx.sd.e if (!missing.approx.sd.c) data$.approx.sd.c <- approx.sd.c if (by) data$.subgroup <- subgroup if (!missing.subset) { if (length(subset) == dim(data)[1]) data$.subset <- subset else { data$.subset <- FALSE data$.subset[subset] <- TRUE } } if (!missing.exclude) data$.exclude <- exclude if (!missing.id) data$.id <- id } if (!missing.subset) { n.e <- n.e[subset] mean.e <- mean.e[subset] sd.e <- sd.e[subset] n.c <- n.c[subset] mean.c <- mean.c[subset] sd.c <- sd.c[subset] studlab <- studlab[subset] if (!missing.id) id <- id[subset] exclude <- exclude[subset] if (!missing.median.e) median.e <- median.e[subset] if (!missing.q1.e) q1.e <- q1.e[subset] if (!missing.q3.e) q3.e <- 
q3.e[subset] if (!missing.min.e) min.e <- min.e[subset] if (!missing.max.e) max.e <- max.e[subset] if (!missing.median.c) median.c <- median.c[subset] if (!missing.q1.c) q1.c <- q1.c[subset] if (!missing.q3.c) q3.c <- q3.c[subset] if (!missing.min.c) min.c <- min.c[subset] if (!missing.max.c) max.c <- max.c[subset] if (!missing.approx.mean.e) approx.mean.e <- approx.mean.e[subset] if (!missing.approx.mean.c) approx.mean.c <- approx.mean.c[subset] if (!missing.approx.sd.e) approx.sd.e <- approx.sd.e[subset] if (!missing.approx.sd.c) approx.sd.c <- approx.sd.c[subset] if (by) subgroup <- subgroup[subset] } k.all <- length(n.e) if (k.all == 0) stop("No studies to combine in meta-analysis.") if (k.all == 1) { fixed <- FALSE random <- FALSE prediction <- FALSE overall <- FALSE overall.hetstat <- FALSE } chknumeric(n.e) chknumeric(mean.e) chknumeric(sd.e) chknumeric(n.c) chknumeric(mean.c) chknumeric(sd.c) if (!missing.median.e) chknumeric(median.e) if (!missing.q1.e) chknumeric(q1.e) if (!missing.q3.e) chknumeric(q3.e) if (!missing.min.e) chknumeric(min.e) if (!missing.max.e) chknumeric(max.e) if (!missing.median.c) chknumeric(median.c) if (!missing.q1.c) chknumeric(q1.c) if (!missing.q3.c) chknumeric(q3.c) if (!missing.min.c) chknumeric(min.c) if (!missing.max.c) chknumeric(max.c) n.e <- int2num(n.e) mean.e <- int2num(mean.e) sd.e <- int2num(sd.e) n.c <- int2num(n.c) mean.c <- int2num(mean.c) sd.c <- int2num(sd.c) if (!missing.median.e) median.e <- int2num(median.e) if (!missing.q1.e) q1.e <- int2num(q1.e) if (!missing.q3.e) q3.e <- int2num(q3.e) if (!missing.min.e) min.e <- int2num(min.e) if (!missing.max.e) max.e <- int2num(max.e) if (!missing.median.c) median.c <- int2num(median.c) if (!missing.q1.c) q1.c <- int2num(q1.c) if (!missing.q3.c) q3.c <- int2num(q3.c) if (!missing.min.c) min.c <- int2num(min.c) if (!missing.max.c) max.c <- int2num(max.c) if (by) { chkmiss(subgroup) if (missing.subgroup.name & is.null(subgroup.name)) { if (!missing.subgroup) subgroup.name <- byvarname(mf[[match("subgroup", names(mf))]]) else if (!missing.byvar) subgroup.name <- byvarname(mf[[match("byvar", names(mf))]]) } } if (!is.null(subgroup.name)) chkchar(subgroup.name, length = 1) if (missing.approx.mean.e) { approx.mean.e <- rep_len("", length(n.e)) sel.NA.e <- is.na(mean.e) if (any(sel.NA.e) & !missing.median.e & !missing.q1.e & !missing.q3.e & !missing.min.e & !missing.max.e) { j <- sel.NA.e & !is.na(median.e) & !is.na(q1.e) & !is.na(q3.e) & !is.na(min.e) & !is.na(max.e) approx.mean.e[j] <- "iqr.range" mean.e[j] <- mean.sd.iqr.range(n.e[j], median.e[j], q1.e[j], q3.e[j], min.e[j], max.e[j], method.mean)$mean } sel.NA.e <- is.na(mean.e) if (any(sel.NA.e) & !missing.median.e & !missing.q1.e & !missing.q3.e) { j <- sel.NA.e & !is.na(median.e) & !is.na(q1.e) & !is.na(q3.e) approx.mean.e[j] <- "iqr" mean.e[j] <- mean.sd.iqr(n.e[j], median.e[j], q1.e[j], q3.e[j], method.mean)$mean } sel.NA.e <- is.na(mean.e) if (any(sel.NA.e) & !missing.median.e & !missing.min.e & !missing.max.e) { j <- sel.NA.e & !is.na(median.e) & !is.na(min.e) & !is.na(max.e) approx.mean.e[j] <- "range" mean.e[j] <- mean.sd.range(n.e[j], median.e[j], min.e[j], max.e[j], method.mean)$mean } } else { j <- 0 for (i in approx.mean.e) { j <- j + 1 if (i == "iqr.range") mean.e[j] <- mean.sd.iqr.range(n.e[j], median.e[j], q1.e[j], q3.e[j], min.e[j], max.e[j], method.mean)$mean else if (i == "iqr") mean.e[j] <- mean.sd.iqr(n.e[j], median.e[j], q1.e[j], q3.e[j], method.mean)$mean else if (i == "range") mean.e[j] <- mean.sd.range(n.e[j], median.e[j], 
min.e[j], max.e[j], method.mean)$mean } } if (missing.approx.mean.c) { approx.mean.c <- rep_len("", length(n.c)) sel.NA.c <- is.na(mean.c) if (any(sel.NA.c) & !missing.median.c & !missing.q1.c & !missing.q3.c & !missing.min.c & !missing.max.c) { j <- sel.NA.c & !is.na(median.c) & !is.na(q1.c) & !is.na(q3.c) & !is.na(min.c) & !is.na(max.c) approx.mean.c[j] <- "iqr.range" mean.c[j] <- mean.sd.iqr.range(n.c[j], median.c[j], q1.c[j], q3.c[j], min.c[j], max.c[j], method.mean)$mean } sel.NA.c <- is.na(mean.c) if (any(sel.NA.c) & !missing.median.c & !missing.q1.c & !missing.q3.c) { j <- sel.NA.c & !is.na(median.c) & !is.na(q1.c) & !is.na(q3.c) approx.mean.c[j] <- "iqr" mean.c[j] <- mean.sd.iqr(n.c[j], median.c[j], q1.c[j], q3.c[j], method.mean)$mean } sel.NA.c <- is.na(mean.c) if (any(sel.NA.c) & !missing.median.c & !missing.min.c & !missing.max.c) { j <- sel.NA.c & !is.na(median.c) & !is.na(min.c) & !is.na(max.c) approx.mean.c[j] <- "range" mean.c[j] <- mean.sd.range(n.c[j], median.c[j], min.c[j], max.c[j], method.mean)$mean } } else { j <- 0 for (i in approx.mean.c) { j <- j + 1 if (i == "iqr.range") mean.c[j] <- mean.sd.iqr.range(n.c[j], median.c[j], q1.c[j], q3.c[j], min.c[j], max.c[j], method.mean)$mean else if (i == "iqr") mean.c[j] <- mean.sd.iqr(n.c[j], median.c[j], q1.c[j], q3.c[j], method.mean)$mean else if (i == "range") mean.c[j] <- mean.sd.range(n.c[j], median.c[j], min.c[j], max.c[j], method.mean)$mean } } if (missing.median.e) { median.e.sd <- mean.e missing.median.e <- FALSE export.median.e <- FALSE } else { median.e.sd <- median.e median.e.sd[is.na(median.e.sd)] <- mean.e[is.na(median.e.sd)] export.median.e <- TRUE } if (missing.approx.sd.e) { approx.sd.e <- rep_len("", length(n.e)) sel.NA.e <- is.na(sd.e) if (any(sel.NA.e) & !missing.median.e & !missing.q1.e & !missing.q3.e & !missing.min.e & !missing.max.e) { j <- sel.NA.e & !is.na(median.e.sd) & !is.na(q1.e) & !is.na(q3.e) & !is.na(min.e) & !is.na(max.e) approx.sd.e[j] <- "iqr.range" sd.e[j] <- mean.sd.iqr.range(n.e[j], median.e.sd[j], q1.e[j], q3.e[j], min.e[j], max.e[j], method.sd = method.sd)$sd } sel.NA.e <- is.na(sd.e) if (any(sel.NA.e) & !missing.median.e & !missing.q1.e & !missing.q3.e) { j <- sel.NA.e & !is.na(median.e.sd) & !is.na(q1.e) & !is.na(q3.e) approx.sd.e[j] <- "iqr" sd.e[j] <- mean.sd.iqr(n.e[j], median.e.sd[j], q1.e[j], q3.e[j])$sd } sel.NA.e <- is.na(sd.e) if (any(sel.NA.e) & !missing.median.e & !missing.min.e & !missing.max.e) { j <- sel.NA.e & !is.na(median.e.sd) & !is.na(min.e) & !is.na(max.e) approx.sd.e[j] <- "range" sd.e[j] <- mean.sd.range(n.e[j], median.e.sd[j], min.e[j], max.e[j])$sd } } else { j <- 0 for (i in approx.sd.e) { j <- j + 1 if (i == "iqr.range") sd.e[j] <- mean.sd.iqr.range(n.e[j], median.e.sd[j], q1.e[j], q3.e[j], min.e[j], max.e[j], method.sd = method.sd)$sd else if (i == "iqr") sd.e[j] <- mean.sd.iqr(n.e[j], median.e.sd[j], q1.e[j], q3.e[j])$sd else if (i == "range") sd.e[j] <- mean.sd.range(n.e[j], median.e.sd[j], min.e[j], max.e[j])$sd } } if (missing.median.c) { median.c.sd <- mean.c missing.median.c <- FALSE export.median.c <- FALSE } else { median.c.sd <- median.c median.c.sd[is.na(median.c.sd)] <- mean.c[is.na(median.c.sd)] export.median.c <- TRUE } if (missing.approx.sd.c) { approx.sd.c <- rep_len("", length(n.c)) sel.NA.c <- is.na(sd.c) if (any(sel.NA.c) & !missing.median.c & !missing.q1.c & !missing.q3.c & !missing.min.c & !missing.max.c) { j <- sel.NA.c & !is.na(median.c.sd) & !is.na(q1.c) & !is.na(q3.c) & !is.na(min.c) & !is.na(max.c) approx.sd.c[j] <- "iqr.range" sd.c[j] 
<- mean.sd.iqr.range(n.c[j], median.c.sd[j], q1.c[j], q3.c[j], min.c[j], max.c[j], method.sd = method.sd)$sd } sel.NA.c <- is.na(sd.c) if (any(sel.NA.c) & !missing.median.c & !missing.q1.c & !missing.q3.c) { j <- sel.NA.c & !is.na(median.c.sd) & !is.na(q1.c) & !is.na(q3.c) approx.sd.c[j] <- "iqr" sd.c[j] <- mean.sd.iqr(n.c[j], median.c.sd[j], q1.c[j], q3.c[j])$sd } sel.NA.c <- is.na(sd.c) if (any(sel.NA.c) & !missing.median.c & !missing.min.c & !missing.max.c) { j <- sel.NA.c & !is.na(median.c.sd) & !is.na(min.c) & !is.na(max.c) approx.sd.c[j] <- "range" sd.c[j] <- mean.sd.range(n.c[j], median.c.sd[j], min.c[j], max.c[j])$sd } } else { j <- 0 for (i in approx.sd.c) { j <- j + 1 if (i == "iqr.range") sd.c[j] <- mean.sd.iqr.range(n.c[j], median.c[j], q1.c[j], q3.c[j], min.c[j], max.c[j], method.sd = method.sd)$sd else if (i == "iqr") sd.c[j] <- mean.sd.iqr(n.c[j], median.c.sd[j], q1.c[j], q3.c[j])$sd else if (i == "range") sd.c[j] <- mean.sd.range(n.c[j], median.c.sd[j], min.c[j], max.c[j])$sd } } if (keepdata) { if (!isCol(data, ".subset")) { data$.sd.e <- sd.e data$.mean.e <- mean.e data$.sd.c <- sd.c data$.mean.c <- mean.c if (!missing.approx.sd.e) data$.approx.sd.e <- approx.sd.e if (!missing.approx.sd.c) data$.approx.sd.c <- approx.sd.c if (!missing.approx.mean.e) data$.approx.mean.e <- approx.mean.e if (!missing.approx.mean.c) data$.approx.mean.c <- approx.mean.c } else { data$.sd.e[data$.subset] <- sd.e data$.mean.e[data$.subset] <- mean.e data$.sd.c[data$.subset] <- sd.c data$.mean.c[data$.subset] <- mean.c if (!missing.approx.sd.e) data$.approx.sd.e[data$.subset] <- approx.sd.e if (!missing.approx.sd.c) data$.approx.sd.c[data$.subset] <- approx.sd.c if (!missing.approx.mean.e) data$.approx.mean.e[data$.subset] <- approx.mean.e if (!missing.approx.mean.c) data$.approx.mean.c[data$.subset] <- approx.mean.c } } npn.n <- npn(n.e) | npn(n.c) N <- n.e + n.c if (sm == "MD" | sm == "ROM") var.pooled <- ((n.e - 1) * sd.e^2 + (n.c - 1) * sd.c^2) / (N - 2) if (any(npn.n) & warn) warning("Note, studies with non-positive values for n.e and / or n.c get no weight in meta-analysis.") if (sm == "MD") { TE <- ifelse(npn.n, NA, mean.e - mean.c) if (pooledvar) seTE <- ifelse(npn.n, NA, sqrt(var.pooled * (1 / n.e + 1 / n.c))) else seTE <- ifelse(npn.n, NA, sqrt(sd.e^2 / n.e + sd.c^2 / n.c)) seTE[is.na(TE)] <- NA if (method.ci == "t") ci.study <- ci(TE, seTE, df = n.e + n.c - 2) } else if (sm == "SMD") { J <- function(x) exp(lgamma(x / 2) - log(sqrt(x / 2)) - lgamma((x - 1) / 2)) K <- function(x) 1 - (x - 2) / (x * J(x)^2) if (method.smd %in% c("Hedges", "Cohen")) S.within <- sqrt(((n.e - 1) * sd.e^2 + (n.c - 1) * sd.c^2) / (N - 2)) else S.within <- if (sd.glass == "control") sd.c else sd.e smd <- ifelse(npn.n, NA, (mean.e - mean.c) / S.within) if (method.smd == "Cohen") { TE <- smd if (exact.smd) { J <- function(x) exp(lgamma(x / 2) - log(sqrt(x / 2)) - lgamma((x - 1) / 2)) K <- function(x) 1 - (x - 2) / (x * J(x)^2) seTE <- ifelse(npn.n, NA, sqrt(1 / n.e + 1 / n.c + (J(N - 2) * smd)^2 * K(N - 2))) } else seTE <- ifelse(npn.n, NA, sqrt(1 / n.e + 1 / n.c + TE^2 / (2 * N))) } else if (method.smd == "Hedges") { if (exact.smd) { J <- function(x) exp(lgamma(x / 2) - log(sqrt(x / 2)) - lgamma((x - 1) / 2)) K <- function(x) 1 - (x - 2) / (x * J(x)^2) } else { J <- function(x) 1 - 3 / (4 * x - 1) K <- function(x) 1 / (2 * (x - 1.94)) } TE <- J(N - 2) * smd seTE <- ifelse(npn.n, NA, sqrt(1 / n.e + 1 / n.c + TE^2 * K(N - 2))) } else if (method.smd == "Glass") { n.g <- if (sd.glass == "control") n.c else n.e TE 
<- smd seTE <- ifelse(npn.n, NA, sqrt(1 / n.e + 1 / n.c + TE^2 / (2 * (n.g - 1)))) } seTE[is.na(TE)] <- NA } else if (sm == "ROM") { npn.mean <- npn(mean.e) | npn(mean.c) if (any(npn.mean) & warn) warning("Note, studies with negative or zero means get no weight in meta-analysis.") TE <- ifelse(npn.n | npn.mean, NA, log(mean.e / mean.c)) if (pooledvar) seTE <- ifelse(npn.n, NA, sqrt(var.pooled * (1 / (n.e * mean.e^2) + 1 / (n.c * mean.c^2)))) else seTE <- ifelse(npn.n | npn.mean, NA, sqrt(sd.e^2 / (n.e * mean.e^2) + sd.c^2 / (n.c * mean.c^2))) seTE[is.na(TE)] <- NA } sel <- sd.e <= 0 | sd.c <= 0 if (any(sel, na.rm = TRUE) & warn) warning("Note, studies with non-positive values for sd.e or sd.c get no weight in meta-analysis.") seTE[sel] <- NA if (sm == "SMD") TE[sel] <- NA multi.level <- FALSE sel.ni <- !is.infinite(TE) & !is.infinite(seTE) if (!missing.id && length(unique(id[sel.ni])) != length(id[sel.ni])) multi.level <- TRUE if (multi.level) { if (!(method.tau %in% c("REML", "ML"))) { if (!missing(method.tau)) warning("For three-level model, argument 'method.tau' set to \"REML\".", call. = FALSE) method.tau <- "REML" } if (by & !tau.common) { if (!missing(tau.common)) warning("For three-level model, argument 'tau.common' set to ", "\"TRUE\".", call. = FALSE) tau.common <- TRUE } } m <- metagen(TE, seTE, studlab, exclude = if (missing.exclude) NULL else exclude, id = id, sm = sm, level = level, level.ma = level.ma, fixed = fixed, random = random, overall = overall, overall.hetstat = overall.hetstat, hakn = hakn, adhoc.hakn = adhoc.hakn, method.tau = method.tau, method.tau.ci = method.tau.ci, tau.preset = tau.preset, TE.tau = TE.tau, tau.common = FALSE, prediction = prediction, level.predict = level.predict, method.bias = method.bias, backtransf = backtransf, text.fixed = text.fixed, text.random = text.random, text.predict = text.predict, text.w.fixed = text.w.fixed, text.w.random = text.w.random, title = title, complab = complab, outclab = outclab, label.e = label.e, label.c = label.c, label.left = label.left, label.right = label.right, keepdata = FALSE, warn = warn, control = control) if (by & tau.common) { hcc <- hetcalc(TE, seTE, method.tau, "", TE.tau, level.ma, subgroup, control) } res <- list(n.e = n.e, mean.e = mean.e, sd.e = sd.e, n.c = n.c, mean.c = mean.c, sd.c = sd.c, pooledvar = pooledvar, method.smd = method.smd, sd.glass = sd.glass, exact.smd = exact.smd, method.ci = method.ci) if (export.median.e) res$median.e <- median.e if (!missing.q1.e) res$q1.e <- q1.e if (!missing.q3.e) res$q3.e <- q3.e if (!missing.min.e) res$min.e <- min.e if (!missing.max.e) res$max.e <- max.e if (export.median.c) res$median.c <- median.c if (!missing.q1.c) res$q1.c <- q1.c if (!missing.q3.c) res$q3.c <- q3.c if (!missing.min.c) res$min.c <- min.c if (!missing.max.c) res$max.c <- max.c res$approx.sd.e <- approx.sd.e res$approx.sd.c <- approx.sd.c res$approx.mean.e <- approx.mean.e res$approx.mean.c <- approx.mean.c m$n.e <- NULL m$n.c <- NULL res <- c(res, m) res$n.e.pooled <- sum(res$n.e, na.rm = TRUE) res$n.c.pooled <- sum(res$n.c, na.rm = TRUE) res$method.mean <- method.mean res$method.sd <- method.sd res$call <- match.call() if (keepdata) { res$data <- data if (!missing.subset) res$subset <- subset } if (method.ci == "t") { res$lower <- ci.study$lower res$upper <- ci.study$upper res$statistic <- ci.study$statistic res$pval <- ci.study$p res$df <- ci.study$df } else if (!is.null(res$df) && all(is.na(res$df))) res$df <- NULL if (all(res$approx.mean.e == "")) { res$approx.mean.e <- NULL 
res$data$.approx.mean.e <- NULL } if (all(res$approx.sd.e == "")) { res$approx.sd.e <- NULL res$data$.approx.sd.e <- NULL } if (all(res$approx.mean.c == "")) { res$approx.mean.c <- NULL res$data$.approx.mean.c <- NULL } if (all(res$approx.sd.c == "")) { res$approx.sd.c <- NULL res$data$.approx.sd.c <- NULL } class(res) <- c(fun, "meta") if (by) { res$subgroup <- subgroup res$subgroup.name <- subgroup.name res$print.subgroup.name <- print.subgroup.name res$sep.subgroup <- sep.subgroup res$test.subgroup <- test.subgroup res$tau.common <- tau.common if (!tau.common) res <- c(res, subgroup(res)) else if (!is.null(tau.preset)) res <- c(res, subgroup(res, tau.preset)) else res <- c(res, subgroup(res, hcc$tau.resid)) if (!tau.common || !is.null(tau.preset)) { res$tau2.resid <- res$lower.tau2.resid <- res$upper.tau2.resid <- NA res$tau.resid <- res$lower.tau.resid <- res$upper.tau.resid <- NA res$Q.resid <- res$df.Q.resid <- res$pval.Q.resid <- NA res$H.resid <- res$lower.H.resid <- res$upper.H.resid <- NA res$I2.resid <- res$lower.I2.resid <- res$upper.I2.resid <- NA } else { res$Q.w.random <- hcc$Q.resid res$df.Q.w.random <- hcc$df.Q.resid res$pval.Q.w.random <- hcc$pval.Q.resid res$tau2.resid <- hcc$tau2.resid res$lower.tau2.resid <- hcc$lower.tau2.resid res$upper.tau2.resid <- hcc$upper.tau2.resid res$tau.resid <- hcc$tau.resid res$lower.tau.resid <- hcc$lower.tau.resid res$upper.tau.resid <- hcc$upper.tau.resid res$sign.lower.tau.resid <- hcc$sign.lower.tau.resid res$sign.upper.tau.resid <- hcc$sign.upper.tau.resid res$Q.resid <- hcc$Q.resid res$df.Q.resid <- hcc$df.Q.resid res$pval.Q.resid <- hcc$pval.Q.resid res$H.resid <- hcc$H.resid res$lower.H.resid <- hcc$lower.H.resid res$upper.H.resid <- hcc$upper.H.resid res$I2.resid <- hcc$I2.resid res$lower.I2.resid <- hcc$lower.I2.resid res$upper.I2.resid <- hcc$upper.I2.resid } res$event.e.w <- NULL res$event.c.w <- NULL res$event.w <- NULL res$n.w <- NULL res$time.e.w <- NULL res$time.c.w <- NULL } res$comb.fixed <- fixed res$comb.random <- random res$level.comb <- level.ma if (by) { res$byvar <- subgroup res$bylab <- subgroup.name res$print.byvar <- print.subgroup.name res$byseparator <- sep.subgroup } class(res) <- c(fun, "meta") res }
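# Illustrative only: a minimal call to metacont() above on a made-up
# three-study data set of group sizes, means and SDs; sm = "MD" requests a
# mean-difference meta-analysis. Column names and values are invented, and the
# call assumes the rest of the meta package's helpers (gs(), metagen(),
# setstudlab(), ...) are available.
toy_meta <- data.frame(
  study    = c("A", "B", "C"),
  n.trt    = c(25, 40, 32), mean.trt = c(10.2,  9.8, 11.0), sd.trt = c(2.1, 2.5, 1.9),
  n.ctl    = c(24, 38, 30), mean.ctl = c( 9.1,  9.5, 10.1), sd.ctl = c(2.0, 2.4, 2.2)
)
ma_md <- metacont(n.e = n.trt, mean.e = mean.trt, sd.e = sd.trt,
                  n.c = n.ctl, mean.c = mean.ctl, sd.c = sd.ctl,
                  studlab = study, data = toy_meta, sm = "MD")
# summary(ma_md)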
"jsmurf" <- function(y, x = 1:length(y), x0 = 2 * x[1] - x[2], q, alpha = 0.05, r = 4e3, lengths = 2^(floor(log2(length(y))):floor(log2(max(length(param$kern) + 1, 1 / param$param$cutoff)))), param, rm.out = FALSE, jumpint = confband, confband = FALSE) { sdi <- sdrobnorm(y, lag = length(param$kern) + 1) if(rm.out) { cutmin <- quantile(y, 1e-4) + qnorm(1 / length(y)) * sdi cutmax <- quantile(y, 1 - 1e-4) - qnorm(1 / length(y)) * sdi outlier <- y < cutmin | y > cutmax if(any(outlier)) { ycut <- y[-neighbours(which(outlier), 1:length(y), length(param$kern) - 1)] xcut <- x[-neighbours(which(outlier), 1:length(y), length(param$kern) - 1)] } else { ycut <- y xcut <- x } } else { ycut <- y xcut <- x } if(missing(q)) { if(is.null(r)) stop("q or r need to be specified!") q <- kMRC.quant(1 - alpha, length(y), r, param$kern, lengths) } else { alpha <- NA } bs <- bounds.MRC(ycut, q = q, lengths = lengths, family = 'gaussKern', param = param) allblocks <- stepbound.default(ycut, bounds = bs, x = xcut, x0 = x0, family = 'gaussKern', param = param, refit = FALSE, jumpint = jumpint, confband = confband) if(!is.na(alpha)) attr(allblocks, "alpha") <- alpha attr(allblocks, "q") <- q attr(allblocks, "sd") <- sdi allblocks }
sign_out_from_shiny <- function( session = shiny::getDefaultReactiveDomain(), redirect_page = "?page=sign_in" ) { user <- isolate(session$userData$user()) if (is.null(user)) stop("session$userData$user() does not exist", call. = FALSE) .global_sessions$sign_out(user$hashed_cookie) shiny::updateQueryString( queryString = redirect_page, session = session, mode = "replace" ) }
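# Illustrative only: one possible way to wire sign_out_from_shiny() above to a
# sign-out button inside a Shiny server function. The input id and the
# redirect query string are assumptions for this sketch; the helper itself
# still needs the surrounding session store (.global_sessions) to exist.
server <- function(input, output, session) {
  shiny::observeEvent(input$sign_out, {
    sign_out_from_shiny(session, redirect_page = "?page=sign_in")
    session$reload()
  })
}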
"isfuncCOP" <- function(cop=NULL, para=NULL, delta=0.002, ...) { if(is.null(cop)) { warning("must have copula argument specified, returning NULL") return(NULL) } if(delta < 0 | delta > 0.5) { warning("invalid delta argument specified, returning NULL") return(NULL) } uvs <- seq(delta, 1-delta, by=delta) message("Checking condition C(u,0) = 0 for all u---", appendLF=FALSE) cond <- unique(cop(u=uvs, v=0, cop=cop, para=para, ...)) if(length(cond) != 1 | cond[1] != 0) { message("FALSE.") return(FALSE) } else { message("TRUE.") } message("Checking condition C(0,v) = 0 for all v---", appendLF=FALSE) cond <- unique(cop(u=0, v=uvs, cop=cop, para=para, ...)) if(length(cond) != 1 | cond[1] != 0) { message("FALSE.") return(FALSE) } else { message("TRUE.") } message("Checking condition C(u,1) = u for all u---", appendLF=FALSE) cond <- unique(uvs - cop(u=uvs, v=1, cop=cop, para=para, ...)) if(length(cond) != 1 | cond[1] != 0) { message("FALSE.") return(FALSE) } else { message("TRUE.") } message("Checking condition C(1,v) = v for all v---", appendLF=FALSE) cond <- unique(uvs - cop(u=1, v=uvs, cop=cop, para=para, ...)) if(length(cond) != 1 | cond[1] != 0) { message("FALSE") return(FALSE) } else { message("TRUE.") } message("Checking 2-increasing condition for all (u,v)---", appendLF=FALSE) tmp <- sapply(uvs, function(u) { if(any(densityCOP(u, uvs, cop=cop, para=para, truncate.at.zero=FALSE, ...) < 0)) { message("FALSE.") return(FALSE) }}) message("TRUE.") return(TRUE) }
liuest <- function(formula, data, d = 1.0, scaling = c("centered", "sc", "scaled"), ...) { if (is.null(d)) { d <- 1 } else { d <- d } mf <- model.frame(formula = formula, data = data) x <- model.matrix(attr(mf, "term"), data = mf) y <- model.response(mf) mt <- attr(mf, "terms") p <- ncol(x) n <- nrow(x) if (Inter <- attr(mt, "intercept")) { Xm <- colMeans(x[,-Inter]) Ym <- mean(y) Y <- y - Ym p <- p - 1 X <- x[,-Inter] - rep(Xm, rep(n,p)) } else{ Xm <- colMeans(x) Ym <- mean(y) Y <- y - Ym X <- x - rep(Xm, rep(n,p)) } scaling <- match.arg(scaling) if (scaling == "sc") { Xscale <- (drop(rep(1 / (n - 1), n) %*% X ^ 2) ^ 0.5) * sqrt(n - 1) } else if (scaling == "scaled") { Xscale <- drop(rep(1 / (n - 1), n) %*% X ^ 2) ^ 0.5 } else{ Xscale <- drop(rep(1,p)) } X <- X / rep(Xscale, rep(n,p)) bols <- lm.fit(X , as.matrix(Y))$coefficients coef <- lapply(d, function(d) { (solve(t(X) %*% X + diag(p)) %*% (t(X) %*% X + d * diag(p))) %*% bols }) coef <- do.call(cbind, coef) rownames(coef) <- colnames(X) colnames(coef) <- paste("d=", d, sep = "") lfit <- apply(coef, 2, function(x) { X %*% x }) list( coef = coef, xscale = Xscale, xs = X, Inter = Inter, xm = Xm, y = Y, scaling = scaling, call = match.call(), d = d, lfit = lfit, mf =mf, terms = mt ) }
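# Illustrative only: fitting the Liu estimator above on the built-in mtcars
# data for two shrinkage values d; the returned coefficient matrix has one
# column per value of d. Predictors and d values are arbitrary choices.
fit <- liuest(mpg ~ wt + hp + disp, data = mtcars, d = c(0.5, 1), scaling = "sc")
fit$coef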
context("walk_regions") test_that("map_region works as expected", { source(file.path(test_path(), "testdata", "testdata.R")) x <- as_workbook(t_report_stack) style <- openxlsx::createStyle(fgFill = " walk_regions(x, .fun = openxlsx::addStyle, style = style) walk_regions(x, .fun = openxlsx::setColWidths, widths = 2) walk_regions(x, .fun = openxlsx::setRowHeights, heights = 32) expect_identical( x$rowHeights[[1]], structure(c("32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32"), .Names = c("1", "2", "3", "4", "5", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "30", "31", "32", "33", "34", "35", "38", "39", "40", "41", "42", "43", "44", "45", "48", "49", "50", "52", "53", "54", "55", "56", "57", "58", "59", "61", "63", "64")) ) expect_identical( x$rowHeights[[2]], structure(c("32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32", "32"), .Names = c("1", "2", "3", "4", "5", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "35", "36", "37", "38", "39", "40", "48", "49", "50", "51", "52", "53", "54", "55", "63", "64", "65", "67", "68", "69", "70", "71", "72", "73", "74", "76", "78", "79")) ) expect_identical( x$colWidths, list(structure(c("2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2"), .Names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"), hidden = c("0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0")), structure(c("2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2"), .Names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"), hidden = c("0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0"))) ) expect_identical( length(openxlsx::getStyles(x)), 1066L ) })
theme_graph <- function(base_family = 'Arial Narrow', base_size = 11, background = 'white', foreground = NULL, border = TRUE, text_colour = 'black', bg_text_colour = text_colour, fg_text_colour = text_colour, title_family = base_family, title_size = 18, title_face = 'bold', title_margin = 10, title_colour = bg_text_colour, subtitle_family = base_family, subtitle_size = 12, subtitle_face = 'plain', subtitle_margin = 15, subtitle_colour = bg_text_colour, strip_text_family = base_family, strip_text_size = 10, strip_text_face = 'bold', strip_text_colour = fg_text_colour, caption_family = base_family, caption_size = 9, caption_face = 'italic', caption_margin = 10, caption_colour = bg_text_colour, plot_margin = margin(30, 30, 30, 30)) { style <- theme_bw(base_size = base_size, base_family = base_family) style <- style + theme( text = element_text(colour = text_colour), plot.title = element_text( family = title_family, size = title_size, face = title_face, colour = title_colour, margin = margin(b = title_margin) ), plot.subtitle = element_text( family = subtitle_family, size = subtitle_size, face = subtitle_face, colour = subtitle_colour, margin = margin(b = subtitle_margin) ), plot.caption = element_text( family = caption_family, size = caption_size, face = caption_face, colour = caption_colour, margin = margin(t = caption_margin) ), strip.text = element_text( family = strip_text_family, size = strip_text_size, face = strip_text_face, colour = strip_text_colour ), plot.margin = plot_margin, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank(), panel.background = element_blank(), axis.title = element_blank(), axis.text = element_blank(), axis.line = element_blank(), axis.ticks = element_blank(), panel.grid = element_blank(), strip.background = if (is.null(foreground)) element_blank() else element_rect(fill = foreground, colour = foreground), plot.background = if (is.null(background)) element_blank() else element_rect(fill = background, colour = NA), panel.border = if (border && !is.null(foreground)) element_rect(fill = NA, colour = foreground) else element_blank() ) style } th_foreground <- function(foreground = 'grey80', fg_text_colour = NULL, border = FALSE) { th <- theme( strip.background = if (is.null(foreground)) element_blank() else element_rect(fill = foreground, colour = foreground), panel.border = if (border && !is.null(foreground)) element_rect(fill = NA, colour = foreground) else element_blank() ) if (!is.null(fg_text_colour)) { th <- th + theme(strip.text = element_text(colour = fg_text_colour)) } th } th_no_axes <- function() { theme( panel.grid = element_blank(), axis.title = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.line = element_blank(), NULL ) } set_graph_style <- function(family = 'Arial Narrow', face = 'plain', size = 11, text_size = 11, text_colour = 'black', ...) { style <- theme_graph( base_family = family, base_size = size, text_colour = text_colour, ... 
) theme_set(style) text_size <- text_size / .pt update_geom_defaults(GeomEdgePath, list( family = family, fontface = face, label_size = text_size )) update_geom_defaults(GeomText, list( family = family, fontface = face, size = text_size )) update_geom_defaults(GeomTextRepel, list( family = family, fontface = face, size = text_size )) update_geom_defaults(GeomLabel, list( family = family, fontface = face, size = text_size )) update_geom_defaults(GeomLabelRepel, list( family = family, fontface = face, size = text_size )) } unset_graph_style <- function() { style <- theme_gray() theme_set(style) update_geom_defaults(GeomEdgePath, list( family = '', fontface = 1, label_size = 3.88 )) update_geom_defaults(GeomText, list( family = '', fontface = 1, size = 3.88 )) update_geom_defaults(GeomTextRepel, list( family = '', fontface = 1, size = 3.88 )) update_geom_defaults(GeomLabel, list( family = '', fontface = 1, size = 3.88 )) update_geom_defaults(GeomLabelRepel, list( family = '', fontface = 1, size = 3.88 )) update_geom_defaults(GeomAxisHive, list( family = '', fontface = 1, size = 3.88 )) }
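# Hedged usage sketch (not part of the original source): theme_graph() only
# needs ggplot2 to be attached, so it can be previewed on an ordinary plot;
# 'Arial Narrow' may be missing on some systems, so a stock family is passed.
library(ggplot2)
ggplot(mtcars, aes(wt, mpg)) +
  geom_point() +
  theme_graph(base_family = "sans")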
write_command_via_csv <- function(x, path_terminal = "", fileName = "SystemControl") {
  if (exists("x") && nrow(x) != 0) {
    for (i in 1:nrow(x)) {
      composed_name <- paste0(fileName, as.character(x[i, 1]), ".csv")
      f_name <- file.path(path_terminal, composed_name)
      write.csv(x[i, ], file = f_name, row.names = FALSE)
    }
  }
}
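# Hedged usage sketch (not part of the original source): each row of the
# command table is written to its own CSV, named after the value in the first
# column, e.g. "SystemControl1.csv" and "SystemControl2.csv" in tempdir().
example_commands <- data.frame(id = c(1, 2), action = c("start", "stop"))
write_command_via_csv(example_commands, path_terminal = tempdir())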
test_missMDA_EM <- function(X_hat, list) { index <- lapply(list, is.na) missMDA_EM_imp <- function(X) { ncomp <- missMDA::estim_ncpPCA(X, ncp.max = ncol(X) - 2) res.imp <- missMDA::imputePCA(X, ncp = ncomp$ncp, method = "EM") imp_matrix <- res.imp$completeObs list(Imputed = imp_matrix) } print("missMDA EM imputation - in progress") start_time <- Sys.time() log_output <- utils::capture.output(results <- lapply(list, missMDA_EM_imp)) end_time <- Sys.time() time <- as.numeric(end_time - start_time, units = "mins") orig_MCAR <- X_hat[index[[1]]] orig_MAR <- X_hat[index[[2]]] orig_MNAR <- X_hat[index[[3]]] if (length(index) == 4) orig_MAP <- X_hat[index[[4]]] imp_MCAR <- results$MCAR_matrix$Imputed[index[[1]]] imp_MAR <- results$MAR_matrix$Imputed[index[[2]]] imp_MNAR <- results$MNAR_matrix$Imputed[index[[3]]] if (length(index) == 4) imp_MAP <- results$MAP_matrix$Imputed[index[[4]]] rmse_MCAR <- sqrt(mean((orig_MCAR - imp_MCAR)^2)) rmse_MAR <- sqrt(mean((orig_MAR - imp_MAR)^2)) rmse_MNAR <- sqrt(mean((orig_MNAR - imp_MNAR)^2)) if (length(index) == 4) rmse_MAP <- sqrt(mean((orig_MAP - imp_MAP)^2)) mae_MCAR <- mean(abs(orig_MCAR - imp_MCAR)) mae_MAR <- mean(abs(orig_MAR - imp_MAR)) mae_MNAR <- mean(abs(orig_MNAR - imp_MNAR)) if (length(index) == 4) mae_MAP <- mean(abs(orig_MAP - imp_MAP)) ks_MCAR <- stats::ks.test(orig_MCAR, imp_MCAR, exact=TRUE)$statistic ks_MAR <- stats::ks.test(orig_MAR, imp_MAR, exact=TRUE)$statistic ks_MNAR <- stats::ks.test(orig_MNAR, imp_MNAR, exact=TRUE)$statistic if (length(index) == 4) ks_MAP <- stats::ks.test(orig_MAP, imp_MAP, exact=TRUE)$statistic if (length(index) == 4) list(Comp_time = time, MCAR_RMSE = rmse_MCAR, MAR_RMSE = rmse_MAR, MNAR_RMSE = rmse_MNAR, MAP_RMSE = rmse_MAP, MCAR_MAE = mae_MCAR, MAR_MAE = mae_MAR, MNAR_MAE = mae_MNAR, MAP_MAE = mae_MAP, MCAR_KS = ks_MCAR, MAR_KS = ks_MAR, MNAR_KS = ks_MNAR, MAP_KS = ks_MAP) else list(Comp_time = time, MCAR_RMSE = rmse_MCAR, MAR_RMSE = rmse_MAR, MNAR_RMSE = rmse_MNAR, MCAR_MAE = mae_MCAR, MAR_MAE = mae_MAR, MNAR_MAE = mae_MNAR, MCAR_KS = ks_MCAR, MAR_KS = ks_MAR, MNAR_KS = ks_MNAR) }
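# Hedged usage sketch (not part of the original source; requires the missMDA
# package). test_missMDA_EM() expects a complete matrix plus a named list of
# copies with missing values; here all three copies use simple random
# missingness purely for illustration.
if (requireNamespace("missMDA", quietly = TRUE)) {
  set.seed(1)
  X_complete <- matrix(rnorm(200), ncol = 5)
  add_missing <- function(X, p = 0.1) {
    X[sample(length(X), p * length(X))] <- NA
    X
  }
  miss_list <- list(MCAR_matrix = add_missing(X_complete),
                    MAR_matrix  = add_missing(X_complete),
                    MNAR_matrix = add_missing(X_complete))
  test_missMDA_EM(X_complete, miss_list)
}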
library(dplyr) library(tidyr) test_that("one-parameter eye plots work", { skip_if_no_vdiffr() set.seed(123) df = data.frame(x = rnorm(1000), y = 1) p = ggplot(df, aes(x = x, y = y)) expect_warning(vdiffr::expect_doppelganger("one-parameter horizontal eye", p + geom_eyeh(n = 20) ), "Deprecated") expect_warning(vdiffr::expect_doppelganger("one-parameter horizontal half-eye", p + geom_halfeyeh(n = 20) ), "Deprecated") p = ggplot(df, aes(x = y, y = x)) expect_warning(vdiffr::expect_doppelganger("one-parameter vertical eye", p + geom_eye(n = 20) ), "Deprecated") vdiffr::expect_doppelganger("one-parameter vertical halfeye", p + stat_halfeye(n = 20)) p = ggplot(df, aes(x = x, y = y)) expect_warning(vdiffr::expect_doppelganger("one-parameter horizontal eye (mode_hdi)", p + stat_eyeh(point_interval = mode_hdi, n = 20) ), "Deprecated") }) test_that("two-parameter eye plots work", { skip_if_no_vdiffr() set.seed(123) df = data.frame(x = rnorm(1000), y = "a", y_int = 1) %>% rbind(data.frame(x = rnorm(1000, 1), y = "b", y_int = 2)) p = ggplot(df, aes(x = x, y = y)) expect_warning(vdiffr::expect_doppelganger("two-parameter (factor) horizontal half-eye", p + stat_halfeyeh(scale = 0.5, n = 20) ), "Deprecated") vdiffr::expect_doppelganger("two-parameter (factor) horizontal eye (fill)", p + stat_eye(aes(fill = y), scale = 0.5, n = 20)) })
colVarsC <- function(x) {
  if (!is.matrix(x)) x <- as.matrix(x)
  CN <- colnames(x)
  if (NCOL(x) == 1) stop("colVarsC: 'x' should have more than one column!")
  OUT <- .Call("C_colVarsC", x, PACKAGE = "propagate")
  names(OUT) <- CN
  OUT
}
rowVarsC <- function(x) {
  if (!is.matrix(x)) x <- as.matrix(x)
  RN <- rownames(x)
  if (NCOL(x) == 1) stop("rowVarsC: 'x' should have more than one column!")
  OUT <- .Call("C_rowVarsC", x, PACKAGE = "propagate")
  names(OUT) <- RN
  OUT
}
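# Hedged usage sketch (not part of the original source): both helpers call
# compiled routines shipped with the 'propagate' package, so the example is
# guarded so it only runs where that package is installed.
if (requireNamespace("propagate", quietly = TRUE)) {
  m <- matrix(rnorm(20), nrow = 4, ncol = 5)
  colVarsC(m)
  rowVarsC(m)
}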
EQsampleDensity <- function(sx, boundaryVec, NoNote=215, size=100){ n<-NoNote SXdim<-length(sx[1,]) space<-NULL for(i in 1:SXdim){ a<-boundaryVec[2*i-1] b<-boundaryVec[2*i] d<-(b-a)/(n-1) space<-c(space, d) } d<-min(space) XP<-NULL vectorL<-NULL for(i in 1:SXdim){ a<-boundaryVec[2*i-1] b<-boundaryVec[2*i] xposition<-seq(from=a, to=b, by=d) L<-length(xposition) XP<-c(XP,xposition) vectorL<-c(vectorL,L) } NotePositions<-positions(XP, vectorL) H<-ks::Hpi(x=sx) fhat<-ks::kde(x=sx, H=H) prob<-predict(fhat,x=NotePositions) outSample<-actualPosition(vectorL, prob, boundaryVec, size) return(outSample) }
ngraph<-function(el, vertexlabels, xyz=NULL, diam=NULL, directed=TRUE, weights=FALSE, vertex.attributes=NULL, graph.attributes=NULL){ if(any(duplicated(vertexlabels))) stop("Vertex labels must be unique!") rawel=match(t(el), vertexlabels) if(isTRUE(weights) && !is.null(xyz)){ rawel.mat=matrix(rawel, nrow=2) starts=rawel.mat[1,] stops=rawel.mat[2,] vecs=xyz[stops, , drop=FALSE] - xyz[starts, , drop=FALSE] weights=sqrt(rowSums(vecs*vecs)) } g=igraph::graph(rawel, n=length(vertexlabels), directed=directed) igraph::V(g)$label=vertexlabels if(is.numeric(weights)) igraph::E(g)$weight=weights if(!is.null(xyz)) { if(ncol(xyz)==4 && is.null(diam)){ diam=xyz[,4] xyz=xyz[, 1:3, drop=FALSE] } xyzmatrix(g)<-xyz } if(!is.null(diam)) igraph::V(g)$diam=diam for(n in names(vertex.attributes)){ g=igraph::set.vertex.attribute(g,name=n,value=vertex.attributes[[n]]) } for(n in names(graph.attributes)){ g=igraph::set.graph.attribute(g,name=n,value=graph.attributes[[n]]) } class(g)=c("ngraph",class(g)) g } as.ngraph<-function(x, ...) UseMethod('as.ngraph') as.ngraph.ngraph<-function(x, ...) x as.ngraph.data.frame<-function(x, directed=TRUE, ...){ el=x[x$Parent!=-1,c("Parent","PointNo")] ngraph(data.matrix(el), x$PointNo, directed=directed, xyz=xyzmatrix(x), diam=x$W, ...) } as.ngraph.neuron<-function(x, directed=TRUE, method=c('swc','seglist'), ...){ method=match.arg(method, several.ok=TRUE) if('swc'%in%method && !is.null(x$d$Parent) && !is.null(x$d$PointNo)){ as.ngraph(x$d, directed=directed, ...) } else { as.ngraph(seglist2swc(x)$d, directed=directed, ...) } } as.ngraph.igraph<-function(x, directed=TRUE, root, mode=c('out','in'), ...){ if(inherits(x,'ngraph')) if(igraph::is.directed(x)==directed) return(x) if(igraph::is.directed(x) && !directed) x=as.undirected(x, ...) else if(!igraph::is.directed(x) && directed) x=as.directed.usingroot(x, root, mode=mode, ...) 
if(!inherits(x,'ngraph')){ class(x)=c("ngraph",class(x)) } x } as.directed.usingroot<-function(g, root, mode=c('out','in')){ mode=match.arg(mode) if(!igraph::is.directed(g)) dg=igraph::as.directed(g, mode='arbitrary') else dg=g dfs=igraph::graph.dfs(dg, root, unreachable=FALSE, dist=TRUE, neimode='all') el=igraph::get.edgelist(dg) connected_vertices=which(is.finite(dfs$order)) edges_to_check=which(el[,1]%in%connected_vertices) parent.dists=dfs$dist[el[edges_to_check,1]] child.dists=dfs$dist[el[edges_to_check,2]] parent_closer=parent.dists<child.dists same_dist=parent.dists==child.dists parent_further=parent.dists>child.dists if(any(same_dist)) warning(sum(same_dist)," edges connect vertices that are the same distance from the root => cycles.") edges_to_flip <- edges_to_check[if(mode=='out') parent_further else parent_closer] dg=igraph::delete.edges(dg,edges_to_flip) dg=igraph::add.edges(dg,t(el[edges_to_flip,2:1])) dg } spine <- function(n, UseStartPoint=FALSE, SpatialWeights=TRUE, invert=FALSE, rval=c("neuron", "length", "ids")) { ng <- as.ngraph(n, weights=SpatialWeights) rval=match.arg(rval) if(invert && rval=="length") stop("invert=TRUE is not implemented for rval='length'") if(UseStartPoint) { lps=shortest.paths(graph = ng, n$StartPoint, to = n$EndPoints, mode = 'all') if(rval=='length') return(max(lps)) to=n$EndPoints[which.max(lps)] longestpath=get.shortest.paths(ng, from = n$StartPoint, to = to, mode = 'all')$vpath[[1]] } else { if(rval=='length') { return(diameter(ng, directed=FALSE)) } else { longestpath=get.diameter(ng, directed=FALSE) } } if(rval=='ids') { if(invert) { ie=setdiff(igraph::E(ng), igraph::E(ng, path=longestpath)) edgemat=igraph::get.edges(ng, ie) return(unique(as.integer(t(edgemat)))) } else return(as.integer(longestpath)) } prune_edges(ng, edges = longestpath, invert = !invert) } segmentgraph<-function(x, weights=TRUE, segids=FALSE, exclude.isolated=FALSE, include.xyz=FALSE, reverse.edges=FALSE){ g=graph.empty() pointnos=x$d$PointNo sts=as.seglist(x, all=TRUE, flatten = TRUE) topntail<-function(x) if(length(x)==1) x else x[c(1,length(x))] simple_sts=lapply(sts,topntail) all_nodes=sort(unique(unlist(simple_sts))) g=graph.empty(n=length(all_nodes)) igraph::V(g)$label=pointnos[all_nodes] igraph::V(g)$vid=all_nodes el=EdgeListFromSegList(simple_sts) if(reverse.edges) el=el[,2:1] elred=match(t(el),all_nodes) if(identical(segids, FALSE)) { segids=NULL } else if(isTRUE(segids)){ segids=seq_along(sts) } if(weights){ weights=seglengths(x, all=TRUE) g=add.edges(g, elred, weight=weights, segid=segids) } else { g=add.edges(g, elred, segid=segids) } if(include.xyz){ igraph::V(g)$x=x$d$X[all_nodes] igraph::V(g)$y=x$d$Y[all_nodes] igraph::V(g)$z=x$d$Z[all_nodes] } if(exclude.isolated){ isolated_vertices=igraph::V(g)[igraph::degree(g)==0] g=igraph::delete.vertices(graph=g,isolated_vertices) } g } strahler_order<-function(x){ s=segmentgraph(x, weights = F) roots=rootpoints(s, original.ids=FALSE) if(length(roots)>1) stop("strahler_order not yet defined for multiple subtrees") b=graph.bfs(s, root=roots, neimode = 'out', unreachable=F, father=T) n=neighborhood(s, 1, mode='out') so_red_nodes=integer(vcount(s)) for(i in rev(b$order)) { children=setdiff(n[[i]], i) if(length(children)==0L) { so_red_nodes[i]=1L next } child_orders=so_red_nodes[children] if(length(children)==1L) { so_red_nodes[i]=max(child_orders) } else if(max(child_orders)==min(child_orders)){ so_red_nodes[i]=max(child_orders)+1L } else { so_red_nodes[i]=max(child_orders) } } so_orig_nodes=integer(length(nrow(x$d))) 
sts=as.seglist(x, all=TRUE, flatten = TRUE) so_segs=integer(length(sts)) svids=V(s)$vid topntail<-function(x) x[c(1L,length(x))] segendmat=sapply(sts, topntail) idxs=apply(segendmat, 1, match, svids) for(i in seq_along(sts)){ segends=segendmat[,i] so_segends=so_red_nodes[idxs[i,]] so_orig_nodes[segends]=so_segends so_this_seg=min(so_segends) so_segs[i]=so_this_seg internal=setdiff(sts[[i]], segends) if(length(internal)) { so_orig_nodes[internal]=so_this_seg } } list(points=so_orig_nodes, segments=so_segs) } prune_strahler<-function(x, orderstoprune=1:2, ...) { tryCatch( prune_vertices(x, which(strahler_order(x)$points %in% orderstoprune), ...), error = function(c) stop(paste0("No points left after pruning. ", "Consider lowering orders to prune!")) ) } prune_vertices<-function(x, verticestoprune, invert=FALSE, ...) { g=as.ngraph(x) if(inherits(verticestoprune, "igraph.vs")) verticestoprune=as.integer(verticestoprune) if(invert) { nvertices=nrow(xyzmatrix(x)) verticestoprune=setdiff(seq_len(nvertices), verticestoprune) } dg=igraph::delete.vertices(g, verticestoprune) as.neuron(as.ngraph(dg), ...) } prune_edges<-function(x, edges, invert=FALSE, ...) { g=as.ngraph(x) if(!inherits(edges, "igraph.es")){ if(!is.numeric(edges)) stop("I can't understand the edges you have given me!") if(is.matrix(edges)){ if(ncol(edges)!=2) stop("Edge matrix must have 2 columns!") } else { edges=cbind(edges[-length(edges)], edges[-1]) } edges=igraph::E(g, P = as.vector(t(edges)), directed = FALSE) } if(invert) edges=setdiff(igraph::E(g), edges) dg=igraph::delete.edges(g, edges = edges) dg=igraph::delete.vertices(dg, which(igraph::degree(dg, mode='all')==0)) as.neuron(as.ngraph(dg), ...) } EdgeListFromSegList<-function(SegList){ lsl=sapply(SegList,length) sl=SegList[lsl>1] lsl=lsl[lsl>1] ends=unlist(lapply(sl,function(x) x[-1])) starts=unlist(lapply(sl,function(x) x[-length(x)])) cbind(starts,ends) }
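# Hedged usage sketch (not part of the original source): EdgeListFromSegList()
# is plain R, so it can be exercised on a toy seglist; it turns a list of
# point-index paths into a 2-column edge matrix and drops single-point
# segments such as the trailing 5 below.
EdgeListFromSegList(list(c(1, 2, 3), c(3, 4), 5))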
str(dip2) bs1 <- bootstrap_f2(data = dip2[dip2$batch %in% c("b0", "b4"), ], tcol = 5:8, grouping = "batch", rand_mode = "complete", R = 200, new_seed = 421, use_EMA = "no") pbs1 <- plot(bs1) class(bs1) class(pbs1) bs2 <- bootstrap_f2(data = dip2[dip2$batch %in% c("b0", "b4"), ], tcol = 5:8, grouping = "batch", rand_mode = "individual", R = 200, new_seed = 421, use_EMA = "no") plot(bs2)
import <- function(file, format, setclass, which, ...) { if (grepl("^http.*://", file)) { file <- remote_to_local(file, format = format) } if ((file != "clipboard") && !file.exists(file)) { stop("No such file") } if (grepl("\\.zip$", file)) { if (missing(which)) { file <- parse_zip(file) } else { file <- parse_zip(file, which = which) } } else if(grepl("\\.tar", file)) { if (missing(which)) { which <- 1 } file <- parse_tar(file, which = which) } if (missing(format)) { fmt <- get_ext(file) if (fmt %in% c("gz", "gzip")) { fmt <- tools::file_ext(tools::file_path_sans_ext(file, compression = FALSE)) file <- gzfile(file) } else { fmt <- get_type(fmt) } } else { fmt <- get_type(format) } args_list <- list(...) class(file) <- c(paste0("rio_", fmt), class(file)) if (missing(which)) { x <- .import(file = file, ...) } else { x <- .import(file = file, which = which, ...) } if (inherits(file, c("rio_rdata", "rio_rds", "rio_json"))) { return(x) } if (missing(setclass) || is.null(setclass)) { if ("data.table" %in% names(args_list) && isTRUE(args_list[["data.table"]])) { return(set_class(x, class = "data.table")) } else { return(set_class(x, class = "data.frame")) } } else { if ("data.table" %in% names(args_list) && isTRUE(args_list[["data.table"]])) { if (setclass != "data.table") { warning(sprintf("'data.table = TRUE' argument overruled. Using setclass = '%s'", setclass)) return(set_class(x, class = setclass)) } else { return(set_class(x, class = "data.table")) } } else { return(set_class(x, class = setclass)) } } }
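# Hedged usage sketch (not part of the original source): import() leans on the
# package's other helpers (get_ext(), get_type(), the .import() methods and
# set_class()), so this CSV round trip only works inside that package's
# namespace.
tmp_csv <- tempfile(fileext = ".csv")
write.csv(mtcars, tmp_csv, row.names = FALSE)
head(import(tmp_csv))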
so_TargetToolMessages_new <- function() { obj = .Call("r_so_TargetToolMessages_new") } so_TargetToolMessages_copy <- function(self) { .Call("r_so_TargetToolMessages_copy", self) } so_TargetToolMessages_free <- function(self) { .Call("r_so_TargetToolMessages_free", self) } so_TargetToolMessages_ref <- function(self) { .Call("r_so_TargetToolMessages_ref", self) } so_TargetToolMessages_unref <- function(self) { .Call("r_so_TargetToolMessages_unref", self) } so_TargetToolMessages_get_Termination <- function(self) { .Call("r_so_TargetToolMessages_get_Termination", self) } so_TargetToolMessages_set_Termination <- function(self, value) { .Call("r_so_TargetToolMessages_set_Termination", self, value) } so_TargetToolMessages_get_Warnings <- function(self) { .Call("r_so_TargetToolMessages_get_Warnings", self) } so_TargetToolMessages_set_Warnings <- function(self, value) { .Call("r_so_TargetToolMessages_set_Warnings", self, value) } so_TargetToolMessages_get_Errors <- function(self) { .Call("r_so_TargetToolMessages_get_Errors", self) } so_TargetToolMessages_set_Errors <- function(self, value) { .Call("r_so_TargetToolMessages_set_Errors", self, value) } so_TargetToolMessages_get_ElapsedTime <- function(self) { .Call("r_so_TargetToolMessages_get_ElapsedTime", self) } so_TargetToolMessages_set_ElapsedTime <- function(self, value) { .Call("r_so_TargetToolMessages_set_ElapsedTime", self, value) } so_TargetToolMessages_get_OutputFilePath <- function(self) { .Call("r_so_TargetToolMessages_get_OutputFilePath", self) } so_TargetToolMessages_set_OutputFilePath <- function(self, value) { .Call("r_so_TargetToolMessages_set_OutputFilePath", self, value) } so_TargetToolMessages_create_OutputFilePath <- function(self) { .Call("r_so_TargetToolMessages_create_OutputFilePath", self) } so_TargetToolMessages_get_ChainsNumber <- function(self) { .Call("r_so_TargetToolMessages_get_ChainsNumber", self) } so_TargetToolMessages_set_ChainsNumber <- function(self, value) { .Call("r_so_TargetToolMessages_set_ChainsNumber", self, value) } so_TargetToolMessages_get_IterationNumber <- function(self) { .Call("r_so_TargetToolMessages_get_IterationNumber", self) } so_TargetToolMessages_set_IterationNumber <- function(self, value) { .Call("r_so_TargetToolMessages_set_IterationNumber", self, value) } Termination_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_Termination(.self$.cobj) } else { if (!is(value, "character")) { stop("object must be of type 'character'") } so_TargetToolMessages_set_Termination(.self$.cobj, value) } } } Warnings_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_Warnings(.self$.cobj) } else { if (!is(value, "character")) { stop("object must be of type 'character'") } so_TargetToolMessages_set_Warnings(.self$.cobj, value) } } } Errors_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_Errors(.self$.cobj) } else { if (!is(value, "character")) { stop("object must be of type 'character'") } so_TargetToolMessages_set_Errors(.self$.cobj, value) } } } ElapsedTime_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_ElapsedTime(.self$.cobj) } else { if (!is(value, "numeric")) { stop("object must be of type 'numeric'") } so_TargetToolMessages_set_ElapsedTime(.self$.cobj, value) } } } OutputFilePath_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { child = 
so_TargetToolMessages_get_OutputFilePath(.self$.cobj) if (!isnull(child)) { so_ExternalFile_ref(child) so_ExternalFile$new(cobj=child) } } else { if (!is(value, "so_ExternalFile")) { stop("object must be of type 'so_ExternalFile'") } so_TargetToolMessages_set_OutputFilePath(.self$.cobj, value$.cobj) so_ExternalFile_ref(value$.cobj) } } } ChainsNumber_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_ChainsNumber(.self$.cobj) } else { if (!is(value, "numeric")) { stop("object must be of type 'numeric'") } so_TargetToolMessages_set_ChainsNumber(.self$.cobj, value) } } } IterationNumber_acc <- function(value) { if (!isnull(.self$.cobj)) { if (missing(value)) { so_TargetToolMessages_get_IterationNumber(.self$.cobj) } else { if (!is(value, "numeric")) { stop("object must be of type 'numeric'") } so_TargetToolMessages_set_IterationNumber(.self$.cobj, value) } } } so_TargetToolMessages = setRefClass("so_TargetToolMessages", fields=list( Termination = Termination_acc, Warnings = Warnings_acc, Errors = Errors_acc, ElapsedTime = ElapsedTime_acc, OutputFilePath = OutputFilePath_acc, ChainsNumber = ChainsNumber_acc, IterationNumber = IterationNumber_acc, .cobj = "externalptr" ), methods=list( copy = function() { copy = so_TargetToolMessages_copy(.self$.cobj) so_TargetToolMessages$new(cobj=copy) }, initialize = function(cobj) { if (missing(cobj)) { .cobj <<- so_TargetToolMessages_new() } else { .cobj <<- cobj } }, finalize = function() { so_TargetToolMessages_unref(.self$.cobj) } ) )
library(hamcrest) library(stats) set.seed(1) test.rbinom.1 <- function() assertThat({set.seed(1);rbinom(n = 0x1p+0, size = c(0x0p+0, 0x1p+0, 0x1.4p+2, 0x1.4p+3, 0x1.4p+4), prob = c(0x0p+0, 0x1.999999999999ap-4, 0x1.999999999999ap-3, 0x1p-1, 0x1p+0))}, identicalTo(0L)) test.rbinom.2 <- function() assertThat({set.seed(1);rbinom(n = 1:5, size = c(0x0p+0, 0x1p+0, 0x1.4p+2, 0x1.4p+3, 0x1.4p+4), prob = c(0x0p+0, 0x1.999999999999ap-4, 0x1.999999999999ap-3, 0x1p-1, 0x1p+0))}, identicalTo(c(0L, 0L, 1L, 5L, 20L))) test.rbinom.3 <- function() assertThat({set.seed(1);rbinom(n = 0x1.ep+3, size = c(0x0p+0, 0x1p+0, 0x1.4p+2, 0x1.4p+3, 0x1.4p+4), prob = c(0x0p+0, 0x1.999999999999ap-4, 0x1.999999999999ap-3, 0x1p-1, 0x1p+0))}, identicalTo(c(0L, 0L, 1L, 5L, 20L, 0L, 1L, 0L, 7L, 20L, 0L, 1L, 1L, 6L, 20L))) test.rbinom.4 <- function() assertThat({set.seed(1);rbinom(n = numeric(0), size = c(0x0p+0, 0x1p+0, 0x1.4p+2, 0x1.4p+3, 0x1.4p+4), prob = c(0x0p+0, 0x1.999999999999ap-4, 0x1.999999999999ap-3, 0x1p-1, 0x1p+0))}, identicalTo(integer(0))) test.rbinom.5 <- function() assertThat({set.seed(1);rbinom(n = 0x1.8p+1, size = c(NA, 0x1p+0, 0x1.4p+2, 0x1.4p+3, 0x1.4p+4), prob = c(0x0p+0, 0x1.999999999999ap-4, 0x1.999999999999ap-3, 0x1p-1, 0x1p+0))}, identicalTo(c(NA, 0L, 1L)))
pl.mv <- function(port) {
  ggplot(data = port, aes_string(x = "stdev", y = "return")) +
    geom_point() +
    ggtitle("Portfolio simulation") +
    theme(plot.title = element_text(hjust = 0.5)) +
    labs(y = "Returns", x = "Standard Deviation")
}
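# Hedged usage sketch (not part of the original source): pl.mv() assumes a
# portfolio table with 'stdev' and 'return' columns and that ggplot2 is
# attached; the simulated values below are illustrative only.
library(ggplot2)
sim_port <- data.frame(stdev = runif(50, 0.05, 0.25), return = runif(50, 0.02, 0.12))
pl.mv(sim_port)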
graph.diss <- function(G, sim=FALSE, loops=FALSE) { dmat <- GraphDiss2(G) dmat[is.na(dmat)] <- 1 if (!loops) diag(dmat) <- 0 if (sim) dmat <- 1-dmat dmat } GraphDiss2 <- function(G) { Gprod <- G %*% G Gdiag <- Matrix::diag(Gprod) degProd <- Gdiag %*% t(Gdiag) 1 - (Gprod / sqrt(degProd)) } natural.connectivity <- function(G, eig=NULL, norm=TRUE) { if (is.null(eig)) { eig <- eigen(G) } estrind <- exp(eig$values) nc <- log(mean(estrind)) if (norm) { n <- length(estrind) nc <- nc / (n - log(n)) } return(nc) } .adj2elist <- function(G) { if (inherits(G, "sparseMatrix")) { G <- Matrix::triu(G, k=1) return(Matrix::summary(G)[,-3]) } else { p <- ncol(G) return(arrayInd(which(as.logical(triu(G))), c(p,p))) } } gcvec <- function(G, orbind=c(0, 2, 5, 7, 8, 10, 11, 6, 9, 4, 1)+1) { if (length(orbind) < 2) stop("Only one orbit selected, need at least two to calculate graphlet correlations") if (any(orbind > 15)) stop("Only 15 orbits, from 4-node graphlets, can be selected") Elist <- .adj2elist(G) n <- length(orbind) if (ncol(Elist) < 1 || nrow(Elist) < 1) { return(rep(0, n*(n-1)/2)) } p <- ncol(G) gcount <- orca::count4(Elist) buffer <- matrix(0, nrow=p-nrow(gcount), ncol=ncol(gcount)) gcount <- rbind(gcount, buffer) gcor <- suppressWarnings(cor(rbind(gcount[,orbind],1), method='spearman')) gcor[upper.tri(gcor)] } subgraph.centrality <- function(Graph, eigs=NULL, rmdiag=FALSE) { if (rmdiag) diag(Graph) <- 0 if (is.null(eigs)) eigs <- eigen(Graph) l <- eigs$value v <- eigs$vector v2 <- v^2 dl <- l edl <- exp(dl) fb <- v2 %*% edl sinhl <- sinh(dl) fbodd <- v2 %*% sinhl coshl <- cosh(dl) feven <- v2 %*% coshl out <- list(central=fb, odd=fbodd, even=feven, evec=v, evals=l) class(out) <- 'subgraph.centrality' return(out) } .SMA <- function(x) ((mean(abs(x)))) estrada.class <- function(G, evthresh=1e-3) { if (class(G) != "subgraph.centrality") G <- subgraph.centrality(G) ev1 <- G$evec[,1] eval <- G$evals[1] if (length(unique(sign(ev1))) == 1 && G$evals[2] > 0) { ev1 <- G$evec[,2] eval <- G$evals[2] } subgodd <- G$odd Evratio <- pmax(ev1^2 * sinh(eval) / subgodd, evthresh) Evratio[is.nan(Evratio)] <- 0 if (sum(Evratio==evthresh) > (2/3)*length(Evratio)) return(0) delLogEv1 <- log10(sqrt(Evratio)) delSplit <- split(delLogEv1, sign(delLogEv1)) Devs <- lapply(delSplit, .SMA) if (is.null(Devs$`-1`) && is.null(Devs$`1`)) return(0) if (is.null(Devs$`-1`)) Devs$`-1` <- 0 else if (is.null(Devs$`1`)) Devs$`1` <- 0 if (length(Devs) != 2) return(0) if (log10(Devs$`1`+ 1e-3) > -2.1) eclass <- c(3,4) else eclass <- c(1,2) if (log10(Devs$`-1`+1e-3) > -2.1) eclass <- eclass[2] else eclass <- eclass[1] return(eclass) }
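# Hedged usage sketch (not part of the original source): natural.connectivity()
# only needs a dense adjacency matrix and base eigen(); the other helpers above
# additionally rely on the Matrix and orca packages.
adj <- matrix(0, 4, 4)
adj[1, 2] <- adj[2, 1] <- 1
adj[2, 3] <- adj[3, 2] <- 1
adj[3, 4] <- adj[4, 3] <- 1
natural.connectivity(adj)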
aoristic.plot <- function(data1) {
  df3 <- data.frame(matrix(0, ncol = 7, nrow = 24))
  output.row <- 1
  output.col <- 1
  f <- the.hour <- the.day <- rat.hour <- NULL
  for (k in 1:168) {
    cur.column.name <- paste("hour", k, sep = "")
    z <- sum(as.numeric(data1[, cur.column.name]), na.rm = TRUE)
    df3[output.row, output.col] <- trimws(format(round(z, 3), nsmall = 3))
    output.row <- output.row + 1
    if (output.row == 25) {
      output.row <- 1
      output.col <- output.col + 1
    }
  }
  colnames(df3) <- c('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
  Range <- c('0000-0059', '0100-0159', '0200-0259', '0300-0359', '0400-0459', '0500-0559',
             '0600-0659', '0700-0759', '0800-0859', '0900-0959', '1000-1059', '1100-1159',
             '1200-1259', '1300-1359', '1400-1459', '1500-1559', '1600-1659', '1700-1759',
             '1800-1859', '1900-1959', '2000-2059', '2100-2159', '2200-2259', '2300-2359')
  df4 <- data.frame(Range)
  df4 <- data.frame(df4, df3)
  df4 <- df4[, c(1, 3, 4, 5, 6, 7, 8, 2)]
  rm(df3)
  for (j in 2:8) {
    df4[, j] <- as.numeric(df4[, j])
  }
  days <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
  df5 <- df4 %>% select(-c(Range)) %>% tidyr::gather(df4, f) %>%
    mutate(the.hour = rep(0:23, 7)) %>%
    mutate(the.day = rep(days, times = c(24, 24, 24, 24, 24, 24, 24))) %>%
    mutate(rat.hour = (seq(1, 168, by = 1))) %>%
    mutate(f = as.numeric(f))
  a.min <- min(df5$f)
  a.max <- max(df5$f)
  a.med <- ((a.max - a.min) / 2) + a.min
  p <- ggplot(data = df5, aes(x = the.hour, y = reorder(the.day, -(rat.hour)))) +
    geom_tile(aes(fill = f), color = "white") +
    geom_text(aes(label = round(f))) +
    scale_x_continuous(breaks = seq(0, 23, 1)) +
    scale_fill_gradient2(low = scales::muted("lightblue"), mid = "gray80",
                         high = scales::muted("red"), midpoint = a.med,
                         breaks = scales::pretty_breaks(n = 6)) +
    labs(fill = "Frequency", x = "Hour", y = "")
  p <- p + theme(legend.title = element_text(size = 16),
                 legend.key.height = unit(1, "cm"),
                 legend.text = element_text(size = 14),
                 axis.title.x = element_text(size = 14),
                 axis.title.y = element_text(size = 14),
                 axis.text = element_text(size = 14),
                 panel.grid.major = element_blank(),
                 panel.grid.minor = element_blank(),
                 panel.background = element_rect(fill = 'white'),
                 panel.border = element_blank())
  p <- p + ggtitle(" ")
  p <- p + theme(plot.title = element_text(size = 16))
  print(p)
}
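# Hedged usage sketch (not part of the original source): aoristic.plot()
# expects a data frame with hour1..hour168 counts and relies on dplyr, tidyr,
# ggplot2 and scales being attached, as the unqualified calls above suggest.
library(dplyr); library(tidyr); library(ggplot2); library(scales)
set.seed(42)
toy_counts <- as.data.frame(matrix(rpois(168 * 10, lambda = 2), nrow = 10))
names(toy_counts) <- paste0("hour", 1:168)
aoristic.plot(toy_counts)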
ts_seasonal <- function(ts.obj, type = "normal", title = NULL, Ygrid = TRUE, Xgrid = TRUE, last = NULL, palette = "Set1", palette_normal = "viridis") { `%>%` <- magrittr::`%>%` hex_to_rgb <- function(hex){ rgb <- base::paste0(as.numeric(grDevices::col2rgb(hex) %>% base::t()), collapse = ",") return(rgb) } df <- freq <- obj.name <- brewer_palettes <- viridis_palettes <- palette_type <- NULL n_colors <- color_list_normal <- main <- NULL diff_mean <- col_class <- date_col <- numeric_col <- NULL obj.name <- base::deparse(base::substitute(ts.obj)) if(base::is.null(title)){ title <- paste("Seasonality Plot -", obj.name, sep = " ") } else if(!base::is.character(title)){ warning("The 'title' object is not character object, using the default option") title <- paste("Seasonality Plot -", obj.name, sep = " ") } if(!base::is.null(last)){ if(!base::is.numeric(last) | last <= 0){ stop("The 'last' parameter is not valid") } else { if(last != base::round(last)){ stop("The 'last' parameter is not integer") } } } if(type != "normal" && type != "cycle" && type != "box" && type != "all" ){ type <- "normal" warning("The 'type' parameter is invalide,", "using the default option - 'normal'") } if(!base::is.logical(Ygrid)){ Ygrid <- TRUE warning("The 'Ygrid' argument is not a boolean operator, setting it to TRUE") } if(!base::is.logical(Xgrid)){ Xgrid <- TRUE warning("The 'Xgrid' argument is not a boolean operator, setting it to TRUE") } if(stats::is.ts(ts.obj)){ if(stats::is.mts(ts.obj)){ ts.obj <- ts.obj[,1] warning("The input object is a 'mts' class, by defualt will use only the first series as an input") } freq <- stats::frequency(ts.obj) if(base::length(ts.obj) < freq){ stop("The length of the series is smaller than the length of full cycle") } start_main <- stats::start(ts.obj)[1] start_minor <- stats::start(ts.obj)[2] if(freq %in% c(7, 52, 365, 12, 4)){ minor1 <- base::seq(from = start_minor, to = freq, by = 1) minor2 <- base::rep(x = 1:freq, length.out = base::length(ts.obj) - base::length(minor1)) main1 <- base::rep(x = start_main, length.out = base::length(minor1)) main2 <- base::rep(x = (start_main + 1):stats::end(ts.obj)[1], each = freq, len = base::length(ts.obj) - base::length(minor1)) df <- base::data.frame(main = c(main1, main2), minor = c(minor1, minor2), y = base::as.numeric(ts.obj)) if(freq == 12){ df$minor <- base::factor(base::month.abb[df$minor], levels = month.abb) } } } else if(xts::is.xts(ts.obj) | zoo::is.zoo(ts.obj)){ if(!base::is.null(base::ncol(ts.obj))){ if(base::ncol(ts.obj) > 1){ ts.obj <- ts.obj[,1] warning("The input object is a multiple time series object, by defualt will use only the first series as an input") } } if(lubridate::is.Date(zoo::index(ts.obj))){ if(xts::periodicity(ts.obj)$scale == "daily"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::yday(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) } else if(xts::periodicity(ts.obj)$scale == "weekly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::week(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) } else if(xts::periodicity(ts.obj)$scale == "monthly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::month(zoo::index(ts.obj), label = TRUE), y = base::as.numeric(ts.obj[,1])) } else if(xts::periodicity(ts.obj)$scale == "quarterly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::quarter(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) } } else 
if(class(zoo::index(ts.obj)) == "yearqtr" & xts::periodicity(ts.obj)$scale == "quarterly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::quarter(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) } else if(class(zoo::index(ts.obj)) == "yearmon" & xts::periodicity(ts.obj)$scale == "monthly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::quarter(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) } } else if(base::is.data.frame(ts.obj) | dplyr::is.tbl(ts.obj) | data.table::is.data.table(ts.obj)){ ts.obj <- base::as.data.frame(ts.obj) col_class <- base::lapply(ts.obj, class) col_date <- base::lapply(ts.obj, lubridate::is.Date) col_POSIXt <- base::lapply(ts.obj, lubridate::is.POSIXt) if(base::any(col_date == TRUE) & base::any(col_POSIXt == TRUE)){ d <- t <- NULL d <- base::min(base::which(col_date == TRUE)) t <- base::min(base::which(col_POSIXt == TRUE)) if(d > t){ warning("The data frame contains multiple date or time objects,", "using the first one as the plot index") date_col <- t } else { warning("The data frame contains multiple date or time objects,", "using the first one as the plot index") date_col <- d } } else if(base::any(col_date == TRUE) | base::any(col_POSIXt == TRUE)){ if(base::any(col_date == TRUE)){ if(base::length(base::which(col_date == TRUE)) > 1){ date_col <- base::min(base::which(col_date == TRUE)) warning("There are multipe 'date' objects in the data frame,", "using the first one object as the plot index") } else { date_col <- base::min(base::which(col_date == TRUE)) } } else if(base::any(col_POSIXt == TRUE)){ if(base::length(base::which(col_POSIXt == TRUE)) > 1){ date_col <- base::min(base::which(col_POSIXt == TRUE)) warning("There are multipe 'POSIXt' objects in the data frame,", "using the first one as the plot index") } else { date_col <- base::min(base::which(col_POSIXt == TRUE)) } } }else { stop("No 'Date' or 'POSIXt' object available in the data frame,", "please check if the data format defined properly") } numeric_col <- base::which(col_class == "numeric" | col_class == "integer") if(base::length(numeric_col) == 0){ stop("None of the data frame columns is numeric,", "please check if the data format is defined properly") } df_temp <- NULL if(length(numeric_col) == 1){ df_temp <- base::data.frame(date = ts.obj[, date_col], y = ts.obj[, numeric_col]) } else { warning("The input object is a multiple time series object, by defualt will use only the first series as an input") df_temp <- base::data.frame(date = ts.obj[, date_col], ts.obj[, numeric_col[1]]) } dt_temp <- dt_temp %>% dplyr::arrange(date) data_diff <- NULL date_diff <- base::diff(as.numeric(df_temp$date)) if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 1){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::yday(df_temp$date), y = df_temp$y) } else if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 7){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::week(df_temp$date), y = df_temp$y) } else if(base::min(date_diff) >= 28 & base::max(date_diff) <= 31 & base::mean(date_diff) < 31 & base::mean(date_diff) > 28){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::month(df_temp$date, label = TRUE), y = df_temp$y) } else if(base::min(date_diff) >= 90 & base::max(date_diff) <= 92 & base::mean(date_diff) < 92 & base::mean(date_diff) > 90){ df <- base::data.frame(main = 
lubridate::year(df_temp$date), minor = lubridate::quarter(df_temp$date), y = df_temp$y) } else{ stop("The frequency of the input dataset is not valid, must be on of the following - daily, weekly, monthly or quarterly") } } if(!base::is.null(last)){ df <- df[(base::nrow(df) - last + 1):base::nrow(df),] } brewer_palettes <- row.names(RColorBrewer::brewer.pal.info) viridis_palettes <- c("viridis", "magma", "plasma", "inferno", "cividis") if(type %in% c("cycle", "box", "all")){ if(palette %in% brewer_palettes){ n_colors <- NULL n_colors <- RColorBrewer::brewer.pal.info$maxcolors[row.names(RColorBrewer::brewer.pal.info) == palette] colors_list <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette))(base::length(base::unique(df$minor))) } else if (palette %in% viridis_palettes){ colors_list <- viridis::viridis_pal(option = base::eval(palette))(base::length(base::unique(df$minor))) } else { warning("The value of the 'palette' argument is invalid, using the default option 'Set1'") palette <- "Set1" n_colors <- NULL n_colors <- RColorBrewer::brewer.pal.info$maxcolors[row.names(RColorBrewer::brewer.pal.info) == palette] colors_list <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette))(base::length(base::unique(df$minor))) } } if(type %in% c("normal", "all")){ if(palette_normal %in% brewer_palettes){ n_colros <- NULL n_colors <- RColorBrewer::brewer.pal.info$maxcolors[row.names(RColorBrewer::brewer.pal.info) == palette_normal] colors_list_normal <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette_normal))(base::max(df$main) - base::min(df$main) + 1) } else if (palette_normal %in% viridis_palettes){ colors_list_normal <- viridis::viridis_pal(option = base::eval(palette_normal))(base::max(df$main) - base::min(df$main) + 1) } else { warning("The value of the 'palette_normal' argument is in valid, using the default option 'Spectral'") palette_normal <- "Spectral" n_colors <- RColorBrewer::brewer.pal.info$maxcolors[row.names(RColorBrewer::brewer.pal.info) == palette_normal] colors_list_normal <- grDevices::colorRampPalette(RColorBrewer::brewer.pal(n_colors, palette_normal))(base::max(df$main) - base::min(df$main) + 1) } } if(type == "normal" | type == "all"){ p_normal <- plotly::plot_ly() for(i in base::min(df$main):base::max(df$main)){ temp <- NULL temp <- df %>% dplyr::filter(main == i) p_normal <- p_normal %>% plotly::add_lines(x = temp$minor, y = temp$y, name = i, line = list(color = colors_list_normal[i + 1 - base::min(df$main)])) } p_normal <- p_normal %>% plotly::layout(yaxis = list(title = "By Frequency Cycle")) } if(type == "cycle" | type == "all"){ df_t <- NULL df_t <- base::suppressMessages(df %>% reshape2::dcast(main ~ minor)) showlegend <- legendgroup <- NULL showlegend <- TRUE p_cycle <- plotly::plot_ly() for(i in 2:ncol(df_t)){ legendgroup <- ifelse(type == "all", base::paste("all", colnames(df_t)[i], sep = "_"), base::paste("cycle", colnames(df_t)[i], sep = "_")) p_cycle <- p_cycle %>% plotly::add_lines(x = df_t[, 1], y = df_t[, i], name = colnames(df_t)[i], line = list(color = colors_list[i - 1]), showlegend = showlegend, legendgroup = legendgroup) } p_cycle <- p_cycle %>% plotly::layout(yaxis = list(title = "By Frequency Unit")) } if(type == "box" | type == "all"){ minor <- base::levels(df$minor) showlegend <- legendgroup <- NULL showlegend <- ifelse(type == "all", FALSE, TRUE) p_box <- plotly::plot_ly() c <- NULL c <- 1 for(i in minor){ legendgroup <- ifelse(type == "all", base::paste("all", i, sep = "_"), 
base::paste("box", i, sep = "_")) p_box <- p_box %>% plotly::add_trace(data = df %>% dplyr::filter(minor == i), y = ~ y, type = "box", fillcolor = base::paste("rgba(", hex_to_rgb(colors_list[c]), ", 0.5)", sep = ""), line = list(color = colors_list[c]), marker = list(color = colors_list[c]), boxpoints = "all", jitter = 0.3, pointpos = -1.8, name = i, showlegend = showlegend, legendgroup = legendgroup) c <- c + 1 } p_box <- p_box %>% plotly::layout(yaxis = list(title = "By Frequency Unit")) } if(type == "all"){ p <- plotly::subplot(p_normal, p_cycle, p_box, nrows = 3, titleY = TRUE) } else if(type == "normal"){ p <- p_normal } else if(type == "cycle"){ p <- p_cycle } else if(type == "box"){ p <- p_box } p <- p %>% plotly::layout(title = title) return(p) } ts_polar <- function(ts.obj, title = NULL, width = 600, height = 600, left = 25, right = 25, top = 25, bottom = 25) { `%>%` <- magrittr::`%>%` df <- df_wide <- p <- obj.name <- NULL obj.name <- base::deparse(base::substitute(ts.obj)) if(is.null(title)){ title <- paste("Polar Plot -", obj.name) } else if(!is.character(title)){ title <- paste("Polar Plot -", obj.name) warning("The 'title' value is not valid, using the default title") } if (stats::is.ts(ts.obj)) { if (stats::is.mts(ts.obj)) { warning("The 'ts.obj' has multiple columns, only the first column will be plot") ts.obj <- ts.obj[, 1] } df <- base::data.frame(dec_left = floor(stats::time(ts.obj)), dec_right = stats::cycle(ts.obj), value = base::as.numeric(ts.obj)) if(stats::frequency(ts.obj) == 12){ df$dec_right <- base::factor(df$dec_right, levels = base::unique(df$dec_right), labels = base::month.abb[as.numeric(base::unique(df$dec_right))]) } else if(stats::frequency(ts.obj) == 4){ df$dec_right <- base::paste("Qr.", df$dec_right, sep = " ") } else { stop("The frequency of the series is invalid, ", "the function support only 'monthly' or 'quarterly' frequencies") } } else if (xts::is.xts(ts.obj) | zoo::is.zoo(ts.obj)) { if (!is.null(base::dim(ts.obj))) { if (base::dim(ts.obj)[2] > 1) { warning("The 'ts.obj' has multiple columns, only the first column will be plot") ts.obj <- ts.obj[, 1] } } freq <- xts::periodicity(ts.obj)[[6]] if (freq == "quarterly") { df <- base::data.frame(dec_left = lubridate::year(ts.obj), dec_right = lubridate::quarter(ts.obj), value = as.numeric(ts.obj)) } else if (freq == "monthly") { df <- base::data.frame(dec_left = lubridate::year(ts.obj), dec_right = lubridate::month(ts.obj), value = as.numeric(ts.obj)) df$dec_right <- base::factor(df$dec_right, levels = base::unique(df$dec_right), labels = base::month.abb[as.numeric(base::unique(df$dec_right))]) } else if (freq != "quarterly" & freq != "monthly") { stop("The frequency of the series is invalid,", "the function support only 'monthly' or 'quarterly' frequencies") } } p <- plotly::plot_ly(r = df$value, t = df$dec_right, width = width, height = height) %>% plotly::add_area(color = factor(df$dec_left, ordered = TRUE)) %>% plotly::layout(orientation = -90, autosize = T, title = title, margin = list( l = left, r = right, b = bottom, t = top, pad = 4 )) return(p) } ts_heatmap <- function(ts.obj, last = NULL, wday = TRUE, color = "Blues", title = NULL, padding = TRUE) { `%>%` <- magrittr::`%>%` df <- df1 <- df2 <- freq <- obj.name <- NULL diff_mean <- col_class <- date_col <- numeric_col <- NULL main <- minor <- wday1 <- week <- y <- NULL obj.name <- base::deparse(base::substitute(ts.obj)) if(base::is.null(title)){ title <- base::paste("Heatmap -", obj.name, sep = " ") } else if(!base::is.character(title)){ 
warning("The 'title' object is not character object, using the default option") title <- base::paste("Heatmap -", obj.name, sep = " ") } if(!base::is.logical(padding)){ warning("The 'padding' argument is not valid, setting it to TRUE (default)") padding <- TRUE } if(!base::is.null(last)){ if(!base::is.numeric(last) | last <= 0){ stop("The 'last' parameter is not valid") } else { if(last != base::round(last)){ stop("The 'last' parameter is not integer") } } } if(stats::is.ts(ts.obj)){ if(stats::is.mts(ts.obj)){ ts.obj <- ts.obj[,1] warning("The input object is a 'mts' class, by defualt will use only the first series as an input") } freq <- stats::frequency(ts.obj) if(base::length(ts.obj) < freq){ stop("The length of the series is smaller than the length of full cycle") } start_main <- stats::start(ts.obj)[1] start_minor <- stats::start(ts.obj)[2] if(freq %in% c(7, 52, 365, 12, 4)){ minor1 <- base::seq(from = start_minor, to = freq, by = 1) minor2 <- base::rep(x = 1:freq, length.out = base::length(ts.obj) - base::length(minor1)) main1 <- base::rep(x = start_main, length.out = base::length(minor1)) main2 <- base::rep(x = (start_main + 1):stats::end(ts.obj)[1], each = freq, len = base::length(ts.obj) - base::length(minor1)) df <- base::data.frame(main = c(main1, main2), minor = c(minor1, minor2), y = base::as.numeric(ts.obj)) if(freq == 365){ time_unit <- "Day of the year" } else if(freq == 7){ time_unit <- "Day of the week" } else if(freq == 52){ time_unit <- "Week" } else if(freq == 12){ df$minor <- base::factor(base::month.abb[df$minor], levels = month.abb) time_unit <- "Month" } else if(freq == 4){ time_unit <- "Quarter" } } else { stop("The frequency of the input object is not valid, must be on of the following - daily, weekly, monthly or quarterly") } } else if(xts::is.xts(ts.obj) | zoo::is.zoo(ts.obj)){ if(!base::is.null(base::ncol(ts.obj))){ if(base::ncol(ts.obj) > 1){ ts.obj <- ts.obj[,1] warning("The input object is a multiple time series object, by defualt will use only the first series as an input") } } if(lubridate::is.Date(zoo::index(ts.obj))){ if(xts::periodicity(ts.obj)$scale == "daily"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::yday(zoo::index(ts.obj)), wday = lubridate::wday(zoo::index(ts.obj)), wday1 = lubridate::wday(zoo::index(ts.obj),label = TRUE), y = base::as.numeric(ts.obj[,1])) time_unit <- "Day" } else if(xts::periodicity(ts.obj)$scale == "weekly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::week(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) time_unit <- "Week" } else if(xts::periodicity(ts.obj)$scale == "monthly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::month(zoo::index(ts.obj), label = TRUE), y = base::as.numeric(ts.obj[,1])) time_unit <- "Month" } else if(xts::periodicity(ts.obj)$scale == "quarterly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::quarter(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) time_unit <- "Quarter" } } else if(class(zoo::index(ts.obj)) == "yearqtr" & xts::periodicity(ts.obj)$scale == "quarterly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = lubridate::quarter(zoo::index(ts.obj)), y = base::as.numeric(ts.obj[,1])) time_unit <- "Quarter" } else if(class(zoo::index(ts.obj)) == "yearmon" & xts::periodicity(ts.obj)$scale == "monthly"){ df <- base::data.frame(main = lubridate::year(zoo::index(ts.obj)), minor = 
lubridate::month(zoo::index(ts.obj), label = TRUE), y = base::as.numeric(ts.obj[,1])) time_unit <- "Month" } } else if(base::is.data.frame(ts.obj) | dplyr::is.tbl(ts.obj) | data.table::is.data.table(ts.obj)){ ts.obj <- base::as.data.frame(ts.obj) col_class <- base::lapply(ts.obj, class) col_date <- base::lapply(ts.obj, lubridate::is.Date) col_POSIXt <- base::lapply(ts.obj, lubridate::is.POSIXt) if(base::any(col_date == TRUE) & base::any(col_POSIXt == TRUE)){ d <- t <- NULL d <- base::min(base::which(col_date == TRUE)) t <- base::min(base::which(col_POSIXt == TRUE)) if(d > t){ warning("The data frame contains multiple date or time objects,", "using the first one as the plot index") date_col <- t } else { warning("The data frame contains multiple date or time objects,", "using the first one as the plot index") date_col <- d } } else if(base::any(col_date == TRUE) | base::any(col_POSIXt == TRUE)){ if(base::any(col_date == TRUE)){ if(base::length(base::which(col_date == TRUE)) > 1){ date_col <- base::min(base::which(col_date == TRUE)) warning("There are multipe 'date' objects in the data frame,", "using the first one object as the plot index") } else { date_col <- base::min(base::which(col_date == TRUE)) } } else if(base::any(col_POSIXt == TRUE)){ if(base::length(base::which(col_POSIXt == TRUE)) > 1){ date_col <- base::min(base::which(col_POSIXt == TRUE)) warning("There are multipe 'POSIXt' objects in the data frame,", "using the first one as the plot index") } else { date_col <- base::min(base::which(col_POSIXt == TRUE)) } } }else { stop("No 'Date' or 'POSIXt' object available in the data frame,", "please check if the data format defined properly") } numeric_col <- base::which(col_class == "numeric" | col_class == "integer") if(base::length(numeric_col) == 0){ stop("None of the data frame columns is numeric,", "please check if the data format is defined properly") } df_temp <- NULL if(length(numeric_col) == 1){ df_temp <- base::data.frame(date = ts.obj[, date_col], y = ts.obj[, numeric_col]) } else { warning("The input object is a multiple time series object, by defualt will use only the first series as an input") df_temp <- base::data.frame(date = ts.obj[, date_col], ts.obj[, numeric_col[1]]) } data_diff <- NULL date_diff <- base::diff(as.numeric(df_temp$date)) if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 1){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::yday(df_temp$date), wday = lubridate::wday(df_temp$date), wday1 = lubridate::wday(df_temp$date, label = TRUE), y = df_temp$y) time_unit <- "Day" } else if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 7){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::week(df_temp$date), y = df_temp$y) time_unit <- "Week" } else if(base::min(date_diff) >= 28 & base::max(date_diff) <= 31 & base::mean(date_diff) < 31 & base::mean(date_diff) > 28){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::month(df_temp$date, label = TRUE), y = df_temp$y) time_unit <- "Month" } else if(base::min(date_diff) >= 90 & base::max(date_diff) <= 92 & base::mean(date_diff) < 92 & base::mean(date_diff) > 90){ df <- base::data.frame(main = lubridate::year(df_temp$date), minor = lubridate::quarter(df_temp$date), y = df_temp$y) time_unit <- "Quarter" } else{ stop("The frequency of the input dataset is not valid, must be on of the following - daily, weekly, monthly or quarterly") } } if(!base::is.null(last)){ df <- df[(base::nrow(df) 
- last + 1):base::nrow(df),] } if(padding){ if(time_unit %in% c("Month", "Quarter")){ xgap = 3 ygap = 3 } else if(time_unit == "Day" & wday){ xgap = 1 ygap = 1 }else { xgap = 1 ygap = NULL } } else { xgap <- NULL ygap <- NULL } if(time_unit == "Day" & wday){ p_list <- vals <- o <- cols <- colz <- NULL df$vals <- scales::rescale(df$y) vals <- unique(scales::rescale(df$y)) o <- base::order(vals, decreasing = FALSE) cols <- scales::col_numeric(color, domain = NULL)(vals) colz <- stats::setNames(base::data.frame(vals[o], cols[o]), NULL) colz_name <- colz names(colz_name) <- c("vals", "cols") df <- base::suppressMessages(df %>% dplyr::left_join(colz_name)) p_list <- base::lapply(base::min(df$main):base::max(df$main), function(i){ df1 <- NULL df1 <- df %>% dplyr::filter(main == i) %>% dplyr::arrange(minor) if(df1$minor[1] != 1){ df1$week <- base::rep(base::ceiling(df1$minor[1] / 7):53, each = 7 )[df1$wday[1]:(base::nrow(df1) + df1$wday[1] - 1)] } else if(df1$minor[1] == 1){ df1$week <- base::rep(1:53, each = 7)[df1$wday[1]:(base::nrow(df1) + df1$wday[1] - 1)] } colz_sub <- df1 %>% dplyr::select(vals, cols) %>% dplyr::arrange(-vals) colz_sub <- stats::setNames(colz_sub, NULL) df2 <- base::suppressMessages(df1 %>% dplyr::select(wday1, week, y) %>%reshape2::dcast(wday1 ~ week)) z <- base::as.matrix(df2[, -1]) z_text <- base::matrix(NA, nrow = nrow(z), ncol = ncol(z)) for(c in 1:base::ncol(z_text)){ for(r in 1:base::nrow(z_text)){ z_text[r, c] <- base::paste('Value: ', z[r,c], '<br> Year : ', i, '<br>' ,time_unit, ' :', r, sep = " ") } } if(i == base::min(df$main)){ showscale <- TRUE } else { showscale <- FALSE } df_temp <- base::suppressMessages(df1 %>% dplyr::select(y, cols) %>% dplyr::distinct() %>% dplyr::arrange(y)) df_temp$scale <- scales::rescale(df_temp$y) colz_sub <- df_temp %>% dplyr::select(scale, cols) colz_sub <- stats::setNames(colz_sub, NULL) p_day <- plotly::plot_ly(z = z, x = colnames(df2[,-1]), y = df2[,1], type = "heatmap", colorscale = colz_sub, hoverinfo = 'text', text = z_text, xgap = xgap, ygap = xgap, showscale = showscale ) %>% plotly::layout( xaxis = list(title = i, range = c(0,54)), yaxis = list(title = time_unit), annotations = list(text = i, showarrow = FALSE, yref = "paper", yanchor = "bottom", xanchor = "center", align = "center", x = 25, y = -0.25) ) %>% plotly::colorbar(limits = c(base::min(df1$y), base::max(df1$y))) return(p_day) }) p <- plotly::subplot(p_list, nrows = base::length(base::unique(df$main))) %>% plotly::layout(title = title) } else { df1 <- base::suppressMessages(df %>% reshape2::dcast(minor ~ main)) z <- base::as.matrix(df1[, -1]) z_text <- base::matrix(NA, nrow = nrow(z), ncol = ncol(z)) for(c in 1:base::ncol(z_text)){ for(r in 1:base::nrow(z_text)){ z_text[r, c] <- base::paste('Value: ', z[r,c], '<br> Year : ', base::colnames(z)[c], '<br>' ,time_unit, ' :', r, sep = " ") } } vals <- base::unique(scales::rescale(c(df$y))) o <- order(vals, decreasing = FALSE) cols <- scales::col_numeric(color, domain = NULL)(vals) colz <- stats::setNames(base::data.frame(vals[o], cols[o]), NULL) p <- plotly::plot_ly(z = z, x = colnames(df1[,-1]), y = df1[,1], type = "heatmap", colorscale = colz, hoverinfo = 'text', text = z_text, xgap = xgap, ygap = ygap ) %>% plotly::layout( title = title, xaxis = list(title = "Year"), yaxis = list(title = time_unit) ) } return(p) } ts_surface <- function(ts.obj) { `%>%` <- magrittr::`%>%` df <- p <- obj.name <- NULL obj.name <- base::deparse(base::substitute(ts.obj)) df <- TSstudio::ts_reshape(ts.obj, type = "wide") z <- 
base::as.matrix(df[, -1]) z_text <- base::matrix(NA, nrow = nrow(z), ncol = ncol(z)) time_unit <- base::trimws(base::names(df)[1]) time_unit_up <- base::paste(base::toupper(base::substr(time_unit, 1, 1)), base::substr(time_unit,2, base::nchar(time_unit)), sep = "") for(c in 1:base::ncol(z_text)){ for(r in 1:base::nrow(z_text)){ z_text[r, c] <- base::paste('Value: ', z[r,c], '<br> Year : ', base::colnames(z)[c], '<br>' ,time_unit_up, ' :', r, sep = " ") } } p <- plotly::plot_ly(z = z, x = colnames(df[,-1]), y = df[,1], hoverinfo = 'text', text = z_text ) %>% plotly::add_surface() %>% plotly::layout( title = base::paste("Surface Plot -", obj.name, sep = " "), scene = list(xaxis = list(title = "Years"), yaxis= list(title = time_unit_up), zaxis= list(title = "Value") ) ) return(p) } ts_ma <- function(ts.obj, n = c(3, 6, 9), n_left = NULL, n_right = NULL, double = NULL, plot = TRUE, show_legend = TRUE, multiple = FALSE, separate = TRUE, margin = 0.03, title = NULL, Xtitle = NULL, Ytitle = NULL){ `%>%` <- magrittr::`%>%` obj.name <- ts_merged <- ts_obj <- ts_temp <- ts_ma <- c <- p <- p_m <- ma_order <- NULL output <- titles <- dobule <- NULL left_flag <- right_flag <- k_flag <- FALSE obj.name <- base::deparse(base::substitute(ts.obj)) if(stats::is.ts(ts.obj)){ if(stats::is.mts(ts.obj)){ ts.obj <- ts.obj[,1] warning("The input object is a 'mts' class, by defualt will use only the first series as an input") } } else if(xts::is.xts(ts.obj) | zoo::is.zoo(ts.obj)){ if(!base::is.null(base::ncol(ts.obj))){ if(base::ncol(ts.obj) > 1){ ts.obj <- ts.obj[,1] warning("The input object is a multiple time series object, by defualt will use only the first series as an input") } } } if((base::is.null(n) & base::is.null(n_left) & base::is.null(n_right)) | (!base::is.numeric(n) & !base::is.numeric(n_left) & !base::is.numeric(n_right))){ stop("Neither of the moving averages arguments set properly ('n', 'n_left', 'n_right')") } if(!base::is.logical(plot)){ warning("The value of the 'plot' argument is not valid (can apply either TRUE or FALSE) and will be ignore") plot <- TRUE } if(!base::is.logical(show_legend)){ warning("The value of the 'show_legend' argument is not valid (can apply either TRUE or FALSE) and will be ignore") show_legend <- FALSE } if(!base::is.logical(separate)){ warning("The value of the 'separate' argument is not valid (can apply either TRUE or FALSE) and will be ignore") separate <- TRUE } if(!base::is.logical(multiple)){ warning("The value of the 'multiple' argument is not valid (can apply either TRUE or FALSE) and will be ignore") multiple <- FALSE } else if(base::length(n) == 1 & multiple & base::is.null(n_left) & base::is.null(n_right)){ warning("The 'multiple' aregument cannot be used when using multiple moving averages") multiple <- FALSE } else if((base::length(n) > 1 | (base::length(n) ==1 & (!base::is.null(n_left) | !base::is.null(n_right)))) & multiple){ p_m <- list() } if(!base::is.null(title)){ if(!base::is.character(title)){ warning("The value of the 'title' is not valid (only character can be used as an input), and will be ignore") title <- NULL } } else { title <- paste(obj.name, "- Moving Average", sep = " ") } if(!base::is.null(Xtitle)){ if(!base::is.character(Xtitle)){ warning("The value of the 'Xtitle' is not valid (only character can be used as an input), and will be ignore") Xtitle <- NULL } } if(!base::is.null(Ytitle)){ if(!base::is.character(Ytitle)){ warning("The value of the 'Ytitle' is not valid (only character can be used as an input), and will be ignore") Ytitle <- 
NULL } } if(!base::is.null(double)){ if(!base::is.numeric(double)){ warning("The 'double' parameter is not a numeric number and will be ignore") double <- NULL } else if(!base::all(double %% 1 == 0)){ warning("The 'double' parameter is not an integer number and will be ignore") double <- NULL } } else if(base::length(double) > 1){ warning("The 'double' parameter is restricted to single value (integer), only the first one will be used") double <- dobule[1] } if(!base::is.null(n_left)){ if(!base::is.numeric(n_left)){ stop("The 'n_left' argument is not valid, please make sure that you are using only integers as input") } else if(base::length(n_left) != 1){ warning("The 'n_left' argument has too many inputs, can hanlde only single integer. Will use only the first input") n_left <- n_left[1] } else if(n_left %% 1 != 0){ stop("The 'n_left' argument is not an integer type") } else { ma_order <- n_left } } if(!base::is.null(n_right)){ if(!base::is.numeric(n_right)){ stop("The 'n_right' argument is not valid, please make sure that you are using only integers as input") } else if(base::length(n_right) != 1){ warning("The 'n_right' argument has too many inputs, can hanlde only single integer. Will use only the first input") n_right <- n_right[1] } else if(n_right %% 1 != 0){ stop("The 'n_right' argument is not an integer type") } else if(!base::is.null(n_left)){ ma_order <- n_left + n_right } else { ma_order <- n_right } } if(!base::is.null(n)){ if(!base::is.numeric(n)){ stop("The 'n' argument is not valid, please make sure that you are using only integers as input") } else if(!base::all(n %% 1 == 0)){ stop("The 'n' argument is not valid, please make sure that you are using only integers as input") } else if(base::length(n) > 8){ warning("The 'n' parameter is restricted up to 8 inputs (integers), only the first 8 values will be used") n <- n[1:8] } else{ if(stats::is.ts(ts.obj) | xts::is.xts(ts.obj) | zoo::is.zoo(ts.obj)){ if(base::max(n) * 2 + 1 > base::as.data.frame(ts.obj) %>% base::nrow()){ stop("The length of the series is too short to apply the moving average with the given 'n' parameter") } } else if(base::is.data.frame(ts.obj) | dplyr::is.tbl(ts.obj) | data.table::is.data.table(ts.obj)){ if(base::max(n) * 2 + 1 > base::nrow(ts.obj)){ stop("The length of the series is too short to apply the moving average with the given 'n' parameter") } } } } ma_fun <- function(ts.obj, n_left, n_right){ if(stats::is.ts(ts.obj)){ ts_left <- ts_right <- ts_intersect <- ma_order <- NULL if(!base::is.null(n_left)){ for(i in 1:n_left){ ts_left <- stats::ts.intersect(stats::lag(ts.obj, k = -i), ts_left) } ma_order <- n_left } if(!base::is.null(n_right)){ for(i in 1:n_right){ ts_right <- stats::ts.intersect(stats::lag(ts.obj, k = i), ts_right) } if(!base::is.null(n_left)){ ma_order <- ma_order + n_right } else { ma_order <- n_right } } ma_order <- ma_order + 1 ts_intersect <- TSstudio::ts_sum(stats::ts.intersect(ts_left, ts.obj, ts_right)) / (ma_order) } else if(xts::is.xts(ts.obj)){ ts_left <- ts_right <- ts_intersect <- ma_order <- NULL if(!base::is.null(n_left)){ ts_left <- stats::lag(ts.obj, k = c(1:n_left)) ma_order <- n_left } if(!base::is.null(n_right)){ ts_right <- stats::lag(ts.obj, k = c((-1):(-n_right))) if(!base::is.null(n_left)){ ma_order <- ma_order + n_right ts.merged <- xts::merge.xts(ts_left, ts.obj, ts_right) } else { ma_order <- n_right ts.merged <- xts::merge.xts(ts.obj, ts_right) } } else if(!base::is.null(n_left)){ ts.merged <- xts::merge.xts(ts_left, ts.obj) } ma_order <- ma_order + 1 
ts.merged$total <- base::rowSums(ts.merged) / (ma_order) ts_intersect <- ts.merged$total }else if(zoo::is.zoo(ts.obj)){ ts_left <- ts_right <- ts_intersect <- ma_order <- NULL if(!base::is.null(n_left)){ ts_left <- stats::lag(ts.obj, k = c((-1):(-n_left))) ma_order <- n_left } if(!base::is.null(n_right)){ ts_right <- stats::lag(ts.obj, k = c(1:n_right)) if(!base::is.null(n_left)){ ma_order <- ma_order + n_right ts.merged <- zoo::merge.zoo(ts_left, ts.obj, ts_right) } else { ma_order <- n_right ts.merged <- zoo::merge.zoo(ts.obj, ts_right) } } else if(!base::is.null(n_left)){ ts.merged <- zoo::merge.zoo(ts_left, ts.obj) } ma_order <- ma_order + 1 ts.merged$total <- base::rowSums(ts.merged) / (ma_order) ts_intersect <- ts.merged$total } return(ts_intersect) } output <- list() titles <- list() if(!base::is.null(n)){ for(i in n){ ts_ma1 <- ma_title <- ma_order <- NULL ma_order <- 2 * i + 1 ts_ma1 <- ma_fun(ts.obj = ts.obj, n_left = i, n_right = i) ma_title <- paste("Two Sided Moving Average - Order", 2 * i + 1, sep = " ") base::eval(base::parse(text = base::paste("output$ma_", i, " <- ts_ma1", sep = ""))) base::eval(base::parse(text = base::paste("titles$ma_", i, " <- ma_title", sep = ""))) if(!base::is.null(double)){ ts_ma_d <- ma_title <- NULL ts_ma_d <- ma_fun(ts.obj = ts_ma1, n_left = double, n_right = double) ma_title <- paste("Double Two Sided Moving Average - Order", 2 * double + 1, "x", ma_order, sep = " ") base::eval(base::parse(text = base::paste("output$double_ma_", 2 * double + 1,"_x_", ma_order, " <- ts_ma_d", sep = ""))) base::eval(base::parse(text = base::paste("titles$double_ma_", 2 * double + 1,"_x_", ma_order, " <- ma_title", sep = ""))) } } } if(!base::is.null(n_left) | !base::is.null(n_right)){ ts_ma2 <- ma_title <- ma_order <- NULL ma_order <- 1 if(!base::is.null(n_right)){ ma_order <- ma_order + n_right } if(!base::is.null(n_left)){ ma_order <- ma_order + n_left } ts_ma2 <- ma_fun(ts.obj = ts.obj, n_left = n_left, n_right = n_right) ma_title <- paste("Two Sided Moving Average - Order", ma_order, sep = " ") base::eval(base::parse(text = base::paste("output$unbalanced_ma_", ma_order, " <- ts_ma2", sep = ""))) base::eval(base::parse(text = base::paste("titles$unbalanced_ma_", ma_order, " <- ma_title", sep = ""))) if(!base::is.null(double)){ ts_ma_d <- ma_title <- NULL ts_ma_d <- ma_fun(ts.obj = ts_ma2, n_left = double, n_right = double) ma_title <- paste("Double Two Sided Moving Average - Order", 2 * double + 1, "x", ma_order, sep = " ") base::eval(base::parse(text = base::paste("output$double_unbalanced_ma_", 2 * double + 1,"_x_", ma_order, " <- ts_ma2", sep = ""))) base::eval(base::parse(text = base::paste("titles$double_unbalanced_ma_", 2 * double + 1,"_x_", ma_order, " <- ma_title", sep = ""))) } } ma_list <- base::names(output)[base::which(base::names(output) != "series")] if(separate & multiple){ plots <- c <- NULL plots <- list() plots[[1]] <- plotly::plot_ly(x = stats::time(ts.obj), y = base::as.numeric(ts.obj), name = obj.name, type = "scatter", mode = "lines", line = list(color = " showlegend = show_legend) %>% plotly::layout(annotations = list(text = obj.name, xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) c <- 2 color_ramp <- viridis::inferno(base::length(output), alpha = 1, direction = 1, begin = 0, end = 0.9) for(i in names(output)){ plots[[c]] <- plotly::plot_ly(x = stats::time(output[[i]]), y = base::as.numeric(output[[i]]), type = "scatter", mode = "line", 
line = list(color = color_ramp[c -1])) %>% plotly::layout(annotations = list(text = titles[[i]], xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) c <- c + 1 } plot_rows <- ifelse(length(plots) > 5, base::ceiling(base::length(plots)/2), base::length(plots)) if(show_legend){ output$plot <- plotly::subplot(plots, nrows = plot_rows, margin = margin) } else { output$plot <- plotly::subplot(plots, nrows = plot_rows, margin = margin) %>% plotly::hide_legend() } } else if(!separate & multiple){ plots <- c <- NULL plots <- list() c <- 1 color_ramp <- viridis::inferno(base::length(output), alpha = 1, direction = 1, begin = 0, end = 0.9) for(i in names(output)){ plots[[c]] <- plotly::plot_ly(x = stats::time(ts.obj), y = base::as.numeric(ts.obj), name = obj.name, type = "scatter", mode = "lines", line = list(color = " showlegend = show_legend) %>% plotly::add_lines(x = stats::time(output[[i]]), y = base::as.numeric(output[[i]]), line = list(color = color_ramp[c - 1], dash = "dash") ) %>% plotly::layout(annotations = list(text = titles[[i]], xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) c <- c + 1 } plot_rows <- ifelse(length(plots) > 5, base::ceiling(base::length(plots)/2), base::length(plots)) if(show_legend){ output$plot <- plotly::subplot(plots, nrows = plot_rows, margin = margin) } else { output$plot <- plotly::subplot(plots, nrows = plot_rows, margin = margin) %>% plotly::hide_legend() } } else if(separate & !multiple){ p1 <- p2 <- c <- NULL p1 <- plotly::plot_ly(x = stats::time(ts.obj), y = base::as.numeric(ts.obj), name = obj.name, type = "scatter", mode = "lines", line = list(color = " showlegend = TRUE) %>% plotly::layout(annotations = list(text = obj.name, xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) c <- 1 color_ramp <- viridis::inferno(base::length(output), alpha = 1, direction = 1, begin = 0, end = 0.9) p2 <- plotly::plot_ly() for(i in names(output)){ p2 <- p2 %>% plotly::add_lines( x = stats::time(output[[i]]), y = base::as.numeric(output[[i]]), line = list(color = color_ramp[c], dash = "dash"), name = titles[[i]] ) c <- c + 1 } p2 <- p2 %>% plotly::layout(annotations = list(text = "Moving Average Output", xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) if(show_legend){ output$plot <- plotly::subplot(p1, p2, nrows = 2, margin = margin) } else { output$plot <- plotly::subplot(p1, p2, nrows = 2, margin = margin) %>% plotly::hide_legend() } }else if(!separate & !multiple){ p <- c <- NULL color_ramp <- viridis::inferno(base::length(output), alpha = 1, direction = 1, begin = 0, end = 0.9) p <- plotly::plot_ly(x = stats::time(ts.obj), y = base::as.numeric(ts.obj), name = obj.name, type = "scatter", mode = "lines", line = list(color = " showlegend = TRUE) %>% plotly::layout(annotations = list(text = obj.name, xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) c <- 1 for(i in names(output)){ p <- p %>% plotly::add_lines( x = stats::time(output[[i]]), y = base::as.numeric(output[[i]]), line = list(color = color_ramp[c], dash = "dash"), name = titles[[i]] ) c <- c + 1 } p <- p %>% 
plotly::layout(annotations = list(text = "Moving Average Output", xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "cneter", x = 0.5, y = 1, showarrow = FALSE, font = list(size = 12))) if(show_legend){ output$plot <- p } else { output$plot <- p %>% plotly::hide_legend() } } if(plot){ print(output$plot) } output$series <- ts.obj class(output) <- "ts_ma" return(output) } ts_quantile <- function(ts.obj, upper = 0.75, lower = 0.25, period = NULL, n = 1, title = NULL, Xtitle = NULL, Ytitle = NULL){ `%>%` <- magrittr::`%>%` freq <- quantiles <- palette <- obj.name <- NULL category <- name <- maxcolors <- to <- data <- NULL obj.name <- base::deparse(base::substitute(ts.obj)) if(base::is.null(title)){ title <- paste("Quantile Plot -", obj.name, sep = " ") } else if(!base::is.character(title)){ warning("The 'title' object is not character object, using the default option") title <- paste("Quantile Plot -", obj.name, sep = " ") } if(!base::is.null(Xtitle)){ if(!base::is.character(Xtitle)){ warning("The value of the 'Xtitle' is not valid") Xtitle <- "" } } else { Xtitle <- "" } if(!base::is.null(Ytitle)){ if(!base::is.character(Ytitle)){ warning("The value of the 'Ytitle' is not valid") Ytitle <- "" } } else { Ytitle <- "" } if(!base::is.numeric(upper)){ warning("The value of the 'upper' argument is invalid, using the default - 0.75") upper <- 0.75 } else if(upper >1 | upper <= 0){ warning("The value of the 'upper' argument is invalid, using the default - 0.75") upper <- 0.75 } if(!base::is.numeric(lower)){ warning("The value of the 'upper' argument is invalid, using the default - 0.25") lower <- 0.25 } else if(lower >=1 | lower < 0){ warning("The value of the 'lower' argument is invalid, using the default - 0.25") upper <- 0.25 } if(lower >= upper){ stop("The value of the 'lower' argument cannot be greater or equal than the 'upper' argument") } if(!base::is.numeric(n)){ warning("The value of the 'n' argument is invalid (cannot use non numeric and intgeres values as input),", " using the default value - 1") n <- 1 } else if(n%%1 != 0){ warning("The value of the 'n' argument is invalid (cannot use non integer values as input),", " using the default value - 1") n <- 1 } if(n != 1 & base::is.null(period)){ warning("The value of the 'n' argument is invalid (cannot apply more than one row when period is set to NULL),", " using the default value - 1") n <- 1 } quantiles <- c(lower, upper) palette <- base::data.frame(name = row.names(RColorBrewer::brewer.pal.info), RColorBrewer::brewer.pal.info, stringsAsFactors = FALSE) %>% dplyr::filter(category == "seq") %>% dplyr::select(name, n = maxcolors) palette <- palette[c(18, 1, 16, 3, 10, 17, 13, 8, 6, 2, 11, 5, 14, 12, 15, 9, 7, 4), ] if(xts::is.xts(ts.obj) || zoo::is.zoo(ts.obj)){ df <- base::data.frame(date = zoo::index(ts.obj), data = base::as.numeric(ts.obj)) if(xts::periodicity(ts.obj)$scale == "monthly"){ freq <- "monthly" dtick <- 12 } else if(xts::periodicity(ts.obj)$scale == "daily"){ freq <- "daily" if(base::is.null(period)){ } } else if(xts::periodicity(ts.obj)$scale == "hourly" && xts::periodicity(ts.obj)$frequency == 3600){ freq <- "hourly" } else if(xts::periodicity(ts.obj)$scale == "minute" && xts::periodicity(ts.obj)$frequency == 30){ freq <- "half-hour" } else{ stop("The frequency of the input object is invalid, the function support only 'daily', 'hourly' or 'half-hour'") } } else if(base::is.data.frame(ts.obj) | dplyr::is.tbl(ts.obj) | data.table::is.data.table(ts.obj)){ ts.obj <- base::as.data.frame(ts.obj) 
col_class <- base::lapply(ts.obj, class) col_POSIXt <- base::lapply(ts.obj, lubridate::is.POSIXt) col_date <- base::lapply(ts.obj, lubridate::is.Date) numeric_col <- base::which(col_class == "numeric" | col_class == "integer") if(base::any(col_date == TRUE) & base::any(col_POSIXt == TRUE)){ d <- t <- NULL d <- base::min(base::which(col_date == TRUE)) t <- base::min(base::which(col_POSIXt == TRUE)) if(d > t){ warning("The data frame contains multiple date or time objects,", "using the first one as the series index") date_col <- t } else { warning("The data frame contains multiple date or time objects,", "using the first one as the plot index") date_col <- d } } else if(base::any(col_date == TRUE) | base::any(col_POSIXt == TRUE)){ if(base::any(col_date == TRUE)){ if(base::length(base::which(col_date == TRUE)) > 1){ date_col <- base::min(base::which(col_date == TRUE)) warning("There are multipe 'date' objects in the data frame,", "using the first one object as the plot index") } else { date_col <- base::min(base::which(col_date == TRUE)) } } else if(base::any(col_POSIXt == TRUE)){ if(base::length(base::which(col_POSIXt == TRUE)) > 1){ date_col <- base::min(base::which(col_POSIXt == TRUE)) warning("There are multipe 'POSIXt' objects in the data frame,", "using the first one as the plot index") } else { date_col <- base::min(base::which(col_POSIXt == TRUE)) } } }else { stop("No 'Date' or 'POSIXt' object available in the data frame,", "please check if the data format defined properly") } numeric_col <- base::which(col_class == "numeric" | col_class == "integer") if(base::length(numeric_col) == 0){ stop("None of the data frame columns is numeric,", "please check if the data format is defined properly") } df <- NULL if(length(numeric_col) == 1){ df<- base::data.frame(date = ts.obj[, date_col], data = ts.obj[, numeric_col]) } else { warning("The input object is a multiple time series object, by defualt will use only the first series as an input") df <- base::data.frame(date = ts.obj[, date_col], data = ts.obj[, numeric_col[1]]) } date_diff <- NULL date_diff <- base::diff(base::as.numeric(df$date)) if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 1){ freq <- "daily" } else if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 3600){ freq <- "hourly" } else if(base::min(date_diff) == base::max(date_diff) & base::mean(date_diff) == 1800){ freq <- "half-hour" } else { stop("The frequency of the input object is invalid, the function support only 'daily', 'hourly' or 'half-hour'") } } else { stop("The input value is invalid, the function support only 'xts', 'zoo', 'data.frame', 'data.table' or 'tbl' objects") } if(!base::is.null(period)){ if(freq == "daily" && period == "weekdays"){ warning("The value of the period argument is invalid, cannot apply a 'weekdays' subset with daily frequency. 
Using the default value - NULL") period <- NULL } } if(freq == "quarterly"){ df$to <- lubridate::quarter(df$date) df$to_num <- lubridate::quarter(df$date) dtick <- 1 }else if(freq == "monthly"){ df$to <- lubridate::month(df$date, label = TRUE) df$to_num <- lubridate::month(df$date) dtick <- 1 } else if(freq == "daily"){ df$to <- lubridate::wday(df$date, label = TRUE) df$to_num <- lubridate::wday(df$date) dtick <- 1 } else if(freq == "hourly"){ df$to <- lubridate::hour(df$date) df$to_num <- lubridate::hour(df$date) dtick <- 4 } else if(freq == "half-hour"){ df$to <- lubridate::hour(df$date) + lubridate::minute(df$date) / 60 df$to_num <- lubridate::hour(df$date) + lubridate::minute(df$date) / 60 dtick <- 4 } if(base::is.null(period)){ df$period <- "Total" df$period_num <- 1 if(n != 1){ warning("The value of the 'n' argument is invalid, setting it to 1") n <- 1 } } else if(period == "weekdays"){ df$period <- lubridate::wday(df$date, label = TRUE) df$period_num <- lubridate::wday(df$date) } else if(period == "monthly"){ df$period <- lubridate::month(df$date, label = TRUE) df$period_num <- lubridate::month(df$date) } else if(period == "quarterly"){ df$period <- base::factor(base::paste("Qr.", lubridate::quarter(df$date), sep = "")) df$period_num <- lubridate::quarter(df$date) } else if(period == "yearly"){ df$period <- lubridate::year(df$date) df$period_num <- lubridate::year(df$date) - min(lubridate::year(df$date)) + 1 } min_q <- max_q <- NULL plot <- base::lapply(unique(df$period), function(x){ plot_range <- c(base::min(df$data), base::max(df$data)) colors_set <- df1 <- p <- NULL df1 <- df %>% dplyr::filter(period == x) m <- base::unique(df1$period_num) df1 <- df1 %>% dplyr::group_by(to) %>% dplyr::summarise(mean = base::mean(data, na.rm = TRUE), median = stats::median(data, na.rm = TRUE), upper = stats::quantile(data, probs = quantiles[2], na.rm = TRUE), lower = stats::quantile(data, probs = quantiles[1], na.rm = TRUE)) min_q <- base::min(df1$lower) max_q <- base::max(df1$upper) colors_set <- RColorBrewer::brewer.pal(palette$n[m], palette$name[m]) p <- plotly::plot_ly(data = df1) %>% plotly::add_ribbons(data = df1, x = ~ to, ymin = ~ lower, ymax = ~ upper, line = list(color = colors_set[4]), fillcolor = colors_set[3], showlegend = F, name = "Quantiles") %>% plotly::add_lines(x = ~ to, y = ~ median, line = list(color = colors_set[9]), name = x) %>% plotly::layout(xaxis = list(dtick = dtick), annotations = list(text = x, showarrow = FALSE, xref = "paper", yref = "paper", yanchor = "bottom", xanchor = "center", align = "center", x = 0.1, y = 0) ) output <- base::list() output$plot <- p output$min <- min_q output$max <- max_q return(output) }) min_q <- max_q <- NULL for(i in 1:base::length(plot)){ if(i == 1){ min_q <- plot[[i]]$min max_q <- plot[[i]]$max } else{ if(min_q > plot[[i]]$min){ min_q <- plot[[i]]$min } if(max_q < plot[[i]]$max){ max_q <- plot[[i]]$max } } } p <- NULL p <- base::lapply(1:base::length(plot), function(x){ plot[[x]]$plot %>% plotly::layout(yaxis = list(range = c(min_q, max_q))) }) output <- plotly::subplot(p, nrows = n, shareY = T, shareX = T, titleX = F, titleY = F) %>% plotly::layout(title = title, xaxis = list(title = Xtitle), yaxis = list(title = Ytitle)) return(output) }
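# Usage sketch (added for illustration; not part of the original source). Assumes the
# ts_ma() and ts_quantile() functions defined above, the AirPassengers dataset shipped
# with base R, and that the TSstudio, plotly and viridis packages are installed (the
# helpers call them via `::`). Wrapped in `if (FALSE)` so sourcing this file has no
# side effects.
if (FALSE) {
  # Two-sided moving averages of order 2*3+1 = 7 and 2*6+1 = 13, without printing the plot
  ma_out <- ts_ma(AirPassengers, n = c(3, 6), plot = FALSE)
  names(ma_out)   # expected to include "ma_3", "ma_6", "plot" and "series"

  # ts_quantile() expects an xts/zoo or data.frame series with daily, hourly or
  # half-hourly observations; `hourly_xts` below is a hypothetical hourly xts object:
  # ts_quantile(hourly_xts, lower = 0.25, upper = 0.75, period = "weekdays", n = 2)
}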
opt_fisher <- function(obj) { obj$est$NR_iter <- 1 obj$est$NR_status <- FALSE obj$est$max_iter_flag <- FALSE obj$est$step_err <- 1 obj$est$NR_step_len_ <- obj$est$NR_step_len if (obj$verbose > 1) { cat("\n") } if (any(is.nan(ergm.eta(obj$est$theta, obj$net$etamap)))) { obj$est$ML_status_fail <- TRUE } while (!obj$est$NR_status & (obj$est$NR_iter <= obj$est$NR_max_iter) & !obj$est$ML_status_fail) { if (obj$verbose > 1) { cat("\n Computing step.") } weights <- comp_weights(obj) if (weights == "broken") { obj$est$ML_status_fail <- TRUE } if (!obj$est$par_flag & (obj$est$ML_status_fail == FALSE)) { info_mat <- comp_info_mat(obj, weights) } else if (obj$est$par_flag & (obj$est$ML_status_fail == FALSE)) { info_mat <- comp_info_mat_par(obj, weights) } if (obj$est$ML_status_fail == FALSE) { obj$est$info_mat <- info_mat } if (obj$est$ML_status_fail == FALSE) { step_list <- comp_step(obj, weights, info_mat) step <- step_list$step if (!is.null(step_list$ML_status_fail)) { obj$est$ML_status_fail <- step_list$ML_status_fail } if (obj$est$adaptive_step_len) { obj$est$NR_step_len_ <- 1 / (1 + sum(abs(step)^2)) } if (any(is.nan(ergm.eta(obj$est$theta + step * obj$est$NR_step_len_, obj$net$etamap)))) { obj$est$ML_status_fail <- TRUE } } if (!obj$est$ML_status_fail) { if (obj$verbose > 1) { val <- sum(abs(step)) print_1 <- val print_1[val < 0.0001] <- "<.0001" print_1[val >= 0.0001] <- formatC(val[val >= 0.0001], digits = 4, format = "f") cat(paste0("\n L1-norm of increment of parameter vector: ", print_1)) } obj$est$score_val <- step_list$score_val if (obj$est$NR_iter > 1) { if (norm(obj$est$score_val, type = "2") < norm(obj$est$score_min, type = "2")) { obj$est$theta_min <- obj$est$theta obj$est$score_min <- obj$est$score_val } } else { obj$est$theta_min <- obj$est$theta obj$est$score_min <- obj$est$score_val } obj$est$theta <- obj$est$theta + step * obj$est$NR_step_len_ if (any(is.nan(ergm.eta(obj$est$theta, obj$net$etamap)))) { obj$est$ML_status_fail <- TRUE } if (obj$verbose > 1) { cat(paste0("\n Iteration: ", obj$est$NR_iter, ", ", obj$net$theta_names, " = ", round(obj$est$theta, digits = 4))) cat("\n") } if (!obj$est$ML_status_fail) { obj$est$NR_iter <- obj$est$NR_iter + 1 obj <- check_convergence(obj, step) } } else { if (obj$est$step_err < 3 & !obj$est$adaptive_step_len) { if (obj$verbose > 1) { cat("\n Optimization did not converge. 
Decreasing step length and restarting.\n") } obj$est$step_err <- obj$est$step_err + 1 obj$est$NR_step_len_ <- obj$est$NR_step_len_ * obj$est$NR_step_len_multiplier obj$est$theta <- obj$est$theta_0 obj$est$NR_iter <- 1 obj$est$ML_status_fail <- FALSE } else { cat("\n Optimization failed to converge.") cat("\n Proposed model may be near-degenerate or the MCMLE may be unstable or not exist.") cat("\n Decreasing the step length (argument 'NR_step_len' in set_options())") cat(" or increasing the MCMC sample-size may help.") } } } if (obj$est$NR_iter >= obj$est$NR_max_iter) { if (obj$verbose > 0) { cat("\n NOTE: Optimization reached maximum number of allowed iterations.") cat(paste0("\n\n - Minimum theta found:")) cat(paste0("\n ", obj$net$theta_names, " = ", round(obj$est$theta_min, digits = 4))) cat("\n\n") cat(" - L2-norm of score at this theta: ") cat(paste(round(sum(abs(obj$est$score_min^2)), digits = 4))) } obj$est$max_iter_flag <- TRUE } obj$est$theta_0 <- obj$est$theta obj$est$NR_conv_thresh <- sqrt(sum(step^2)) return(obj) } comp_weights <- function(obj) { if (obj$net$na_flag) { weights <- list(weight_full = rep(list(numeric(obj$sim$num_obs)), obj$net$num_clust), weight_cond = rep(list(numeric(obj$sim$num_obs)), obj$net$num_clust)) } else { weights <- list(weight_full = rep(list(numeric(obj$sim$num_obs)), obj$net$num_clust)) } theta_diff <- ergm.eta(obj$est$theta, obj$net$etamap) - ergm.eta(obj$est$theta_0, obj$net$etamap) for (i in 1:obj$net$num_clust) { adjust_flag <- FALSE if (any(is.na(obj$sim$stats[[i]] %*% theta_diff))) { return("broken") } norm_const_full <- sum(exp_fun(obj$sim$stats[[i]] %*% theta_diff)) if (norm_const_full == Inf) { norm_const_full <- .Machine$double.xmax adjust_flag <- TRUE } if (norm_const_full == 0) { norm_const_full <- .Machine$double.xmin adjust_flag <- TRUE } weights$weight_full[[i]] <- exp_fun(obj$sim$stats[[i]] %*% theta_diff) / norm_const_full if (adjust_flag) { weights$weight_full[[i]] <- weights$weight_full[[i]] / sum(weights$weight_full[[i]]) } if (obj$net$na_flag) { if (any(is.na(obj$sim$cond_stats[[i]] %*% theta_diff))) { return("broken") } norm_const_cond <- sum(exp_fun(obj$sim$cond_stats[[i]] %*% theta_diff)) weights$weight_cond[[i]] <- exp_fun(obj$sim$cond_stats[[i]] %*% theta_diff) / norm_const_cond } } return(weights) } comp_info_mat <- function(obj, weights) { info_mat <- rep(list(NULL), obj$net$num_clust) theta_grad_val <- t(ergm.etagrad(obj$est$theta, obj$net$etamap)) for (i in 1:obj$net$num_clust) { term_1_full <- numeric(obj$net$num_terms) term_2_full <- numeric(obj$net$num_terms) if (obj$net$na_flag) { term_1_cond <- numeric(obj$net$num_terms) term_2_cond <- numeric(obj$net$num_terms) } list_ <- as.list(data.frame(t(obj$sim$stats[[i]]))) outers_ <- lapply(list_, outer_fun) term_1_full <- Reduce("+", Map("*", outers_, weights$weight_full[[i]])) term_2_full <- as.vector(t(obj$sim$stats[[i]]) %*% weights$weight_full[[i]]) J_full <- t(theta_grad_val) %*% (term_1_full - outer(term_2_full, term_2_full)) %*% theta_grad_val if (obj$net$na_flag) { list_ <- as.list(data.frame(t(obj$sim$cond_stats[[i]]))) outers_ <- lapply(list_, outer_fun) term_1_cond <- Reduce("+", Map("*", outers_, weights$weight_cond[[i]])) term_2_cond <- as.vector(t(obj$sim$cond_stats[[i]]) %*% weights$weight_cond[[i]]) J_cond <- t(theta_grad_val) %*% (term_1_cond - outer(term_2_cond, term_2_cond)) %*% theta_grad_val } if (obj$net$na_flag) { info_mat[[i]] <- J_full - J_cond } else { info_mat[[i]] <- J_full } } return(Reduce("+", info_mat)) } comp_info_mat_par <- function(obj, 
weights) { theta_grad_val <- t(ergm.etagrad(obj$est$theta, obj$net$etamap)) par_fun <- function(i, obj, weights, theta_grad_val, job_split) { split_ <- job_split[[i]] info_mat_list <- rep(list(NULL), length(split_)) for (ll in 1:length(split_)) { cur_ind <- split_[ll] term_1_full <- numeric(obj$net$num_terms) term_2_full <- numeric(obj$net$num_terms) if (obj$net$na_flag) { term_1_cond <- numeric(obj$net$num_terms) term_2_cond <- numeric(obj$net$num_terms) } list_ <- as.list(data.frame(t(obj$sim$stats[[cur_ind]]))) outers_ <- lapply(list_, outer_fun) term_1_full <- Reduce("+", Map("*", outers_, weights$weight_full[[cur_ind]])) term_2_full <- as.vector(t(obj$sim$stats[[cur_ind]]) %*% weights$weight_full[[cur_ind]]) J_full <- t(theta_grad_val) %*% (term_1_full - outer(term_2_full, term_2_full)) %*% theta_grad_val if (obj$net$na_flag) { list_ <- as.list(data.frame(t(obj$sim$cond_stats[[cur_ind]]))) outers_ <- lapply(list_, outer_fun) term_1_cond <- Reduce("+", Map("*", outers_, weights$weight_cond[[cur_ind]])) term_2_cond <- as.vector(t(obj$sim$cond_stats[[cur_ind]]) %*% weights$weight_cond[[cur_ind]]) J_cond <- t(theta_grad_val) %*% (term_1_cond - outer(term_2_cond, term_2_cond)) %*% theta_grad_val } if (obj$net$na_flag) { info_mat <- J_full - J_cond } else { info_mat <- J_full } info_mat_list[[ll]] <- info_mat } return(info_mat_list) } if (obj$est$par_n_cores > obj$net$num_clust) { split_num <- obj$net$num_clust } else if (obj$net$num_clust >= obj$est$par_n_cores) { split_num <- obj$est$par_n_cores } split_ <- msplit(1:obj$net$num_clust, 1:split_num) if (.Platform$OS.type == "windows") { cl <- makeCluster(split_num) clusterEvalQ(cl, library(mlergm)) info_mats <- clusterApply(cl, 1:length(split_), par_fun, obj = obj, weights = weights, theta_grad_val = theta_grad_val, job_split = split_) stopCluster(cl) } else { info_mats <- mclapply(1:length(split_), par_fun, obj, weights, theta_grad_val, split_, mc.cores = split_num) } return(Reduce("+", unlist(info_mats, recursive = FALSE))) } comp_step <- function(obj, weights, info_mat) { if (is.nan(norm(info_mat))) { ML_status_fail <- TRUE cat("\n Information matrix is near-singular.") } else { ML_status_fail <- FALSE } if (!ML_status_fail) { info_mat_inv <- tryCatch({ solve(info_mat) }, error = function(e) { return("error") }) if (is.character(info_mat_inv)) { ML_status_fail <- TRUE cat("\n Information matrix is near-singular.") } } if (!ML_status_fail) { theta_grad_val <- t(ergm.etagrad(obj$est$theta, obj$net$etamap)) exp_approx <- as.vector(Reduce("+", Map("%*%", lapply(weights$weight_full, t), obj$sim$stats))) if (obj$net$na_flag) { obs_approx <- as.vector(Reduce("+", Map("%*%", lapply(weights$weight_cond, t), obj$sim$cond_stats))) } else { obs_approx <- obj$net$obs_stats_step } if ((norm(theta_grad_val) == Inf) | is.nan(norm(theta_grad_val))) { ML_status_fail <- TRUE cat("\n Gradient is not finite. 
Stopping optimization.") } if (!ML_status_fail) { score_val <- t(theta_grad_val) %*% (obs_approx - exp_approx) step <- info_mat_inv %*% score_val } } if (ML_status_fail) { step <- NA score_val <- NA } return(list(step = step, score_val = score_val, ML_status_fail = ML_status_fail)) } check_convergence <- function(obj, step) { step_norm <- sqrt(sum(step^2)) if (!(step_norm == Inf)) { conv_check <- sum(abs(step) > obj$est$adj_NR_tol) == 0 if (conv_check) { obj$est$NR_status <- TRUE } } else { obj$est$step_err <- obj$est$step_err + 1 if (obj$est$step_err < 3) { obj$est$NR_step_len_ <- obj$est$NR_step_len_ * obj$est$NR_step_len_multiplier obj$est$NR_iter <- 1 } else { obj$est$ML_status_fail <- TRUE } } return(obj) }
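# Illustration (added; not part of the original optimizer code). comp_weights() above
# builds self-normalised importance weights exp(g(y_i)'(eta - eta_0)) / sum_j exp(...)
# from statistics simulated at eta_0; the weighted average of those statistics then
# approximates their expectation under the current parameter. A standalone sketch with
# made-up statistics and parameter values:
if (FALSE) {
  set.seed(42)
  stats_sim <- matrix(rnorm(200), nrow = 100, ncol = 2)  # simulated sufficient statistics
  theta_0   <- c(0.1, -0.2)                              # value the sample was drawn under
  theta     <- c(0.3, -0.1)                              # current parameter value
  w_raw     <- exp(stats_sim %*% (theta - theta_0))
  w         <- as.vector(w_raw / sum(w_raw))             # weights sum to one
  colSums(stats_sim * w)                                 # importance-weighted mean of the statistics
}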
setMethod(
  f = "getCellSize",
  signature = signature(object = "CompactStratification"),
  definition = function(object) {
    spatialPixelsDataFrame <- suppressWarnings(as(object, "SpatialPixelsDataFrame"))
    cellSize <- getCellSize(spatialPixelsDataFrame)
    cellSize
  }
)
FC_format <- function(log.base.data = 2, log.base.labels = 10, digits = 3, ...) { function(x) FC_plain(x, log.base.data = log.base.data, log.base.labels = log.base.labels, digits = digits, ...) } FC_plain <- function(x, log.base.data = 2, log.base.labels = 10, digits = 3, ...) { if (log.base.data != log.base.labels) { if (log.base.data %in% c(2, exp(1), 10)) { x <- log.base.data^x } else if (log.base.data) { stop("Unsuported logarithm base ", log.base.data) } if (log.base.labels %in% c(2, exp(1), 10)) { x <- log(x, base = log.base.labels) } else if (log.base.labels) { stop("Unsuported logarithm base ", log.base.labels) } } if (log.base.labels) { x <- signif(x, max(digits, 1)) format(x, trim = TRUE, scientific = FALSE, ...) } else { as.character( ifelse(x < 1, paste("1/", signif(1 / x, max(digits, 1)), sep = ""), signif(x, max(digits, 1))) ) } } FC_name <- function(name = "Abundance%unit", log.base = FALSE, format = getOption("photobiology.math", default = "R.expression")) { if (!grepl("%unit$", name)) { return(name) } else { name <- sub("%unit$", "", name) } log.base <- as.integer(log.base) stopifnot(log.base %in% c(0L, 2L, 10L)) if (tolower(format) == "latex") { if (!log.base) { paste(name, " (fold change)", sep = "") } else { paste(name, " ($log_", log.base, "$ fold change)", sep = "") } } else if (format %in% c("R.expression")) { if (!log.base) { bquote(plain(.(name))~~(plain("fold change"))) } else { bquote(plain(.(name))~~(log[.(log.base)]~~plain("fold change"))) } } else if (format == "R.character") { if (!log.base) { paste(name, "(fold change)", sep = "") } else { paste(name, "(log", log.base, " fold change)", sep = "") } } else { warning("'format = ", format, "' not implemented for fold change") } } symmetric_limits <- function(x) { max <- max(abs(x)) c(-max, max) } scale_x_logFC <- function(name = "Abundance of x%unit", breaks = NULL, labels = NULL, limits = symmetric_limits, oob = scales::squish, expand = expansion(mult = 0.15, add = 0), log.base.labels = FALSE, log.base.data = 2L, ...) { if (is.null(breaks)) { if (!log.base.labels) { breaks <- 10^seq(from = -4, to = 4, by = 1) } else if (log.base.labels == 2L) { breaks <- 2^seq(from = -10, to = 10, by = 3) } else if (log.base.labels == 10L) { breaks <- 10^seq(from = -4, to = 4, by = 1) } if (log.base.data) { breaks <- log(breaks, base = log.base.data) } } if (is.null(labels)) { labels = FC_format(log.base.labels = log.base.labels, log.base.data = log.base.data) } ggplot2::scale_x_continuous(name = FC_name(name = name, log.base = log.base.labels), breaks = breaks, labels = labels, oob = oob, expand = expand, limits = limits, ...) } scale_y_logFC <- function(name = "Abundance of y%unit", breaks = NULL, labels = NULL, limits = symmetric_limits, oob = scales::squish, expand = expansion(mult = 0.15, add = 0), log.base.labels = FALSE, log.base.data = 2L, ...) { if (is.null(breaks)) { if (!log.base.labels) { breaks <- 10^seq(from = -4, to = 4, by = 1) } else if (log.base.labels == 2L) { breaks <- 2^seq(from = -10, to = 10, by = 3) } else if (log.base.labels == 10L) { breaks <- 10^seq(from = -4, to = 4, by = 1) } if (log.base.data) { breaks <- log(breaks, base = log.base.data) } } if (is.null(labels)) { labels = FC_format(log.base.labels = log.base.labels, log.base.data = log.base.data) } ggplot2::scale_y_continuous(name = FC_name(name = name, log.base = log.base.labels), breaks = breaks, labels = labels, oob = oob, expand = expand, limits = limits, ...) }
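# Usage sketch (added; not from the original source). The helpers above can be checked
# directly; the ggplot2 call assumes a hypothetical data frame `de` with columns
# logFC.x and logFC.y holding log2 fold changes, and that ggplot2 is attached.
if (FALSE) {
  symmetric_limits(c(-2, 5))                                         # -> c(-5, 5)
  FC_plain(c(-1, 0, 1), log.base.data = 2, log.base.labels = FALSE)  # -> "1/2" "1" "2"

  library(ggplot2)
  ggplot(de, aes(logFC.x, logFC.y)) +
    geom_point() +
    scale_x_logFC(name = "Treatment A%unit", log.base.data = 2L, log.base.labels = 10L) +
    scale_y_logFC(name = "Treatment B%unit", log.base.data = 2L, log.base.labels = 10L)
}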
# Complete-case analysis: drop rows with missing values, then fit a linear or
# logistic model to the supplied formula.
lod_cca <- function(formula, df, type) {
  if (!inherits(formula, "formula")) stop("formula must be a formula")
  if (!is.data.frame(df)) stop("df must be a data.frame")
  type <- deparse(substitute(type))
  type <- match.arg(type, c("linear", "logistic"))
  df <- stats::na.omit(df)
  mod.fr <- stats::model.frame(formula, data = df)
  if (type == "linear") model <- stats::lm(mod.fr)
  else model <- stats::glm(mod.fr, family = stats::binomial())
  list(model = model, formula = formula, data = df)
}

# Single imputation of exposure values below the limit of detection (LOD) with
# LOD / sqrt(2), applying the same transformation used for the exposure in the formula.
lod_root2 <- function(formula, df, lod, type) {
  if (!inherits(formula, "formula")) stop("formula must be a formula")
  if (!is.data.frame(df)) stop("df must be a data.frame")
  transform.init <- rlang::parse_expr(labels(stats::terms(formula))[1])
  exposure <- all.vars(transform.init)
  if (length(exposure) > 1) stop("Complicated transformation on exposure. See help for fix.")
  assign(exposure, quote(x))
  transform <- eval(substitute(substitute(transform.init)))
  t.function <- function(x) x
  body(t.function) <- transform
  environment(t.function) <- rlang::caller_env()
  assign("x", substitute(lod))
  transform.lod <- deparse(eval(substitute(substitute(transform))))
  lod <- deparse(substitute(lod))
  if (is.null(df[[lod]])) stop(paste(lod, "not in data."))
  type <- deparse(substitute(type))
  type <- match.arg(type, c("linear", "logistic"))
  tmp <- df[[exposure]]
  for (i in 1:nrow(df)) tmp[i] <- ifelse(is.na(tmp[i]), df[[lod]][i] / sqrt(2), tmp[i])
  df[[exposure]] <- tmp
  df[[transform.lod]] <- t.function(df[[lod]])
  df[[deparse(transform.init)]] <- t.function(df[[exposure]])
  mod.fr <- stats::model.frame(formula, data = df)
  if (type == "linear") model <- stats::lm(mod.fr)
  else model <- stats::glm(mod.fr, family = stats::binomial())
  list(model = model, formula = formula, data = df, t.function = t.function)
}
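# Usage sketch (added; the data frame, column names and seed below are hypothetical).
# lod_root2() replaces exposure values recorded as NA (below the limit of detection)
# with LOD / sqrt(2) before fitting, while lod_cca() simply drops incomplete rows:
if (FALSE) {
  set.seed(1)
  dat <- data.frame(
    y        = rnorm(100),
    exposure = ifelse(runif(100) < 0.2, NA, rlnorm(100)),  # ~20% of values below LOD -> NA
    lod      = 0.1                                         # per-row detection limit
  )
  fit_cca  <- lod_cca(y ~ exposure, dat, linear)            # complete-case analysis
  fit_sqrt <- lod_root2(y ~ exposure, dat, lod, linear)     # LOD / sqrt(2) substitution
  summary(fit_sqrt$model)
}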
log.lik.data.z.p <- function(ntrial.temp, nsucc.temp, z.temp, p.temp)
  sum(dbinom(nsucc.temp, ntrial.temp, z.temp * p.temp, log = TRUE), na.rm = TRUE)
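# Worked example (added): the helper above is the binomial log-likelihood with success
# probability z * p, summed over observations and ignoring missing counts. For three
# assays of 10 trials with z * p = 0.5 * 0.4 = 0.2:
if (FALSE) {
  log.lik.data.z.p(ntrial.temp = c(10, 10, 10),
                   nsucc.temp  = c(2, 3, NA),
                   z.temp      = 0.5,
                   p.temp      = 0.4)
  # equivalent to sum(dbinom(c(2, 3), 10, 0.2, log = TRUE))
}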
prop_correct_sentence <- function(data, responses, key, key.trial, id, id.trial, cutoff = 0, flag = FALSE, group.by = NULL, token.split = " "){ DF <- as.data.frame(data) colnames(DF)[grepl(responses, colnames(DF))] <- "Responses" colnames(DF)[grepl(id, colnames(DF))] <- "Sub.ID" colnames(DF)[grepl(id.trial, colnames(DF))] <- "Trial.ID" if (length(key) > 1){ answer_key <- data.frame("Answer" = key, "Trial.ID" = key.trial) } else { answer_key <- data.frame("Answer" = data[ , key], "Trial.ID" = data[ , key.trial]) } answer_key <- unique(answer_key) dups <- duplicated(answer_key$Trial.ID) if(sum(dups) > 0){ stop("You have duplicate trial ids for your answer key. Please check your data.") } DF <- merge(DF, answer_key, by = "Trial.ID") DF$Answer <- tolower(DF$Answer) DF$Responses <- tolower(DF$Responses) DF$Answer <- gsub("[[:punct:]]", "", DF$Answer) DF$Responses <- gsub("[[:punct:]]", "", DF$Responses) DF$Answer <- gsub("\\s+", " ", DF$Answer) DF$Answer <- trimws(DF$Answer) DF$Responses <- gsub("\\s+", " ", DF$Responses) DF$Responses <- trimws(DF$Responses) DF$Proportion.Match <- NA DF$Shared.Items <- NA DF$Corrected.Items <- NA DF$Omitted.Items <- NA DF$Extra.Items <- NA for (i in 1:nrow(DF)){ answer.tokens <- unlist(strsplit(DF$Answer[i], split = token.split)) response.tokens <- unlist(strsplit(DF$Responses[i], split = token.split)) shared <- c() omitted <- c() extras <- c() corrected <- c() shared <- intersect(answer.tokens, response.tokens) omitted <- setdiff(answer.tokens, response.tokens) extras <- setdiff(response.tokens, answer.tokens) omitted_final <- omitted extras_final <- extras if (length(extras) > 0 & length(omitted) > 0){ for (j in 1:length(extras)){ lev_score <- adist(extras[j], omitted) names(lev_score) <- omitted if(min(lev_score) <= cutoff) { corrected <- c(corrected, extras[j]) extras_final <- extras_final[!grepl(extras[j], extras_final)] omitted_final <- omitted_final[!grepl(attr(which.min(lev_score), "names"), omitted_final)] } } } if(length(shared) > 0){ DF$Shared.Items[i] <- paste(shared, collapse = " ") } else { DF$Shared.Items[i] <- NA } if(length(omitted_final) > 0){ DF$Omitted.Items[i] <- paste(omitted_final, collapse = " ") } else { DF$Omitted.Items[i] <- NA } if(length(corrected) > 0){ DF$Corrected.Items[i] <- paste(corrected, collapse = " ") } else { DF$Corrected.Items[i] <- NA } if(length(extras_final) > 0){ DF$Extra.Items[i] <- paste(extras_final, collapse = " ") } else { DF$Extra.Items[i] <- NA } DF$Proportion.Match[i] <- (length(shared) + length(corrected)) / length(answer.tokens) } k <- tapply(DF$Trial.ID, DF$Sub.ID, length) if(min(k) != max(k)){ warning("The number of trials is not the same for every participant. 
This summary represents an average of the avaliable trials for each participant.") } if (!is.null(group.by)){ DF_participant <- aggregate(DF$Proportion.Match, by = DF[ , c(group.by, "Sub.ID")], mean) colnames(DF_participant) <- c(group.by, "Sub.ID", "Proportion.Correct") } else { DF_participant <- aggregate(DF$Proportion.Match, list(DF$Sub.ID), mean) colnames(DF_participant) <- c("Sub.ID", "Proportion.Correct") } other.columns <- setdiff(colnames(DF), c("Responses", "Sub.ID", "Answer", "Scored", colnames(DF_participant))) for (col in other.columns){ DF_temp <- unique(DF[ , c("Sub.ID", col)]) if (sum(duplicated(DF_temp$Sub.ID)) == 0){ DF_participant <- merge(DF_participant, DF_temp, by = "Sub.ID") } } if (flag) { if (!is.null(group.by)){ DF_participant$Z.Score.Group <- ave(DF_participant$Proportion.Correct, DF_participant[ , group.by], FUN = scale) } DF_participant$Z.Score.Participant <- scale(DF_participant$Proportion.Correct) } if (!is.null(group.by)){ DF_group_person <- aggregate(DF$Proportion.Match, by = DF[ , c(group.by, "Sub.ID")], mean) colnames(DF_group_person) <- c(group.by,"Sub.ID", "Mean") if (length(group.by) > 1){ DF_group <- aggregate(DF_group_person$Mean, by = DF_group_person[ , group.by], mean) DF_group$SD <- aggregate(DF_group_person$Mean, by = DF_group_person[ , group.by], sd)$x DF_group$N <- aggregate(DF_group_person$Mean, by = DF_group_person[ , group.by], length)$x } else { DF_group <- aggregate(DF_group_person$Mean, by = list(DF_group_person[ , group.by]), mean) DF_group$SD <- aggregate(DF_group_person$Mean, by = list(DF_group_person[ , group.by]), sd)$x DF_group$N <- aggregate(DF_group_person$Mean, by = list(DF_group_person[ , group.by]), length)$x } colnames(DF_group) <- c(group.by, "Mean", "SD", "N") return(list(DF_Scored = DF, DF_Participant = DF_participant, DF_Group = DF_group)) } else { return(list(DF_Scored = DF, DF_Participant = DF_participant)) } }
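# Usage sketch (added; participants, trials and sentences below are made up). Scores
# free-recall responses token by token against an answer key, counting near-misses
# within the Levenshtein-distance cutoff as corrected items:
if (FALSE) {
  recall <- data.frame(
    Sub.ID   = c(1, 1, 2, 2),
    Trial.ID = c(1, 2, 1, 2),
    Response = c("the cat sat on the mat", "dogs bark loudly",
                 "a cat sat on mat", "the dog barks loud"),
    stringsAsFactors = FALSE
  )
  scored <- prop_correct_sentence(
    data      = recall,
    responses = "Response",
    key       = c("the cat sat on the mat", "the dog barks loudly"),
    key.trial = c(1, 2),
    id        = "Sub.ID",
    id.trial  = "Trial.ID",
    cutoff    = 1,       # allow one-character spelling deviations
    flag      = TRUE
  )
  scored$DF_Scored$Proportion.Match
  scored$DF_Participant
}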
context("test-getdata-method") library(lme4) my_data="my data" test_that("glm with data in x$data", { x<-list(data=my_data) class(x)<-"glm" expect_equal(getData(x),my_data) }) test_that("glm with data in x$model", { x<-list(model=my_data) class(x)<-"glm" expect_equal(getData(x),my_data) }) test_that("lm with data extrated from call",{ x1<-rnorm(10) x2<-rnorm(10) y<-x1+x2+rnorm(10) .dff<<-data.frame(xa=x1,xb=x2,yy=y) lm.1<-lm(yy~xa+xb, data=.dff) expect_equal(getData(lm.1),.dff) .dff<<-NULL }) test_that("lmer with data extrated from call",{ x1<-rnorm(10) x2<-rnorm(10) g<-gl(5,2) y<-x1+x2+rnorm(10) dff<-data.frame(xa=x1,xb=x2,yy=y,g=g) lmer.1<-lmer(yy~xa+xb+(1|g), data=dff) expect_equal(getData(lmer.1)[,colnames(dff)],dff) }) test_that("lm with data extrated from model",{ x<-list(model=my_data) class(x)<-"lm" expect_equal(getData(x),my_data) }) test_that("lm without data should raise error",{ x<-list() class(x)<-"lm" expect_error(getData(x),"Can't get data") })
library(testthat) library(rly) context("A grammar with an unused rule") Parser1 <- R6::R6Class("Parser1", public = list( tokens = c('NAME','NUMBER', 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', 'LPAREN','RPAREN'), precedence = list(c('left','PLUS','MINUS'), c('left','TIMES','DIVIDE'), c('right','UMINUS')), names = new.env(hash=TRUE), p_statement_assign = function(doc='statement : NAME EQUALS expression', p) { self$names[[as.character(p$get(2))]] <- p$get(4) }, p_statement_expr = function(doc='statement : expression', p) { cat(p$get(2)) cat('\n') }, p_expression_binop = function(doc='expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression', p) { if(p$get(3) == 'PLUS') p$set(1, p$get(2) + p$get(4)) else if(p$get(3) == 'MINUS') p$set(1, p$get(2) - p$get(4)) else if(p$get(3) == 'TIMES') p$set(1, p$get(2) * p$get(4)) else if(p$get(3) == 'DIVIDE') p$set(1, p$get(2) / p$get(4)) }, p_expression_uminus = function(doc='expression : MINUS expression %prec UMINUS', p) { p$set(1, -p$get(3)) }, p_expression_group = function(doc='expression : LPAREN expression RPAREN', p) { p$set(1, p$get(3)) }, p_expression_number = function(doc='expression : NUMBER', p) { p$set(1, p$get(2)) }, p_expression_name = function(doc='expression : NAME', p) { p$set(1, self$names[[as.character(p$get(2))]]) }, p_expression_list = function(doc='exprlist : exprlist COMMA expression', p) { }, p_error = function(p) { cat(sprintf("Syntax error at '%s'", p$value)) } ) ) Parser2 <- R6::R6Class("Parser2", public = list( tokens = c('NAME','NUMBER', 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', 'LPAREN','RPAREN'), precedence = list(c('left','PLUS','MINUS'), c('left','TIMES','DIVIDE'), c('right','UMINUS')), names = new.env(hash=TRUE), p_statement_assign = function(doc='statement : NAME EQUALS expression', p) { self$names[[as.character(p$get(2))]] <- p$get(4) }, p_statement_expr = function(doc='statement : expression', p) { cat(p$get(2)) cat('\n') }, p_expression_binop = function(doc='expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression', p) { if(p$get(3) == 'PLUS') p$set(1, p$get(2) + p$get(4)) else if(p$get(3) == 'MINUS') p$set(1, p$get(2) - p$get(4)) else if(p$get(3) == 'TIMES') p$set(1, p$get(2) * p$get(4)) else if(p$get(3) == 'DIVIDE') p$set(1, p$get(2) / p$get(4)) }, p_expression_uminus = function(doc='expression : MINUS expression %prec UMINUS', p) { p$set(1, -p$get(3)) }, p_expression_group = function(doc='expression : LPAREN expression RPAREN', p) { p$set(1, p$get(3)) }, p_expression_number = function(doc='expression : NUMBER', p) { p$set(1, p$get(2)) }, p_expression_name = function(doc='expression : NAME', p) { p$set(1, self$names[[as.character(p$get(2))]]) }, p_expression_list_2 = function(doc='exprlist : expression', p) { }, p_error = function(p) { cat(sprintf("Syntax error at '%s'", p$value)) } ) ) test_that("grammar", { expect_output(expect_error(rly::yacc(Parser1), "Unable to build parser"), "ERROR .* Symbol COMMA used, but not defined as a token or a rule WARN .* Symbol COMMA is unreachable WARN .* Symbol exprlist is unreachable ERROR .* Infinite recursion detected for symbol exprlist") expect_output(rly::yacc(Parser2), "WARN .* Rule exprlist defined, but not used WARN .* There is 1 unused rule WARN .* Symbol exprlist is unreachable") })
context("testing graph partitioning methods") dat <- data.frame( id = 1:48, mom = c(NA, NA, 2L, 2L, 2L, NA, NA, 7L, 7L, 7L, 3L, 3L, 3L, 3L, NA, 15L, 15L, 43L, 18L, NA, NA, 21L, 21L, 9L, 9L, 9L, 9L, NA, NA, 29L, 29L, 29L, 30L, 30L, NA, NA, 36L, 36L, 36L, 38L, 38L, NA, NA, 43L, 43L, 43L, 32L, 32L), dad = c(NA, NA, 1L, 1L, 1L, NA, NA, 6L, 6L, 6L, 8L, 8L, 8L, 8L, NA, 4L, 4L, 42L, 5L, NA, NA, 20L, 20L, 22L, 22L, 22L, 22L, NA, NA, 28L, 28L, 28L, 23L, 23L, NA, NA, 35L, 35L, 35L, 31L, 31L, NA, NA, 42L, 42L, 42L, 45L, 45L), sex = c(1L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L)) is_final <- c(16:17, 11:14, 24:27, 33:34, 40:41, 47:48, 19L) dat$id_weight <- ifelse(dat$id %in% is_final, 1., 1e-5) dat$father_weight <- dat$mother_weight <- ifelse(dat$id %in% is_final, 10., 1.) test_that("the _pedigree methods give the same", { cuts <- with( dat, biconnected_components_pedigree(id = id, father.id = dad, mother.id = mom)) expect_known_value(cuts, "biconnected_components_pedigree.RDS") tree <- with( dat, block_cut_tree_pedigree(id = id, father.id = dad, mother.id = mom)) expect_known_value(cuts, "block_cut_tree_pedigree.RDS") partition <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, do_reorder = FALSE)) expect_known_value(partition, "max_balanced_partition_pedigree.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, do_reorder = TRUE)) expect_equal(partition, partition_reordered) partition <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, do_reorder = FALSE)) expect_known_value(partition, "max_balanced_partition_pedigree-w_cut.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, do_reorder = TRUE)) expect_equal(partition, partition_reordered) partition <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, do_reorder = FALSE)) expect_known_value(partition, "max_balanced_partition_pedigree-w_cut-n-weights.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, do_reorder = TRUE)) expect_equal(partition, partition_reordered) partition <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, do_reorder = FALSE, father_weight = father_weight, mother_weight = mother_weight)) expect_known_value(partition, "max_balanced_partition_pedigree-w_cut-n-2xweights.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) 
partition_reordered <- with( dat, max_balanced_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, do_reorder = TRUE, father_weight = father_weight, mother_weight = mother_weight)) expect_equal(partition, partition_reordered) skip_on_os("solaris") connected_partition <- partition partition <- with( dat, unconnected_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, father_weight = father_weight, mother_weight = mother_weight)) expect_known_value(partition, "unconnected_partition_pedigree-w_cut-n-2xweights.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) partition <- with( dat, unconnected_partition_pedigree( id = id, father.id = dad, mother.id = mom, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, id_weight = id_weight, init = connected_partition$set_1)) expect_known_value(partition, "unconnected_partition_pedigree-start.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(dat$id %in% c(partition$set_1, partition$set_2))) }) dat <- readRDS("graph.RDS") test_that("the partitioning functions for graphs give the same", { cuts <- with( dat, biconnected_components(from = from, to = to)) expect_known_value(cuts, "biconnected_components.RDS") tree <- with( dat, block_cut_tree(from = from, to = to)) expect_known_value(cuts, "block_cut_tree.RDS") partition <- with( dat, max_balanced_partition(from = from, to = to, do_reorder = FALSE)) expect_known_value(partition, "max_balanced_partition.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(c(dat$from, dat$to) %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition(from = from, to = to, do_reorder = TRUE)) expect_equal(partition, partition_reordered) connected_partition <- partition partition <- with( dat, max_balanced_partition( from = from, to = to, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, do_reorder = FALSE)) expect_known_value(partition, "max_balanced_partition-w_cut.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(c(dat$from, dat$to) %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition( from = from, to = to, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, do_reorder = FALSE)) expect_equal(partition, partition_reordered) skip_on_os("solaris") partition <- with( dat, unconnected_partition( from = from, to = to, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L)) expect_known_value(partition, "unconnected_partition-w_cut.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(c(dat$from, dat$to) %in% c(partition$set_1, partition$set_2))) partition <- with( dat, unconnected_partition( from = from, to = to, slack = .1, max_kl_it = 50L, max_kl_it_inner = 1000L, init = connected_partition$set_1)) expect_known_value(partition, "unconnected_partition-start-w_cut.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(c(dat$from, dat$to) %in% c(partition$set_1, partition$set_2))) partition <- with( dat, max_balanced_partition( from = from, to = to, slack = .1, max_kl_it = 50L, do_reorder = FALSE, max_kl_it_inner = 1000L, weight_data = list(id = dat$from[3], weight = 100))) 
expect_known_value(partition, "max_balanced_partition-w_cut-n-weights.RDS") expect_true(length(intersect(partition$set_1, partition$set_2)) < 1) expect_true(all(c(dat$from, dat$to) %in% c(partition$set_1, partition$set_2))) partition_reordered <- with( dat, max_balanced_partition( from = from, to = to, slack = .1, max_kl_it = 50L, do_reorder = TRUE, max_kl_it_inner = 1000L, weight_data = list(id = dat$from[3], weight = 100))) expect_equal(partition, partition_reordered) })
transform_boundary <- function(data, noisy_centroids, new_centroids) { original_poly <- sf::st_union(data) original_boundary <- sf::st_boundary(original_poly) if (sum(class(original_boundary) == "sfc_MULTILINESTRING") == 0) { original_boundary <- sf::st_cast(original_boundary, "MULTILINESTRING") } boundary_points <- sf::st_cast(original_boundary, "MULTIPOINT") boundary_coords <- data.frame(sf::st_coordinates(boundary_points)) prop <- lengths(original_boundary[[1]]) / sum(lengths(original_boundary[[1]])) sample_size <- ceiling(prop * 1000) sample_groups <- which(sample_size > 3) subset <- boundary_coords[which(boundary_coords$L1 == sample_groups[1]),] if (sample_size[sample_groups[1]] > nrow(subset)) { index <- 1:nrow(subset) } else { index <- c(1, sort(sample(2:(nrow(subset)-1), sample_size[sample_groups[1]]-2)), nrow(subset)) } sample <- subset[index,] if (length(sample_groups) > 1) { for (i in 2:length(sample_groups)) { subset <- boundary_coords[which(boundary_coords$L1 == sample_groups[i]),] if (sample_size[sample_groups[i]] > nrow(subset)) { index <- 1:nrow(subset) } else { index <- c(1, sort(sample(2:(nrow(subset)-1), sample_size[sample_groups[i]]-2)), nrow(subset)) } sample <- rbind(sample, subset[index,]) } } samp_points <- sf::st_sfc(sf::st_multipoint(as.matrix(sample[,1:2])), crs = sf::st_crs(data)) samp_points <- sf::st_cast(samp_points, "POINT") k <- 3 dist_matrix <- matrix(as.numeric(sf::st_distance(samp_points, noisy_centroids)), ncol = length(noisy_centroids)) M <- matrix(rep(0,nrow(sample)*k), ncol = k) for (i in 1:nrow(sample)) { M[i,] <- order(dist_matrix[i,])[1:k] } W <- matrix(rep(0,nrow(sample)*k), ncol = k) for (i in 1:nrow(sample)) { W[i,] <- exp(-dist_matrix[i,M[i,]]^2 / (2*min(dist_matrix[i,M[i,]]^2))) } W <- W / rowSums(W) noisy_coords <- data.frame(sf::st_coordinates(noisy_centroids)) v <- matrix(rep(0, 2*nrow(sample)), ncol = 2) for (i in 1:nrow(v)) { x <- sum((sample[i,1] - noisy_coords[M[i,],1]) * W[i,]) y <- sum((sample[i,2] - noisy_coords[M[i,],2]) * W[i,]) v[i,] <- c(x,y) } new_coords <- data.frame(sf::st_coordinates(new_centroids)) new_boundary <- matrix(rep(0, 2*nrow(sample)), ncol = 2) R <- length(noisy_centroids) A <- sum(sf::st_area(data)) s <- as.numeric(sqrt(A/R)) for (i in 1:nrow(sample)) { weighted_centroids <- new_coords[M[i,],] * W[i,] x <- sum(weighted_centroids$X) y <- sum(weighted_centroids$Y) new_boundary[i,] <- v[i,] * sqrt(s / sqrt(sum(v[i,]^2))) + c(x,y) } new_boundary_coords <- data.frame(new_boundary) colnames(new_boundary_coords) <- c("X","Y") new_boundary_coords$L1 <- sample$L1 coords_list <- list() for (i in 1:length(sample_groups)) { points <- new_boundary_coords[which(new_boundary_coords$L1 == sample_groups[i]),1:2] coords_list[[i]] <- as.matrix(points) } new_boundary <- sf::st_sfc(sf::st_polygon(coords_list), crs = sf::st_crs(data)) if (!sf::st_is_valid(new_boundary)) { new_boundary <- sf::st_make_valid(new_boundary) if ("sfc_GEOMETRYCOLLECTION" %in% class(new_boundary)) { new_boundary <- sf::st_collection_extract(new_boundary, "POLYGON") } } new_boundary }
context("wbt_exp") test_that("Returns the exponential (base e) of values in a raster", { skip_on_cran() skip_if_not(check_whitebox_binary()) dem <- system.file("extdata", "DEM.tif", package = "whitebox") ret <- wbt_exp(input = dem, output = "output.tif") expect_match(ret, "Elapsed Time") })
require(apsimx)
apsimx_options(warn.versions = FALSE)

run.inspect.tests <- get(".run.local.tests", envir = apsimx.options)

if(run.inspect.tests) ex.dir <- auto_detect_apsimx_examples()

if(run.inspect.tests){

  i <- "Barley.apsimx"
  inspect_apsimx(i, src.dir = ex.dir, node = "Clock")
  inspect_apsimx(i, src.dir = ex.dir, node = "Weather")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Physical")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater", parm = "SummerDate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater", parm = "SummerCona")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Organic")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Chemical")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialN")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "CERESSoilTemperature")
  inspect_apsimx(i, src.dir = ex.dir, node = "SurfaceOrganicMatter")
  inspect_apsimx(i, src.dir = ex.dir, node = "MicroClimate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Crop")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("SowingFertiliser", NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 1))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 2))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 3))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 4))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 5))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Sow on a fixed date", 6))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report")
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "VariableNames")
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "EventNames")

  i <- "Maize.apsimx"
  inspect_apsimx(i, src.dir = ex.dir, node = "Clock")
  inspect_apsimx(i, src.dir = ex.dir, node = "Weather")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Physical")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Organic")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Chemical")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialN")
  inspect_apsimx(i, src.dir = ex.dir, node = "SurfaceOrganicMatter")
  inspect_apsimx(i, src.dir = ex.dir, node = "MicroClimate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Crop")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule', NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingFertiliser', NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "VariableNames")
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "EventNames")

  i <- "Oats"
  inspect_apsimx(i, src.dir = ex.dir, node = "Clock")
  inspect_apsimx(i, src.dir = ex.dir, node = "Weather")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Water")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater", parm = "SummerDate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Organic")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Chemical")
  inspect_apsimx(i, src.dir = ex.dir, node = "SurfaceOrganicMatter")
  inspect_apsimx(i, src.dir = ex.dir, node = "MicroClimate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Crop")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingFert', NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 1))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 2))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 3))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 4))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 5))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 6))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 7))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 8))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list('SowingRule1', 9))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "VariableNames")
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = "EventNames")

  i <- "OilPalm.apsimx"
  inspect_apsimx(i, src.dir = ex.dir, node = "Clock")
  inspect_apsimx(i, src.dir = ex.dir, node = "Weather")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Physical")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "SoilWater", parm = "SummerDate")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Organic")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "InitialWater")
  inspect_apsimx(i, src.dir = ex.dir, node = "Soil", soil.child = "Chemical")
  inspect_apsimx(i, src.dir = ex.dir, node = "SurfaceOrganicMatter")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager")
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Palm Management", NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Manager", parm = list("Palm Management", 3))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report")
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = list("Annual", NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = list("Monthly", NA))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = list("Annual", "Variable"))
  inspect_apsimx(i, src.dir = ex.dir, node = "Report", parm = list("Monthly", "Event"))
}

run.inspect.print.path.tests <- get(".run.local.tests", envir = apsimx.options)

if(run.inspect.print.path.tests){

  i <- "Barley.apsimx"
  pp <- inspect_apsimx(i, src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Clock")
    stop("Error in inspect_apsimx, Barley, Clock, print.path")
  pp <- inspect_apsimx(i, src.dir = ex.dir, parm = "Start", print.path = TRUE)
  if(pp != ".Simulations.Simulation.Clock.Start")
    stop("Error in inspect_apsimx, Barley, Start, print.path")
  pp <- inspect_apsimx(i, node = "Weather", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Weather")
    stop("Error in inspect_apsimx, Barley, Weather, print.path")
  pp <- inspect_apsimx(i, node = "Soil", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil")
    stop("Error in inspect_apsimx, Barley, Soil, Metadata, print.path")
  pp <- inspect_apsimx(i, node = "Soil", src.dir = ex.dir, parm = "Latitude", print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Latitude")
    stop("Error in inspect_apsimx, Barley, Soil, Metadata, Latitude, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Physical", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Physical")
    stop("Error in inspect_apsimx, Barley, Soil, Physical, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Physical", parm = "DUL", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Physical.DUL")
    stop("Error in inspect_apsimx, Barley, Soil, Physical, DUL, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Physical", parm = "Barley XF", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Physical.Barley XF")
    stop("Error in inspect_apsimx, Barley, Soil, Physical, Barley XF, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "SoilWater", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.SoilWater")
    stop("Error in inspect_apsimx, Barley, Soil, SoilWater, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "SoilWater", parm = "Salb", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.SoilWater.Salb")
    stop("Error in inspect_apsimx, Barley, Soil, SoilWater, SWCON, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "SoilWater", parm = "SWCON", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.SoilWater.SWCON")
    stop("Error in inspect_apsimx, Barley, Soil, SoilWater, SWCON, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Chemical", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Chemical")
    stop("Error in inspect_apsimx, Barley, Soil, Chemical, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Chemical", parm = "PH", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Chemical.PH")
    stop("Error in inspect_apsimx, Barley, Soil, Chemical, PH, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "InitialWater", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.InitialWater")
    stop("Error in inspect_apsimx, Barley, Soil, InitialWater, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "InitialWater", parm = "FractionFull", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.InitialWater.FractionFull")
    stop("Error in inspect_apsimx, Barley, Soil, InitialWater, FractionFull, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Organic", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Organic")
    stop("Error in inspect_apsimx, Barley, Soil, Organic, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "Organic", parm = "Carbon", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.Organic.Carbon")
    stop("Error in inspect_apsimx, Barley, Soil, Organic, Carbon, print.path")
  pp <- inspect_apsimx(i, node = "Soil", soil.child = "InitialN", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.Soil.InitialN")
    stop("Error in inspect_apsimx, Barley, Soil, InitialN, print.path")
  pp <- inspect_apsimx(i, node = "Manager", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field")
    stop("Error in inspect_apsimx, Barley, Manager, print.path")
  pp <- inspect_apsimx(i, node = "Manager", parm = list("SowingFertiliser", NA), src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.SowingFertiliser")
    stop("Error in inspect_apsimx, Barley, Manager, SowingFertiliser, print.path")
  pp <- inspect_apsimx(i, node = "Manager", parm = list("SowingFertiliser", 1), src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.SowingFertiliser.Amount")
    stop("Error in inspect_apsimx, Barley, Manager, SowingFertiliser, Amount, print.path")
  pp <- inspect_apsimx(i, node = "Surface", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.SurfaceOrganicMatter")
    stop("Error in inspect_apsimx, Barley, SurfaceOrganicMatter, print.path")
  pp <- inspect_apsimx(i, node = "Surface", parm = "InitialResidueMass", src.dir = ex.dir, print.path = TRUE)
  if(pp != ".Simulations.Simulation.Field.SurfaceOrganicMatter.InitialResidueMass")
    stop("Error in inspect_apsimx, Barley, SurfaceOrganicMatter, InitialResidueMass print.path")
  pp <- inspect_apsimx(i, node = "MicroClimate", src.dir = ex.dir, print.path = TRUE)
  pp <- inspect_apsimx(i, node = "MicroClimate", parm = "soil_albedo", src.dir = ex.dir, print.path = TRUE)
}

inspect.replacement.test <- get(".run.local.tests", envir = apsimx.options)

if(inspect.replacement.test){
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 1), node = "Base", node.child = "Clock")
  inspect_apsimx("Factorial.apsimx", src.dir = ex.dir, root = c("^Experiment","Base"), node = "Clock")
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 1), node = "Base", node.child = "Weather")
  inspect_apsimx("Factorial.apsimx", src.dir = ex.dir, root = c("^Experiment","Base"), node = "Weather")
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 1), node = "Base", node.child = "Field", node.subchild = "Soil", display.available = TRUE)
  inspect_apsimx("Factorial.apsimx", src.dir = ex.dir, root = c("^Experiment","Base"), node = "Soil")
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 2), node = "Base", node.child = "Field", node.subchild = "Soil", display.available = TRUE)
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 2), node = "Base2", node.child = "Field", node.subchild = "Soil", node.subsubchild = "Water", display.available = TRUE)
  inspect_apsimx_replacement("Factorial", src.dir = ex.dir, root = list("Experiment", 2), node = "Base2", node.child = "Field", node.subchild = "Soil", node.subsubchild = "Water", parm = "Depth")
}

inspect.replacement.test2 <- get(".run.local.tests", envir = apsimx.options)

extd.dir <- system.file("extdata", package = "apsimx")

if(inspect.replacement.test2){
  pp <- inspect_apsimx("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Weather", root = "SimulationSoybean", parm = "FileName", print.path = TRUE)
  if(pp != ".Simulations.SimulationSoybean.Weather.FileName")
    stop("pp does not match for inspect_apsimx MaizeSoybean.apsimx root = SimulationSoybean", call. = FALSE)
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, root = "SimulationSoybean", node = "Weather", parm = "FileName", print.path = TRUE)
  if(pp != ".Simulations.SimulationSoybean.Weather.FileName")
    stop("pp does not match for inspect_apsimx_replacement MaizeSoybean.apsimx root = SimulationSoybean", call. = FALSE)
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", display.available = TRUE)
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", display.available = TRUE)
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", parm = "Albedo")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", parm = "Gsmax350")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", parm = "R50")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", node.subchild = "Photosynthesis", node.subsubchild = "RUE")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", node.subchild = "Photosynthesis", node.subsubchild = "RUE", parm = "FixedValue")
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Cultivars", node.subchild = "USA", node.subsubchild = "PioneerP22T61_MG22", parm = "Vegetative.Target.FixedValue", print.path = TRUE)
  if(pp != ".Simulations.Replacements.Soybean.Cultivars.USA.PioneerP22T61_MG22.Vegetative.Target.FixedValue")
    stop("Error in inspect_apsimx_replacement, MaizeSoybean, PioneerP22T61_MG22, Vegetative.Target.FixedValue, print.path")
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Cultivars", node.subchild = "USA", node.subsubchild = "PioneerP22T61_MG22", parm = "EarlyFlowering", print.path = TRUE)
  if(pp != ".Simulations.Replacements.Soybean.Cultivars.USA.PioneerP22T61_MG22.EarlyFlowering")
    stop("Error in inspect_apsimx_replacement, MaizeSoybean, PioneerP22T61_MG22, EarlyFlowering, print.path")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, root = "SimulationSoybean", node = "Weather", parm = "FileName")
  inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, root = "SimulationMaize", node = "Weather", parm = "FileName")
  inspect_apsimx_replacement("WheatRye.apsimx", src.dir = extd.dir, node = "Wheat", node.child = "Cultivars", node.subchild = "USA", node.subsubchild = "Yecora")
  inspect_apsimx_replacement("WheatRye.apsimx", src.dir = extd.dir, node = "Wheat", node.child = "Cultivars", node.subchild = "USA", node.subsubchild = "Yecora", parm = "MinimumLeafNumber")
  inspect_apsimx_replacement("WheatRye.apsimx", src.dir = extd.dir, node = "Wheat", node.child = "Cultivars", node.subchild = "USA", node.subsubchild = "Yecora", parm = "Vrn")
}

inspect.replacement.test.parm.path <- get(".run.local.tests", envir = apsimx.options)

if(inspect.replacement.test.parm.path){
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, print.path = TRUE)
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", print.path = TRUE)
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", print.path = TRUE)
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", parm = "Albedo", print.path = TRUE)
  pp <- inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir, node = "Soybean", node.child = "Leaf", parm = "Gsmax350", print.path = TRUE)
}
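## Usage sketch: the blocks above only run when the internal switch
## ".run.local.tests" is TRUE inside the 'apsimx.options' environment.
## The assignment below is an assumption about how a developer might flip
## that switch interactively; it is not part of the test file itself.
if(FALSE){
  assign(".run.local.tests", TRUE, envir = apsimx.options)
  ex.dir <- auto_detect_apsimx_examples()
  inspect_apsimx("Barley.apsimx", src.dir = ex.dir, node = "Clock")
}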
ext <- function(wt, scale=FALSE, plot=TRUE) {
  ## input signal and dimensions of the dyadic wavelet transform
  s <- wt$original
  maxresoln <- wt$maxresoln
  np <- wt$np

  ## workspace for the extrema, flattened to a single column for the C call
  extrema <- matrix(0, nrow=maxresoln, ncol=np)
  extrema <- t(extrema)
  dim(extrema) <- c(length(extrema), 1)

  t(wt$Wf)   # result is discarded
  dim(wt$Wf) <- c(length(wt$Wf), 1)

  ## locate the local modulus maxima of the wavelet transform
  z <- .C("modulus_maxima",
          a=as.double(extrema),
          as.double(wt$Wf),
          as.integer(maxresoln),
          as.integer(np),
          PACKAGE="Rwave")

  ## reshape back to np x maxresoln
  extrema <- t(z$a)
  dim(extrema) <- c(np, maxresoln)

  cat("number of extrema =", sum(extrema!=0), "\n")

  if(plot) plotResult(extrema, s, maxresoln, scale)

  list(original=s, extrema=extrema, Sf=wt$Sf, maxresoln=maxresoln, np=np)
}
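## Usage sketch: 'ext' expects a dyadic wavelet transform object carrying
## 'original', 'Wf', 'Sf', 'maxresoln' and 'np' components, such as the list
## returned by Rwave's mw(). The signal below is only an illustrative
## assumption.
if(FALSE){
  x <- sin(seq(0, 8 * pi, length.out = 512)) + rnorm(512, sd = 0.1)
  dwt <- mw(x, maxresoln = 5)                  # dyadic wavelet transform
  mm  <- ext(dwt, scale = FALSE, plot = TRUE)  # locate the modulus maxima
  str(mm$extrema)
}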
## Assumed dependencies (not declared in this chunk): rvest, dplyr, tibble,
## purrr, janitor, rio, glue, stringr, lubridate and memoise, plus the
## package-internal helper munge_tbl().

pcaob_denied_firms <- memoise::memoise(function() {
  page <- read_html("https://pcaobus.org/International/Inspections/Pages/IssuerClientsWithoutAccess.aspx")
  tables <- page %>% html_table(fill = T)
  data <- tables[[length(tables)]] %>% as_tibble()
  data <- data %>%
    set_names(c("name_issuer", "name_audit_firm", "country_audit_firm")) %>%
    mutate(is_pcaob_denied = T)
  data %>% munge_tbl()
})

pcaob_auditors <- function(include_denied_firms = T) {
  data <- "https://pcaobus.org/RUSDocuments/FirmFilings.zip" %>% rio::import()
  data <- data %>% janitor::clean_names() %>% as_tibble()

  ## empty strings to NA
  data <- data %>% mutate_if(is.character, list(function(x) {
    case_when(x == "" ~ NA_character_, TRUE ~ x)
  }))

  data <- data %>%
    rename(
      id_firm = firm_id,
      id_firm_filing = form_filing_id,
      datetime_audit = audit_report_date,
      datetime_fiscal_period_end = fiscal_period_end_date,
      is_dual_dated = dual_dated,
      datestime_dual_audit = audit_dual_date,
      datetime_signed = signed_date,
      datetime_filing = filing_date,
      id_ticker = issuer_ticker,
      id_cik = issuer_cik,
      name_issuer = issuer_name,
      name_audit_firm = firm_name,
      name_audit_form_other = firm_other_name,
      country_audit_country = firm_country,
      country_issuer = firm_issuing_country,
      state_issuer = firm_issuing_state,
      city_issuer = firm_issuing_city,
      id_issuer = issuer_id,
      type_audit = audit_report_type,
      telephone_signartory = signed_phone_number
    ) %>%
    mutate_if(is.character, list(function(x) {
      case_when(is.na(x) ~ "", TRUE ~ x)
    })) %>%
    mutate(
      name_issuer_signatory = glue("{signed_first_name} {signed_last_name}") %>% str_squish(),
      name_engagment_partner_primary = glue(
        "{engagement_partner_first_name} {engagement_partner_last_name}"
      ) %>% str_squish(),
      name_engagment_partner_secondary = glue(
        "{secondary_engagement_partner_first_name} {secondary_engagement_partner_last_name}"
      ) %>% str_squish(),
      location_issuer = glue("{city_issuer} {state_issuer}, {country_issuer}") %>% str_squish()
    )

  data <- data %>% mutate_if(is.character, list(function(x) {
    case_when(x == "" ~ NA_character_, TRUE ~ x)
  }))

  ## The pattern argument of the original str_replace_all() cleaning step on
  ## datestime_dual_audit is truncated in the source, so that step is omitted here.

  ## parse the date-time columns
  dates <- data %>% select(matches('datetime')) %>% names()
  data <- data %>% mutate_at(dates, lubridate::mdy_hms)

  data <- data %>%
    munge_tbl(
      snake_names = T,
      unformat = T,
      include_address = F,
      convert_case = T
    )

  if (include_denied_firms) {
    tbl_denied <- pcaob_denied_firms()
    data <- data %>%
      left_join(
        tbl_denied %>% select(name_issuer, is_pcaob_denied),
        by = "name_issuer"
      )
  }
  data
}
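## Usage sketch (assumes a live connection to pcaobus.org and that the helper
## munge_tbl() is available in the calling namespace):
if(FALSE){
  filings <- pcaob_auditors(include_denied_firms = TRUE)
  denied  <- pcaob_denied_firms()
  dplyr::count(filings, name_audit_firm, sort = TRUE)
}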
message("\nTesting handling of HTTP errors")

test_that("Error for invalid endpoint", {
  skip_on_cran()
  response <- get_response("/fakeEndpoint",
                           query = list(format = "json", api_key = get_api_key()))
  expect_error(process_json_response(response),
               "\\[404\\]: [a-zA-Z0-9:/.]+fakeEndpoint")
})
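## Usage sketch (assumption): this chunk follows testthat conventions, so it
## can be run on its own once the package's get_response(),
## process_json_response() and get_api_key() helpers are loaded; the file path
## below is hypothetical.
if(FALSE){
  devtools::load_all()
  testthat::test_file("tests/testthat/test-http-errors.R")
}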
hybridize <- function(x1, x2, n, pop="hybrid",
                      res.type=c("genind","df","STRUCTURE"),
                      file=NULL, quiet=FALSE, sep="/", hyb.label="h"){
  ## checks
  if(!is.genind(x1)) stop("x1 is not a valid genind object")
  if(!is.genind(x2)) stop("x2 is not a valid genind object")
  if(!all(ploidy(x1)==ploidy(x1)[1])) stop("varying ploidy (in x1) is not supported for this function")
  if(!all(ploidy(x2)==ploidy(x2)[1])) stop("varying ploidy (in x2) is not supported for this function")
  if(ploidy(x1)[1] %% 2 != 0) stop("not implemented for odd levels of ploidy")
  if(ploidy(x1)[1] != ploidy(x2)[1]) stop("x1 and x2 have different ploidy")
  checkType(x1)
  checkType(x2)

  n <- as.integer(n)
  ploidy <- ploidy(x1)[1]
  res.type <- match.arg(res.type)

  ## pool the parents so that both carry the same set of alleles
  popNames(x1) <- "pop1"
  popNames(x2) <- "pop2"
  x1x2 <- repool(x1, x2)
  x1 <- x1x2[pop=1]
  x2 <- x1x2[pop=2]
  n1 <- nInd(x1)
  n2 <- nInd(x2)
  k <- nLoc(x1)

  ## allele frequencies per locus in each parental population
  y1 <- genind2genpop(x1, pop=factor(rep(1,n1)), quiet=TRUE)
  freq1 <- tab(y1, freq=TRUE)
  freq1 <- split(freq1, y1@loc.fac)
  freq1 <- freq1[locNames(x1)]

  y2 <- genind2genpop(x2, pop=factor(rep(1,n2)), quiet=TRUE)
  freq2 <- tab(y2, freq=TRUE)
  freq2 <- split(freq2, y2@loc.fac)
  freq2 <- freq2[locNames(x2)]

  ## sample gametes from each parent (ploidy/2 alleles per locus)
  kX1 <- lapply(freq1, function(v) t(rmultinom(n, ploidy/2, v)))
  names(kX1) <- locNames(x1)
  vec.paste1 <- NULL
  Vec.all1 <- NULL
  for(i in 1:k) {
    colnames(kX1[[i]]) <- alleles(x1)[[i]]
    vec.paste1 <- c(vec.paste1, alleles(x1)[[i]])
    Vec.all1 <- c(Vec.all1, length(alleles(x1)[[i]]))
  }

  kX2 <- lapply(freq2, function(v) t(rmultinom(n, ploidy/2, v)))
  names(kX2) <- locNames(x2)
  vec.paste2 <- NULL
  Vec.all2 <- NULL
  for(i in 1:k) {
    colnames(kX2[[i]]) <- alleles(x2)[[i]]
    vec.paste2 <- c(vec.paste2, alleles(x2)[[i]])
    Vec.all2 <- c(Vec.all2, length(alleles(x2)[[i]]))
  }

  ## combine the two sets of gametes into hybrid zygotes
  tab1 <- as.matrix(cbind.data.frame(kX1))
  colnames(tab1) <- paste(rep(locNames(x1), Vec.all1), ".", vec.paste1, sep = "")
  tab2 <- as.matrix(cbind.data.frame(kX2))
  colnames(tab2) <- paste(rep(locNames(x2), Vec.all2), ".", vec.paste2, sep = "")

  zyg.rownames <- .genlab(hyb.label, n)
  zyg.colnames <- sort(unique(c(colnames(tab1), colnames(tab2))))
  zyg <- matrix(0, nrow=n, ncol=length(zyg.colnames),
                dimnames=list(zyg.rownames, zyg.colnames))
  zyg[, colnames(tab1)] <- zyg[, colnames(tab1)] + tab1
  zyg[, colnames(tab2)] <- zyg[, colnames(tab2)] + tab2
  zyg <- genind(zyg, type="codom", ploidy=ploidy)

  ## build the requested output
  if(res.type=="STRUCTURE"){
    temp <- genind2df(repool(x1,x2,zyg), usepop=FALSE, sep=" ")
    res <- unlist(apply(temp,1,strsplit," "))
    res <- as.data.frame(matrix(res, nrow=nrow(temp), byrow=TRUE))
    colnames(res) <- rep(colnames(temp), each=ploidy)
    res[is.na(res)] <- "-9"
    pop <- rep(1:3, c(nrow(x1@tab), nrow(x2@tab), n))
    res <- cbind.data.frame(pop, res, stringsAsFactors = FALSE)
    names(res)[1] <- ""
    if(is.null(file)) {
      file <- gsub("[[:space:]]|:","-",date())
      file <- paste("hybrid",file,sep="_")
      file <- paste(file,"str",sep=".")
    }
    write.table(res, file=file, row.names = TRUE, col.names = TRUE, quote=FALSE)
    if(!quiet) cat("\nWrote results to file", file, "\n")
    return(invisible())
  }

  if(res.type=="df"){
    res <- genind2df(zyg, sep=sep)
    return(res)
  }

  if(res.type=="genind"){
    pop <- factor(rep(pop,n))
    res <- zyg
    pop(res) <- pop
    res@call <- match.call()
    return(res)
  }
}
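## Usage sketch (based on adegenet's bundled 'microbov' cattle data; the
## population names below are assumptions about that dataset):
if(FALSE){
  library(adegenet)
  data(microbov)
  pops <- seppop(microbov)
  f1 <- hybridize(pops$Salers, pops$Zebu, n = 40, pop = "Salers x Zebu")
  summary(f1)
}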
wmf <- function(dat, times, scale.min=2, scale.max.input=NULL, sigma=1.05, f0=1){
  ## argument checks
  errcheck_stdat(times, dat, "wmf")
  errcheck_wavparam(scale.min, scale.max.input, sigma, f0, times, "wmf")

  wtopt <- list(scale.min=scale.min, scale.max.input=scale.max.input,
                sigma=sigma, f0=f0)

  ## wavelet transforms for all locations, normalized before averaging
  wavarray <- warray(dat, times, scale.min, scale.max.input, sigma, f0)
  timescales <- wavarray$timescales
  wavarray <- wavarray$wavarray
  wavarray <- normforcoh(wavarray, "powall")

  ## wavelet mean field: average the normalized transforms across locations
  wmf <- apply(wavarray, c(2,3), mean, na.rm=TRUE)

  errcheck_tts(times, timescales, wmf, "wmf")

  result <- list(values=wmf, times=times, timescales=timescales, dat=dat, wtopt=wtopt)
  class(result) <- c("wmf", "tts", "list")
  return(result)
}
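## Usage sketch: 'wmf' expects a locations-by-times matrix and a matching time
## vector; in the usual wsyn workflow the data are first cleaned with
## cleandat() (clev = 2 is an arbitrary choice here) and the result can be
## plotted with plotmag(). Availability of those helpers is assumed.
if(FALSE){
  times <- 1:100
  dat <- matrix(rnorm(10 * length(times)), nrow = 10)
  dat <- cleandat(dat, times, clev = 2)$cdat
  res <- wmf(dat, times)
  plotmag(res)
}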